author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 11:32:39 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 11:32:39 +0000
commit    56ae875861ab260b80a030f50c4aff9f9dc8fff0 (patch)
tree      531412110fc901a5918c7f7442202804a83cada9
parent    Initial commit. (diff)
download  icinga2-56ae875861ab260b80a030f50c4aff9f9dc8fff0.tar.xz
          icinga2-56ae875861ab260b80a030f50c4aff9f9dc8fff0.zip
Adding upstream version 2.14.2. (upstream/2.14.2, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--.github/ISSUE_TEMPLATE/bug_report.md46
-rw-r--r--.github/ISSUE_TEMPLATE/config.yml5
-rw-r--r--.github/ISSUE_TEMPLATE/feature_request.md24
-rw-r--r--.github/workflows/authors-file.yml39
-rw-r--r--.github/workflows/docker.yml37
-rwxr-xr-x.github/workflows/linux.bash95
-rw-r--r--.github/workflows/linux.yml56
-rw-r--r--.github/workflows/rpm.yml116
-rw-r--r--.github/workflows/windows.yml53
-rw-r--r--.gitignore24
-rw-r--r--.mailmap64
-rw-r--r--AUTHORS298
-rw-r--r--CHANGELOG.md5442
-rw-r--r--CMakeLists.txt531
-rw-r--r--CONTRIBUTING.md500
-rw-r--r--COPYING339
-rw-r--r--ICINGA2_VERSION2
-rw-r--r--NEWS1
-rw-r--r--README.md97
-rw-r--r--RELEASE.md421
-rw-r--r--agent/CMakeLists.txt14
-rw-r--r--agent/windows-setup-agent/.gitignore2
-rw-r--r--agent/windows-setup-agent/App.config6
-rw-r--r--agent/windows-setup-agent/EndpointInputBox.Designer.cs177
-rw-r--r--agent/windows-setup-agent/EndpointInputBox.cs52
-rw-r--r--agent/windows-setup-agent/EndpointInputBox.resx120
-rw-r--r--agent/windows-setup-agent/GlobalZonesInputBox.Designer.cs117
-rw-r--r--agent/windows-setup-agent/GlobalZonesInputBox.cs46
-rw-r--r--agent/windows-setup-agent/GlobalZonesInputBox.resx120
-rw-r--r--agent/windows-setup-agent/Icinga2SetupAgent.csproj262
-rw-r--r--agent/windows-setup-agent/Program.cs109
-rw-r--r--agent/windows-setup-agent/Properties/AssemblyInfo.cs36
-rw-r--r--agent/windows-setup-agent/Properties/Resources.Designer.cs73
-rw-r--r--agent/windows-setup-agent/Properties/Resources.resx124
-rw-r--r--agent/windows-setup-agent/Properties/Settings.Designer.cs26
-rw-r--r--agent/windows-setup-agent/Properties/Settings.settings7
-rw-r--r--agent/windows-setup-agent/ServiceStatus.Designer.cs132
-rw-r--r--agent/windows-setup-agent/ServiceStatus.cs37
-rw-r--r--agent/windows-setup-agent/ServiceStatus.resx580
-rw-r--r--agent/windows-setup-agent/SetupWizard.Designer.cs818
-rw-r--r--agent/windows-setup-agent/SetupWizard.cs574
-rw-r--r--agent/windows-setup-agent/SetupWizard.resx1432
-rw-r--r--agent/windows-setup-agent/app.manifest58
-rw-r--r--agent/windows-setup-agent/icinga-banner.pngbin0 -> 43315 bytes
-rw-r--r--agent/windows-setup-agent/icinga.icobin0 -> 27264 bytes
-rw-r--r--choco/CMakeLists.txt6
-rw-r--r--choco/chocolateyInstall.ps1.template.cmake20
-rw-r--r--choco/chocolateyUninstall.ps120
-rwxr-xr-xchoco/icinga2.nuspec.cmake32
-rw-r--r--cmake/FindJSON.cmake9
-rw-r--r--cmake/FindUTF8CPP.cmake7
-rw-r--r--cmake/InstallConfig.cmake47
-rw-r--r--cmake/SetFullDir.cmake11
-rw-r--r--config.h.cmake37
-rw-r--r--doc/.gitignore2
-rw-r--r--doc/01-about.md70
-rw-r--r--doc/02-installation.md672
-rw-r--r--doc/02-installation.md.d/01-Debian.md3
-rw-r--r--doc/02-installation.md.d/02-Ubuntu.md3
-rw-r--r--doc/02-installation.md.d/03-Raspbian.md3
-rw-r--r--doc/02-installation.md.d/04-Fedora.md3
-rw-r--r--doc/02-installation.md.d/05-CentOS.md3
-rw-r--r--doc/02-installation.md.d/06-RHEL.md3
-rw-r--r--doc/02-installation.md.d/07-OpenSUSE.md3
-rw-r--r--doc/02-installation.md.d/08-SLES.md3
-rw-r--r--doc/02-installation.md.d/09-Amazon-Linux.md3
-rw-r--r--doc/02-installation.md.d/10-Windows.md3
-rw-r--r--doc/03-monitoring-basics.md3305
-rw-r--r--doc/04-configuration.md737
-rw-r--r--doc/05-service-monitoring.md1007
-rw-r--r--doc/06-distributed-monitoring.md3516
-rw-r--r--doc/07-agent-based-monitoring.md484
-rw-r--r--doc/08-advanced-topics.md1208
-rw-r--r--doc/09-object-types.md1958
-rw-r--r--doc/10-icinga-template-library.md6114
-rw-r--r--doc/11-cli-commands.md734
-rw-r--r--doc/12-icinga2-api.md3032
-rw-r--r--doc/13-addons.md258
-rw-r--r--doc/14-features.md1438
-rw-r--r--doc/15-troubleshooting.md1997
-rw-r--r--doc/16-upgrading-icinga-2.md977
-rw-r--r--doc/17-language-reference.md1371
-rw-r--r--doc/18-library-reference.md1961
-rw-r--r--doc/19-technical-concepts.md2287
-rw-r--r--doc/20-script-debugger.md177
-rw-r--r--doc/21-development.md2680
-rw-r--r--doc/22-selinux.md312
-rw-r--r--doc/23-migrating-from-icinga-1x.md1585
-rw-r--r--doc/24-appendix.md695
-rw-r--r--doc/CMakeLists.txt20
-rw-r--r--doc/icinga2.899
-rw-r--r--doc/images/addons/dashing_icinga2.pngbin0 -> 883821 bytes
-rw-r--r--doc/images/addons/icinga_certificate_monitoring.pngbin0 -> 251618 bytes
-rw-r--r--doc/images/addons/icinga_reporting.pngbin0 -> 312814 bytes
-rw-r--r--doc/images/addons/icingaweb2_businessprocess.pngbin0 -> 181635 bytes
-rw-r--r--doc/images/addons/icingaweb2_grafana.pngbin0 -> 573452 bytes
-rw-r--r--doc/images/addons/icingaweb2_graphite.pngbin0 -> 252196 bytes
-rw-r--r--doc/images/addons/icingaweb2_maps.pngbin0 -> 543265 bytes
-rw-r--r--doc/images/addons/nano-syntax.pngbin0 -> 48641 bytes
-rw-r--r--doc/images/addons/vim-syntax.pngbin0 -> 38385 bytes
-rw-r--r--doc/images/advanced-topics/flapping-state-graph.pngbin0 -> 8132 bytes
-rw-r--r--doc/images/advanced-topics/icinga2_external_checks_freshness_icingaweb2.pngbin0 -> 30046 bytes
-rw-r--r--doc/images/advanced-topics/icingaweb2_downtime_handled.pngbin0 -> 39967 bytes
-rw-r--r--doc/images/api/icinga2_api_powershell_ise.pngbin0 -> 503887 bytes
-rw-r--r--doc/images/configuration/icinga_web_local_server.pngbin0 -> 302951 bytes
-rw-r--r--doc/images/development/windows_boost_build_dev_cmd.pngbin0 -> 14058 bytes
-rw-r--r--doc/images/development/windows_builds_gitlab_pipeline.pngbin0 -> 18092 bytes
-rw-r--r--doc/images/development/windows_powershell_posh_git.pngbin0 -> 10374 bytes
-rw-r--r--doc/images/development/windows_visual_studio_installer_01.pngbin0 -> 93643 bytes
-rw-r--r--doc/images/development/windows_visual_studio_installer_02.pngbin0 -> 96993 bytes
-rw-r--r--doc/images/development/windows_visual_studio_installer_03.pngbin0 -> 87282 bytes
-rw-r--r--doc/images/development/windows_visual_studio_tabs_c++.pngbin0 -> 15837 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_distributed_monitoring_agent_checks_command_endpoint.pngbin0 -> 91755 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_distributed_monitoring_endpoints.pngbin0 -> 75860 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_distributed_monitoring_roles.pngbin0 -> 114197 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_distributed_monitoring_satellite_config_sync.pngbin0 -> 88721 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_distributed_monitoring_scenario_ha_masters_with_agents.pngbin0 -> 137403 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_distributed_monitoring_scenarios_master_satellites_agents.pngbin0 -> 147103 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_distributed_monitoring_scenarios_master_with_agents.pngbin0 -> 127139 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_distributed_monitoring_zones.pngbin0 -> 120164 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_distributed_windows_client_disk_icingaweb2.pngbin0 -> 104924 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_distributed_windows_nscp_api_drivesize_icingaweb2.pngbin0 -> 38947 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_distributed_windows_nscp_counter_icingaweb2.pngbin0 -> 28292 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_windows_running_service.pngbin0 -> 72832 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_windows_setup_installer_01.pngbin0 -> 105133 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_windows_setup_installer_02.pngbin0 -> 16884 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_windows_setup_installer_03.pngbin0 -> 8091 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_windows_setup_installer_04.pngbin0 -> 8140 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_windows_setup_installer_05.pngbin0 -> 92081 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_windows_setup_wizard_01.pngbin0 -> 55167 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_windows_setup_wizard_02.pngbin0 -> 61909 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_windows_setup_wizard_02_global_zone.pngbin0 -> 60764 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_windows_setup_wizard_03.pngbin0 -> 56186 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_windows_setup_wizard_04.pngbin0 -> 69902 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_windows_setup_wizard_06_finish_no_ticket.pngbin0 -> 59909 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_windows_setup_wizard_06_finish_with_ticket.pngbin0 -> 57735 bytes
-rw-r--r--doc/images/distributed-monitoring/icinga2_windows_setup_wizard_examine_config.pngbin0 -> 45359 bytes
-rw-r--r--doc/images/icingadb/icingadb-architecture.pngbin0 -> 563761 bytes
-rw-r--r--doc/images/icingadb/icingadb-daemon.pngbin0 -> 527021 bytes
-rw-r--r--doc/images/icingadb/icingadb-icinga2.pngbin0 -> 529555 bytes
-rw-r--r--doc/images/icingadb/icingadb-redis.pngbin0 -> 526785 bytes
-rw-r--r--doc/scroll.js16
-rwxr-xr-xdoc/update-links.py41
-rw-r--r--doc/win-dev.ps199
-rw-r--r--etc/CMakeLists.txt69
-rw-r--r--etc/bash_completion.d/icinga217
-rw-r--r--etc/icinga2/conf.d/app.conf1
-rw-r--r--etc/icinga2/conf.d/commands.conf196
-rw-r--r--etc/icinga2/conf.d/downtimes.conf20
-rw-r--r--etc/icinga2/conf.d/groups.conf37
-rw-r--r--etc/icinga2/conf.d/hosts.conf52
-rw-r--r--etc/icinga2/conf.d/notifications.conf33
-rw-r--r--etc/icinga2/conf.d/services.conf117
-rw-r--r--etc/icinga2/conf.d/templates.conf83
-rw-r--r--etc/icinga2/conf.d/timeperiods.conf35
-rw-r--r--etc/icinga2/conf.d/users.conf17
-rw-r--r--etc/icinga2/conf.d/win32/hosts.conf43
-rw-r--r--etc/icinga2/conf.d/win32/services.conf92
-rw-r--r--etc/icinga2/constants.conf.cmake28
-rw-r--r--etc/icinga2/features-available/api.conf10
-rw-r--r--etc/icinga2/features-available/checker.conf5
-rw-r--r--etc/icinga2/features-available/command.conf7
-rw-r--r--etc/icinga2/features-available/compatlog.conf7
-rw-r--r--etc/icinga2/features-available/debuglog.conf10
-rw-r--r--etc/icinga2/features-available/elasticsearch.conf8
-rw-r--r--etc/icinga2/features-available/gelf.conf10
-rw-r--r--etc/icinga2/features-available/graphite.conf9
-rw-r--r--etc/icinga2/features-available/icingadb.conf5
-rw-r--r--etc/icinga2/features-available/ido-mysql.conf11
-rw-r--r--etc/icinga2/features-available/ido-pgsql.conf11
-rw-r--r--etc/icinga2/features-available/influxdb.conf25
-rw-r--r--etc/icinga2/features-available/influxdb2.conf27
-rw-r--r--etc/icinga2/features-available/journald.conf7
-rw-r--r--etc/icinga2/features-available/livestatus.conf6
-rw-r--r--etc/icinga2/features-available/mainlog.conf8
-rw-r--r--etc/icinga2/features-available/notification.conf5
-rw-r--r--etc/icinga2/features-available/opentsdb.conf25
-rw-r--r--etc/icinga2/features-available/perfdata.conf6
-rw-r--r--etc/icinga2/features-available/syslog.conf8
-rw-r--r--etc/icinga2/features-available/windowseventlog.conf8
-rw-r--r--etc/icinga2/features-enabled/checker.conf1
-rw-r--r--etc/icinga2/features-enabled/notification.conf1
-rw-r--r--etc/icinga2/features-enabled/windowseventlog.conf1
-rw-r--r--etc/icinga2/icinga2.conf57
-rwxr-xr-xetc/icinga2/scripts/mail-host-notification.sh177
-rwxr-xr-xetc/icinga2/scripts/mail-service-notification.sh190
-rw-r--r--etc/icinga2/win32/constants.conf28
-rw-r--r--etc/icinga2/win32/icinga2.conf55
-rw-r--r--etc/icinga2/zones.conf63
-rw-r--r--etc/icinga2/zones.d/README2
-rw-r--r--etc/initsystem/CMakeLists.txt44
-rw-r--r--etc/initsystem/icinga2.init.d.cmake198
-rw-r--r--etc/initsystem/icinga2.service.cmake30
-rw-r--r--etc/initsystem/icinga2.service.limits.conf9
-rw-r--r--etc/initsystem/icinga2.sysconfig.cmake15
-rw-r--r--etc/initsystem/prepare-dirs.cmake52
-rw-r--r--etc/initsystem/safe-reload.cmake52
-rw-r--r--etc/logrotate.d/icinga2.cmake21
-rw-r--r--icinga-app/CMakeLists.txt100
-rw-r--r--icinga-app/icinga.cpp949
-rw-r--r--icinga-app/icinga.icobin0 -> 27264 bytes
-rw-r--r--icinga-app/icinga.rc34
-rw-r--r--icinga-app/icinga2.cmake29
-rw-r--r--icinga-installer/CMakeLists.txt47
-rw-r--r--icinga-installer/bannrbmp.bmpbin0 -> 29914 bytes
-rw-r--r--icinga-installer/dlgbmp.bmpbin0 -> 615402 bytes
-rw-r--r--icinga-installer/icinga-installer.cpp312
-rw-r--r--icinga-installer/icinga2.wixpatch.cmake52
-rw-r--r--icinga-spec-version.h.cmake1
-rw-r--r--icinga-version.h.cmake2
-rw-r--r--itl/CMakeLists.txt8
-rw-r--r--itl/command-icinga.conf57
-rw-r--r--itl/command-nscp-local.conf347
-rw-r--r--itl/command-plugins-manubulon.conf407
-rw-r--r--itl/command-plugins-windows.conf319
-rw-r--r--itl/command-plugins.conf3258
-rw-r--r--itl/hangman165
-rw-r--r--itl/itl8
-rw-r--r--itl/manubulon7
-rw-r--r--itl/nscp3
-rw-r--r--itl/plugins8
-rw-r--r--itl/plugins-contrib9
-rw-r--r--itl/plugins-contrib.d/CMakeLists.txt6
-rw-r--r--itl/plugins-contrib.d/big-data.conf112
-rw-r--r--itl/plugins-contrib.d/databases.conf973
-rw-r--r--itl/plugins-contrib.d/hardware.conf267
-rw-r--r--itl/plugins-contrib.d/icingacli.conf145
-rw-r--r--itl/plugins-contrib.d/ipmi.conf123
-rw-r--r--itl/plugins-contrib.d/logmanagement.conf160
-rw-r--r--itl/plugins-contrib.d/metrics.conf62
-rw-r--r--itl/plugins-contrib.d/network-components.conf1089
-rw-r--r--itl/plugins-contrib.d/network-services.conf123
-rw-r--r--itl/plugins-contrib.d/operating-system.conf195
-rw-r--r--itl/plugins-contrib.d/raid-controller.conf122
-rw-r--r--itl/plugins-contrib.d/smart-attributes.conf24
-rw-r--r--itl/plugins-contrib.d/storage.conf119
-rw-r--r--itl/plugins-contrib.d/systemd.conf51
-rw-r--r--itl/plugins-contrib.d/virtualization.conf92
-rw-r--r--itl/plugins-contrib.d/vmware.conf1167
-rw-r--r--itl/plugins-contrib.d/web.conf759
-rw-r--r--itl/plugins-contrib.d/windows.conf28
-rw-r--r--itl/windows-plugins3
-rw-r--r--lib/CMakeLists.txt60
-rw-r--r--lib/base/CMakeLists.txt160
-rw-r--r--lib/base/application-environment.cpp17
-rw-r--r--lib/base/application-version.cpp17
-rw-r--r--lib/base/application.cpp1238
-rw-r--r--lib/base/application.hpp170
-rw-r--r--lib/base/application.ti14
-rw-r--r--lib/base/array-script.cpp260
-rw-r--r--lib/base/array.cpp380
-rw-r--r--lib/base/array.hpp117
-rw-r--r--lib/base/atomic-file.cpp123
-rw-r--r--lib/base/atomic-file.hpp41
-rw-r--r--lib/base/atomic.hpp91
-rw-r--r--lib/base/base64.cpp53
-rw-r--r--lib/base/base64.hpp25
-rw-r--r--lib/base/boolean-script.cpp26
-rw-r--r--lib/base/boolean.cpp9
-rw-r--r--lib/base/boolean.hpp27
-rw-r--r--lib/base/bulker.hpp119
-rw-r--r--lib/base/configobject-script.cpp36
-rw-r--r--lib/base/configobject.cpp701
-rw-r--r--lib/base/configobject.hpp101
-rw-r--r--lib/base/configobject.ti94
-rw-r--r--lib/base/configtype.cpp76
-rw-r--r--lib/base/configtype.hpp64
-rw-r--r--lib/base/configuration.cpp379
-rw-r--r--lib/base/configuration.hpp157
-rw-r--r--lib/base/configuration.ti164
-rw-r--r--lib/base/configwriter.cpp260
-rw-r--r--lib/base/configwriter.hpp67
-rw-r--r--lib/base/console.cpp203
-rw-r--r--lib/base/console.hpp91
-rw-r--r--lib/base/context.cpp64
-rw-r--r--lib/base/context.hpp54
-rw-r--r--lib/base/convert.cpp46
-rw-r--r--lib/base/convert.hpp84
-rw-r--r--lib/base/datetime-script.cpp28
-rw-r--r--lib/base/datetime.cpp58
-rw-r--r--lib/base/datetime.hpp40
-rw-r--r--lib/base/datetime.ti15
-rw-r--r--lib/base/debug.hpp49
-rw-r--r--lib/base/debuginfo.cpp98
-rw-r--r--lib/base/debuginfo.hpp36
-rw-r--r--lib/base/defer.hpp54
-rw-r--r--lib/base/dependencygraph.cpp50
-rw-r--r--lib/base/dependencygraph.hpp34
-rw-r--r--lib/base/dictionary-script.cpp119
-rw-r--r--lib/base/dictionary.cpp317
-rw-r--r--lib/base/dictionary.hpp91
-rw-r--r--lib/base/exception.cpp507
-rw-r--r--lib/base/exception.hpp166
-rw-r--r--lib/base/fifo.cpp124
-rw-r--r--lib/base/fifo.hpp48
-rw-r--r--lib/base/filelogger.cpp59
-rw-r--r--lib/base/filelogger.hpp33
-rw-r--r--lib/base/filelogger.ti17
-rw-r--r--lib/base/function-script.cpp50
-rw-r--r--lib/base/function.cpp37
-rw-r--r--lib/base/function.hpp89
-rw-r--r--lib/base/function.ti18
-rw-r--r--lib/base/functionwrapper.hpp149
-rw-r--r--lib/base/i2-base.hpp79
-rw-r--r--lib/base/initialize.cpp13
-rw-r--r--lib/base/initialize.hpp49
-rw-r--r--lib/base/io-engine.cpp155
-rw-r--r--lib/base/io-engine.hpp216
-rw-r--r--lib/base/journaldlogger.cpp87
-rw-r--r--lib/base/journaldlogger.hpp44
-rw-r--r--lib/base/journaldlogger.ti21
-rw-r--r--lib/base/json-script.cpp28
-rw-r--r--lib/base/json.cpp525
-rw-r--r--lib/base/json.hpp19
-rw-r--r--lib/base/lazy-init.hpp72
-rw-r--r--lib/base/library.cpp68
-rw-r--r--lib/base/library.hpp41
-rw-r--r--lib/base/loader.cpp38
-rw-r--r--lib/base/loader.hpp61
-rw-r--r--lib/base/logger.cpp326
-rw-r--r--lib/base/logger.hpp149
-rw-r--r--lib/base/logger.ti17
-rw-r--r--lib/base/math-script.cpp184
-rw-r--r--lib/base/namespace-script.cpp84
-rw-r--r--lib/base/namespace.cpp189
-rw-r--r--lib/base/namespace.hpp105
-rw-r--r--lib/base/netstring.cpp334
-rw-r--r--lib/base/netstring.hpp43
-rw-r--r--lib/base/networkstream.cpp81
-rw-r--r--lib/base/networkstream.hpp39
-rw-r--r--lib/base/number-script.cpp25
-rw-r--r--lib/base/number.cpp9
-rw-r--r--lib/base/number.hpp27
-rw-r--r--lib/base/object-packer.cpp246
-rw-r--r--lib/base/object-packer.hpp18
-rw-r--r--lib/base/object-script.cpp45
-rw-r--r--lib/base/object.cpp275
-rw-r--r--lib/base/object.hpp225
-rw-r--r--lib/base/objectlock.cpp55
-rw-r--r--lib/base/objectlock.hpp35
-rw-r--r--lib/base/objecttype.cpp57
-rw-r--r--lib/base/objecttype.hpp29
-rw-r--r--lib/base/perfdatavalue.cpp395
-rw-r--r--lib/base/perfdatavalue.hpp38
-rw-r--r--lib/base/perfdatavalue.ti20
-rw-r--r--lib/base/primitivetype.cpp64
-rw-r--r--lib/base/primitivetype.hpp62
-rw-r--r--lib/base/process.cpp1207
-rw-r--r--lib/base/process.hpp117
-rw-r--r--lib/base/reference-script.cpp35
-rw-r--r--lib/base/reference.cpp38
-rw-r--r--lib/base/reference.hpp40
-rw-r--r--lib/base/registry.hpp121
-rw-r--r--lib/base/ringbuffer.cpp91
-rw-r--r--lib/base/ringbuffer.hpp45
-rw-r--r--lib/base/scriptframe.cpp130
-rw-r--r--lib/base/scriptframe.hpp42
-rw-r--r--lib/base/scriptglobal.cpp110
-rw-r--r--lib/base/scriptglobal.hpp35
-rw-r--r--lib/base/scriptutils.cpp570
-rw-r--r--lib/base/scriptutils.hpp54
-rw-r--r--lib/base/serializer.cpp331
-rw-r--r--lib/base/serializer.hpp34
-rw-r--r--lib/base/shared-memory.hpp45
-rw-r--r--lib/base/shared-object.hpp73
-rw-r--r--lib/base/shared.hpp101
-rw-r--r--lib/base/singleton.hpp29
-rw-r--r--lib/base/socket.cpp430
-rw-r--r--lib/base/socket.hpp66
-rw-r--r--lib/base/stacktrace.cpp43
-rw-r--r--lib/base/stacktrace.hpp31
-rw-r--r--lib/base/statsfunction.hpp17
-rw-r--r--lib/base/stdiostream.cpp57
-rw-r--r--lib/base/stdiostream.hpp36
-rw-r--r--lib/base/stream.cpp149
-rw-r--r--lib/base/stream.hpp133
-rw-r--r--lib/base/streamlogger.cpp119
-rw-r--r--lib/base/streamlogger.hpp47
-rw-r--r--lib/base/streamlogger.ti14
-rw-r--r--lib/base/string-script.cpp138
-rw-r--r--lib/base/string.cpp468
-rw-r--r--lib/base/string.hpp208
-rw-r--r--lib/base/sysloglogger.cpp144
-rw-r--r--lib/base/sysloglogger.hpp56
-rw-r--r--lib/base/sysloglogger.ti19
-rw-r--r--lib/base/tcpsocket.cpp211
-rw-r--r--lib/base/tcpsocket.hpp102
-rw-r--r--lib/base/threadpool.cpp51
-rw-r--r--lib/base/threadpool.hpp101
-rw-r--r--lib/base/timer.cpp354
-rw-r--r--lib/base/timer.hpp65
-rw-r--r--lib/base/tlsstream.cpp71
-rw-r--r--lib/base/tlsstream.hpp129
-rw-r--r--lib/base/tlsutility.cpp1086
-rw-r--r--lib/base/tlsutility.hpp94
-rw-r--r--lib/base/type.cpp217
-rw-r--r--lib/base/type.hpp148
-rw-r--r--lib/base/typetype-script.cpp31
-rw-r--r--lib/base/unix.hpp49
-rw-r--r--lib/base/unixsocket.cpp53
-rw-r--r--lib/base/unixsocket.hpp32
-rw-r--r--lib/base/utility.cpp1975
-rw-r--r--lib/base/utility.hpp200
-rw-r--r--lib/base/value-operators.cpp719
-rw-r--r--lib/base/value.cpp264
-rw-r--r--lib/base/value.hpp251
-rw-r--r--lib/base/win32.hpp35
-rw-r--r--lib/base/windowseventloglogger-provider.mc5
-rw-r--r--lib/base/windowseventloglogger.cpp83
-rw-r--r--lib/base/windowseventloglogger.hpp37
-rw-r--r--lib/base/windowseventloglogger.ti15
-rw-r--r--lib/base/workqueue.cpp318
-rw-r--r--lib/base/workqueue.hpp154
-rw-r--r--lib/checker/CMakeLists.txt34
-rw-r--r--lib/checker/checkercomponent.cpp358
-rw-r--r--lib/checker/checkercomponent.hpp99
-rw-r--r--lib/checker/checkercomponent.ti18
-rw-r--r--lib/cli/CMakeLists.txt49
-rw-r--r--lib/cli/apisetupcommand.cpp59
-rw-r--r--lib/cli/apisetupcommand.hpp31
-rw-r--r--lib/cli/apisetuputility.cpp205
-rw-r--r--lib/cli/apisetuputility.hpp39
-rw-r--r--lib/cli/calistcommand.cpp89
-rw-r--r--lib/cli/calistcommand.hpp33
-rw-r--r--lib/cli/caremovecommand.cpp93
-rw-r--r--lib/cli/caremovecommand.hpp30
-rw-r--r--lib/cli/carestorecommand.cpp88
-rw-r--r--lib/cli/carestorecommand.hpp30
-rw-r--r--lib/cli/casigncommand.cpp108
-rw-r--r--lib/cli/casigncommand.hpp30
-rw-r--r--lib/cli/clicommand.cpp373
-rw-r--r--lib/cli/clicommand.hpp79
-rw-r--r--lib/cli/consolecommand.cpp723
-rw-r--r--lib/cli/consolecommand.hpp61
-rw-r--r--lib/cli/daemoncommand.cpp882
-rw-r--r--lib/cli/daemoncommand.hpp31
-rw-r--r--lib/cli/daemonutility.cpp285
-rw-r--r--lib/cli/daemonutility.hpp27
-rw-r--r--lib/cli/editline.hpp19
-rw-r--r--lib/cli/featuredisablecommand.cpp55
-rw-r--r--lib/cli/featuredisablecommand.hpp33
-rw-r--r--lib/cli/featureenablecommand.cpp50
-rw-r--r--lib/cli/featureenablecommand.hpp32
-rw-r--r--lib/cli/featurelistcommand.cpp34
-rw-r--r--lib/cli/featurelistcommand.hpp28
-rw-r--r--lib/cli/featureutility.cpp243
-rw-r--r--lib/cli/featureutility.hpp42
-rw-r--r--lib/cli/i2-cli.hpp14
-rw-r--r--lib/cli/internalsignalcommand.cpp67
-rw-r--r--lib/cli/internalsignalcommand.hpp33
-rw-r--r--lib/cli/nodesetupcommand.cpp559
-rw-r--r--lib/cli/nodesetupcommand.hpp36
-rw-r--r--lib/cli/nodeutility.cpp378
-rw-r--r--lib/cli/nodeutility.hpp49
-rw-r--r--lib/cli/nodewizardcommand.cpp815
-rw-r--r--lib/cli/nodewizardcommand.hpp36
-rw-r--r--lib/cli/objectlistcommand.cpp145
-rw-r--r--lib/cli/objectlistcommand.hpp36
-rw-r--r--lib/cli/objectlistutility.cpp155
-rw-r--r--lib/cli/objectlistutility.hpp34
-rw-r--r--lib/cli/pkinewcacommand.cpp29
-rw-r--r--lib/cli/pkinewcacommand.hpp29
-rw-r--r--lib/cli/pkinewcertcommand.cpp66
-rw-r--r--lib/cli/pkinewcertcommand.hpp32
-rw-r--r--lib/cli/pkirequestcommand.cpp93
-rw-r--r--lib/cli/pkirequestcommand.hpp32
-rw-r--r--lib/cli/pkisavecertcommand.cpp89
-rw-r--r--lib/cli/pkisavecertcommand.hpp32
-rw-r--r--lib/cli/pkisigncsrcommand.cpp56
-rw-r--r--lib/cli/pkisigncsrcommand.hpp32
-rw-r--r--lib/cli/pkiticketcommand.cpp55
-rw-r--r--lib/cli/pkiticketcommand.hpp31
-rw-r--r--lib/cli/pkiverifycommand.cpp226
-rw-r--r--lib/cli/pkiverifycommand.hpp32
-rw-r--r--lib/cli/variablegetcommand.cpp75
-rw-r--r--lib/cli/variablegetcommand.hpp34
-rw-r--r--lib/cli/variablelistcommand.cpp52
-rw-r--r--lib/cli/variablelistcommand.hpp34
-rw-r--r--lib/cli/variableutility.cpp76
-rw-r--r--lib/cli/variableutility.hpp31
-rw-r--r--lib/compat/CMakeLists.txt38
-rw-r--r--lib/compat/compatlogger.cpp614
-rw-r--r--lib/compat/compatlogger.hpp60
-rw-r--r--lib/compat/compatlogger.ti23
-rw-r--r--lib/compat/externalcommandlistener.cpp150
-rw-r--r--lib/compat/externalcommandlistener.hpp41
-rw-r--r--lib/compat/externalcommandlistener.ti20
-rw-r--r--lib/config/CMakeLists.txt47
-rw-r--r--lib/config/activationcontext.cpp61
-rw-r--r--lib/config/activationcontext.hpp46
-rw-r--r--lib/config/applyrule-targeted.cpp266
-rw-r--r--lib/config/applyrule.cpp189
-rw-r--r--lib/config/applyrule.hpp126
-rw-r--r--lib/config/config_lexer.ll253
-rw-r--r--lib/config/config_parser.yy1243
-rw-r--r--lib/config/configcompiler.cpp364
-rw-r--r--lib/config/configcompiler.hpp161
-rw-r--r--lib/config/configcompilercontext.cpp57
-rw-r--r--lib/config/configcompilercontext.hpp42
-rw-r--r--lib/config/configfragment.hpp26
-rw-r--r--lib/config/configitem.cpp849
-rw-r--r--lib/config/configitem.hpp106
-rw-r--r--lib/config/configitembuilder.cpp120
-rw-r--r--lib/config/configitembuilder.hpp58
-rw-r--r--lib/config/expression.cpp1068
-rw-r--r--lib/config/expression.hpp986
-rw-r--r--lib/config/i2-config.hpp16
-rw-r--r--lib/config/objectrule.cpp18
-rw-r--r--lib/config/objectrule.hpp33
-rw-r--r--lib/config/vmops.hpp274
-rw-r--r--lib/db_ido/CMakeLists.txt40
-rw-r--r--lib/db_ido/commanddbobject.cpp31
-rw-r--r--lib/db_ido/commanddbobject.hpp30
-rw-r--r--lib/db_ido/db_ido-itl.conf19
-rw-r--r--lib/db_ido/dbconnection.cpp583
-rw-r--r--lib/db_ido/dbconnection.hpp138
-rw-r--r--lib/db_ido/dbconnection.ti82
-rw-r--r--lib/db_ido/dbevents.cpp1884
-rw-r--r--lib/db_ido/dbevents.hpp128
-rw-r--r--lib/db_ido/dbobject.cpp430
-rw-r--r--lib/db_ido/dbobject.hpp112
-rw-r--r--lib/db_ido/dbquery.cpp52
-rw-r--r--lib/db_ido/dbquery.hpp72
-rw-r--r--lib/db_ido/dbreference.cpp19
-rw-r--r--lib/db_ido/dbreference.hpp30
-rw-r--r--lib/db_ido/dbtype.cpp141
-rw-r--r--lib/db_ido/dbtype.hpp90
-rw-r--r--lib/db_ido/dbvalue.cpp69
-rw-r--r--lib/db_ido/dbvalue.hpp52
-rw-r--r--lib/db_ido/endpointdbobject.cpp91
-rw-r--r--lib/db_ido/endpointdbobject.hpp37
-rw-r--r--lib/db_ido/hostdbobject.cpp423
-rw-r--r--lib/db_ido/hostdbobject.hpp38
-rw-r--r--lib/db_ido/hostgroupdbobject.cpp33
-rw-r--r--lib/db_ido/hostgroupdbobject.hpp34
-rw-r--r--lib/db_ido/i2-db_ido.hpp14
-rw-r--r--lib/db_ido/idochecktask.cpp197
-rw-r--r--lib/db_ido/idochecktask.hpp29
-rw-r--r--lib/db_ido/servicedbobject.cpp359
-rw-r--r--lib/db_ido/servicedbobject.hpp41
-rw-r--r--lib/db_ido/servicegroupdbobject.cpp32
-rw-r--r--lib/db_ido/servicegroupdbobject.hpp31
-rw-r--r--lib/db_ido/timeperioddbobject.cpp85
-rw-r--r--lib/db_ido/timeperioddbobject.hpp33
-rw-r--r--lib/db_ido/userdbobject.cpp161
-rw-r--r--lib/db_ido/userdbobject.hpp35
-rw-r--r--lib/db_ido/usergroupdbobject.cpp30
-rw-r--r--lib/db_ido/usergroupdbobject.hpp31
-rw-r--r--lib/db_ido/zonedbobject.cpp38
-rw-r--r--lib/db_ido/zonedbobject.hpp31
-rw-r--r--lib/db_ido_mysql/CMakeLists.txt41
-rw-r--r--lib/db_ido_mysql/idomysqlconnection.cpp1269
-rw-r--r--lib/db_ido_mysql/idomysqlconnection.hpp114
-rw-r--r--lib/db_ido_mysql/idomysqlconnection.ti42
-rw-r--r--lib/db_ido_mysql/schema/mysql.sql1666
-rw-r--r--lib/db_ido_mysql/schema/upgrade/2.0.2.sql20
-rw-r--r--lib/db_ido_mysql/schema/upgrade/2.1.0.sql17
-rw-r--r--lib/db_ido_mysql/schema/upgrade/2.11.0.sql89
-rw-r--r--lib/db_ido_mysql/schema/upgrade/2.12.7.sql15
-rw-r--r--lib/db_ido_mysql/schema/upgrade/2.13.0.sql23
-rw-r--r--lib/db_ido_mysql/schema/upgrade/2.13.3.sql15
-rw-r--r--lib/db_ido_mysql/schema/upgrade/2.2.0.sql23
-rw-r--r--lib/db_ido_mysql/schema/upgrade/2.3.0.sql26
-rw-r--r--lib/db_ido_mysql/schema/upgrade/2.4.0.sql75
-rw-r--r--lib/db_ido_mysql/schema/upgrade/2.5.0.sql103
-rw-r--r--lib/db_ido_mysql/schema/upgrade/2.6.0.sql151
-rw-r--r--lib/db_ido_mysql/schema/upgrade/2.8.0.sql81
-rw-r--r--lib/db_ido_mysql/schema/upgrade/2.8.1.sql67
-rw-r--r--lib/db_ido_pgsql/CMakeLists.txt41
-rw-r--r--lib/db_ido_pgsql/idopgsqlconnection.cpp1029
-rw-r--r--lib/db_ido_pgsql/idopgsqlconnection.hpp99
-rw-r--r--lib/db_ido_pgsql/idopgsqlconnection.ti39
-rw-r--r--lib/db_ido_pgsql/schema/pgsql.sql1733
-rw-r--r--lib/db_ido_pgsql/schema/upgrade/2.0.2.sql17
-rw-r--r--lib/db_ido_pgsql/schema/upgrade/2.1.0.sql17
-rw-r--r--lib/db_ido_pgsql/schema/upgrade/2.2.0.sql21
-rw-r--r--lib/db_ido_pgsql/schema/upgrade/2.3.0.sql26
-rw-r--r--lib/db_ido_pgsql/schema/upgrade/2.4.0.sql185
-rw-r--r--lib/db_ido_pgsql/schema/upgrade/2.5.0.sql85
-rw-r--r--lib/db_ido_pgsql/schema/upgrade/2.6.0.sql161
-rw-r--r--lib/db_ido_pgsql/schema/upgrade/2.8.0.sql32
-rw-r--r--lib/db_ido_pgsql/schema/upgrade/2.8.1.sql19
-rw-r--r--lib/icinga/CMakeLists.txt76
-rw-r--r--lib/icinga/apiactions.cpp962
-rw-r--r--lib/icinga/apiactions.hpp42
-rw-r--r--lib/icinga/apievents.cpp438
-rw-r--r--lib/icinga/apievents.hpp51
-rw-r--r--lib/icinga/checkable-check.cpp709
-rw-r--r--lib/icinga/checkable-comment.cpp75
-rw-r--r--lib/icinga/checkable-dependency.cpp176
-rw-r--r--lib/icinga/checkable-downtime.cpp64
-rw-r--r--lib/icinga/checkable-event.cpp81
-rw-r--r--lib/icinga/checkable-flapping.cpp114
-rw-r--r--lib/icinga/checkable-notification.cpp334
-rw-r--r--lib/icinga/checkable-script.cpp28
-rw-r--r--lib/icinga/checkable.cpp322
-rw-r--r--lib/icinga/checkable.hpp264
-rw-r--r--lib/icinga/checkable.ti192
-rw-r--r--lib/icinga/checkcommand.cpp22
-rw-r--r--lib/icinga/checkcommand.hpp32
-rw-r--r--lib/icinga/checkcommand.ti14
-rw-r--r--lib/icinga/checkresult.cpp34
-rw-r--r--lib/icinga/checkresult.hpp28
-rw-r--r--lib/icinga/checkresult.ti72
-rw-r--r--lib/icinga/cib.cpp346
-rw-r--r--lib/icinga/cib.hpp91
-rw-r--r--lib/icinga/clusterevents-check.cpp379
-rw-r--r--lib/icinga/clusterevents.cpp1623
-rw-r--r--lib/icinga/clusterevents.hpp102
-rw-r--r--lib/icinga/command.cpp68
-rw-r--r--lib/icinga/command.hpp30
-rw-r--r--lib/icinga/command.ti54
-rw-r--r--lib/icinga/comment.cpp258
-rw-r--r--lib/icinga/comment.hpp59
-rw-r--r--lib/icinga/comment.ti80
-rw-r--r--lib/icinga/compatutility.cpp302
-rw-r--r--lib/icinga/compatutility.hpp56
-rw-r--r--lib/icinga/customvarobject.cpp49
-rw-r--r--lib/icinga/customvarobject.hpp31
-rw-r--r--lib/icinga/customvarobject.ti15
-rw-r--r--lib/icinga/dependency-apply.cpp161
-rw-r--r--lib/icinga/dependency.cpp325
-rw-r--r--lib/icinga/dependency.hpp62
-rw-r--r--lib/icinga/dependency.ti101
-rw-r--r--lib/icinga/downtime.cpp584
-rw-r--r--lib/icinga/downtime.hpp99
-rw-r--r--lib/icinga/downtime.ti82
-rw-r--r--lib/icinga/envresolver.cpp20
-rw-r--r--lib/icinga/envresolver.hpp30
-rw-r--r--lib/icinga/eventcommand.cpp20
-rw-r--r--lib/icinga/eventcommand.hpp32
-rw-r--r--lib/icinga/eventcommand.ti15
-rw-r--r--lib/icinga/externalcommandprocessor.cpp2281
-rw-r--r--lib/icinga/externalcommandprocessor.hpp169
-rw-r--r--lib/icinga/host.cpp330
-rw-r--r--lib/icinga/host.hpp71
-rw-r--r--lib/icinga/host.ti48
-rw-r--r--lib/icinga/hostgroup.cpp108
-rw-r--r--lib/icinga/hostgroup.hpp43
-rw-r--r--lib/icinga/hostgroup.ti28
-rw-r--r--lib/icinga/i2-icinga.hpp15
-rw-r--r--lib/icinga/icinga-itl.conf15
-rw-r--r--lib/icinga/icingaapplication.cpp321
-rw-r--r--lib/icinga/icingaapplication.hpp52
-rw-r--r--lib/icinga/icingaapplication.ti41
-rw-r--r--lib/icinga/legacytimeperiod.cpp644
-rw-r--r--lib/icinga/legacytimeperiod.hpp45
-rw-r--r--lib/icinga/macroprocessor.cpp585
-rw-r--r--lib/icinga/macroprocessor.hpp75
-rw-r--r--lib/icinga/macroresolver.hpp31
-rw-r--r--lib/icinga/notification-apply.cpp161
-rw-r--r--lib/icinga/notification.cpp812
-rw-r--r--lib/icinga/notification.hpp135
-rw-r--r--lib/icinga/notification.ti111
-rw-r--r--lib/icinga/notificationcommand.cpp27
-rw-r--r--lib/icinga/notificationcommand.hpp36
-rw-r--r--lib/icinga/notificationcommand.ti14
-rw-r--r--lib/icinga/objectutils.cpp55
-rw-r--r--lib/icinga/objectutils.hpp29
-rw-r--r--lib/icinga/pluginutility.cpp218
-rw-r--r--lib/icinga/pluginutility.hpp42
-rw-r--r--lib/icinga/scheduleddowntime-apply.cpp159
-rw-r--r--lib/icinga/scheduleddowntime.cpp393
-rw-r--r--lib/icinga/scheduleddowntime.hpp60
-rw-r--r--lib/icinga/scheduleddowntime.ti76
-rw-r--r--lib/icinga/service-apply.cpp133
-rw-r--r--lib/icinga/service.cpp287
-rw-r--r--lib/icinga/service.hpp65
-rw-r--r--lib/icinga/service.ti71
-rw-r--r--lib/icinga/servicegroup.cpp111
-rw-r--r--lib/icinga/servicegroup.hpp43
-rw-r--r--lib/icinga/servicegroup.ti28
-rw-r--r--lib/icinga/timeperiod.cpp399
-rw-r--r--lib/icinga/timeperiod.hpp50
-rw-r--r--lib/icinga/timeperiod.ti47
-rw-r--r--lib/icinga/user.cpp103
-rw-r--r--lib/icinga/user.hpp44
-rw-r--r--lib/icinga/user.ti47
-rw-r--r--lib/icinga/usergroup.cpp128
-rw-r--r--lib/icinga/usergroup.hpp49
-rw-r--r--lib/icinga/usergroup.ti25
-rw-r--r--lib/icingadb/CMakeLists.txt32
-rw-r--r--lib/icingadb/icingadb-itl.conf24
-rw-r--r--lib/icingadb/icingadb-objects.cpp2966
-rw-r--r--lib/icingadb/icingadb-stats.cpp54
-rw-r--r--lib/icingadb/icingadb-utility.cpp319
-rw-r--r--lib/icingadb/icingadb.cpp311
-rw-r--r--lib/icingadb/icingadb.hpp241
-rw-r--r--lib/icingadb/icingadb.ti63
-rw-r--r--lib/icingadb/icingadbchecktask.cpp513
-rw-r--r--lib/icingadb/icingadbchecktask.hpp29
-rw-r--r--lib/icingadb/redisconnection.cpp773
-rw-r--r--lib/icingadb/redisconnection.hpp678
-rw-r--r--lib/livestatus/CMakeLists.txt65
-rw-r--r--lib/livestatus/aggregator.cpp18
-rw-r--r--lib/livestatus/aggregator.hpp44
-rw-r--r--lib/livestatus/andfilter.cpp15
-rw-r--r--lib/livestatus/andfilter.hpp26
-rw-r--r--lib/livestatus/attributefilter.cpp121
-rw-r--r--lib/livestatus/attributefilter.hpp33
-rw-r--r--lib/livestatus/avgaggregator.cpp38
-rw-r--r--lib/livestatus/avgaggregator.hpp42
-rw-r--r--lib/livestatus/column.cpp21
-rw-r--r--lib/livestatus/column.hpp37
-rw-r--r--lib/livestatus/combinerfilter.cpp10
-rw-r--r--lib/livestatus/combinerfilter.hpp31
-rw-r--r--lib/livestatus/commandstable.cpp142
-rw-r--r--lib/livestatus/commandstable.hpp41
-rw-r--r--lib/livestatus/commentstable.cpp178
-rw-r--r--lib/livestatus/commentstable.hpp49
-rw-r--r--lib/livestatus/contactgroupstable.cpp74
-rw-r--r--lib/livestatus/contactgroupstable.hpp39
-rw-r--r--lib/livestatus/contactstable.cpp278
-rw-r--r--lib/livestatus/contactstable.hpp50
-rw-r--r--lib/livestatus/countaggregator.cpp30
-rw-r--r--lib/livestatus/countaggregator.hpp37
-rw-r--r--lib/livestatus/downtimestable.cpp168
-rw-r--r--lib/livestatus/downtimestable.hpp51
-rw-r--r--lib/livestatus/endpointstable.cpp109
-rw-r--r--lib/livestatus/endpointstable.hpp41
-rw-r--r--lib/livestatus/filter.hpp28
-rw-r--r--lib/livestatus/historytable.hpp24
-rw-r--r--lib/livestatus/hostgroupstable.cpp473
-rw-r--r--lib/livestatus/hostgroupstable.hpp61
-rw-r--r--lib/livestatus/hoststable.cpp1517
-rw-r--r--lib/livestatus/hoststable.hpp133
-rw-r--r--lib/livestatus/i2-livestatus.hpp14
-rw-r--r--lib/livestatus/invavgaggregator.cpp38
-rw-r--r--lib/livestatus/invavgaggregator.hpp42
-rw-r--r--lib/livestatus/invsumaggregator.cpp37
-rw-r--r--lib/livestatus/invsumaggregator.hpp41
-rw-r--r--lib/livestatus/livestatuslistener.cpp211
-rw-r--r--lib/livestatus/livestatuslistener.hpp47
-rw-r--r--lib/livestatus/livestatuslistener.ti31
-rw-r--r--lib/livestatus/livestatuslogutility.cpp321
-rw-r--r--lib/livestatus/livestatuslogutility.hpp60
-rw-r--r--lib/livestatus/livestatusquery.cpp648
-rw-r--r--lib/livestatus/livestatusquery.hpp90
-rw-r--r--lib/livestatus/logtable.cpp229
-rw-r--r--lib/livestatus/logtable.hpp65
-rw-r--r--lib/livestatus/maxaggregator.cpp38
-rw-r--r--lib/livestatus/maxaggregator.hpp41
-rw-r--r--lib/livestatus/minaggregator.cpp45
-rw-r--r--lib/livestatus/minaggregator.hpp42
-rw-r--r--lib/livestatus/negatefilter.cpp14
-rw-r--r--lib/livestatus/negatefilter.hpp31
-rw-r--r--lib/livestatus/orfilter.cpp18
-rw-r--r--lib/livestatus/orfilter.hpp26
-rw-r--r--lib/livestatus/servicegroupstable.cpp323
-rw-r--r--lib/livestatus/servicegroupstable.hpp54
-rw-r--r--lib/livestatus/servicestable.cpp1200
-rw-r--r--lib/livestatus/servicestable.hpp115
-rw-r--r--lib/livestatus/statehisttable.cpp466
-rw-r--r--lib/livestatus/statehisttable.hpp75
-rw-r--r--lib/livestatus/statustable.cpp269
-rw-r--r--lib/livestatus/statustable.hpp61
-rw-r--r--lib/livestatus/stdaggregator.cpp40
-rw-r--r--lib/livestatus/stdaggregator.hpp43
-rw-r--r--lib/livestatus/sumaggregator.cpp37
-rw-r--r--lib/livestatus/sumaggregator.hpp41
-rw-r--r--lib/livestatus/table.cpp165
-rw-r--r--lib/livestatus/table.hpp73
-rw-r--r--lib/livestatus/timeperiodstable.cpp58
-rw-r--r--lib/livestatus/timeperiodstable.hpp39
-rw-r--r--lib/livestatus/zonestable.cpp92
-rw-r--r--lib/livestatus/zonestable.hpp40
-rw-r--r--lib/methods/CMakeLists.txt34
-rw-r--r--lib/methods/clusterchecktask.cpp117
-rw-r--r--lib/methods/clusterchecktask.hpp29
-rw-r--r--lib/methods/clusterzonechecktask.cpp218
-rw-r--r--lib/methods/clusterzonechecktask.hpp28
-rw-r--r--lib/methods/dummychecktask.cpp75
-rw-r--r--lib/methods/dummychecktask.hpp30
-rw-r--r--lib/methods/exceptionchecktask.cpp41
-rw-r--r--lib/methods/exceptionchecktask.hpp29
-rw-r--r--lib/methods/i2-methods.hpp15
-rw-r--r--lib/methods/icingachecktask.cpp209
-rw-r--r--lib/methods/icingachecktask.hpp29
-rw-r--r--lib/methods/ifwapichecktask.cpp531
-rw-r--r--lib/methods/ifwapichecktask.hpp27
-rw-r--r--lib/methods/methods-itl.conf90
-rw-r--r--lib/methods/nullchecktask.cpp50
-rw-r--r--lib/methods/nullchecktask.hpp30
-rw-r--r--lib/methods/nulleventtask.cpp26
-rw-r--r--lib/methods/nulleventtask.hpp30
-rw-r--r--lib/methods/pluginchecktask.cpp89
-rw-r--r--lib/methods/pluginchecktask.hpp33
-rw-r--r--lib/methods/plugineventtask.cpp61
-rw-r--r--lib/methods/plugineventtask.hpp33
-rw-r--r--lib/methods/pluginnotificationtask.cpp123
-rw-r--r--lib/methods/pluginnotificationtask.hpp36
-rw-r--r--lib/methods/randomchecktask.cpp65
-rw-r--r--lib/methods/randomchecktask.hpp29
-rw-r--r--lib/methods/sleepchecktask.cpp67
-rw-r--r--lib/methods/sleepchecktask.hpp30
-rw-r--r--lib/methods/timeperiodtask.cpp35
-rw-r--r--lib/methods/timeperiodtask.hpp28
-rw-r--r--lib/mysql_shim/CMakeLists.txt31
-rw-r--r--lib/mysql_shim/mysql_shim.def3
-rw-r--r--lib/mysql_shim/mysqlinterface.cpp119
-rw-r--r--lib/mysql_shim/mysqlinterface.hpp65
-rw-r--r--lib/notification/CMakeLists.txt34
-rw-r--r--lib/notification/notificationcomponent.cpp271
-rw-r--r--lib/notification/notificationcomponent.hpp38
-rw-r--r--lib/notification/notificationcomponent.ti19
-rw-r--r--lib/perfdata/CMakeLists.txt74
-rw-r--r--lib/perfdata/elasticsearchwriter.cpp685
-rw-r--r--lib/perfdata/elasticsearchwriter.hpp65
-rw-r--r--lib/perfdata/elasticsearchwriter.ti50
-rw-r--r--lib/perfdata/gelfwriter.cpp535
-rw-r--r--lib/perfdata/gelfwriter.hpp70
-rw-r--r--lib/perfdata/gelfwriter.ti45
-rw-r--r--lib/perfdata/graphitewriter.cpp514
-rw-r--r--lib/perfdata/graphitewriter.hpp69
-rw-r--r--lib/perfdata/graphitewriter.ti38
-rw-r--r--lib/perfdata/influxdb2writer.cpp44
-rw-r--r--lib/perfdata/influxdb2writer.hpp33
-rw-r--r--lib/perfdata/influxdb2writer.ti19
-rw-r--r--lib/perfdata/influxdbcommonwriter.cpp596
-rw-r--r--lib/perfdata/influxdbcommonwriter.hpp101
-rw-r--r--lib/perfdata/influxdbcommonwriter.ti88
-rw-r--r--lib/perfdata/influxdbwriter.cpp56
-rw-r--r--lib/perfdata/influxdbwriter.hpp31
-rw-r--r--lib/perfdata/influxdbwriter.ti35
-rw-r--r--lib/perfdata/opentsdbwriter.cpp525
-rw-r--r--lib/perfdata/opentsdbwriter.hpp62
-rw-r--r--lib/perfdata/opentsdbwriter.ti55
-rw-r--r--lib/perfdata/perfdatawriter.cpp201
-rw-r--r--lib/perfdata/perfdatawriter.hpp53
-rw-r--r--lib/perfdata/perfdatawriter.ti61
-rw-r--r--lib/pgsql_shim/CMakeLists.txt32
-rw-r--r--lib/pgsql_shim/pgsql_shim.def3
-rw-r--r--lib/pgsql_shim/pgsqlinterface.cpp108
-rw-r--r--lib/pgsql_shim/pgsqlinterface.hpp61
-rw-r--r--lib/remote/CMakeLists.txt67
-rw-r--r--lib/remote/actionshandler.cpp145
-rw-r--r--lib/remote/actionshandler.hpp32
-rw-r--r--lib/remote/apiaction.cpp40
-rw-r--r--lib/remote/apiaction.hpp69
-rw-r--r--lib/remote/apifunction.cpp35
-rw-r--r--lib/remote/apifunction.hpp59
-rw-r--r--lib/remote/apilistener-authority.cpp84
-rw-r--r--lib/remote/apilistener-configsync.cpp464
-rw-r--r--lib/remote/apilistener-filesync.cpp887
-rw-r--r--lib/remote/apilistener.cpp1970
-rw-r--r--lib/remote/apilistener.hpp265
-rw-r--r--lib/remote/apilistener.ti66
-rw-r--r--lib/remote/apiuser.cpp55
-rw-r--r--lib/remote/apiuser.hpp27
-rw-r--r--lib/remote/apiuser.ti31
-rw-r--r--lib/remote/configfileshandler.cpp94
-rw-r--r--lib/remote/configfileshandler.hpp30
-rw-r--r--lib/remote/configobjectslock.cpp24
-rw-r--r--lib/remote/configobjectslock.hpp72
-rw-r--r--lib/remote/configobjectutility.cpp377
-rw-r--r--lib/remote/configobjectutility.hpp47
-rw-r--r--lib/remote/configpackageshandler.cpp179
-rw-r--r--lib/remote/configpackageshandler.hpp54
-rw-r--r--lib/remote/configpackageutility.cpp413
-rw-r--r--lib/remote/configpackageutility.hpp73
-rw-r--r--lib/remote/configstageshandler.cpp225
-rw-r--r--lib/remote/configstageshandler.hpp56
-rw-r--r--lib/remote/consolehandler.cpp327
-rw-r--r--lib/remote/consolehandler.hpp50
-rw-r--r--lib/remote/createobjecthandler.cpp155
-rw-r--r--lib/remote/createobjecthandler.hpp30
-rw-r--r--lib/remote/deleteobjecthandler.cpp123
-rw-r--r--lib/remote/deleteobjecthandler.hpp30
-rw-r--r--lib/remote/endpoint.cpp138
-rw-r--r--lib/remote/endpoint.hpp68
-rw-r--r--lib/remote/endpoint.ti59
-rw-r--r--lib/remote/eventqueue.cpp351
-rw-r--r--lib/remote/eventqueue.hpp177
-rw-r--r--lib/remote/eventshandler.cpp137
-rw-r--r--lib/remote/eventshandler.hpp31
-rw-r--r--lib/remote/filterutility.cpp354
-rw-r--r--lib/remote/filterutility.hpp64
-rw-r--r--lib/remote/httphandler.cpp129
-rw-r--r--lib/remote/httphandler.hpp74
-rw-r--r--lib/remote/httpserverconnection.cpp613
-rw-r--r--lib/remote/httpserverconnection.hpp54
-rw-r--r--lib/remote/httputility.cpp80
-rw-r--r--lib/remote/httputility.hpp33
-rw-r--r--lib/remote/i2-remote.hpp14
-rw-r--r--lib/remote/infohandler.cpp100
-rw-r--r--lib/remote/infohandler.hpp30
-rw-r--r--lib/remote/jsonrpc.cpp157
-rw-r--r--lib/remote/jsonrpc.hpp39
-rw-r--r--lib/remote/jsonrpcconnection-heartbeat.cpp48
-rw-r--r--lib/remote/jsonrpcconnection-pki.cpp439
-rw-r--r--lib/remote/jsonrpcconnection.cpp388
-rw-r--r--lib/remote/jsonrpcconnection.hpp100
-rw-r--r--lib/remote/messageorigin.cpp10
-rw-r--r--lib/remote/messageorigin.hpp28
-rw-r--r--lib/remote/modifyobjecthandler.cpp168
-rw-r--r--lib/remote/modifyobjecthandler.hpp30
-rw-r--r--lib/remote/objectqueryhandler.cpp330
-rw-r--r--lib/remote/objectqueryhandler.hpp34
-rw-r--r--lib/remote/pkiutility.cpp452
-rw-r--r--lib/remote/pkiutility.hpp41
-rw-r--r--lib/remote/statushandler.cpp120
-rw-r--r--lib/remote/statushandler.hpp30
-rw-r--r--lib/remote/templatequeryhandler.cpp136
-rw-r--r--lib/remote/templatequeryhandler.hpp30
-rw-r--r--lib/remote/typequeryhandler.cpp156
-rw-r--r--lib/remote/typequeryhandler.hpp30
-rw-r--r--lib/remote/url-characters.hpp29
-rw-r--r--lib/remote/url.cpp363
-rw-r--r--lib/remote/url.hpp78
-rw-r--r--lib/remote/variablequeryhandler.cpp121
-rw-r--r--lib/remote/variablequeryhandler.hpp30
-rw-r--r--lib/remote/zone.cpp154
-rw-r--r--lib/remote/zone.hpp46
-rw-r--r--lib/remote/zone.ti25
-rw-r--r--mkdocs.yml33
-rw-r--r--plugins/CMakeLists.txt69
-rw-r--r--plugins/check_disk.cpp443
-rw-r--r--plugins/check_load.cpp244
-rw-r--r--plugins/check_memory.cpp215
-rw-r--r--plugins/check_network.cpp374
-rw-r--r--plugins/check_nscp_api.cpp512
-rw-r--r--plugins/check_perfmon.cpp387
-rw-r--r--plugins/check_ping.cpp508
-rw-r--r--plugins/check_procs.cpp325
-rw-r--r--plugins/check_service.cpp284
-rw-r--r--plugins/check_swap.cpp238
-rw-r--r--plugins/check_update.cpp248
-rw-r--r--plugins/check_uptime.cpp213
-rw-r--r--plugins/check_users.cpp225
-rw-r--r--plugins/thresholds.cpp276
-rw-r--r--plugins/thresholds.hpp64
-rw-r--r--test/CMakeLists.txt259
-rw-r--r--test/base-array.cpp162
-rw-r--r--test/base-base64.cpp45
-rw-r--r--test/base-convert.cpp60
-rw-r--r--test/base-dictionary.cpp200
-rw-r--r--test/base-fifo.cpp43
-rw-r--r--test/base-json.cpp110
-rw-r--r--test/base-match.cpp27
-rw-r--r--test/base-netstring.cpp25
-rw-r--r--test/base-object-packer.cpp264
-rw-r--r--test/base-object.cpp39
-rw-r--r--test/base-serialize.cpp68
-rw-r--r--test/base-shellescape.cpp32
-rw-r--r--test/base-stacktrace.cpp72
-rw-r--r--test/base-stream.cpp39
-rw-r--r--test/base-string.cpp104
-rw-r--r--test/base-timer.cpp61
-rw-r--r--test/base-tlsutility.cpp135
-rw-r--r--test/base-type.cpp47
-rw-r--r--test/base-utility.cpp138
-rw-r--r--test/base-value.cpp53
-rw-r--r--test/config-apply.cpp251
-rw-r--r--test/config-ops.cpp246
-rw-r--r--test/config/2742.conf21
-rw-r--r--test/config/5872.conf72
-rw-r--r--test/config/5912.conf.dis14
-rw-r--r--test/config/5926.conf23
-rw-r--r--test/config/5927.conf44
-rw-r--r--test/config/5980.conf58
-rw-r--r--test/config/6105.conf25
-rw-r--r--test/config/6479.conf44
-rw-r--r--test/config/6608.conf16
-rw-r--r--test/config/6968.conf27
-rw-r--r--test/config/7560.conf41
-rw-r--r--test/config/7683.conf27
-rw-r--r--test/config/8063.conf73
-rw-r--r--test/config/README2
-rw-r--r--test/config/templates.conf80
-rw-r--r--test/icinga-checkable-fixture.cpp28
-rw-r--r--test/icinga-checkable-flapping.cpp248
-rw-r--r--test/icinga-checkresult.cpp1032
-rw-r--r--test/icinga-dependencies.cpp101
-rw-r--r--test/icinga-legacytimeperiod.cpp694
-rw-r--r--test/icinga-macros.cpp50
-rw-r--r--test/icinga-notification.cpp215
-rw-r--r--test/icinga-perfdata.cpp407
-rw-r--r--test/icingaapplication-fixture.cpp32
-rw-r--r--test/icingaapplication-fixture.hpp21
-rw-r--r--test/livestatus-fixture.cpp53
-rw-r--r--test/livestatus.cpp107
-rw-r--r--test/livestatus/README12
-rw-r--r--test/livestatus/queries/commands/command4
-rw-r--r--test/livestatus/queries/commands/modattr4
-rw-r--r--test/livestatus/queries/comments/comment3
-rw-r--r--test/livestatus/queries/comments/comment_short4
-rw-r--r--test/livestatus/queries/contacts/contacts3
-rw-r--r--test/livestatus/queries/contacts/group3
-rw-r--r--test/livestatus/queries/contacts/modattr4
-rw-r--r--test/livestatus/queries/custom/scrambled2
-rw-r--r--test/livestatus/queries/custom/thruk_alert_history19
-rw-r--r--test/livestatus/queries/custom/thruk_comments7
-rw-r--r--test/livestatus/queries/downtimes/downtime3
-rw-r--r--test/livestatus/queries/downtimes/downtime_short4
-rw-r--r--test/livestatus/queries/endpoints/endpoints3
-rw-r--r--test/livestatus/queries/hosts/bygroup4
-rw-r--r--test/livestatus/queries/hosts/check4
-rw-r--r--test/livestatus/queries/hosts/command4
-rw-r--r--test/livestatus/queries/hosts/comment4
-rw-r--r--test/livestatus/queries/hosts/contact4
-rw-r--r--test/livestatus/queries/hosts/customvar4
-rw-r--r--test/livestatus/queries/hosts/downtime4
-rw-r--r--test/livestatus/queries/hosts/extra4
-rw-r--r--test/livestatus/queries/hosts/group3
-rw-r--r--test/livestatus/queries/hosts/host4
-rw-r--r--test/livestatus/queries/hosts/host_nagvis6
-rw-r--r--test/livestatus/queries/hosts/legacy4
-rw-r--r--test/livestatus/queries/hosts/modattr4
-rw-r--r--test/livestatus/queries/hosts/notification4
-rw-r--r--test/livestatus/queries/hosts/services4
-rw-r--r--test/livestatus/queries/hosts/state4
-rw-r--r--test/livestatus/queries/hosts/stats_sum5
-rw-r--r--test/livestatus/queries/log/alerts6
-rw-r--r--test/livestatus/queries/log/avail12
-rw-r--r--test/livestatus/queries/log/avail_svc13
-rw-r--r--test/livestatus/queries/log/class5
-rw-r--r--test/livestatus/queries/log/localhost_disk7
-rw-r--r--test/livestatus/queries/log/log4
-rw-r--r--test/livestatus/queries/log/minimal5
-rw-r--r--test/livestatus/queries/log/trend26
-rw-r--r--test/livestatus/queries/services/bygroup4
-rw-r--r--test/livestatus/queries/services/byhostgroup4
-rw-r--r--test/livestatus/queries/services/check4
-rw-r--r--test/livestatus/queries/services/command4
-rw-r--r--test/livestatus/queries/services/comment4
-rw-r--r--test/livestatus/queries/services/contact4
-rw-r--r--test/livestatus/queries/services/customvar4
-rw-r--r--test/livestatus/queries/services/downtime4
-rw-r--r--test/livestatus/queries/services/extra4
-rw-r--r--test/livestatus/queries/services/group3
-rw-r--r--test/livestatus/queries/services/legacy4
-rw-r--r--test/livestatus/queries/services/modattr4
-rw-r--r--test/livestatus/queries/services/notification4
-rw-r--r--test/livestatus/queries/services/services3
-rw-r--r--test/livestatus/queries/services/state4
-rw-r--r--test/livestatus/queries/special/services5
-rw-r--r--test/livestatus/queries/statehist/duration5
-rw-r--r--test/livestatus/queries/statehist/statehist5
-rw-r--r--test/livestatus/queries/statehist/statehist_disk6
-rw-r--r--test/livestatus/queries/statehist/sum9
-rw-r--r--test/livestatus/queries/status/checks4
-rw-r--r--test/livestatus/queries/status/custom4
-rw-r--r--test/livestatus/queries/status/livestatus4
-rw-r--r--test/livestatus/queries/status/program4
-rw-r--r--test/livestatus/queries/status/status3
-rw-r--r--test/livestatus/queries/timeperiods/timeperiod3
-rwxr-xr-xtest/livestatus/run_queries26
-rw-r--r--test/methods-pluginnotificationtask.cpp88
-rw-r--r--test/remote-configpackageutility.cpp25
-rw-r--r--test/remote-url.cpp128
-rw-r--r--test/test-runner.cpp21
-rw-r--r--third-party/CMakeLists.txt9
-rw-r--r--third-party/cmake/BoostTestTargets.cmake262
-rw-r--r--third-party/cmake/BoostTestTargetsDynamic.h9
-rw-r--r--third-party/cmake/BoostTestTargetsIncluded.h7
-rw-r--r--third-party/cmake/BoostTestTargetsStatic.h7
-rw-r--r--third-party/cmake/CopyResourcesToBuildTree.cmake83
-rw-r--r--third-party/cmake/FindBISON.cmake221
-rw-r--r--third-party/cmake/FindEditline.cmake86
-rw-r--r--third-party/cmake/FindFLEX.cmake185
-rw-r--r--third-party/cmake/FindGit.cmake73
-rw-r--r--third-party/cmake/FindMySQL.cmake142
-rw-r--r--third-party/cmake/FindPostgreSQL.cmake185
-rw-r--r--third-party/cmake/FindTermcap.cmake68
-rw-r--r--third-party/cmake/GNUInstallDirs.cmake245
-rw-r--r--third-party/cmake/GetForceIncludeDefinitions.cmake44
-rw-r--r--third-party/cmake/GetGitRevisionDescription.cmake284
-rw-r--r--third-party/cmake/GetGitRevisionDescription.cmake.in43
-rw-r--r--third-party/execvpe/CMakeLists.txt12
-rw-r--r--third-party/execvpe/execvpe.c208
-rw-r--r--third-party/execvpe/execvpe.h18
-rw-r--r--third-party/mmatch/CMakeLists.txt12
-rw-r--r--third-party/mmatch/mmatch.c309
-rw-r--r--third-party/mmatch/mmatch.h16
-rw-r--r--third-party/nlohmann_json/LICENSE21
-rw-r--r--third-party/nlohmann_json/json.hpp25447
-rw-r--r--third-party/socketpair/CMakeLists.txt12
-rw-r--r--third-party/socketpair/socketpair.c154
-rw-r--r--third-party/socketpair/socketpair.h46
-rw-r--r--third-party/utf8cpp/.circleci/config.yml13
-rw-r--r--third-party/utf8cpp/.gitignore4
-rw-r--r--third-party/utf8cpp/.gitmodules3
-rw-r--r--third-party/utf8cpp/CMakeLists.txt62
-rw-r--r--third-party/utf8cpp/LICENSE23
-rw-r--r--third-party/utf8cpp/README.md1503
-rw-r--r--third-party/utf8cpp/samples/docsample.cpp64
-rw-r--r--third-party/utf8cpp/source/utf8.h34
-rw-r--r--third-party/utf8cpp/source/utf8/checked.h319
-rw-r--r--third-party/utf8cpp/source/utf8/core.h387
-rw-r--r--third-party/utf8cpp/source/utf8/cpp11.h103
-rw-r--r--third-party/utf8cpp/source/utf8/cpp17.h103
-rw-r--r--third-party/utf8cpp/source/utf8/unchecked.h257
-rw-r--r--third-party/utf8cpp/tests/CMakeLists.txt43
-rw-r--r--third-party/utf8cpp/tests/apitests.cpp6
-rw-r--r--third-party/utf8cpp/tests/docker/Dockerfile5
-rw-r--r--third-party/utf8cpp/tests/negative.cpp59
-rw-r--r--third-party/utf8cpp/tests/noexceptionstests.cpp4
-rw-r--r--third-party/utf8cpp/tests/test_checked_api.h198
-rw-r--r--third-party/utf8cpp/tests/test_checked_iterator.h35
-rw-r--r--third-party/utf8cpp/tests/test_cpp11.cpp109
-rw-r--r--third-party/utf8cpp/tests/test_cpp17.cpp88
-rw-r--r--third-party/utf8cpp/tests/test_data/utf8_invalid.txtbin0 -> 20010 bytes
-rw-r--r--third-party/utf8cpp/tests/test_unchecked_api.h164
-rw-r--r--third-party/utf8cpp/tests/test_unchecked_iterator.h36
-rw-r--r--third-party/utf8cpp/utf8cppConfig.cmake.in6
-rw-r--r--tools/CMakeLists.txt6
-rw-r--r--tools/debug/gdb/.gitignore1
-rw-r--r--tools/debug/gdb/README.md40
-rw-r--r--tools/debug/gdb/gdbinit25
-rw-r--r--tools/debug/gdb/icingadbg.py64
-rw-r--r--tools/debug/natvis/Visualizers/icinga2.natstepfilter9
-rw-r--r--tools/debug/natvis/Visualizers/icinga2.natvis32
-rw-r--r--tools/debug/natvis/[Content_Types].xml1
-rw-r--r--tools/debug/natvis/extension.vsixmanifest18
-rw-r--r--tools/mkclass/CMakeLists.txt43
-rw-r--r--tools/mkclass/class_lexer.ll167
-rw-r--r--tools/mkclass/class_parser.yy558
-rw-r--r--tools/mkclass/classcompiler.cpp1485
-rw-r--r--tools/mkclass/classcompiler.hpp245
-rw-r--r--tools/mkclass/mkclass.cpp16
-rw-r--r--tools/mkembedconfig/CMakeLists.txt24
-rw-r--r--tools/mkembedconfig/mkembedconfig.c51
-rw-r--r--tools/mkunity/CMakeLists.txt47
-rw-r--r--tools/mkunity/mkunity.c20
-rw-r--r--tools/selinux/icinga2.fc23
-rw-r--r--tools/selinux/icinga2.if453
-rwxr-xr-xtools/selinux/icinga2.sh74
-rw-r--r--tools/selinux/icinga2.te290
-rw-r--r--tools/syntax/nano/icinga2.nanorc157
-rw-r--r--tools/syntax/vim/ftdetect/icinga2.vim2
-rw-r--r--tools/syntax/vim/syntax/icinga2.vim361
-rw-r--r--tools/win32/build-choco.ps142
-rw-r--r--tools/win32/build.ps127
-rw-r--r--tools/win32/configure-dev.ps169
-rw-r--r--tools/win32/configure.ps172
-rw-r--r--tools/win32/load-vsenv.ps159
-rw-r--r--tools/win32/test.ps133
1137 files changed, 220206 insertions, 0 deletions
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000..7f6e121
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,46 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+## Describe the bug
+
+A clear and concise description of what the bug is.
+
+Please make sure to read https://github.com/Icinga/icinga2/blob/master/doc/15-troubleshooting.md first. Formatting tip: GitHub supports Markdown: https://guides.github.com/features/mastering-markdown/
+
+## To Reproduce
+
+Provide a link to a live example, or an unambiguous set of steps to reproduce this bug. Include configuration, logs, etc. to reproduce, if relevant.
+
+1.
+2.
+3.
+4.
+
+## Expected behavior
+
+A clear and concise description of what you expected to happen.
+
+## Screenshots
+
+If applicable, add screenshots to help explain your problem.
+
+## Your Environment
+
+Include as many relevant details as you can about the environment in which you experienced the problem:
+
+* Version used (`icinga2 --version`):
+* Operating System and version:
+* Enabled features (`icinga2 feature list`):
+* Icinga Web 2 version and modules (System - About):
+* Config validation (`icinga2 daemon -C`):
+* If you run multiple Icinga 2 instances, the `zones.conf` file (or `icinga2 object list --type Endpoint` and `icinga2 object list --type Zone`) from all affected nodes.
+
+## Additional context
+
+Add any other context about the problem here.
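
The "Your Environment" checklist in the template above maps to a handful of Icinga 2 CLI calls. As a minimal sketch (not part of the template itself; the output file name is only an example), the details can be collected in one pass on the affected node:

    # Gather the diagnostics the bug report template asks for.
    # Assumes a local Icinga 2 installation; adjust the output path as needed.
    {
        icinga2 --version
        icinga2 feature list
        icinga2 daemon -C
        icinga2 object list --type Endpoint   # only relevant for multi-instance setups
        icinga2 object list --type Zone
    } > icinga2-environment.txt 2>&1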
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000..cfcedf2
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,5 @@
+blank_issues_enabled: true
+contact_links:
+ - name: Ask a question
+ url: https://community.icinga.com/c/icinga-2/6
+ about: Ask a question in our community forum
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000..1d19ccf
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,24 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+## Is your feature request related to a problem? Please describe.
+
+A clear and concise description of what the problem is. Ex. I'm always using this feature but am missing [...]
+
+## Describe the solution you'd like
+
+A clear and concise description of what you want to happen.
+
+## Describe alternatives you've considered
+
+A clear and concise description of any alternative solutions or features you've considered.
+
+## Additional context
+
+Add any other context or screenshots about the feature request here.
diff --git a/.github/workflows/authors-file.yml b/.github/workflows/authors-file.yml
new file mode 100644
index 0000000..3970aee
--- /dev/null
+++ b/.github/workflows/authors-file.yml
@@ -0,0 +1,39 @@
+name: AUTHORS file
+
+on:
+ pull_request: { }
+
+jobs:
+ authors-file:
+ name: AUTHORS file
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout HEAD
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - name: Check whether ./AUTHORS is up-to-date
+ run: |
+ set -exo pipefail
+ sort -uo AUTHORS AUTHORS
+ git add AUTHORS
+ git log --format='format:%aN <%aE>' "$(
+ git merge-base "origin/$GITHUB_BASE_REF" "origin/$GITHUB_HEAD_REF"
+ )..origin/$GITHUB_HEAD_REF" >> AUTHORS
+ sort -uo AUTHORS AUTHORS
+ git diff AUTHORS >> AUTHORS.diff
+
+ - name: Complain if ./AUTHORS isn't up-to-date
+ run: |
+ if [ -s AUTHORS.diff ]; then
+ cat <<'EOF' >&2
+          The following new authors were found. If the commit author data is correct,
+ either add them to the AUTHORS file or update .mailmap. See gitmailmap(5) or:
+ https://git-scm.com/docs/gitmailmap
+ Don't hesitate to ask us for help if necessary.
+ EOF
+ cat AUTHORS.diff
+ exit 1
+ fi
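
The job above appends every commit author of the pull-request range to a sorted copy of AUTHORS and fails if that changes the file. A rough, read-only local approximation of the same check (assuming origin/master is the target branch; the workflow derives the range from $GITHUB_BASE_REF and $GITHUB_HEAD_REF instead) could look like this:

    # Print commit authors on the current branch that are missing from AUTHORS.
    # Requires bash for the process substitution; %aN/%aE honour .mailmap, as in CI.
    git log --format='format:%aN <%aE>' \
        "$(git merge-base origin/master HEAD)..HEAD" \
        | sort -u \
        | comm -13 <(sort -u AUTHORS) -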
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
new file mode 100644
index 0000000..dc23459
--- /dev/null
+++ b/.github/workflows/docker.yml
@@ -0,0 +1,37 @@
+name: Docker image
+
+on:
+ pull_request: {}
+ push:
+ branches:
+ - master
+ - 'support/*'
+ release:
+ types:
+ - published
+
+concurrency:
+ group: docker-${{ github.event_name == 'push' && github.sha || github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ docker-release:
+ if: github.event_name == 'release'
+ concurrency: docker-release
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Docker image
+ uses: Icinga/docker-icinga2@master
+ with:
+ dockerhub-token: '${{ secrets.DOCKER_HUB_PERSONAL_TOKEN }}'
+
+ docker:
+ if: github.event_name != 'release'
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Docker image
+ uses: Icinga/docker-icinga2@master
+ with:
+ dockerhub-token: '${{ secrets.DOCKER_HUB_PERSONAL_TOKEN }}'
diff --git a/.github/workflows/linux.bash b/.github/workflows/linux.bash
new file mode 100755
index 0000000..fe0e7d5
--- /dev/null
+++ b/.github/workflows/linux.bash
@@ -0,0 +1,95 @@
+#!/bin/bash
+set -exo pipefail
+
+export PATH="/usr/lib/ccache:/usr/lib64/ccache:/opt/rh/devtoolset-11/root/usr/bin:$PATH"
+export CCACHE_DIR=/icinga2/ccache
+export CTEST_OUTPUT_ON_FAILURE=1
+CMAKE_OPTS=''
+
+case "$DISTRO" in
+ amazonlinux:2)
+ amazon-linux-extras install -y epel
+ yum install -y bison ccache cmake3 gcc-c++ flex ninja-build \
+ {libedit,mariadb,ncurses,openssl,postgresql,systemd}-devel
+
+ yum install -y bzip2 tar wget
+ wget https://boostorg.jfrog.io/artifactory/main/release/1.69.0/source/boost_1_69_0.tar.bz2
+ tar -xjf boost_1_69_0.tar.bz2
+
+ (
+ cd boost_1_69_0
+ ./bootstrap.sh --with-libraries=context,coroutine,date_time,filesystem,iostreams,program_options,regex,system,test,thread
+ ./b2
+ )
+
+ ln -vs /usr/bin/cmake3 /usr/local/bin/cmake
+ ln -vs /usr/bin/ninja-build /usr/local/bin/ninja
+ CMAKE_OPTS='-DBOOST_INCLUDEDIR=/boost_1_69_0 -DBOOST_LIBRARYDIR=/boost_1_69_0/stage/lib'
+ export LD_LIBRARY_PATH=/boost_1_69_0/stage/lib
+ ;;
+
+ amazonlinux:20*)
+ dnf install -y bison cmake flex gcc-c++ ninja-build \
+ {boost,libedit,mariadb1\*,ncurses,openssl,postgresql,systemd}-devel
+ ;;
+
+ centos:*)
+ yum install -y centos-release-scl epel-release
+ yum install -y bison ccache cmake3 devtoolset-11-gcc-c++ flex ninja-build \
+ {boost169,libedit,mariadb,ncurses,openssl,postgresql,systemd}-devel
+
+ ln -vs /usr/bin/cmake3 /usr/local/bin/cmake
+ ln -vs /usr/bin/ccache /usr/lib64/ccache/g++
+ CMAKE_OPTS='-DBOOST_INCLUDEDIR=/usr/include/boost169 -DBOOST_LIBRARYDIR=/usr/lib64/boost169'
+ ;;
+
+ debian:*|ubuntu:*)
+ apt-get update
+ DEBIAN_FRONTEND=noninteractive apt-get install --no-install-{recommends,suggests} -y bison \
+ ccache cmake flex g++ lib{boost-all,edit,mariadb,ncurses,pq,ssl,systemd}-dev ninja-build tzdata
+ ;;
+
+ fedora:*)
+ dnf install -y bison ccache cmake flex gcc-c++ ninja-build \
+ {boost,libedit,mariadb,ncurses,openssl,postgresql,systemd}-devel
+ ;;
+
+ opensuse/*)
+ zypper in -y bison ccache cmake flex gcc-c++ ninja {lib{edit,mariadb,openssl},ncurses,postgresql,systemd}-devel \
+ libboost_{context,coroutine,filesystem,iostreams,program_options,regex,system,test,thread}-devel
+ ;;
+
+ rockylinux:*)
+ dnf install -y 'dnf-command(config-manager)' epel-release
+
+ case "$DISTRO" in
+ *:8)
+ dnf config-manager --enable powertools
+ ;;
+ *)
+ dnf config-manager --enable crb
+ ;;
+ esac
+
+ dnf install -y bison ccache cmake gcc-c++ flex ninja-build \
+ {boost,libedit,mariadb,ncurses,openssl,postgresql,systemd}-devel
+ ;;
+esac
+
+mkdir /icinga2/build
+cd /icinga2/build
+
+cmake \
+ -GNinja \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DICINGA2_UNITY_BUILD=ON \
+ -DUSE_SYSTEMD=ON \
+ -DICINGA2_USER=$(id -un) \
+ -DICINGA2_GROUP=$(id -gn) \
+ $CMAKE_OPTS ..
+
+ninja
+
+ninja test
+ninja install
+icinga2 daemon -C
diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml
new file mode 100644
index 0000000..6c3c5dd
--- /dev/null
+++ b/.github/workflows/linux.yml
@@ -0,0 +1,56 @@
+name: Linux
+
+on:
+ push:
+ branches:
+ - master
+ - 'support/*'
+ pull_request: {}
+
+concurrency:
+ group: linux-${{ github.event_name == 'push' && github.sha || github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ linux:
+ name: ${{ matrix.distro }}
+ runs-on: ubuntu-latest
+
+ strategy:
+ fail-fast: false
+ max-parallel: 2
+ matrix:
+ distro:
+ - amazonlinux:2
+ - amazonlinux:2023
+ - centos:7 # and RHEL 7
+ - debian:10
+ - debian:11 # and Raspbian 11
+ - debian:12 # and Raspbian 12
+ - fedora:37
+ - fedora:38
+ - fedora:39
+ - opensuse/leap:15.3 # SLES 15.3
+ - opensuse/leap:15.4 # and SLES 15.4
+ - opensuse/leap:15.5 # and SLES 15.5
+ - rockylinux:8 # RHEL 8
+ - rockylinux:9 # RHEL 9
+ - ubuntu:20.04
+ - ubuntu:22.04
+ - ubuntu:23.04
+ - ubuntu:23.10
+
+ steps:
+ - name: Checkout HEAD
+ uses: actions/checkout@v3
+
+ - name: Restore/backup ccache
+ uses: actions/cache@v3
+ with:
+ path: ccache
+ key: ccache/${{ matrix.distro }}
+
+ - name: Build
+ run: >-
+ docker run --rm -v "$(pwd):/icinga2" -e DISTRO=${{ matrix.distro }}
+ ${{ matrix.distro }} /icinga2/.github/workflows/linux.bash
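Because the Build step above is self-contained, a single matrix entry can be reproduced locally with nothing but Docker. A minimal sketch (debian:12 is just one of the distros from the matrix; the build and ccache directories end up inside the checkout because linux.bash writes to /icinga2):

```bash
# Run one matrix entry of the Linux workflow locally (assumes Docker is installed).
docker run --rm \
    -v "$(pwd):/icinga2" \
    -e DISTRO=debian:12 \
    debian:12 /icinga2/.github/workflows/linux.bash
```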
diff --git a/.github/workflows/rpm.yml b/.github/workflows/rpm.yml
new file mode 100644
index 0000000..5cf8b10
--- /dev/null
+++ b/.github/workflows/rpm.yml
@@ -0,0 +1,116 @@
+name: .rpm
+
+on:
+ push:
+ branches:
+ - master
+ - 'support/*'
+ pull_request: {}
+
+concurrency:
+ group: rpm-${{ github.event_name == 'push' && github.sha || github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ rpm:
+ name: .rpm (${{ matrix.distro.name }}, ${{ matrix.distro.release }})
+
+ strategy:
+ fail-fast: false
+ max-parallel: 1
+ matrix:
+ distro:
+ - name: sles
+ release: '12.5'
+ subscription: true
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Vars
+ id: vars
+ env:
+ GITLAB_RO_TOKEN: '${{ secrets.GITLAB_RO_TOKEN }}'
+ run: |
+ if [ ${{ matrix.distro.subscription }} = true ]; then
+ if [ "$(tr -d '\n' <<<"$GITLAB_RO_TOKEN" |wc -c)" -eq 0 ]; then
+ echo '::set-output name=CAN_BUILD::false'
+ echo '::set-output name=NEED_LOGIN::false'
+ else
+ echo '::set-output name=CAN_BUILD::true'
+ echo '::set-output name=NEED_LOGIN::true'
+ fi
+ else
+ echo '::set-output name=CAN_BUILD::true'
+ echo '::set-output name=NEED_LOGIN::false'
+ fi
+
+ - name: Checkout HEAD
+ if: "steps.vars.outputs.CAN_BUILD == 'true'"
+ uses: actions/checkout@v1
+
+ - name: Login
+ if: "steps.vars.outputs.NEED_LOGIN == 'true'"
+ env:
+ GITLAB_RO_TOKEN: '${{ secrets.GITLAB_RO_TOKEN }}'
+ run: |
+ docker login registry.icinga.com -u github-actions --password-stdin <<<"$GITLAB_RO_TOKEN"
+
+ - name: rpm-icinga2
+ if: "steps.vars.outputs.CAN_BUILD == 'true' && !matrix.distro.subscription"
+ run: |
+ set -exo pipefail
+ git clone https://git.icinga.com/packaging/rpm-icinga2.git
+ chmod o+w rpm-icinga2
+
+ - name: subscription-rpm-icinga2
+ if: "steps.vars.outputs.CAN_BUILD == 'true' && matrix.distro.subscription"
+ env:
+ GITLAB_RO_TOKEN: '${{ secrets.GITLAB_RO_TOKEN }}'
+ run: |
+ set -exo pipefail
+ git config --global credential.helper store
+ cat <<EOF >~/.git-credentials
+ https://github-actions:${GITLAB_RO_TOKEN}@git.icinga.com
+ EOF
+ git clone https://git.icinga.com/packaging/subscription-rpm-icinga2.git rpm-icinga2
+ chmod o+w rpm-icinga2
+
+ - name: Restore/backup ccache
+ if: "steps.vars.outputs.CAN_BUILD == 'true'"
+ id: ccache
+ uses: actions/cache@v1
+ with:
+ path: rpm-icinga2/ccache
+ key: |-
+ ${{ matrix.distro.name }}/${{ matrix.distro.release }}-ccache-${{ hashFiles('rpm-icinga2/ccache') }}
+
+ - name: Binary
+ if: "steps.vars.outputs.CAN_BUILD == 'true'"
+ run: |
+ set -exo pipefail
+ git checkout -B master
+ if [ -e rpm-icinga2/ccache ]; then
+ chmod -R o+w rpm-icinga2/ccache
+ fi
+ docker run --rm \
+ -v "$(pwd)/rpm-icinga2:/rpm-icinga2" \
+ -v "$(pwd)/.git:/icinga2.git:ro" \
+ -w /rpm-icinga2 \
+ -e ICINGA_BUILD_PROJECT=icinga2 \
+ -e ICINGA_BUILD_TYPE=snapshot \
+ -e UPSTREAM_GIT_URL=file:///icinga2.git \
+ registry.icinga.com/build-docker/${{ matrix.distro.name }}/${{ matrix.distro.release }} \
+ icinga-build-package
+
+ - name: Test
+ if: "steps.vars.outputs.CAN_BUILD == 'true'"
+ run: |
+ set -exo pipefail
+ docker run --rm \
+ -v "$(pwd)/rpm-icinga2:/rpm-icinga2" \
+ -w /rpm-icinga2 \
+ -e ICINGA_BUILD_PROJECT=icinga2 \
+ -e ICINGA_BUILD_TYPE=snapshot \
+ registry.icinga.com/build-docker/${{ matrix.distro.name }}/${{ matrix.distro.release }} \
+ icinga-build-test
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
new file mode 100644
index 0000000..18d8e57
--- /dev/null
+++ b/.github/workflows/windows.yml
@@ -0,0 +1,53 @@
+name: Windows
+
+on:
+ push:
+ branches:
+ - master
+ - 'support/*'
+ pull_request: {}
+
+concurrency:
+ group: windows-${{ github.event_name == 'push' && github.sha || github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ windows:
+ name: Windows
+
+ strategy:
+ fail-fast: false
+ max-parallel: 1
+ matrix:
+ bits: [32, 64]
+
+ runs-on: windows-2019
+
+ env:
+ BITS: '${{ matrix.bits }}'
+ ICINGA_BUILD_TYPE: snapshot
+ UPSTREAM_GIT_URL: file://D:/a/icinga2/icinga2/.git
+
+ steps:
+ - name: Checkout HEAD
+ uses: actions/checkout@v1
+
+ - name: windows-icinga2
+ run: |
+ git clone https://git.icinga.com/packaging/windows-icinga2.git
+
+ - name: Build tools
+ run: |
+ & .\doc\win-dev.ps1
+
+ - name: Source
+ run: |
+ git checkout -B master
+ cd windows-icinga2
+ & .\source.ps1
+
+ - name: Binary
+ working-directory: windows-icinga2
+ run: |
+ New-Item -ItemType Directory -Path 'C:\Program Files\Icinga2\WillBeRemoved' -ErrorAction SilentlyContinue
+ & .\build.ps1
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..ce91cc8
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,24 @@
+# Exclude all hidden files
+.*
+
+# Except those related to git and vagrant
+!.git*
+!.puppet*
+!.travis.yml
+!.mailmap
+
+## Tools
+*~
+tickets.pickle
+
+## Build artifacts
+build*/
+debug/
+release/
+cmake-build-debug
+/Testing/
+/install/
+/vendor/
+tools/selinux/icinga2.pp
+tools/selinux/icinga2_selinux.8
+tools/selinux/tmp
diff --git a/.mailmap b/.mailmap
new file mode 100644
index 0000000..588d09a
--- /dev/null
+++ b/.mailmap
@@ -0,0 +1,64 @@
+<alexander.klimov@icinga.com> <alexander.klimov@netways.de>
+Alexander A. Klimov <alexander.klimov@icinga.com> <alexander.klimov@icinga.com>
+<alexander.klimov@icinga.com> <grandmaster@al2klimov.de>
+<assaf@aikilinux.com> <assaf.flatto@livepopuli.com>
+<atj@pulsewidth.org.uk> <adam.james@transitiv.co.uk>
+<bernd.erk@icinga.com> <bernd.erk@icinga.org>
+<blerim.sheqa@icinga.com> <blerim.sheqa@netways.de>
+<cycloon@is-root.org> <christian.gut@trendswm.de>
+<dirk.goetz@icinga.com> <dirk.goetz@netways.de>
+<eric.lippmann@icinga.com> <eric.lippmann@netways.de>
+<gunnar.beutner@icinga.com> <gunnar.beutner@netways.de>
+<gunnar.beutner@icinga.com> <gunnar@beutner.name>
+<gunnar.beutner@icinga.com> <gunnar@blade9.beutner.name>
+<jason.young@velaspan.com> <jyoung15@gmail.com>
+<jo.goossens@hosted-power.com> <sales@hosted-power.com>
+<johannes.meyer@icinga.com> <johannes.meyer@netways.de>
+<julian.brost@icinga.com> <julian@0x4a42.net>
+<julian.brost@icinga.com> <julian.brost@gmail.com>
+<lars.engels@0x20.net> <lars@0x20.net>
+<lennart.betz@icinga.com> <lennart.betz@netways.de>
+<mail@fabian-roehl.de> <FRoehl@freicon.de>
+<MarcusCaepio@users.noreply.github.com> <7324088+MarcusCaepio@users.noreply.github.com>
+<marius@graylog.com> <marius@torch.sh>
+<markus.frosch@icinga.com> <lazyfrosch@icinga.org>
+<markus.frosch@icinga.com> <markus@lazyfrosch.de>
+<mathias.aerts@delta.blue> <mathiasaerts@users.noreply.github.com>
+<michael.friedrich@icinga.com> <michael.friedrich@gmail.com>
+<michael.friedrich@icinga.com> <Michael.Friedrich@netways.de>
+<nicole.lang@icinga.com> <nicole.lang@netways.de>
+<noah.hilverling@icinga.com> <noah@hilverling.com>
+<sftw@leeclemens.net> <java@leeclemens.net>
+<thomas.widhalm@icinga.com> <thomas.widhalm@netways.de>
+<thomas.widhalm@icinga.com> <widhalmt@widhalmt.or.at>
+<tobias.vonderkrone@profitbricks.com> <tobias@vonderkrone.info>
+<yonas.habteab@icinga.com> <yonas.habteab@netways.de>
+Alex <alexp710@hotmail.com> <alexp710@hotmail.com>
+Baptiste Beauplat <lyknode@cilg.org> <lyknode@cilg.org>
+Carsten Köbke <carsten.koebke@gmx.de> Carsten Koebke <carsten.koebke@koebbes.de>
+Claudio Kuenzler <ck@claudiokuenzler.com>
+Diana Flach <diana.flach@icinga.com> <crunsher@bamberg.ccc.de>
+Diana Flach <diana.flach@icinga.com> <Crunsher@users.noreply.github.com>
+Diana Flach <diana.flach@icinga.com> <jean-marcel.flach@netways.de>
+Diana Flach <diana.flach@icinga.com> Jean Flach <jean-marcel.flach@icinga.com>
+Dolf Schimmel <dolf@transip.nl> <dolf@dolfschimmel.nl>
+Gunnar Beutner <gunnar.beutner@icinga.com> <icinga@net-icinga2.adm.netways.de>
+Henrik Triem <henrik.triem@icinga.com> <henrik.triem@netways.de>
+Henrik Triem <henrik.triem@icinga.com> Henrik Triem <43344334+htriem@users.noreply.github.com>
+<henrik.triem@icinga.com> <Henrik.Triem@icinga.com>
+Jens Schanz <jens.schanz@mueller.de> <mail@jensschanz.de>
+Jens Schanz <jens.schanz@mueller.de> Schanz, Jens <jens.schanz@mueller.de>
+Kálmán „KAMI” Szalai <kami911@gmail.com> <kami911@gmail.com>
+Lorenz Kästle <lorenz.kaestle@netways.de> <12514511+RincewindsHat@users.noreply.github.com>
+Marianne Spiller <github@spiller.me>
+Markus Waldmüller <markus.waldmueller@netways.de>
+Mattia Codato <mattia.codato@wuerth-phoenix.com> mcodato <64135571+mcodato@users.noreply.github.com>
+Michael Insel <mcktr55@gmail.com> <mcktr55@gmail.com>
+Michael Insel <mcktr55@gmail.com> <michael@email.de>
+Michael Insel <mcktr55@gmail.com> <michael@insel.email>
+nemtrif <ntrifunovic@hotmail.com> <nemtrif@users.noreply.github.com>
+nemtrif <ntrifunovic@hotmail.com> <ntrifunovic@hotmail.com>
+Robin O'Brien <robin@labs.epiuse.com> <robinjohnobrien@gmail.com>
+Roman Gerhardt <roman.gerhardt@cbc-x.com> <roman.gerhardt@cbc-x.com>
+Sebastian Chrostek <sebastian@chrostek.net> <sebastian@chrostek.net>
+Thomas Gelf <thomas.gelf@icinga.com> <thomas@gelf.net>
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..895a281
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,298 @@
+Aaron Bishop <erroneous@gmail.com>
+Adam Bolte <abolte@systemsaviour.com>
+Adam James <atj@pulsewidth.org.uk>
+akrus <akrus@flygroup.st>
+Alan Jenkins <alan.christopher.jenkins@gmail.com>
+Alan Litster <alan.litster@twentyci.co.uk>
+Alex <alexp710@hotmail.com>
+Alex Merry <alexander.merry@nanoporetech.com>
+Alexander A. Klimov <alexander.klimov@icinga.com>
+Alexander Fuhr <alexander.fuhr@netways.de>
+Alexander Schomburg <script.acc@alex.schomb.org>
+Alexander Stoll <astoll@netways.de>
+Alexander Wirt <formorer@debian.org>
+Alvar Penning <alvar.penning@icinga.com>
+Andrea Avancini <andrea.avancini@wuerth-phoenix.com>
+Andrea Kao <eirinikos@gmail.com>
+Andreas Maus <maus@badphish.ypbind.de>
+Andreas Scherbaum <andreas@scherbaum.biz>
+Andreas Unterkircher <unki@netshadow.net>
+Andres Ivanov <andres@andres.wtf>
+Andrew Jaffie <ajaffie@gmail.com>
+Andrew Meyer <ameyer+secure@nodnetwork.org>
+Andy Grunwald <andygrunwald@gmail.com>
+Ant1x <37016240+Ant1x@users.noreply.github.com>
+Arnd Hannemann <arnd@arndnet.de>
+Assaf Flatto <assaf@aikilinux.com>
+azthec <azthec@users.noreply.github.com>
+Baptiste Beauplat <lyknode@cilg.org>
+BarbUk <julien.virey@gmail.com>
+Bård Dahlmo-Lerbæk <bard.dahlmo-lerbaek@skatteetaten.no>
+Bas Couwenberg <sebastic@xs4all.nl>
+bascarsija <bascarsija.dev@gmail.com>
+Bastian Guse <bguse@nocopy.de>
+Bauerheim, Marcus <mbauerheim@spirit21.com>
+BausPhi <philipp.baus.studium@googlemail.com>
+Benedikt Heine <bebe@bebehei.de>
+Benjamin Groeber <Benjamin.Groeber@wuerth-phoenix.com>
+Bernd Arnold <wopfel@gmail.com>
+Bernd Erk <bernd.erk@icinga.com>
+Berthold Cogel <cogel@uni-koeln.de>
+Blerim Sheqa <blerim.sheqa@icinga.com>
+Brendan Jurd <direvus@gmail.com>
+Brian De Wolf <git@bldewolf.com>
+Brian Dockter <specus@gmail.com>
+Bruno Lingner <mail@hugo.ro>
+C C Magnus Gustavsson <magnus@gustavsson.se>
+Carlos Cesario <carloscesario@gmail.com>
+Carsten Köbke <carsten.koebke@gmx.de>
+Chris Boot <crb@tiger-computing.co.uk>
+Christian Birk <mail@birkc.de>
+Christian Gut <cycloon@is-root.org>
+Christian Harke <ch.harke@gmail.com>
+Christian Jonak <christian@jonak.org>
+Christian Lehmann <christian_lehmann@gmx.de>
+Christian Loos <cloos@netsandbox.de>
+Christian Schmidt <github@chsc.dk>
+Christopher Peterson <3893680+cspeterson@users.noreply.github.com>
+Christopher Schirner <schinken@bamberg.ccc.de>
+Claudio Bilotta <bilottalove@gmail.com>
+Claudio Kuenzler <ck@claudiokuenzler.com>
+Conrad Clement <cclement@printeron.com>
+cstegm <cstegm@users.noreply.github.com>
+ctrlaltca <ctrlaltca@gmail.com>
+Damiano Chini <damiano.chini@wuerth-phoenix.com>
+Daniel Bodky <daniel.bodky@netways.de>
+Daniel Helgenberger <daniel.helgenberger@m-box.de>
+Daniel Kesselberg <mail@danielkesselberg.de>
+Daniil Yaroslavtsev <dyaroslavtsev@confyrm.com>
+David Beck <techiscool@gmail.com>
+David Lublink <github.com@spam.lublink.net>
+Denis <zaharden@gmail.com>
+Dennis Lichtenthäler <dennis.lichtenthaeler@stiftung-tannenhof.de>
+dh.harald <dh.harald@gmail.com>
+Diana Flach <diana.flach@icinga.com>
+Dinesh Majrekar <dinesh.majrekar@serverchoice.com>
+Dirk Goetz <dirk.goetz@icinga.com>
+Dirk Melchers <dirk@dirk-melchers.de>
+Dolf Schimmel <dolf@transip.nl>
+Dominik Riva <driva@protonmail.com>
+dominik-r-s <43005480+dominik-r-s@users.noreply.github.com>
+Edgar Fuß <ef@math.uni-bonn.de>
+Eduard Güldner <eduard.gueldner@gmail.com>
+Edvin Seferovic <edvin@seferovic.net>
+Elias Ohm <eohm@novomind.com>
+Élie Bouttier <elie@bouttier.eu>
+Eric Lippmann <eric.lippmann@icinga.com>
+Evgeni Golov <evgeni@golov.de>
+Ewoud Kohl van Wijngaarden <ewoud@kohlvanwijngaarden.nl>
+Fabian Röhl <mail@fabian-roehl.de>
+Fabian Werner <47595490+fabieins@users.noreply.github.com>
+fbachmann <bachmann.f@gmail.com>
+Federico Cuello <federico.cuello@sociomantic.com>
+Federico Pires <federico.pires@upsight.com>
+Ferdi Gueran <ferdi.gueran@nextevolution.de>
+Feu Mourek <feu.mourek@icinga.com>
+Francesco Colista <fcolista@alpinelinux.org>
+Gaël Beaudoin <gaboo@gaboo.org>
+Georg Faerber <georg@riseup.net>
+Georg Haas <hax404foogit@hax404.de>
+Gerd von Egidy <gerd@egidy.de>
+gitmopp <mopp@gmx.net>
+Glauco Vinicius <gl4uc0@gmail.com>
+Greg Hewgill <greg@hewgill.com>
+Grischa Zengel <github.com@zengel.info>
+Gunnar Beutner <gunnar.beutner@icinga.com>
+Hannes Happle <info@h2-it.de>
+Hannes Van de Vel <h@nnes.be>
+Harald Laabs <github@dasr.de>
+Heike Jurzik <icinga@huhnix.org>
+Hendrik Röder <hendrik.biz@gmail.com>
+Henrik Triem <henrik.triem@icinga.com>
+Ian Kelling <ian@iankelling.org>
+Ildar Hizbulin <hizel@vyborg.ru>
+Irina Kaprizkina <ikapriz@gmail.com>
+Iustin Pop <iustin@k1024.org>
+Jaap Marcus <9754650+jaapmarcus@users.noreply.github.com>
+Jack <jackdev@mailbox.org>
+James Pharaoh <james@pharaoh.uk>
+Jan Andres <jan.andres@berenberg.de>
+Jan Beich <jbeich@FreeBSD.org>
+Jan Wagner <waja@cyconet.org>
+Janne Heß <janne@hess.ooo>
+Jason Young <jason.young@velaspan.com>
+Jean Flach <jean.flach@icinga.com>
+Jean-Louis Dupond <jean-louis@dupond.be>
+Jens Link <jenslink@quux.de>
+Jens Schanz <jens.schanz@mueller.de>
+Jeon Sang Wan <maxswjeon@naver.com>
+Jeremy Armstrong <lepubel@gmail.com>
+Jérôme Drouet <jerome.drouet@gmail.com>
+Jesse Morgan <morgajel@gmail.com>
+Jo Goossens <jo.goossens@hosted-power.com>
+Jochen Friedrich <j.friedrich@nwe.de>
+Johannes Meyer <johannes.meyer@icinga.com>
+Jonas Meurer <jonas@freesources.org>
+Jordi van Scheijen <jordi.vanscheijen@solvinity.com>
+Joseph L. Casale <jcasale@activenetwerx.com>
+jre3brg <jorge.rebelo@pt.bosch.com>
+Julian Brost <julian.brost@icinga.com>
+K0nne <34264690+K0nne@users.noreply.github.com>
+Kai Goller <kai.goller@netways.de>
+Kálmán „KAMI” Szalai <kami911@gmail.com>
+kiba <zombie32@gmail.com>
+Konstantin Kelemen <konstantin@kel.mn>
+krishna <gskrishna44@gmail.com>
+Lara <lara@uwu.is>
+Lars Engels <lars.engels@0x20.net>
+Lars Krüger <krueger-lars@web.de>
+Lars Vogdt <lrupp@users.noreply.github.com>
+Leah Oswald <mail@leahoswald.de>
+Lee Clemens <sftw@leeclemens.net>
+Lee Garrett <lgarrett@rocketjump.eu>
+Lennart Betz <lennart.betz@icinga.com>
+Leon Stringer <leon@priorsvle.com>
+lihan <tclh123@gmail.com>
+log1-c <24474580+log1-c@users.noreply.github.com>
+Lord Hepipud <contact@lordhepipud.de>
+Lorenz Kästle <lorenz.kaestle@netways.de>
+Louis Sautier <sautier.louis@gmail.com>
+Luca Lesinigo <luca@lm-net.it>
+Lucas Bremgartner <breml@users.noreply.github.com>
+Lucas Fairchild-Madar <lucas.madar@gmail.com>
+Luiz Amaral <luiz.amaral@innogames.com>
+Magnus Bäck <magnus@noun.se>
+Maik Stuebner <maik@stuebner.info>
+Malte Rabenseifner <mail@malte-rabenseifner.de>
+Manuel Reiter <reiter@csc.uni-frankfurt.de>
+Marc Rupprecht <marc.rupprecht@netways.de>
+Marcus van Dam <marcus@marcusvandam.nl>
+MarcusCaepio <MarcusCaepio@users.noreply.github.com>
+Marianne Spiller <github@spiller.me>
+Marius Bergmann <marius@yeai.de>
+Marius Sturm <marius@graylog.com>
+Mark Leary <mleary@mit.edu>
+Markus Frosch <markus.frosch@icinga.com>
+Markus Waldmüller <markus.waldmueller@netways.de>
+Markus Weber <github@ztweb.de>
+Martijn van Duren <m.vanduren@itisit.nl>
+Martin Neubert <martin.neubert@t-systems.com>
+Martin Stiborsky <martin.stiborsky@gmail.com>
+marxin <mliska@suse.cz>
+Mathias Aerts <mathias.aerts@delta.blue>
+Mathieu Arnold <mat@mat.cc>
+Mathieu Lutfy <mathieu@bidon.ca>
+Matthaus Owens <matthaus@puppetlabs.com>
+Matthias Baur <m.baur@syseleven.de>
+Matthias Schales <black-dragon131@web.de>
+Mattia Codato <mattia.codato@wuerth-phoenix.com>
+Maurice Meyer <morre@mor.re>
+Max Deparade <max.deparade@netways.de>
+Max Rosin <git@hackrid.de>
+Max Zhang <zhenzhan@tibco.com>
+Maximilian Eschenbacher <maximilian@eschenbacher.email>
+Maximilian Falkenstein <maxf@njsm.de>
+Mhd Sulhan <ms@kilabit.info>
+Micha Ahrweiler <me@schnitzi.net>
+Michael Friedrich <michael.friedrich@icinga.com>
+Michael Insel <mcktr55@gmail.com>
+Michael Kraus <michael.kraus@consol.de>
+Michael Newton <miken32@gmail.com>
+Michal Moravec <michal.moravec@macadmin.cz>
+Michal Petko <michal.petko@jumpshot.com>
+Mikesch-mp <mikesch-mp@koebbes.de>
+Mirco Bauer <meebey@meebey.net>
+Mirko Nardin <mirko.nardin@gmx.net>
+mocruz <mocruz@theworkshop.com>
+Muhammad Mominul Huque <nahidbinbaten1995@gmail.com>
+nemtrif <ntrifunovic@hotmail.com>
+Nicolai <nbuchwitz@users.noreply.github.com>
+Nicolas Limage <github@xephon.org>
+Nicole Lang <nicole.lang@icinga.com>
+Niflou <dubuscyr@gmail.com>
+Noah Hilverling <noah.hilverling@icinga.com>
+noobahoi <20069422+noobahoi@users.noreply.github.com>
+Obihörnchen <obihoernchende@gmail.com>
+Oleg Artenii <oleg@artenii.email>
+Pall Sigurdsson <palli-github@minor.is>
+Paolo Schiro <paolo.schiro@kpnqwest.it>
+Patrick <patrick.ihle@patteswelt.de>
+Patrick Dolinic <pdolinic@netways.de>
+Patrick Huy <frz@frz.cc>
+Paul Denning <paul.denning@dimensiondata.com>
+Paul Richards <paul@minimoo.org>
+Pawel Szafer <pszafer@gmail.com>
+Per von Zweigbergk <pvz@itassistans.se>
+Peter Eckel <6815386+peteeckel@users.noreply.github.com>
+Peter Eckel <pe-git@hindenburgring.com>
+Peter Eckel <pe-icinga2@hindenburgring.com>
+Petr Ruzicka <petr.ruzicka@gmail.com>
+Phil Hutchinson <phil@volumedia.co.uk>
+Philipp Dallig <philipp.dallig@gmail.com>
+Philipp Dorschner <philipp.dorschner@netways.de>
+pv2b <pvz@pvz.pp.se>
+Ralph Breier <ralph.breier@roedl.com>
+Reto Zeder <reto.zeder@arcade.ch>
+Ricardo Bartels <ricardo@bitchbrothers.com>
+RincewindsHat <12514511+RincewindsHat@users.noreply.github.com>
+Rinck H. Sonnenberg <r.sonnenberg@netson.nl>
+Robert Lindgren <robert.lindgren@gmail.com>
+Robert Scheck <robert@fedoraproject.org>
+Robin O'Brien <robin@labs.epiuse.com>
+Roland Hopferwieser <rhopfer@ica.jku.at>
+Roman Gerhardt <roman.gerhardt@cbc-x.com>
+Rostislav Opocensky <orbis@pictus.org>
+Rudy Gevaert <rudy.gevaert@ugent.be>
+Rune Darrud <theflyingcorpse@gmail.com>
+ryanohnemus <ryan.ohnemus@tradingtechnologies.com>
+sah <sah@mss.secunet.com>
+Sam Kottler <shk@linux.com>
+Sascha Westermann <sascha.westermann@hl-services.de>
+Sebastian Brückner <mail@invlid.com>
+Sebastian Chrostek <sebastian@chrostek.net>
+Sebastian Eikenberg <eikese@mail.uni-paderborn.de>
+Sebastian Marsching <sebastian-git-2016@marsching.com>
+Silas <67681686+Tqnsls@users.noreply.github.com>
+Simon Murray <spjmurray@yahoo.co.uk>
+Simon Ruderich <simon@ruderich.org>
+Siyalrach Anton Thomas <sat@level8.dk>
+Stefan Bethke <stb@lassitu.de>
+Stefan Triep <stefan@triep.net>
+Stefar77 <Stefar77@users.noreply.github.com>
+Stephan Platz <github@paalsteek.de>
+Stephan Tesch <stephan@tesch.cx>
+Steve McMaster <mcmaster@hurricanelabs.com>
+stevie-sy <38723488+stevie-sy@users.noreply.github.com>
+Strajan Sebastian Ioan <strajan.sebastian@yahoo.com>
+Strix <660956+MrStrix@users.noreply.github.com>
+Stuart Henderson <stu@spacehopper.org>
+Sven Nierlein <sven@nierlein.de>
+Sven Wegener <swegener@gentoo.org>
+sysadt <sysadt@protonmail.com>
+T. Mulyana <nothinux@gmail.com>
+teclogi <27726999+teclogi@users.noreply.github.com>
+Thomas Forrer <thomas.forrer@wuerth-phoenix.com>
+Thomas Gelf <thomas.gelf@icinga.com>
+Thomas Niedermeier <tniedermeier@thomas-krenn.com>
+Thomas Widhalm <thomas.widhalm@icinga.com>
+Tim Hardeck <thardeck@suse.de>
+Tim Weippert <weiti@weiti.eu>
+Timo Buhrmester <van.fstd@gmail.com>
+Tobias Birnbaum <osterd@gmx.de>
+Tobias Deiminger <haxtibal@posteo.de>
+Tobias von der Krone <tobias.vonderkrone@profitbricks.com>
+Tom Geissler <Tom@d7031.de>
+Uwe Ebel <kobmaki@aol.com>
+Valentin Hoebel <valentin@xenuser.org>
+vigiroux <vincent.giroux@nokia.com>
+Vytenis Darulis <vytenis@uber.com>
+Wenger Florian <wenger@unifox.at>
+Will Frey <will.frey@digitalreasoning.com>
+Winfried Angele <winfried.angele@gmail.com>
+Wolfgang Nieder <wnd@gmx.net>
+XnS <git@xns.be>
+Yannick Charton <tontonitch-pro@yahoo.fr>
+Yohan Jarosz <yohanjarosz@yahoo.fr>
+Yonas Habteab <yonas.habteab@icinga.com>
+Zachary McGibbon <zachary.mcgibbon@gmail.com>
+Zoltan Nagy <abesto@abesto.net>
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..f808c2f
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,5442 @@
+# Icinga 2 CHANGELOG
+
+**The latest release announcements are available on [https://icinga.com/blog/](https://icinga.com/blog/).**
+
+Please read the [upgrading](https://icinga.com/docs/icinga2/latest/doc/16-upgrading-icinga-2/)
+documentation before upgrading to a new release.
+
+Released closed milestones can be found on [GitHub](https://github.com/Icinga/icinga2/milestones?state=closed).
+
+## 2.14.2 (2024-01-18)
+
+Version 2.14.2 is a hotfix release for master nodes that mainly
+fixes excessive disk usage caused by the InfluxDB writers.
+
+* InfluxDB: truncate timestamps to whole seconds to save disk space. #9969
+* HttpServerConnection: log request processing time as well. #9970
+* Update Boost shipped on Windows to v1.84. #9970
+
+## 2.14.1 (2023-12-21)
+
+Version 2.14.1 is a hotfix release for masters and satellites that mainly
+prevents permanent disintegration of a whole cluster due to root CA expiry.
+
+### Security
+
+* Automatically renew own root CA and distribute it to all nodes. #9933
+* Update OpenSSL shipped on Windows to v3.0.12. #9946
+* Disable TLS renegotiation (handshake on existing connection). #9946
+
+### Bugfixes
+
+* Icinga DB feature: fix crash due to missing NULL pointer check. #9946
+* Icinga DB feature: fix data written into Redis crashing the Go daemon. #9946
+* GelfWriter: fix deadlock on stop/reload caused by busy queue. #9947
+* Don't lose notifications due to too long output, truncate it. #9947
+
+### Enhancements
+
+* Discard duplicate problem notifications due to state filtering. #9932
+* Speed up API filters targeting specific hosts/services to O(1) (example after this list). #9944
+* POST /v1/console/\*: return HTTP 503 while Icinga is reloading. #9947
+* Update Boost shipped on Windows to v1.83. #9946
+* Documentation: several fixes and improvements. #9921
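To illustrate the filter speed-up (#9944): a query that pins down a single host/service pair can now be resolved in constant time instead of scanning all objects. A minimal sketch, assuming the API listener on localhost:5665 and an API user `root` with password `icinga` (host and service names are placeholders):

```bash
# Fetch exactly one service object; such host/service-specific filters are O(1) since 2.14.1.
curl -k -s -u root:icinga \
    -H 'Accept: application/json' \
    -H 'X-HTTP-Method-Override: GET' \
    -X POST 'https://localhost:5665/v1/objects/services' \
    -d '{ "filter": "host.name == \"agent1\" && service.name == \"ping4\"", "pretty": true }'
```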
+
+## 2.14.0 (2023-07-12)
+
+[Issues and PRs](https://github.com/Icinga/icinga2/issues?q=is%3Aclosed+milestone%3A2.14.0)
+
+### Notes
+
+Upgrading docs: https://icinga.com/docs/icinga2/snapshot/doc/16-upgrading-icinga-2/#upgrading-to-2-14
+
+Thanks to all contributors:
+[atj](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.14.0+author%3Aatj),
+[atwebm](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.14.0+author%3Aatwebm),
+[cspeterson](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.14.0+author%3Acspeterson),
+[cycloon](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.14.0+author%3Acycloon),
+[DamianoChini](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.14.0+author%3ADamianoChini),
+[efuss](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.14.0+author%3Aefuss),
+[fabieins](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.14.0+author%3Afabieins),
+[haxtibal](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.14.0+author%3Ahaxtibal),
+[jaapmarcus](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.14.0+author%3Ajaapmarcus),
+[log1-c](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.14.0+author%3Alog1-c),
+[lrupp](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.14.0+author%3Alrupp),
+[maggu](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.14.0+author%3Amaggu),
+[mcodato](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.14.0+author%3Amcodato),
+[Napsty](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.14.0+author%3ANapsty),
+[orbison](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.14.0+author%3Aorbison),
+[peteeckel](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.14.0+author%3Apeteeckel),
+[slalomsk8er](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.14.0+author%3Aslalomsk8er),
+[stevie-sy](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.14.0+author%3Astevie-sy),
+[Tqnsls](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+is%3Aclosed+milestone%3A2.14.0+author%3ATqnsls)
+
+### Breaking Changes
+
+* Remove CheckResultReader (which has been deprecated since v2.9). #9714
+* Remove StatusDataWriter (which has been deprecated since v2.9). #9715
+* ElasticsearchWriter: drop support for Elasticsearch < v7. #9812
+* Consider a checkable unreachable once one Dependency fails.
+ Previously all of them had to fail. (Consult the upgrading docs.) #8218
+* API: reject config modifications during reload with HTTP status 503. #9445
+* `icinga2 daemon`: to reduce config load time, write the file needed by
+  `icinga2 object list` only if `--dump-objects` is given (sketch after this list). #9586 #9591
+* Default email notification scripts: link to Icinga DB Web,
+ not the monitoring module. (Consult the upgrading docs.) #9742 #9757
+* API: for security reasons hide TicketSalt in /v1/variables. #7863
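Regarding the `--dump-objects` change above, a minimal sketch of the new workflow on a node where `icinga2 object list` is still needed (assumes a locally installed Icinga 2 with readable configuration):

```bash
# Since 2.14 the objects cache behind 'icinga2 object list' is only written on request:
icinga2 daemon -C --dump-objects   # validate the config and write the cache file
icinga2 object list --type Endpoint
```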
+
+#### Icinga 2 Config DSL
+
+* Disallow global variable modification after config commit start (i.e.
+ inside `object/apply T "x" { ... }`) to reduce config load time. #9740
+* Forbid Dependency cycles at config load time. #8389
+* Allow only strings in the arrays Host#groups, Service#groups and
+ User#groups. Needed for consistency, especially by the IDO. #9057
+* Disallow empty object names. (They worked only partially anyway.) #9409
+
+#### Windows Agent only
+
+The official MSIs don't include the following features anymore.
+They weren't intended, tested or needed on Windows and only waste build time,
+bandwidth and disk space. Both new installations and upgrades are affected.
+
+* ElasticsearchWriter #9704
+* GelfWriter #9704
+* GraphiteWriter #9704
+* InfluxdbWriter and Influxdb2Writer #9704
+* OpenTsdbWriter #9704
+* PerfdataWriter #9704
+
+We also don't ship the following files anymore.
+(You can still obtain them manually.)
+
+* `NSCP.msi` (NSClient++ installer) #9703
+* [doc/](doc) (Icinga 2 markdown documentation) #9705
+
+On the other hand, MSIs are now 75% smaller than before.
+
+### Enhancements
+
+* Significantly reduce config load time of large setups.
+ #8118 #9555 #9557 #9572 #9577 #9603 #9608 #9627 #9648 #9657 #9662
+* Allow to connect dependencies via redundancy groups. Only parents within
+ one group are assumed to provide redundancy for each other. #8218
+* Built-in check command ifw-api, communicates directly with the Icinga for
+ Windows REST API. (Doesn't spawn a PowerShell process for that.) #9062
+* JournaldLogger which logs to systemd journal. #9000
+* API: POST /v1/objects: allow discarding some previously modified attributes,
+  i.e. restoring the config files' values (sketch after this list). #9783
+* ElasticsearchWriter: support Elasticsearch v8. #9812
+* Support `$env.ENV_VAR_NAME$` macros. #8302
+* Speed up Icinga DB config dump. #9524
+* Default mail notification scripts: also print `$host.notes$` and `$service.notes$`. #9713
+* Enable built-in OpenSSL DH parameters to allow DHE TLS ciphers. #9811
+* Clean up global default TLS cipher list to improve security. #9809
+* Influxdb(2)Writer: write more precise timestamps (nanoseconds). #9599
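For the POST /v1/objects enhancement (#9783), a hedged sketch of discarding a runtime modification so the value from the config files applies again. The parameter name `restore_attrs`, the attribute and the host name are assumptions for illustration; verify them against the API documentation of your version:

```bash
# Drop the runtime-modified 'vars.os' attribute of host 'agent1' (names are placeholders),
# restoring whatever the config files define for it.
curl -k -s -u root:icinga \
    -H 'Accept: application/json' \
    -X POST 'https://localhost:5665/v1/objects/hosts/agent1' \
    -d '{ "restore_attrs": [ "vars.os" ], "pretty": true }'
```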
+
+### Bugfixes
+
+* Icinga DB feature: normalize several kinds of Redis data so they don't crash the Go daemon.
+ #9772 #9775 #9792 #9793 #9794 #9805
+* Fix parsing of perfdata across multiple lines in plugin output. #8969
+* icinga check: fix last reload failure time. #8429 #9827
+* Resolve macros inside custom vars of IcingaApplication. #9779
+* SELinux: allow Icinga and its plugins to write to syslog. #9688
+* ElasticsearchWriter: fix data buffer flush race condition during stop. #9810
+* Trigger flexible downtimes not in the past if checkable is already down. #9726
+* Send downtime expiration notifications immediately, not after up to a minute. #9726
+
+#### Cluster
+
+* Don't hang in timed out connection attempt. #9711 #9725
+* Fix lost acknowledgements after re-connect. #9718
+* cluster-zone check: don't complain about other local zone members
+  not being connected if there aren't any. #8595
+* Allow agent to update executions delegated to it via /v1/actions/execute-command. #8627
+
+#### API
+
+* Disallow breaking inter-object relationships by changing
+ relationship attributes at runtime, e.g. `Service#host_name`. #9407
+* Correct several HTTP response status codes. #7958 #9354
+* Correct Boolean field types previously reported by /v1/types as Number. #9514
+
+#### CLI
+
+* `icinga2 daemon`: fix -DConfiguration.Concurrency= flag
+ which now allows to override the number of threads. #9643
+* `icinga2 node wizard`: avoid unnecessary chown(2) which may fail and abort the wizard. #8744
+* Correct several log messages. #8895 #8965 #9663
+
+### ITL
+
+Add `linux_netdev` check command. #9045
+
+#### Command Argument Changes
+
+* `disk`: don't pass -m (`disk_megabytes`) by default. #9642
+* `disk`: pass -X fuse.portal (`disk_exclude_type`) by default. #9459
+* `http`: support multiple -k (`http_header`) as array. #8574
+* `icmp`: double defaults for -w (`icmp_wpl`) and -c (`icmp_cpl`). #9041
+* `logfiles`: pass --winwarncrit (`logfiles_winwarncrit`) without argument. #9056
+* `nwc_health`: pass SNMPv3-only args only when using SNMPv3. #9095
+* `vmware-esx-dc-runtime-tools` and `vmware-esx-soap-vm-runtime-tools`:
+ rename `--open-vm-tools` to `--open_vm_tools_ok` (`vmware_openvmtools`). #9611
+
+#### New Command Arguments
+
+| Command | Argument | Custom Variable | PR |
+|------------------------------------|------------------------------|------------------------------------------|-------|
+| `disk` | `-P` | `disk_inode_perfdata` | #9494 |
+| `esxi_hardware` | `--format` | `esxi_hardware_format` | #9435 |
+| `esxi_hardware` | `--pretty` | `esxi_hardware_pretty` | #9435 |
+| `http` | `--verify-host` | `http_verify_host` | #8005 |
+| `icingacli-businessprocess` | `--ack-is-ok` | `icingacli_businessprocess_ackisok` | #9103 |
+| `icingacli-businessprocess` | `--blame` | `icingacli_businessprocess_blame` | #9103 |
+| `icingacli-businessprocess` | `--colors` | `icingacli_businessprocess_colors` | #9103 |
+| `icingacli-businessprocess` | `--downtime-is-ok` | `icingacli_businessprocess_downtimeisok` | #9103 |
+| `icingacli-businessprocess` | `--root-cause` | `icingacli_businessprocess_rootcause` | #9103 |
+| `mem` | `-a` | `mem_available` | #9385 |
+| `mongodb` | `--disable_retry_writes` | `mongodb_disableretrywrites` | #9539 |
+| `mongodb` | `--ssl-ca-cert-file` | `mongodb_ssl_ca_cert_file` | #9610 |
+| `mysql` | `--extra-opts` | `mysql_extra_opts` | #9197 |
+| `nrpe` | `-3` | `nrpe_version_3` | #9296 |
+| `nrpe` | `-D` | `nrpe_no_logging` | #9016 |
+| `nrpe` | `-P` | `nrpe_payload_size` | #9032 |
+| `pgsql` | `--extra-opts` | `pgsql_extra_opts` | #9197 |
+| `postgres` | `$PGCONTROLDATA` (env. var.) | `postgres_pgcontroldata` | #8929 |
+| `postgres` | `--datadir` | `postgres_datadir` | #8924 |
+| `postgres` | `--language` | `postgres_language` | #8924 |
+| `postgres` | `--perflimit` | `postgres_perflimit` | #8924 |
+| `ssl_cert` | `--ignore-host-cn` | `ssl_cert_ignore_host_cn` | #9512 |
+| `ssl_cert` | `--ignore-ocsp-errors` | `ssl_cert_ignore_ocsp_errors` | #9512 |
+| `ssl_cert` | `--ignore-ocsp-timeout` | `ssl_cert_ignore_ocsp_timeout` | #9512 |
+| `ssl_cert` | `--ignore-tls-renegotiation` | `ssl_cert_ignore_tls_renegotiation` | #9042 |
+| `ssl_cert` | `--proxy` | `ssl_cert_proxy` | #8927 |
+| `tcp` | `--sni` | `tcp_sni` | #9347 |
+| `vmware-esx-dc-runtime-tools` | `--no_vm_tools_ok` | `vmware_novmtools` | #9611 |
+| `vmware-esx-soap-vm-runtime-tools` | `--no_vm_tools_ok` | `vmware_novmtools` | #9611 |
+
+### Miscellaneous
+
+* Require GCC 7+ for building to enable C++17. #9133 #9485 #9489
+* Require CMake v2.8.12+ for building.
+ (Compatibility with older ones will be removed from a future CMake version.) #9706
+* Repair config reload on OpenBSD by using waitpid(2), not a SIGCHLD handler. #9518
+* Ignore SIGHUP in main process to allow `/etc/rc.d/icinga2 reload`
+ sending it to all Icinga 2 processes on OpenBSD. #9622
+* Fix crash in debug build on macOS when API and debug log are enabled. #9497
+* Update Boost shipped on Windows to v1.82. #9761
+* Update OpenSSL shipped on Windows to v3.0.9. #9787
+* Update vendored https://github.com/nlohmann/json to v3.9.1. #9675
+* Update vendored https://github.com/nemtrif/utfcpp to v3.2.3. #9683
+* Documentation: several fixes and improvements. #8954 #9741 #9763 #9767 #9769 #9777
+* Several code quality improvements. #8815 #9106 #9250
+ #9508 #9517 #9537 #9594 #9605 #9606 #9641 #9658 #9702 #9717 #9738
+
+## 2.13.9 (2023-12-21)
+
+Version 2.13.9 is a hotfix release for masters and satellites that mainly
+prevents permanent disintegration of a whole cluster due to root CA expiry.
+
+### Security
+
+* Automatically renew own root CA and distribute it to all nodes. #9934
+* Update OpenSSL shipped on Windows to v3.0.12. #9945
+* Disable TLS renegotiation (handshake on existing connection). #9945
+
+### Bugfixes
+
+* Icinga DB feature: fix crash due to missing NULL pointer check. #9945
+* Icinga DB feature: fix data written into Redis crashing the Go daemon. #9945
+
+### Updates
+
+* Update Boost shipped on Windows to v1.83. #9945
+
+## 2.13.8 (2023-07-12)
+
+Version 2.13.8 is a maintenance release that fixes some bugs,
+especially Icinga DB crashes, and updates several bundled libraries.
+
+### Bugfixes
+
+* Icinga DB feature: normalize several kinds of Redis data so they don't crash the Go daemon. #9814
+* Don't hang in timed out connection attempt. #9815
+* Trigger flexible downtimes not in the past if checkable is already down. #9817
+* ElasticsearchWriter: fix data buffer flush race condition during stop. #9818
+* SELinux: allow Icinga and its plugins to write to syslog. #9819
+* Fix lost acknowledgements after re-connect. #9820
+* Fix parsing of perfdata across multiple lines in plugin output. #9821
+* cluster-zone check: don't complain about other local zone members
+  not being connected if there aren't any. #9822
+
+### Updates
+
+* Update Boost shipped on Windows to v1.82. #9816
+* Update OpenSSL shipped on Windows to v3.0.9. #9816
+* Update vendored https://github.com/nlohmann/json to v3.9.1. #9816
+* Update vendored https://github.com/nemtrif/utfcpp to v3.2.3. #9816
+
+## 2.13.7 (2023-02-16)
+
+This security release updates Boost and OpenSSL libraries bundled on Windows
+and repairs broken SELinux policies. By the way it fixes several other bugs.
+
+### Security
+
+* Windows: update bundled OpenSSL to v1.1.1t. #9672
+
+### Bugfixes
+
+* SELinux: fix user and domain creation by explicitly setting the role. #9690
+* Signal handlers: don't interrupt and break plugins spawning. #9682
+* Icinga DB: take check\_period into account during overdue calculation. #9679
+* Avoid corrupted files: use fsync(2)/FlushFileBuffers() everywhere. #9681
+* Solaris: fix compile error. #9680
+
+### Enhancements
+
+* Windows: update bundled Boost to v1.81. #9678
+* Documentation: several fixes and improvements. #9671
+
+## 2.13.6 (2022-11-08)
+
+The main focus of version 2.13.6 is improved performance of Icinga DB and apply rules.
+Additionally, it includes bug fixes related to config loading and API permissions.
+
+### Bugfixes
+
+* Improve the throughput of the Icinga DB feature. #9550
+* Multiple changes to speed up evaluation of apply rules. #9559 #9565 #9558
+* Fix a possible crash on config loading related to `ignore_on_error`. #9560
+* Check API user permission on objects returned by joins. #9561
+* Windows: update bundled Boost and OpenSSL versions. #9562 #9567
+
+## 2.13.5 (2022-08-11)
+
+Version 2.13.5 is a maintenance release that fixes some bugs,
+improves logging and updates the documentation as well as a bundled library.
+
+### Bugfixes
+
+* Ensure not to write an incomplete (i.e. corrupt) state file. #9467
+* ITL: Render vars.apt\_upgrade=true as --upgrade, not --upgrade=true. #9458
+* Icinga DB: Don't surprise (and crash) the Go daemon with config types it doesn't know. #9480
+* Icinga DB: Add missing Redis SELinux policy. #9473
+* Windows: Don't spam the event log with non-error startup messages. #9457
+* Windows: Update bundled version of OpenSSL. #9460
+* Docs: Update RHEL 8 installation instructions. #9482
+* Docs: Add RHEL 9 installation instructions. #9482
+
+## 2.13.4 (2022-06-30)
+
+This release brings the final changes needed for the Icinga DB 1.0 release.
+Additionally, it includes some fixes and a performance improvement resulting
+in faster config validation and reload times.
+
+### Bugfixes
+
+* Fix a race-condition involving object attribute updates that could result in a crash. #9395
+* After a host has recovered, only send problem notifications for its services
+  after they have been rechecked, to avoid false notifications. #9348
+* Speed up config validation by avoiding redundant serialization of objects. #9400
+* Add a `separator` attribute to allow using arguments like `--key=value` as required by some
+ check plugins. This fixes the `--upgrade` and `--dist-upgrade` arguments of `check_apt`. #9397
+* Windows: Update bundled versions of Boost and OpenSSL. #9360 #9415
+
+### Icinga DB
+
+* Add an `icingadb` CheckCommand to allow checking if Icinga DB is healthy. #9417
+* Update documentation related to Icinga DB. #9423
+* Fix a bug where history events could miss the environment ID. #9396
+* Properly serialize attributes of command arguments when explicitly set to `null`. #9398
+* Rename some attributes to make the database schema more consistent. #9399 #9419 #9421
+* Make the error message more helpful if the API isn't set up. #9418
+
+## 2.13.3 (2022-04-14)
+
+This version includes bugfixes for many features of Icinga 2, including fixes for multiple crashes.
+It also includes a number of fixes and improvements for Icinga DB.
+
+### API
+
+* The /v1/config/stages endpoint now immediately rejects parallel config updates
+ instead of accepting and then later failing to verify and activate them. #9328
+
+### Certificates
+
+* The lifetime of newly issued node certificates is reduced from 15 years to 397 days. #9337
+* Compare cluster certificate tickets in constant time. #9333
+
+### Notifications
+
+* Fix a crash that could happen while sending notifications shortly after Icinga 2 started. #9124
+* Fix missing or redundant notifications after certain combinations of state changes happened
+ while notifications were suppressed, for example during a downtime. #9285
+
+### Checks and Commands
+
+* Fix a deadlock when processing check results for checkables with dependencies. #9228
+* Fix a message routing loop that can happen for event commands that are executed within a zone
+ using `command_endpoint` that resulted in excessive execution of the command. #9260
+
+### Downtimes
+
+* Fix scheduling of downtimes for all services on child hosts. #9159
+* Creating fixed downtimes starting immediately now sends a corresponding notification. #9158
+* Fix some issues involving daylight saving time changes that could result in an hour missing
+ from scheduled downtimes. This fix applies to time periods as well. #9238
+
+### Configuration
+
+* Fix the evaluation order of default templates when used in combination with apply rules.
+ Now default templates are imported first as stated in the documentation and
+ as it already happens for objects defined without using apply. #9290
+
+### IDO
+
+* Fix an issue where contacts were not written correctly to the notification history
+ if multiple IDO instances are active on the same node. #9242
+* Explicitly set the encoding for MySQL connections as a workaround for changed defaults
+ in Debian bullseye. #9312
+* Ship a MySQL schema upgrade that fixes inconsistent version information in the
+ full schema file and upgrade files which could have resulted in inaccurate reports
+ of an outdated schema version. #9139
+
+### Performance Data Writers
+
+* Fix a race condition in the InfluxDB Writers that could result in a crash. #9237
+* Fix a log message where Influxdb2Writer logged as InfluxdbWriter. #9315
+* All writers no longer send metrics multiple times after HA failovers. #9322
+
+### Build
+
+* Fix the order of linker flags to fix builds on some ARM platforms. #9164
+* Fix a regression introduced in 2.13.2 preventing non-unity builds. #9094
+* Fix an issue when building within an unrelated Git repository,
+ version information from that repository could incorrectly be used for Icinga 2. #9155
+* Windows: Update bundled Boost version to 1.78.0 and OpenSSL to 1.1.1n #9325
+
+### Internals
+
+* Fix some race conditions due to missing synchronization.
+ These race conditions should not have caused any practical problems
+  besides incorrect numbers in debug log messages. #9306
+* Move the startup.log and status files created when validating incoming cluster config updates
+ to /var/lib/icinga2/api and always keep the last failed startup.log to ease debugging. #9335
+
+### Icinga DB
+
+* The `severity` attribute was updated to match the sort order Icinga Web 2 uses for the IDO.
+ The documentation for this attribute was already incorrect before and was updated
+ to reflect the current functionality. #9239 #9240
+* Fix the `is_sticky` attribute for comments. #9303
+* Fix missing updates of `is_reachable` and `severity` in the state tables. #9241
+* Removing an acknowledgement no longer incorrectly writes comment history. #9302
+* Fix multiple issues so that in an HA zone, both nodes now write consistent history. #9157 #9182 #9190
+* Fix that history events are no longer written when state information should be updated. #9252
+* Fix an issue where incomplete comment history events were generated. #9301
+ **Note:** when removing comments using the API, the dedicated remove-comment action
+ should be used instead of the objects API, otherwise no history event will be generated.
+* Fix handling of non-integer values for the order attribute of command arguments. #9181
+ **Note:** You should only specify integer values for order, other values are converted to integer
+ before use so using fractional numbers there has no effect.
+* Add a dependency on icingadb-redis.service to the systemd service file
+ so that Redis is stopped after Icinga 2. #9304
+* Buffer history events in memory when the Redis connection is lost. #9271
+* Add the previous soft state to the state tables. #9214
+* Add missing locking on object runtime updates. #9300
+
+## 2.13.2 (2021-11-12)
+
+This version only includes changes needed for the release of Icinga DB 1.0.0 RC2 and doesn't include any other bugfixes or features.
+
+### Icinga DB
+
+* Prefix command_id with command type #9085
+* Decouple environment from Icinga 2 Environment constant #9082
+* Make icinga:history:stream:*#event_id deterministic #9076
+* Add downtime.duration & service_state.host_id to Redis #9084
+* Sync checkables along with their states first #9081
+* Flush both buffered states and state checksums on initial dump #9079
+* Introduce icinga:history:stream:downtime#scheduled_by #9080
+* Actually write parent to parent_id of zones #9078
+* Set value in milliseconds for program_start in stats/heartbeat #9077
+* Clean up vanished objects from icinga:checksum:*:state #9074
+* Remove usernotification history stream #9073
+* Write IDs of notified users into notification history stream #9071
+* Make CheckResult#scheduling_source available to Icinga DB #9072
+* Stream runtime state updates only to icinga:runtime:state #9068
+* Publish Redis schema version via XADD icinga:schema #9069
+* Don't include checkable types in history IDs #9070
+* Remove unused Redis key 'icinga:zone:parent' #9075
+* Make sure object relationships are handled correctly during runtime updates #9089
+* Only log queries at debug level #9088
+
+## 2.13.1 (2021-08-19)
+
+The main focus of this version is a security vulnerability in the TLS certificate verification of our metrics writers ElasticsearchWriter, GelfWriter, InfluxdbWriter and Influxdb2Writer.
+
+Version 2.13.1 also fixes two issues introduced with the 2.13.0 release.
+
+### Security
+
+* Add TLS server certificate validation to ElasticsearchWriter, GelfWriter, InfluxdbWriter and Influxdb2Writer ([GHSA-cxfm-8j5v-5qr2](https://github.com/Icinga/icinga2/security/advisories/GHSA-cxfm-8j5v-5qr2))
+
+Depending on your setup, manual intervention beyond installing the new versions
+may be required, so please read the more detailed information in the
+[release blog post](https://icinga.com/blog/2021/08/19/icinga-2-13-1-security-release//)
+carefully.
+
+### Bugfixes
+
+* IDO PgSQL: Fix a string quoting regression introduced in 2.13.0 #8958
+* ApiListener: Automatically fall back to IPv4 in default configuration on systems without IPv6 support #8961
+
+## 2.13.0 (2021-08-03)
+
+[Issues and PRs](https://github.com/Icinga/icinga2/issues?utf8=%E2%9C%93&q=milestone%3A2.13.0)
+
+### Notes
+
+Upgrading docs: https://icinga.com/docs/icinga2/snapshot/doc/16-upgrading-icinga-2/#upgrading-to-v213
+
+Thanks to all contributors:
+[andygrunwald](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aandygrunwald+milestone%3A2.13.0),
+[BausPhi](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3ABausPhi+milestone%3A2.13.0),
+[bebehei](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Abebehei+milestone%3A2.13.0),
+[Bobobo-bo-Bo-bobo](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3ABobobo-bo-Bo-bobo+milestone%3A2.13.0),
+[efuss](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aefuss+milestone%3A2.13.0),
+[froehl](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Afroehl+milestone%3A2.13.0),
+[iustin](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aiustin+milestone%3A2.13.0),
+[JochenFriedrich](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AJochenFriedrich+milestone%3A2.13.0),
+[leeclemens](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aleeclemens+milestone%3A2.13.0),
+[log1-c](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Alog1-c+milestone%3A2.13.0),
+[lyknode](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Alyknode+milestone%3A2.13.0),
+[m41kc0d3](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Am41kc0d3+milestone%3A2.13.0),
+[MarcusCaepio](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AMarcusCaepio+milestone%3A2.13.0),
+[mathiasaerts](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Amathiasaerts+milestone%3A2.13.0),
+[mcktr](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Amcktr+milestone%3A2.13.0),
+[MEschenbacher](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AMEschenbacher+milestone%3A2.13.0),
+[Napsty](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3ANapsty+milestone%3A2.13.0),
+[netson](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Anetson+milestone%3A2.13.0),
+[pdolinic](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Apdolinic+milestone%3A2.13.0),
+[Ragnra](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3ARagnra+milestone%3A2.13.0),
+[RincewindsHat](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3ARincewindsHat+milestone%3A2.13.0),
+[sbraz](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Asbraz+milestone%3A2.13.0),
+[sni](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Asni+milestone%3A2.13.0),
+[sysadt](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Asysadt+milestone%3A2.13.0),
+[XnS](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AXnS+milestone%3A2.13.0),
+[yayayayaka](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Ayayayayaka+milestone%3A2.13.0)
+
+### Enhancements
+
+* Core
+ * PerfdataValue: Add units of measurement #7871
+ * Flapping: Allow to ignore states in flapping detection #8600
+* Cluster
+ * Display log message if two nodes run on incompatible versions #8088
+* API
+ * /v1/actions/remove-downtime: Also remove child downtimes #8913
+ * Add API endpoint: /v1/actions/execute-command #8040
+  * /v1/actions/add-comment: Add param expiry (example after this list) #8035
+ * API-Event StateChange & CheckResult: Add acknowledgement and downtime_depth #7736
+ * Implement new API events ObjectCreated, ObjectDeleted and ObjectModified #8083
+ * Implement scheduling_endpoint attribute to checkable #6326
+* Windows
+ * Add support for Windows Event Log and write early log messages to it #8710
+* IDO
+ * MySQL: support larger host and service names #8425
+* ITL
+ * Add -S parameter for esxi_hardware ITL #8814
+ * Add CheckCommands for Thola #8683
+ * Add option ignore-sct for ssl_cert to ITL #8625
+ * Improve check_dns command when used with monitoring-plugins 2.3 #8589
+ * Add parameter -f to snmp-process #8569
+ * Add systemd CheckCommand #8568
+ * Add new options for ipmi-sensor #8498
+ * check_snmp_int: support -a #8003
+ * check_fail2ban: Add parameter fail2ban_jail to monitor a specific jail only #7960
+ * check_nrpe: Add parameters needed for PKI usage #7907
+* Metrics
+ * Support InfluxDB 2.0 #8719
+ * Add support for InfluxDB basic auth #8314
+* Docs
+ * Add info about ongoing support for IDO #8446
+ * Improve instructions on how to setup a Windows dev env #8400
+ * Improve instructions for installing wixtoolset on Windows #8397
+ * Add section about usage of satellites #8458
+ * Document command for verifying the parent node's certificate #8221
+ * Clarify TimePeriod/ScheduledDowntime time zone handling #8001
+* Misc
+ * Support TLS 1.3 #8718
+ * Livestatus: append app name to program_version #7931
+ * sd_notify() systemd about what we're doing right now #7874
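To illustrate the new `expiry` parameter of /v1/actions/add-comment (#8035), a sketch of adding a comment that is removed automatically once the timestamp has passed (credentials, host name and timestamp are placeholders for your own setup):

```bash
# Add a self-expiring comment to a host; 'expiry' is a UNIX timestamp.
curl -k -s -u root:icinga \
    -H 'Accept: application/json' \
    -X POST 'https://localhost:5665/v1/actions/add-comment' \
    -d '{ "type": "Host", "filter": "host.name == \"agent1\"",
          "author": "icingaadmin", "comment": "maintenance window",
          "expiry": 1735689600, "pretty": true }'
```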
+
+### Bugfixes
+
+* Core
+ * Fix state not being UNKNOWN after process timeout #8937
+ * Set a default severity for loggers #8846
+ * Fix integer overflow when converting large unsigned integers to string #8742
+ * StartUnixWorker(): don't exit() on fork() failure #8427
+ * Fix perf data parser not recognizing scientific notation #8492
+ * Close FDs based on /proc/self/fd #8442
+ * Fix check source getting overwritten on passive check result #8158
+ * Clean up temp files #8157
+ * Improve perf data parser to allow for special output (e.g. ASCII tables) #8008
+ * On check timeout first send SIGTERM #7918
+* Cluster
+ * Drop passive check results for unreachable hosts/services #8267
+ * Fix state timestamps set by the same check result differing across nodes #8101
+* API
+ * Do not override status codes that are not 200 #8532
+ * Update the SSL context after accepting incoming connections #8515
+ * Allow to create API User with password #8321
+ * Send Content-Type as API response header too #8108
+ * Display a correct status when removing a downtime #8104
+ * Display log message if a permission error occurs #8087
+ * Replace broken package name validation regex #8825 #8946
+* Windows
+ * Fix Windows command escape for \" #7092
+* Notifications/Downtimes
+ * Fix no re-notification for non OK state changes with time delay #8562
+ * TimePeriod/ScheduledDowntime: Improve DST handling #8921
+ * Don't send notifications while suppressed by checkable #8513
+ * Fix a crash while removing a downtime from a disappeared checkable #8229
+* IDO
+ * Update program status on stop #8730
+ * Also mark objects inactive in memory on object deactivation #8626
+ * IdoCheckTask: Don't override checkable critical with warn state #8613
+ * PostgreSQL: Do not set standard_conforming_strings to off #8123
+* ITL
+ * check_http: Fix assignment of check_adress blocking check by hostname #8109
+ * check_mysql: Don't set -H if -s is given #8020
+* Metrics
+ * OpenTSDB-Writer: Remove incorrect space causing missing tag error #8245
+
+## 2.12.5 (2021-07-15)
+
+Version 2.12.5 fixes two security vulnerabilities that may lead to privilege
+escalation for authenticated API users. Other improvements include several
+bugfixes related to downtimes, downtime notifications, and more reliable
+connection handling.
+
+### Security
+
+* Don't expose the PKI ticket salt via the API. A leaked salt may lead to privilege
+ escalation for authenticated API users, as it allows them to request
+ certificates for other identities (CVE-2021-32739)
+* Don't expose IdoMysqlConnection, IdoPgsqlConnection, IcingaDB, and
+ ElasticsearchWriter passwords via the API (CVE-2021-32743)
+* Windows: Update bundled OpenSSL to version 1.1.1k #8885
+
+Depending on your setup, manual intervention beyond installing the new versions
+may be required, so please read the more detailed information in the
+[release blog post](https://icinga.com/blog/2021/07/15/releasing-icinga-2-12-5-and-2-11-10/)
+carefully.
+
+### Bugfixes
+
+* Don't send downtime end notification if downtime hasn't started #8877
+* Don't let a failed downtime creation block the others #8863
+* Support downtimes and comments for checkables with long names #8864
+* Trigger fixed downtimes immediately if the current time matches
+ (instead of waiting for the timer) #8889
+* Add configurable timeout for full connection handshake #8866
+
+### Enhancements
+
+* Replace existing downtimes on ScheduledDowntime change #8879
+* Improve crashlog #8865
+
+## 2.12.4 (2021-05-27)
+
+Version 2.12.4 is a maintenance release that fixes some crashes, improves error handling
+and adds compatibility with systems that ship newer Boost versions.
+
+### Bugfixes
+
+* Fix a crash when notification objects are deleted using the API #8782
+* Fix crashes that might occur during downtime scheduling if host or downtime objects are deleted using the API #8785
+* Fix an issue where notifications may incorrectly be skipped after a downtime ends #8775
+* Don't send reminder notification if the notification is still suppressed by a time period #8808
+* Fix an issue where attempting to create a duplicate object using the API
+ might result in the original object being deleted #8787
+* IDO: prioritize program status updates #8809
+* Improve exceptions handling, including a fix for an uncaught exception on Windows #8777
+* Retry file rename operations on Windows to avoid intermittent locking issues #8771
+
+### Enhancements
+
+* Support Boost 1.74 (Ubuntu 21.04, Fedora 34) #8792
+
+## 2.12.3 (2020-12-15)
+
+Version 2.12.3 resolves a security vulnerability where revoked certificates were
+renewed automatically, ignoring the CRL.
+
+This version also resolves high-load issues with the config sync on Windows and an
+issue where Icinga 2 features could not be enabled/disabled over the API.
+
+### Security
+
+* Fix revoked certificates due for renewal being renewed automatically, ignoring the CRL (CVE-2020-29663)
+
+When a CRL is specified in the ApiListener configuration, Icinga 2 so far only used it
+when establishing connections, but not when a certificate was requested.
+This allowed a node to automatically renew a revoked certificate if it met the
+other conditions for auto renewal (issued before 2017 or expiring in less than 30 days).
+
+Because Icinga 2 currently (v2.12.3 and earlier) uses a validity duration of 15 years,
+this only affects setups with external certificate signing and revoked certificates
+that expire in less than 30 days.
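+
+For reference, the CRL in question is the one configured on the ApiListener object.
+A minimal sketch (the CRL path is a placeholder and must match your own PKI layout):
+
+```
+object ApiListener "api" {
+  // With this fix, the CRL is also consulted when certificates are requested,
+  // not only when connections are established.
+  crl_path = "/var/lib/icinga2/certs/ca.crl"
+}
+```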
+
+### Bugfixes
+
+* Improve config sync locking - resolves high load issues on Windows #8511
+* Fix runtime config updates being ignored for objects without zone #8549
+* Use proper buffer size for OpenSSL error messages #8542
+
+### Enhancements
+
+* On checkable recovery: re-check children that have a problem #8506
+
+## 2.12.2 (2020-12-01)
+
+Version 2.12.2 fixes several issues to improve the reliability of the cluster functionality.
+
+### Bugfixes
+
+* Fix a connection leak with misconfigured agents #8483
+* Properly sync changes of config objects in global zones done via the API #8474 #8470
+* Prevent other clients from being disconnected when replaying the cluster log takes very long #8496
+* Avoid duplicate connections between endpoints #8465
+* Ignore incoming config object updates for unknown zones #8461
+* Check timestamps before removing files in config sync #8495
+
+### Enhancements
+
+* Include HTTP status codes in log #8467
+
+## 2.12.1 (2020-10-15)
+
+Version 2.12.1 fixes several crashes, deadlocks and excessive check latencies.
+It also addresses several bugs regarding IDO, API, notifications and checks.
+
+### Bugfixes
+
+* Core
+ * Fix crashes during config update #8348 #8345
+ * Fix crash while removing a downtime #8228
+ * Ensure the daemon doesn't get killed by logrotate #8170
+ * Fix hangup during shutdown #8211
+ * Fix a deadlock in Icinga DB #8168
+ * Clean up zombie processes during reload #8376
+ * Reduce check latency #8276
+* IDO
+ * Prevent unnecessary IDO updates #8327 #8320
+ * Commit IDO MySQL transactions earlier #8349
+ * Make sure to insert IDO program status #8330
+ * Improve IDO queue stats logging #8271 #8328 #8379
+* Misc
+ * Ensure API connections are closed properly #8293
+ * Prevent unnecessary notifications #8299
+ * Don't skip null values of command arguments #8174
+ * Fix Windows .exe version #8234
+ * Reset Icinga check warning after successful config update #8189
+
+## 2.12.0 (2020-08-05)
+
+[Issues and PRs](https://github.com/Icinga/icinga2/issues?utf8=%E2%9C%93&q=milestone%3A2.12.0)
+
+### Notes
+
+Upgrading docs: https://icinga.com/docs/icinga2/snapshot/doc/16-upgrading-icinga-2/#upgrading-to-v212
+
+Thanks to all contributors:
+[Ant1x](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AAnt1x+milestone%3A2.12.0),
+[azthec](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aazthec+milestone%3A2.12.0),
+[baurmatt](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Abaurmatt+milestone%3A2.12.0),
+[bootc](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Abootc+milestone%3A2.12.0),
+[Foxeronie](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AFoxeronie+milestone%3A2.12.0),
+[ggzengel](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aggzengel+milestone%3A2.12.0),
+[islander](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aislander+milestone%3A2.12.0),
+[joni1993](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Ajoni1993+milestone%3A2.12.0),
+[KAMI911](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AKAMI911+milestone%3A2.12.0),
+[mcktr](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Amcktr+milestone%3A2.12.0),
+[MichalMMac](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AMichalMMac+milestone%3A2.12.0),
+[sebastic](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Asebastic+milestone%3A2.12.0),
+[sthen](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Asthen+milestone%3A2.12.0),
+[unki](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aunki+milestone%3A2.12.0),
+[vigiroux](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Avigiroux+milestone%3A2.12.0),
+[wopfel](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Awopfel+milestone%3A2.12.0)
+
+### Breaking changes
+
+* Deprecate Windows plugins in favor of our
+ [PowerShell plugins](https://github.com/Icinga/icinga-powershell-plugins) #8071
+* Deprecate Livestatus #8051
+* Refuse acknowledging an already acknowledged checkable #7695
+* Config lexer: complain on EOF in heredocs, i.e. `{{{abc<EOF>` #7541
+
+### Enhancements
+
+* Core
+ * Implement new database backend: Icinga DB #7571
+ * Re-send notifications previously suppressed by their time periods #7816
+* API
+ * Host/Service: Add `acknowledgement_last_change` and `next_update` attributes #7881 #7534
+ * Improve error message for POST queries #7681
+ * /v1/actions/remove-comment: let users specify themselves #7646
+ * /v1/actions/remove-downtime: let users specify themselves #7645
+ * /v1/config/stages: Add 'activate' parameter #7535
+* CLI
+ * Add `pki verify` command for better TLS certificate troubleshooting #7843
+ * Add OpenSSL version to 'Build' section in --version #7833
+ * Improve experience with 'Node Setup for Agents/Satellite' #7835
+* DSL
+ * Add `get_template()` and `get_templates()` #7632
+ * `MacroProcessor::ResolveArguments()`: skip null argument values #7567
+ * Fix crash due to dependency apply rule with `ignore_on_error` and non-existing parent #7538
+ * Introduce ternary operator (`x ? y : z`) #7442
+ * LegacyTimePeriod: support specifying seconds #7439
+ * Add support for Lambda Closures (`() use(x) => x and () use(x) => { return x }`) #7417 (see the example after this list)
+* ITL
+ * Add notemp parameter to oracle_health #7748
+ * Add extended checks options to snmp-interface command template #7602
+ * Add file age check for Windows command definition #7540
+* Docs
+ * Development: Update debugging instructions #7867
+ * Add new API clients #7859
+ * Clarify CRITICAL vs. UNKNOWN #7665
+ * Explicitly explain how to disable freshness checks #7664
+ * Update installation for RHEL/CentOS 8 and SLES 15 #7640
+ * Add PowerShell example to validate the certificate #7603
+* Misc
+ * Don't send `event::Heartbeat` to unauthenticated peers #7747
+ * OpenTsdbWriter: Add custom tag support #7357
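+
+An illustrative (made-up) snippet combining two of the DSL additions from this release,
+the lambda closure syntax (#7417) and the ternary operator (#7442):
+
+```
+var factor = 2
+var scale = () use(factor) => 10 * factor  // closure capturing `factor`
+var label = scale() > 15 ? "big" : "small" // ternary operator
+```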
+
+### Bugfixes
+
+* Core
+ * Fix JSON-RPC crashes #7532 #7737
+ * Fix zone definitions in zones #7546
+ * Fix deadlock during start on OpenBSD #7739
+ * Consider PENDING not a problem #7685
+ * Fix zombie processes after reload #7606
+ * Don't wait for checks to finish during reload #7894
+* Cluster
+ * Fix segfault during heartbeat timeout with clients not yet signed #7970
+ * Make the config update process mutually exclusive (Prevents file system race conditions) #7936
+ * Fix `check_timeout` not being forwarded to agent command endpoints #7861
+ * Config sync: Use a more friendly message when configs are equal and don't need a reload #7811
+ * Fix open connections when agent waits for CA approval #7686
+ * Consider a JsonRpcConnection alive on a single byte of TLS payload, not only on a whole message #7836
+ * Send JsonRpcConnection heartbeat every 20s instead of 10s #8102
+ * Use JsonRpcConnection heartbeat only to update connection liveness (m\_Seen) #8142
+ * Fix TLS context not being updated on signed certificate messages on agents #7654
+* API
+ * Close connections w/o successful TLS handshakes after 10s #7809
+ * Handle permission exceptions soon enough, returning 404 #7528
+* SELinux
+ * Fix safe-reload #7858
+ * Allow direct SMTP notifications #7749
+* Windows
+ * Terminate check processes with UNKNOWN state on timeout #7788
+ * Ensure that log replay files are properly renamed #7767
+* Metrics
+ * Graphite/OpenTSDB: Ensure that reconnect failure is detected #7765
+ * Always send 0 as value for thresholds #7696
+* Scripts
+ * Fix notification scripts to stay compatible with Dash #7706
+ * Fix bash line continuation in mail-host-notification.sh #7701
+ * Fix notification scripts string comparison #7647
+ * Service and host mail-notifications: Add line-breaks to very long output #6822
+ * Set correct UTF-8 email subject header (RFC1342) #6369
+* Misc
+ * DSL: Fix segfault due to passing null as custom function to `Array#{sort,map,reduce,filter,any,all}()` #8053
+ * CLI: `pki save-cert`: allow specifying --key and --cert for backwards compatibility #7995
+ * Catch exception when trusted cert is not readable during node setup on agent/satellite #7838
+ * CheckCommand ssl: Fix wrong parameter `-N` #7741
+ * Code quality fixes
+ * Small documentation fixes
+
+## 2.12.0 RC1 (2020-03-13)
+
+[Issues and PRs](https://github.com/Icinga/icinga2/issues?utf8=%E2%9C%93&q=milestone%3A2.12.0)
+
+### Notes
+
+Upgrading docs: https://icinga.com/docs/icinga2/snapshot/doc/16-upgrading-icinga-2/#upgrading-to-v212
+
+Thanks to all contributors:
+[Ant1x](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AAnt1x+milestone%3A2.12.0),
+[azthec](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aazthec+milestone%3A2.12.0),
+[baurmatt](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Abaurmatt+milestone%3A2.12.0),
+[bootc](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Abootc+milestone%3A2.12.0),
+[Foxeronie](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AFoxeronie+milestone%3A2.12.0),
+[ggzengel](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aggzengel+milestone%3A2.12.0),
+[islander](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aislander+milestone%3A2.12.0),
+[joni1993](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Ajoni1993+milestone%3A2.12.0),
+[KAMI911](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AKAMI911+milestone%3A2.12.0),
+[mcktr](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Amcktr+milestone%3A2.12.0),
+[MichalMMac](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AMichalMMac+milestone%3A2.12.0),
+[sebastic](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Asebastic+milestone%3A2.12.0),
+[sthen](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Asthen+milestone%3A2.12.0),
+[unki](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aunki+milestone%3A2.12.0),
+[vigiroux](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Avigiroux+milestone%3A2.12.0),
+[wopfel](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Awopfel+milestone%3A2.12.0)
+
+### Breaking changes
+
+* Refuse acknowledging an already acknowledged checkable #7695
+* Config lexer: complain on EOF in heredocs, i.e. `{{{abc<EOF>` #7541
+
+### Enhancements
+
+* Core
+ * Implement new database backend: Icinga DB #7571
+* API
+ * Host/Service: Add `acknowledgement_last_change` and `next_update` attributes #7881 #7534
+ * Improve error message for POST queries #7681
+ * /v1/actions/remove-comment: let users specify themselves #7646
+ * /v1/actions/remove-downtime: let users specify themselves #7645
+ * /v1/config/stages: Add 'activate' parameter #7535
+* CLI
+ * Add `pki verify` command for better TLS certificate troubleshooting #7843
+ * Add OpenSSL version to 'Build' section in --version #7833
+ * Improve experience with 'Node Setup for Agents/Satellite' #7835
+* DSL
+ * Add `get_template()` and `get_templates()` #7632
+ * `MacroProcessor::ResolveArguments()`: skip null argument values #7567
+ * Fix crash due to dependency apply rule with `ignore_on_error` and non-existing parent #7538
+ * Introduce ternary operator (`x ? y : z`) #7442
+ * LegacyTimePeriod: support specifying seconds #7439
+ * Add support for Lambda Closures (`() use(x) => x and () use(x) => { return x }`) #7417
+* ITL
+ * Add notemp parameter to oracle_health #7748
+ * Add extended checks options to snmp-interface command template #7602
+ * Add file age check for Windows command definition #7540
+* Docs
+ * Development: Update debugging instructions #7867
+ * Add new API clients #7859
+ * Clarify CRITICAL vs. UNKNOWN #7665
+ * Explicitly explain how to disable freshness checks #7664
+ * Update installation for RHEL/CentOS 8 and SLES 15 #7640
+ * Add PowerShell example to validate the certificate #7603
+* Misc
+ * Don't send `event::Heartbeat` to unauthenticated peers #7747
+ * OpenTsdbWriter: Add custom tag support #7357
+
+### Bugfixes
+
+* Core
+ * Fix JSON-RPC crashes #7532 #7737
+ * Fix zone definitions in zones #7546
+ * Fix deadlock during start on OpenBSD #7739
+ * Consider PENDING not a problem #7685
+ * Fix zombie processes after reload #7606
+* Cluster
+ * Fix `check_timeout` not being forwarded to agent command endpoints #7861
+ * Config sync: Use a more friendly message when configs are equal and don't need a reload #7811
+ * Fix open connections when agent waits for CA approval #7686
+ * Fix TLS context not being updated on signed certificate messages on agents #7654
+* API
+ * Close connections w/o successful TLS handshakes after 10s #7809
+ * Handle permission exceptions soon enough, returning 404 #7528
+* SELinux
+ * Fix safe-reload #7858
+ * Allow direct SMTP notifications #7749
+* Windows
+ * Terminate check processes with UNKNOWN state on timeout #7788
+ * Ensure that log replay files are properly renamed #7767
+* Metrics
+ * Graphite/OpenTSDB: Ensure that reconnect failure is detected #7765
+ * Always send 0 as value for thresholds #7696
+* Scripts
+ * Fix notification scripts to stay compatible with Dash #7706
+ * Fix bash line continuation in mail-host-notification.sh #7701
+ * Fix notification scripts string comparison #7647
+ * Service and host mail-notifications: Add line-breaks to very long output #6822
+ * Set correct UTF-8 email subject header (RFC1342) #6369
+* Misc
+ * Catch exception when trusted cert is not readable during node setup on agent/satellite #7838
+ * CheckCommand ssl: Fix wrong parameter `-N` #7741
+ * Code quality fixes
+ * Small documentation fixes
+
+## 2.11.11 (2021-08-19)
+
+The main focus of these versions is fixing a security vulnerability in the TLS certificate verification of our metrics writers ElasticsearchWriter, GelfWriter and InfluxdbWriter.
+
+### Security
+
+* Add TLS server certificate validation to ElasticsearchWriter, GelfWriter and InfluxdbWriter (see the sketch below)
+
+Depending on your setup, manual intervention beyond installing the new versions
+may be required, so please read the more detailed information in the
+[release blog post](https://icinga.com/blog/2021/08/19/icinga-2-13-1-security-release//)
+carefully.
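+
+As a minimal sketch of an affected writer configuration (attribute names `ssl_enable`
+and `ssl_ca_cert` as documented for InfluxdbWriter; host and CA path are placeholders):
+
+```
+object InfluxdbWriter "influxdb" {
+  host = "influxdb.example.com"
+  port = 8086
+  database = "icinga2"
+  ssl_enable = true
+  // With this release the server certificate is validated against this CA
+  ssl_ca_cert = "/var/lib/icinga2/certs/ca.crt"
+}
+```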
+
+## 2.11.10 (2021-07-15)
+
+Version 2.11.10 fixes two security vulnerabilities that may lead to privilege
+escalation for authenticated API users. Other improvements include several
+bugfixes related to downtimes, downtime notifications, and more reliable
+connection handling.
+
+### Security
+
+* Don't expose the PKI ticket salt via the API. A leaked salt may lead to privilege
+ escalation for authenticated API users, as it allows them to request
+ certificates for other identities (CVE-2021-32739)
+* Don't expose IdoMysqlConnection, IdoPgsqlConnection, and ElasticsearchWriter
+ passwords via the API (CVE-2021-32743)
+* Windows: Update bundled OpenSSL to version 1.1.1k #8888
+
+Depending on your setup, manual intervention beyond installing the new versions
+may be required, so please read the more detailed information in the
+[release blog post](https://icinga.com/blog/2021/07/15/releasing-icinga-2-12-5-and-2-11-10/)
+carefully.
+
+### Bugfixes
+
+* Don't send downtime end notification if downtime hasn't started #8878
+* Don't let a failed downtime creation block the others #8871
+* Support downtimes and comments for checkables with long names #8870
+* Trigger fixed downtimes immediately if the current time matches
+ (instead of waiting for the timer) #8891
+* Add configurable timeout for full connection handshake #8872
+
+### Enhancements
+
+* Replace existing downtimes on ScheduledDowntime change #8880
+* Improve crashlog #8869
+
+## 2.11.9 (2021-05-27)
+
+Version 2.11.9 is a maintenance release that fixes some crashes, improves error handling
+and adds compatibility with systems that ship newer Boost versions.
+
+### Bugfixes
+
+* Fix a crash when notification objects are deleted using the API #8780
+* Fix crashes that might occur during downtime scheduling if host or downtime objects are deleted using the API #8784
+* Fix an issue where notifications may incorrectly be skipped after a downtime ends #8772
+* Fix an issue where attempting to create a duplicate object using the API
+ might result in the original object being deleted #8788
+* IDO: prioritize program status updates #8810
+* Improve exceptions handling, including a fix for an uncaught exception on Windows #8776
+* Retry file rename operations on Windows to avoid intermittent locking issues #8770
+
+### Enhancements
+
+* Support Boost 1.74 (Ubuntu 21.04, Fedora 34) #8793 #8802
+
+## 2.11.8 (2020-12-15)
+
+Version 2.11.8 resolves a security vulnerability where revoked certificates were
+renewed automatically, ignoring the CRL.
+
+This version also resolves high-load issues with the config sync on Windows and an
+issue where Icinga 2 features could not be enabled/disabled over the API.
+
+### Security
+
+* Fix revoked certificates due for renewal being renewed automatically, ignoring the CRL (CVE-2020-29663)
+
+When a CRL is specified in the ApiListener configuration, Icinga 2 so far only used it
+when establishing connections, but not when a certificate was requested.
+This allowed a node to automatically renew a revoked certificate if it met the
+other conditions for auto renewal (issued before 2017 or expiring in less than 30 days).
+
+Because Icinga 2 currently (v2.12.3 and earlier) uses a validity duration of 15 years,
+this only affects setups with external certificate signing and revoked certificates
+that expire in less than 30 days.
+
+### Bugfixes
+
+* Improve config sync locking - resolves high load issues on Windows #8510
+* Fix runtime config updates being ignored for objects without zone #8550
+* Use proper buffer size for OpenSSL error messages #8543
+
+### Enhancements
+
+* On checkable recovery: re-check children that have a problem #8560
+
+## 2.11.7 (2020-12-01)
+
+Version 2.11.7 fixes several issues to improve the reliability of the cluster functionality.
+
+### Bugfixes
+
+* Fix a connection leak with misconfigured agents #8482
+* Properly sync changes of config objects in global zones done via the API #8473 #8457
+* Prevent other clients from being disconnected when replaying the cluster log takes very long #8475
+* Avoid duplicate connections between endpoints #8399
+* Ignore incoming config object updates for unknown zones #8459
+* Check timestamps before removing files in config sync #8486
+
+### Enhancements
+
+* Include HTTP status codes in log #8454
+
+## 2.11.6 (2020-10-15)
+
+Version 2.11.6 fixes several crashes, prevents unnecessary notifications
+and addresses several bugs in IDO and API.
+
+### Bugfixes
+
+* Crashes
+ * Fix crashes during config update #8337 #8308
+ * Fix crash while removing a downtime #8226
+ * Ensure the daemon doesn't get killed by logrotate #8227
+* IDO
+ * Prevent unnecessary IDO updates #8316 #8305
+ * Commit IDO MySQL transactions earlier #8298
+ * Make sure to insert IDO program status #8291
+ * Improve IDO queue stats logging #8270 #8325 #8378
+* API
+ * Ensure API connections are closed properly #8292
+ * Fix open connections when agent waits for CA approval #8230
+ * Close connections without successful TLS handshakes within 10s #8224
+* Misc
+ * Prevent unnecessary notifications #8300
+ * Fix Windows .exe version #8235
+ * Reset Icinga check warning after successful config update #8225
+
+## 2.11.5 (2020-08-05)
+
+Version 2.11.5 fixes file system race conditions
+in the config update process occurring in large HA environments
+and improves the cluster connection liveness mechanisms.
+
+### Bugfixes
+
+* Make the config update process mutually exclusive (Prevents file system race conditions) #8093
+* Consider a JsonRpcConnection alive on a single byte of TLS payload, not only on a whole message #8094
+* Send JsonRpcConnection heartbeat every 20s instead of 10s #8103
+* Use JsonRpcConnection heartbeat only to update connection liveness (m\_Seen) #8097
+
+## 2.11.4 (2020-06-18)
+
+Version 2.11.4 fixes a crash during a heartbeat timeout with clients not yet signed. It also resolves
+an issue with endpoints not reconnecting after a reload/deploy, which caused a lot of UNKNOWN states.
+
+### Bugfixes
+
+* Cluster
+ * Fix segfault during heartbeat timeout with clients not yet signed #7997
+ * Fix endpoints not reconnecting after reload (UNKNOWN hosts/services after reload) #8043
+* Setup
+ * Fix exception on trusted cert not readable during node setup #8044
+ * prepare-dirs: Only set permissions during directory creation #8046
+* DSL
+ * Fix segfault on missing compare function in Array functions (sort, map, reduce, filter, any, all) #8054
+
+## 2.11.3 (2020-03-02)
+
+The 2.11.3 release fixes a critical crash in our JSON-RPC connections. This mainly affects large HA
+enabled environments.
+
+### Bugfixes
+
+* Cluster
+ * JSON-RPC Crashes with 2.11 #7532
+
+## 2.11.2 (2019-10-24)
+
+2.11.2 fixes a problem where the newly introduced config sync "check-change-then-reload" functionality
+could cause endless reload loops with agents. The most visible symptom is failing command endpoint checks
+with a "not connected" UNKNOWN state. **Only applies to HA-enabled zones with 2 masters and/or 2 satellites.**
+
+### Bugfixes
+
+* Cluster Config Sync
+ * Config sync checksum change detection may not work within high load HA clusters #7565
+
+## 2.11.1 (2019-10-17)
+
+This release fixes a long-standing hidden bug that was unveiled by 2.11 in distributed setups.
+If you are affected by agents/satellites no longer accepting configuration,
+or no longer reloading, please upgrade.
+
+### Bugfixes
+
+* Cluster Config Sync
+ * Never accept authoritative config markers from other instances #7552
+ * This affects setups where agent/satellites are newer than the config master, e.g. satellite/agent=2.11.0, master=2.10.
+* Configuration
+ * Error message for `command_endpoint` should hint that zone is not set #7514
+ * Global variable 'ActiveStageOverride' has been set implicitly via 'ActiveStageOverride ... #7521
+
+### Documentation
+
+* Docs: Add upgrading/troubleshooting details for repos, config sync, agents #7526
+ * Explain repository requirements for 2.11: https://icinga.com/docs/icinga2/latest/doc/16-upgrading-icinga-2/#added-boost-166
+ * `command_endpoint` objects require a zone: https://icinga.com/docs/icinga2/latest/doc/16-upgrading-icinga-2/#agent-hosts-with-command-endpoint-require-a-zone
+ * Zones declared in zones.d are not loaded anymore: https://icinga.com/docs/icinga2/latest/doc/16-upgrading-icinga-2/#config-sync-zones-in-zones
+
+
+## 2.11.0 (2019-09-19)
+
+[Issues and PRs](https://github.com/Icinga/icinga2/issues?utf8=%E2%9C%93&q=milestone%3A2.11.0)
+
+### Notes
+
+Upgrading docs: https://icinga.com/docs/icinga2/snapshot/doc/16-upgrading-icinga-2/
+
+Thanks to all contributors: [Obihoernchen](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AObihoernchen), [dasJ](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AdasJ), [sebastic](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Asebastic), [waja](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Awaja), [BarbUk](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3ABarbUk), [alanlitster](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aalanlitster), [mcktr](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Amcktr), [KAMI911](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AKAMI911), [peteeckel](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Apeteeckel), [breml](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Abreml), [episodeiv](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aepisodeiv), [Crited](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3ACrited), [robert-scheck](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Arobert-scheck), [west0rmann](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Awest0rmann), [Napsty](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3ANapsty), [Elias481](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AElias481), [uubk](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Auubk), [miso231](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Amiso231), [neubi4](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aneubi4), [atj](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aatj), [mvanduren-itisit](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Amvanduren-itisit), [jschanz](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Ajschanz), [MaBauMeBad](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AMaBauMeBad), [markleary](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Amarkleary), [leeclemens](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aleeclemens), [m4k5ym](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Am4k5ym)
+
+### Enhancements
+
+* Core
+ * Rewrite Network Stack (cluster, REST API) based on Boost Asio, Beast, Coroutines
+ * Technical concept: #7041
+ * Requires package updates: Boost >1.66 (either from packages.icinga.com, EPEL or backports). SLES11 & Ubuntu 14 are EOL.
+ * Require TLS 1.2 and harden default cipher list
+ * Improved Reload Handling (umbrella process, now 3 processes at runtime)
+ * Support running Icinga 2 in (Docker) containers natively in foreground
+ * Quality: Use the JSON for Modern C++ library instead of YAJL (dead project)
+ * Quality: Improve handling of invalid UTF8 strings
+* API
+ * Fix crashes on Linux, Unix and Windows from Nessus scans #7431
+ * Locks and stalled waits are fixed with the core rewrite in #7071
+ * schedule-downtime action supports `all_services` for host downtimes
+ * Improve storage handling for runtime created objects in the `_api` package
+* Cluster
+ * HA aware features & improvements for failover handling #2941 #7062
+ * Improve cluster config sync with staging #6716
+ * Fixed downtime/comment objects being synced over and over in a cluster loop #7198
+* Checks & Notifications
+ * Ensure that notifications during a restart are sent
+ * Immediately notify about a problem when a downtime ends and the checkable is still NOT-OK
+ * Improve reload handling and wait for features/metrics
+ * Store notification command results and sync them in HA enabled zones #6722
+* DSL/Configuration
+ * Add getenv() function
+ * Fix TimePeriod range support over midnight
+ * `concurrent_checks` in the Checker feature has no effect, use the global MaxConcurrentChecks constant instead
+* CLI
+ * Permissions: node wizard/setup, feature, api setup now run in the Icinga user context, not root
+ * `ca list` shows pending CSRs by default, `ca remove/restore` allow deleting and restoring signing requests
+* ITL
+ * Add new commands and missing attributes
+* Windows
+ * Update bundled NSClient++ to 0.5.2.39
+ * Refine agent setup wizard & update requirements to .NET 4.6
+* Documentation
+ * Service Monitoring: How to create plugins by example, check commands and a modern version of the supported plugin API with best practices
+ * Features: Better structure on metrics, and supported features
+ * Technical Concepts: TLS Network IO, Cluster Feature HA, Cluster Config Sync
+ * Development: Rewritten for better debugging and development experience for contributors including a style guide. Add nightly build setup instructions.
+ * Packaging: INSTALL.md was integrated into the Development chapter, being available at https://icinga.com/docs too.
+
+
+
+
+## 2.11.0 RC1 (2019-07-25)
+
+[Issues and PRs](https://github.com/Icinga/icinga2/issues?utf8=%E2%9C%93&q=milestone%3A2.11.0)
+
+### Notes
+
+**This is the first release candidate for 2.11.**
+
+Upgrading docs: https://icinga.com/docs/icinga2/snapshot/doc/16-upgrading-icinga-2/
+
+Thanks to all contributors: [BarbUk](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3ABarbUk), [alanlitster](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aalanlitster), [mcktr](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Amcktr), [KAMI911](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AKAMI911), [peteeckel](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Apeteeckel), [breml](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Abreml), [episodeiv](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aepisodeiv), [Crited](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3ACrited), [robert-scheck](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Arobert-scheck), [west0rmann](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Awest0rmann), [Napsty](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3ANapsty), [Elias481](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AElias481), [uubk](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Auubk), [miso231](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Amiso231), [neubi4](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aneubi4), [atj](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aatj), [mvanduren-itisit](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Amvanduren-itisit), [jschanz](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Ajschanz), [MaBauMeBad](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3AMaBauMeBad), [markleary](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Amarkleary), [leeclemens](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Aleeclemens), [m4k5ym](https://github.com/Icinga/icinga2/pulls?q=is%3Apr+author%3Am4k5ym)
+
+### Enhancements
+
+* Core
+ * Rewrite Network Stack (cluster, REST API) based on Boost Asio, Beast, Coroutines
+ * Technical concept: #7041
+ * Requires package updates: Boost >1.66 (either from packages.icinga.com, EPEL or backports). SLES11 & Ubuntu 14 are EOL.
+ * Require TLS 1.2 and harden default cipher list
+ * Improved Reload Handling (umbrella process, now 3 processes at runtime)
+ * Support running Icinga 2 in (Docker) containers natively in foreground
+ * Quality: Use the JSON for Modern C++ library instead of YAJL (dead project)
+ * Quality: Improve handling of invalid UTF8 strings
+* API
+ * Fix crashes and problems with permission filters from recent Namespace introduction #6785 (thanks Elias Ohm) #6874 (backported to 2.10.5)
+ * Locks and stalled waits are fixed with the core rewrite in #7071
+ * schedule-downtime action supports `all_services` for host downtimes
+ * Improve storage handling for runtime created objects in the `_api` package
+* Cluster
+ * HA aware features & improvements for failover handling #2941 #7062
+ * Improve cluster config sync with staging #6716
+* Checks & Notifications
+ * Ensure that notifications during a restart are sent
+ * Immediately notify about a problem when a downtime ends and the checkable is still NOT-OK
+ * Improve reload handling and wait for features/metrics
+ * Store notification command results and sync them in HA enabled zones #6722
+* DSL/Configuration
+ * Add getenv() function
+ * Fix TimePeriod range support over midnight
+ * `concurrent_checks` in the Checker feature has no effect, use the global MaxConcurrentChecks constant instead
+* CLI
+ * Permissions: node wizard/setup, feature, api setup now run in the Icinga user context, not root
+ * `ca list` shows pending CSRs by default, `ca remove/restore` allow deleting and restoring signing requests
+* ITL
+ * Add new commands and missing attributes - thanks to all contributors!
+* Windows
+ * Update bundled NSClient++ to 0.5.2.39
+ * Update agent installer and OpenSSL
+* Documentation
+ * Service Monitoring: How to create plugins by example, check commands and a modern version of the supported plugin API with best practices.
+ * Features: Better structure on metrics, and supported features.
+ * Basics: Rename `Custom Attributes` to `Custom Variables`.
+ * Basics: Refine explanation of command arguments.
+ * Distributed: Reword `Icinga client` into `Icinga agent` and add new images for scenarios and modes.
+ * Security: Add TLS v1.2+ requirement, hardened cipher lists
+ * Technical Concepts: TLS Network IO, Cluster Feature HA, Cluster Config Sync, Core Reload Handling.
+ * Development: Rewritten for better debugging and development experience for contributors including a style guide. Add nightly build setup instructions.
+ * Packaging: INSTALL.md was integrated into the Development chapter available at https://icinga.com/docs too.
+
+
+
+
+## 2.10.7 (2019-10-17)
+
+[Issues and PRs](https://github.com/Icinga/icinga2/issues?utf8=%E2%9C%93&q=milestone%3A2.10.7)
+
+### Bugfixes
+
+* Cluster config master must not load/sync its marker to other instances #7544
+ * This affects scenarios where the satellite/agent is newer than the master, e.g. master=2.10.x satellite=2.11.0
+
+
+## 2.10.6 (2019-07-30)
+
+[Issues and PRs](https://github.com/Icinga/icinga2/issues?utf8=%E2%9C%93&q=milestone%3A2.10.6)
+
+### Bugfixes
+
+* Fix el7 not loading ECDHE cipher suites #7247
+
+
+## 2.10.5 (2019-05-23)
+
+[Issues and PRs](https://github.com/Icinga/icinga2/milestone/81?closed=1)
+
+### Bugfixes
+
+* Core
+ * Fix crashes with logrotate signals #6737 (thanks Elias Ohm)
+* API
+ * Fix crashes and problems with permission filters from recent Namespace introduction #6785 (thanks Elias Ohm) #6874 (backported from 2.11)
+ * Reduce log spam with locked connections (real fix is the network stack rewrite in 2.11) #6877
+* Cluster
+ * Fix problems with replay log rotation and storage #6932 (thanks Peter Eckel)
+* IDO DB
+ * Fix reload shutdown deactivating hosts and hostgroups (introduced in 2.9) #7157
+* Documentation
+ * Improve the [REST API](https://icinga.com/docs/icinga2/latest/doc/12-icinga2-api/) chapter: Unix timestamp handling, filters, unify POST requests with filters in the body
+ * Better layout for the [features](https://icinga.com/docs/icinga2/latest/doc/14-features/) chapter, specifically metrics and events
+ * Split [object types](https://icinga.com/docs/icinga2/latest/doc/09-object-types/) into monitoring, runtime, features
+ * Add technical concepts for [cluster messages](https://icinga.com/docs/icinga2/latest/doc/19-technical-concepts/#json-rpc-message-api)
+
+
+## 2.10.4 (2019-03-19)
+
+### Notes
+
+* Fix TLS connections in Influxdb/Elasticsearch features leaking file descriptors (#6989 #7018 ref/IP/12219)
+* Fixes for delayed and one-time notifications (#5561 #6757)
+* Improve performance for downtimes/comments added in HA clusters (#6885 ref/IP/9235)
+* check_perfmon supports non-localized performance counter names (#5546 #6418)
+
+### Enhancement
+
+* [#6732](https://github.com/icinga/icinga2/issues/6732) (Windows, PR): Update Windows Agent with new design
+* [#6729](https://github.com/icinga/icinga2/issues/6729) (Windows): Polish the Windows Agent design
+* [#6418](https://github.com/icinga/icinga2/issues/6418) (Windows): check\_perfmon.exe: Add fallback support for localized performance counters
+
+### Bug
+
+* [#7020](https://github.com/icinga/icinga2/issues/7020) (Elasticsearch, PR): ElasticsearchWriter: don't leak sockets
+* [#7018](https://github.com/icinga/icinga2/issues/7018) (Elasticsearch): ElasticsearchWriter not closing SSL connections on Icinga2 2.10.3.1
+* [#6991](https://github.com/icinga/icinga2/issues/6991) (CLI, PR): PkiUtility::NewCa\(\): just warn if the CA files already exist
+* [#6990](https://github.com/icinga/icinga2/issues/6990) (InfluxDB, PR): InfluxdbWriter: don't leak sockets
+* [#6989](https://github.com/icinga/icinga2/issues/6989) (InfluxDB): InfluxdbWriter not closing connections Icinga2 2.10.3 CentOS 7
+* [#6976](https://github.com/icinga/icinga2/issues/6976) (Cluster, PR): Don't require OS headers to provide SO\_REUSEPORT
+* [#6896](https://github.com/icinga/icinga2/issues/6896) (Notifications, PR): Notification\#BeginExecuteNotification\(\): SetNextNotification\(\) correctly
+* [#6885](https://github.com/icinga/icinga2/issues/6885) (API, Configuration, PR): Don't run UpdateObjectAuthority for Comments and Downtimes
+* [#6800](https://github.com/icinga/icinga2/issues/6800) (Plugins, Windows, PR): Fix check\_perfmon to support non-localized names
+* [#6757](https://github.com/icinga/icinga2/issues/6757) (Notifications, PR): Fix that no\_more\_notifications gets reset when Recovery notifications are filtered away
+* [#5561](https://github.com/icinga/icinga2/issues/5561) (Notifications): Set the notification mode times.begin is not 0, the first notification has a delay
+* [#5546](https://github.com/icinga/icinga2/issues/5546) (Plugins, Windows): check\_perfmon.exe doesn't support cyrillic names of perf counters
+
+### Documentation
+
+* [#7033](https://github.com/icinga/icinga2/issues/7033) (Documentation, PR): Docs: Update supported package repos in Getting Started chapter
+* [#7028](https://github.com/icinga/icinga2/issues/7028) (Documentation, PR): Fix heading level in development chapter
+* [#7001](https://github.com/icinga/icinga2/issues/7001) (Documentation, PR): Assignment operators doc: tell what the { } are for
+* [#6995](https://github.com/icinga/icinga2/issues/6995) (Documentation, PR): Typo and link fix
+* [#6979](https://github.com/icinga/icinga2/issues/6979) (Documentation, PR): Doc: write systemd lower-case
+* [#6975](https://github.com/icinga/icinga2/issues/6975) (Documentation, PR): Fix nested hostgroup example
+* [#6949](https://github.com/icinga/icinga2/issues/6949) (Documentation, PR): Doc fix: update check\_rbl parameter
+* [#6708](https://github.com/icinga/icinga2/issues/6708) (Documentation, PR): Docs: Alpine needs 'edge/main' repository too
+* [#5430](https://github.com/icinga/icinga2/issues/5430) (Documentation): Documentation about dictionaries and assignments
+
+### Support
+
+* [#7032](https://github.com/icinga/icinga2/issues/7032) (code-quality, PR): Backport Defer class for 2.10
+* [#7030](https://github.com/icinga/icinga2/issues/7030) (Packages, PR): SELinux: add unreserved\_port\_type attribute to icinga2\_port\_t
+* [#7029](https://github.com/icinga/icinga2/issues/7029) (Packages): Add unreserved\_port\_type attribute to icinga2\_port\_t
+* [#7002](https://github.com/icinga/icinga2/issues/7002) (Plugins, Windows, PR): check\_network -h: drop non-existent feature
+* [#6987](https://github.com/icinga/icinga2/issues/6987) (Tests): base-base\_utility/comparepasswords\_issafe test fails on i386
+* [#6977](https://github.com/icinga/icinga2/issues/6977) (Tests, PR): Ignore failure of unit test base\_utility/comparepasswords\_issafe
+
+## 2.10.3 (2019-02-26)
+
+### Notes
+
+Bugfixes:
+
+- Stalled TLS connections on reload/Director deployments (#6816 #6898 ref/NC/588119)
+- 'Connection: close' header leading to unstable instance, affects Ruby clients (#6799)
+- Server time in the future breaks check result processing (#6797 ref/NC/595861)
+- ScheduledDowntimes: Generate downtime objects only on one HA endpoint (#2844 ref/IC/9673 ref/NC/590167 ref/NC/591721)
+- Improve activation & syncing for downtime objects generated from ScheduledDowntimes (#6826 ref/IC/9673 ref/NC/585559)
+- Generate a runtime downtime object from already running ScheduledDowntime objects (#6704)
+- DB IDO: Don't enqueue queries when the feature is paused in HA zones (#5876)
+- Crashes with localtime_r errors (#6887)
+
+Documentation updates:
+
+- Ephemeral port range blocking on Windows agents (ref/NC/597307)
+- Technical concepts for the check scheduler (#6775)
+- DB IDO cleanup (#6791)
+- Unified development docs (#6819)
+
+### Bug
+
+* [#6971](https://github.com/icinga/icinga2/issues/6971) (Notifications, PR): Activate downtimes before any checkable object
+* [#6968](https://github.com/icinga/icinga2/issues/6968) (API, PR): Secure ApiUser::GetByAuthHeader\(\) against timing attacks
+* [#6940](https://github.com/icinga/icinga2/issues/6940) (Plugins, Windows, PR): Fix check\_swap percentage calculation
+* [#6925](https://github.com/icinga/icinga2/issues/6925) (Plugins, Windows, PR): Fix check\_swap formatting
+* [#6924](https://github.com/icinga/icinga2/issues/6924) (PR): Fix double to long conversions
+* [#6922](https://github.com/icinga/icinga2/issues/6922) (API, DB IDO): IDO MySQL fails on start if check\_interval is a float \(Icinga 2.9.2\)
+* [#6920](https://github.com/icinga/icinga2/issues/6920) (PR): Downtime::AddDowntime\(\): place Downtimes in the same zone as the origin ScheduledDowntimes
+* [#6917](https://github.com/icinga/icinga2/issues/6917) (Cluster, Log, PR): Cluster: Delete object message should log that
+* [#6916](https://github.com/icinga/icinga2/issues/6916) (PR): Don't allow retry\_interval \<= 0
+* [#6914](https://github.com/icinga/icinga2/issues/6914) (Cluster, PR): ClusterEvents::AcknowledgementSet event should forward 'persistent' attribute
+* [#6913](https://github.com/icinga/icinga2/issues/6913) (Plugins, Windows): check\_swap return value wrong when no swap file configured
+* [#6901](https://github.com/icinga/icinga2/issues/6901) (API, PR): TcpSocket\#Bind\(\): also set SO\_REUSEPORT
+* [#6899](https://github.com/icinga/icinga2/issues/6899) (PR): Log: Ensure not to pass negative values to localtime\(\)
+* [#6898](https://github.com/icinga/icinga2/issues/6898) (API): API action restart-process fails on FreeBSD
+* [#6894](https://github.com/icinga/icinga2/issues/6894) (Check Execution, PR): Fix checkresults from the future breaking checks
+* [#6887](https://github.com/icinga/icinga2/issues/6887) (Check Execution, Windows): Icinga2 Windows Service does not start critical/checker: Exception occurred while checking 'hostname.tld'
+* [#6883](https://github.com/icinga/icinga2/issues/6883) (Check Execution, PR): Allow Checkable\#retry\_interval to be 0
+* [#6871](https://github.com/icinga/icinga2/issues/6871): Icinga2 crashes after localtime\_r call
+* [#6857](https://github.com/icinga/icinga2/issues/6857) (Plugins, Windows, PR): Url\#m\_Query: preserve order
+* [#6826](https://github.com/icinga/icinga2/issues/6826) (Configuration, PR): Downtime\#HasValidConfigOwner\(\): wait for ScheduledDowntimes
+* [#6821](https://github.com/icinga/icinga2/issues/6821) (Cluster, Configuration, PR): Don't delete downtimes in satellite zones
+* [#6820](https://github.com/icinga/icinga2/issues/6820) (Cluster, PR): Only create downtimes from non-paused ScheduledDowntime objects in HA enabled cluster zones
+* [#6817](https://github.com/icinga/icinga2/issues/6817) (API, PR): HttpServerConnection\#DataAvailableHandler\(\): be aware of being called multiple times concurrently
+* [#6816](https://github.com/icinga/icinga2/issues/6816) (API, Cluster): Stalled TLS connections and lock waits in SocketEventEngine
+* [#6814](https://github.com/icinga/icinga2/issues/6814) (API, PR): Restore 'Connection: close' behaviour in HTTP responses
+* [#6811](https://github.com/icinga/icinga2/issues/6811) (Plugins, Windows, PR): Fix state conditions in check\_memory and check\_swap
+* [#6810](https://github.com/icinga/icinga2/issues/6810) (Plugins, Windows): Windows check\_memory never gets critical
+* [#6808](https://github.com/icinga/icinga2/issues/6808) (API, PR): Remove redundant check for object existence on creation via API
+* [#6807](https://github.com/icinga/icinga2/issues/6807) (API): \[2.10.2\] Director deploy crashes the Icinga service \[FreeBSD\]
+* [#6799](https://github.com/icinga/icinga2/issues/6799) (API): "Connection: close" header leads to unstable instance
+* [#6797](https://github.com/icinga/icinga2/issues/6797) (Check Execution): Servertime in the future breaks check results processing
+* [#6750](https://github.com/icinga/icinga2/issues/6750) (Configuration, PR): \#6749 Wrong operator on stride variable causing incorrect behaviour
+* [#6749](https://github.com/icinga/icinga2/issues/6749) (Configuration): Stride is misinterpreted in multi-date legacydatetime
+* [#6748](https://github.com/icinga/icinga2/issues/6748) (CLI, PR): Fix api setup to automatically create the conf.d directory
+* [#6718](https://github.com/icinga/icinga2/issues/6718) (API, Cluster, PR): Call SSL\_shutdown\(\) at least twice
+* [#6704](https://github.com/icinga/icinga2/issues/6704) (Notifications, PR): Put newly configured already running ScheduledDowntime immediately in effect
+* [#6542](https://github.com/icinga/icinga2/issues/6542) (Configuration, Log): /var/log/icinga2/icinga2.log is growing very fast on satellites
+* [#6536](https://github.com/icinga/icinga2/issues/6536) (Windows, help wanted): check\_nscp\_api: Query arguments are sorted on Url::Format\(\)
+* [#4790](https://github.com/icinga/icinga2/issues/4790) (Notifications): Newly configured already running ScheduledDowntime not put into effect
+* [#3937](https://github.com/icinga/icinga2/issues/3937) (API): Icinga2 API: PUT request fails at 0-byte file
+* [#2844](https://github.com/icinga/icinga2/issues/2844) (Cluster): Duplicated scheduled downtimes created in cluster HA zone
+
+### Documentation
+
+* [#6956](https://github.com/icinga/icinga2/issues/6956) (Documentation, PR): Escape pipe symbol in api documentation
+* [#6944](https://github.com/icinga/icinga2/issues/6944) (Documentation, PR): Troubleshooting: Add notes on ephemeral port range blocking on Windows agents
+* [#6928](https://github.com/icinga/icinga2/issues/6928) (Documentation, PR): Doc: Add .NET 3.5 to the windows build stack
+* [#6825](https://github.com/icinga/icinga2/issues/6825) (Documentation, PR): Document that retry\_interval is only used after an active check result
+* [#6819](https://github.com/icinga/icinga2/issues/6819) (Documentation, PR): Enhance and unify development docs for debug, develop, package
+* [#6791](https://github.com/icinga/icinga2/issues/6791) (Documentation, PR): Docs: Add a section for DB IDO Cleanup
+* [#6776](https://github.com/icinga/icinga2/issues/6776) (Documentation, PR): Doc fix: update apache section
+* [#6775](https://github.com/icinga/icinga2/issues/6775) (Documentation, PR): Add technical docs for the check scheduler \(general, initial check, offsets\)
+* [#6751](https://github.com/icinga/icinga2/issues/6751) (Documentation, PR): Doc fix: documentation link for apt
+* [#6743](https://github.com/icinga/icinga2/issues/6743) (Documentation, PR): Doc fix: error in example path.
+* [#5341](https://github.com/icinga/icinga2/issues/5341) (Documentation): Enhance development documentation
+
+### Support
+
+* [#6972](https://github.com/icinga/icinga2/issues/6972) (PR): Fix formatting in development docs
+* [#6958](https://github.com/icinga/icinga2/issues/6958) (code-quality, PR): Debug: Log calls to ConfigObject::Deactivate\(\)
+* [#6897](https://github.com/icinga/icinga2/issues/6897) (PR): Validate Zone::GetLocalZone\(\) before using
+* [#6872](https://github.com/icinga/icinga2/issues/6872) (Windows): 2.10 is unstable \(Windows Agent\)
+* [#6843](https://github.com/icinga/icinga2/issues/6843) (Tests, Windows, PR): Improve AppVeyor builds
+* [#6479](https://github.com/icinga/icinga2/issues/6479) (code-quality, PR): SocketEvents: inherit from Stream
+* [#6477](https://github.com/icinga/icinga2/issues/6477) (code-quality): SocketEvents: inherit from Object
+
+## 2.10.2 (2018-11-14)
+
+### Bug
+
+* [#6770](https://github.com/icinga/icinga2/issues/6770) (PR): Fix deadlock in GraphiteWriter
+* [#6769](https://github.com/icinga/icinga2/issues/6769) (Cluster): Hanging TLS connections
+* [#6759](https://github.com/icinga/icinga2/issues/6759) (Log, PR): Fix possible double free in StreamLogger::BindStream\(\)
+* [#6753](https://github.com/icinga/icinga2/issues/6753): Icinga2.service state is reloading in systemd after safe-reload until systemd time-out
+* [#6740](https://github.com/icinga/icinga2/issues/6740) (DB IDO, PR): DB IDO: Don't enqueue queries when the feature is paused \(HA\)
+* [#6738](https://github.com/icinga/icinga2/issues/6738) (API, Cluster, PR): Ensure that API/JSON-RPC messages in the same session are processed and not stalled
+* [#6736](https://github.com/icinga/icinga2/issues/6736) (Crash): Stability issues with Icinga 2.10.x
+* [#6717](https://github.com/icinga/icinga2/issues/6717) (API, PR): Improve error handling for invalid child\_options for API downtime actions
+* [#6712](https://github.com/icinga/icinga2/issues/6712) (API): Downtime name not returned when error occurs
+* [#6711](https://github.com/icinga/icinga2/issues/6711) (API, Cluster): Slow API \(TLS-Handshake\)
+* [#6709](https://github.com/icinga/icinga2/issues/6709) (PR): Fix the Icinga2 version check for versions with more than 5 characters
+* [#6707](https://github.com/icinga/icinga2/issues/6707) (Compat, PR): Fix regression for wrong objects.cache path overwriting icinga2.debug file
+* [#6705](https://github.com/icinga/icinga2/issues/6705) (CLI, Compat, Configuration): Crash "icinga2 object list" command with 2.10.1-1 on CentOS 7
+* [#6703](https://github.com/icinga/icinga2/issues/6703): Check command 'icinga' breaks when vars.icinga\_min\_version is defined \(2.10.x\)
+* [#6635](https://github.com/icinga/icinga2/issues/6635) (API): API TLS session connection closed after 2 requests
+* [#5876](https://github.com/icinga/icinga2/issues/5876) (DB IDO): IDO Work queue on the inactive node growing when switching connection between redundant master servers
+
+### Documentation
+
+* [#6714](https://github.com/icinga/icinga2/issues/6714) (Documentation, PR): Docs: Add package related changes to the upgrading docs
+
+### Support
+
+* [#6773](https://github.com/icinga/icinga2/issues/6773) (Installation, Packages, PR): Initialize ICINGA2\_ERROR\_LOG inside the systemd environment
+* [#6771](https://github.com/icinga/icinga2/issues/6771) (Tests, PR): Implement unit tests for Dictionary initializers
+* [#6760](https://github.com/icinga/icinga2/issues/6760) (Packages, Tests, PR): armhf: Apply workaround for timer tests with std::bind callbacks
+* [#6710](https://github.com/icinga/icinga2/issues/6710) (Packages): Crash when upgrading from 2.10.0 to 2.10.1 \(SELinux related\)
+
+## 2.10.1 (2018-10-18)
+
+### Bug
+
+* [#6696](https://github.com/icinga/icinga2/issues/6696) (PR): Remove default environment, regression from e678fa1aa5
+* [#6694](https://github.com/icinga/icinga2/issues/6694): v2.10.0 sets a default environment "production" in SNI
+* [#6691](https://github.com/icinga/icinga2/issues/6691) (PR): Add missing shutdown/program state dumps for SIGUSR2 reload handler
+* [#6689](https://github.com/icinga/icinga2/issues/6689): State file not updated on reload
+* [#6685](https://github.com/icinga/icinga2/issues/6685) (API, PR): Fix regression with API permission filters and namespaces in v2.10
+* [#6682](https://github.com/icinga/icinga2/issues/6682) (API): API process-check-result fails in 2.10.0
+* [#6679](https://github.com/icinga/icinga2/issues/6679) (Windows, PR): Initialize Configuration::InitRunDir for Windows and writing the PID file
+* [#6624](https://github.com/icinga/icinga2/issues/6624) (Check Execution): Master Reload Causes Passive Check State Change
+* [#6592](https://github.com/icinga/icinga2/issues/6592): Reloads seem to reset the check attempt count. Also notifications go missing shortly after a reload.
+
+### Documentation
+
+* [#6701](https://github.com/icinga/icinga2/issues/6701) (Documentation, PR): Add GitHub release tag to README
+* [#6700](https://github.com/icinga/icinga2/issues/6700) (Documentation, PR): Enhance the addon chapter in the docs
+* [#6699](https://github.com/icinga/icinga2/issues/6699) (Documentation, PR): Update to https://icinga.com/
+* [#6692](https://github.com/icinga/icinga2/issues/6692) (Documentation, PR): Update release docs for Chocolatey
+* [#6690](https://github.com/icinga/icinga2/issues/6690) (Documentation, PR): Extend 09-object-types.md with argument array
+* [#6674](https://github.com/icinga/icinga2/issues/6674) (Documentation, PR): Add a note to the docs on \>2 endpoints in a zone
+* [#6673](https://github.com/icinga/icinga2/issues/6673) (Documentation, PR): Update RELEASE docs
+* [#6672](https://github.com/icinga/icinga2/issues/6672) (Documentation, PR): Extend upgrade docs
+* [#6671](https://github.com/icinga/icinga2/issues/6671) (Documentation): Zone requirements changed in 2.10 - Undocumented Change
+
+### Support
+
+* [#6681](https://github.com/icinga/icinga2/issues/6681) (code-quality, PR): Fix spelling errors.
+* [#6677](https://github.com/icinga/icinga2/issues/6677) (Packages, Windows): icinga does not start after Update to 2.10
+
+## 2.10.0 (2018-10-11)
+
+### Notes
+
+* Support for namespaces, details in [this blogpost](https://icinga.com/2018/09/17/icinga-2-dsl-feature-namespaces-coming-in-v2-10/)
+* Only send acknowledgement notifications to users who were notified about a problem before, thanks to the [Max-Planck-Institut for Marine Mikrobiologie](https://www.mpi-bremen.de) for sponsoring this feature
+* More child options for scheduled downtimes
+* Performance improvements and fixes for the TLS connections inside cluster/REST API
+* Better logging for HTTP requests and less verbose object creation (e.g. downtimes via Icinga Web 2 & REST API)
+* New configuration path constants, e.g. ConfigDir
+* Fixed problem with dependencies rescheduling parent checks too fast
+* Fixed problem with logging in systemd and syslog
+* Improved vim syntax highlighting
+* [Technical concepts docs](https://icinga.com/docs/icinga2/latest/doc/19-technical-concepts/) update with config compiler and TLS network IO
+
+### Enhancement
+
+* [#6663](https://github.com/icinga/icinga2/issues/6663) (API, Log, PR): Silence config compiler logging for runtime created objects
+* [#6657](https://github.com/icinga/icinga2/issues/6657) (API, Log, PR): Enable the HTTP request body debug log entry for release builds
+* [#6655](https://github.com/icinga/icinga2/issues/6655) (API, Log, PR): Improve logging for disconnected HTTP clients
+* [#6651](https://github.com/icinga/icinga2/issues/6651) (Plugins, PR): Add 'used' feature to check\_swap
+* [#6633](https://github.com/icinga/icinga2/issues/6633) (API, Cluster, PR): Use a dynamic thread pool for API connections
+* [#6632](https://github.com/icinga/icinga2/issues/6632) (Cluster, PR): Increase the cluster reconnect frequency to 10s
+* [#6616](https://github.com/icinga/icinga2/issues/6616) (API, Cluster, PR): Add ApiListener\#tls\_handshake\_timeout option
+* [#6611](https://github.com/icinga/icinga2/issues/6611) (Notifications): Allow types = \[ Recovery \] to always send recovery notifications
+* [#6595](https://github.com/icinga/icinga2/issues/6595) (API, Cluster, PR): Allow to configure anonymous clients limit inside the ApiListener object
+* [#6532](https://github.com/icinga/icinga2/issues/6532) (Configuration, PR): Add child\_options to ScheduledDowntime
+* [#6531](https://github.com/icinga/icinga2/issues/6531) (API, PR): Expose Zone\#all\_parents via API
+* [#6527](https://github.com/icinga/icinga2/issues/6527) (Notifications, PR): Acknowledgment notifications should only be sent if a problem notification has been sent
+* [#6521](https://github.com/icinga/icinga2/issues/6521) (Configuration, PR): Implement references
+* [#6512](https://github.com/icinga/icinga2/issues/6512) (Cluster, PR): Refactor environment for API connections
+* [#6511](https://github.com/icinga/icinga2/issues/6511) (Cluster, PR): ApiListener: Add support for dynamic port handling
+* [#6509](https://github.com/icinga/icinga2/issues/6509) (Configuration, PR): Implement support for namespaces
+* [#6508](https://github.com/icinga/icinga2/issues/6508) (Configuration, PR): Implement the Dictionary\#clear script function
+* [#6506](https://github.com/icinga/icinga2/issues/6506) (PR): Improve path handling in cmake and daemon
+* [#6460](https://github.com/icinga/icinga2/issues/6460) (Log, help wanted): Feature suggestion: Do not log warnings when env elements are undefined in CheckCommand objects
+* [#6455](https://github.com/icinga/icinga2/issues/6455) (Log, PR): Log something when the Filelogger has been started
+* [#6379](https://github.com/icinga/icinga2/issues/6379) (Configuration, PR): Throw config error when using global zones as parent
+* [#6356](https://github.com/icinga/icinga2/issues/6356) (Log, PR): Fix logging under systemd
+* [#6339](https://github.com/icinga/icinga2/issues/6339) (Log, help wanted): On systemd, icinga2 floods the system log, and this cannot simply be opted out of
+* [#6110](https://github.com/icinga/icinga2/issues/6110) (Configuration, PR): Implement support for optionally specifying the 'var' keyword in 'for' loops
+* [#6047](https://github.com/icinga/icinga2/issues/6047) (Notifications): Acknowledgment notifications should only be sent if the user already received a problem notification
+* [#4282](https://github.com/icinga/icinga2/issues/4282) (API, Log): Icinga should log HTTP bodies for API requests
+
+### Bug
+
+* [#6658](https://github.com/icinga/icinga2/issues/6658) (API, PR): Ensure that HTTP/1.0 or 'Connection: close' headers are properly disconnecting the client
+* [#6652](https://github.com/icinga/icinga2/issues/6652) (Plugins, PR): Fix check\_memory thresholds in 'used' mode
+* [#6647](https://github.com/icinga/icinga2/issues/6647) (CLI, PR): node setup: always respect --accept-config and --accept-commands
+* [#6643](https://github.com/icinga/icinga2/issues/6643) (Check Execution, Notifications, PR): Fix that check\_timeout was used for Event/Notification commands too
+* [#6639](https://github.com/icinga/icinga2/issues/6639) (Windows, PR): Ensure to \_unlink before renaming replay log on Windows
+* [#6622](https://github.com/icinga/icinga2/issues/6622) (DB IDO, PR): Ensure to use UTC timestamps for IDO PgSQL cleanup queries
+* [#6603](https://github.com/icinga/icinga2/issues/6603) (Check Execution, Cluster): CheckCommand 'icinga' seems to ignore retry interval via command\_endpoint
+* [#6575](https://github.com/icinga/icinga2/issues/6575): LTO builds fail on Linux
+* [#6566](https://github.com/icinga/icinga2/issues/6566) (Cluster): Master disconnects during signing process
+* [#6546](https://github.com/icinga/icinga2/issues/6546) (API, CLI, PR): Overridden path constants not passed to config validation in /v1/config/stages API call
+* [#6530](https://github.com/icinga/icinga2/issues/6530) (DB IDO, PR): IDO/MySQL: avoid empty queries
+* [#6519](https://github.com/icinga/icinga2/issues/6519) (CLI, PR): Reset terminal on erroneous console exit
+* [#6517](https://github.com/icinga/icinga2/issues/6517) (Cluster): Not all Endpoints can reconnect due to "Client TLS handshake failed" error after "reload or restart"
+* [#6514](https://github.com/icinga/icinga2/issues/6514) (API): API using "Connection: close" header results in infinite threads
+* [#6507](https://github.com/icinga/icinga2/issues/6507) (Cluster): Variable name conflict in constants.conf / Problem with TLS verification, CN and Environment variable
+* [#6503](https://github.com/icinga/icinga2/issues/6503) (Log, PR): Reduce the log level for missing env macros to debug
+* [#6485](https://github.com/icinga/icinga2/issues/6485) (Log): Icinga logs discarding messages still as warning and not as notice
+* [#6475](https://github.com/icinga/icinga2/issues/6475) (Compat, PR): lib-\>compat-\>statusdatawriter: fix notifications\_enabled
+* [#6430](https://github.com/icinga/icinga2/issues/6430) (Log, PR): Fix negative 'empty in' value in WorkQueue log message
+* [#6427](https://github.com/icinga/icinga2/issues/6427) (Configuration, Crash, PR): Improve error message for serializing objects with recursive references
+* [#6409](https://github.com/icinga/icinga2/issues/6409) (Configuration, Crash): Assigning vars.x = vars causes Icinga 2 segfaults
+* [#6408](https://github.com/icinga/icinga2/issues/6408) (PR): ObjectLock\#Unlock\(\): don't reset m\_Object-\>m\_LockOwner too early
+* [#6386](https://github.com/icinga/icinga2/issues/6386) (Configuration, PR): Fix that TimePeriod segments are not cleared on restart
+* [#6382](https://github.com/icinga/icinga2/issues/6382) (CLI, help wanted): icinga2 console breaks the terminal on errors
+* [#6313](https://github.com/icinga/icinga2/issues/6313) (Plugins, Windows, PR): Fix wrong calculation of check\_swap windows plugin
+* [#6304](https://github.com/icinga/icinga2/issues/6304) (Configuration, Notifications): Timeout defined in NotificationCommand is ignored and uses check\_timeout
+* [#5815](https://github.com/icinga/icinga2/issues/5815) (Plugins, Windows): swap-windows check delivers wrong result
+* [#5375](https://github.com/icinga/icinga2/issues/5375) (Check Execution, PR): Parents who are non-active should not be rescheduled
+* [#5052](https://github.com/icinga/icinga2/issues/5052) (Cluster, Windows): Replay log not working with Windows client
+* [#5022](https://github.com/icinga/icinga2/issues/5022) (Check Execution): Dependencies may reschedule passive checks, triggering freshness checks
+
+### ITL
+
+* [#6646](https://github.com/icinga/icinga2/issues/6646) (ITL, PR): Update ITL and Docs for memory-windows - show used
+* [#6640](https://github.com/icinga/icinga2/issues/6640) (ITL): Update ITL and Docs for memory-windows - show used
+* [#6563](https://github.com/icinga/icinga2/issues/6563) (ITL, PR): \[Feature\] Cloudera service health CheckCommand
+* [#6561](https://github.com/icinga/icinga2/issues/6561) (ITL, PR): \[Feature\] Ceph health CheckCommand
+* [#6504](https://github.com/icinga/icinga2/issues/6504) (ITL, PR): squashfs ignored
+* [#6491](https://github.com/icinga/icinga2/issues/6491) (ITL, PR): Feature/itl vmware health
+* [#6481](https://github.com/icinga/icinga2/issues/6481) (ITL): command-plugins.conf check\_disk exclude squashfs
+
+### Documentation
+
+* [#6670](https://github.com/icinga/icinga2/issues/6670) (Documentation, PR): Add technical concepts for the config compiler and daemon CLI command
+* [#6665](https://github.com/icinga/icinga2/issues/6665) (Documentation, PR): Make the two modes of check\_http more obvious.
+* [#6615](https://github.com/icinga/icinga2/issues/6615) (Documentation, PR): Update distributed monitoring docs for 2.10
+* [#6610](https://github.com/icinga/icinga2/issues/6610) (Documentation, PR): Add "TLS Network IO" into technical concepts docs
+* [#6607](https://github.com/icinga/icinga2/issues/6607) (Documentation, PR): Enhance development docs with GDB backtrace and thread list
+* [#6606](https://github.com/icinga/icinga2/issues/6606) (Documentation, PR): Enhance contributing docs
+* [#6598](https://github.com/icinga/icinga2/issues/6598) (Documentation, PR): doc/09-object-types: states filter ignored for Acknowledgements
+* [#6597](https://github.com/icinga/icinga2/issues/6597) (Documentation, PR): Add Fedora to development docs for debuginfo packages
+* [#6593](https://github.com/icinga/icinga2/issues/6593) (Documentation, help wanted): Include CA Proxy in 3rd scenario in Distributed Monitoring docs
+* [#6573](https://github.com/icinga/icinga2/issues/6573) (Documentation, PR): Fix operator precedence table
+* [#6528](https://github.com/icinga/icinga2/issues/6528) (Documentation, PR): Document default of User\#enable\_notifications
+* [#6502](https://github.com/icinga/icinga2/issues/6502) (Documentation, PR): Update 17-language-reference.md
+* [#6501](https://github.com/icinga/icinga2/issues/6501) (Documentation, PR): Update 03-monitoring-basics.md
+* [#6488](https://github.com/icinga/icinga2/issues/6488) (Documentation, ITL, PR): Fix typo with the CheckCommand cert
+
+### Support
+
+* [#6669](https://github.com/icinga/icinga2/issues/6669) (PR): Don't throw an error when namespace indexers don't find a valid key
+* [#6668](https://github.com/icinga/icinga2/issues/6668) (Installation, PR): Enhance vim syntax highlighting for 2.10
+* [#6661](https://github.com/icinga/icinga2/issues/6661) (API, Log, code-quality, PR): Cache the peer address in the HTTP server
+* [#6642](https://github.com/icinga/icinga2/issues/6642) (PR): Allow to override MaxConcurrentChecks constant
+* [#6621](https://github.com/icinga/icinga2/issues/6621) (code-quality, PR): Remove unused timestamp function in DB IDO
+* [#6618](https://github.com/icinga/icinga2/issues/6618) (PR): Silence compiler warning for nice\(\)
+* [#6591](https://github.com/icinga/icinga2/issues/6591) (PR): Fix static initializer priority for namespaces in LTO builds
+* [#6588](https://github.com/icinga/icinga2/issues/6588) (PR): Fix using full path in prepare-dirs/safe-reload scripts
+* [#6586](https://github.com/icinga/icinga2/issues/6586) (PR): Fix non-unity builds on CentOS 7 with std::shared\_ptr
+* [#6583](https://github.com/icinga/icinga2/issues/6583) (Documentation, Installation, PR): Update PostgreSQL library path variable in INSTALL.md
+* [#6574](https://github.com/icinga/icinga2/issues/6574) (PR): Move new downtime constants into the Icinga namespace
+* [#6570](https://github.com/icinga/icinga2/issues/6570) (Cluster, PR): Increase limit for simultaneously connected anonymous TLS clients
+* [#6567](https://github.com/icinga/icinga2/issues/6567) (PR): ApiListener: Dump the state file port detail as number
+* [#6556](https://github.com/icinga/icinga2/issues/6556) (Installation, Windows, PR): windows: Allow suppression of extra actions in the MSI package
+* [#6544](https://github.com/icinga/icinga2/issues/6544) (code-quality, PR): Remove \#include for deprecated header file
+* [#6539](https://github.com/icinga/icinga2/issues/6539) (PR): Build fix for CentOS 7 and non-unity builds
+* [#6526](https://github.com/icinga/icinga2/issues/6526) (code-quality, PR): icinga::PackObject\(\): shorten conversion to string
+* [#6510](https://github.com/icinga/icinga2/issues/6510) (Tests, Windows, PR): Update windows build scripts
+* [#6494](https://github.com/icinga/icinga2/issues/6494) (Tests, PR): Test PackObject
+* [#6489](https://github.com/icinga/icinga2/issues/6489) (code-quality, PR): Implement object packer for consistent hashing
+* [#6484](https://github.com/icinga/icinga2/issues/6484) (Packages): Packages from https://packages.icinga.com are not Systemd Type=notify enabled?
+* [#6469](https://github.com/icinga/icinga2/issues/6469) (Installation, Windows, PR): Fix Windows Agent resize behavior
+* [#6458](https://github.com/icinga/icinga2/issues/6458) (code-quality, PR): Fix debug build log entry for ConfigItem activation priority
+* [#6456](https://github.com/icinga/icinga2/issues/6456) (code-quality, PR): Keep notes for immediately log flushing
+* [#6440](https://github.com/icinga/icinga2/issues/6440) (code-quality, PR): Fix typo
+* [#6410](https://github.com/icinga/icinga2/issues/6410) (code-quality, PR): Remove unused code
+* [#4959](https://github.com/icinga/icinga2/issues/4959) (Installation, Windows): Windows Agent Wizard Window resizes with screen, hiding buttons
+
+## 2.9.3 (2019-07-30)
+
+[Issue and PRs](https://github.com/Icinga/icinga2/issues?utf8=%E2%9C%93&q=milestone%3A2.9.3)
+
+### Bugfixes
+
+* Fix el7 not loading ECDHE cipher suites #7247
+* Fix checkresults from the future breaking checks #6797 ref/NC/595861
+* DB IDO: Don't enqueue queries when the feature is paused (HA) #5876
+
+## 2.9.2 (2018-09-26)
+
+### Enhancement
+
+* [#6602](https://github.com/icinga/icinga2/issues/6602) (API, Cluster, PR): Improve TLS handshake exception logging
+* [#6568](https://github.com/icinga/icinga2/issues/6568) (Configuration, PR): Ensure that config object types are committed in dependent load order
+* [#6497](https://github.com/icinga/icinga2/issues/6497) (Configuration, PR): Improve error logging for match/regex/cidr\_match functions and unsupported dictionary usage
+
+### Bug
+
+* [#6596](https://github.com/icinga/icinga2/issues/6596) (Crash, PR): Fix crash on API queries with Fedora 28 hardening and GCC 8
+* [#6581](https://github.com/icinga/icinga2/issues/6581) (Configuration, PR): Shuffle items before config validation
+* [#6569](https://github.com/icinga/icinga2/issues/6569) (DB IDO): Custom Vars not updated after upgrade
+* [#6533](https://github.com/icinga/icinga2/issues/6533) (Crash): Icinga2 crashes after using some api-commands on Fedora 28
+* [#6505](https://github.com/icinga/icinga2/issues/6505) (Cluster, PR): Fix clusterzonecheck if not connected
+* [#6498](https://github.com/icinga/icinga2/issues/6498) (Configuration, PR): Fix regression with MatchAny false conditions on match/regex/cidr\_match
+* [#6496](https://github.com/icinga/icinga2/issues/6496) (Configuration): error with match and type matchany
+
+### Documentation
+
+* [#6590](https://github.com/icinga/icinga2/issues/6590) (DB IDO, Documentation, PR): Update workaround for custom vars
+* [#6572](https://github.com/icinga/icinga2/issues/6572) (Documentation, PR): Add note about workaround for broken custom vars
+
+### Support
+
+* [#6540](https://github.com/icinga/icinga2/issues/6540) (Configuration): Evaluate a fixed config compiler commit order
+* [#6486](https://github.com/icinga/icinga2/issues/6486) (Configuration): Configuration validation w/ ScheduledDowntimes performance decreased in 2.9
+* [#6442](https://github.com/icinga/icinga2/issues/6442) (Configuration): Error while evaluating "assign where match" expression: std::bad\_cast
+
+## 2.9.1 (2018-07-24)
+
+### Bug
+
+* [#6457](https://github.com/icinga/icinga2/issues/6457) (PR): Ensure that timer thread is initialized after Daemonize\(\)
+* [#6449](https://github.com/icinga/icinga2/issues/6449): icinga r2.9.0-1 init.d script overrides PATH variable
+* [#6445](https://github.com/icinga/icinga2/issues/6445): Problem with daemonize \(init scripts, -d\) on Debian 8 / CentOS 6 / Ubuntu 14 / SLES 11 in 2.9
+* [#6444](https://github.com/icinga/icinga2/issues/6444) (PR): SELinux: allow systemd notify
+* [#6443](https://github.com/icinga/icinga2/issues/6443): selinux and 2.9
+
+### Support
+
+* [#6470](https://github.com/icinga/icinga2/issues/6470) (code-quality, PR): Fix spelling errors.
+* [#6467](https://github.com/icinga/icinga2/issues/6467) (Tests, PR): Start and stop the timer thread lazily
+* [#6461](https://github.com/icinga/icinga2/issues/6461) (Tests): Broken tests with fix from \#6457
+* [#6451](https://github.com/icinga/icinga2/issues/6451) (Packages, PR): Fix initscripts
+* [#6450](https://github.com/icinga/icinga2/issues/6450) (Packages): init script helpers - source: not found
+
+## 2.9.0 (2018-07-17)
+
+### Notes
+
+- Elasticsearch 6 Support
+- The icinga health check supports a minimum version parameter, the ido check gains thresholds for query rate and pending queries, and the dummy check is executed in-memory, avoiding a plugin call
+- `ApplicationVersion` constant in the configuration
+- Setup wizards: global zone, disable conf.d inclusion, unified parameter handling
+- TTL support for check results, pretty formatting for REST API queries
+- TLS support for IDO PostgreSQL
+- Improvements for check scheduling, concurrent checks with command endpoints, downtime notification handling, scheduled downtimes and memory handling with many API requests
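+
+A minimal sketch of two of the items above, assuming the `icinga_min_version` custom attribute of the ITL's built-in `icinga` CheckCommand and the new `ApplicationVersion` constant; host and service names are placeholders:
+
+```
+object Service "icinga" {
+  host_name     = "icinga2-master1.localdomain"   // placeholder host
+  check_command = "icinga"
+
+  // New in 2.9: warn if the monitored instance runs an older version
+  vars.icinga_min_version = "2.9.0"
+}
+
+// New in 2.9: the running version is available as a constant
+log(LogInformation, "config", "Running Icinga " + ApplicationVersion)
+```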
+
+### Enhancement
+
+* [#6400](https://github.com/icinga/icinga2/issues/6400) (Plugins, Windows, PR): Enhance debug logging for check\_nscp\_api
+* [#6321](https://github.com/icinga/icinga2/issues/6321) (Log, PR): Update log message for skipped certificate renewal
+* [#6305](https://github.com/icinga/icinga2/issues/6305) (PR): Introduce the 'Environment' variable
+* [#6299](https://github.com/icinga/icinga2/issues/6299) (Check Execution, Log, PR): Change log level for failed event command execution
+* [#6285](https://github.com/icinga/icinga2/issues/6285) (CLI, Log, PR): Add support for config validation log timestamps
+* [#6270](https://github.com/icinga/icinga2/issues/6270) (Configuration, PR): Add activation priority for config object types
+* [#6236](https://github.com/icinga/icinga2/issues/6236) (DB IDO, PR): Add TLS support for DB IDO PostgreSQL feature
+* [#6219](https://github.com/icinga/icinga2/issues/6219) (Elasticsearch, PR): Add support for Elasticsearch 6
+* [#6211](https://github.com/icinga/icinga2/issues/6211) (DB IDO): IDO pgsql with TLS support
+* [#6209](https://github.com/icinga/icinga2/issues/6209) (CLI, PR): Unify zone name settings in node setup/wizard; add connection-less mode for node setup
+* [#6208](https://github.com/icinga/icinga2/issues/6208) (CLI): Add connection-less support for node setup CLI command
+* [#6206](https://github.com/icinga/icinga2/issues/6206) (Configuration, PR): Add ApplicationVersion built-in constant
+* [#6205](https://github.com/icinga/icinga2/issues/6205) (API, PR): API: Unify verbose error messages
+* [#6194](https://github.com/icinga/icinga2/issues/6194) (Elasticsearch, Graylog, PR): Elasticsearch/GELF: Add metric unit to performance data fields
+* [#6170](https://github.com/icinga/icinga2/issues/6170) (Configuration, Windows, PR): Add option to windows installer to add global zones
+* [#6158](https://github.com/icinga/icinga2/issues/6158) (API, Log): Review API debugging: verboseErrors and diagnostic information
+* [#6136](https://github.com/icinga/icinga2/issues/6136) (Check Execution, PR): Add counter for current concurrent checks to Icinga check
+* [#6131](https://github.com/icinga/icinga2/issues/6131) (Log, PR): Log which ticket was invalid on the master
+* [#6109](https://github.com/icinga/icinga2/issues/6109) (Plugins, PR): Add 'used' feature to check\_memory
+* [#6090](https://github.com/icinga/icinga2/issues/6090) (Notifications, PR): Fixed URL encoding for HOSTNAME and SERVICENAME in mail notification
+* [#6078](https://github.com/icinga/icinga2/issues/6078) (Check Execution, PR): Add more metrics and details to built-in 'random' check
+* [#6039](https://github.com/icinga/icinga2/issues/6039) (Configuration, PR): Improve location info for some error messages
+* [#6033](https://github.com/icinga/icinga2/issues/6033) (Compat): Deprecate StatusDataWriter
+* [#6032](https://github.com/icinga/icinga2/issues/6032) (Compat): Deprecate CompatLogger
+* [#6010](https://github.com/icinga/icinga2/issues/6010) (Cluster, PR): Move the endpoint list into a new line for the 'cluster' check
+* [#5996](https://github.com/icinga/icinga2/issues/5996) (PR): Add systemd watchdog and adjust reload behaviour
+* [#5985](https://github.com/icinga/icinga2/issues/5985) (DB IDO, PR): Add query thresholds for the 'ido' check: Rate and pending queries
+* [#5979](https://github.com/icinga/icinga2/issues/5979) (CLI, PR): Add quit, exit and help
+* [#5973](https://github.com/icinga/icinga2/issues/5973) (API, Check Execution, PR): Add 'ttl' support for check result freshness via REST API
+* [#5959](https://github.com/icinga/icinga2/issues/5959) (API, PR): API: Add 'pretty' parameter for beautified JSON response bodies
+* [#5905](https://github.com/icinga/icinga2/issues/5905) (Elasticsearch): Add support for Elasticsearch 6
+* [#5888](https://github.com/icinga/icinga2/issues/5888) (DB IDO, PR): FindMySQL: Support mariadbclient implementation
+* [#5877](https://github.com/icinga/icinga2/issues/5877) (API): Add pretty format to REST API parameters \(for debugging\)
+* [#5811](https://github.com/icinga/icinga2/issues/5811) (CLI, PR): Update NodeName/ZoneName constants with 'api setup'
+* [#5767](https://github.com/icinga/icinga2/issues/5767) (CLI, PR): Implement ability to make global zones configurable during node wizard/setup
+* [#5733](https://github.com/icinga/icinga2/issues/5733) (Plugins, Windows, PR): Make --perf-syntax also change short message
+* [#5729](https://github.com/icinga/icinga2/issues/5729) (CLI, Cluster, PR): Correct node wizard output formatting
+* [#5675](https://github.com/icinga/icinga2/issues/5675) (InfluxDB, PR): Add pdv unit to influxdbwriter if not empty + doc
+* [#5627](https://github.com/icinga/icinga2/issues/5627) (InfluxDB, Metrics): InfluxDBWriter: Send metric unit \(perfdata\)
+* [#5605](https://github.com/icinga/icinga2/issues/5605) (CLI, Cluster, Configuration): Disable conf.d inclusion in node setup wizards
+* [#5509](https://github.com/icinga/icinga2/issues/5509) (Cluster, wishlist): Add metrics about communication between endpoints
+* [#5444](https://github.com/icinga/icinga2/issues/5444) (Cluster): Display endpoints in the second line of the ClusterCheckTask output
+* [#5426](https://github.com/icinga/icinga2/issues/5426) (CLI, Configuration, PR): Add the ability to disable the conf.d inclusion through the node wizard
+* [#5418](https://github.com/icinga/icinga2/issues/5418) (Plugins, Windows): Feature request: check\_perfmon.exe - Change name of counter in output
+* [#4966](https://github.com/icinga/icinga2/issues/4966) (CLI, Cluster): Unify setting of master zones name
+* [#4508](https://github.com/icinga/icinga2/issues/4508) (CLI): node wizard/setup: allow to disable conf.d inclusion
+* [#3455](https://github.com/icinga/icinga2/issues/3455) (API, Log): startup.log in stage dir has no timestamps
+* [#3245](https://github.com/icinga/icinga2/issues/3245) (CLI, help wanted, wishlist): Add option to Windows installer to add global zone during setup
+* [#2287](https://github.com/icinga/icinga2/issues/2287) (help wanted, wishlist): Please support systemd startup notification
+
+### Bug
+
+* [#6429](https://github.com/icinga/icinga2/issues/6429) (PR): Make HttpServerConnection\#m\_DataHandlerMutex a boost::recursive\_mutex
+* [#6428](https://github.com/icinga/icinga2/issues/6428) (API): Director kickstart wizard querying the API results in TLS stream disconnected infinite loop
+* [#6411](https://github.com/icinga/icinga2/issues/6411) (Plugins, Windows, PR): Windows: Conform to the Plugin API spec for performance label quoting
+* [#6407](https://github.com/icinga/icinga2/issues/6407) (Windows, PR): Fix wrong UOM in check\_uptime windows plugin
+* [#6405](https://github.com/icinga/icinga2/issues/6405) (Windows, PR): TcpSocket\#Bind\(\): reuse socket addresses on Windows, too
+* [#6403](https://github.com/icinga/icinga2/issues/6403) (API, PR): Conform to RFC for CRLF in HTTP requests
+* [#6401](https://github.com/icinga/icinga2/issues/6401) (Elasticsearch, InfluxDB, PR): Fix connection error handling in Elasticsearch and InfluxDB features
+* [#6397](https://github.com/icinga/icinga2/issues/6397) (Plugins, Windows, PR): TlsStream\#IsEof\(\): fix false positive EOF indicator
+* [#6394](https://github.com/icinga/icinga2/issues/6394) (Crash, Elasticsearch): Icinga will throw an exception, if ElasticSearch is not reachable
+* [#6393](https://github.com/icinga/icinga2/issues/6393) (API, Elasticsearch, PR): Stream\#ReadLine\(\): fix false positive buffer underflow indicator
+* [#6387](https://github.com/icinga/icinga2/issues/6387) (Configuration, Crash, Windows, PR): Remove ApiUser password\_hash functionality
+* [#6383](https://github.com/icinga/icinga2/issues/6383) (API, CLI, PR): HttpRequest\#ParseBody\(\): indicate success on complete body
+* [#6378](https://github.com/icinga/icinga2/issues/6378) (Windows): Analyze Windows reload behaviour
+* [#6371](https://github.com/icinga/icinga2/issues/6371) (API, Cluster, PR): ApiListener\#NewClientHandlerInternal\(\): Explicitly close the TLS stream on any failure
+* [#6368](https://github.com/icinga/icinga2/issues/6368) (CLI, PR): Fix program option parsing
+* [#6365](https://github.com/icinga/icinga2/issues/6365) (CLI): Different behavior between `icinga2 -V` and `icinga2 --version`
+* [#6355](https://github.com/icinga/icinga2/issues/6355) (API): HTTP header size too low: Long URLs and session cookies cause bad requests
+* [#6354](https://github.com/icinga/icinga2/issues/6354) (Elasticsearch): ElasticsearchWriter not writing to ES
+* [#6336](https://github.com/icinga/icinga2/issues/6336) (Log, PR): Fix unnecessary blank in log message
+* [#6324](https://github.com/icinga/icinga2/issues/6324) (Crash, PR): Ensure that password hash generation from OpenSSL is atomic
+* [#6319](https://github.com/icinga/icinga2/issues/6319) (Windows): Windows service restart fails and config validate runs forever
+* [#6297](https://github.com/icinga/icinga2/issues/6297) (Cluster, PR): Execute event commands only on actively checked host/service objects in an HA zone
+* [#6294](https://github.com/icinga/icinga2/issues/6294) (API, Configuration, PR): Ensure that group memberships on API object creation are unique
+* [#6292](https://github.com/icinga/icinga2/issues/6292) (Notifications, PR): Fix problem with reminder notifications if the checkable is flapping
+* [#6290](https://github.com/icinga/icinga2/issues/6290) (OpenTSDB, PR): Fixed opentsdb metric name with colon chars
+* [#6282](https://github.com/icinga/icinga2/issues/6282) (Configuration): Issue when using excludes in TimePeriod Objects
+* [#6279](https://github.com/icinga/icinga2/issues/6279) (Crash): segfault with sha1\_block\_data\_order\_avx of libcrypto
+* [#6255](https://github.com/icinga/icinga2/issues/6255) (Configuration): On debian based systems /etc/default/icinga2 is not read/used
+* [#6242](https://github.com/icinga/icinga2/issues/6242) (Plugins, Windows): Sporadic check\_nscp\_api timeouts
+* [#6239](https://github.com/icinga/icinga2/issues/6239) (Plugins, Windows, PR): Fix Windows check\_memory rounding
+* [#6231](https://github.com/icinga/icinga2/issues/6231) (Notifications): icinga2.8 - Notifications are sent even in downtime
+* [#6218](https://github.com/icinga/icinga2/issues/6218) (PR): attempt to fix issue \#5277
+* [#6217](https://github.com/icinga/icinga2/issues/6217) (Check Execution, PR): Fix check behavior on restart
+* [#6204](https://github.com/icinga/icinga2/issues/6204) (API, PR): API: Check if objects exists and return proper error message
+* [#6195](https://github.com/icinga/icinga2/issues/6195) (API, Crash, PR): Fix crash in remote api console
+* [#6193](https://github.com/icinga/icinga2/issues/6193) (Crash, Graylog, PR): GelfWriter: Fix crash on invalid performance data metrics
+* [#6184](https://github.com/icinga/icinga2/issues/6184) (API): debug console with API connection sometimes hangs since 2.8.2
+* [#6125](https://github.com/icinga/icinga2/issues/6125) (Configuration, PR): Fix description of the NotificationComponent in notification.conf
+* [#6077](https://github.com/icinga/icinga2/issues/6077) (API, PR): Allow to pass raw performance data in 'process-check-result' API action
+* [#6057](https://github.com/icinga/icinga2/issues/6057) (Notifications): Icinga2 sends notifications without logging about it and despite having a downtime
+* [#6020](https://github.com/icinga/icinga2/issues/6020) (CLI, PR): Fix crash when running 'icinga2 console' without HOME environment variable
+* [#6019](https://github.com/icinga/icinga2/issues/6019): icinga2 console -r crashes when run without a HOME environment variable
+* [#6016](https://github.com/icinga/icinga2/issues/6016) (Notifications, PR): Check notification state filters for problems only, not for Custom, etc.
+* [#5988](https://github.com/icinga/icinga2/issues/5988) (Check Execution, Cluster, PR): Fix concurrent checks limit while using command\_endpoint
+* [#5964](https://github.com/icinga/icinga2/issues/5964) (Metrics, OpenTSDB, PR): OpenTSDB writer - Fix function for escaping host tag chars.
+* [#5963](https://github.com/icinga/icinga2/issues/5963) (Metrics, OpenTSDB): OpenTSDB writer is escaping wrong chars for host names.
+* [#5952](https://github.com/icinga/icinga2/issues/5952) (Notifications): Custom notifications are filtered by object state
+* [#5940](https://github.com/icinga/icinga2/issues/5940) (PR): Remove deprecated Chocolatey functions
+* [#5928](https://github.com/icinga/icinga2/issues/5928) (PR): Fix build problem with MSVC
+* [#5908](https://github.com/icinga/icinga2/issues/5908) (Windows): Icinga2 fails to build on Windows
+* [#5901](https://github.com/icinga/icinga2/issues/5901) (PR): Do not replace colons in plugin output
+* [#5885](https://github.com/icinga/icinga2/issues/5885) (PR): Workaround for GCC bug 61321
+* [#5884](https://github.com/icinga/icinga2/issues/5884): Icinga2 fails to build
+* [#5872](https://github.com/icinga/icinga2/issues/5872) (PR): Replace incorrect fclose\(\) call with pclose\(\)
+* [#5863](https://github.com/icinga/icinga2/issues/5863) (PR): Fix glob error handling
+* [#5861](https://github.com/icinga/icinga2/issues/5861) (PR): Fix incorrect memory access
+* [#5860](https://github.com/icinga/icinga2/issues/5860) (PR): Fix memory leaks in the unit tests
+* [#5853](https://github.com/icinga/icinga2/issues/5853) (Plugins, Windows, PR): Fix missing space in check\_service output
+* [#5840](https://github.com/icinga/icinga2/issues/5840) (Elasticsearch, PR): Fix newline terminator for bulk requests in ElasticsearchWriter
+* [#5796](https://github.com/icinga/icinga2/issues/5796) (CLI, PR): Fix error reporting for 'icinga2 console -r'
+* [#5795](https://github.com/icinga/icinga2/issues/5795) (Elasticsearch): ElasticsearchWriter gives "Unexpected response code 400" with Elasticsearch 6.x
+* [#5763](https://github.com/icinga/icinga2/issues/5763) (API): "icinga2 api setup" should explicitly set the NodeName constant in constants.conf
+* [#5753](https://github.com/icinga/icinga2/issues/5753) (API, Cluster, Metrics, PR): Fix that RingBuffer does not get updated and add metrics about communication between endpoints
+* [#5718](https://github.com/icinga/icinga2/issues/5718) (API, PR): API: Fix http status codes
+* [#5550](https://github.com/icinga/icinga2/issues/5550) (API): Verify error codes and returned log messages in API actions
+* [#5277](https://github.com/icinga/icinga2/issues/5277) (Notifications): Flexible downtime is expired at end\_time, not trigger\_time+duration
+* [#5095](https://github.com/icinga/icinga2/issues/5095) (API): Wrong HTTP status code when API request fails
+* [#5083](https://github.com/icinga/icinga2/issues/5083) (Check Execution): Initial checks are not executed immediately
+* [#4786](https://github.com/icinga/icinga2/issues/4786) (API): API: Command process-check-result fails if it contains performance data
+* [#4785](https://github.com/icinga/icinga2/issues/4785) (Compat): Semicolons in plugin output are converted to colon
+* [#4732](https://github.com/icinga/icinga2/issues/4732) (API, Configuration): Duplicate groups allowed when creating host
+* [#4436](https://github.com/icinga/icinga2/issues/4436) (Check Execution): New objects not scheduled to check immediately
+* [#4272](https://github.com/icinga/icinga2/issues/4272) (Cluster, Configuration): Duplicating downtime from ScheduledDowntime object on each restart
+* [#3431](https://github.com/icinga/icinga2/issues/3431) (Cluster): Eventhandler trigger on all endpoints in high available zone
+
+### ITL
+
+* [#6389](https://github.com/icinga/icinga2/issues/6389) (ITL, PR): New ITL command nscp-local-tasksched
+* [#6348](https://github.com/icinga/icinga2/issues/6348) (ITL, PR): Fix for locally catalogued databases. Fixes \#6338
+* [#6338](https://github.com/icinga/icinga2/issues/6338) (ITL): db2\_health not working with catalogued databases, as --hostname is always used
+* [#6308](https://github.com/icinga/icinga2/issues/6308) (ITL, PR): Update lsi-raid ITL command
+* [#6263](https://github.com/icinga/icinga2/issues/6263) (ITL, PR): ITL: Add default thresholds to windows check commands
+* [#6139](https://github.com/icinga/icinga2/issues/6139) (ITL, PR): itl/disk: Ignore overlay and netfs filesystems
+* [#6045](https://github.com/icinga/icinga2/issues/6045) (ITL, PR): Move the "passive" check command to command-icinga.conf
+* [#6043](https://github.com/icinga/icinga2/issues/6043) (ITL): ITL "plugins" has an implicit dependency on "itl"
+* [#6034](https://github.com/icinga/icinga2/issues/6034) (ITL, PR): ITL by\_ssh add -E parameter
+* [#5958](https://github.com/icinga/icinga2/issues/5958) (ITL, PR): Add minimum version check to the built-in icinga command
+* [#5954](https://github.com/icinga/icinga2/issues/5954) (ITL, PR): ITL: Add mongodb --authdb parameter support
+* [#5951](https://github.com/icinga/icinga2/issues/5951) (ITL, PR): itl: Add command parameters for snmp-memory
+* [#5921](https://github.com/icinga/icinga2/issues/5921) (ITL, PR): Add icingacli-director check to ITL
+* [#5920](https://github.com/icinga/icinga2/issues/5920) (ITL): Add Check for Director Jobs to ITL
+* [#5914](https://github.com/icinga/icinga2/issues/5914) (ITL, PR): Fix for wrong attribute in ITL mongodb CheckCommand
+* [#5906](https://github.com/icinga/icinga2/issues/5906) (ITL, PR): Add check\_openmanage command to ITL.
+* [#5902](https://github.com/icinga/icinga2/issues/5902) (ITL, PR): Add parameter --octetlength to snmp-storage command.
+* [#5817](https://github.com/icinga/icinga2/issues/5817) (ITL): mongodb\_address vs mongodb\_host
+* [#5812](https://github.com/icinga/icinga2/issues/5812) (ITL): Better way to check required parameters in notification scripts
+* [#5805](https://github.com/icinga/icinga2/issues/5805) (ITL, PR): Add support for LD\_LIBRARY\_PATH env variable in oracle\_health ITL CheckCommand
+* [#5792](https://github.com/icinga/icinga2/issues/5792) (ITL, PR): ITL: Add check\_rpc
+* [#5787](https://github.com/icinga/icinga2/issues/5787) (Check Execution, ITL): random check should provide performance data metrics
+* [#5744](https://github.com/icinga/icinga2/issues/5744) (Check Execution, ITL, PR): Implement DummyCheckTask and move dummy into embedded in-memory checks
+* [#5717](https://github.com/icinga/icinga2/issues/5717) (ITL, PR): add order tags to disk check
+* [#5714](https://github.com/icinga/icinga2/issues/5714) (ITL): disk check in icinga2/itl/command-plugins.conf lacks order tags
+* [#5260](https://github.com/icinga/icinga2/issues/5260) (ITL): CheckCommand mongodb does not expose authdb option
+
+### Documentation
+
+* [#6436](https://github.com/icinga/icinga2/issues/6436) (Documentation, PR): Update tested Elasticsearch version
+* [#6435](https://github.com/icinga/icinga2/issues/6435) (Documentation, PR): Add note on sysconfig shell variables for Systemd to the Upgrading docs
+* [#6433](https://github.com/icinga/icinga2/issues/6433) (Documentation, PR): Docs: Fix typos in 03-monitoring-basics.md
+* [#6426](https://github.com/icinga/icinga2/issues/6426) (Documentation, PR): Update 'Upgrading to 2.9' docs
+* [#6413](https://github.com/icinga/icinga2/issues/6413) (Documentation, PR): Fix table in Livestatus Filters
+* [#6391](https://github.com/icinga/icinga2/issues/6391) (Documentation, PR): Docs: Fix icinga.com link
+* [#6390](https://github.com/icinga/icinga2/issues/6390) (Documentation, Windows, PR): Docs: Update Windows wizard images
+* [#6375](https://github.com/icinga/icinga2/issues/6375) (Documentation, PR): some minor fixes in the flapping documentation
+* [#6374](https://github.com/icinga/icinga2/issues/6374) (Documentation, PR): Docs: Add an additional note for VMWare timeouts on Ubuntu 16.04 LTS
+* [#6373](https://github.com/icinga/icinga2/issues/6373) (Documentation, PR): Drop command template imports for versions \< 2.6 in the docs
+* [#6372](https://github.com/icinga/icinga2/issues/6372) (Documentation, PR): Remove the import of 'legacy-timeperiod' in the docs
+* [#6350](https://github.com/icinga/icinga2/issues/6350) (Documentation, PR): Clarify the permission system of the API in the docs
+* [#6344](https://github.com/icinga/icinga2/issues/6344) (Documentation, PR): README: Fix broken community link
+* [#6330](https://github.com/icinga/icinga2/issues/6330) (Documentation, PR): Fix $ipaddress6$ attribute name typo in the docs
+* [#6317](https://github.com/icinga/icinga2/issues/6317) (Documentation, PR): Add a note on Windows NSClient++ CPU checks to the docs
+* [#6289](https://github.com/icinga/icinga2/issues/6289) (Documentation, PR): Update release documentation with git tag signing key configuration
+* [#6286](https://github.com/icinga/icinga2/issues/6286) (Documentation): Update Windows wizard screenshots in the docs
+* [#6283](https://github.com/icinga/icinga2/issues/6283) (Documentation, PR): edit Icinga license info so that GitHub recognizes it
+* [#6271](https://github.com/icinga/icinga2/issues/6271) (Documentation, PR): Enhance advanced topics with \(scheduled\) downtimes
+* [#6267](https://github.com/icinga/icinga2/issues/6267) (Documentation, PR): Update docs to reflect required user\* attributes for notification objects
+* [#6265](https://github.com/icinga/icinga2/issues/6265) (Documentation): Notifications user/user\_groups required
+* [#6264](https://github.com/icinga/icinga2/issues/6264) (Documentation, PR): Enhance "Getting Started" chapter
+* [#6262](https://github.com/icinga/icinga2/issues/6262) (Documentation, PR): Enhance the environment variables chapter
+* [#6254](https://github.com/icinga/icinga2/issues/6254) (Documentation, PR): Enhance release documentation
+* [#6253](https://github.com/icinga/icinga2/issues/6253) (Documentation, PR): Doc: Add note for not fully supported Plugin collections
+* [#6243](https://github.com/icinga/icinga2/issues/6243) (Documentation, PR): Update PostgreSQL documentation
+* [#6226](https://github.com/icinga/icinga2/issues/6226) (Documentation, PR): Fix broken SELinux anchor in the documentation
+* [#6224](https://github.com/icinga/icinga2/issues/6224) (Documentation, PR): Update volatile docs
+* [#6216](https://github.com/icinga/icinga2/issues/6216) (Documentation): Volatile service explanation
+* [#6180](https://github.com/icinga/icinga2/issues/6180) (Documentation, PR): Doc: fixed wrong information about defaulting
+* [#6128](https://github.com/icinga/icinga2/issues/6128) (Documentation, PR): Adding documentation for configurable global zones during setup
+* [#6067](https://github.com/icinga/icinga2/issues/6067) (Documentation, Windows, PR): Improve Windows builds and testing
+* [#6022](https://github.com/icinga/icinga2/issues/6022) (Configuration, Documentation, PR): Update default config and documentation for the "library" keyword
+* [#6018](https://github.com/icinga/icinga2/issues/6018) (Documentation): Move init configuration from getting-started
+* [#6000](https://github.com/icinga/icinga2/issues/6000) (Documentation, PR): Add newline to COPYING to fix Github license detection
+* [#5948](https://github.com/icinga/icinga2/issues/5948) (Documentation, PR): doc: Improve INSTALL documentation
+* [#4958](https://github.com/icinga/icinga2/issues/4958) (Check Execution, Documentation): How to set the HOME environment variable
+
+### Support
+
+* [#6439](https://github.com/icinga/icinga2/issues/6439) (PR): Revert "Fix obsolete parameter in Systemd script"
+* [#6423](https://github.com/icinga/icinga2/issues/6423) (PR): Fix missing next check update causing the scheduler to execute checks too often
+* [#6421](https://github.com/icinga/icinga2/issues/6421) (Check Execution): High CPU load due to seemingly ignored check\_interval
+* [#6412](https://github.com/icinga/icinga2/issues/6412) (Plugins, Windows, PR): Fix output formatting in windows plugins
+* [#6402](https://github.com/icinga/icinga2/issues/6402) (Cluster, code-quality, PR): Use SSL\_pending\(\) for remaining TLS stream data
+* [#6384](https://github.com/icinga/icinga2/issues/6384) (PR): Remove leftover for sysconfig file parsing
+* [#6381](https://github.com/icinga/icinga2/issues/6381) (Packages, PR): Fix sysconfig not being handled correctly by sysvinit
+* [#6377](https://github.com/icinga/icinga2/issues/6377) (code-quality, PR): Fix missing name for workqueue while creating runtime objects via API
+* [#6364](https://github.com/icinga/icinga2/issues/6364) (code-quality): lib/base/workqueue.cpp:212: assertion failed: !m\_Name.IsEmpty\(\)
+* [#6361](https://github.com/icinga/icinga2/issues/6361) (API, Cluster): Analyse socket IO handling with HTTP/JSON-RPC
+* [#6359](https://github.com/icinga/icinga2/issues/6359) (Configuration, PR): Fix ScheduledDowntimes replicating on restart
+* [#6357](https://github.com/icinga/icinga2/issues/6357) (API, PR): Increase header size to 8KB for HTTP requests
+* [#6347](https://github.com/icinga/icinga2/issues/6347) (Packages, PR): SELinux: Allow notification plugins to read local users
+* [#6343](https://github.com/icinga/icinga2/issues/6343) (Check Execution, Cluster, PR): Fix that checks with command\_endpoint don't return any check results
+* [#6337](https://github.com/icinga/icinga2/issues/6337): Checks via command\_endpoint are not executed \(snapshot packages only\)
+* [#6328](https://github.com/icinga/icinga2/issues/6328) (Installation, Packages, PR): Rework sysconfig file/startup environment
+* [#6320](https://github.com/icinga/icinga2/issues/6320) (PR): Ensure that icinga\_min\_version parameter is optional
+* [#6309](https://github.com/icinga/icinga2/issues/6309) (PR): Fix compiler warning in checkercomponent.ti
+* [#6306](https://github.com/icinga/icinga2/issues/6306) (code-quality, PR): Adjust message for CheckResultReader deprecation
+* [#6301](https://github.com/icinga/icinga2/issues/6301) (Documentation, code-quality, PR): Adjust deprecation removal for compat features
+* [#6295](https://github.com/icinga/icinga2/issues/6295) (Compat, PR): Deprecate compatlog feature
+* [#6238](https://github.com/icinga/icinga2/issues/6238) (Notifications, PR): Implement better way to check parameters in notification scripts
+* [#6233](https://github.com/icinga/icinga2/issues/6233) (Check Execution): Verify next check execution on daemon reload
+* [#6229](https://github.com/icinga/icinga2/issues/6229) (Packages, PR): Don't use shell variables in sysconfig
+* [#6214](https://github.com/icinga/icinga2/issues/6214) (Packages): Reload-internal with unresolved shell variable
+* [#6201](https://github.com/icinga/icinga2/issues/6201) (Windows, PR): Handle exceptions from X509Certificate2
+* [#6199](https://github.com/icinga/icinga2/issues/6199) (API, PR): Return 500 when no api action is successful
+* [#6198](https://github.com/icinga/icinga2/issues/6198) (Compat, PR): Deprecate Statusdatawriter
+* [#6187](https://github.com/icinga/icinga2/issues/6187) (code-quality, PR): Remove Icinga Studio Screenshots
+* [#6181](https://github.com/icinga/icinga2/issues/6181) (Tests, PR): tests: Ensure IcingaApplication is initialized before adding config
+* [#6174](https://github.com/icinga/icinga2/issues/6174) (API, PR): Fix crash without CORS setting
+* [#6173](https://github.com/icinga/icinga2/issues/6173) (API, Crash): Using the API crashes Icinga2 in v2.8.1-537-g064fc80
+* [#6171](https://github.com/icinga/icinga2/issues/6171) (code-quality, PR): Update copyright of the Windows Agent to 2018
+* [#6163](https://github.com/icinga/icinga2/issues/6163) (PR): Fix reload handling by updating the PID file before process overtake
+* [#6160](https://github.com/icinga/icinga2/issues/6160) (code-quality, PR): Replace std::vector:push\_back calls with initializer list
+* [#6126](https://github.com/icinga/icinga2/issues/6126) (PR): Require systemd headers
+* [#6113](https://github.com/icinga/icinga2/issues/6113) (Tests, PR): appveyor: Disable artifacts until we use them
+* [#6107](https://github.com/icinga/icinga2/issues/6107) (code-quality, PR): Allow MYSQL\_LIB to be specified by ENV variable
+* [#6105](https://github.com/icinga/icinga2/issues/6105) (Tests): Snapshot builds fail on livestatus tests
+* [#6098](https://github.com/icinga/icinga2/issues/6098) (API, code-quality, PR): Clean up CORS implementation
+* [#6085](https://github.com/icinga/icinga2/issues/6085) (Cluster, Crash, PR): Fix crash with anonymous clients on certificate signing request and storing sent bytes
+* [#6083](https://github.com/icinga/icinga2/issues/6083) (Log, code-quality, PR): Fix wrong type logging in ConfigItem::Commit
+* [#6082](https://github.com/icinga/icinga2/issues/6082) (Installation, Packages): PID file removed after reload
+* [#6063](https://github.com/icinga/icinga2/issues/6063) (Compat, PR): Deprecate CheckResultReader
+* [#6062](https://github.com/icinga/icinga2/issues/6062) (code-quality, PR): Remove the obsolete 'make-agent-config.py' script
+* [#6061](https://github.com/icinga/icinga2/issues/6061) (code-quality, PR): Remove jenkins test scripts
+* [#6060](https://github.com/icinga/icinga2/issues/6060) (code-quality, PR): Remove Icinga development docker scripts
+* [#6059](https://github.com/icinga/icinga2/issues/6059) (code-quality, PR): Remove Icinga Studio
+* [#6058](https://github.com/icinga/icinga2/issues/6058) (code-quality, PR): Clean up the Icinga plugins a bit
+* [#6055](https://github.com/icinga/icinga2/issues/6055) (Check Execution, Windows, code-quality, PR): methods: Remove unused clrchecktask feature
+* [#6054](https://github.com/icinga/icinga2/issues/6054) (Check Execution, Windows, code-quality): Remove unused clrchecktask
+* [#6051](https://github.com/icinga/icinga2/issues/6051) (code-quality, PR): Set FOLDER cmake property for the icingaloader target
+* [#6050](https://github.com/icinga/icinga2/issues/6050) (code-quality, PR): Replace boost::algorithm::split calls with String::Split
+* [#6044](https://github.com/icinga/icinga2/issues/6044) (code-quality, PR): Implement support for frozen arrays and dictionaries
+* [#6038](https://github.com/icinga/icinga2/issues/6038) (PR): Fix missing include for boost::split
+* [#6037](https://github.com/icinga/icinga2/issues/6037) (PR): Fix build error on Windows
+* [#6029](https://github.com/icinga/icinga2/issues/6029) (code-quality, PR): Remove duplicate semicolons
+* [#6028](https://github.com/icinga/icinga2/issues/6028) (Packages): python notification not running when icinga ran as a service
+* [#6026](https://github.com/icinga/icinga2/issues/6026) (Check Execution, Windows, PR): Fix flapping support for Windows
+* [#6025](https://github.com/icinga/icinga2/issues/6025) (Windows): Implement Flapping on Windows
+* [#6023](https://github.com/icinga/icinga2/issues/6023): Icinga should check whether the libsystemd library is available
+* [#6017](https://github.com/icinga/icinga2/issues/6017) (PR): Remove build breaking include
+* [#6015](https://github.com/icinga/icinga2/issues/6015) (code-quality, PR): Fix whitespaces in CMakeLists files
+* [#6009](https://github.com/icinga/icinga2/issues/6009) (PR): Build fix for ancient versions of GCC
+* [#6008](https://github.com/icinga/icinga2/issues/6008) (PR): Fix compatibility with CMake \< 3.1
+* [#6007](https://github.com/icinga/icinga2/issues/6007) (PR): Fix missing include
+* [#6005](https://github.com/icinga/icinga2/issues/6005) (PR): Fix incorrect dependencies for mkunity targets
+* [#5999](https://github.com/icinga/icinga2/issues/5999) (PR): Build fix
+* [#5998](https://github.com/icinga/icinga2/issues/5998) (code-quality, PR): Build all remaining libraries as object libraries
+* [#5997](https://github.com/icinga/icinga2/issues/5997) (PR): Use gcc-ar and gcc-ranlib when building with -flto
+* [#5994](https://github.com/icinga/icinga2/issues/5994) (InfluxDB, PR): InfluxDBWriter: Fix macro in template
+* [#5993](https://github.com/icinga/icinga2/issues/5993) (code-quality, PR): Use CMake object libraries for our libs
+* [#5992](https://github.com/icinga/icinga2/issues/5992) (code-quality, PR): Remove unused includes
+* [#5984](https://github.com/icinga/icinga2/issues/5984) (DB IDO, PR): Fix missing static libraries for DB IDO
+* [#5983](https://github.com/icinga/icinga2/issues/5983) (code-quality, PR): Use initializer lists for arrays and dictionaries
+* [#5980](https://github.com/icinga/icinga2/issues/5980) (code-quality, PR): Explicitly pass 1 or 0 for notification filters in DB IDO
+* [#5974](https://github.com/icinga/icinga2/issues/5974) (PR): Fix non-unity builds with the icinga check
+* [#5971](https://github.com/icinga/icinga2/issues/5971) (code-quality, PR): Remove libdemo and libhello
+* [#5970](https://github.com/icinga/icinga2/issues/5970) (code-quality, PR): Allocate ConfigItemBuilder objects on the stack
+* [#5969](https://github.com/icinga/icinga2/issues/5969) (code-quality, PR): Remove the WorkQueue::m\_StatsMutex instance variable
+* [#5968](https://github.com/icinga/icinga2/issues/5968) (code-quality, PR): Update the RingBuffer class to use a regular mutex instead of ObjectLock
+* [#5967](https://github.com/icinga/icinga2/issues/5967) (code-quality, PR): Avoid accessing attributes for validators where not necessary
+* [#5965](https://github.com/icinga/icinga2/issues/5965) (code-quality, PR): Avoid unnecessary casts in the JSON encoder
+* [#5961](https://github.com/icinga/icinga2/issues/5961) (PR): Fix macro warning from the icinga check
+* [#5960](https://github.com/icinga/icinga2/issues/5960): Macro warning from the icinga check
+* [#5957](https://github.com/icinga/icinga2/issues/5957) (code-quality, PR): Change a bunch more copyright headers for 2018
+* [#5955](https://github.com/icinga/icinga2/issues/5955) (Configuration, code-quality, PR): Avoid mutex contention in the config parser
+* [#5946](https://github.com/icinga/icinga2/issues/5946) (code-quality, PR): Use clang-tidy to add some more C++11 features
+* [#5945](https://github.com/icinga/icinga2/issues/5945) (code-quality, PR): Fix incorrect indentation for code generated by mkclass
+* [#5944](https://github.com/icinga/icinga2/issues/5944) (code-quality, PR): Add the final keyword to classes
+* [#5939](https://github.com/icinga/icinga2/issues/5939) (PR): Build fix for Debian wheezy
+* [#5937](https://github.com/icinga/icinga2/issues/5937) (code-quality, PR): Remove inline methods and use explicit template instantiation to minimize the number of weak symbols
+* [#5936](https://github.com/icinga/icinga2/issues/5936) (code-quality, PR): Clean up source lists in the CMakeLists.txt files
+* [#5935](https://github.com/icinga/icinga2/issues/5935) (code-quality, PR): Implement support for precompiled headers
+* [#5934](https://github.com/icinga/icinga2/issues/5934) (code-quality, PR): Add more include/library paths for MySQL and PostgreSQL
+* [#5933](https://github.com/icinga/icinga2/issues/5933) (code-quality, PR): Change copyright headers for 2018
+* [#5932](https://github.com/icinga/icinga2/issues/5932) (code-quality, PR): Fix copyright header in cli/troubleshootcommand.hpp
+* [#5931](https://github.com/icinga/icinga2/issues/5931) (code-quality, PR): Improve detection for linker flags
+* [#5930](https://github.com/icinga/icinga2/issues/5930) (code-quality, PR): Replace boost::function with std::function
+* [#5929](https://github.com/icinga/icinga2/issues/5929) (code-quality, PR): Get rid of boost::assign::list\_of in mkclass
+* [#5927](https://github.com/icinga/icinga2/issues/5927) (code-quality, PR): Build libraries as static libraries
+* [#5909](https://github.com/icinga/icinga2/issues/5909) (code-quality, PR): WIP: Improve build times
+* [#5903](https://github.com/icinga/icinga2/issues/5903) (code-quality, PR): Cleanup CompatUtility class and features
+* [#5897](https://github.com/icinga/icinga2/issues/5897) (code-quality, PR): Remove unnecessary inline statements
+* [#5894](https://github.com/icinga/icinga2/issues/5894) (code-quality, PR): Remove string\_iless
+* [#5891](https://github.com/icinga/icinga2/issues/5891) (code-quality, PR): Update .gitignore
+* [#5889](https://github.com/icinga/icinga2/issues/5889) (code-quality, PR): execvpe: Fixup indention for readability
+* [#5887](https://github.com/icinga/icinga2/issues/5887) (PR): Windows build fix
+* [#5886](https://github.com/icinga/icinga2/issues/5886) (code-quality): Remove unnecessary 'inline' keyword
+* [#5882](https://github.com/icinga/icinga2/issues/5882) (code-quality, PR): Avoid unnecessary allocations
+* [#5871](https://github.com/icinga/icinga2/issues/5871) (code-quality, PR): Unit tests for the LegacyTimePeriod class
+* [#5868](https://github.com/icinga/icinga2/issues/5868) (Configuration, code-quality, PR): Use std::unique\_ptr for Expression objects
+* [#5865](https://github.com/icinga/icinga2/issues/5865) (code-quality, PR): Add missing initializer in Utility::NewUniqueID\(\)
+* [#5862](https://github.com/icinga/icinga2/issues/5862) (code-quality, PR): Replace a few more NULLs with nullptr
+* [#5858](https://github.com/icinga/icinga2/issues/5858) (Tests, code-quality, PR): Travis: Add support for Coverity
+* [#5857](https://github.com/icinga/icinga2/issues/5857) (code-quality, PR): Fix compiler warnings
+* [#5855](https://github.com/icinga/icinga2/issues/5855) (PR): Fix build problems with Visual Studio 2017
+* [#5848](https://github.com/icinga/icinga2/issues/5848) (code-quality, PR): Fix COPYING format
+* [#5846](https://github.com/icinga/icinga2/issues/5846) (code-quality, PR): Fix compiler warnings
+* [#5831](https://github.com/icinga/icinga2/issues/5831) (Check Execution, Configuration): No checks were launched on snapshot version 2.8.0.71 \(RHEL6\)
+* [#5827](https://github.com/icinga/icinga2/issues/5827) (code-quality, PR): Replace StatsFunction with Function
+* [#5825](https://github.com/icinga/icinga2/issues/5825) (code-quality, PR): Replace boost::assign::list\_of with initializer lists
+* [#5824](https://github.com/icinga/icinga2/issues/5824) (code-quality, PR): Replace a few Boost features with equivalent C++11 features
+* [#5821](https://github.com/icinga/icinga2/issues/5821) (Packages, Windows): check\_disk build error
+* [#5819](https://github.com/icinga/icinga2/issues/5819) (code-quality, PR): Avoid unnecessary allocations in the FunctionCallExpression class
+* [#5816](https://github.com/icinga/icinga2/issues/5816) (code-quality, PR): Re-implement WrapFunction\(\) using C++11 features
+* [#5809](https://github.com/icinga/icinga2/issues/5809) (Documentation, Installation, PR): Raise required OpenSSL version to 1.0.1
+* [#5758](https://github.com/icinga/icinga2/issues/5758) (Documentation, Packages): Completely remove the spec file from the icinga2 repository
+* [#5743](https://github.com/icinga/icinga2/issues/5743) (CLI, Configuration, Installation): node setup: Deprecate --master\_host and use --parent\_host instead
+* [#5725](https://github.com/icinga/icinga2/issues/5725) (code-quality, PR): Use real UUIDs for Utility::NewUniqueID
+* [#5388](https://github.com/icinga/icinga2/issues/5388) (Packages, PR): Handle mis-detection with clang on RHEL/CentOS 7
+* [#3246](https://github.com/icinga/icinga2/issues/3246) (Installation): Add option to windows installer to disable inclusion of conf.d directory
+
+## 2.8.4 (2018-04-25)
+
+### Bug
+
+* [#6257](https://github.com/icinga/icinga2/issues/6257) (Check Execution): Plugins crash when run from icinga2-2.8.3
+
+### Support
+
+* [#6260](https://github.com/icinga/icinga2/issues/6260) (Check Execution, PR): Revert "fixup set rlimit stack failed condition"
+
+## 2.8.3 (2018-04-24)
+
+### Notes
+
+- Fix InfluxDB backslash escaping
+- Fix Elasticsearch crash on invalid performance data
+- Sysconfig file settings are taken into account
+- Support multiple parameters for check_nscp_api (see the sketch after this list)
+- Documentation enhancements and fixes
+
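+A hedged configuration sketch for the multiple-parameter support mentioned above. The custom attribute names follow the ITL `nscp_api` CheckCommand; host name, password and thresholds are placeholders, so verify the attributes against your installed ITL version:
+
+```
+// Hypothetical service definition using the ITL "nscp_api" CheckCommand.
+object Service "nscp-api-cpu" {
+  host_name = "windows-client"
+  check_command = "nscp_api"
+  vars.nscp_api_host = "127.0.0.1"
+  vars.nscp_api_password = "secret"
+  vars.nscp_api_query = "check_cpu"
+  // 2.8.3 allows repeating the same query argument, e.g. several "time" values:
+  vars.nscp_api_arguments = [ "warning=load > 80", "critical=load > 90", "time=1m", "time=5m", "time=15m" ]
+}
+```
+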
+### Bug
+
+* [#6207](https://github.com/icinga/icinga2/issues/6207) (Plugins, Windows, PR): Fix multiple parameter problems for check\_nscp\_api
+* [#6196](https://github.com/icinga/icinga2/issues/6196) (InfluxDB, Metrics, PR): Fix InfluxDB backslash escaping
+* [#6192](https://github.com/icinga/icinga2/issues/6192) (Crash, Elasticsearch, PR): Elasticsearch: Fix crash with invalid performance data metrics
+* [#6191](https://github.com/icinga/icinga2/issues/6191) (Crash, Elasticsearch): Invalid Perfdata causing Segmentation fault with ElasticsearchWriter
+* [#6182](https://github.com/icinga/icinga2/issues/6182) (InfluxDB): Windows Disk performance data broken in InfluxDB
+* [#6179](https://github.com/icinga/icinga2/issues/6179) (CLI, Crash, PR): Fix crash in api user command
+* [#6178](https://github.com/icinga/icinga2/issues/6178) (API, Crash): Error: boost::bad\_any\_cast: failed conversion using boost::any\_cast
+* [#6140](https://github.com/icinga/icinga2/issues/6140): Force check has no effect
+* [#6119](https://github.com/icinga/icinga2/issues/6119) (PR): fixup set rlimit stack failed condition
+* [#5925](https://github.com/icinga/icinga2/issues/5925) (Crash, PR): Fix missing variable name in ApiListener::Start
+* [#5924](https://github.com/icinga/icinga2/issues/5924) (Crash): The lock variable in ApiListener::Start is missing its name
+* [#5881](https://github.com/icinga/icinga2/issues/5881) (API, PR): Fix package error message
+* [#5706](https://github.com/icinga/icinga2/issues/5706) (Plugins, Windows): nscp\_api - cannot use check\_cpu with "time" argument used multiple times
+
+### Documentation
+
+* [#6227](https://github.com/icinga/icinga2/issues/6227) (Documentation, PR): Fix missing anchors in CLI commands chapter
+* [#6203](https://github.com/icinga/icinga2/issues/6203) (Documentation, PR): Add docs for script debugger and API filters
+* [#6177](https://github.com/icinga/icinga2/issues/6177) (Documentation, PR): Doc: Fix typo in API user creation example
+* [#6176](https://github.com/icinga/icinga2/issues/6176) (Documentation, PR): hashed\_password -\> password\_hash. Fixes \#6175
+* [#6175](https://github.com/icinga/icinga2/issues/6175) (Documentation): ApiUser does not know hashed\_password Attribute
+* [#6166](https://github.com/icinga/icinga2/issues/6166) (Documentation, PR): Fix broken link in README
+* [#6145](https://github.com/icinga/icinga2/issues/6145) (Documentation, PR): Fix incorrect parameter name in the API documentation
+* [#6102](https://github.com/icinga/icinga2/issues/6102) (Documentation, PR): Fix typo in Apply for Rules documentation
+* [#6080](https://github.com/icinga/icinga2/issues/6080) (Documentation, PR): Document the 'ignore\_on\_error' attribute for object creation
+* [#6068](https://github.com/icinga/icinga2/issues/6068) (Documentation, PR): Fix the explanation of `types` and `states` for user objects
+* [#5913](https://github.com/icinga/icinga2/issues/5913) (Documentation, ITL, PR): Enhance http\_certificate parameter documentation
+* [#5838](https://github.com/icinga/icinga2/issues/5838) (Documentation, PR): services.conf has also been moved to zones.d/global-templates/
+* [#5797](https://github.com/icinga/icinga2/issues/5797) (Documentation): Document the ignore\_on\_error parameter for CreateObjectHandler::HandleRequest
+* [#5610](https://github.com/icinga/icinga2/issues/5610) (Documentation, ITL): http check doesn't map the critical ssl certificate age option
+
+### Support
+
+* [#6250](https://github.com/icinga/icinga2/issues/6250) (PR): Fix typo
+* [#6241](https://github.com/icinga/icinga2/issues/6241) (Packages, PR): Fix Sysconfig file detection for Icinga 2 settings
+* [#6230](https://github.com/icinga/icinga2/issues/6230) (PR): Unbreak build against Boost 1.67
+* [#6215](https://github.com/icinga/icinga2/issues/6215) (Configuration, Packages): Sysconfig limits and settings are not respected
+* [#6202](https://github.com/icinga/icinga2/issues/6202) (Packages, code-quality, PR): Use VERSION instead of icinga2.spec
+
+## 2.8.2 (2018-03-22)
+
+### Notes
+
+A bugfix release with a focus on security.
+
+Most of these have been brought to our attention by the community, and we are very thankful for that. Special thanks to Michael H., Julian and Michael O., who helped by reporting security bugs and assisting us in fixing them. CVEs have also been requested for these issues; they are as follows: CVE-2017-16933, CVE-2018-6532, CVE-2018-6533, CVE-2018-6534, CVE-2018-6535, CVE-2018-6536.
+
+### Enhancement
+
+* [#5715](https://github.com/icinga/icinga2/issues/5715) (API, PR): Hash API password and comparison
+
+### Bug
+
+* [#6153](https://github.com/icinga/icinga2/issues/6153) (API, PR): Improve error handling for empty packages in /v1/config/packages
+* [#6147](https://github.com/icinga/icinga2/issues/6147) (PR): Fix incorrect argument type for JsonRpc::SendMessage
+* [#6146](https://github.com/icinga/icinga2/issues/6146) (PR): Ensure that SetCorked\(\) works properly
+* [#6134](https://github.com/icinga/icinga2/issues/6134) (PR): Fix incorrect HTTP content length limits
+* [#6133](https://github.com/icinga/icinga2/issues/6133) (PR): Limit the number of HTTP/JSON-RPC requests we read in parallel
+* [#6132](https://github.com/icinga/icinga2/issues/6132) (PR): Fix HTTP parser crash/hang
+* [#6129](https://github.com/icinga/icinga2/issues/6129): api/packages not created by prepare-dir/daemon
+* [#5995](https://github.com/icinga/icinga2/issues/5995) (InfluxDB, PR): Fix InfluxDB requests
+* [#5991](https://github.com/icinga/icinga2/issues/5991): Partial privilege escalation via PID file manipulation
+* [#5987](https://github.com/icinga/icinga2/issues/5987) (Elasticsearch, InfluxDB, Metrics): InfluxDBWriter and ElasticsearchWriter stop writing to HTTP API
+* [#5943](https://github.com/icinga/icinga2/issues/5943) (PR): Fix incorrect ::Start call
+* [#5793](https://github.com/icinga/icinga2/issues/5793): CVE-2017-16933: root privilege escalation via prepare-dirs \(init script and systemd service file\)
+* [#5760](https://github.com/icinga/icinga2/issues/5760) (Crash, PR): Fix incorrect socket handling for the HTTP client
+
+### Documentation
+
+* [#6172](https://github.com/icinga/icinga2/issues/6172) (Documentation, PR): Docs: Add a note to only query the NSClient++ API from the local Icinga 2 client
+* [#6111](https://github.com/icinga/icinga2/issues/6111) (Documentation, PR): Add Upgrading to Icinga 2.8.2 chapter
+* [#6089](https://github.com/icinga/icinga2/issues/6089) (Documentation, PR): Docs: Fix bracket in notification example
+* [#6086](https://github.com/icinga/icinga2/issues/6086) (Documentation, PR): Upgrading: Make it more clear that the Director script is just an example
+* [#6075](https://github.com/icinga/icinga2/issues/6075) (Documentation, PR): Explain how to register functions in the global scope
+* [#6014](https://github.com/icinga/icinga2/issues/6014) (Documentation, PR): Docs: Add IDO DB tuning tips
+* [#6006](https://github.com/icinga/icinga2/issues/6006) (Documentation, PR): Fix wrong nscp-local include in the docs
+
+### Support
+
+* [#6148](https://github.com/icinga/icinga2/issues/6148) (PR): Fix ApiUser unit test
+* [#6135](https://github.com/icinga/icinga2/issues/6135) (API, Cluster, PR): Limit JSON RPC message size
+* [#6115](https://github.com/icinga/icinga2/issues/6115) (PR): Fix incorrect size of request limits
+* [#6114](https://github.com/icinga/icinga2/issues/6114) (PR): Fix typo in prepare-dirs
+* [#6104](https://github.com/icinga/icinga2/issues/6104) (PR): Fix nullptr dereferences
+* [#6103](https://github.com/icinga/icinga2/issues/6103) (PR): HTTP Security fixes
+* [#5982](https://github.com/icinga/icinga2/issues/5982) (Packages, PR): SELinux: Allows icinga2\_t to send sigkill to all domains it transitions to
+* [#5916](https://github.com/icinga/icinga2/issues/5916) (Packages): Unable to kill process group after check timeout if SElinux is enabled
+* [#5850](https://github.com/icinga/icinga2/issues/5850) (Installation, PR): init script security fixes
+* [#5764](https://github.com/icinga/icinga2/issues/5764) (InfluxDB, code-quality, PR): Improve InfluxdbWriter performance
+* [#5759](https://github.com/icinga/icinga2/issues/5759) (code-quality, PR): Make default getters and setters non-virtual
+
+## 2.8.1 (2018-01-17)
+
+### Enhancement
+
+* [#5856](https://github.com/icinga/icinga2/issues/5856) (PR): Implement AppLocal deployment support for UCRT
+
+### Bug
+
+* [#5986](https://github.com/icinga/icinga2/issues/5986) (DB IDO, PR): Fix wrong schema constraint for fresh 2.8.0 installations
+* [#5947](https://github.com/icinga/icinga2/issues/5947) (DB IDO): Duplicate entry constraint violations in 2.8
+* [#5907](https://github.com/icinga/icinga2/issues/5907) (PR): Windows plugin check\_swap build fix
+* [#5808](https://github.com/icinga/icinga2/issues/5808) (Crash, PR): Fix missing variable name which can lead to segfaults
+* [#5807](https://github.com/icinga/icinga2/issues/5807) (Crash): icinga v2.8.0 crashes frequently with "segmentation fault" on Debian 8.9
+* [#5804](https://github.com/icinga/icinga2/issues/5804) (Log, PR): Silence UpdateRepository message errors
+* [#5776](https://github.com/icinga/icinga2/issues/5776) (Cluster, Log): 2.8.0: warning/JsonRpcConnection: Call to non-existent function 'event::UpdateRepository'
+* [#5746](https://github.com/icinga/icinga2/issues/5746) (Livestatus, PR): livestatus: custom variables return empty arrays instead of strings
+* [#5716](https://github.com/icinga/icinga2/issues/5716) (Livestatus, PR): add bogus zero reply in livestatus when aggregate and non matching filter
+* [#5626](https://github.com/icinga/icinga2/issues/5626) (Livestatus, help wanted): Empty result set with non-matching filters in Livestatus stats query
+
+### ITL
+
+* [#5785](https://github.com/icinga/icinga2/issues/5785) (ITL, PR): ITL: Drop ssl\_sni default setting
+* [#5775](https://github.com/icinga/icinga2/issues/5775) (ITL): Default usage of ssl\_sni in check\_tcp
+
+### Documentation
+
+* [#5972](https://github.com/icinga/icinga2/issues/5972) (Documentation, PR): Update 08-advanced-topics.md
+* [#5942](https://github.com/icinga/icinga2/issues/5942) (Documentation, PR): Add some technical insights into the cluster-zone health check and log lag
+* [#5922](https://github.com/icinga/icinga2/issues/5922) (Documentation, PR): Fix link format in documentation
+* [#5918](https://github.com/icinga/icinga2/issues/5918) (Documentation, PR): Fix typo in SELinux documentation
+* [#5911](https://github.com/icinga/icinga2/issues/5911) (Documentation, PR): Update ElasticsearchWriter docs for 5.x support only
+* [#5866](https://github.com/icinga/icinga2/issues/5866) (Documentation, PR): Remove redundant FreeBSD from restart instructions and add openSUSE
+* [#5864](https://github.com/icinga/icinga2/issues/5864) (Documentation, PR): Add missing initdb to PostgreSQL documentation
+* [#5835](https://github.com/icinga/icinga2/issues/5835) (Documentation, PR): Fixes postgres schema upgrade path
+* [#5833](https://github.com/icinga/icinga2/issues/5833) (Documentation, PR): fix formatting error
+* [#5790](https://github.com/icinga/icinga2/issues/5790) (Documentation, PR): Documentation fixes
+* [#5783](https://github.com/icinga/icinga2/issues/5783) (Documentation, PR): Fix formatting in value types docs
+* [#5773](https://github.com/icinga/icinga2/issues/5773) (Documentation, Windows, PR): Update Windows Client requirements for 2.8
+* [#5757](https://github.com/icinga/icinga2/issues/5757) (Documentation, PR): Add documentation about automatic service restarts with systemd
+
+### Support
+
+* [#5989](https://github.com/icinga/icinga2/issues/5989) (PR): changelog.py: Adjust categories and labels: Enhancement, Bug, ITL, Documentation, Support
+* [#5938](https://github.com/icinga/icinga2/issues/5938) (Packages, Windows): chocolatey outdated version
+* [#5893](https://github.com/icinga/icinga2/issues/5893) (code-quality, PR): Whitespace fix
+* [#5892](https://github.com/icinga/icinga2/issues/5892) (Installation, PR): Enable installing the init scripts on Solaris
+* [#5851](https://github.com/icinga/icinga2/issues/5851) (Plugins, Windows, PR): Fix check\_service returning Warning instead of Critical
+* [#5780](https://github.com/icinga/icinga2/issues/5780) (Packages, Windows): Icinga Agent Windows 2.8.0 msvcr120.dll is missing
+
+## 2.8.0 (2017-11-16)
+
+### Notes
+
+* Certificate path changed to /var/lib/icinga2/certs - check the upgrading docs!
+* DB IDO 2.8.0 schema upgrade
+* Cluster/Clients: Forward certificate signing requests over multiple levels
+* Cluster/Clients: Support on-demand signing next to ticket based certificate request signing
+* New flapping detection algorithm
+* Add ElasticsearchWriter feature with HTTP proxy support (see the config sketch after this list)
+* Add CORS support for the REST API
+* Deprecate `flapping_threshold` config option
+* Remove client configuration mode "bottom up"
+* Remove classicui meta configuration package
+* Remove deprecated `enable_legacy_mode` in Graphite feature
+* Spec file was moved to https://github.com/icinga/icinga-packaging
+* ITL CheckCommand definition updates
+* Documentation updates
+
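+The ElasticsearchWriter and CORS entries above can be illustrated with a minimal, hedged configuration sketch. Attribute names reflect the 2.8 feature documentation; host, port, index and origin values are placeholders and should be adapted from the feature docs before use:
+
+```
+// Minimal ElasticsearchWriter sketch (usually enabled via `icinga2 feature enable elasticsearch`).
+object ElasticsearchWriter "elasticsearch" {
+  host = "127.0.0.1"
+  port = 9200
+  index = "icinga2"
+  enable_send_perfdata = true
+}
+
+// CORS for the REST API: allowed origins are configured on the ApiListener
+// (typically in /etc/icinga2/features-available/api.conf).
+object ApiListener "api" {
+  access_control_allow_origin = [ "https://icingaweb2.example.com" ]
+}
+```
+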
+### Enhancement
+
+* [#5682](https://github.com/icinga/icinga2/issues/5682) (Cluster, Configuration, PR): Implement support for migrating certificates to /var/lib/icinga2/certs
+* [#5681](https://github.com/icinga/icinga2/issues/5681) (CLI, Cluster, Windows): Update Windows wizard from enhanced CSR signing \(optional ticket\)
+* [#5679](https://github.com/icinga/icinga2/issues/5679) (CLI, Cluster): Migration path for improved certificate signing in the cluster
+* [#5606](https://github.com/icinga/icinga2/issues/5606) (Cluster, PR): Remove bottom-up client mode
+* [#5602](https://github.com/icinga/icinga2/issues/5602) (Windows, PR): Add windows process elevation and log message if user does not have privileges to read/write files
+* [#5587](https://github.com/icinga/icinga2/issues/5587) (Log, PR): SyslogLogger: Implement option to set syslog facility
+* [#5580](https://github.com/icinga/icinga2/issues/5580) (Configuration, PR): Implement new script functions: path\_exists, glob and glob\_recursive (see the sketch after this list)
+* [#5571](https://github.com/icinga/icinga2/issues/5571) (CLI, Cluster, PR): Implement support for forwarding certificate signing requests in the cluster
+* [#5569](https://github.com/icinga/icinga2/issues/5569) (Metrics, PR): ElasticWriter: Add basic auth and TLS support for Elasticsearch behind an HTTP proxy
+* [#5554](https://github.com/icinga/icinga2/issues/5554) (API, Cluster, PR): Add subjectAltName extension for all non-CA certificates
+* [#5547](https://github.com/icinga/icinga2/issues/5547) (API, PR): Add optional reload parameter to config stage upload
+* [#5538](https://github.com/icinga/icinga2/issues/5538) (Metrics): Add ElasticsearchWriter feature
+* [#5534](https://github.com/icinga/icinga2/issues/5534) (Configuration, PR): Implement get\_services\(host {name,object}\) and add host object support for get\_service\(\)
+* [#5527](https://github.com/icinga/icinga2/issues/5527) (API, PR): API: Add execution\_{start,end} attribute to 'process-check-result' action
+* [#5450](https://github.com/icinga/icinga2/issues/5450) (CLI, Cluster): Enhance CSR Autosigning \(CA proxy, etc.\)
+* [#5443](https://github.com/icinga/icinga2/issues/5443) (API, PR): Add CORS support and set response header 'Access-Control-Allow-Origin'
+* [#5435](https://github.com/icinga/icinga2/issues/5435) (Plugins, Windows, PR): Add -d option to check\_service
+* [#5002](https://github.com/icinga/icinga2/issues/5002) (API, wishlist): API process-check-result allow setting timestamp
+* [#4912](https://github.com/icinga/icinga2/issues/4912) (Configuration): new function get\_services\(host\_name\)
+* [#4799](https://github.com/icinga/icinga2/issues/4799) (Cluster): Remove cluster/client mode "bottom up" w/ repository.d and node update-config
+* [#4769](https://github.com/icinga/icinga2/issues/4769) (API): Validate and activate config package stages without triggering a reload
+* [#4326](https://github.com/icinga/icinga2/issues/4326) (API): API should provide CORS Header
+* [#3891](https://github.com/icinga/icinga2/issues/3891) (Plugins): Add option to specify ServiceDescription instead of ServiceName with check\_service.exe
+
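+A short, hypothetical `icinga2 console` sketch for the new script functions referenced in #5580 and #5534. The host name and paths are placeholders, and the argument order for `glob_recursive` is an assumption, so consult the library reference before relying on it:
+
+```
+var host = get_host("icinga2-master")     // placeholder host name
+var services = get_services(host)         // 2.8.0 also accepts the host name as a string
+
+if (path_exists("/etc/icinga2/zones.d")) {
+  // Assumed signature: glob_recursive(path, pattern)
+  var configs = glob_recursive("/etc/icinga2/zones.d", "*.conf")
+  log(LogInformation, "demo", "Found " + String(configs.len()) + " config files")
+}
+```
+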
+### Bug
+
+* [#5728](https://github.com/icinga/icinga2/issues/5728) (Plugins, Windows, PR): Fix check\_service not working with names
+* [#5720](https://github.com/icinga/icinga2/issues/5720) (Check Execution): Flapping tests and bugs
+* [#5710](https://github.com/icinga/icinga2/issues/5710) (CLI, Configuration, PR): Include default global zones during node wizard/setup
+* [#5707](https://github.com/icinga/icinga2/issues/5707) (CLI): node wizard/setup override zones.conf but do not include default global zones \(director-global, global-templates\)
+* [#5696](https://github.com/icinga/icinga2/issues/5696) (PR): Fix fork error handling
+* [#5641](https://github.com/icinga/icinga2/issues/5641) (PR): Fix compiler warnings on macOS 10.13
+* [#5635](https://github.com/icinga/icinga2/issues/5635) (Configuration, PR): Fix match\(\), regex\(\), cidr\_match\(\) behaviour with MatchAll and empty arrays
+* [#5634](https://github.com/icinga/icinga2/issues/5634) (Configuration): match\(\) for arrays returns boolean true if array is empty
+* [#5620](https://github.com/icinga/icinga2/issues/5620) (API, PR): Ensure that the REST API config package/stage creation is atomic
+* [#5617](https://github.com/icinga/icinga2/issues/5617): Crash with premature EOF on resource limited OS
+* [#5614](https://github.com/icinga/icinga2/issues/5614) (PR): Fixed missing include statement in unit tests
+* [#5584](https://github.com/icinga/icinga2/issues/5584) (Windows): Build error on Windows
+* [#5581](https://github.com/icinga/icinga2/issues/5581) (API, Cluster, Crash, PR): Fix possible race condition in ApiListener locking
+* [#5558](https://github.com/icinga/icinga2/issues/5558) (API, PR): Don't send scheme and hostname in request
+* [#5515](https://github.com/icinga/icinga2/issues/5515) (Windows): Config validation fails on Windows with unprivileged account
+* [#5500](https://github.com/icinga/icinga2/issues/5500) (Crash, PR): Process: Fix JSON parsing error on process helper crash
+* [#5497](https://github.com/icinga/icinga2/issues/5497) (API, PR): API: Fix requested attrs/joins/meta type errors in object query response
+* [#5485](https://github.com/icinga/icinga2/issues/5485) (DB IDO, PR): Ensure that expired/removed downtimes/comments are correctly updated in DB IDO
+* [#5377](https://github.com/icinga/icinga2/issues/5377) (API, Log): Sending wrong value for key causes ugly stacktrace
+* [#5231](https://github.com/icinga/icinga2/issues/5231) (Check Execution, PR): Report failure to kill check command after exceeding timeout
+* [#4981](https://github.com/icinga/icinga2/issues/4981) (Check Execution): Failure to kill check command after exceeding timeout is not reported
+
+### ITL
+
+* [#5678](https://github.com/icinga/icinga2/issues/5678) (ITL, PR): Added missing "-q" parameter to check\_ntp\_peer
+* [#5672](https://github.com/icinga/icinga2/issues/5672) (ITL, PR): add itl snmp-service for manubulon plugin check\_snmp\_win.pl
+* [#5647](https://github.com/icinga/icinga2/issues/5647) (ITL, PR): Allow to disable thresholds for ipmi CheckCommand
+* [#5640](https://github.com/icinga/icinga2/issues/5640) (ITL, PR): ITL: Support weathermap data in snmp\_interface CheckCommand
+* [#5638](https://github.com/icinga/icinga2/issues/5638) (ITL, PR): Add support for check\_address as default in database CheckCommand objects
+* [#5578](https://github.com/icinga/icinga2/issues/5578) (ITL, PR): ITL: Re-Add ssl\_sni attribute for check\_tcp
+* [#5577](https://github.com/icinga/icinga2/issues/5577) (ITL): ssl CheckCommand does not support SNI
+* [#5570](https://github.com/icinga/icinga2/issues/5570) (ITL, PR): check\_esxi\_hardware.py with new --no-lcd parameter
+* [#5559](https://github.com/icinga/icinga2/issues/5559) (ITL, PR): Exclude configfs from disk checks
+* [#5427](https://github.com/icinga/icinga2/issues/5427) (ITL): Update negate CheckCommand definition
+* [#5401](https://github.com/icinga/icinga2/issues/5401) (ITL, PR): itl: Add manubulon/check\_snmp\_env.pl as CheckCommand snmp-env
+* [#5394](https://github.com/icinga/icinga2/issues/5394) (ITL, PR): itl: add additional mssql\_health arguments
+* [#5387](https://github.com/icinga/icinga2/issues/5387) (ITL, PR): Add missing options to snmp CheckCommand definition
+
+### Documentation
+
+* [#5768](https://github.com/icinga/icinga2/issues/5768) (Documentation, PR): Update .mailmap and AUTHORS
+* [#5761](https://github.com/icinga/icinga2/issues/5761) (Documentation, PR): Fix wrong anchors in the documentation
+* [#5755](https://github.com/icinga/icinga2/issues/5755) (Documentation, PR): Fix missing Accept header in troubleshooting docs
+* [#5754](https://github.com/icinga/icinga2/issues/5754) (Documentation, PR): Improve documentation of cipher\_list
+* [#5752](https://github.com/icinga/icinga2/issues/5752) (Documentation, PR): Add Noah Hilverling to .mailmap
+* [#5748](https://github.com/icinga/icinga2/issues/5748) (Documentation, PR): Fix missing word in pin checks in a zone doc chapter
+* [#5741](https://github.com/icinga/icinga2/issues/5741) (Documentation, PR): Fix manual certificate creation chapter in the docs
+* [#5738](https://github.com/icinga/icinga2/issues/5738) (Documentation, PR): Update release docs
+* [#5734](https://github.com/icinga/icinga2/issues/5734) (Documentation, PR): Fix broken links inside the documentation
+* [#5727](https://github.com/icinga/icinga2/issues/5727) (Documentation, PR): Update upgrading documentation for 2.8
+* [#5708](https://github.com/icinga/icinga2/issues/5708) (Documentation, PR): Fixed grammar and spelling mistakes
+* [#5703](https://github.com/icinga/icinga2/issues/5703) (Documentation): Minor documentation typos in flapping detection description
+* [#5695](https://github.com/icinga/icinga2/issues/5695) (Documentation, PR): Enhance Security chapter for Distributed Monitoring documentation
+* [#5691](https://github.com/icinga/icinga2/issues/5691) (Documentation, PR): Fixed doc formatting
+* [#5690](https://github.com/icinga/icinga2/issues/5690) (Documentation): Improve documentation of cipher\_list
+* [#5688](https://github.com/icinga/icinga2/issues/5688) (Documentation, PR): Fixed typos and punctuation
+* [#5680](https://github.com/icinga/icinga2/issues/5680) (Documentation): Review documentation for enhanced CSR signing and update migration chapter for 2.8
+* [#5677](https://github.com/icinga/icinga2/issues/5677) (Documentation, PR): Fix typo in threshold syntax documentation
+* [#5668](https://github.com/icinga/icinga2/issues/5668) (Documentation, PR): Enhance Monitoring Basics in the documentation
+* [#5667](https://github.com/icinga/icinga2/issues/5667) (Documentation): Explain which values can be used for set\_if in command arguments
+* [#5666](https://github.com/icinga/icinga2/issues/5666) (Documentation): Explain the notification with users defined on host/service in a dedicated docs chapter
+* [#5665](https://github.com/icinga/icinga2/issues/5665) (Documentation): Better explanations and iteration details for "apply for" documentation
+* [#5664](https://github.com/icinga/icinga2/issues/5664) (Documentation): Add usage examples to the "apply" chapter based on custom attribute values
+* [#5663](https://github.com/icinga/icinga2/issues/5663) (Documentation): Explain custom attribute value types and nested dictionaries
+* [#5662](https://github.com/icinga/icinga2/issues/5662) (Documentation): Explain how to use a different host check command
+* [#5655](https://github.com/icinga/icinga2/issues/5655) (Documentation, PR): Enhance documentation with more details on value types for object attributes
+* [#5576](https://github.com/icinga/icinga2/issues/5576) (Documentation, PR): Fixed downtime example in documentation
+* [#5568](https://github.com/icinga/icinga2/issues/5568) (Documentation, PR): Add documentation for multi-line plugin output for API actions
+* [#5511](https://github.com/icinga/icinga2/issues/5511) (Cluster, Documentation, Windows): SSL errors with leading zeros in certificate serials \(created \< v2.4\) with OpenSSL 1.1.0
+* [#5379](https://github.com/icinga/icinga2/issues/5379) (Documentation, PR): Set shell prompt for commands to be \#
+* [#5186](https://github.com/icinga/icinga2/issues/5186) (Documentation): Document boolean values understood by set\_if
+* [#5060](https://github.com/icinga/icinga2/issues/5060) (Documentation): Missing documentation for macro\(\)
+* [#4015](https://github.com/icinga/icinga2/issues/4015) (Documentation): Add documentation for host state calculation from plugin exit codes
+
+### Support
+
+* [#5765](https://github.com/icinga/icinga2/issues/5765) (Configuration, PR): Fix default configuration example for ElasticsearchWriter
+* [#5739](https://github.com/icinga/icinga2/issues/5739) (Metrics, PR): Rename ElasticWriter to ElasticsearchWriter
+* [#5732](https://github.com/icinga/icinga2/issues/5732) (Check Execution, DB IDO, PR): Fix flapping calculation and events
+* [#5730](https://github.com/icinga/icinga2/issues/5730) (PR): Add missing trims to GetMasterHostPort and remove Convert.ToString from variables that are strings already
+* [#5719](https://github.com/icinga/icinga2/issues/5719) (Cluster, Installation, Windows, PR): Update Windows Wizard for 2.8 and new signing methods
+* [#5687](https://github.com/icinga/icinga2/issues/5687) (Cluster, Log, PR): Improve error message for unknown cluster message functions
+* [#5686](https://github.com/icinga/icinga2/issues/5686) (Log): Ugly stacktrace with mismatching versions in cluster
+* [#5643](https://github.com/icinga/icinga2/issues/5643) (PR): Fix debug builds on Apple Clang 9.0.0 \(macOS High Sierra\)
+* [#5637](https://github.com/icinga/icinga2/issues/5637) (InfluxDB, PR): Fix unnecessary String\(\) casts in InfluxdbWriter
+* [#5629](https://github.com/icinga/icinga2/issues/5629) (InfluxDB, Metrics, code-quality): Remove the unnecessary String\(\) casts in influxdbwriter.cpp
+* [#5624](https://github.com/icinga/icinga2/issues/5624) (PR): Fixed missing include statement in unit test
+* [#5619](https://github.com/icinga/icinga2/issues/5619) (Packages, PR): Exit early in changelog.py if GitHub API fetch fails
+* [#5616](https://github.com/icinga/icinga2/issues/5616) (PR): Fix a build warning
+* [#5608](https://github.com/icinga/icinga2/issues/5608) (CLI, Cluster, PR): Fix certificate paths for installers
+* [#5604](https://github.com/icinga/icinga2/issues/5604) (Packages, PR): Remove the icinga2-classicui-package and update documentation
+* [#5601](https://github.com/icinga/icinga2/issues/5601) (Installation, Packages, PR): Ensure that the cache directory always is set and add a note to upgrading docs
+* [#5563](https://github.com/icinga/icinga2/issues/5563) (Cluster, PR): Implement additional logging for the JsonRpc class
+* [#5545](https://github.com/icinga/icinga2/issues/5545) (Installation, Windows, PR): Add Edit button to Windows Setup Wizard
+* [#5488](https://github.com/icinga/icinga2/issues/5488) (code-quality, PR): Implement additional functions for printing values with LLDB/GDB
+* [#5486](https://github.com/icinga/icinga2/issues/5486) (Graphite, PR): Graphite: Remove deprecated legacy schema mode
+* [#5301](https://github.com/icinga/icinga2/issues/5301) (Installation, Packages): Remove the icinga2-classicui-config package
+* [#5258](https://github.com/icinga/icinga2/issues/5258) (Installation, PR): Fix clang compiler detection on Fedora and macOS
+* [#4992](https://github.com/icinga/icinga2/issues/4992) (Graphite): Remove deprecated GraphiteWriter feature enable\_legacy\_mode
+* [#4982](https://github.com/icinga/icinga2/issues/4982) (Notifications, Tests): Verify and fix flapping detection
+
+## 2.7.2 (2017-11-09)
+
+### Notes
+
+* Fixed invalid attribute names in the systemd unit file
+* Fixed incorrect unique constraint for IDO DB
+* Moved spec file to the icinga-packaging Git repository
+* Documentation updates
+
+### Bug
+
+* [#5636](https://github.com/icinga/icinga2/issues/5636) (DB IDO, PR): Fix unique constraint matching for UPDATE downtime/comment runtime tables in DB IDO
+* [#5623](https://github.com/icinga/icinga2/issues/5623) (DB IDO): Duplicate Key on MySQL after upgrading to v2.7.1
+* [#5603](https://github.com/icinga/icinga2/issues/5603) (DB IDO): Icinga 2.7.1 IDO Unique Key Constraint Violation with PostgreSQL
+
+### Documentation
+
+* [#5653](https://github.com/icinga/icinga2/issues/5653) (Documentation, PR): Docs: Fix default value for `snmp\_nocrypt` for Manubulon CheckCommand definitions
+* [#5652](https://github.com/icinga/icinga2/issues/5652) (Documentation, PR): Docs: Fix missing default value for cluster-zone checks
+* [#5632](https://github.com/icinga/icinga2/issues/5632) (Documentation, PR): Docs: Mention SELinux in Getting Started chapter
+
+### Support
+
+* [#5736](https://github.com/icinga/icinga2/issues/5736) (Packages, PR): Remove spec file
+* [#5612](https://github.com/icinga/icinga2/issues/5612) (Documentation, Packages, PR): Improve documentation and systemd config on TaskMax
+
+## 2.7.1 (2017-09-21)
+
+### Notes
+
+* Fixes and upgrade documentation for the notification scripts introduced in 2.7.0
+* The InfluxdbWriter attribute `socket_timeout` introduced in 2.7.0 was deprecated (it will be removed in 2.8.0). Details in #5469 and #5460
+* Livestatus bygroup table stats fixes for NagVis
+* DB IDO: Fixes for downtime/comment history queries not correctly updating the end time
+* check_nscp_api allows white spaces in arguments
+* Bugfixes
+* Documentation updates
+
+### Enhancement
+
+* [#5523](https://github.com/icinga/icinga2/issues/5523) (Cluster, Log, PR): Enhance client connect/sync logging and include bytes/zone in logs
+* [#5474](https://github.com/icinga/icinga2/issues/5474) (Notifications, PR): Notification scripts - make HOSTADDRESS optional
+* [#5468](https://github.com/icinga/icinga2/issues/5468) (Notifications, PR): Make notification mails more readable. Remove redundancy and cruft.
+
+### Bug
+
+* [#5585](https://github.com/icinga/icinga2/issues/5585) (DB IDO, PR): Fix where clause for non-matching {downtime,comment}history IDO database updates
+* [#5566](https://github.com/icinga/icinga2/issues/5566) (Cluster, Log, PR): Logs: Change config sync update to highlight an information, not an error
+* [#5539](https://github.com/icinga/icinga2/issues/5539) (Plugins, Windows, PR): check\_nscp\_api: Allow arguments containing spaces
+* [#5537](https://github.com/icinga/icinga2/issues/5537) (Plugins): check\_nscp\_api: support spaces in query arguments
+* [#5524](https://github.com/icinga/icinga2/issues/5524) (Cluster, PR): Change FIFO::Optimize\(\) frequency for large messages
+* [#5513](https://github.com/icinga/icinga2/issues/5513) (Cluster): Node in Cluster loses connection
+* [#5504](https://github.com/icinga/icinga2/issues/5504) (InfluxDB, PR): Fix TLS Race Connecting to InfluxDB
+* [#5503](https://github.com/icinga/icinga2/issues/5503) (Livestatus, PR): Fix grouping for Livestatus queries with 'Stats'
+* [#5502](https://github.com/icinga/icinga2/issues/5502) (Notifications, PR): Fix duplicate variable in notification scripts
+* [#5495](https://github.com/icinga/icinga2/issues/5495) (Notifications, PR): Fix parameter order for AcknowledgeSvcProblem / AcknowledgeHostProblem / apiactions:AcknowledgeProblem
+* [#5492](https://github.com/icinga/icinga2/issues/5492) (DB IDO): Comments may not be removed correctly
+* [#5484](https://github.com/icinga/icinga2/issues/5484) (Log): Timestamp comparison of config files logs a wrong message
+* [#5483](https://github.com/icinga/icinga2/issues/5483) (DB IDO, PR): Fix config validation for DB IDO categories 'DbCatEverything'
+* [#5469](https://github.com/icinga/icinga2/issues/5469) (InfluxDB): Failure to connect to InfluxDB increases CPU utilisation by 100% for every failure
+* [#5466](https://github.com/icinga/icinga2/issues/5466) (DB IDO, PR): DB IDO: Fix host's unreachable state in history tables
+* [#5460](https://github.com/icinga/icinga2/issues/5460) (InfluxDB): Icinga 2.7 InfluxdbWriter fails to write metrics to InfluxDB over HTTPS
+* [#5458](https://github.com/icinga/icinga2/issues/5458) (DB IDO): IDO downtimehistory records orphaned from scheduleddowntime records following restart
+* [#5405](https://github.com/icinga/icinga2/issues/5405) (DB IDO): IDO statehistory table does not show hosts going to "UNREACHABLE" state.
+* [#5078](https://github.com/icinga/icinga2/issues/5078) (Compat, Livestatus): Livestatus hostsbygroup and servicesbyhostgroup do not work
+
+### ITL
+
+* [#5543](https://github.com/icinga/icinga2/issues/5543) (ITL, PR): ITL: Correct arguments for ipmi-sensor CheckCommand
+
+### Documentation
+
+* [#5594](https://github.com/icinga/icinga2/issues/5594) (Documentation, PR): Docs: Enhance certificate and configuration troubleshooting chapter
+* [#5593](https://github.com/icinga/icinga2/issues/5593) (Documentation, PR): Docs: Add a note for upgrading to 2.7
+* [#5583](https://github.com/icinga/icinga2/issues/5583) (Documentation, PR): Docs: Add example for Windows service monitoring with check\_nscp\_api
+* [#5582](https://github.com/icinga/icinga2/issues/5582) (Documentation, PR): Docs: Add firewall details for check\_nscp\_api
+* [#5549](https://github.com/icinga/icinga2/issues/5549) (Documentation, PR): Fix cli command used to enable debuglog feature on windows
+* [#5536](https://github.com/icinga/icinga2/issues/5536) (Documentation, PR): Fixed nscp-disk service example
+* [#5522](https://github.com/icinga/icinga2/issues/5522) (Documentation, PR): Docs: Update freshness checks; add chapter for external check results
+* [#5516](https://github.com/icinga/icinga2/issues/5516) (Documentation, PR): Updates the install dependencies for Debian 9 'stretch'
+* [#5506](https://github.com/icinga/icinga2/issues/5506) (Documentation, PR): Docs: Fix wrong parameter for ITL CheckCommand nscp\_api
+* [#5496](https://github.com/icinga/icinga2/issues/5496) (Documentation, PR): Docs: Update examples for match/regex/cidr\_match and mode for arrays \(Match{All,Any}\)
+* [#5494](https://github.com/icinga/icinga2/issues/5494) (Documentation, PR): Docs: Add section for multiple template imports
+* [#5491](https://github.com/icinga/icinga2/issues/5491) (Documentation, PR): Update "Getting Started" documentation with Alpine Linux
+* [#5487](https://github.com/icinga/icinga2/issues/5487) (Documentation, PR): Docs: Enhance Troubleshooting with nscp-local, check\_source, wrong thresholds
+* [#5476](https://github.com/icinga/icinga2/issues/5476) (Documentation, PR): Docs: Fix ITL chapter TOC; add introduction with mini TOC
+* [#5475](https://github.com/icinga/icinga2/issues/5475) (Documentation, PR): Docs: Add a note on required configuration updates for new notification scripts in v2.7.0
+* [#5461](https://github.com/icinga/icinga2/issues/5461) (Documentation, PR): Update Icinga repository release rpm location
+* [#5457](https://github.com/icinga/icinga2/issues/5457) (Documentation, PR): Add Changelog generation script for GitHub API
+* [#5428](https://github.com/icinga/icinga2/issues/5428) (Documentation): "Plugin Check Commands" section inside ITL docs needs adjustments
+
+### Support
+
+* [#5599](https://github.com/icinga/icinga2/issues/5599) (PR): changelog.py: Add "backported" to the list of ignored labels
+* [#5590](https://github.com/icinga/icinga2/issues/5590) (Cluster, Log, PR): Silence log level for configuration file updates
+* [#5529](https://github.com/icinga/icinga2/issues/5529) (Log, PR): Change two more loglines for checkables so checkable is quoted
+* [#5528](https://github.com/icinga/icinga2/issues/5528) (Log, PR): Change loglines for checkables so checkable is quoted
+* [#5501](https://github.com/icinga/icinga2/issues/5501) (Installation, Packages, PR): SELinux: fixes for 2.7.0
+* [#5479](https://github.com/icinga/icinga2/issues/5479) (Packages): Icinga2 2.7.0 requires SELinux boolean icinga2\_can\_connect\_all on CentOS 7 even for default port
+* [#5477](https://github.com/icinga/icinga2/issues/5477) (Installation, Packages, PR): Systemd: Add DefaultTasksMax=infinity to service file
+* [#5392](https://github.com/icinga/icinga2/issues/5392) (Packages, PR): Ensure the cache directory exists
+* [#4918](https://github.com/icinga/icinga2/issues/4918) (Packages): cgroup: fork rejected by pids controller in /system.slice/icinga2.service
+* [#4414](https://github.com/icinga/icinga2/issues/4414) (Packages): /usr/lib/icinga2/prepare-dirs does not create /var/cache/icinga2
+
+## 2.7.0 (2017-08-02)
+
+### Notes
+
+* New mail notification scripts. Please note that this requires configuration updates to NotificationCommand objects, to Notification apply rules for specific settings and, of course, to the notification scripts themselves. More details can be found [here](https://github.com/Icinga/icinga2/pull/5475).
+* check_nscp_api plugin for NSClient++ REST API checks
+* Work queues for features including logs & metrics
+* More metrics for the "icinga" check (see the example after this list)
+* Many bugfixes
+
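+For the "icinga" check metrics noted above, a minimal sketch follows; the host name is a placeholder. Since 2.7.0 the built-in `icinga` CheckCommand additionally exposes feature and work-queue statistics as performance data:
+
+```
+// Self-monitoring service using the built-in "icinga" CheckCommand.
+object Service "icinga" {
+  host_name = "icinga2-master"
+  check_command = "icinga"
+}
+```
+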
+### Enhancement
+
+* [#5421](https://github.com/icinga/icinga2/issues/5421) (Plugins, Windows, PR): Windows Plugins: Add new parameter to check\_disk to show used space
+* [#5348](https://github.com/icinga/icinga2/issues/5348) (Configuration, PR): Implement support for handling exceptions in user scripts
+* [#5331](https://github.com/icinga/icinga2/issues/5331) (Graylog, PR): GelfWriter: Add 'check\_command' to CHECK RESULT/\* NOTIFICATION/STATE CHANGE messages
+* [#5330](https://github.com/icinga/icinga2/issues/5330) (Graphite, PR): GraphiteWriter: Add 'connected' to stats; fix reconnect exceptions
+* [#5329](https://github.com/icinga/icinga2/issues/5329) (Graylog, PR): GelfWriter: Use async work queue and add feature metric stats
+* [#5320](https://github.com/icinga/icinga2/issues/5320) (Configuration, PR): zones.conf: Add global-templates & director-global by default
+* [#5287](https://github.com/icinga/icinga2/issues/5287) (Graphite, InfluxDB, Metrics, PR): Use workqueues in Graphite and InfluxDB features
+* [#5284](https://github.com/icinga/icinga2/issues/5284) (Check Execution, PR): Add feature stats to 'icinga' check as performance data metrics
+* [#5280](https://github.com/icinga/icinga2/issues/5280) (API, Cluster, Log, PR): Implement WorkQueue metric stats and periodic logging
+* [#5266](https://github.com/icinga/icinga2/issues/5266) (API, Cluster, PR): Add API & Cluster metric stats to /v1/status & icinga check incl. performance data
+* [#5264](https://github.com/icinga/icinga2/issues/5264) (Configuration, PR): Implement new array match functionality
+* [#5247](https://github.com/icinga/icinga2/issues/5247) (Log, PR): Add target object in cluster error messages to debug log
+* [#5246](https://github.com/icinga/icinga2/issues/5246) (API, Cluster, PR): Add subjectAltName X509 ext for certificate requests
+* [#5242](https://github.com/icinga/icinga2/issues/5242) (Configuration, PR): Allow expressions for the type in object/template declarations
+* [#5241](https://github.com/icinga/icinga2/issues/5241) (InfluxDB, PR): Verbose InfluxDB Error Logging
+* [#5239](https://github.com/icinga/icinga2/issues/5239) (Plugins, Windows, PR): Add NSCP API check plugin for NSClient++ HTTP API
+* [#5212](https://github.com/icinga/icinga2/issues/5212) (Cluster, Log): Add additional logging for config sync
+* [#5145](https://github.com/icinga/icinga2/issues/5145): Add a GitHub issue template
+* [#5133](https://github.com/icinga/icinga2/issues/5133) (API, wishlist): ApiListener: Metrics for cluster data
+* [#5106](https://github.com/icinga/icinga2/issues/5106) (Configuration): Add director-global as global zone to the default zones.conf configuration
+* [#4945](https://github.com/icinga/icinga2/issues/4945) (API, Log): No hint for missing permissions in Icinga2 log for API user
+* [#4925](https://github.com/icinga/icinga2/issues/4925): Update changelog generation scripts for GitHub
+* [#4411](https://github.com/icinga/icinga2/issues/4411) (InfluxDB, Log, Metrics): Better Debugging for InfluxdbWriter
+* [#4288](https://github.com/icinga/icinga2/issues/4288) (Cluster, Log): Add check information to the debuglog when check result is discarded
+* [#4242](https://github.com/icinga/icinga2/issues/4242) (Configuration): Default mail notification from header
+* [#3557](https://github.com/icinga/icinga2/issues/3557) (Log): Log started and stopped features
+
+### Bug
+
+* [#5433](https://github.com/icinga/icinga2/issues/5433) (CLI, PR): Fix: update feature list help text
+* [#5367](https://github.com/icinga/icinga2/issues/5367) (CLI, Crash): Unable to start icinga2 with kernel-3.10.0-514.21.2 RHEL7
+* [#5350](https://github.com/icinga/icinga2/issues/5350) (Plugins): check\_nscp\_api not building on Debian wheezy
+* [#5316](https://github.com/icinga/icinga2/issues/5316) (Livestatus, PR): Fix for stats min operator
+* [#5308](https://github.com/icinga/icinga2/issues/5308) (Configuration, PR): Improve validation for attributes which must not be 'null'
+* [#5297](https://github.com/icinga/icinga2/issues/5297) (PR): Fix compiler warnings
+* [#5295](https://github.com/icinga/icinga2/issues/5295) (Notifications, PR): Fix missing apostrophe in notification log
+* [#5292](https://github.com/icinga/icinga2/issues/5292) (PR): Build fix for OpenSSL 0.9.8 and stack\_st\_X509\_EXTENSION
+* [#5288](https://github.com/icinga/icinga2/issues/5288) (Configuration): Hostgroup using assign for Host with groups = null segfault
+* [#5278](https://github.com/icinga/icinga2/issues/5278) (PR): Build fix for I2\_LEAK\_DEBUG
+* [#5262](https://github.com/icinga/icinga2/issues/5262) (Graylog, PR): Fix performance data processing in GelfWriter feature
+* [#5259](https://github.com/icinga/icinga2/issues/5259) (API, PR): Don't allow acknowledgement expire timestamps in the past
+* [#5256](https://github.com/icinga/icinga2/issues/5256) (Configuration): Config type changes break object serialization \(JsonEncode\)
+* [#5250](https://github.com/icinga/icinga2/issues/5250) (API, Compat): Acknowledgement expire time in the past
+* [#5245](https://github.com/icinga/icinga2/issues/5245) (Notifications, PR): Fix that host downtimes might be triggered even if their state is Up
+* [#5224](https://github.com/icinga/icinga2/issues/5224) (Configuration, Notifications): Icinga sends notifications even though a Downtime object exists
+* [#5223](https://github.com/icinga/icinga2/issues/5223) (Plugins, Windows): Wrong return Code for Windows ICMP
+* [#5219](https://github.com/icinga/icinga2/issues/5219) (InfluxDB): InfluxDBWriter feature might block and leak memory
+* [#5211](https://github.com/icinga/icinga2/issues/5211) (API, Cluster): Config received is always accepted by client even if own config is newer
+* [#5194](https://github.com/icinga/icinga2/issues/5194) (API, CLI): No subjectAltName in Icinga CA created CSRs
+* [#5168](https://github.com/icinga/icinga2/issues/5168) (Windows): include files from other volume/partition
+* [#5146](https://github.com/icinga/icinga2/issues/5146) (Configuration): parsing of scheduled downtime object allow typing range instead of ranges
+* [#5132](https://github.com/icinga/icinga2/issues/5132) (Graphite): GraphiteWriter can slow down Icinga's check result processing
+* [#5062](https://github.com/icinga/icinga2/issues/5062) (Compat): icinga2 checkresults error
+* [#5043](https://github.com/icinga/icinga2/issues/5043) (API): API POST request with 'attrs' as array returns bad\_cast error
+* [#5040](https://github.com/icinga/icinga2/issues/5040) (Cluster): CRL loading fails due to incorrect return code check
+* [#5033](https://github.com/icinga/icinga2/issues/5033) (DB IDO): Flexible downtimes which are not triggered must not update DB IDO's actual\_end\_time in downtimehistory table
+* [#4984](https://github.com/icinga/icinga2/issues/4984) (API): Wrong response type when unauthorized
+* [#4983](https://github.com/icinga/icinga2/issues/4983) (Livestatus): Typo in livestatus key worst\_services\_state for hostgroups table
+* [#4956](https://github.com/icinga/icinga2/issues/4956) (DB IDO, PR): Fix persistent comments for Acknowledgements
+* [#4941](https://github.com/icinga/icinga2/issues/4941) (Metrics, PR): PerfData: Server Timeouts for InfluxDB Writer
+* [#4927](https://github.com/icinga/icinga2/issues/4927) (InfluxDB, Metrics): InfluxDbWriter error 500 hanging Icinga daemon
+* [#4913](https://github.com/icinga/icinga2/issues/4913) (API): acknowledge-problem api sending notifications when notify is false
+* [#4909](https://github.com/icinga/icinga2/issues/4909) (CLI): icinga2 feature disable fails on already disabled feature
+* [#4896](https://github.com/icinga/icinga2/issues/4896) (Plugins): Windows Agent: performance data of check\_perfmon
+* [#4832](https://github.com/icinga/icinga2/issues/4832) (API, Configuration): API max\_check\_attempts validation
+* [#4818](https://github.com/icinga/icinga2/issues/4818): Acknowledgements marked with Persistent Comment are not honored
+* [#4779](https://github.com/icinga/icinga2/issues/4779): Superfluous error messages for non-existing lsb\_release/sw\_vers commands \(on NetBSD\)
+* [#4778](https://github.com/icinga/icinga2/issues/4778): Fix for traditional glob\(3\) behaviour
+* [#4777](https://github.com/icinga/icinga2/issues/4777): NetBSD execvpe.c fix
+* [#4709](https://github.com/icinga/icinga2/issues/4709) (API): Posting config stage fails on FreeBSD
+* [#4696](https://github.com/icinga/icinga2/issues/4696) (Notifications): Notifications are sent when reloading Icinga 2 even though they're deactivated via modified attributes
+* [#4666](https://github.com/icinga/icinga2/issues/4666) (Graylog, Metrics): GelfWriter with enable\_send\_perfdata breaks checks
+* [#4532](https://github.com/icinga/icinga2/issues/4532) (Graylog, Metrics): Icinga 2 "hangs" if the GelfWriter cannot send messages
+* [#4440](https://github.com/icinga/icinga2/issues/4440) (DB IDO, Log): Exceptions might be better than exit in IDO
+* [#3664](https://github.com/icinga/icinga2/issues/3664) (DB IDO): mysql\_error cannot be used for mysql\_init
+* [#3483](https://github.com/icinga/icinga2/issues/3483) (Compat): Stacktrace on Command Pipe Error
+* [#3410](https://github.com/icinga/icinga2/issues/3410) (Livestatus): Livestatus: Problem with stats min operator
+* [#121](https://github.com/icinga/icinga2/issues/121) (CLI, PR): give only warnings if feature is already disabled
+
+### ITL
+
+* [#5384](https://github.com/icinga/icinga2/issues/5384) (ITL, PR): Remove default value for 'dns\_query\_type'
+* [#5383](https://github.com/icinga/icinga2/issues/5383) (ITL): Monitoring-Plugins check\_dns command does not support the `-q` flag
+* [#5372](https://github.com/icinga/icinga2/issues/5372) (ITL, PR): Update ITL CheckCommand description attribute, part 2
+* [#5363](https://github.com/icinga/icinga2/issues/5363) (ITL, PR): Update missing description attributes for ITL CheckCommand definitions
+* [#5347](https://github.com/icinga/icinga2/issues/5347) (ITL, PR): Improve ITL CheckCommand description attribute
+* [#5344](https://github.com/icinga/icinga2/issues/5344) (ITL, PR): Add ip4-or-ipv6 import to logstash ITL command
+* [#5343](https://github.com/icinga/icinga2/issues/5343) (ITL): logstash ITL command misses import
+* [#5236](https://github.com/icinga/icinga2/issues/5236) (ITL, PR): ITL: Add some missing arguments to ssl\_cert
+* [#5210](https://github.com/icinga/icinga2/issues/5210) (ITL, PR): Add report mode to db2\_health
+* [#5170](https://github.com/icinga/icinga2/issues/5170) (ITL, PR): Enhance mail notifications scripts and add support for command line parameters
+* [#5139](https://github.com/icinga/icinga2/issues/5139) (ITL, PR): Add more options to ldap CheckCommand
+* [#5129](https://github.com/icinga/icinga2/issues/5129) (ITL): Additional parameters for perfout manubulon scripts
+* [#5126](https://github.com/icinga/icinga2/issues/5126) (ITL, PR): Added support to NRPE v2 in NRPE CheckCommand
+* [#5075](https://github.com/icinga/icinga2/issues/5075) (ITL, PR): fix mitigation for nwc\_health
+* [#5063](https://github.com/icinga/icinga2/issues/5063) (ITL, PR): Add additional arguments to mssql\_health
+* [#5046](https://github.com/icinga/icinga2/issues/5046) (ITL): Add querytype to dns check
+* [#5019](https://github.com/icinga/icinga2/issues/5019) (ITL, PR): Added CheckCommand definitions for SMART, RAID controller and IPMI ping check
+* [#5015](https://github.com/icinga/icinga2/issues/5015) (ITL, PR): nwc\_health\_report attribute requires a value
+* [#4987](https://github.com/icinga/icinga2/issues/4987) (ITL): Review `dummy` entry in ITL
+* [#4985](https://github.com/icinga/icinga2/issues/4985) (ITL): Allow hpasm command from ITL to run in local mode
+* [#4964](https://github.com/icinga/icinga2/issues/4964) (ITL, PR): ITL: check\_icmp: add missing TTL attribute
+* [#4839](https://github.com/icinga/icinga2/issues/4839) (ITL): Remove deprecated dns\_expected\_answer attribute
+* [#4826](https://github.com/icinga/icinga2/issues/4826) (ITL): Prepare icingacli-businessprocess for next release
+* [#4661](https://github.com/icinga/icinga2/issues/4661) (ITL): ITL - check\_oracle\_health - report option to shorten output
+* [#124](https://github.com/icinga/icinga2/issues/124) (ITL, PR): FreeBSD's /dev/fd can either be inside devfs, or be of type fdescfs.
+* [#123](https://github.com/icinga/icinga2/issues/123) (ITL, PR): ITL: Update ipmi CheckCommand attributes
+* [#120](https://github.com/icinga/icinga2/issues/120) (ITL, PR): Add new parameter for check\_http: -L: Wrap output in HTML link
+* [#117](https://github.com/icinga/icinga2/issues/117) (ITL, PR): Support --only-critical for check\_apt
+* [#115](https://github.com/icinga/icinga2/issues/115) (ITL, PR): Inverse Interface Switch for snmp-interface
+* [#114](https://github.com/icinga/icinga2/issues/114) (ITL, PR): Adding -A to snmp interfaces check
+
+### Documentation
+
+* [#5448](https://github.com/icinga/icinga2/issues/5448) (Documentation, PR): Update documentation for 2.7.0
+* [#5440](https://github.com/icinga/icinga2/issues/5440) (Documentation, PR): Add missing notification state filter to documentation
+* [#5425](https://github.com/icinga/icinga2/issues/5425) (Documentation, PR): Fix formatting in API docs
+* [#5410](https://github.com/icinga/icinga2/issues/5410) (Documentation): Update docs for better compatibility with mkdocs
+* [#5393](https://github.com/icinga/icinga2/issues/5393) (Documentation, PR): Fix typo in the documentation
+* [#5378](https://github.com/icinga/icinga2/issues/5378) (Documentation, PR): Fixed warnings when using mkdocs
+* [#5370](https://github.com/icinga/icinga2/issues/5370) (Documentation, PR): Rename ChangeLog to CHANGELOG.md
+* [#5366](https://github.com/icinga/icinga2/issues/5366) (Documentation, PR): Fixed wrong node in documentation chapter Client/Satellite Linux Setup
+* [#5365](https://github.com/icinga/icinga2/issues/5365) (Documentation, PR): Update package documentation for Debian Stretch
+* [#5358](https://github.com/icinga/icinga2/issues/5358) (Documentation, PR): Add documentation for securing mysql on Debian/Ubuntu.
+* [#5357](https://github.com/icinga/icinga2/issues/5357) (Documentation, Notifications, PR): Notification Scripts: Ensure that mail from address works on Debian/RHEL/SUSE \(mailutils vs mailx\)
+* [#5354](https://github.com/icinga/icinga2/issues/5354) (Documentation, PR): Docs: Fix built-in template description and URLs
+* [#5349](https://github.com/icinga/icinga2/issues/5349) (Documentation, PR): Docs: Fix broken format for notes/tips in CLI command chapter
+* [#5339](https://github.com/icinga/icinga2/issues/5339) (Documentation, ITL, PR): Add accept\_cname to dns CheckCommand
+* [#5336](https://github.com/icinga/icinga2/issues/5336) (Documentation, PR): Docs: Fix formatting issues and broken URLs
+* [#5333](https://github.com/icinga/icinga2/issues/5333) (Documentation, PR): Update documentation for enhanced notification scripts
+* [#5324](https://github.com/icinga/icinga2/issues/5324) (Documentation, PR): Fix phrasing in Getting Started chapter
+* [#5317](https://github.com/icinga/icinga2/issues/5317) (Documentation, PR): Fix typo in INSTALL.md
+* [#5315](https://github.com/icinga/icinga2/issues/5315) (Documentation, PR): Docs: Replace nagios-plugins by monitoring-plugins for Debian/Ubuntu
+* [#5314](https://github.com/icinga/icinga2/issues/5314) (Documentation, PR): Document Common name \(CN\) in client setup
+* [#5309](https://github.com/icinga/icinga2/issues/5309) (Documentation, PR): Docs: Replace the command pipe w/ the REST API as Icinga Web 2 requirement in 'Getting Started' chapter
+* [#5291](https://github.com/icinga/icinga2/issues/5291) (Documentation): Update docs for RHEL/CentOS 5 EOL
+* [#5285](https://github.com/icinga/icinga2/issues/5285) (Documentation, PR): Fix sysstat installation in troubleshooting docs
+* [#5279](https://github.com/icinga/icinga2/issues/5279) (Documentation, PR): Docs: Add API query example for acknowledgements w/o expire time
+* [#5275](https://github.com/icinga/icinga2/issues/5275) (Documentation, PR): Add troubleshooting hints for cgroup fork errors
+* [#5244](https://github.com/icinga/icinga2/issues/5244) (Documentation, PR): Add a PR review section to CONTRIBUTING.md
+* [#5237](https://github.com/icinga/icinga2/issues/5237) (Documentation, PR): Docs: Add a note for Windows debuglog to the troubleshooting chapter
+* [#5227](https://github.com/icinga/icinga2/issues/5227) (Documentation, ITL, PR): feature/itl-vmware-esx-storage-path-standbyok
+* [#5216](https://github.com/icinga/icinga2/issues/5216) (Documentation, PR): Remove "... is is ..." in CONTRIBUTING.md
+* [#5206](https://github.com/icinga/icinga2/issues/5206) (Documentation): Typo in Getting Started Guide
+* [#5203](https://github.com/icinga/icinga2/issues/5203) (Documentation, PR): Fix typo in Getting Started chapter
+* [#5184](https://github.com/icinga/icinga2/issues/5184) (Documentation, PR): Doc/appendix: fix malformed markdown links
+* [#5181](https://github.com/icinga/icinga2/issues/5181) (Documentation, PR): List SELinux packages required for building RPMs
+* [#5178](https://github.com/icinga/icinga2/issues/5178) (Documentation, Windows): Documentation vague on "update-windows" check plugin
+* [#5175](https://github.com/icinga/icinga2/issues/5175) (Documentation): Add a note about flapping problems to the docs
+* [#5174](https://github.com/icinga/icinga2/issues/5174) (Documentation, PR): Add missing object type to Apply Rules doc example
+* [#5173](https://github.com/icinga/icinga2/issues/5173) (Documentation): Object type missing from ping Service example in docs
+* [#5167](https://github.com/icinga/icinga2/issues/5167) (Documentation): Add more assign where expression examples
+* [#5166](https://github.com/icinga/icinga2/issues/5166) (API, Documentation): Set zone attribute to no\_user\_modify for API POST requests
+* [#5165](https://github.com/icinga/icinga2/issues/5165) (Documentation, PR): Syntax error In Dependencies chapter
+* [#5164](https://github.com/icinga/icinga2/issues/5164) (Documentation, ITL, PR): ITL: Add CheckCommand ssl\_cert, fix ssl attributes
+* [#5161](https://github.com/icinga/icinga2/issues/5161) (Documentation, PR): ITL documentation - disk-windows usage note with % thresholds
+* [#5157](https://github.com/icinga/icinga2/issues/5157) (Documentation): "Three Levels with master, Satellites, and Clients" chapter is not clear about client config
+* [#5156](https://github.com/icinga/icinga2/issues/5156) (Documentation): Add CONTRIBUTING.md
+* [#5155](https://github.com/icinga/icinga2/issues/5155) (Documentation): 3.5. Apply Rules topic in the docs needs work.
+* [#5151](https://github.com/icinga/icinga2/issues/5151) (Documentation, PR): Replace http:// links with https:// links where a secure website exists
+* [#5150](https://github.com/icinga/icinga2/issues/5150) (Documentation): Invalid links in documentation
+* [#5149](https://github.com/icinga/icinga2/issues/5149) (Documentation, PR): Update documentation, change http:// links to https:// links where a website exists
+* [#5144](https://github.com/icinga/icinga2/issues/5144) (Documentation): Extend troubleshooting docs w/ environment analysis and common tools
+* [#5143](https://github.com/icinga/icinga2/issues/5143) (Documentation): Docs: Explain how to include your own config tree instead of conf.d
+* [#5142](https://github.com/icinga/icinga2/issues/5142) (Documentation): Add an Elastic Stack Integrations chapter to feature documentation
+* [#5140](https://github.com/icinga/icinga2/issues/5140) (Documentation): Documentation should explain that runtime modifications are not immediately updated for "object list"
+* [#5137](https://github.com/icinga/icinga2/issues/5137) (Documentation): Doc updates: Getting Started w/ own config, Troubleshooting w/ debug console
+* [#5111](https://github.com/icinga/icinga2/issues/5111) (Documentation): Fix duration attribute requirement for schedule-downtime API action
+* [#5104](https://github.com/icinga/icinga2/issues/5104) (Documentation, PR): Correct link to nscp documentation
+* [#5097](https://github.com/icinga/icinga2/issues/5097) (Documentation): The last example for typeof\(\) is missing the result
+* [#5090](https://github.com/icinga/icinga2/issues/5090) (Cluster, Documentation): EventHandler to be executed at the endpoint
+* [#5077](https://github.com/icinga/icinga2/issues/5077) (Documentation): Replace the 'command' feature w/ the REST API for Icinga Web 2
+* [#5016](https://github.com/icinga/icinga2/issues/5016) (Documentation, ITL, PR): Add fuse.gvfs-fuse-daemon to disk\_exclude\_type
+* [#5010](https://github.com/icinga/icinga2/issues/5010) (Documentation): \[Documentation\] Missing parameter for SNMPv3 auth
+* [#3560](https://github.com/icinga/icinga2/issues/3560) (Documentation): Explain check\_memorys and check\_disks thresholds
+* [#1880](https://github.com/icinga/icinga2/issues/1880) (Documentation): add a section for 'monitoring the icinga2 node'
+
+### Support
+
+* [#5359](https://github.com/icinga/icinga2/issues/5359) (CLI, PR): Fixed missing closing bracket in CLI command pki new-cert.
+* [#5332](https://github.com/icinga/icinga2/issues/5332) (Configuration, Notifications, PR): Notification Scripts: notification\_type is always required
+* [#5326](https://github.com/icinga/icinga2/issues/5326) (Documentation, Installation, PR): Install the images directory containing the needed PNGs for the markd
+* [#5310](https://github.com/icinga/icinga2/issues/5310) (Packages, PR): RPM: Disable SELinux policy hardlink
+* [#5306](https://github.com/icinga/icinga2/issues/5306) (Documentation, Packages, PR): Remove CentOS 5 from 'Getting started' docs
+* [#5304](https://github.com/icinga/icinga2/issues/5304) (Documentation, Packages, PR): Update INSTALL.md for RPM builds
+* [#5303](https://github.com/icinga/icinga2/issues/5303) (Packages, PR): RPM: Fix builds on Amazon Linux
+* [#5299](https://github.com/icinga/icinga2/issues/5299) (Notifications): Ensure that "mail from" works on RHEL/CentOS
+* [#5286](https://github.com/icinga/icinga2/issues/5286) (Configuration, PR): Fix verbose mode in notifications scripts
+* [#5265](https://github.com/icinga/icinga2/issues/5265) (PR): Move PerfdataValue\(\) class into base library
+* [#5252](https://github.com/icinga/icinga2/issues/5252) (Tests, PR): travis: Update to trusty as CI environment
+* [#5251](https://github.com/icinga/icinga2/issues/5251) (Tests): Update Travis CI environment to trusty
+* [#5248](https://github.com/icinga/icinga2/issues/5248) (Tests, PR): Travis: Run config validation at the end
+* [#5238](https://github.com/icinga/icinga2/issues/5238) (DB IDO, PR): Remove deprecated "DbCat1 | DbCat2" notation for DB IDO categories
+* [#5229](https://github.com/icinga/icinga2/issues/5229) (Installation, PR): CMake: require a GCC version according to INSTALL.md
+* [#5226](https://github.com/icinga/icinga2/issues/5226) (Packages, PR): RPM spec: don't enable features after an upgrade
+* [#5225](https://github.com/icinga/icinga2/issues/5225) (DB IDO, PR): Don't call mysql\_error\(\) after a failure of mysql\_init\(\)
+* [#5218](https://github.com/icinga/icinga2/issues/5218) (Packages): icinga2.spec: Allow selecting g++ compiler on older SUSE release builds
+* [#5189](https://github.com/icinga/icinga2/issues/5189) (Documentation, Packages, PR): RPM packaging updates
+* [#5188](https://github.com/icinga/icinga2/issues/5188) (Documentation, Packages): Boost \>= 1.48 required
+* [#5177](https://github.com/icinga/icinga2/issues/5177) (Packages): Issues Packing icinga 2.6.3 tar.gz to RPM
+* [#5153](https://github.com/icinga/icinga2/issues/5153) (Packages, PR): Changed dependency of selinux subpackage
+* [#5127](https://github.com/icinga/icinga2/issues/5127) (Installation, PR): Improve systemd service file
+* [#5102](https://github.com/icinga/icinga2/issues/5102) (Compat, Configuration, Packages): Deprecate the icinga2-classicui-config package
+* [#5101](https://github.com/icinga/icinga2/issues/5101) (Packages, Windows): Fix incorrect metadata for the Chocolatey package
+* [#5100](https://github.com/icinga/icinga2/issues/5100) (Packages, Windows): Update Chocolatey package to match current guidelines
+* [#5094](https://github.com/icinga/icinga2/issues/5094) (Cluster, Configuration): Log message "Object cannot be deleted because it was not created using the API"
+* [#5087](https://github.com/icinga/icinga2/issues/5087) (Configuration): Function metadata should show available arguments
+* [#5042](https://github.com/icinga/icinga2/issues/5042) (DB IDO, PR): Add link to upgrade documentation to log message
+* [#4977](https://github.com/icinga/icinga2/issues/4977) (Cluster, Installation): icinga2/api/log directory is not created
+* [#4921](https://github.com/icinga/icinga2/issues/4921) (Installation, Packages): No network dependency for /etc/init.d/icinga2
+* [#4781](https://github.com/icinga/icinga2/issues/4781) (Packages): Improve SELinux Policy
+* [#4776](https://github.com/icinga/icinga2/issues/4776) (Installation): NetBSD install path fixes
+* [#4621](https://github.com/icinga/icinga2/issues/4621) (Configuration, Notifications, Packages): notifications always enabled after update
+
+## 2.6.3 (2017-03-29)
+
+### Bug
+
+* [#5080](https://github.com/icinga/icinga2/issues/5080) (DB IDO): Missing index use can cause icinga\_downtimehistory queries to hang indefinitely
+* [#4989](https://github.com/icinga/icinga2/issues/4989) (Check Execution): Icinga daemon runs with nice 5 after reload
+* [#4930](https://github.com/icinga/icinga2/issues/4930) (Cluster): Change "Discarding 'config update object'" log messages to notice log level
+* [#4603](https://github.com/icinga/icinga2/issues/4603) (DB IDO): With too many comments, Icinga reload process won't finish reconnecting to Database
+
+### Documentation
+
+* [#5057](https://github.com/icinga/icinga2/issues/5057) (Documentation): Update Security section in the Distributed Monitoring chapter
+* [#5055](https://github.com/icinga/icinga2/issues/5055) (Documentation, ITL): mysql\_socket attribute missing in the documentation for the mysql CheckCommand
+* [#5035](https://github.com/icinga/icinga2/issues/5035) (Documentation): Docs: Typo in Distributed Monitoring chapter
+* [#5030](https://github.com/icinga/icinga2/issues/5030) (Documentation): Advanced topics: Mention the API and explain stick acks, fixed/flexible downtimes
+* [#5029](https://github.com/icinga/icinga2/issues/5029) (Documentation): Advanced topics: Wrong acknowledgement notification filter
+* [#4996](https://github.com/icinga/icinga2/issues/4996) (Documentation): documentation: mixed up host names in 6-distributed-monitoring.md
+* [#4980](https://github.com/icinga/icinga2/issues/4980) (Documentation): Add OpenBSD and AlpineLinux package repositories to the documentation
+* [#4955](https://github.com/icinga/icinga2/issues/4955) (Documentation, ITL): Review CheckCommand documentation including external URLs
+* [#4954](https://github.com/icinga/icinga2/issues/4954) (Documentation): Add an example for /v1/actions/process-check-result which uses filter/type
+* [#3133](https://github.com/icinga/icinga2/issues/3133) (Documentation): Add practical examples for apply expressions
+
+## 2.6.2 (2017-02-13)
+
+### Bug
+
+* [#4952](https://github.com/icinga/icinga2/issues/4952) (API, CLI): Icinga crashes while trying to remove configuration files for objects which no longer exist
+
+## 2.6.1 (2017-01-31)
+
+### Notes
+
+This release addresses a number of bugs we have identified in version 2.6.0.
+
+The documentation changes reflect our recent move to GitHub.
+
+### Enhancement
+
+* [#4923](https://github.com/icinga/icinga2/issues/4923): Migration to GitHub
+* [#4813](https://github.com/icinga/icinga2/issues/4813): Include argument name for log message about incorrect set\_if values
+
+### Bug
+
+* [#4950](https://github.com/icinga/icinga2/issues/4950): IDO schema update is not compatible to MySQL 5.7
+* [#4882](https://github.com/icinga/icinga2/issues/4882): Crash - Error: parse error: premature EOF
+* [#4877](https://github.com/icinga/icinga2/issues/4877) (DB IDO): IDO MySQL schema not working on MySQL 5.7
+* [#4874](https://github.com/icinga/icinga2/issues/4874) (DB IDO): IDO: Timestamps in PostgreSQL may still have a time zone offset
+* [#4867](https://github.com/icinga/icinga2/issues/4867): SIGPIPE shutdown on config reload
+
+### Documentation
+
+* [#4944](https://github.com/icinga/icinga2/issues/4944) (Documentation, PR): doc/6-distributed-monitoring.md: Fix typo
+* [#4934](https://github.com/icinga/icinga2/issues/4934) (Documentation): Update contribution section for GitHub
+* [#4917](https://github.com/icinga/icinga2/issues/4917) (Documentation): Incorrect license file mentioned in README.md
+* [#4916](https://github.com/icinga/icinga2/issues/4916) (Documentation): Add travis-ci build status logo to README.md
+* [#4908](https://github.com/icinga/icinga2/issues/4908) (Documentation): Move domain to icinga.com
+* [#4885](https://github.com/icinga/icinga2/issues/4885) (Documentation): SLES 12 SP2 libboost\_thread package requires libboost\_chrono
+* [#4869](https://github.com/icinga/icinga2/issues/4869) (Documentation): Update RELEASE.md
+* [#4868](https://github.com/icinga/icinga2/issues/4868) (Documentation): Add more build details to INSTALL.md
+* [#4803](https://github.com/icinga/icinga2/issues/4803) (Documentation): Update Repositories in Docs
+
+### Support
+
+* [#4870](https://github.com/icinga/icinga2/issues/4870) (Packages): SLES11 SP4 dependency on Postgresql \>= 8.4
+
+## 2.6.0 (2016-12-13)
+
+### Notes
+
+* Client/Satellite setup
+ * The "bottom up" client configuration mode has been deprecated. Check [#13255](https://dev.icinga.com/issues/13255) for additional details and migration.
+* Linux/Unix daemon
+ * Ensure that Icinga 2 does not leak file descriptors to executed commands.
+ * Two processes are now started instead of just one.
+* Windows client
+ * The package bundles NSClient++ 0.5.0; the ITL CheckCommands have been updated accordingly.
+ * The user account for the Icinga 2 service is now configurable. This is useful if certain checks require administrator permissions (e.g. check_update.exe).
+ * Bugfixes for check plugins
+* Cluster and API
+ * Provide location information for objects and templates in the API
+ * Improve log message for ignored config updates
+ * Fix cluster resync problem with API-created objects (hosts, downtimes, etc.)
+ * Fix API-created objects in a global zone not being synced to child endpoints
+* Notifications
+ * Several bugfixes for downtime, custom and flapping notifications
+* New ITL CheckCommands: logstash, glusterfs, iostats (see the example after this list)
+* Package builds require a compiler which supports C++11 features (gcc-c++ >= 4.7, clang++)
+* DB IDO
+ * Schema upgrade required (2.6.0.sql)
+ * This update fixes the timestamp columns required by Icinga Web 2 and might take a while. Make sure to schedule a maintenance window for the database upgrade.
+
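+As an illustration of the new ITL CheckCommands mentioned above, a minimal apply rule might look like the following sketch. The custom variable name is an assumption for illustration only; the ITL reference documents the exact parameters each command supports.
+
+```
+apply Service "iostats" {
+  import "generic-service"
+
+  check_command = "iostats"
+  // Illustrative parameter only -- check the ITL documentation for the
+  // custom variables actually supported by the iostats CheckCommand.
+  vars.iostats_disk = "sda"
+
+  assign where host.vars.os == "Linux"
+}
+```
+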
+### Enhancement
+
+* [#4798](https://github.com/icinga/icinga2/issues/4798) (Cluster): Deprecate cluster/client mode "bottom up" w/ repository.d and node update-config
+* [#4770](https://github.com/icinga/icinga2/issues/4770) (API): Allow to evaluate macros through the API
+* [#4713](https://github.com/icinga/icinga2/issues/4713) (Cluster): Check whether nodes are synchronizing the API log before putting them into UNKNOWN
+* [#4651](https://github.com/icinga/icinga2/issues/4651) (Plugins): Review windows plugins performance output
+* [#4631](https://github.com/icinga/icinga2/issues/4631) (Configuration): Suppress compiler warnings for auto-generated code
+* [#4622](https://github.com/icinga/icinga2/issues/4622) (Cluster): Improve log message for ignored config updates
+* [#4590](https://github.com/icinga/icinga2/issues/4590): Make sure that libmethods is automatically loaded even when not using the ITL
+* [#4587](https://github.com/icinga/icinga2/issues/4587) (Configuration): Implement support for default templates
+* [#4580](https://github.com/icinga/icinga2/issues/4580) (API): Provide location information for objects and templates in the API
+* [#4576](https://github.com/icinga/icinga2/issues/4576): Use lambda functions for INITIALIZE\_ONCE
+* [#4575](https://github.com/icinga/icinga2/issues/4575): Use 'auto' for iterator declarations
+* [#4571](https://github.com/icinga/icinga2/issues/4571): Implement an rvalue constructor for the String and Value classes
+* [#4570](https://github.com/icinga/icinga2/issues/4570) (Configuration): Implement a command-line argument for "icinga2 console" to allow specifying a script file
+* [#4563](https://github.com/icinga/icinga2/issues/4563) (Configuration): Remove unused method: ApplyRule::DiscardRules
+* [#4559](https://github.com/icinga/icinga2/issues/4559): Replace BOOST\_FOREACH with range-based for loops
+* [#4557](https://github.com/icinga/icinga2/issues/4557): Add -fvisibility=hidden to the default compiler flags
+* [#4537](https://github.com/icinga/icinga2/issues/4537): Implement an environment variable to keep Icinga from closing FDs on startup
+* [#4536](https://github.com/icinga/icinga2/issues/4536): Avoid unnecessary string copies
+* [#4535](https://github.com/icinga/icinga2/issues/4535): Remove deprecated functions
+* [#3684](https://github.com/icinga/icinga2/issues/3684) (Configuration): Command line option for config syntax validation
+* [#2968](https://github.com/icinga/icinga2/issues/2968): Better message for apply errors
+
+### Bug
+
+* [#4831](https://github.com/icinga/icinga2/issues/4831) (CLI): Wrong help string for node setup cli command argument --master\_host
+* [#4828](https://github.com/icinga/icinga2/issues/4828) (API): Crash in CreateObjectHandler \(regression from \#11684\)
+* [#4802](https://github.com/icinga/icinga2/issues/4802): Icinga tries to delete Downtime objects that were statically configured
+* [#4801](https://github.com/icinga/icinga2/issues/4801): Sending a HUP signal to the child process for execution actually kills it
+* [#4791](https://github.com/icinga/icinga2/issues/4791) (DB IDO): PostgreSQL: Don't use timestamp with timezone for UNIX timestamp columns
+* [#4789](https://github.com/icinga/icinga2/issues/4789) (Notifications): Recovery notifications sent for Not-Problem notification type if notified before
+* [#4775](https://github.com/icinga/icinga2/issues/4775) (Cluster): Crash w/ SendNotifications cluster handler and check result with empty perfdata
+* [#4771](https://github.com/icinga/icinga2/issues/4771): Config validation crashes when using command\_endpoint without also having an ApiListener object
+* [#4752](https://github.com/icinga/icinga2/issues/4752) (Graphite): Performance data writer for Graphite: Values without fraction limited to 2147483647 \(7FFFFFFF\)
+* [#4740](https://github.com/icinga/icinga2/issues/4740): SIGALRM handling may be affected by recent commit
+* [#4726](https://github.com/icinga/icinga2/issues/4726) (Notifications): Flapping notifications sent for soft state changes
+* [#4717](https://github.com/icinga/icinga2/issues/4717) (API): Icinga crashes while deleting a config file which doesn't exist anymore
+* [#4678](https://github.com/icinga/icinga2/issues/4678) (Configuration): Configuration validation fails when setting tls\_protocolmin to TLSv1.2
+* [#4674](https://github.com/icinga/icinga2/issues/4674) (CLI): Parse error: "premature EOF" when running "icinga2 node update-config"
+* [#4665](https://github.com/icinga/icinga2/issues/4665): Crash in ClusterEvents::SendNotificationsAPIHandler
+* [#4646](https://github.com/icinga/icinga2/issues/4646) (Notifications): Forced custom notification is setting "force\_next\_notification": true permanently
+* [#4644](https://github.com/icinga/icinga2/issues/4644) (API): Crash in HttpRequest::Parse while processing HTTP request
+* [#4630](https://github.com/icinga/icinga2/issues/4630) (Configuration): Validation does not highlight the correct attribute
+* [#4629](https://github.com/icinga/icinga2/issues/4629) (CLI): broken: icinga2 --version
+* [#4620](https://github.com/icinga/icinga2/issues/4620) (API): Invalid API filter error messages
+* [#4619](https://github.com/icinga/icinga2/issues/4619) (CLI): Cli: boost::bad\_get on icinga::String::String\(icinga::Value&&\)
+* [#4616](https://github.com/icinga/icinga2/issues/4616): Build fails with Visual Studio 2015
+* [#4606](https://github.com/icinga/icinga2/issues/4606): Remove unused last\_in\_downtime field
+* [#4602](https://github.com/icinga/icinga2/issues/4602) (CLI): Last option highlighted as the wrong one, even when it is not the culprit
+* [#4599](https://github.com/icinga/icinga2/issues/4599): Unexpected state changes with max\_check\_attempts = 2
+* [#4583](https://github.com/icinga/icinga2/issues/4583) (Configuration): Debug hints for dictionary expressions are nested incorrectly
+* [#4574](https://github.com/icinga/icinga2/issues/4574) (Notifications): Don't send Flapping\* notifications when downtime is active
+* [#4573](https://github.com/icinga/icinga2/issues/4573) (DB IDO): Getting error during schema update
+* [#4572](https://github.com/icinga/icinga2/issues/4572) (Configuration): Config validation shouldn't allow 'endpoints = \[ "" \]'
+* [#4566](https://github.com/icinga/icinga2/issues/4566) (Notifications): Fixed downtimes scheduled for a future date trigger DOWNTIMESTART notifications
+* [#4564](https://github.com/icinga/icinga2/issues/4564): Add missing initializer for WorkQueue::m\_NextTaskID
+* [#4555](https://github.com/icinga/icinga2/issues/4555): Fix compiler warnings
+* [#4541](https://github.com/icinga/icinga2/issues/4541) (DB IDO): Don't link against libmysqlclient\_r
+* [#4538](https://github.com/icinga/icinga2/issues/4538): Don't update TimePeriod ranges for inactive objects
+* [#4423](https://github.com/icinga/icinga2/issues/4423) (Metrics): InfluxdbWriter does not write state other than 0
+* [#4369](https://github.com/icinga/icinga2/issues/4369) (Plugins): check\_network performance data in invalid format - ingraph
+* [#4169](https://github.com/icinga/icinga2/issues/4169) (Cluster): Cluster resync problem with API created objects
+* [#4098](https://github.com/icinga/icinga2/issues/4098) (API): Objects created in a global zone are not synced to child endpoints
+* [#4010](https://github.com/icinga/icinga2/issues/4010) (API): API requests from execute-script action are too verbose
+* [#3802](https://github.com/icinga/icinga2/issues/3802) (Compat): SCHEDULE\_AND\_PROPAGATE\_HOST\_DOWNTIME command missing
+* [#3801](https://github.com/icinga/icinga2/issues/3801) (Compat): SCHEDULE\_AND\_PROPAGATE\_TRIGGERED\_HOST\_DOWNTIME command missing
+* [#3575](https://github.com/icinga/icinga2/issues/3575) (DB IDO): MySQL 5.7.9, Incorrect datetime value Error
+* [#3565](https://github.com/icinga/icinga2/issues/3565) (Plugins): Windows Agent: performance data of check\_perfmon
+* [#3564](https://github.com/icinga/icinga2/issues/3564) (Plugins): Windows Agent: Performance data values for check\_perfmon.exe are invalid sometimes
+* [#3220](https://github.com/icinga/icinga2/issues/3220) (Plugins): Implement support for resolving DNS hostnames in check\_ping.exe
+* [#2847](https://github.com/icinga/icinga2/issues/2847): File descriptors are leaked to child processes which makes SELinux unhappy
+
+### ITL
+
+* [#4842](https://github.com/icinga/icinga2/issues/4842) (ITL): Add tempdir attribute to postgres CheckCommand
+* [#4837](https://github.com/icinga/icinga2/issues/4837) (ITL): Add sudo option to mailq CheckCommand
+* [#4836](https://github.com/icinga/icinga2/issues/4836) (ITL): Add verbose parameter to http CheckCommand
+* [#4835](https://github.com/icinga/icinga2/issues/4835) (ITL): Add timeout option to mysql\_health CheckCommand
+* [#4714](https://github.com/icinga/icinga2/issues/4714) (ITL): Default values for check\_swap are incorrect
+* [#4710](https://github.com/icinga/icinga2/issues/4710) (ITL): snmp\_miblist variable to feed the -m option of check\_snmp is missing in the snmpv3 CheckCommand object
+* [#4684](https://github.com/icinga/icinga2/issues/4684) (ITL): Add a radius CheckCommand for the radius check provided by nagios-plugins
+* [#4681](https://github.com/icinga/icinga2/issues/4681) (ITL): Add CheckCommand definition for check\_logstash
+* [#4677](https://github.com/icinga/icinga2/issues/4677) (ITL): Problem passing arguments to nscp-local CheckCommand objects
+* [#4672](https://github.com/icinga/icinga2/issues/4672) (ITL): Add timeout option to oracle\_health CheckCommand
+* [#4618](https://github.com/icinga/icinga2/issues/4618) (ITL): Hangman easter egg is broken
+* [#4608](https://github.com/icinga/icinga2/issues/4608) (ITL): Add CheckCommand definition for check\_iostats
+* [#4597](https://github.com/icinga/icinga2/issues/4597) (ITL): Default disk plugin check should not check inodes
+* [#4595](https://github.com/icinga/icinga2/issues/4595) (ITL): Manubulon: Add missing procurve memory flag
+* [#4585](https://github.com/icinga/icinga2/issues/4585) (ITL): Fix code style violations in the ITL
+* [#4582](https://github.com/icinga/icinga2/issues/4582) (ITL): Incorrect help text for check\_swap
+* [#4543](https://github.com/icinga/icinga2/issues/4543) (ITL): ITL - check\_vmware\_esx - specify a datacenter/vsphere server for esx/host checks
+* [#4324](https://github.com/icinga/icinga2/issues/4324) (ITL): Add CheckCommand definition for check\_glusterfs
+
+### Documentation
+
+* [#4862](https://github.com/icinga/icinga2/issues/4862) (Documentation): "2.1.4. Installation Paths" should contain systemd paths
+* [#4861](https://github.com/icinga/icinga2/issues/4861) (Documentation): Update "2.1.3. Enabled Features during Installation" - outdated "feature list"
+* [#4859](https://github.com/icinga/icinga2/issues/4859) (Documentation): Update package instructions for Fedora
+* [#4851](https://github.com/icinga/icinga2/issues/4851) (Documentation): Update README.md and correct project URLs
+* [#4846](https://github.com/icinga/icinga2/issues/4846) (Documentation): Add a note for boolean values in the disk CheckCommand section
+* [#4845](https://github.com/icinga/icinga2/issues/4845) (Documentation): Troubleshooting: Add examples for fetching the executed command line
+* [#4840](https://github.com/icinga/icinga2/issues/4840) (Documentation): Update Windows screenshots in the client documentation
+* [#4838](https://github.com/icinga/icinga2/issues/4838) (Documentation): Add example for concurrent\_checks in CheckerComponent object type
+* [#4829](https://github.com/icinga/icinga2/issues/4829) (Documentation): Missing API headers for X-HTTP-Method-Override
+* [#4827](https://github.com/icinga/icinga2/issues/4827) (Documentation): Fix example in PNP template docs
+* [#4821](https://github.com/icinga/icinga2/issues/4821) (Documentation): Add a note about removing "conf.d" on the client for "top down command endpoint" setups
+* [#4809](https://github.com/icinga/icinga2/issues/4809) (Documentation): Update API and Library Reference chapters
+* [#4804](https://github.com/icinga/icinga2/issues/4804) (Documentation): Add a note about default template import to the CheckCommand object
+* [#4800](https://github.com/icinga/icinga2/issues/4800) (Documentation): Docs: Typo in "CLI commands" chapter
+* [#4793](https://github.com/icinga/icinga2/issues/4793) (Documentation): Docs: ITL plugins contrib order
+* [#4787](https://github.com/icinga/icinga2/issues/4787) (Documentation): Doc: Swap packages.icinga.org w/ DebMon
+* [#4780](https://github.com/icinga/icinga2/issues/4780) (Documentation): Add a note about pinning checks w/ command\_endpoint
+* [#4736](https://github.com/icinga/icinga2/issues/4736) (Documentation): Docs: wrong heading level for commands.conf and groups.conf
+* [#4708](https://github.com/icinga/icinga2/issues/4708) (Documentation): Add more Timeperiod examples in the documentation
+* [#4706](https://github.com/icinga/icinga2/issues/4706) (Documentation): Add an example of multi-parents configuration for the Migration chapter
+* [#4705](https://github.com/icinga/icinga2/issues/4705) (Documentation): Typo in the documentation
+* [#4699](https://github.com/icinga/icinga2/issues/4699) (Documentation): Fix some spelling mistakes
+* [#4667](https://github.com/icinga/icinga2/issues/4667) (Documentation): Add documentation for logrotation for the mainlog feature
+* [#4653](https://github.com/icinga/icinga2/issues/4653) (Documentation): Corrections for distributed monitoring chapter
+* [#4641](https://github.com/icinga/icinga2/issues/4641) (Documentation): Docs: Migrating Notification example tells about filters instead of types
+* [#4639](https://github.com/icinga/icinga2/issues/4639) (Documentation): GDB example in the documentation isn't working
+* [#4636](https://github.com/icinga/icinga2/issues/4636) (Documentation): Add development docs for writing a core dump file
+* [#4601](https://github.com/icinga/icinga2/issues/4601) (Documentation): Typo in distributed monitoring docs
+* [#4596](https://github.com/icinga/icinga2/issues/4596) (Documentation): Update service monitoring and distributed docs
+* [#4589](https://github.com/icinga/icinga2/issues/4589) (Documentation): Fix help output for update-links.py
+* [#4584](https://github.com/icinga/icinga2/issues/4584) (Documentation): Add missing reference to libmethods for the default ITL command templates
+* [#4492](https://github.com/icinga/icinga2/issues/4492) (Documentation): Add information about function 'range'
+
+### Support
+
+* [#4796](https://github.com/icinga/icinga2/issues/4796) (Installation): Sort Changelog by category
+* [#4792](https://github.com/icinga/icinga2/issues/4792) (Tests): Add unit test for notification state/type filter checks
+* [#4724](https://github.com/icinga/icinga2/issues/4724) (Packages): Update .mailmap for icinga.com
+* [#4671](https://github.com/icinga/icinga2/issues/4671) (Packages): Windows Installer should include NSClient++ 0.5.0
+* [#4612](https://github.com/icinga/icinga2/issues/4612) (Tests): Unit tests randomly crash after the tests have completed
+* [#4607](https://github.com/icinga/icinga2/issues/4607) (Packages): Improve support for building the chocolatey package
+* [#4588](https://github.com/icinga/icinga2/issues/4588) (Installation): Use raw string literals in mkembedconfig
+* [#4578](https://github.com/icinga/icinga2/issues/4578) (Installation): Improve detection for the -flto compiler flag
+* [#4569](https://github.com/icinga/icinga2/issues/4569) (Installation): Set versions for all internal libraries
+* [#4558](https://github.com/icinga/icinga2/issues/4558) (Installation): Update cmake config to require a compiler that supports C++11
+* [#4556](https://github.com/icinga/icinga2/issues/4556) (Installation): logrotate file is not properly generated when the logrotate binary resides in /usr/bin
+* [#4551](https://github.com/icinga/icinga2/issues/4551) (Tests): Implement unit tests for state changes
+* [#2943](https://github.com/icinga/icinga2/issues/2943) (Installation): Make the user account configurable for the Windows service
+* [#2792](https://github.com/icinga/icinga2/issues/2792) (Tests): Livestatus tests don't work on OS X
+
+## 2.5.4 (2016-08-30)
+
+### Notes
+
+* Bugfixes
+
+### Bug
+
+* [#4277](https://github.com/icinga/icinga2/issues/4277): Many check commands executed at the same time when the master reloads
+
+## 2.5.3 (2016-08-25)
+
+### Notes
+
+This release addresses an issue with PostgreSQL support for the IDO database module.
+
+### Bug
+
+* [#4554](https://github.com/icinga/icinga2/issues/4554) (DB IDO): ido pgsql migration from 2.4.0 to 2.5.0: wrong size for config\_hash
+
+## 2.5.2 (2016-08-24)
+
+### Notes
+
+* Bugfixes
+
+### Bug
+
+* [#4550](https://github.com/icinga/icinga2/issues/4550): Icinga 2 sends SOFT recovery notifications
+* [#4549](https://github.com/icinga/icinga2/issues/4549) (DB IDO): Newly added group member tables in the IDO database are not updated
+
+### Documentation
+
+* [#4548](https://github.com/icinga/icinga2/issues/4548) (Documentation): Wrong formatting in client docs
+
+## 2.5.1 (2016-08-23)
+
+### Notes
+
+* Bugfixes
+
+### Bug
+
+* [#4544](https://github.com/icinga/icinga2/issues/4544) (Notifications): Icinga 2 sends recovery notifications for SOFT NOT-OK states
+
+## 2.5.0 (2016-08-23)
+
+### Notes
+
+* InfluxdbWriter feature
+* API
+ * New endpoints: /v1/variables and /v1/templates (GET requests), /v1/actions/generate-ticket (POST request)
+ * State/type filters for notifications/users are now string values (PUT, POST, GET requests)
+* Configuration
+ * TimePeriod excludes/includes attributes
+ * DateTime object for formatting time strings
+ * New prototype methods: Array#filter, Array#unique, Array#map, Array#reduce (see the sketch after this list)
+ * icinga2.conf now includes plugins-contrib, manubulon, windows-plugins, nscp by default (ITL CheckCommand definitions)
+ * Performance improvements (config compiler and validation)
+* CLI
+ * 'icinga2 object list' formats state/type filters as string values
+ * Compiled config files are now visible with "notice" debug level (hidden by default)
+ * CA serial file now uses a hash value (HA cluster w/ 2 CA directories)
+* Cluster
+ * There is a known issue with >2 endpoints inside a zone. Icinga 2 will now log a warning.
+ * Support for accepted ciphers and minimum TLS version
+ * Connection and error logging has been improved.
+* DB IDO
+ * Schema upgrade required (2.5.0.sql)
+ * Incremental config dump (performance boost)
+ * The `categories` attribute is now an array. The previous notation is deprecated and will be removed.
+ * DbCatLog is no longer enabled by default.
+ * SSL support for MySQL
+* New packages
+ * vim-icinga2 for syntax highlighting
+ * libicinga2 (Debian), icinga2-libs (RPM) for Icinga Studio packages
+
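+To illustrate the new Array prototype methods listed above, the following sketch can be pasted into `icinga2 console`; the commented result values assume the usual filter/map/reduce semantics and are not taken from an actual session.
+
+```
+var values = [ 3, 1, 2, 3 ]
+
+values.unique()                                 /* deduplicated copy, e.g. [ 3, 1, 2 ] */
+values.map(function(v) { return v * 2 })        /* [ 6, 2, 4, 6 ] */
+values.filter(function(v) { return v >= 2 })    /* [ 3, 2, 3 ] */
+values.reduce(function(a, b) { return a + b })  /* 9 */
+```
+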
+### Enhancement
+
+* [#4516](https://github.com/icinga/icinga2/issues/4516): Remove some unused \#includes
+* [#4498](https://github.com/icinga/icinga2/issues/4498): Remove unnecessary Dictionary::Contains calls
+* [#4493](https://github.com/icinga/icinga2/issues/4493) (Cluster): Improve performance for Endpoint config validation
+* [#4491](https://github.com/icinga/icinga2/issues/4491): Improve performance for type lookups
+* [#4487](https://github.com/icinga/icinga2/issues/4487) (DB IDO): Incremental updates for the IDO database
+* [#4486](https://github.com/icinga/icinga2/issues/4486) (DB IDO): Remove unused code from the IDO classes
+* [#4485](https://github.com/icinga/icinga2/issues/4485) (API): Add API action for generating a PKI ticket
+* [#4479](https://github.com/icinga/icinga2/issues/4479) (Configuration): Implement comparison operators for the Array class
+* [#4467](https://github.com/icinga/icinga2/issues/4467): Implement the System\#sleep function
+* [#4465](https://github.com/icinga/icinga2/issues/4465) (Configuration): Implement support for namespaces
+* [#4464](https://github.com/icinga/icinga2/issues/4464) (CLI): Implement support for inspecting variables with LLDB/GDB
+* [#4457](https://github.com/icinga/icinga2/issues/4457): Implement support for marking functions as deprecated
+* [#4454](https://github.com/icinga/icinga2/issues/4454): Include compiler name/version and build host name in --version
+* [#4451](https://github.com/icinga/icinga2/issues/4451) (Configuration): Move internal script functions into the 'Internal' namespace
+* [#4449](https://github.com/icinga/icinga2/issues/4449): Improve logging for the WorkQueue class
+* [#4445](https://github.com/icinga/icinga2/issues/4445): Rename/Remove experimental script functions
+* [#4443](https://github.com/icinga/icinga2/issues/4443): Implement process\_check\_result script method for the Checkable class
+* [#4442](https://github.com/icinga/icinga2/issues/4442) (API): Support for determining the Icinga 2 version via the API
+* [#4431](https://github.com/icinga/icinga2/issues/4431) (Notifications): Add the notification type into the log message
+* [#4424](https://github.com/icinga/icinga2/issues/4424) (Cluster): Enhance TLS handshake error messages with connection information
+* [#4415](https://github.com/icinga/icinga2/issues/4415) (API): Remove obsolete debug log message
+* [#4410](https://github.com/icinga/icinga2/issues/4410) (Configuration): Add map/reduce and filter functionality for the Array class
+* [#4403](https://github.com/icinga/icinga2/issues/4403) (CLI): Add history for icinga2 console
+* [#4398](https://github.com/icinga/icinga2/issues/4398) (Cluster): Log a warning if there are more than 2 zone endpoint members
+* [#4393](https://github.com/icinga/icinga2/issues/4393) (Cluster): Include IP address and port in the "New connection" log message
+* [#4388](https://github.com/icinga/icinga2/issues/4388) (Configuration): Implement the \_\_ptr script function
+* [#4386](https://github.com/icinga/icinga2/issues/4386) (Cluster): Improve error messages for failed certificate validation
+* [#4381](https://github.com/icinga/icinga2/issues/4381) (Cluster): Improve log message for connecting nodes without configured Endpoint object
+* [#4352](https://github.com/icinga/icinga2/issues/4352) (Cluster): Enhance client disconnect message for "No data received on new API connection."
+* [#4348](https://github.com/icinga/icinga2/issues/4348) (DB IDO): Do not populate logentries table by default
+* [#4325](https://github.com/icinga/icinga2/issues/4325) (API): API: Add missing downtime\_depth attribute
+* [#4314](https://github.com/icinga/icinga2/issues/4314) (DB IDO): Change Ido\*Connection 'categories' attribute to an array
+* [#4295](https://github.com/icinga/icinga2/issues/4295) (DB IDO): Enhance IDO check with schema version info
+* [#4294](https://github.com/icinga/icinga2/issues/4294) (DB IDO): Update DB IDO schema version to 1.14.1
+* [#4290](https://github.com/icinga/icinga2/issues/4290) (API): Implement support for getting a list of global variables from the API
+* [#4281](https://github.com/icinga/icinga2/issues/4281) (API): Support for enumerating available templates via the API
+* [#4268](https://github.com/icinga/icinga2/issues/4268) (Metrics): InfluxDB Metadata
+* [#4206](https://github.com/icinga/icinga2/issues/4206) (Cluster): Add lag threshold for cluster-zone check
+* [#4178](https://github.com/icinga/icinga2/issues/4178) (API): Improve logging for HTTP API requests
+* [#4154](https://github.com/icinga/icinga2/issues/4154) (Configuration): Remove the \(unused\) 'inherits' keyword
+* [#4129](https://github.com/icinga/icinga2/issues/4129) (Configuration): Improve performance for field accesses
+* [#4061](https://github.com/icinga/icinga2/issues/4061) (Configuration): Allow strings in state/type filters
+* [#4048](https://github.com/icinga/icinga2/issues/4048): Cleanup downtimes created by ScheduleDowntime
+* [#4046](https://github.com/icinga/icinga2/issues/4046) (Configuration): Config parser should not log names of included files by default
+* [#3999](https://github.com/icinga/icinga2/issues/3999) (API): ApiListener: Make minimum TLS version configurable
+* [#3997](https://github.com/icinga/icinga2/issues/3997) (API): ApiListener: Force server's preferred cipher
+* [#3911](https://github.com/icinga/icinga2/issues/3911) (Graphite): Add acknowledgement type to Graphite, InfluxDB, OpenTSDB metadata
+* [#3888](https://github.com/icinga/icinga2/issues/3888) (API): Implement SSL cipher configuration support for the API feature
+* [#3763](https://github.com/icinga/icinga2/issues/3763): Add name attribute for WorkQueue class
+* [#3562](https://github.com/icinga/icinga2/issues/3562) (Metrics): Add InfluxDbWriter feature
+* [#3400](https://github.com/icinga/icinga2/issues/3400): Remove the deprecated IcingaStatusWriter feature
+* [#3237](https://github.com/icinga/icinga2/issues/3237) (Metrics): Gelf module: expose 'perfdata' fields for 'CHECK\_RESULT' events
+* [#3224](https://github.com/icinga/icinga2/issues/3224) (Configuration): Implement support for formatting date/time
+* [#3178](https://github.com/icinga/icinga2/issues/3178) (DB IDO): Add SSL support for the IdoMysqlConnection feature
+* [#2970](https://github.com/icinga/icinga2/issues/2970) (Metrics): Add timestamp support for GelfWriter
+* [#2040](https://github.com/icinga/icinga2/issues/2040): Exclude option for TimePeriod definitions
+
+### Bug
+
+* [#4534](https://github.com/icinga/icinga2/issues/4534) (CLI): Icinga2 segfault on startup
+* [#4524](https://github.com/icinga/icinga2/issues/4524) (API): API Remote crash via Google Chrome
+* [#4520](https://github.com/icinga/icinga2/issues/4520) (Configuration): Memory leak when using closures
+* [#4512](https://github.com/icinga/icinga2/issues/4512) (Cluster): Incorrect certificate validation error message
+* [#4511](https://github.com/icinga/icinga2/issues/4511): ClrCheck is null on \*nix
+* [#4505](https://github.com/icinga/icinga2/issues/4505) (CLI): Cannot set ownership for user 'icinga' group 'icinga' on file '/var/lib/icinga2/ca/serial.txt'.
+* [#4504](https://github.com/icinga/icinga2/issues/4504) (API): API: events for DowntimeTriggered does not provide needed information
+* [#4502](https://github.com/icinga/icinga2/issues/4502) (DB IDO): IDO query fails due to key constraint violation for the icinga\_customvariablestatus table
+* [#4501](https://github.com/icinga/icinga2/issues/4501) (Cluster): DB IDO started before daemonizing \(no systemd\)
+* [#4500](https://github.com/icinga/icinga2/issues/4500) (DB IDO): Query for customvariablestatus incorrectly updates the host's/service's insert ID
+* [#4499](https://github.com/icinga/icinga2/issues/4499) (DB IDO): Insert fails for the icinga\_scheduleddowntime table due to duplicate key
+* [#4497](https://github.com/icinga/icinga2/issues/4497): Fix incorrect detection of the 'Concurrency' variable
+* [#4496](https://github.com/icinga/icinga2/issues/4496) (API): API: action schedule-downtime requires a duration also when fixed is true
+* [#4495](https://github.com/icinga/icinga2/issues/4495): Use hash-based serial numbers for new certificates
+* [#4490](https://github.com/icinga/icinga2/issues/4490) (Cluster): ClusterEvents::NotificationSentAllUsersAPIHandler\(\) does not set notified\_users
+* [#4488](https://github.com/icinga/icinga2/issues/4488): Replace GetType\(\)-\>GetName\(\) calls with GetReflectionType\(\)-\>GetName\(\)
+* [#4484](https://github.com/icinga/icinga2/issues/4484) (Cluster): Only allow sending command\_endpoint checks to directly connected child zones
+* [#4483](https://github.com/icinga/icinga2/issues/4483) (DB IDO): ido CheckCommand returns "Could not connect to database server" when HA enabled
+* [#4481](https://github.com/icinga/icinga2/issues/4481) (DB IDO): Fix the "ido" check command for use with command\_endpoint
+* [#4478](https://github.com/icinga/icinga2/issues/4478): CompatUtility::GetCheckableNotificationStateFilter is returning an incorrect value
+* [#4476](https://github.com/icinga/icinga2/issues/4476) (DB IDO): Importing mysql schema fails
+* [#4475](https://github.com/icinga/icinga2/issues/4475) (CLI): pki sign-csr does not log where it is writing the certificate file
+* [#4472](https://github.com/icinga/icinga2/issues/4472) (DB IDO): IDO marks objects as inactive on shutdown
+* [#4471](https://github.com/icinga/icinga2/issues/4471) (DB IDO): IDO does duplicate config updates
+* [#4466](https://github.com/icinga/icinga2/issues/4466) (Configuration): 'use' keyword cannot be used with templates
+* [#4462](https://github.com/icinga/icinga2/issues/4462) (Notifications): Add log message if notifications are forced \(i.e. filters are not checked\)
+* [#4461](https://github.com/icinga/icinga2/issues/4461) (Notifications): Notification resent, even if interval = 0
+* [#4460](https://github.com/icinga/icinga2/issues/4460) (DB IDO): Fixed downtime start does not update actual\_start\_time
+* [#4458](https://github.com/icinga/icinga2/issues/4458): Flexible downtimes should be removed after trigger\_time+duration
+* [#4455](https://github.com/icinga/icinga2/issues/4455): Disallow casting "" to an Object
+* [#4447](https://github.com/icinga/icinga2/issues/4447): Handle I/O errors while writing the Icinga state file more gracefully
+* [#4446](https://github.com/icinga/icinga2/issues/4446) (Notifications): Incorrect downtime notification events
+* [#4444](https://github.com/icinga/icinga2/issues/4444): Fix building Icinga with -fvisibility=hidden
+* [#4439](https://github.com/icinga/icinga2/issues/4439) (Configuration): Icinga doesn't delete temporary icinga2.debug file when config validation fails
+* [#4434](https://github.com/icinga/icinga2/issues/4434) (Notifications): Notification sent too fast when one master fails
+* [#4430](https://github.com/icinga/icinga2/issues/4430) (Cluster): Remove obsolete README files in tools/syntax
+* [#4427](https://github.com/icinga/icinga2/issues/4427) (Notifications): Missing notification for recovery during downtime
+* [#4425](https://github.com/icinga/icinga2/issues/4425) (DB IDO): Change the way outdated comments/downtimes are deleted on restart
+* [#4420](https://github.com/icinga/icinga2/issues/4420) (Notifications): Multiple notifications when master fails
+* [#4418](https://github.com/icinga/icinga2/issues/4418) (DB IDO): icinga2 IDO reload performance significantly slower with latest snapshot release
+* [#4417](https://github.com/icinga/icinga2/issues/4417) (Notifications): Notification interval mistimed
+* [#4413](https://github.com/icinga/icinga2/issues/4413) (DB IDO): icinga2 empties custom variables, host-, service- and contactgroup members at the end of IDO database reconnection
+* [#4412](https://github.com/icinga/icinga2/issues/4412) (Notifications): Reminder notifications ignore HA mode
+* [#4405](https://github.com/icinga/icinga2/issues/4405) (DB IDO): Deprecation warning should include object type and name
+* [#4401](https://github.com/icinga/icinga2/issues/4401) (Metrics): Incorrect escaping / formatting of perfdata to InfluxDB
+* [#4399](https://github.com/icinga/icinga2/issues/4399): Icinga stats min\_execution\_time and max\_execution\_time are invalid
+* [#4394](https://github.com/icinga/icinga2/issues/4394): icinga check reports "-1" for minimum latency and execution time and only uptime has a number but 0
+* [#4391](https://github.com/icinga/icinga2/issues/4391) (DB IDO): Do not clear {host,service,contact}group\_members tables on restart
+* [#4384](https://github.com/icinga/icinga2/issues/4384) (API): Fix URL encoding for '&'
+* [#4380](https://github.com/icinga/icinga2/issues/4380) (Cluster): Increase cluster reconnect interval
+* [#4378](https://github.com/icinga/icinga2/issues/4378) (Notifications): Optimize two ObjectLocks into one in Notification::BeginExecuteNotification method
+* [#4376](https://github.com/icinga/icinga2/issues/4376) (Cluster): CheckerComponent sometimes fails to schedule checks in time
+* [#4375](https://github.com/icinga/icinga2/issues/4375) (Cluster): Duplicate messages for command\_endpoint w/ master and satellite
+* [#4372](https://github.com/icinga/icinga2/issues/4372) (API): state\_filters\_real shouldn't be visible in the API
+* [#4371](https://github.com/icinga/icinga2/issues/4371) (Notifications): notification.notification\_number runtime attribute returning 0 \(instead of 1\) in first notification e-mail
+* [#4370](https://github.com/icinga/icinga2/issues/4370): Test the change with HARD OK transitions
+* [#4363](https://github.com/icinga/icinga2/issues/4363) (DB IDO): IDO module starts threads before daemonize
+* [#4356](https://github.com/icinga/icinga2/issues/4356) (DB IDO): DB IDO query queue does not clean up with v2.4.10-520-g124c80b
+* [#4349](https://github.com/icinga/icinga2/issues/4349) (DB IDO): Add missing index on state history for DB IDO cleanup
+* [#4345](https://github.com/icinga/icinga2/issues/4345): Ensure to clear the SSL error queue before calling SSL\_{read,write,do\_handshake}
+* [#4343](https://github.com/icinga/icinga2/issues/4343) (Configuration): include\_recursive should gracefully handle inaccessible files
+* [#4341](https://github.com/icinga/icinga2/issues/4341) (API): Icinga incorrectly disconnects all endpoints if one has a wrong certificate
+* [#4340](https://github.com/icinga/icinga2/issues/4340) (DB IDO): deadlock in ido reconnect
+* [#4329](https://github.com/icinga/icinga2/issues/4329) (Metrics): Key Escapes in InfluxDB Writer Don't Work
+* [#4313](https://github.com/icinga/icinga2/issues/4313) (Configuration): Icinga crashes when using include\_recursive in an object definition
+* [#4309](https://github.com/icinga/icinga2/issues/4309) (Configuration): ConfigWriter::EmitScope incorrectly quotes dictionary keys
+* [#4300](https://github.com/icinga/icinga2/issues/4300) (DB IDO): Comment/Downtime delete queries are slow
+* [#4293](https://github.com/icinga/icinga2/issues/4293) (DB IDO): Overflow in current\_notification\_number column in DB IDO MySQL
+* [#4287](https://github.com/icinga/icinga2/issues/4287) (DB IDO): Program status table is not updated in IDO after starting icinga
+* [#4283](https://github.com/icinga/icinga2/issues/4283) (Cluster): Icinga 2 satellite crashes
+* [#4278](https://github.com/icinga/icinga2/issues/4278) (DB IDO): SOFT state changes with the same state are not logged
+* [#4275](https://github.com/icinga/icinga2/issues/4275) (API): Trying to delete an object protected by a permissions filter ends up deleting all objects that match the filter instead
+* [#4274](https://github.com/icinga/icinga2/issues/4274) (Notifications): Duplicate notifications
+* [#4264](https://github.com/icinga/icinga2/issues/4264) (Metrics): InfluxWriter doesn't sanitize the data before sending
+* [#4259](https://github.com/icinga/icinga2/issues/4259): Flapping Notifications dependent on state change
+* [#4258](https://github.com/icinga/icinga2/issues/4258): last SOFT state should be hard \(max\_check\_attempts\)
+* [#4257](https://github.com/icinga/icinga2/issues/4257) (Configuration): Incorrect custom variable name in the hosts.conf example config
+* [#4255](https://github.com/icinga/icinga2/issues/4255) (Configuration): Config validation should not delete comments/downtimes w/o reference
+* [#4244](https://github.com/icinga/icinga2/issues/4244): SOFT OK-state after returning from a soft state
+* [#4239](https://github.com/icinga/icinga2/issues/4239) (Notifications): Downtime notifications do not pass author and comment
+* [#4232](https://github.com/icinga/icinga2/issues/4232): Problems with check scheduling for HARD state changes \(standalone/command\_endpoint\)
+* [#4231](https://github.com/icinga/icinga2/issues/4231) (DB IDO): Volatile check results for OK-\>OK transitions are logged into DB IDO statehistory
+* [#4187](https://github.com/icinga/icinga2/issues/4187): Icinga 2 client gets killed during network scans
+* [#4171](https://github.com/icinga/icinga2/issues/4171) (DB IDO): Outdated downtime/comments not removed from IDO database \(restart\)
+* [#4134](https://github.com/icinga/icinga2/issues/4134) (Configuration): Don't allow flow control keywords outside of other flow control constructs
+* [#4121](https://github.com/icinga/icinga2/issues/4121) (Notifications): notification interval = 0 not honoured in HA clusters
+* [#4106](https://github.com/icinga/icinga2/issues/4106) (Notifications): last\_problem\_notification should be synced in HA cluster
+* [#4077](https://github.com/icinga/icinga2/issues/4077): Numbers are not properly formatted in runtime macro strings
+* [#4002](https://github.com/icinga/icinga2/issues/4002): Don't violate POSIX by ensuring that the argument to usleep\(3\) is less than 1000000
+* [#3954](https://github.com/icinga/icinga2/issues/3954) (Cluster): High load when pinning command endpoint on HA cluster
+* [#3949](https://github.com/icinga/icinga2/issues/3949) (DB IDO): IDO: entry\_time of all comments is set to the date and time when Icinga 2 was restarted
+* [#3902](https://github.com/icinga/icinga2/issues/3902): Hang in TlsStream::Handshake
+* [#3820](https://github.com/icinga/icinga2/issues/3820) (Configuration): High CPU usage with self-referenced parent zone config
+* [#3805](https://github.com/icinga/icinga2/issues/3805) (Metrics): GELF multi-line output
+* [#3627](https://github.com/icinga/icinga2/issues/3627) (API): /v1 returns HTML even if JSON is requested
+* [#3486](https://github.com/icinga/icinga2/issues/3486) (Notifications): Notification times w/ empty begin/end specifications prevent sending notifications
+* [#3370](https://github.com/icinga/icinga2/issues/3370): Race condition in CreatePipeOverlapped
+* [#3365](https://github.com/icinga/icinga2/issues/3365) (DB IDO): IDO: there is no usable object index on icinga\_{scheduleddowntime,comments}
+* [#3364](https://github.com/icinga/icinga2/issues/3364) (DB IDO): IDO: check\_source should not be a TEXT field
+* [#3361](https://github.com/icinga/icinga2/issues/3361) (DB IDO): Missing indexes for icinga\_endpoints\* and icinga\_zones\* tables in DB IDO schema
+* [#3355](https://github.com/icinga/icinga2/issues/3355) (DB IDO): IDO: icinga\_host/service\_groups alias columns are TEXT columns
+* [#3229](https://github.com/icinga/icinga2/issues/3229): Function::Invoke should optionally register ScriptFrame
+* [#2996](https://github.com/icinga/icinga2/issues/2996) (Cluster): Custom notification external commands do not work in a master-master setup
+* [#2039](https://github.com/icinga/icinga2/issues/2039): Disable immediate hard state after first checkresult
+
+### ITL
+
+* [#4518](https://github.com/icinga/icinga2/issues/4518) (ITL): ITL uses unsupported arguments for check\_swap on Debian wheezy/Ubuntu trusty
+* [#4506](https://github.com/icinga/icinga2/issues/4506) (ITL): Add interfacetable CheckCommand options --trafficwithpkt and --snmp-maxmsgsize
+* [#4477](https://github.com/icinga/icinga2/issues/4477) (ITL): Add perfsyntax parameter to nscp-local-counter CheckCommand
+* [#4456](https://github.com/icinga/icinga2/issues/4456) (ITL): Add custom variables for all check\_swap arguments
+* [#4437](https://github.com/icinga/icinga2/issues/4437) (ITL): Add command definition for check\_mysql\_query
+* [#4421](https://github.com/icinga/icinga2/issues/4421) (ITL): -q option for check\_ntp\_time is wrong
+* [#4416](https://github.com/icinga/icinga2/issues/4416) (ITL): Add check command definition for check\_graphite
+* [#4397](https://github.com/icinga/icinga2/issues/4397) (ITL): A lot of missing parameters for \(latest\) mysql\_health
+* [#4379](https://github.com/icinga/icinga2/issues/4379) (ITL): Add support for "-A" command line switch to CheckCommand "snmp-process"
+* [#4359](https://github.com/icinga/icinga2/issues/4359) (ITL): ITL: check\_iftraffic64.pl default values, wrong postfix value in CheckCommand
+* [#4332](https://github.com/icinga/icinga2/issues/4332) (ITL): Add check command definition for db2\_health
+* [#4305](https://github.com/icinga/icinga2/issues/4305) (ITL): Add check command definitions for kdc and rbl
+* [#4297](https://github.com/icinga/icinga2/issues/4297) (ITL): Add check command for plugin check\_apache\_status
+* [#4276](https://github.com/icinga/icinga2/issues/4276) (ITL): Adding option to access ifName for manubulon snmp-interface check command
+* [#4254](https://github.com/icinga/icinga2/issues/4254) (ITL): Add "fuse.gvfsd-fuse" to the list of excluded file systems for check\_disk
+* [#4250](https://github.com/icinga/icinga2/issues/4250) (ITL): Add CIM port parameter for esxi\_hardware CheckCommand
+* [#4023](https://github.com/icinga/icinga2/issues/4023) (ITL): Add "retries" option to check\_snmp command
+* [#3711](https://github.com/icinga/icinga2/issues/3711) (ITL): icinga2.conf: Include plugins-contrib, manubulon, windows-plugins, nscp by default
+* [#3683](https://github.com/icinga/icinga2/issues/3683) (ITL): Add IPv4/IPv6 support to the rest of the monitoring-plugins
+* [#3012](https://github.com/icinga/icinga2/issues/3012) (ITL): Extend CheckCommand definitions for nscp-local
+
+### Documentation
+
+* [#4521](https://github.com/icinga/icinga2/issues/4521) (Documentation): Typo in Notification object documentation
+* [#4517](https://github.com/icinga/icinga2/issues/4517) (Documentation): Documentation is missing for the API permissions that are new in 2.5.0
+* [#4513](https://github.com/icinga/icinga2/issues/4513) (Documentation): Development docs: Add own section for gdb backtrace from a running process
+* [#4510](https://github.com/icinga/icinga2/issues/4510) (Documentation): Docs: API example uses wrong attribute name
+* [#4489](https://github.com/icinga/icinga2/issues/4489) (Documentation): Missing documentation for "legacy-timeperiod" template
+* [#4470](https://github.com/icinga/icinga2/issues/4470) (Documentation): The description for the http\_certificate attribute doesn't have the right default value
+* [#4468](https://github.com/icinga/icinga2/issues/4468) (Documentation): Add URL and short description for Monitoring Plugins inside the ITL documentation
+* [#4453](https://github.com/icinga/icinga2/issues/4453) (Documentation): Rewrite Client and Cluster chapter; add service monitoring chapter
+* [#4419](https://github.com/icinga/icinga2/issues/4419) (Documentation): Incorrect API permission name for /v1/status in the documentation
+* [#4396](https://github.com/icinga/icinga2/issues/4396) (Documentation): Missing explanation for three level clusters with CSR auto-signing
+* [#4395](https://github.com/icinga/icinga2/issues/4395) (Documentation): Incorrect documentation about apply rules in zones.d directories
+* [#4387](https://github.com/icinga/icinga2/issues/4387) (Documentation): Improve author information about check\_yum
+* [#4361](https://github.com/icinga/icinga2/issues/4361) (Documentation): pkg-config is not listed as a build requirement in INSTALL.md
+* [#4337](https://github.com/icinga/icinga2/issues/4337) (Documentation): Add a note to the docs that API POST updates to custom attributes/groups won't trigger re-evaluation
+* [#4333](https://github.com/icinga/icinga2/issues/4333) (Documentation): Documentation: Setting up Plugins section is broken
+* [#4328](https://github.com/icinga/icinga2/issues/4328) (Documentation): Typo in Manubulon CheckCommand documentation
+* [#4318](https://github.com/icinga/icinga2/issues/4318) (Documentation): Migration docs still show unsupported CHANGE\_\*MODATTR external commands
+* [#4306](https://github.com/icinga/icinga2/issues/4306) (Documentation): Add a note about creating Zone/Endpoint objects with the API
+* [#4299](https://github.com/icinga/icinga2/issues/4299) (Documentation): Incorrect URL for API examples in the documentation
+* [#4265](https://github.com/icinga/icinga2/issues/4265) (Documentation): Improve "Endpoint" documentation
+* [#4263](https://github.com/icinga/icinga2/issues/4263) (Documentation): Fix systemd client command formatting
+* [#4238](https://github.com/icinga/icinga2/issues/4238) (Documentation): Missing quotes for API action URL
+* [#4236](https://github.com/icinga/icinga2/issues/4236) (Documentation): Use HTTPS for debmon.org links in the documentation
+* [#4217](https://github.com/icinga/icinga2/issues/4217) (Documentation): node setup: Add a note for --endpoint syntax for client-master connection
+* [#4124](https://github.com/icinga/icinga2/issues/4124) (Documentation): Documentation review
+* [#3612](https://github.com/icinga/icinga2/issues/3612) (Documentation): Update SELinux documentation
+
+### Support
+
+* [#4526](https://github.com/icinga/icinga2/issues/4526) (Packages): Revert dependency on firewalld on RHEL
+* [#4494](https://github.com/icinga/icinga2/issues/4494) (Installation): Remove unused functions from icinga-installer
+* [#4452](https://github.com/icinga/icinga2/issues/4452) (Packages): Error compiling on windows due to changes in apilistener around minimum tls version
+* [#4432](https://github.com/icinga/icinga2/issues/4432) (Packages): Windows build broken since ref 11292
+* [#4404](https://github.com/icinga/icinga2/issues/4404) (Installation): Increase default systemd timeout
+* [#4344](https://github.com/icinga/icinga2/issues/4344) (Packages): Build fails with Visual Studio 2013
+* [#4327](https://github.com/icinga/icinga2/issues/4327) (Packages): Icinga fails to build with OpenSSL 1.1.0
+* [#4251](https://github.com/icinga/icinga2/issues/4251) (Tests): Add debugging mode for Utility::GetTime
+* [#4234](https://github.com/icinga/icinga2/issues/4234) (Tests): Boost tests are missing a dependency on libmethods
+* [#4230](https://github.com/icinga/icinga2/issues/4230) (Installation): Windows: Error with repository handler \(missing /var/lib/icinga2/api/repository path\)
+* [#4211](https://github.com/icinga/icinga2/issues/4211) (Packages): Incorrect filter in pick.py
+* [#4190](https://github.com/icinga/icinga2/issues/4190) (Packages): Windows Installer: Remove dependency on KB2999226 package
+* [#4148](https://github.com/icinga/icinga2/issues/4148) (Packages): RPM update starts disabled icinga2 service
+* [#4147](https://github.com/icinga/icinga2/issues/4147) (Packages): Reload permission error with SELinux
+* [#4135](https://github.com/icinga/icinga2/issues/4135) (Installation): Add script for automatically cherry-picking commits for minor versions
+* [#3829](https://github.com/icinga/icinga2/issues/3829) (Packages): Provide packages for icinga-studio on Fedora
+* [#3708](https://github.com/icinga/icinga2/issues/3708) (Packages): Firewalld Service definition for Icinga
+* [#2606](https://github.com/icinga/icinga2/issues/2606) (Packages): Package for syntax highlighting
+
+## 2.4.10 (2016-05-19)
+
+### Notes
+
+* Bugfixes
+
+### Bug
+
+* [#4227](https://github.com/icinga/icinga2/issues/4227): Checker component doesn't execute any checks for command\_endpoint
+
+## 2.4.9 (2016-05-19)
+
+### Notes
+
+This release fixes a number of issues introduced in 2.4.8.
+
+### Bug
+
+* [#4225](https://github.com/icinga/icinga2/issues/4225) (Compat): Command Pipe thread 100% CPU Usage
+* [#4224](https://github.com/icinga/icinga2/issues/4224): Checks are not executed anymore on command
+* [#4222](https://github.com/icinga/icinga2/issues/4222) (Configuration): Segfault when trying to start 2.4.8
+* [#4221](https://github.com/icinga/icinga2/issues/4221) (Metrics): Error: Function call 'rename' for file '/var/spool/icinga2/tmp/service-perfdata' failed with error code 2, 'No such file or directory'
+
+## 2.4.8 (2016-05-17)
+
+### Notes
+
+* Bugfixes
+* Support for limiting the maximum number of concurrent checks (new configuration option; see the sketch below)
+* HA-aware features now wait for connected cluster nodes in the same zone (e.g. DB IDO)
+* The 'icinga' check now alerts on failed reloads
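+
+A minimal sketch of the new concurrent-check limit, assuming it is exposed as the `MaxConcurrentChecks` constant (set e.g. in `constants.conf`); the value shown is illustrative, so check the 2.4.8 documentation for the authoritative name and default:
+
+```
+/* Sketch: cap how many active checks Icinga 2 runs in parallel. */
+const MaxConcurrentChecks = 512
+```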
+
+### Enhancement
+
+* [#4203](https://github.com/icinga/icinga2/issues/4203) (Cluster): Only activate HARunOnce objects once there's a cluster connection
+* [#4198](https://github.com/icinga/icinga2/issues/4198): Move CalculateExecutionTime and CalculateLatency into the CheckResult class
+* [#4196](https://github.com/icinga/icinga2/issues/4196) (Cluster): Remove unused cluster commands
+* [#4149](https://github.com/icinga/icinga2/issues/4149) (CLI): Implement SNI support for the CLI commands
+* [#4103](https://github.com/icinga/icinga2/issues/4103): Add support for subjectAltName in SSL certificates
+* [#3919](https://github.com/icinga/icinga2/issues/3919) (Configuration): Internal check for config problems
+* [#3321](https://github.com/icinga/icinga2/issues/3321): "icinga" check should have state WARNING when the last reload failed
+* [#2993](https://github.com/icinga/icinga2/issues/2993) (Metrics): PerfdataWriter: Better failure handling for file renames across file systems
+* [#2896](https://github.com/icinga/icinga2/issues/2896) (Cluster): Alert config reload failures with the icinga check
+* [#2468](https://github.com/icinga/icinga2/issues/2468): Maximum concurrent service checks
+
+### Bug
+
+* [#4219](https://github.com/icinga/icinga2/issues/4219) (DB IDO): Postgresql warnings on startup
+* [#4212](https://github.com/icinga/icinga2/issues/4212): assertion failed: GetResumeCalled\(\)
+* [#4210](https://github.com/icinga/icinga2/issues/4210) (API): Incorrect variable names for joined fields in filters
+* [#4204](https://github.com/icinga/icinga2/issues/4204) (DB IDO): Ensure that program status updates are immediately updated in DB IDO
+* [#4202](https://github.com/icinga/icinga2/issues/4202) (API): API: Missing error handling for invalid JSON request body
+* [#4182](https://github.com/icinga/icinga2/issues/4182): Crash in UnameHelper
+* [#4180](https://github.com/icinga/icinga2/issues/4180): Expired downtimes are not removed
+* [#4170](https://github.com/icinga/icinga2/issues/4170) (API): Icinga crash with the workflow Create Host -\> Downtime for the Host -\> Delete Downtime -\> Remove Host
+* [#4145](https://github.com/icinga/icinga2/issues/4145) (Configuration): Wrong log severity causes segfault
+* [#4120](https://github.com/icinga/icinga2/issues/4120): notification sent out during flexible downtime
+* [#4038](https://github.com/icinga/icinga2/issues/4038) (API): inconsistent API /v1/objects/\* response for PUT requests
+* [#4037](https://github.com/icinga/icinga2/issues/4037) (Compat): Command pipe overloaded: Can't send external Icinga command to the local command file
+* [#4029](https://github.com/icinga/icinga2/issues/4029) (API): Icinga2 API: deleting service with cascade=1 does not delete dependent notification
+* [#3938](https://github.com/icinga/icinga2/issues/3938): Crash with empty ScheduledDowntime 'ranges' attribute
+* [#3932](https://github.com/icinga/icinga2/issues/3932): "day -X" time specifications are parsed incorrectly
+* [#3912](https://github.com/icinga/icinga2/issues/3912) (Compat): Empty author/text attribute for comment/downtimes external commands causing crash
+* [#3881](https://github.com/icinga/icinga2/issues/3881) (Cluster): Icinga2 agent gets stuck after disconnect and won't relay messages
+* [#3707](https://github.com/icinga/icinga2/issues/3707) (Configuration): Comments and downtimes of deleted checkable objects are not deleted
+* [#3526](https://github.com/icinga/icinga2/issues/3526): Icinga crashes with a segfault on receiving a lot of check results for nonexisting hosts/services
+* [#3316](https://github.com/icinga/icinga2/issues/3316) (Configuration): Service apply without name possible
+
+### ITL
+
+* [#4184](https://github.com/icinga/icinga2/issues/4184) (ITL): 'disk' CheckCommand: Exclude 'cgroup' and 'tracefs' by default
+* [#3634](https://github.com/icinga/icinga2/issues/3634) (ITL): Provide icingacli in the ITL
+
+### Documentation
+
+* [#4205](https://github.com/icinga/icinga2/issues/4205) (Documentation): Add the category to the generated changelog
+* [#4193](https://github.com/icinga/icinga2/issues/4193) (Documentation): Missing documentation for event commands w/ execution bridge
+* [#4144](https://github.com/icinga/icinga2/issues/4144) (Documentation): Incorrect chapter headings for Object\#to\_string and Object\#type
+
+### Support
+
+* [#4146](https://github.com/icinga/icinga2/issues/4146) (Packages): Update chocolatey packages and RELEASE.md
+
+## 2.4.7 (2016-04-21)
+
+### Notes
+
+* Bugfixes
+
+### Bug
+
+* [#4142](https://github.com/icinga/icinga2/issues/4142) (DB IDO): Crash in IdoMysqlConnection::ExecuteMultipleQueries
+
+## 2.4.6 (2016-04-20)
+
+### Notes
+
+* Bugfixes
+
+### Bug
+
+* [#4140](https://github.com/icinga/icinga2/issues/4140) (DB IDO): Failed assertion in IdoPgsqlConnection::FieldToEscapedString
+
+### Documentation
+
+* [#4141](https://github.com/icinga/icinga2/issues/4141) (Documentation): Update RELEASE.md
+* [#4136](https://github.com/icinga/icinga2/issues/4136) (Documentation): Docs: Zone attribute 'endpoints' is an array
+
+### Support
+
+* [#4139](https://github.com/icinga/icinga2/issues/4139) (Packages): Icinga 2 fails to build on Ubuntu Xenial
+
+## 2.4.5 (2016-04-20)
+
+### Notes
+
+* Windows Installer changed from NSIS to MSI
+* New configuration attribute for hosts and services: check_timeout (overrides the CheckCommand's timeout when set; see the sketch below)
+* ITL updates
+* Lots of bugfixes
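+
+A minimal sketch of the new `check_timeout` attribute, using hypothetical object names; the attribute overrides the timeout defined in the CheckCommand for this particular checkable:
+
+```
+object Service "disk" {
+  host_name = "example-host"   // hypothetical host name
+  check_command = "disk"
+  check_timeout = 30s          // overrides the CheckCommand's timeout for this service
+}
+```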
+
+### Enhancement
+
+* [#3023](https://github.com/icinga/icinga2/issues/3023) (Configuration): Implement support for overriding check command timeout
+
+### Bug
+
+* [#4131](https://github.com/icinga/icinga2/issues/4131) (Configuration): Vim Syntax Highlighting does not work with assign where
+* [#4116](https://github.com/icinga/icinga2/issues/4116) (API): icinga2 crashes when a command\_endpoint is set, but the api feature is not active
+* [#4114](https://github.com/icinga/icinga2/issues/4114): Compiler warning in NotifyActive
+* [#4109](https://github.com/icinga/icinga2/issues/4109) (API): Navigation attributes are missing in /v1/objects/\<type\>
+* [#4104](https://github.com/icinga/icinga2/issues/4104) (Configuration): Segfault during config validation if host exists, service does not exist any longer and downtime expires
+* [#4095](https://github.com/icinga/icinga2/issues/4095): DowntimesExpireTimerHandler crashes Icinga2 with \<unknown function\>
+* [#4089](https://github.com/icinga/icinga2/issues/4089): Make the socket event engine configurable
+* [#4078](https://github.com/icinga/icinga2/issues/4078) (Configuration): Overwriting global type variables causes crash in ConfigItem::Commit\(\)
+* [#4076](https://github.com/icinga/icinga2/issues/4076) (API): API User gets wrongly authenticated \(client\_cn and no password\)
+* [#4066](https://github.com/icinga/icinga2/issues/4066): ConfigSync broken from 2.4.3 to 2.4.4 under Windows
+* [#4056](https://github.com/icinga/icinga2/issues/4056) (CLI): Remove semi-colons in the auto-generated configs
+* [#4052](https://github.com/icinga/icinga2/issues/4052) (API): Config validation for Notification objects should check whether the state filters are valid
+* [#4035](https://github.com/icinga/icinga2/issues/4035) (DB IDO): IDO: historical contact notifications table column notification\_id is off-by-one
+* [#4031](https://github.com/icinga/icinga2/issues/4031): Downtimes are not always activated/expired on restart
+* [#4016](https://github.com/icinga/icinga2/issues/4016): Symlink subfolders not followed/considered for config files
+* [#4014](https://github.com/icinga/icinga2/issues/4014): Use retry\_interval instead of check\_interval for first OK -\> NOT-OK state change
+* [#3973](https://github.com/icinga/icinga2/issues/3973) (Cluster): Downtimes and Comments are not synced to child zones
+* [#3970](https://github.com/icinga/icinga2/issues/3970) (API): Socket Exceptions \(Operation not permitted\) while reading from API
+* [#3907](https://github.com/icinga/icinga2/issues/3907) (Configuration): Too many assign where filters cause stack overflow
+* [#3780](https://github.com/icinga/icinga2/issues/3780) (DB IDO): DB IDO: downtime is not in effect after restart
+
+### ITL
+
+* [#3953](https://github.com/icinga/icinga2/issues/3953) (ITL): Add --units, --rate and --rate-multiplier support for the snmpv3 check command
+* [#3903](https://github.com/icinga/icinga2/issues/3903) (ITL): Add --method parameter for check\_{oracle,mysql,mssql}\_health CheckCommands
+
+### Documentation
+
+* [#4122](https://github.com/icinga/icinga2/issues/4122) (Documentation): Remove instance\_name from Ido\*Connection example
+* [#4108](https://github.com/icinga/icinga2/issues/4108) (Documentation): Incorrect link in the documentation
+* [#4080](https://github.com/icinga/icinga2/issues/4080) (Documentation): Update documentation URL for Icinga Web 2
+* [#4058](https://github.com/icinga/icinga2/issues/4058) (Documentation): Docs: Cluster manual SSL generation formatting is broken
+* [#4057](https://github.com/icinga/icinga2/issues/4057) (Documentation): Update the CentOS installation documentation
+* [#4055](https://github.com/icinga/icinga2/issues/4055) (Documentation): Add silent install / reference to NSClient++ to documentation
+* [#4043](https://github.com/icinga/icinga2/issues/4043) (Documentation): Docs: Remove the migration script chapter
+* [#4041](https://github.com/icinga/icinga2/issues/4041) (Documentation): Explain how to use functions for wildcard matches for arrays and/or dictionaries in assign where expressions
+* [#4039](https://github.com/icinga/icinga2/issues/4039) (Documentation): Update .mailmap for Markus Frosch
+* [#3145](https://github.com/icinga/icinga2/issues/3145) (Documentation): Add Windows setup wizard screenshots
+
+### Support
+
+* [#4127](https://github.com/icinga/icinga2/issues/4127) (Installation): Windows installer does not copy "features-enabled" on upgrade
+* [#4119](https://github.com/icinga/icinga2/issues/4119) (Installation): Update chocolatey uninstall script for the MSI package
+* [#4118](https://github.com/icinga/icinga2/issues/4118) (Installation): icinga2-installer.exe doesn't wait until NSIS uninstall.exe exits
+* [#4117](https://github.com/icinga/icinga2/issues/4117) (Installation): Make sure to update the agent wizard banner
+* [#4113](https://github.com/icinga/icinga2/issues/4113) (Installation): Package fails to build on \*NIX
+* [#4099](https://github.com/icinga/icinga2/issues/4099) (Installation): make install overwrites configuration files
+* [#4074](https://github.com/icinga/icinga2/issues/4074) (Installation): FatalError\(\) returns when called before Application.Run
+* [#4073](https://github.com/icinga/icinga2/issues/4073) (Installation): Install 64-bit version of NSClient++ on 64-bit versions of Windows
+* [#4072](https://github.com/icinga/icinga2/issues/4072) (Installation): Update NSClient++ to version 0.4.4.19
+* [#4069](https://github.com/icinga/icinga2/issues/4069) (Installation): Error compiling icinga2 targeted for x64 on Windows
+* [#4064](https://github.com/icinga/icinga2/issues/4064) (Packages): Build 64-bit packages for Windows
+* [#4053](https://github.com/icinga/icinga2/issues/4053) (Installation): Icinga 2 Windows Agent does not honor install path during upgrade
+* [#4032](https://github.com/icinga/icinga2/issues/4032) (Packages): Remove dependency for .NET 3.5 from the chocolatey package
+* [#3988](https://github.com/icinga/icinga2/issues/3988) (Packages): Incorrect base URL in the icinga-rpm-release packages for Fedora
+* [#3658](https://github.com/icinga/icinga2/issues/3658) (Packages): Add application manifest for the Windows agent wizard
+* [#2998](https://github.com/icinga/icinga2/issues/2998) (Installation): logrotate fails since the "su" directive was removed
+
+## 2.4.4 (2016-03-16)
+
+### Notes
+
+* Bugfixes
+
+### Bug
+
+* [#4036](https://github.com/icinga/icinga2/issues/4036) (CLI): Add the executed cli command to the Windows wizard error messages
+* [#4019](https://github.com/icinga/icinga2/issues/4019) (Configuration): Segmentation fault during 'icinga2 daemon -C'
+* [#4017](https://github.com/icinga/icinga2/issues/4017) (CLI): 'icinga2 feature list' fails when all features are disabled
+* [#4008](https://github.com/icinga/icinga2/issues/4008) (Configuration): Windows wizard error "too many arguments"
+* [#4006](https://github.com/icinga/icinga2/issues/4006): Volatile transitions from HARD NOT-OK-\>NOT-OK do not trigger notifications
+* [#3996](https://github.com/icinga/icinga2/issues/3996): epoll\_ctl might cause oops on Ubuntu trusty
+* [#3990](https://github.com/icinga/icinga2/issues/3990): Services status updated multiple times within check\_interval even though no retry was triggered
+* [#3987](https://github.com/icinga/icinga2/issues/3987): Incorrect check interval when passive check results are used
+* [#3985](https://github.com/icinga/icinga2/issues/3985): Active checks are executed even though passive results are submitted
+* [#3981](https://github.com/icinga/icinga2/issues/3981): DEL\_DOWNTIME\_BY\_HOST\_NAME does not accept optional arguments
+* [#3961](https://github.com/icinga/icinga2/issues/3961) (CLI): Wrong log message for trusted cert in node setup command
+* [#3939](https://github.com/icinga/icinga2/issues/3939) (CLI): Common name in node wizard isn't case sensitive
+* [#3745](https://github.com/icinga/icinga2/issues/3745) (API): Status code 200 even if an object could not be deleted.
+* [#3742](https://github.com/icinga/icinga2/issues/3742) (DB IDO): DB IDO: User notification type filters are incorrect
+* [#3442](https://github.com/icinga/icinga2/issues/3442) (API): MkDirP not working on Windows
+* [#3439](https://github.com/icinga/icinga2/issues/3439) (Notifications): Host notification type is PROBLEM but should be RECOVERY
+* [#3303](https://github.com/icinga/icinga2/issues/3303) (Notifications): Problem notifications while Flapping is active
+* [#3153](https://github.com/icinga/icinga2/issues/3153) (Notifications): Flapping notifications are sent for hosts/services which are in a downtime
+
+### ITL
+
+* [#3958](https://github.com/icinga/icinga2/issues/3958) (ITL): Add "query" option to check\_postgres command.
+* [#3908](https://github.com/icinga/icinga2/issues/3908) (ITL): ITL: Missing documentation for nwc\_health "mode" parameter
+* [#3484](https://github.com/icinga/icinga2/issues/3484) (ITL): ITL: Allow to enforce specific SSL versions using the http check command
+
+### Documentation
+
+* [#4033](https://github.com/icinga/icinga2/issues/4033) (Documentation): Update development docs to use 'thread apply all bt full'
+* [#4018](https://github.com/icinga/icinga2/issues/4018) (Documentation): Docs: Add API examples for creating services and check commands
+* [#4009](https://github.com/icinga/icinga2/issues/4009) (Documentation): Typo in API docs
+* [#3845](https://github.com/icinga/icinga2/issues/3845) (Documentation): Explain how to join hosts/services for /v1/objects/comments
+* [#3755](https://github.com/icinga/icinga2/issues/3755) (Documentation): http check's URI is really just Path
+
+### Support
+
+* [#4027](https://github.com/icinga/icinga2/issues/4027) (Packages): Chocolatey package is missing uninstall function
+* [#4011](https://github.com/icinga/icinga2/issues/4011) (Packages): Update build requirements for SLES 11 SP4
+* [#3960](https://github.com/icinga/icinga2/issues/3960) (Installation): CMake does not find MySQL libraries on Windows
+
+## 2.4.3 (2016-02-24)
+
+### Notes
+
+* Bugfixes
+
+### Bug
+
+* [#3963](https://github.com/icinga/icinga2/issues/3963): Wrong permissions for files in /var/cache/icinga2/\*
+* [#3962](https://github.com/icinga/icinga2/issues/3962) (Configuration): Permission problem after running icinga2 node wizard
+
+## 2.4.2 (2016-02-23)
+
+### Notes
+
+* ITL
+    * Additional arguments for check_disk
+    * Fix incorrect path for the check_hpasm plugin
+    * New command: check_iostat
+    * Fix incorrect variable names for the check_impi plugin
+* Cluster
+    * Improve cluster performance
+    * Fix connection handling problems (multiple connections for the same endpoint)
+* Performance improvements for the DB IDO modules
+* Lots of other bugfixes
+* Documentation updates
+
+### Enhancement
+
+* [#3878](https://github.com/icinga/icinga2/issues/3878) (Configuration): Add String\#trim
+* [#3857](https://github.com/icinga/icinga2/issues/3857) (Cluster): Support TLSv1.1 and TLSv1.2 for the cluster transport encryption
+* [#3810](https://github.com/icinga/icinga2/issues/3810) (Plugins): Add Timeout parameter to snmpv3 check
+* [#3785](https://github.com/icinga/icinga2/issues/3785) (DB IDO): Log DB IDO query queue stats
+* [#3784](https://github.com/icinga/icinga2/issues/3784) (DB IDO): DB IDO: Add a log message when the connection handling is completed
+* [#3760](https://github.com/icinga/icinga2/issues/3760) (Configuration): Raise a config error for "Checkable" objects in global zones
+* [#3754](https://github.com/icinga/icinga2/issues/3754) (Plugins): Add "-x" parameter in command definition for disk-windows CheckCommand
+
+### Bug
+
+* [#3957](https://github.com/icinga/icinga2/issues/3957) (CLI): "node setup" tries to chown\(\) files before they're created
+* [#3947](https://github.com/icinga/icinga2/issues/3947): CentOS 5 doesn't support epoll\_create1
+* [#3922](https://github.com/icinga/icinga2/issues/3922) (Configuration): YYYY-MM-DD time specs are parsed incorrectly
+* [#3915](https://github.com/icinga/icinga2/issues/3915) (API): Connections are not cleaned up properly
+* [#3913](https://github.com/icinga/icinga2/issues/3913) (Cluster): Cluster WQ thread dies after fork\(\)
+* [#3910](https://github.com/icinga/icinga2/issues/3910): Clean up unused variables a bit
+* [#3905](https://github.com/icinga/icinga2/issues/3905) (DB IDO): Problem with hostgroup\_members table cleanup
+* [#3898](https://github.com/icinga/icinga2/issues/3898) (API): API queries on non-existent objects cause exception
+* [#3897](https://github.com/icinga/icinga2/issues/3897) (Configuration): Crash in ConfigItem::RunWithActivationContext
+* [#3896](https://github.com/icinga/icinga2/issues/3896) (Cluster): Ensure that config sync updates are always sent on reconnect
+* [#3889](https://github.com/icinga/icinga2/issues/3889) (DB IDO): Deleting an object via API does not disable it in DB IDO
+* [#3871](https://github.com/icinga/icinga2/issues/3871) (Cluster): Master reloads with agents generate false alarms
+* [#3870](https://github.com/icinga/icinga2/issues/3870) (DB IDO): next\_check noise in the IDO
+* [#3866](https://github.com/icinga/icinga2/issues/3866) (Cluster): Check event duplication with parallel connections involved
+* [#3863](https://github.com/icinga/icinga2/issues/3863) (Cluster): Segfault in ApiListener::ConfigUpdateObjectAPIHandler
+* [#3859](https://github.com/icinga/icinga2/issues/3859): Stream buffer size is 512 bytes, could be raised
+* [#3858](https://github.com/icinga/icinga2/issues/3858) (CLI): Escaped sequences not properly generated with 'node update-config'
+* [#3848](https://github.com/icinga/icinga2/issues/3848) (Configuration): Mistake in mongodb command definition \(mongodb\_replicaset\)
+* [#3843](https://github.com/icinga/icinga2/issues/3843): Modified attributes do not work for the IcingaApplication object w/ external commands
+* [#3835](https://github.com/icinga/icinga2/issues/3835) (Cluster): high load and memory consumption on icinga2 agent v2.4.1
+* [#3827](https://github.com/icinga/icinga2/issues/3827) (Configuration): Icinga state file corruption with temporary file creation
+* [#3817](https://github.com/icinga/icinga2/issues/3817) (Cluster): Cluster config sync: Ensure that /var/lib/icinga2/api/zones/\* exists
+* [#3816](https://github.com/icinga/icinga2/issues/3816) (Cluster): Exception stack trace on icinga2 client when the master reloads the configuration
+* [#3812](https://github.com/icinga/icinga2/issues/3812) (API): API actions: Decide whether fixed: false is the right default
+* [#3798](https://github.com/icinga/icinga2/issues/3798) (DB IDO): is\_active in IDO is only re-enabled on "every second" restart
+* [#3797](https://github.com/icinga/icinga2/issues/3797): Remove superfluous \#ifdef
+* [#3794](https://github.com/icinga/icinga2/issues/3794) (DB IDO): Icinga2 crashes in IDO when removing a comment
+* [#3787](https://github.com/icinga/icinga2/issues/3787) (CLI): "repository add" cli command writes invalid "type" attribute
+* [#3786](https://github.com/icinga/icinga2/issues/3786) (DB IDO): Evaluate if CanExecuteQuery/FieldToEscapedString lead to exceptions on !m\_Connected
+* [#3783](https://github.com/icinga/icinga2/issues/3783) (DB IDO): Implement support for re-ordering groups of IDO queries
+* [#3775](https://github.com/icinga/icinga2/issues/3775) (Configuration): Config validation doesn't fail when templates are used as object names
+* [#3774](https://github.com/icinga/icinga2/issues/3774) (DB IDO): IDO breaks when writing to icinga\_programstatus with latest snapshots
+* [#3773](https://github.com/icinga/icinga2/issues/3773) (Configuration): Relative path in include\_zones does not work
+* [#3766](https://github.com/icinga/icinga2/issues/3766) (API): Cluster config sync ignores zones.d from API packages
+* [#3765](https://github.com/icinga/icinga2/issues/3765): Use NodeName in null and random checks
+* [#3764](https://github.com/icinga/icinga2/issues/3764) (DB IDO): Failed IDO query for icinga\_downtimehistory
+* [#3752](https://github.com/icinga/icinga2/issues/3752): Incorrect information in --version on Linux
+* [#3741](https://github.com/icinga/icinga2/issues/3741) (DB IDO): Avoid duplicate config and status updates on startup
+* [#3735](https://github.com/icinga/icinga2/issues/3735) (Configuration): Disallow lambda expressions where side-effect-free expressions are not allowed
+* [#3730](https://github.com/icinga/icinga2/issues/3730): Missing path in mkdir\(\) exceptions
+* [#3728](https://github.com/icinga/icinga2/issues/3728) (DB IDO): build of icinga2 with gcc 4.4.7 segfaulting with ido
+* [#3722](https://github.com/icinga/icinga2/issues/3722) (API): Missing num\_hosts\_pending in /v1/status/CIB
+* [#3715](https://github.com/icinga/icinga2/issues/3715) (CLI): node wizard does not remember user defined port
+* [#3712](https://github.com/icinga/icinga2/issues/3712) (CLI): Remove the local zone name question in node wizard
+* [#3705](https://github.com/icinga/icinga2/issues/3705) (API): API is not working on wheezy
+* [#3704](https://github.com/icinga/icinga2/issues/3704) (Cluster): ApiListener::ReplayLog can block with a lot of clients
+* [#3702](https://github.com/icinga/icinga2/issues/3702) (Cluster): Zone::CanAccessObject is very expensive
+* [#3697](https://github.com/icinga/icinga2/issues/3697) (Compat): Crash in ExternalCommandListener
+* [#3677](https://github.com/icinga/icinga2/issues/3677) (API): API queries cause memory leaks
+* [#3613](https://github.com/icinga/icinga2/issues/3613) (DB IDO): Non-UTF8 characters from plugins causes IDO to fail
+* [#3606](https://github.com/icinga/icinga2/issues/3606) (Plugins): check\_network performance data in invalid format
+* [#3571](https://github.com/icinga/icinga2/issues/3571) (Plugins): check\_memory and check\_swap plugins do unit conversion and rounding before percentage calculations resulting in imprecise percentages
+* [#3540](https://github.com/icinga/icinga2/issues/3540) (Livestatus): Livestatus log query - filter "class" yields empty results
+* [#3440](https://github.com/icinga/icinga2/issues/3440): Icinga2 reload timeout results in killing old and new process because of systemd
+* [#2866](https://github.com/icinga/icinga2/issues/2866) (DB IDO): DB IDO: notification\_id for contact notifications is out of range
+* [#2746](https://github.com/icinga/icinga2/issues/2746) (DB IDO): Add priority queue for disconnect/programstatus update events
+* [#2009](https://github.com/icinga/icinga2/issues/2009): Re-checks scheduling w/ retry\_interval
+
+### ITL
+
+* [#3927](https://github.com/icinga/icinga2/issues/3927) (ITL): CheckCommand 'disk': Option freespace-ignore-reserved
+* [#3749](https://github.com/icinga/icinga2/issues/3749) (ITL): The hpasm check command is using the PluginDir constant
+* [#3747](https://github.com/icinga/icinga2/issues/3747) (ITL): Add check\_iostat to ITL
+* [#3729](https://github.com/icinga/icinga2/issues/3729) (ITL): ITL check command possibly mistyped variable names
+
+### Documentation
+
+* [#3946](https://github.com/icinga/icinga2/issues/3946) (Documentation): Documentation: Unescaped pipe character in tables
+* [#3893](https://github.com/icinga/icinga2/issues/3893) (Documentation): Outdated link to icingaweb2-module-nagvis
+* [#3892](https://github.com/icinga/icinga2/issues/3892) (Documentation): Partially missing escaping in doc/7-icinga-template-library.md
+* [#3861](https://github.com/icinga/icinga2/issues/3861) (Documentation): Incorrect IdoPgSqlConnection Example in Documentation
+* [#3850](https://github.com/icinga/icinga2/issues/3850) (Documentation): Incorrect name in AUTHORS
+* [#3836](https://github.com/icinga/icinga2/issues/3836) (Documentation): Troubleshooting: Explain how to fetch the executed command
+* [#3833](https://github.com/icinga/icinga2/issues/3833) (Documentation): Better explanation for array values in "disk" CheckCommand docs
+* [#3826](https://github.com/icinga/icinga2/issues/3826) (Documentation): Add example how to use custom functions in attributes
+* [#3808](https://github.com/icinga/icinga2/issues/3808) (Documentation): Typos in the "troubleshooting" section of the documentation
+* [#3793](https://github.com/icinga/icinga2/issues/3793) (Documentation): "setting up check plugins" section should be enhanced with package manager examples
+* [#3781](https://github.com/icinga/icinga2/issues/3781) (Documentation): Formatting problem in "Advanced Filter" chapter
+* [#3770](https://github.com/icinga/icinga2/issues/3770) (Documentation): Missing documentation for API packages zones.d config sync
+* [#3759](https://github.com/icinga/icinga2/issues/3759) (Documentation): Missing SUSE repository for monitoring plugins documentation
+* [#3748](https://github.com/icinga/icinga2/issues/3748) (Documentation): Wrong postgresql-setup initdb command for RHEL7
+* [#3550](https://github.com/icinga/icinga2/issues/3550) (Documentation): A PgSQL DB for the IDO can't be created w/ UTF8
+* [#3549](https://github.com/icinga/icinga2/issues/3549) (Documentation): Incorrect SQL command for creating the user of the PostgreSQL DB for the IDO
+
+### Support
+
+* [#3900](https://github.com/icinga/icinga2/issues/3900) (Packages): Windows build fails on InterlockedIncrement type
+* [#3838](https://github.com/icinga/icinga2/issues/3838) (Installation): Race condition when using systemd unit file
+* [#3832](https://github.com/icinga/icinga2/issues/3832) (Installation): Compiler warnings in lib/remote/base64.cpp
+* [#3818](https://github.com/icinga/icinga2/issues/3818) (Installation): Logrotate on systemd distros should use systemctl not service
+* [#3771](https://github.com/icinga/icinga2/issues/3771) (Installation): Build error with older CMake versions on VERSION\_LESS compare
+* [#3769](https://github.com/icinga/icinga2/issues/3769) (Packages): Windows build fails with latest git master
+* [#3746](https://github.com/icinga/icinga2/issues/3746) (Packages): chcon partial context error in safe-reload prevents reload
+* [#3723](https://github.com/icinga/icinga2/issues/3723) (Installation): Crash on startup with incorrect directory permissions
+* [#3679](https://github.com/icinga/icinga2/issues/3679) (Installation): Add CMake flag for disabling the unit tests
+
+## 2.4.1 (2015-11-26)
+
+### Notes
+
+* ITL
+ * Add running_kernel_use_sudo option for the running_kernel check
+* Configuration
+  * Add global constants: `PlatformName`, `PlatformVersion`, `PlatformKernel` and `PlatformKernelVersion` (see the sketch below)
+* CLI
+ * Use NodeName and ZoneName constants for 'node setup' and 'node wizard'
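+
+A small sketch of how the new platform constants and the `running_kernel_use_sudo` option might be used, with hypothetical object names; consult the ITL and language reference for the exact semantics:
+
+```
+object Host "example-host" {
+  check_command = "hostalive"
+  address = "127.0.0.1"
+  // The new platform constants are plain globals and can be used in expressions:
+  vars.os_info = PlatformName + " " + PlatformVersion
+}
+
+apply Service "kernel" {
+  check_command = "running_kernel"
+  vars.running_kernel_use_sudo = true   // new ITL option from this release
+  assign where host.name == "example-host"
+}
+```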
+
+### Enhancement
+
+* [#3706](https://github.com/icinga/icinga2/issues/3706) (CLI): Use NodeName and ZoneName constants for 'node setup' and 'node wizard'
+
+### Bug
+
+* [#3710](https://github.com/icinga/icinga2/issues/3710) (CLI): Remove --master\_zone from --help because it is currently not implemented
+* [#3689](https://github.com/icinga/icinga2/issues/3689) (CLI): CLI command 'repository add' doesn't work
+* [#3685](https://github.com/icinga/icinga2/issues/3685) (CLI): node wizard checks for /var/lib/icinga2/ca directory but not the files
+* [#3674](https://github.com/icinga/icinga2/issues/3674): lib/base/process.cpp SIGSEGV on Debian squeeze / RHEL 6
+* [#3671](https://github.com/icinga/icinga2/issues/3671) (API): Icinga 2 crashes when ScheduledDowntime objects are used
+* [#3670](https://github.com/icinga/icinga2/issues/3670) (CLI): API setup command incorrectly overwrites existing certificates
+* [#3665](https://github.com/icinga/icinga2/issues/3665) (CLI): "node wizard" does not ask user to verify SSL certificate
+
+### ITL
+
+* [#3691](https://github.com/icinga/icinga2/issues/3691) (ITL): Add running\_kernel\_use\_sudo option for the running\_kernel check
+* [#3682](https://github.com/icinga/icinga2/issues/3682) (ITL): Indentation in command-plugins.conf
+* [#3657](https://github.com/icinga/icinga2/issues/3657) (ITL): Add by\_ssh\_options argument for the check\_by\_ssh plugin
+
+### Documentation
+
+* [#3701](https://github.com/icinga/icinga2/issues/3701) (Documentation): Incorrect path for icinga2 binary in development documentation
+* [#3690](https://github.com/icinga/icinga2/issues/3690) (Documentation): Fix typos in the documentation
+* [#3673](https://github.com/icinga/icinga2/issues/3673) (Documentation): Documentation for schedule-downtime is missing required parameters
+* [#3594](https://github.com/icinga/icinga2/issues/3594) (Documentation): Documentation example in "Access Object Attributes at Runtime" doesn't work correctly
+* [#3391](https://github.com/icinga/icinga2/issues/3391) (Documentation): Incorrect web inject URL in documentation
+
+### Support
+
+* [#3699](https://github.com/icinga/icinga2/issues/3699) (Installation): Windows setup wizard crashes when InstallDir registry key is not set
+* [#3680](https://github.com/icinga/icinga2/issues/3680) (Installation): Incorrect redirect for stderr in /usr/lib/icinga2/prepare-dirs
+* [#3656](https://github.com/icinga/icinga2/issues/3656) (Packages): Build fails on SLES 11 SP3 with GCC 4.8
+
+## 2.4.0 (2015-11-16)
+
+### Notes
+
+* API
+    * RESTful API with basic auth or client certificates (see the `ApiUser` sketch below)
+ * Filters, types, permissions
+ * configuration package management
+ * query/create/modify/delete config objects at runtime
+ * status queries for global stats
+ * actions (e.g. acknowledge all service problems)
+ * event streams
+* ITL and Plugin Check Command definitions
+ * The 'running_kernel' check command was moved to the plugins-contrib section. You have to update your config to include 'plugins-contrib'
+* Configuration
+ * The global constants Enable* and Vars have been removed. Use the IcingaApplication object attributes instead.
+* Features
+    * New Graphite tree. Please check the documentation on how to enable the legacy schema.
+ * IcingaStatusWriter feature has been deprecated and will be removed in future versions.
+    * Modified attributes are no longer exposed as a bit mask to external interfaces (API-related changes). External commands like CHANGE_*_MODATTR have been removed.
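+
+A minimal sketch of the two API authentication styles mentioned above, using hypothetical user names and illustrative permission strings (see the API chapter for the exact permission names):
+
+```
+object ApiUser "web-ui" {
+  password = "changeme"                 // basic auth; placeholder password
+  permissions = [ "status/query", "objects/query/Host", "objects/query/Service" ]
+}
+
+object ApiUser "agent-client" {
+  client_cn = "agent1.example.org"      // certificate-based authentication
+  permissions = [ "actions/*" ]
+}
+```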
+
+### Enhancement
+
+* [#3642](https://github.com/icinga/icinga2/issues/3642): Release 2.4.0
+* [#3624](https://github.com/icinga/icinga2/issues/3624) (API): Enhance programmatic examples for the API docs
+* [#3611](https://github.com/icinga/icinga2/issues/3611) (API): Change object query result set
+* [#3609](https://github.com/icinga/icinga2/issues/3609) (API): Change 'api setup' into a manual step while configuring the API
+* [#3608](https://github.com/icinga/icinga2/issues/3608) (CLI): Icinga 2 script debugger
+* [#3591](https://github.com/icinga/icinga2/issues/3591) (CLI): Change output format for 'icinga2 console'
+* [#3580](https://github.com/icinga/icinga2/issues/3580): Change GetLastStateUp/Down to host attributes
+* [#3576](https://github.com/icinga/icinga2/issues/3576) (Plugins): Missing parameters for check jmx4perl
+* [#3561](https://github.com/icinga/icinga2/issues/3561) (CLI): Use ZoneName variable for parent\_zone in node update-config
+* [#3537](https://github.com/icinga/icinga2/issues/3537) (CLI): Rewrite man page
+* [#3531](https://github.com/icinga/icinga2/issues/3531) (DB IDO): Add the name for comments/downtimes next to legacy\_id to DB IDO
+* [#3515](https://github.com/icinga/icinga2/issues/3515): Remove api.cpp, api.hpp
+* [#3508](https://github.com/icinga/icinga2/issues/3508) (Cluster): Add getter for endpoint 'connected' attribute
+* [#3507](https://github.com/icinga/icinga2/issues/3507) (API): Hide internal attributes
+* [#3506](https://github.com/icinga/icinga2/issues/3506) (API): Original attributes list in IDO
+* [#3503](https://github.com/icinga/icinga2/issues/3503) (API): Log a warning message on unauthorized http request
+* [#3502](https://github.com/icinga/icinga2/issues/3502) (API): Use the API for "icinga2 console"
+* [#3498](https://github.com/icinga/icinga2/issues/3498) (DB IDO): DB IDO should provide its connected state via /v1/status
+* [#3488](https://github.com/icinga/icinga2/issues/3488) (API): Document that modified attributes require accept\_config for cluster/clients
+* [#3469](https://github.com/icinga/icinga2/issues/3469) (Configuration): Pretty-print arrays and dictionaries when converting them to strings
+* [#3463](https://github.com/icinga/icinga2/issues/3463) (API): Change object version to timestamps for diff updates on config sync
+* [#3452](https://github.com/icinga/icinga2/issues/3452) (Configuration): Provide keywords to retrieve the current file name at parse time
+* [#3435](https://github.com/icinga/icinga2/issues/3435) (API): Move /v1/\<type\> to /v1/objects/\<type\>
+* [#3432](https://github.com/icinga/icinga2/issues/3432) (API): Rename statusqueryhandler to objectqueryhandler
+* [#3419](https://github.com/icinga/icinga2/issues/3419) (API): Sanitize error status codes and messages
+* [#3414](https://github.com/icinga/icinga2/issues/3414): Make ConfigObject::{G,S}etField\(\) method public
+* [#3386](https://github.com/icinga/icinga2/issues/3386) (API): Add global status handler for the API
+* [#3357](https://github.com/icinga/icinga2/issues/3357) (API): Implement CSRF protection for the API
+* [#3354](https://github.com/icinga/icinga2/issues/3354) (API): Implement joins for status queries
+* [#3343](https://github.com/icinga/icinga2/issues/3343) (API): Implement a demo API client: Icinga Studio
+* [#3341](https://github.com/icinga/icinga2/issues/3341) (API): URL class improvements
+* [#3340](https://github.com/icinga/icinga2/issues/3340) (API): Add plural\_name field to /v1/types
+* [#3332](https://github.com/icinga/icinga2/issues/3332) (Configuration): Use an AST node for the 'library' keyword
+* [#3297](https://github.com/icinga/icinga2/issues/3297) (Configuration): Implement ignore\_on\_error keyword
+* [#3296](https://github.com/icinga/icinga2/issues/3296) (API): Rename config/modules to config/packages
+* [#3291](https://github.com/icinga/icinga2/issues/3291) (API): Remove debug messages in HttpRequest class
+* [#3290](https://github.com/icinga/icinga2/issues/3290): Add String::ToLower/ToUpper
+* [#3287](https://github.com/icinga/icinga2/issues/3287) (API): Add package attribute for ConfigObject and set its origin
+* [#3285](https://github.com/icinga/icinga2/issues/3285) (API): Implement support for restoring modified attributes
+* [#3283](https://github.com/icinga/icinga2/issues/3283) (API): Implement support for indexers in ConfigObject::RestoreAttribute
+* [#3282](https://github.com/icinga/icinga2/issues/3282): Implement Object\#clone and rename Array/Dictionary\#clone to shallow\_clone
+* [#3280](https://github.com/icinga/icinga2/issues/3280): Add override keyword for all relevant methods
+* [#3278](https://github.com/icinga/icinga2/issues/3278) (API): Figure out how to sync dynamically created objects inside the cluster
+* [#3277](https://github.com/icinga/icinga2/issues/3277) (API): Ensure that runtime config objects are persisted on disk
+* [#3272](https://github.com/icinga/icinga2/issues/3272): Implement the 'base' field for the Type class
+* [#3267](https://github.com/icinga/icinga2/issues/3267): Rename DynamicObject/DynamicType to ConfigObject/ConfigType
+* [#3240](https://github.com/icinga/icinga2/issues/3240): Implement support for attaching GDB to the Icinga process on crash
+* [#3238](https://github.com/icinga/icinga2/issues/3238) (API): Implement global modified attributes
+* [#3233](https://github.com/icinga/icinga2/issues/3233) (API): Implement support for . in modify\_attribute
+* [#3232](https://github.com/icinga/icinga2/issues/3232) (API): Remove GetModifiedAttributes/SetModifiedAttributes
+* [#3231](https://github.com/icinga/icinga2/issues/3231) (API): Re-implement events for attribute changes
+* [#3230](https://github.com/icinga/icinga2/issues/3230) (API): Validation for modified attributes
+* [#3203](https://github.com/icinga/icinga2/issues/3203) (Configuration): Setting global variables with i2tcl doesn't work
+* [#3197](https://github.com/icinga/icinga2/issues/3197) (API): Make Comments and Downtime types available as ConfigObject type in the API
+* [#3193](https://github.com/icinga/icinga2/issues/3193) (API): Update the URL parser's behaviour
+* [#3177](https://github.com/icinga/icinga2/issues/3177) (API): Documentation for config management API
+* [#3173](https://github.com/icinga/icinga2/issues/3173) (API): Add real path sanity checks to provided file paths
+* [#3172](https://github.com/icinga/icinga2/issues/3172): String::Trim\(\) should return a new string rather than modifying the current string
+* [#3169](https://github.com/icinga/icinga2/issues/3169) (API): Implement support for X-HTTP-Method-Override
+* [#3168](https://github.com/icinga/icinga2/issues/3168): Add Array::FromVector\(\) method
+* [#3167](https://github.com/icinga/icinga2/issues/3167): Add exceptions for Utility::MkDir{,P}
+* [#3154](https://github.com/icinga/icinga2/issues/3154): Move url to /lib/remote from /lib/base
+* [#3144](https://github.com/icinga/icinga2/issues/3144): Register ServiceOK, ServiceWarning, HostUp, etc. as constants
+* [#3140](https://github.com/icinga/icinga2/issues/3140) (API): Implement base64 de- and encoder
+* [#3094](https://github.com/icinga/icinga2/issues/3094) (API): Implement ApiUser type
+* [#3093](https://github.com/icinga/icinga2/issues/3093) (API): Implement URL parser
+* [#3090](https://github.com/icinga/icinga2/issues/3090) (Graphite): New Graphite schema
+* [#3089](https://github.com/icinga/icinga2/issues/3089) (API): Implement support for filter\_vars
+* [#3083](https://github.com/icinga/icinga2/issues/3083) (API): Define RESTful url schema
+* [#3082](https://github.com/icinga/icinga2/issues/3082) (API): Implement support for HTTP
+* [#3065](https://github.com/icinga/icinga2/issues/3065): Allow comments when parsing JSON
+* [#3025](https://github.com/icinga/icinga2/issues/3025) (DB IDO): DB IDO/Livestatus: Add zone object table w/ endpoint members
+* [#2934](https://github.com/icinga/icinga2/issues/2934) (API): API Documentation
+* [#2933](https://github.com/icinga/icinga2/issues/2933) (API): Implement config file management commands
+* [#2932](https://github.com/icinga/icinga2/issues/2932) (API): Staging for configuration validation
+* [#2931](https://github.com/icinga/icinga2/issues/2931) (API): Support validating configuration changes
+* [#2930](https://github.com/icinga/icinga2/issues/2930) (API): Commands for adding and removing objects
+* [#2929](https://github.com/icinga/icinga2/issues/2929) (API): Multiple sources for zone configuration tree
+* [#2928](https://github.com/icinga/icinga2/issues/2928) (API): Implement support for writing configuration files
+* [#2927](https://github.com/icinga/icinga2/issues/2927) (API): Update modules to support adding and removing objects at runtime
+* [#2926](https://github.com/icinga/icinga2/issues/2926) (API): Dependency tracking for objects
+* [#2925](https://github.com/icinga/icinga2/issues/2925) (API): Disallow changes for certain config attributes at runtime
+* [#2923](https://github.com/icinga/icinga2/issues/2923) (API): Changelog for modified attributes
+* [#2921](https://github.com/icinga/icinga2/issues/2921) (API): API status queries
+* [#2918](https://github.com/icinga/icinga2/issues/2918) (API): API permissions
+* [#2917](https://github.com/icinga/icinga2/issues/2917) (API): Create default administrative user
+* [#2916](https://github.com/icinga/icinga2/issues/2916) (API): Password-based authentication for the API
+* [#2915](https://github.com/icinga/icinga2/issues/2915) (API): Certificate-based authentication for the API
+* [#2914](https://github.com/icinga/icinga2/issues/2914) (API): Enable the ApiListener by default
+* [#2913](https://github.com/icinga/icinga2/issues/2913) (API): Configuration file management for the API
+* [#2912](https://github.com/icinga/icinga2/issues/2912) (API): Runtime configuration for the API
+* [#2911](https://github.com/icinga/icinga2/issues/2911) (API): Add modified attribute support for the API
+* [#2910](https://github.com/icinga/icinga2/issues/2910) (API): Add commands \(actions\) for the API
+* [#2909](https://github.com/icinga/icinga2/issues/2909) (API): Implement status queries for the API
+* [#2908](https://github.com/icinga/icinga2/issues/2908) (API): Event stream support for the API
+* [#2907](https://github.com/icinga/icinga2/issues/2907) (API): Implement filters for the API
+* [#2906](https://github.com/icinga/icinga2/issues/2906) (API): Reflection support for the API
+* [#2904](https://github.com/icinga/icinga2/issues/2904) (API): Basic API framework
+* [#2901](https://github.com/icinga/icinga2/issues/2901) (Configuration): Implement sandbox mode for the config parser
+* [#2887](https://github.com/icinga/icinga2/issues/2887) (Configuration): Remove the ScopeCurrent constant
+* [#2857](https://github.com/icinga/icinga2/issues/2857): Avoid unnecessary dictionary lookups
+* [#2838](https://github.com/icinga/icinga2/issues/2838): Move implementation code from thpp files into separate files
+* [#2826](https://github.com/icinga/icinga2/issues/2826) (Configuration): Use DebugHint information when reporting validation errors
+* [#2814](https://github.com/icinga/icinga2/issues/2814): Add support for the C++11 keyword 'override'
+* [#2809](https://github.com/icinga/icinga2/issues/2809) (Configuration): Implement constructor-style casts
+* [#2788](https://github.com/icinga/icinga2/issues/2788) (Configuration): Refactor the startup process
+* [#2785](https://github.com/icinga/icinga2/issues/2785) (CLI): Implement support for libedit
+* [#2757](https://github.com/icinga/icinga2/issues/2757): Deprecate IcingaStatusWriter feature
+* [#2755](https://github.com/icinga/icinga2/issues/2755) (DB IDO): Implement support for CLIENT\_MULTI\_STATEMENTS
+* [#2741](https://github.com/icinga/icinga2/issues/2741) (DB IDO): Add support for current and current-1 db ido schema version
+* [#2740](https://github.com/icinga/icinga2/issues/2740) (DB IDO): Add embedded DB IDO version health check
+* [#2722](https://github.com/icinga/icinga2/issues/2722): Allow some of the Array and Dictionary methods to be inlined by the compiler
+* [#2514](https://github.com/icinga/icinga2/issues/2514): 'icinga2 console' should serialize temporary attributes \(rather than just config + state\)
+* [#2474](https://github.com/icinga/icinga2/issues/2474) (Graphite): graphite writer should pass "-" in host names and "." in perf data
+* [#2438](https://github.com/icinga/icinga2/issues/2438) (API): Add icinga, cluster, cluster-zone check information to the ApiListener status handler
+* [#2268](https://github.com/icinga/icinga2/issues/2268) (Configuration): Validators should be implemented in \(auto-generated\) native code
+
+### Bug
+
+* [#3669](https://github.com/icinga/icinga2/issues/3669): Use notify\_one in WorkQueue::Enqueue
+* [#3667](https://github.com/icinga/icinga2/issues/3667): Utility::FormatErrorNumber fails when error message uses arguments
+* [#3649](https://github.com/icinga/icinga2/issues/3649) (DB IDO): Group memberships are not updated for runtime created objects
+* [#3648](https://github.com/icinga/icinga2/issues/3648) (API): API overwrites \(and then deletes\) config file when trying to create an object that already exists
+* [#3647](https://github.com/icinga/icinga2/issues/3647) (API): Don't allow users to set state attributes via PUT
+* [#3645](https://github.com/icinga/icinga2/issues/3645): Deadlock in MacroProcessor::EvaluateFunction
+* [#3635](https://github.com/icinga/icinga2/issues/3635): modify\_attribute: object cannot be cloned
+* [#3633](https://github.com/icinga/icinga2/issues/3633) (API): Detailed error message is missing when object creation via API fails
+* [#3632](https://github.com/icinga/icinga2/issues/3632) (API): API call doesn't fail when trying to use a template that doesn't exist
+* [#3625](https://github.com/icinga/icinga2/issues/3625): Improve location information for errors in API filters
+* [#3622](https://github.com/icinga/icinga2/issues/3622) (API): /v1/console should only use a single permission
+* [#3620](https://github.com/icinga/icinga2/issues/3620) (API): 'remove-comment' action does not support filters
+* [#3619](https://github.com/icinga/icinga2/issues/3619) (CLI): 'api setup' should create a user even when api feature is already enabled
+* [#3618](https://github.com/icinga/icinga2/issues/3618) (CLI): Autocompletion doesn't work in the debugger
+* [#3617](https://github.com/icinga/icinga2/issues/3617) (API): There's a variable called 'string' in filter expressions
+* [#3607](https://github.com/icinga/icinga2/issues/3607) (CLI): Broken build - unresolved external symbol "public: void \_\_thiscall icinga::ApiClient::ExecuteScript...
+* [#3602](https://github.com/icinga/icinga2/issues/3602) (DB IDO): Async mysql queries aren't logged in the debug log
+* [#3601](https://github.com/icinga/icinga2/issues/3601): Don't validate custom attributes that aren't strings
+* [#3600](https://github.com/icinga/icinga2/issues/3600): Crash in ConfigWriter::EmitIdentifier
+* [#3598](https://github.com/icinga/icinga2/issues/3598) (CLI): Spaces do not work in command arguments
+* [#3595](https://github.com/icinga/icinga2/issues/3595) (DB IDO): Change session\_token to integer timestamp
+* [#3593](https://github.com/icinga/icinga2/issues/3593): Fix indentation for Dictionary::ToString
+* [#3587](https://github.com/icinga/icinga2/issues/3587): Crash in ConfigWriter::GetKeywords
+* [#3586](https://github.com/icinga/icinga2/issues/3586) (Cluster): Circular reference between \*Connection and TlsStream objects
+* [#3583](https://github.com/icinga/icinga2/issues/3583) (API): Mismatch on {comment,downtime}\_id vs internal name in the API
+* [#3581](https://github.com/icinga/icinga2/issues/3581): CreatePipeOverlapped is not thread-safe
+* [#3579](https://github.com/icinga/icinga2/issues/3579): Figure out whether we need the Checkable attributes state\_raw, last\_state\_raw, hard\_state\_raw
+* [#3577](https://github.com/icinga/icinga2/issues/3577) (Plugins): Increase the default timeout for OS checks
+* [#3574](https://github.com/icinga/icinga2/issues/3574) (API): Plural name rule not treating edge case correctly
+* [#3572](https://github.com/icinga/icinga2/issues/3572) (API): IcingaStudio: Accessing non-ConfigObjects causes ugly exception
+* [#3569](https://github.com/icinga/icinga2/issues/3569) (API): Incorrect JSON-RPC message causes Icinga 2 to crash
+* [#3566](https://github.com/icinga/icinga2/issues/3566) (DB IDO): Unique constraint violation with multiple comment inserts in DB IDO
+* [#3558](https://github.com/icinga/icinga2/issues/3558) (DB IDO): IDO tries to execute empty UPDATE queries
+* [#3554](https://github.com/icinga/icinga2/issues/3554) (Configuration): Crash in IndexerExpression::GetReference when attempting to set an attribute on an object other than the current one
+* [#3551](https://github.com/icinga/icinga2/issues/3551) (Configuration): Line continuation is broken in 'icinga2 console'
+* [#3548](https://github.com/icinga/icinga2/issues/3548) (Configuration): Don't allow scripts to access FANoUserView attributes in sandbox mode
+* [#3546](https://github.com/icinga/icinga2/issues/3546) (Cluster): Improve error handling during log replay
+* [#3536](https://github.com/icinga/icinga2/issues/3536) (CLI): Improve --help output for the --log-level option
+* [#3535](https://github.com/icinga/icinga2/issues/3535) (CLI): "Command options" is empty when executing icinga2 without any argument.
+* [#3534](https://github.com/icinga/icinga2/issues/3534) (DB IDO): Custom variables aren't removed from the IDO database
+* [#3524](https://github.com/icinga/icinga2/issues/3524) (DB IDO): Changing a group's attributes causes duplicate rows in the icinga\_\*group\_members table
+* [#3517](https://github.com/icinga/icinga2/issues/3517): OpenBSD: hang during ConfigItem::ActivateItems\(\) in daemon startup
+* [#3514](https://github.com/icinga/icinga2/issues/3514) (CLI): Misleading wording in generated zones.conf
+* [#3501](https://github.com/icinga/icinga2/issues/3501) (API): restore\_attribute does not work in clusters
+* [#3489](https://github.com/icinga/icinga2/issues/3489) (API): Ensure that modified attributes work with clients with local config and no zone attribute
+* [#3485](https://github.com/icinga/icinga2/issues/3485) (API): Icinga2 API performance regression
+* [#3482](https://github.com/icinga/icinga2/issues/3482) (API): Version updates are not working properly
+* [#3468](https://github.com/icinga/icinga2/issues/3468) (CLI): icinga2 repository host add does not work
+* [#3462](https://github.com/icinga/icinga2/issues/3462): ConfigWriter::EmitValue should format floating point values properly
+* [#3461](https://github.com/icinga/icinga2/issues/3461) (API): Config sync does not set endpoint syncing and plays disconnect-sync ping-pong
+* [#3459](https://github.com/icinga/icinga2/issues/3459) (API): /v1/objects/\<type\> returns an HTTP error when there are no objects of that type
+* [#3457](https://github.com/icinga/icinga2/issues/3457) (API): Config Sync shouldn't send updates for objects the client doesn't have access to
+* [#3451](https://github.com/icinga/icinga2/issues/3451) (API): Properly encode URLs in Icinga Studio
+* [#3448](https://github.com/icinga/icinga2/issues/3448) (API): Use a temporary file for modified-attributes.conf updates
+* [#3445](https://github.com/icinga/icinga2/issues/3445) (Configuration): ASCII NULs don't work in string values
+* [#3438](https://github.com/icinga/icinga2/issues/3438) (API): URL parser is cutting off last character
+* [#3434](https://github.com/icinga/icinga2/issues/3434) (API): PerfdataValue is not properly serialised in status queries
+* [#3433](https://github.com/icinga/icinga2/issues/3433) (API): Move the Collection status handler to /v1/status
+* [#3422](https://github.com/icinga/icinga2/issues/3422) (Configuration): Detect infinite recursion in user scripts
+* [#3411](https://github.com/icinga/icinga2/issues/3411) (API): API actions do not follow REST guidelines
+* [#3383](https://github.com/icinga/icinga2/issues/3383) (DB IDO): Add object\_id where clause for icinga\_downtimehistory
+* [#3345](https://github.com/icinga/icinga2/issues/3345) (API): Error handling in HttpClient/icinga-studio
+* [#3338](https://github.com/icinga/icinga2/issues/3338) (CLI): Unused variable console\_type in consolecommand.cpp
+* [#3336](https://github.com/icinga/icinga2/issues/3336) (API): Filtering by name doesn't work
+* [#3335](https://github.com/icinga/icinga2/issues/3335) (API): HTTP keep-alive does not work with .NET WebClient
+* [#3330](https://github.com/icinga/icinga2/issues/3330): Unused variable 'dobj' in configobject.tcpp
+* [#3328](https://github.com/icinga/icinga2/issues/3328) (Configuration): Don't parse config files for branches not taken
+* [#3315](https://github.com/icinga/icinga2/issues/3315) (Configuration): Crash in ConfigCompiler::RegisterZoneDir
+* [#3302](https://github.com/icinga/icinga2/issues/3302) (API): Implement support for '.' when persisting modified attributes
+* [#3301](https://github.com/icinga/icinga2/issues/3301): Fix formatting in mkclass
+* [#3264](https://github.com/icinga/icinga2/issues/3264) (API): Do not let API users create objects with invalid names
+* [#3250](https://github.com/icinga/icinga2/issues/3250) (API): Missing conf.d or zones.d cause parse failure
+* [#3248](https://github.com/icinga/icinga2/issues/3248): Crash during cluster log replay
+* [#3244](https://github.com/icinga/icinga2/issues/3244) (CLI): Color codes in console prompt break line editing
+* [#3242](https://github.com/icinga/icinga2/issues/3242) (CLI): Crash in ScriptFrame::~ScriptFrame
+* [#3227](https://github.com/icinga/icinga2/issues/3227) (CLI): console autocompletion should take into account parent classes' prototypes
+* [#3215](https://github.com/icinga/icinga2/issues/3215) (API): win32 build: S\_ISDIR is undefined
+* [#3205](https://github.com/icinga/icinga2/issues/3205) (Configuration): ScriptFrame's 'Self' attribute gets corrupted when an expression throws an exception
+* [#3202](https://github.com/icinga/icinga2/issues/3202) (Configuration): Operator - should not work with "" and numbers
+* [#3198](https://github.com/icinga/icinga2/issues/3198): Accessing field ID 0 \("prototype"\) fails
+* [#3182](https://github.com/icinga/icinga2/issues/3182) (API): Broken cluster config sync w/o include\_zones
+* [#3171](https://github.com/icinga/icinga2/issues/3171) (API): Problem with child nodes in http url registry
+* [#3138](https://github.com/icinga/icinga2/issues/3138) (CLI): 'node wizard/setup' should always generate new CN certificates
+* [#3131](https://github.com/icinga/icinga2/issues/3131) (DB IDO): Overflow in freshness\_threshold column \(smallint\) w/ DB IDO MySQL
+* [#3109](https://github.com/icinga/icinga2/issues/3109) (API): build failure: demo module
+* [#3087](https://github.com/icinga/icinga2/issues/3087) (DB IDO): Fix incorrect datatype for the check\_source column in icinga\_statehistory table
+* [#2974](https://github.com/icinga/icinga2/issues/2974) (Configuration): Remove incorrect 'ignore where' expression from 'ssh' apply example
+* [#2939](https://github.com/icinga/icinga2/issues/2939) (Cluster): Wrong vars changed handler in api events
+* [#2884](https://github.com/icinga/icinga2/issues/2884) (DB IDO): PostgreSQL schema sets default timestamps w/o time zone
+* [#2879](https://github.com/icinga/icinga2/issues/2879): Compiler warnings with latest HEAD 5ac5f98
+* [#2870](https://github.com/icinga/icinga2/issues/2870) (DB IDO): pgsql driver does not have latest mysql changes synced
+* [#2863](https://github.com/icinga/icinga2/issues/2863) (Configuration): Crash in VMOps::FunctionCall
+* [#2850](https://github.com/icinga/icinga2/issues/2850) (Configuration): Validation fails even though field is not required
+* [#2824](https://github.com/icinga/icinga2/issues/2824) (DB IDO): Failed assertion in IdoMysqlConnection::FieldToEscapedString
+* [#2808](https://github.com/icinga/icinga2/issues/2808) (Configuration): Make default notifications include users from host.vars.notification.mail.users
+* [#2803](https://github.com/icinga/icinga2/issues/2803): Don't allow users to instantiate the StreamLogger class
+
+### ITL
+
+* [#3584](https://github.com/icinga/icinga2/issues/3584) (ITL): Add ipv4/ipv6 only to tcp and http CheckCommand
+* [#3582](https://github.com/icinga/icinga2/issues/3582) (ITL): Add check command mysql
+* [#3578](https://github.com/icinga/icinga2/issues/3578) (ITL): Add check command negate
+* [#3532](https://github.com/icinga/icinga2/issues/3532) (ITL): 'dig\_lookup' custom attribute for the 'dig' check command isn't optional
+* [#3525](https://github.com/icinga/icinga2/issues/3525) (ITL): Ability to set port on SNMP Checks
+* [#3490](https://github.com/icinga/icinga2/issues/3490) (ITL): Add check command nginx\_status
+* [#2964](https://github.com/icinga/icinga2/issues/2964) (ITL): Move 'running\_kernel' check command to plugins-contrib 'operating system' section
+* [#2784](https://github.com/icinga/icinga2/issues/2784) (ITL): Move the base command templates into libmethods
+
+### Documentation
+
+* [#3663](https://github.com/icinga/icinga2/issues/3663) (Documentation): Update wxWidgets documentation for Icinga Studio
+* [#3640](https://github.com/icinga/icinga2/issues/3640) (Documentation): Explain DELETE for config stages/packages
+* [#3638](https://github.com/icinga/icinga2/issues/3638) (Documentation): Documentation for /v1/types
+* [#3631](https://github.com/icinga/icinga2/issues/3631) (Documentation): Documentation for the script debugger
+* [#3630](https://github.com/icinga/icinga2/issues/3630) (Documentation): Explain variable names for joined objects in filter expressions
+* [#3629](https://github.com/icinga/icinga2/issues/3629) (Documentation): Documentation for /v1/console
+* [#3628](https://github.com/icinga/icinga2/issues/3628) (Documentation): Mention wxWidget \(optional\) requirement in INSTALL.md
+* [#3626](https://github.com/icinga/icinga2/issues/3626) (Documentation): Icinga 2 API Docs
+* [#3621](https://github.com/icinga/icinga2/issues/3621) (Documentation): Documentation should not reference real host names
+* [#3563](https://github.com/icinga/icinga2/issues/3563) (Documentation): Documentation: Reorganize Livestatus and alternative frontends
+* [#3547](https://github.com/icinga/icinga2/issues/3547) (Documentation): Incorrect attribute name in the documentation
+* [#3516](https://github.com/icinga/icinga2/issues/3516) (Documentation): Add documentation for apply+for in the language reference chapter
+* [#3511](https://github.com/icinga/icinga2/issues/3511) (Documentation): Escaping $ not documented
+* [#3500](https://github.com/icinga/icinga2/issues/3500) (Documentation): Add 'support' tracker to changelog.py
+* [#3477](https://github.com/icinga/icinga2/issues/3477) (Documentation): Remove duplicated text in section "Apply Notifications to Hosts and Services"
+* [#3426](https://github.com/icinga/icinga2/issues/3426) (Documentation): Add documentation for api-users.conf and app.conf
+* [#3281](https://github.com/icinga/icinga2/issues/3281) (Documentation): Document Object\#clone
+
+### Support
+
+* [#3662](https://github.com/icinga/icinga2/issues/3662) (Packages): Download URL for NSClient++ is incorrect
+* [#3615](https://github.com/icinga/icinga2/issues/3615) (Packages): Update OpenSSL for the Windows builds
+* [#3614](https://github.com/icinga/icinga2/issues/3614) (Installation): Don't try to use --gc-sections on Solaris
+* [#3522](https://github.com/icinga/icinga2/issues/3522) (Packages): 'which' isn't available in a minimal CentOS container
+* [#3063](https://github.com/icinga/icinga2/issues/3063) (Installation): "-Wno-deprecated-register" compiler option breaks builds on SLES 11
+* [#2893](https://github.com/icinga/icinga2/issues/2893) (Installation): icinga demo module can not be built
+* [#2858](https://github.com/icinga/icinga2/issues/2858) (Packages): Specify pidfile for status\_of\_proc in the init script
+* [#2802](https://github.com/icinga/icinga2/issues/2802) (Packages): Update OpenSSL for the Windows builds
+
+## 2.3.11 (2015-10-20)
+
+### Notes
+
+* Function for performing CIDR matches: cidr_match()
+* New methods: String#reverse and Array#reverse
+* New ITL command definitions: nwc_health, hpasm, squid, pgsql
+* Additional arguments for ITL command definitions: by_ssh, dig, pop, spop, imap, simap
+* Documentation updates
+* Various bugfixes
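+
+A minimal sketch of the new 2.3.11 helpers in the Icinga 2 DSL, usable for example inside the `icinga2 console` REPL or an `assign where` rule. The network and address values are placeholders; the `cidr_match(pattern, address)` signature is assumed from the notes above.
+
+```
+var in_lan = cidr_match("192.168.0.0/16", "192.168.1.23")   // true for addresses inside the range
+var word   = "icinga".reverse()                             // "agnici"
+var order  = [ 1, 2, 3 ].reverse()                          // [ 3, 2, 1 ]
+
+/* Typical use: assign a service to all hosts inside a subnet. */
+apply Service "lan-ping" {
+  check_command = "ping4"
+  assign where cidr_match("192.168.0.0/16", host.address)
+}
+```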
+
+### Enhancement
+
+* [#3494](https://github.com/icinga/icinga2/issues/3494) (DB IDO): Add a debug log message for updating the program status table in DB IDO
+* [#3481](https://github.com/icinga/icinga2/issues/3481): New method: cidr\_match\(\)
+* [#3421](https://github.com/icinga/icinga2/issues/3421): Implement the Array\#reverse and String\#reverse methods
+* [#3327](https://github.com/icinga/icinga2/issues/3327): Implement a way for users to resolve commands+arguments in the same way Icinga does
+* [#3326](https://github.com/icinga/icinga2/issues/3326): escape\_shell\_arg\(\) method
+* [#2969](https://github.com/icinga/icinga2/issues/2969) (Metrics): Add timestamp support for OpenTsdbWriter
+
+### Bug
+
+* [#3492](https://github.com/icinga/icinga2/issues/3492) (Cluster): Wrong connection log message for global zones
+* [#3491](https://github.com/icinga/icinga2/issues/3491): cidr\_match\(\) doesn't properly validate IP addresses
+* [#3487](https://github.com/icinga/icinga2/issues/3487) (Cluster): ApiListener::SyncRelayMessage doesn't send message to all zone members
+* [#3476](https://github.com/icinga/icinga2/issues/3476) (Compat): Missing Start call for base class in CheckResultReader
+* [#3475](https://github.com/icinga/icinga2/issues/3475) (Compat): Checkresultreader is unable to process host checks
+* [#3466](https://github.com/icinga/icinga2/issues/3466): "Not after" value overflows in X509 certificates on RHEL5
+* [#3464](https://github.com/icinga/icinga2/issues/3464) (Cluster): Don't log messages we've already relayed to all relevant zones
+* [#3460](https://github.com/icinga/icinga2/issues/3460) (Metrics): Performance Data Labels including '=' will not be displayed correct
+* [#3454](https://github.com/icinga/icinga2/issues/3454): Percent character whitespace on Windows
+* [#3449](https://github.com/icinga/icinga2/issues/3449) (Cluster): Don't throw an exception when replaying the current replay log file
+* [#3446](https://github.com/icinga/icinga2/issues/3446): Deadlock in TlsStream::Close
+* [#3428](https://github.com/icinga/icinga2/issues/3428) (Configuration): config checker reports wrong error on apply for rules
+* [#3427](https://github.com/icinga/icinga2/issues/3427) (Configuration): Config parser problem with parenthesis and newlines
+* [#3423](https://github.com/icinga/icinga2/issues/3423) (Configuration): Remove unnecessary MakeLiteral calls in SetExpression::DoEvaluate
+* [#3417](https://github.com/icinga/icinga2/issues/3417) (Configuration): null + null should not be ""
+* [#3416](https://github.com/icinga/icinga2/issues/3416) (API): Problem with customvariable table update/insert queries
+* [#3373](https://github.com/icinga/icinga2/issues/3373) (Livestatus): Improve error message for socket errors in Livestatus
+* [#3324](https://github.com/icinga/icinga2/issues/3324) (Cluster): Deadlock in WorkQueue::Enqueue
+* [#3204](https://github.com/icinga/icinga2/issues/3204) (Configuration): String methods cannot be invoked on an empty string
+* [#3038](https://github.com/icinga/icinga2/issues/3038) (Livestatus): sending multiple Livestatus commands rejects all except the first
+* [#2568](https://github.com/icinga/icinga2/issues/2568) (Cluster): check cluster-zone returns wrong log lag
+
+### ITL
+
+* [#3437](https://github.com/icinga/icinga2/issues/3437) (ITL): Add timeout argument for pop, spop, imap, simap commands
+* [#3407](https://github.com/icinga/icinga2/issues/3407) (ITL): Make check\_disk.exe CheckCommand Config more verbose
+* [#3399](https://github.com/icinga/icinga2/issues/3399) (ITL): expand check command dig
+* [#3394](https://github.com/icinga/icinga2/issues/3394) (ITL): Add ipv4/ipv6 only to nrpe CheckCommand
+* [#3385](https://github.com/icinga/icinga2/issues/3385) (ITL): Add check command pgsql
+* [#3382](https://github.com/icinga/icinga2/issues/3382) (ITL): Add check command squid
+* [#3235](https://github.com/icinga/icinga2/issues/3235) (ITL): check\_command for plugin check\_hpasm
+* [#3214](https://github.com/icinga/icinga2/issues/3214) (ITL): add check command for check\_nwc\_health
+
+### Documentation
+
+* [#3479](https://github.com/icinga/icinga2/issues/3479) (Documentation): Improve timeperiod documentation
+* [#3478](https://github.com/icinga/icinga2/issues/3478) (Documentation): Broken table layout in chapter 20
+* [#3436](https://github.com/icinga/icinga2/issues/3436) (Documentation): Clarify on cluster/client naming convention and add troubleshooting section
+* [#3430](https://github.com/icinga/icinga2/issues/3430) (Documentation): Find a better description for cluster communication requirements
+* [#3409](https://github.com/icinga/icinga2/issues/3409) (Documentation): Windows Check Update -\> Access denied
+* [#3408](https://github.com/icinga/icinga2/issues/3408) (Documentation): Improve documentation for check\_memory
+* [#3406](https://github.com/icinga/icinga2/issues/3406) (Documentation): Update graphing section in the docs
+* [#3402](https://github.com/icinga/icinga2/issues/3402) (Documentation): Update debug docs for core dumps and full backtraces
+* [#3351](https://github.com/icinga/icinga2/issues/3351) (Documentation): Command Execution Bridge: Use of same endpoint names in examples for a better understanding
+* [#3092](https://github.com/icinga/icinga2/issues/3092) (Documentation): Add FreeBSD setup to getting started
+
+### Support
+
+* [#3379](https://github.com/icinga/icinga2/issues/3379) (Installation): Rather use unique SID when granting rights for folders in NSIS on Windows Client
+* [#3045](https://github.com/icinga/icinga2/issues/3045) (Packages): icinga2 ido mysql misspelled database username
+
+## 2.3.10 (2015-09-05)
+
+### Notes
+
+* Feature 9218: Use the command_endpoint name as check_source value if defined
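+
+A short sketch of the behaviour described above; the endpoint and host names are placeholders. When a check is executed remotely via `command_endpoint`, that endpoint name is now reported as `check_source`.
+
+```
+object Service "disk" {
+  host_name = "agent1.example.com"
+  check_command = "disk"
+  /* With 2.3.10, check results for this service report
+   * "agent1.example.com" as check_source instead of the scheduling node. */
+  command_endpoint = "agent1.example.com"
+}
+```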
+
+### Enhancement
+
+* [#2985](https://github.com/icinga/icinga2/issues/2985): Use the command\_endpoint name as check\_source value if defined
+
+### Bug
+
+* [#3369](https://github.com/icinga/icinga2/issues/3369): Missing zero padding for generated CA serial.txt
+* [#3352](https://github.com/icinga/icinga2/issues/3352): Wrong calculation for host compat state "UNREACHABLE" in DB IDO
+* [#3348](https://github.com/icinga/icinga2/issues/3348) (Cluster): Missing fix for reload on Windows in 2.3.9
+* [#3325](https://github.com/icinga/icinga2/issues/3325): Nested "outer" macro calls fails on \(handled\) missing "inner" values
+* [#2811](https://github.com/icinga/icinga2/issues/2811) (DB IDO): String escape problem with PostgreSQL \>= 9.1 and standard\_conforming\_strings=on
+
+## 2.3.9 (2015-08-26)
+
+### Notes
+
+* Fix the bug where the first SOFT state was recognized as the second SOFT state
+* Implemented reload functionality for Windows
+* New ITL check commands
+* Documentation updates
+* Various other bugfixes
+
+### Enhancement
+
+* [#3254](https://github.com/icinga/icinga2/issues/3254) (Livestatus): Use an empty dictionary for the 'this' scope when executing commands with Livestatus
+* [#3253](https://github.com/icinga/icinga2/issues/3253): Implement the Dictionary\#keys method
+* [#3206](https://github.com/icinga/icinga2/issues/3206): Implement Dictionary\#get and Array\#get
+* [#3170](https://github.com/icinga/icinga2/issues/3170) (Configuration): Adding "-r" parameter to the check\_load command for dividing the load averages by the number of CPUs.
+
+### Bug
+
+* [#3305](https://github.com/icinga/icinga2/issues/3305) (Configuration): Icinga2 - too many open files - Exception
+* [#3299](https://github.com/icinga/icinga2/issues/3299): Utility::Glob on Windows doesn't support wildcards in all but the last path component
+* [#3292](https://github.com/icinga/icinga2/issues/3292): Serial number field is not properly initialized for CA certificates
+* [#3279](https://github.com/icinga/icinga2/issues/3279) (DB IDO): Add missing category for IDO query
+* [#3266](https://github.com/icinga/icinga2/issues/3266) (Plugins): Default disk checks on Windows fail because check\_disk doesn't support -K
+* [#3260](https://github.com/icinga/icinga2/issues/3260): First SOFT state is recognized as second SOFT state
+* [#3255](https://github.com/icinga/icinga2/issues/3255) (Cluster): Warning about invalid API function icinga::Hello
+* [#3241](https://github.com/icinga/icinga2/issues/3241): Agent freezes when the check returns massive output
+* [#3222](https://github.com/icinga/icinga2/issues/3222) (Configuration): Dict initializer incorrectly re-initializes field that is set to an empty string
+* [#3211](https://github.com/icinga/icinga2/issues/3211) (Configuration): Operator + is inconsistent when used with empty and non-empty strings
+* [#3200](https://github.com/icinga/icinga2/issues/3200) (CLI): icinga2 node wizard don't take zone\_name input
+* [#3199](https://github.com/icinga/icinga2/issues/3199): Trying to set a field for a non-object instance fails
+* [#3196](https://github.com/icinga/icinga2/issues/3196) (Cluster): Add log for missing EventCommand for command\_endpoints
+* [#3194](https://github.com/icinga/icinga2/issues/3194): Set correct X509 version for certificates
+* [#3149](https://github.com/icinga/icinga2/issues/3149) (CLI): missing config warning on empty port in endpoints
+* [#3010](https://github.com/icinga/icinga2/issues/3010) (Cluster): cluster check w/ immediate parent and child zone endpoints
+* [#2867](https://github.com/icinga/icinga2/issues/2867): Missing DEL\_DOWNTIME\_BY\_HOST\_NAME command required by Classic UI 1.x
+* [#2352](https://github.com/icinga/icinga2/issues/2352) (Cluster): Reload does not work on Windows
+
+### ITL
+
+* [#3320](https://github.com/icinga/icinga2/issues/3320) (ITL): Add new arguments openvmtools for Open VM Tools
+* [#3313](https://github.com/icinga/icinga2/issues/3313) (ITL): add check command nscp-local-counter
+* [#3312](https://github.com/icinga/icinga2/issues/3312) (ITL): fix check command nscp-local
+* [#3265](https://github.com/icinga/icinga2/issues/3265) (ITL): check\_command interfaces option match\_aliases has to be boolean
+* [#3219](https://github.com/icinga/icinga2/issues/3219) (ITL): snmpv3 CheckCommand section improved
+* [#3213](https://github.com/icinga/icinga2/issues/3213) (ITL): add check command for check\_mailq
+* [#3208](https://github.com/icinga/icinga2/issues/3208) (ITL): Add check\_jmx4perl to ITL
+* [#3186](https://github.com/icinga/icinga2/issues/3186) (ITL): check\_command for plugin check\_clamd
+* [#3164](https://github.com/icinga/icinga2/issues/3164) (ITL): Add check\_redis to ITL
+* [#3162](https://github.com/icinga/icinga2/issues/3162) (ITL): Add check\_yum to ITL
+* [#3111](https://github.com/icinga/icinga2/issues/3111) (ITL): CheckCommand for check\_interfaces
+
+### Documentation
+
+* [#3319](https://github.com/icinga/icinga2/issues/3319) (Documentation): Duplicate severity type in the documentation for SyslogLogger
+* [#3308](https://github.com/icinga/icinga2/issues/3308) (Documentation): Fix global Zone example to "Global Configuration Zone for Templates"
+* [#3262](https://github.com/icinga/icinga2/issues/3262) (Documentation): typo in docs
+* [#3166](https://github.com/icinga/icinga2/issues/3166) (Documentation): Update gdb pretty printer docs w/ Python 3
+
+### Support
+
+* [#3298](https://github.com/icinga/icinga2/issues/3298) (Packages): Don't re-download NSCP for every build
+* [#3239](https://github.com/icinga/icinga2/issues/3239) (Packages): missing check\_perfmon.exe
+* [#3216](https://github.com/icinga/icinga2/issues/3216) (Tests): Build fix for Boost 1.59
+
+## 2.3.8 (2015-07-21)
+
+### Notes
+
+* Bugfixes
+
+### Bug
+
+* [#3160](https://github.com/icinga/icinga2/issues/3160) (Metrics): Escaping does not work for OpenTSDB perfdata plugin
+* [#3151](https://github.com/icinga/icinga2/issues/3151) (DB IDO): DB IDO: Do not update endpointstatus table on config updates
+* [#3120](https://github.com/icinga/icinga2/issues/3120) (Configuration): Don't allow "ignore where" for groups when there's no "assign where"
+
+### ITL
+
+* [#3161](https://github.com/icinga/icinga2/issues/3161) (ITL): checkcommand disk does not check free inode - check\_disk
+* [#3152](https://github.com/icinga/icinga2/issues/3152) (ITL): Wrong parameter for CheckCommand "ping-common-windows"
+
+## 2.3.7 (2015-07-15)
+
+### Notes
+
+* Bugfixes
+
+### Bug
+
+* [#3148](https://github.com/icinga/icinga2/issues/3148): Missing lock in ScriptUtils::Union
+* [#3147](https://github.com/icinga/icinga2/issues/3147): Assertion failed in icinga::ScriptUtils::Intersection
+* [#3136](https://github.com/icinga/icinga2/issues/3136) (DB IDO): DB IDO: endpoint\* tables are cleared on reload causing constraint violations
+* [#3134](https://github.com/icinga/icinga2/issues/3134): Incorrect return value for the macro\(\) function
+* [#3114](https://github.com/icinga/icinga2/issues/3114) (Configuration): Config parser ignores "ignore" in template definition
+* [#3061](https://github.com/icinga/icinga2/issues/3061) (Cluster): Selective cluster reconnecting breaks client communication
+
+### Documentation
+
+* [#3142](https://github.com/icinga/icinga2/issues/3142) (Documentation): Enhance troubleshooting ssl errors & cluster replay log
+* [#3135](https://github.com/icinga/icinga2/issues/3135) (Documentation): Wrong formatting in DB IDO extensions docs
+
+## 2.3.6 (2015-07-08)
+
+### Notes
+
+* Require openssl1 on sles11sp3 from Security Module repository
+    * Bug in SLES 11's OpenSSL version 0.9.8j preventing verification of generated certificates.
+    * Re-create these certificates with 2.3.6 linking against openssl1 (cli command or CSR auto-signing).
+* ITL: Add ldap, ntp_peer, mongodb and elasticsearch CheckCommand definitions
+* Bugfixes
+
+### Bug
+
+* [#3118](https://github.com/icinga/icinga2/issues/3118) (Cluster): Generated certificates cannot be verified w/ openssl 0.9.8j on SLES 11
+* [#3098](https://github.com/icinga/icinga2/issues/3098) (Cluster): Add log message for discarded cluster events \(e.g. from unauthenticated clients\)
+* [#3097](https://github.com/icinga/icinga2/issues/3097): Fix stability issues in the TlsStream/Stream classes
+* [#3088](https://github.com/icinga/icinga2/issues/3088) (Cluster): Windows client w/ command\_endpoint broken with $nscp\_path$ and NscpPath detection
+* [#3084](https://github.com/icinga/icinga2/issues/3084) (CLI): node setup: indent accept\_config and accept\_commands
+* [#3074](https://github.com/icinga/icinga2/issues/3074) (Notifications): Functions can't be specified as command arguments
+* [#2979](https://github.com/icinga/icinga2/issues/2979) (CLI): port empty when using icinga2 node wizard
+
+### ITL
+
+* [#3132](https://github.com/icinga/icinga2/issues/3132) (ITL): new options for smtp CheckCommand
+* [#3125](https://github.com/icinga/icinga2/issues/3125) (ITL): Add new options for ntp\_time CheckCommand
+* [#3110](https://github.com/icinga/icinga2/issues/3110) (ITL): Add ntp\_peer CheckCommand
+* [#3103](https://github.com/icinga/icinga2/issues/3103) (ITL): itl/plugins-contrib.d/\*.conf should point to PluginContribDir
+* [#3091](https://github.com/icinga/icinga2/issues/3091) (ITL): Incorrect check\_ping.exe parameter in the ITL
+* [#3066](https://github.com/icinga/icinga2/issues/3066) (ITL): snmpv3 CheckCommand: Add possibility to set securityLevel
+* [#3064](https://github.com/icinga/icinga2/issues/3064) (ITL): Add elasticsearch checkcommand to itl
+* [#3031](https://github.com/icinga/icinga2/issues/3031) (ITL): Missing 'snmp\_is\_cisco' in Manubulon snmp-memory command definition
+* [#3002](https://github.com/icinga/icinga2/issues/3002) (ITL): Incorrect variable name in the ITL
+* [#2975](https://github.com/icinga/icinga2/issues/2975) (ITL): Add "mongodb" CheckCommand definition
+* [#2963](https://github.com/icinga/icinga2/issues/2963) (ITL): Add "ldap" CheckCommand for "check\_ldap" plugin
+
+### Documentation
+
+* [#3126](https://github.com/icinga/icinga2/issues/3126) (Documentation): Update getting started for Debian Jessie
+* [#3108](https://github.com/icinga/icinga2/issues/3108) (Documentation): wrong default port documentated for nrpe
+* [#3099](https://github.com/icinga/icinga2/issues/3099) (Documentation): Missing openssl verify in cluster troubleshooting docs
+* [#3096](https://github.com/icinga/icinga2/issues/3096) (Documentation): Documentation for checks in an HA zone is wrong
+* [#3086](https://github.com/icinga/icinga2/issues/3086) (Documentation): Wrong file reference in README.md
+* [#3085](https://github.com/icinga/icinga2/issues/3085) (Documentation): Merge documentation fixes from GitHub
+* [#1793](https://github.com/icinga/icinga2/issues/1793) (Documentation): add pagerduty notification documentation
+
+### Support
+
+* [#3123](https://github.com/icinga/icinga2/issues/3123) (Packages): Require gcc47-c++ on sles11 from SLES software development kit repository
+* [#3122](https://github.com/icinga/icinga2/issues/3122) (Packages): mysql-devel is not available in sles11sp3
+* [#3081](https://github.com/icinga/icinga2/issues/3081) (Installation): changelog.py: Allow to define project, make custom\_fields and changes optional
+* [#3073](https://github.com/icinga/icinga2/issues/3073) (Installation): Enhance changelog.py with wordpress blogpost output
+* [#2651](https://github.com/icinga/icinga2/issues/2651) (Packages): Add Icinga 2 to Chocolatey Windows Repository
+
+## 2.3.5 (2015-06-17)
+
+### Notes
+
+* NSClient++ is now bundled with the Windows setup wizard and can optionally be installed
+* Windows Wizard: "include <nscp>" is set by default
+* Windows Wizard: Add update mode
+* Plugins: Add check_perfmon plugin for Windows
+* ITL: Add CheckCommand objects for Windows plugins ("include <windows-plugins>")
+* ITL: Add CheckCommand definitions for "mongodb", "iftraffic", "disk_smb"
+* ITL: Add arguments to CheckCommands "dns", "ftp", "tcp", "nscp"
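+
+A sketch of what the relevant include lines on a Windows client look like after running the 2.3.5 setup wizard; the surrounding `icinga2.conf` content is omitted and assumed.
+
+```
+include <itl>
+include <plugins>
+include <windows-plugins>   // CheckCommand objects for the bundled Windows plugins
+include <nscp>              // NSClient++ check commands, included by default by the wizard
+```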
+
+### Enhancement
+
+* [#3009](https://github.com/icinga/icinga2/issues/3009) (Configuration): Add the --load-all and --log options for nscp-local
+* [#3008](https://github.com/icinga/icinga2/issues/3008) (Configuration): Include \<nscp\> by default on Windows
+* [#2971](https://github.com/icinga/icinga2/issues/2971) (Metrics): Add timestamp support for PerfdataWriter
+* [#2817](https://github.com/icinga/icinga2/issues/2817) (Configuration): Add CheckCommand objects for Windows plugins
+* [#2794](https://github.com/icinga/icinga2/issues/2794) (Plugins): Add check\_perfmon plugin for Windows
+
+### Bug
+
+* [#3051](https://github.com/icinga/icinga2/issues/3051) (Plugins): plugins-contrib.d/databases.conf: wrong argument for mssql\_health
+* [#3043](https://github.com/icinga/icinga2/issues/3043) (Compat): Multiline vars are broken in objects.cache output
+* [#3039](https://github.com/icinga/icinga2/issues/3039) (Compat): Multi line output not correctly handled from compat channels
+* [#3007](https://github.com/icinga/icinga2/issues/3007) (Configuration): Disk and 'icinga' services are missing in the default Windows config
+* [#3006](https://github.com/icinga/icinga2/issues/3006) (Configuration): Some checks in the default Windows configuration fail
+* [#2986](https://github.com/icinga/icinga2/issues/2986) (DB IDO): Missing custom attributes in backends if name is equal to object attribute
+* [#2952](https://github.com/icinga/icinga2/issues/2952) (DB IDO): Incorrect type and state filter mapping for User objects in DB IDO
+* [#2951](https://github.com/icinga/icinga2/issues/2951) (DB IDO): Downtimes are always "fixed"
+* [#2945](https://github.com/icinga/icinga2/issues/2945) (DB IDO): Possible DB deadlock
+* [#2940](https://github.com/icinga/icinga2/issues/2940) (Configuration): node update-config reports critical and warning
+* [#2935](https://github.com/icinga/icinga2/issues/2935) (Configuration): WIN: syslog is not an enable-able feature in windows
+* [#2894](https://github.com/icinga/icinga2/issues/2894) (DB IDO): Wrong timestamps w/ historical data replay in DB IDO
+* [#2839](https://github.com/icinga/icinga2/issues/2839) (CLI): Node wont connect properly to master if host is is not set for Endpoint on new installs
+* [#2836](https://github.com/icinga/icinga2/issues/2836): Icinga2 --version: Error showing Distribution
+* [#2819](https://github.com/icinga/icinga2/issues/2819) (Configuration): Syntax Highlighting: host.address vs host.add
+
+### ITL
+
+* [#3019](https://github.com/icinga/icinga2/issues/3019) (ITL): Add 'iftraffic' to plugins-contrib check command definitions
+* [#3003](https://github.com/icinga/icinga2/issues/3003) (ITL): Add 'disk\_smb' Plugin CheckCommand definition
+* [#2959](https://github.com/icinga/icinga2/issues/2959) (ITL): 'disk': wrong order of threshold command arguments
+* [#2956](https://github.com/icinga/icinga2/issues/2956) (ITL): Add arguments to "tcp" CheckCommand
+* [#2955](https://github.com/icinga/icinga2/issues/2955) (ITL): Add arguments to "ftp" CheckCommand
+* [#2954](https://github.com/icinga/icinga2/issues/2954) (ITL): Add arguments to "dns" CheckCommand
+* [#2949](https://github.com/icinga/icinga2/issues/2949) (ITL): Add 'check\_drivesize' as nscp-local check command
+* [#2938](https://github.com/icinga/icinga2/issues/2938) (ITL): Add SHOWALL to NSCP Checkcommand
+* [#2880](https://github.com/icinga/icinga2/issues/2880) (ITL): Including \<nscp\> on Linux fails with unregistered function
+
+### Documentation
+
+* [#3072](https://github.com/icinga/icinga2/issues/3072) (Documentation): Documentation: Move configuration before advanced topics
+* [#3069](https://github.com/icinga/icinga2/issues/3069) (Documentation): Enhance cluster docs with HA command\_endpoints
+* [#3068](https://github.com/icinga/icinga2/issues/3068) (Documentation): Enhance cluster/client troubleshooting
+* [#3062](https://github.com/icinga/icinga2/issues/3062) (Documentation): Documentation: Update the link to register a new Icinga account
+* [#3059](https://github.com/icinga/icinga2/issues/3059) (Documentation): Documentation: Typo
+* [#3057](https://github.com/icinga/icinga2/issues/3057) (Documentation): Documentation: Extend Custom Attributes with the boolean type
+* [#3056](https://github.com/icinga/icinga2/issues/3056) (Documentation): Wrong service table attributes in Livestatus documentation
+* [#3055](https://github.com/icinga/icinga2/issues/3055) (Documentation): Documentation: Typo
+* [#3049](https://github.com/icinga/icinga2/issues/3049) (Documentation): Update documentation for escape sequences
+* [#3036](https://github.com/icinga/icinga2/issues/3036) (Documentation): Explain string concatenation in objects by real-world example
+* [#3035](https://github.com/icinga/icinga2/issues/3035) (Documentation): Use a more simple example for passing command parameters
+* [#3033](https://github.com/icinga/icinga2/issues/3033) (Documentation): Add local variable scope for \*Command to documentation \(host, service, etc\)
+* [#3032](https://github.com/icinga/icinga2/issues/3032) (Documentation): Add typeof in 'assign/ignore where' expression as example
+* [#3030](https://github.com/icinga/icinga2/issues/3030) (Documentation): Add examples for function usage in "set\_if" and "command" attributes
+* [#3024](https://github.com/icinga/icinga2/issues/3024) (Documentation): Best practices: cluster config sync
+* [#3017](https://github.com/icinga/icinga2/issues/3017) (Documentation): Update service apply for documentation
+* [#3015](https://github.com/icinga/icinga2/issues/3015) (Documentation): Typo in Configuration Best Practice
+* [#2966](https://github.com/icinga/icinga2/issues/2966) (Documentation): Include Windows support details in the documentation
+* [#2965](https://github.com/icinga/icinga2/issues/2965) (Documentation): ITL Documentation: Add a link for passing custom attributes as command parameters
+* [#2950](https://github.com/icinga/icinga2/issues/2950) (Documentation): Missing "\)" in last Apply Rules example
+* [#2279](https://github.com/icinga/icinga2/issues/2279) (Documentation): Add documentation and CheckCommands for the windows plugins
+
+### Support
+
+* [#3016](https://github.com/icinga/icinga2/issues/3016) (Installation): Wrong permission etc on windows
+* [#3011](https://github.com/icinga/icinga2/issues/3011) (Installation): Add support for installing NSClient++ in the Icinga 2 Windows wizard
+* [#3005](https://github.com/icinga/icinga2/issues/3005) (Installation): Determine NSClient++ installation path using MsiGetComponentPath
+* [#3004](https://github.com/icinga/icinga2/issues/3004) (Installation): --scm-installs fails when the service is already installed
+* [#2994](https://github.com/icinga/icinga2/issues/2994) (Installation): Bundle NSClient++ in Windows Installer
+* [#2973](https://github.com/icinga/icinga2/issues/2973) (Packages): SPEC: Give group write permissions for perfdata dir
+* [#2451](https://github.com/icinga/icinga2/issues/2451) (Installation): Extend Windows installer with an update mode
+
+## 2.3.4 (2015-04-20)
+
+### Notes
+
+* ITL: Check commands for various databases
+* Improve validation messages for time periods
+* Update max_check_attempts in generic-{host,service} templates
+* Update logrotate configuration
+* Bugfixes
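+
+A sketch of the updated `generic-host`/`generic-service` templates shipped in `conf.d/templates.conf`; the concrete values below are assumptions, so check the packaged templates on your system.
+
+```
+template Host "generic-host" {
+  max_check_attempts = 3
+  check_interval = 1m
+  retry_interval = 30s
+  check_command = "hostalive"
+}
+
+template Service "generic-service" {
+  max_check_attempts = 5
+  check_interval = 1m
+  retry_interval = 30s
+}
+```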
+
+### Enhancement
+
+* [#2841](https://github.com/icinga/icinga2/issues/2841): Improve timeperiod validation error messages
+* [#2791](https://github.com/icinga/icinga2/issues/2791) (Cluster): Agent Wizard: add options for API defaults
+
+### Bug
+
+* [#2903](https://github.com/icinga/icinga2/issues/2903) (Configuration): custom attributes with recursive macro function calls causing sigabrt
+* [#2898](https://github.com/icinga/icinga2/issues/2898) (CLI): troubleshoot truncates crash reports
+* [#2886](https://github.com/icinga/icinga2/issues/2886): Acknowledging problems w/ expire time does not add the expiry information to the related comment for IDO and compat
+* [#2883](https://github.com/icinga/icinga2/issues/2883) (Notifications): Multiple log messages w/ "Attempting to send notifications for notification object"
+* [#2882](https://github.com/icinga/icinga2/issues/2882) (DB IDO): scheduled\_downtime\_depth column is not reset when a downtime ends or when a downtime is being removed
+* [#2881](https://github.com/icinga/icinga2/issues/2881) (DB IDO): Downtimes which have been triggered are not properly recorded in the database
+* [#2878](https://github.com/icinga/icinga2/issues/2878) (DB IDO): Don't update scheduleddowntime table w/ trigger\_time column when only adding a downtime
+* [#2855](https://github.com/icinga/icinga2/issues/2855): Fix complexity class for Dictionary::Get
+* [#2853](https://github.com/icinga/icinga2/issues/2853) (CLI): Node wizard should only accept 'y', 'n', 'Y' and 'N' as answers for boolean questions
+* [#2842](https://github.com/icinga/icinga2/issues/2842) (Configuration): Default max\_check\_attempts should be lower for hosts than for services
+* [#2840](https://github.com/icinga/icinga2/issues/2840) (Configuration): Validation errors for time ranges which span the DST transition
+* [#2827](https://github.com/icinga/icinga2/issues/2827) (Configuration): logrotate does not work
+* [#2801](https://github.com/icinga/icinga2/issues/2801) (Cluster): command\_endpoint check\_results are not replicated to other endpoints in the same zone
+
+### ITL
+
+* [#2891](https://github.com/icinga/icinga2/issues/2891) (ITL): web.conf is not in the RPM package
+* [#2890](https://github.com/icinga/icinga2/issues/2890) (ITL): check\_disk order of command arguments
+* [#2834](https://github.com/icinga/icinga2/issues/2834) (ITL): Add arguments to the UPS check
+* [#2770](https://github.com/icinga/icinga2/issues/2770) (ITL): Add database plugins to ITL
+
+### Documentation
+
+* [#2902](https://github.com/icinga/icinga2/issues/2902) (Documentation): Documentation: set\_if usage with boolean values and functions
+* [#2876](https://github.com/icinga/icinga2/issues/2876) (Documentation): Typo in graphite feature enable documentation
+* [#2868](https://github.com/icinga/icinga2/issues/2868) (Documentation): Fix a typo
+* [#2843](https://github.com/icinga/icinga2/issues/2843) (Documentation): Add explanatory note for Icinga2 client documentation
+* [#2837](https://github.com/icinga/icinga2/issues/2837) (Documentation): Fix a minor markdown error
+* [#2832](https://github.com/icinga/icinga2/issues/2832) (Documentation): Reword documentation of check\_address
+
+### Support
+
+* [#2888](https://github.com/icinga/icinga2/issues/2888) (Installation): Vim syntax: Match groups before host/service/user objects
+* [#2852](https://github.com/icinga/icinga2/issues/2852) (Installation): Windows Build: Flex detection
+* [#2793](https://github.com/icinga/icinga2/issues/2793) (Packages): logrotate doesn't work on Ubuntu
+
+## 2.3.3 (2015-03-26)
+
+### Notes
+
+* New function: parse_performance_data
+* Include more details in --version
+* Improve documentation
+* Bugfixes
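+
+A quick sketch of the new `parse_performance_data()` function, e.g. in the `icinga2 console`; the field names on the returned value (label, value, warn, crit) are assumed to match those used in status queries.
+
+```
+var pd = parse_performance_data("load1=1.50;5;10;0")
+/* Assumed result: pd.label == "load1", pd.value == 1.5, pd.warn == 5, pd.crit == 10 */
+```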
+
+### Enhancement
+
+* [#2771](https://github.com/icinga/icinga2/issues/2771): Include more details in --version
+* [#2743](https://github.com/icinga/icinga2/issues/2743): New function: parse\_performance\_data
+* [#2737](https://github.com/icinga/icinga2/issues/2737) (Notifications): Show state/type filter names in notice/debug log
+
+### Bug
+
+* [#2828](https://github.com/icinga/icinga2/issues/2828): Array in command arguments doesn't work
+* [#2818](https://github.com/icinga/icinga2/issues/2818) (Configuration): Local variables in "apply for" are overridden
+* [#2816](https://github.com/icinga/icinga2/issues/2816) (CLI): Segmentation fault when executing "icinga2 pki new-cert"
+* [#2812](https://github.com/icinga/icinga2/issues/2812) (Configuration): Return doesn't work inside loops
+* [#2807](https://github.com/icinga/icinga2/issues/2807) (Configuration): Figure out why command validators are not triggered
+* [#2778](https://github.com/icinga/icinga2/issues/2778) (Configuration): object Notification + apply Service fails with error "...refers to service which doesn't exist"
+* [#2772](https://github.com/icinga/icinga2/issues/2772) (Plugins): Plugin "check\_http" is missing in Windows environments
+* [#2768](https://github.com/icinga/icinga2/issues/2768) (Configuration): Add missing keywords in the syntax highlighting files
+* [#2760](https://github.com/icinga/icinga2/issues/2760): Don't ignore extraneous arguments for functions
+* [#2753](https://github.com/icinga/icinga2/issues/2753) (DB IDO): Don't update custom vars for each status update
+* [#2752](https://github.com/icinga/icinga2/issues/2752): startup.log broken when the DB schema needs an update
+* [#2749](https://github.com/icinga/icinga2/issues/2749) (Configuration): Missing config validator for command arguments 'set\_if'
+* [#2718](https://github.com/icinga/icinga2/issues/2718) (Configuration): Update syntax highlighting for 2.3 features
+* [#2557](https://github.com/icinga/icinga2/issues/2557) (Configuration): Improve error message for invalid field access
+* [#2548](https://github.com/icinga/icinga2/issues/2548) (Configuration): Fix VIM syntax highlighting for comments
+
+### ITL
+
+* [#2823](https://github.com/icinga/icinga2/issues/2823) (ITL): wrong 'dns\_lookup' custom attribute default in command-plugins.conf
+* [#2799](https://github.com/icinga/icinga2/issues/2799) (ITL): Add "random" CheckCommand for test and demo purposes
+
+### Documentation
+
+* [#2825](https://github.com/icinga/icinga2/issues/2825) (Documentation): Fix incorrect perfdata templates in the documentation
+* [#2806](https://github.com/icinga/icinga2/issues/2806) (Documentation): Move release info in INSTALL.md into a separate file
+* [#2779](https://github.com/icinga/icinga2/issues/2779) (Documentation): Correct HA documentation
+* [#2777](https://github.com/icinga/icinga2/issues/2777) (Documentation): Typo and invalid example in the runtime macro documentation
+* [#2776](https://github.com/icinga/icinga2/issues/2776) (Documentation): Remove prompt to create a TicketSalt from the wizard
+* [#2775](https://github.com/icinga/icinga2/issues/2775) (Documentation): Explain processing logic/order of apply rules with for loops
+* [#2774](https://github.com/icinga/icinga2/issues/2774) (Documentation): Revamp migration documentation
+* [#2773](https://github.com/icinga/icinga2/issues/2773) (Documentation): Typo in doc library-reference
+* [#2765](https://github.com/icinga/icinga2/issues/2765) (Documentation): Fix a typo in the documentation of ICINGA2\_WITH\_MYSQL and ICINGA2\_WITH\_PGSQL
+* [#2756](https://github.com/icinga/icinga2/issues/2756) (Documentation): Add "access objects at runtime" examples to advanced section
+* [#2738](https://github.com/icinga/icinga2/issues/2738) (Documentation): Update documentation for "apply for" rules
+* [#2501](https://github.com/icinga/icinga2/issues/2501) (Documentation): Re-order the object types in alphabetical order
+
+### Support
+
+* [#2762](https://github.com/icinga/icinga2/issues/2762) (Installation): Flex version check does not reject unsupported versions
+* [#2761](https://github.com/icinga/icinga2/issues/2761) (Installation): Build warnings with CMake 3.1.3
+
+## 2.3.2 (2015-03-12)
+
+### Notes
+
+* Bugfixes
+
+### Bug
+
+* [#2747](https://github.com/icinga/icinga2/issues/2747): Log message for cli commands breaks the init script
+
+## 2.3.1 (2015-03-12)
+
+### Notes
+
+* Bugfixes
+
+Please note that this version fixes the default thresholds for the disk check, which were inadvertently broken in 2.3.0; if you're using percent-based custom thresholds, you will need to add the '%' sign to your custom attributes (see the sketch below).
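+
+A sketch of percent-based disk thresholds after this change; note the explicit '%' sign. The host filter is a placeholder, and `disk_wfree`/`disk_cfree` are the ITL custom attributes for the `disk` CheckCommand.
+
+```
+apply Service "disk" {
+  check_command = "disk"
+  vars.disk_wfree = "20%"   // warn below 20% free space
+  vars.disk_cfree = "10%"   // critical below 10% free space
+  assign where host.name == "example-host"
+}
+```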
+
+### Enhancement
+
+* [#2717](https://github.com/icinga/icinga2/issues/2717) (Configuration): Implement String\#contains
+
+### Bug
+
+* [#2739](https://github.com/icinga/icinga2/issues/2739): Crash in Dependency::Stop
+* [#2736](https://github.com/icinga/icinga2/issues/2736): Fix formatting for the GDB stacktrace
+* [#2735](https://github.com/icinga/icinga2/issues/2735): Make sure that the /var/log/icinga2/crash directory exists
+* [#2731](https://github.com/icinga/icinga2/issues/2731) (Configuration): Config validation fail because of unexpected new-line
+* [#2727](https://github.com/icinga/icinga2/issues/2727) (Cluster): Api heartbeat message response time problem
+* [#2716](https://github.com/icinga/icinga2/issues/2716) (CLI): Missing program name in 'icinga2 --version'
+* [#2672](https://github.com/icinga/icinga2/issues/2672): Kill signal sent only to check process, not whole process group
+
+### ITL
+
+* [#2483](https://github.com/icinga/icinga2/issues/2483) (ITL): Fix check\_disk thresholds: make sure partitions are the last arguments
+
+### Documentation
+
+* [#2732](https://github.com/icinga/icinga2/issues/2732) (Documentation): Update documentation for DB IDO HA Run-Once
+* [#2728](https://github.com/icinga/icinga2/issues/2728) (Documentation): Fix check\_disk default thresholds and document the change of unit
+
+### Support
+
+* [#2742](https://github.com/icinga/icinga2/issues/2742) (Packages): Debian packages do not create /var/log/icinga2/crash
+
+## 2.3.0 (2015-03-10)
+
+### Notes
+
+* Improved configuration validation
+    * Unnecessary escapes are no longer permitted (e.g. \')
+    * Dashes are no longer permitted in identifier names (as their semantics are ambiguous)
+    * Unused values are detected (e.g. { "-M" })
+    * Validation for time ranges has been improved
+    * Additional validation rules for some object types (Notification and User)
+* New language features
+    * Implement a separate type for boolean values
+    * Support for user-defined functions
+    * Support for conditional statements (if/else)
+    * Support for 'for' and 'while' loops
+    * Support for local variables using the 'var' keyword
+    * New operators: % (modulo), ^ (xor), - (unary minus) and + (unary plus)
+    * Implemented prototype-based methods for most built-in types (e.g. [ 3, 2 ].sort())
+    * Explicit access to local and global variables using the 'locals' and 'globals' keywords
+    * Changed the order in which filters are evaluated for apply rules with 'for'
+    * Make type objects accessible as global variables
+    * Support for using functions in custom attributes
+    * Access objects and their runtime attributes in functions (e.g. get_host(NodeName).state)
+* ITL improvements
+    * Additional check commands were added to the ITL
+    * Additional arguments for existing check commands
+* CLI improvements
+    * Add the 'icinga2 console' CLI command which can be used to test expressions
+    * Add the 'icinga2 troubleshoot' CLI command for collecting troubleshooting information
+    * Performance improvements for the 'icinga2 node update-config' CLI command
+    * Implement argument auto-completion for short options (e.g. daemon -c)
+    * 'node setup' and 'node wizard' create backups for existing certificate files
+* Add ignore_soft_states option for Dependency object configuration
+* Fewer threads are used for socket I/O
+* Flapping detection for hosts and services is disabled by default
+* Added support for OpenTSDB
+* New Livestatus tables: hostsbygroup, servicesbygroup, servicesbyhostgroup
+* Include GDB backtrace in crash reports
+* Various documentation improvements
+* Solved a number of issues where cluster instances would not reconnect after intermittent connection problems
+* A lot of other, minor changes
+
+* [DB IDO schema upgrade](17-upgrading-icinga-2.md#upgrading-icinga-2) to `1.13.0` required!
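+
+A brief sketch combining several of the 2.3.0 language features listed above (user-defined functions, if/else, for loops, the `var` keyword, prototype methods and the `globals` keyword); all names and values are illustrative only.
+
+```
+globals.interval_for = function(kind) {
+  if (kind == "critical") {
+    return 1m
+  } else {
+    return 5m
+  }
+}
+
+var total = 0
+for (n in [ 3, 1, 2 ].sort()) {   // prototype method on an Array literal
+  total += n                      // total == 6 after the loop
+}
+
+object Host "example-host" {
+  check_command = "hostalive"
+  address = "127.0.0.1"
+  check_interval = interval_for("critical")
+}
+```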
+
+### Enhancement
+
+* [#2704](https://github.com/icinga/icinga2/issues/2704): Support the SNI TLS extension
+* [#2702](https://github.com/icinga/icinga2/issues/2702): Add validator for time ranges in ScheduledDowntime objects
+* [#2701](https://github.com/icinga/icinga2/issues/2701): Remove macro argument for IMPL\_TYPE\_LOOKUP
+* [#2696](https://github.com/icinga/icinga2/issues/2696): Include GDB backtrace in crash reports
+* [#2678](https://github.com/icinga/icinga2/issues/2678) (Configuration): Add support for else-if
+* [#2663](https://github.com/icinga/icinga2/issues/2663) (Livestatus): Change Livestatus query log level to 'notice'
+* [#2657](https://github.com/icinga/icinga2/issues/2657) (Cluster): Show slave lag for the cluster-zone check
+* [#2635](https://github.com/icinga/icinga2/issues/2635) (Configuration): introduce time dependent variable values
+* [#2634](https://github.com/icinga/icinga2/issues/2634) (Cluster): Add the ability to use a CA certificate as a way of verifying hosts for CSR autosigning
+* [#2609](https://github.com/icinga/icinga2/issues/2609): udp check command is missing arguments.
+* [#2604](https://github.com/icinga/icinga2/issues/2604) (CLI): Backup certificate files in 'node setup'
+* [#2601](https://github.com/icinga/icinga2/issues/2601) (Configuration): Implement continue/break keywords
+* [#2600](https://github.com/icinga/icinga2/issues/2600) (Configuration): Implement support for Json.encode and Json.decode
+* [#2591](https://github.com/icinga/icinga2/issues/2591) (Metrics): Add timestamp support for Graphite
+* [#2588](https://github.com/icinga/icinga2/issues/2588) (Configuration): Add path information for objects in object list
+* [#2578](https://github.com/icinga/icinga2/issues/2578) (Configuration): Implement Array\#join
+* [#2553](https://github.com/icinga/icinga2/issues/2553) (Configuration): Implement validator support for function objects
+* [#2552](https://github.com/icinga/icinga2/issues/2552) (Configuration): Make operators &&, || behave like in JavaScript
+* [#2546](https://github.com/icinga/icinga2/issues/2546): Add macros $host.check\_source$ and $service.check\_source$
+* [#2544](https://github.com/icinga/icinga2/issues/2544) (Configuration): Implement the while keyword
+* [#2531](https://github.com/icinga/icinga2/issues/2531) (Configuration): Implement keywords to explicitly access globals/locals
+* [#2522](https://github.com/icinga/icinga2/issues/2522) (CLI): Make invalid log-severity option output an error instead of a warning
+* [#2509](https://github.com/icinga/icinga2/issues/2509): Host/Service runtime macro downtime\_depth
+* [#2491](https://github.com/icinga/icinga2/issues/2491) (Configuration): Assignments shouldn't have a "return" value
+* [#2488](https://github.com/icinga/icinga2/issues/2488): Implement additional methods for strings
+* [#2487](https://github.com/icinga/icinga2/issues/2487) (CLI): Figure out what to do about libreadline \(license\)
+* [#2486](https://github.com/icinga/icinga2/issues/2486) (CLI): Figure out a better name for the repl command
+* [#2466](https://github.com/icinga/icinga2/issues/2466) (Configuration): Implement line-continuation for the "console" command
+* [#2456](https://github.com/icinga/icinga2/issues/2456) (CLI): feature enable should use relative symlinks
+* [#2439](https://github.com/icinga/icinga2/issues/2439) (Configuration): Document the new language features in 2.3
+* [#2437](https://github.com/icinga/icinga2/issues/2437) (CLI): Implement readline support for the "console" CLI command
+* [#2432](https://github.com/icinga/icinga2/issues/2432) (CLI): Backport i2tcl's error reporting functionality into "icinga2 console"
+* [#2429](https://github.com/icinga/icinga2/issues/2429) (Configuration): Figure out how variable scopes should work
+* [#2426](https://github.com/icinga/icinga2/issues/2426) (Configuration): Implement a way to call methods on objects
+* [#2421](https://github.com/icinga/icinga2/issues/2421) (Configuration): Implement a way to remove dictionary keys
+* [#2418](https://github.com/icinga/icinga2/issues/2418) (Plugins): Windows plugins should behave like their Linux cousins
+* [#2408](https://github.com/icinga/icinga2/issues/2408) (Configuration): ConfigCompiler::HandleInclude should return an inline dictionary
+* [#2407](https://github.com/icinga/icinga2/issues/2407) (Configuration): Implement a boolean sub-type for the Value class
+* [#2405](https://github.com/icinga/icinga2/issues/2405): Disallow calling strings as functions
+* [#2396](https://github.com/icinga/icinga2/issues/2396) (Configuration): Evaluate usage of function\(\)
+* [#2391](https://github.com/icinga/icinga2/issues/2391): Improve output of ToString for type objects
+* [#2390](https://github.com/icinga/icinga2/issues/2390): Register type objects as global variables
+* [#2367](https://github.com/icinga/icinga2/issues/2367) (Configuration): The lexer shouldn't accept escapes for characters which don't have to be escaped
+* [#2365](https://github.com/icinga/icinga2/issues/2365) (DB IDO): Implement socket\_path attribute for the IdoMysqlConnection class
+* [#2355](https://github.com/icinga/icinga2/issues/2355) (Configuration): Implement official support for user-defined functions and the "for" keyword
+* [#2351](https://github.com/icinga/icinga2/issues/2351) (Plugins): Windows agent is missing the standard plugin check\_ping
+* [#2348](https://github.com/icinga/icinga2/issues/2348) (Plugins): Plugin Check Commands: Add icmp
+* [#2324](https://github.com/icinga/icinga2/issues/2324) (Configuration): Implement the "if" and "else" keywords
+* [#2323](https://github.com/icinga/icinga2/issues/2323) (Configuration): Figure out whether Number + String should implicitly convert the Number argument to a string
+* [#2322](https://github.com/icinga/icinga2/issues/2322) (Configuration): Make the config parser thread-safe
+* [#2318](https://github.com/icinga/icinga2/issues/2318) (Configuration): Implement the % operator
+* [#2312](https://github.com/icinga/icinga2/issues/2312): Move the cast functions into libbase
+* [#2310](https://github.com/icinga/icinga2/issues/2310) (Configuration): Implement unit tests for the config parser
+* [#2304](https://github.com/icinga/icinga2/issues/2304): Implement an option to disable building the Demo component
+* [#2303](https://github.com/icinga/icinga2/issues/2303): Implement an option to disable building the Livestatus module
+* [#2300](https://github.com/icinga/icinga2/issues/2300) (Notifications): Implement the DISABLE\_HOST\_SVC\_NOTIFICATIONS and ENABLE\_HOST\_SVC\_NOTIFICATIONS commands
+* [#2298](https://github.com/icinga/icinga2/issues/2298) (Plugins): Missing check\_disk output on Windows
+* [#2294](https://github.com/icinga/icinga2/issues/2294) (Configuration): Implement an AST Expression for T\_CONST
+* [#2290](https://github.com/icinga/icinga2/issues/2290): Rename \_DEBUG to I2\_DEBUG
+* [#2286](https://github.com/icinga/icinga2/issues/2286) (Configuration): Redesign how stack frames work for scripts
+* [#2265](https://github.com/icinga/icinga2/issues/2265): ConfigCompiler::Compile\* should return an AST node
+* [#2264](https://github.com/icinga/icinga2/issues/2264) (Configuration): ConfigCompiler::HandleInclude\* should return an AST node
+* [#2262](https://github.com/icinga/icinga2/issues/2262) (CLI): Add an option that hides CLI commands
+* [#2260](https://github.com/icinga/icinga2/issues/2260) (Configuration): Evaluate apply/object rules when the parent objects are created
+* [#2211](https://github.com/icinga/icinga2/issues/2211) (Configuration): Variable from for loop not usable in assign statement
+* [#2186](https://github.com/icinga/icinga2/issues/2186) (Configuration): Access object runtime attributes in custom vars & command arguments
+* [#2176](https://github.com/icinga/icinga2/issues/2176) (Configuration): Please add labels in SNMP checks
+* [#2043](https://github.com/icinga/icinga2/issues/2043) (Livestatus): Livestatus: Add GroupBy tables: hostsbygroup, servicesbygroup, servicesbyhostgroup
+* [#2027](https://github.com/icinga/icinga2/issues/2027) (Configuration): Add parent soft states option to Dependency object configuration
+* [#2000](https://github.com/icinga/icinga2/issues/2000) (Metrics): Add OpenTSDB Writer
+* [#1959](https://github.com/icinga/icinga2/issues/1959) (Configuration): extended Manubulon SNMP Check Plugin Command
+* [#1890](https://github.com/icinga/icinga2/issues/1890) (DB IDO): IDO should fill program\_end\_time on a clean shutdown
+* [#1866](https://github.com/icinga/icinga2/issues/1866) (Notifications): Disable flapping detection by default
+* [#1859](https://github.com/icinga/icinga2/issues/1859): Run CheckCommands with C locale \(workaround for comma vs dot and plugin api bug\)
+* [#1783](https://github.com/icinga/icinga2/issues/1783) (Plugins): Plugin Check Commands: add check\_vmware\_esx
+* [#1733](https://github.com/icinga/icinga2/issues/1733) (Configuration): Disallow side-effect-free r-value expressions in expression lists
+* [#1507](https://github.com/icinga/icinga2/issues/1507): Don't spawn threads for network connections
+* [#404](https://github.com/icinga/icinga2/issues/404) (CLI): Add troubleshooting collect cli command
+
+### Bug
+
+* [#2707](https://github.com/icinga/icinga2/issues/2707) (DB IDO): Crash when using ido-pgsql
+* [#2706](https://github.com/icinga/icinga2/issues/2706): Icinga2 shuts down when service is reloaded
+* [#2703](https://github.com/icinga/icinga2/issues/2703) (Configuration): Attribute hints don't work for nested attributes
+* [#2699](https://github.com/icinga/icinga2/issues/2699) (Configuration): Dependency: Validate \*\_{host,service}\_name objects on their existance
+* [#2698](https://github.com/icinga/icinga2/issues/2698) (Livestatus): Improve Livestatus query performance
+* [#2697](https://github.com/icinga/icinga2/issues/2697) (Configuration): Memory leak in Expression::GetReference
+* [#2695](https://github.com/icinga/icinga2/issues/2695) (Configuration): else if doesn't work without an else branch
+* [#2693](https://github.com/icinga/icinga2/issues/2693): Check whether the new TimePeriod validator is working as expected
+* [#2692](https://github.com/icinga/icinga2/issues/2692) (CLI): Resource leak in TroubleshootCommand::ObjectInfo
+* [#2691](https://github.com/icinga/icinga2/issues/2691) (CLI): Resource leak in TroubleshootCommand::Run
+* [#2689](https://github.com/icinga/icinga2/issues/2689): Check if scheduled downtimes work properly
+* [#2688](https://github.com/icinga/icinga2/issues/2688) (Plugins): check\_memory tool shows incorrect memory size on windows
+* [#2685](https://github.com/icinga/icinga2/issues/2685) (Cluster): Don't accept config updates for zones for which we have an authoritative copy of the config
+* [#2684](https://github.com/icinga/icinga2/issues/2684) (Cluster): Icinga crashed on SocketEvent
+* [#2683](https://github.com/icinga/icinga2/issues/2683) (Cluster): Crash in ApiClient::TimeoutTimerHandler
+* [#2680](https://github.com/icinga/icinga2/issues/2680): Deadlock in TlsStream::Handshake
+* [#2679](https://github.com/icinga/icinga2/issues/2679) (Cluster): Deadlock in ApiClient::Disconnect
+* [#2677](https://github.com/icinga/icinga2/issues/2677): Crash in SocketEvents::Register
+* [#2676](https://github.com/icinga/icinga2/issues/2676) (Livestatus): Windows build fails
+* [#2674](https://github.com/icinga/icinga2/issues/2674) (DB IDO): Hosts: process\_performance\_data = 0 in database even though enable\_perfdata = 1 in config
+* [#2671](https://github.com/icinga/icinga2/issues/2671) (DB IDO): Crash in DbObject::SendStatusUpdate
+* [#2670](https://github.com/icinga/icinga2/issues/2670) (Compat): Valgrind warning for ExternalCommandListener::CommandPipeThread
+* [#2669](https://github.com/icinga/icinga2/issues/2669): Crash in ApiEvents::RepositoryTimerHandler
+* [#2665](https://github.com/icinga/icinga2/issues/2665) (Livestatus): livestatus limit header not working
+* [#2660](https://github.com/icinga/icinga2/issues/2660) (Configuration): apply-for incorrectly converts loop var to string
+* [#2659](https://github.com/icinga/icinga2/issues/2659) (Configuration): Config parser fails non-deterministic on Notification missing Checkable
+* [#2658](https://github.com/icinga/icinga2/issues/2658) (CLI): Crash in icinga2 console
+* [#2654](https://github.com/icinga/icinga2/issues/2654) (DB IDO): Deadlock with DB IDO dump and forcing a scheduled check
+* [#2650](https://github.com/icinga/icinga2/issues/2650) (CLI): SIGSEGV in CLI
+* [#2647](https://github.com/icinga/icinga2/issues/2647) (DB IDO): Icinga doesn't update long\_output in DB
+* [#2646](https://github.com/icinga/icinga2/issues/2646) (Cluster): Misleading ApiListener connection log messages on a master \(Endpoint vs Zone\)
+* [#2644](https://github.com/icinga/icinga2/issues/2644) (CLI): Figure out why 'node update-config' becomes slow over time
+* [#2642](https://github.com/icinga/icinga2/issues/2642): Icinga 2 sometimes doesn't reconnect to the master
+* [#2641](https://github.com/icinga/icinga2/issues/2641) (Cluster): ICINGA process crashes every night
+* [#2639](https://github.com/icinga/icinga2/issues/2639) (CLI): Build fails on Debian squeeze
+* [#2636](https://github.com/icinga/icinga2/issues/2636): Exception in WorkQueue::StatusTimerHandler
+* [#2631](https://github.com/icinga/icinga2/issues/2631) (Cluster): deadlock in client connection
+* [#2630](https://github.com/icinga/icinga2/issues/2630) (Cluster): Don't request heartbeat messages until after we've synced the log
+* [#2627](https://github.com/icinga/icinga2/issues/2627) (Livestatus): Livestatus query on commands table with custom vars fails
+* [#2626](https://github.com/icinga/icinga2/issues/2626) (DB IDO): Icinga2 segfaults when issuing postgresql queries
+* [#2622](https://github.com/icinga/icinga2/issues/2622): "node wizard" crashes
+* [#2621](https://github.com/icinga/icinga2/issues/2621): Don't attempt to restore program state from non-existing state file
+* [#2618](https://github.com/icinga/icinga2/issues/2618) (DB IDO): DB IDO {host,service}checks command\_line value is "Object of type 'icinga::Array'"
+* [#2617](https://github.com/icinga/icinga2/issues/2617) (DB IDO): Indicate that Icinga2 is shutting down in case of a fatal error
+* [#2615](https://github.com/icinga/icinga2/issues/2615): Make the arguments for the stats functions const-ref
+* [#2613](https://github.com/icinga/icinga2/issues/2613) (DB IDO): DB IDO: Duplicate entry icinga\_scheduleddowntime
+* [#2608](https://github.com/icinga/icinga2/issues/2608) (Plugins): Ignore the -X option for check\_disk on Windows
+* [#2605](https://github.com/icinga/icinga2/issues/2605): Compiler warnings
+* [#2599](https://github.com/icinga/icinga2/issues/2599) (Cluster): Agent writes CR CR LF in synchronized config files
+* [#2598](https://github.com/icinga/icinga2/issues/2598): Added downtimes must be triggered immediately if checkable is Not-OK
+* [#2597](https://github.com/icinga/icinga2/issues/2597) (Cluster): Config sync authoritative file never created
+* [#2596](https://github.com/icinga/icinga2/issues/2596) (Compat): StatusDataWriter: Wrong host notification filters \(broken fix in \#8192\)
+* [#2593](https://github.com/icinga/icinga2/issues/2593) (Compat): last\_hard\_state missing in StatusDataWriter
+* [#2589](https://github.com/icinga/icinga2/issues/2589) (Configuration): Stacktrace on Endpoint not belonging to a zone or multiple zones
+* [#2586](https://github.com/icinga/icinga2/issues/2586): Icinga2 master doesn't change check-status when "accept\_commands = true" is not set at client node
+* [#2579](https://github.com/icinga/icinga2/issues/2579) (Configuration): Apply rule '' for host does not match anywhere!
+* [#2572](https://github.com/icinga/icinga2/issues/2572) (Cluster): Incorrectly formatted timestamp in .timestamp file
+* [#2570](https://github.com/icinga/icinga2/issues/2570): Crash in ScheduledDowntime::CreateNextDowntime
+* [#2569](https://github.com/icinga/icinga2/issues/2569): PidPath, VarsPath, ObjectsPath and StatePath no longer read from init.conf
+* [#2566](https://github.com/icinga/icinga2/issues/2566) (Configuration): Don't allow comparison of strings and numbers
+* [#2562](https://github.com/icinga/icinga2/issues/2562) (Cluster): ApiListener::ReplayLog shouldn't hold mutex lock during call to Socket::Poll
+* [#2560](https://github.com/icinga/icinga2/issues/2560): notify flag is ignored in ACKNOWLEDGE\_\*\_PROBLEM commands
+* [#2559](https://github.com/icinga/icinga2/issues/2559) (DB IDO): Duplicate entry on icinga\_hoststatus
+* [#2556](https://github.com/icinga/icinga2/issues/2556) (CLI): Running icinga2 command as non-privileged user raises error
+* [#2551](https://github.com/icinga/icinga2/issues/2551) (Livestatus): Livestatus operator =~ is not case-insensitive
+* [#2542](https://github.com/icinga/icinga2/issues/2542) (CLI): icinga2 node wizard: Create backups of certificates
+* [#2539](https://github.com/icinga/icinga2/issues/2539) (Cluster): Report missing command objects on remote agent
+* [#2533](https://github.com/icinga/icinga2/issues/2533) (Cluster): Problems using command\_endpoint inside HA zone
+* [#2529](https://github.com/icinga/icinga2/issues/2529) (CLI): CLI console fails to report errors in included files
+* [#2526](https://github.com/icinga/icinga2/issues/2526) (Configuration): Deadlock when accessing loop variable inside of the loop
+* [#2525](https://github.com/icinga/icinga2/issues/2525) (Configuration): Lexer term for T\_ANGLE\_STRING is too aggressive
+* [#2513](https://github.com/icinga/icinga2/issues/2513) (CLI): icinga2 node update should not write config for blacklisted zones/host
+* [#2503](https://github.com/icinga/icinga2/issues/2503) (CLI): Argument auto-completion doesn't work for short options
+* [#2502](https://github.com/icinga/icinga2/issues/2502): group assign fails with bad lexical cast when evaluating rules
+* [#2497](https://github.com/icinga/icinga2/issues/2497): Exception on missing config files
+* [#2494](https://github.com/icinga/icinga2/issues/2494) (Livestatus): Error messages when stopping Icinga
+* [#2493](https://github.com/icinga/icinga2/issues/2493): Compiler warnings
+* [#2492](https://github.com/icinga/icinga2/issues/2492): Segfault on icinga::String::operator= when compiling configuration
+* [#2485](https://github.com/icinga/icinga2/issues/2485) (Configuration): parsing include\_recursive
+* [#2482](https://github.com/icinga/icinga2/issues/2482) (Configuration): escaped backslash in string literals
+* [#2467](https://github.com/icinga/icinga2/issues/2467) (CLI): Icinga crashes when config file name is invalid
+* [#2465](https://github.com/icinga/icinga2/issues/2465) (Configuration): Debug info for indexer is incorrect
+* [#2457](https://github.com/icinga/icinga2/issues/2457): Config file passing validation causes segfault
+* [#2452](https://github.com/icinga/icinga2/issues/2452) (Cluster): Agent checks fail when there's already a host with the same name
+* [#2448](https://github.com/icinga/icinga2/issues/2448) (Configuration): User::ValidateFilters isn't being used
+* [#2447](https://github.com/icinga/icinga2/issues/2447) (Configuration): ConfigCompilerContext::WriteObject crashes after ConfigCompilerContext::FinishObjectsFile was called
+* [#2445](https://github.com/icinga/icinga2/issues/2445) (Configuration): segfault on startup
+* [#2442](https://github.com/icinga/icinga2/issues/2442) (DB IDO): POSTGRES IDO: invalid syntax for integer: "true" while trying to update table icinga\_hoststatus
+* [#2441](https://github.com/icinga/icinga2/issues/2441) (CLI): console: Don't repeat line when we're reporting an error for the last line
+* [#2436](https://github.com/icinga/icinga2/issues/2436) (Configuration): Modulo 0 crashes Icinga
+* [#2435](https://github.com/icinga/icinga2/issues/2435) (Configuration): Location info for strings is incorrect
+* [#2434](https://github.com/icinga/icinga2/issues/2434) (Configuration): Setting an attribute on an r-value fails
+* [#2433](https://github.com/icinga/icinga2/issues/2433) (Configuration): Confusing error message when trying to set a field on a string
+* [#2431](https://github.com/icinga/icinga2/issues/2431) (Configuration): icinga 2 Config Error needs to be more verbose
+* [#2428](https://github.com/icinga/icinga2/issues/2428) (Configuration): Debug visualizer for the Value class is broken
+* [#2427](https://github.com/icinga/icinga2/issues/2427) (Configuration): if doesn't work for non-boolean arguments
+* [#2423](https://github.com/icinga/icinga2/issues/2423) (Configuration): Require at least one user for notification objects \(user or as member of user\_groups\)
+* [#2419](https://github.com/icinga/icinga2/issues/2419) (Configuration): Confusing error message for import
+* [#2410](https://github.com/icinga/icinga2/issues/2410): The Boolean type change broke set\_if
+* [#2406](https://github.com/icinga/icinga2/issues/2406) (Configuration): len\(\) overflows
+* [#2395](https://github.com/icinga/icinga2/issues/2395) (Configuration): operator precedence for % and \> is incorrect
+* [#2388](https://github.com/icinga/icinga2/issues/2388): Value\(""\).IsEmpty\(\) should return true
+* [#2379](https://github.com/icinga/icinga2/issues/2379) (Cluster): Windows Agent: Missing directory "zones" in setup
+* [#2375](https://github.com/icinga/icinga2/issues/2375) (Configuration): Config validator doesn't show in which file the error was found
+* [#2362](https://github.com/icinga/icinga2/issues/2362): Serialize\(\) fails to serialize objects which don't have a registered type
+* [#2361](https://github.com/icinga/icinga2/issues/2361): Fix warnings when using CMake 3.1.0
+* [#2346](https://github.com/icinga/icinga2/issues/2346) (DB IDO): Missing persistent\_comment, notify\_contact columns for acknowledgement table
+* [#2329](https://github.com/icinga/icinga2/issues/2329) (Configuration): - shouldn't be allowed in identifiers
+* [#2326](https://github.com/icinga/icinga2/issues/2326): Compiler warnings
+* [#2320](https://github.com/icinga/icinga2/issues/2320) (Configuration): - operator doesn't work in expressions
+* [#2319](https://github.com/icinga/icinga2/issues/2319) (Configuration): Set expression should check whether LHS is a null pointer
+* [#2317](https://github.com/icinga/icinga2/issues/2317) (Configuration): Validate array subscripts
+* [#2316](https://github.com/icinga/icinga2/issues/2316) (Configuration): The \_\_return keyword is broken
+* [#2315](https://github.com/icinga/icinga2/issues/2315) (Configuration): Return values for functions are broken
+* [#2314](https://github.com/icinga/icinga2/issues/2314): Scoping rules for "for" are broken
+* [#2313](https://github.com/icinga/icinga2/issues/2313) (Configuration): Unterminated string literals should cause parser to return an error
+* [#2308](https://github.com/icinga/icinga2/issues/2308) (Configuration): Change parameter type for include and include\_recursive to T\_STRING
+* [#2307](https://github.com/icinga/icinga2/issues/2307) (Configuration): Fix the shift/reduce conflicts in the parser
+* [#2289](https://github.com/icinga/icinga2/issues/2289) (DB IDO): DB IDO: Duplicate entry icinga\_{host,service}dependencies
+* [#2274](https://github.com/icinga/icinga2/issues/2274) (Notifications): Reminder notifications not being sent but logged every 5 secs
+* [#2234](https://github.com/icinga/icinga2/issues/2234): Avoid rebuilding libbase when the version number changes
+* [#2232](https://github.com/icinga/icinga2/issues/2232): Unity build doesn't work with MSVC
+* [#2194](https://github.com/icinga/icinga2/issues/2194) (Configuration): validate configured legacy timeperiod ranges
+* [#2174](https://github.com/icinga/icinga2/issues/2174) (Configuration): Update validators for CustomVarObject
+* [#2020](https://github.com/icinga/icinga2/issues/2020) (Configuration): Invalid macro results in exception
+* [#1899](https://github.com/icinga/icinga2/issues/1899): Scheduled start time will be ignored if the host or service is already in a problem state
+* [#1530](https://github.com/icinga/icinga2/issues/1530): Remove name and return value for stats functions
+
+### ITL
+
+* [#2705](https://github.com/icinga/icinga2/issues/2705) (ITL): Add check commands for NSClient++
+* [#2661](https://github.com/icinga/icinga2/issues/2661) (ITL): ITL: The procs check command uses spaces instead of tabs
+* [#2652](https://github.com/icinga/icinga2/issues/2652) (ITL): Rename PluginsContribDir to PluginContribDir
+* [#2649](https://github.com/icinga/icinga2/issues/2649) (ITL): Snmp CheckCommand misses various options
+* [#2614](https://github.com/icinga/icinga2/issues/2614) (ITL): add webinject checkcommand
+* [#2610](https://github.com/icinga/icinga2/issues/2610) (ITL): Add ITL check command for check\_ipmi\_sensor
+* [#2573](https://github.com/icinga/icinga2/issues/2573) (ITL): Extend disk checkcommand
+* [#2541](https://github.com/icinga/icinga2/issues/2541) (ITL): The check "hostalive" is not working with ipv6
+* [#2012](https://github.com/icinga/icinga2/issues/2012) (ITL): ITL: ESXi-Hardware
+* [#2011](https://github.com/icinga/icinga2/issues/2011) (ITL): ITL: Check\_Mem.pl
+* [#1984](https://github.com/icinga/icinga2/issues/1984) (ITL): ITL: Interfacetable
+
+### Documentation
+
+* [#2711](https://github.com/icinga/icinga2/issues/2711) (Documentation): Document closures \('use'\)
+* [#2709](https://github.com/icinga/icinga2/issues/2709) (Documentation): Fix a typo in documentation
+* [#2662](https://github.com/icinga/icinga2/issues/2662) (Documentation): Update Remote Client/Distributed Monitoring Documentation
+* [#2595](https://github.com/icinga/icinga2/issues/2595) (Documentation): Add documentation for cli command 'console'
+* [#2575](https://github.com/icinga/icinga2/issues/2575) (Documentation): Remote Clients: Add manual setup cli commands
+* [#2555](https://github.com/icinga/icinga2/issues/2555) (Documentation): The Zone::global attribute is not documented
+* [#2399](https://github.com/icinga/icinga2/issues/2399) (Documentation): Allow name to be changed from inside the object
+* [#2387](https://github.com/icinga/icinga2/issues/2387) (Documentation): Documentation enhancement for snmp traps and passive checks.
+* [#2321](https://github.com/icinga/icinga2/issues/2321) (Documentation): Document operator precedence
+* [#2198](https://github.com/icinga/icinga2/issues/2198) (Documentation): Variable expansion is single quoted.
+* [#1860](https://github.com/icinga/icinga2/issues/1860) (Documentation): Add some more PNP details
+
+### Support
+
+* [#2616](https://github.com/icinga/icinga2/issues/2616) (Installation): Build fails on OpenBSD
+* [#2602](https://github.com/icinga/icinga2/issues/2602) (Packages): Icinga2 config reset after package update \(centos6.6\)
+* [#2511](https://github.com/icinga/icinga2/issues/2511) (Packages): '../features-available/checker.conf' does not exist \[Windows\]
+* [#2374](https://github.com/icinga/icinga2/issues/2374) (Packages): Move the config file for the ido-\*sql features into the icinga2-ido-\* packages
+* [#2302](https://github.com/icinga/icinga2/issues/2302) (Installation): Don't build db\_ido when both MySQL and PostgreSQL aren't enabled
+
+## 2.2.4 (2015-02-05)
+
+### Notes
+
+* Bugfixes
+
+### Bug
+
+* [#2587](https://github.com/icinga/icinga2/issues/2587) (CLI): Output in "node wizard" is confusing
+* [#2577](https://github.com/icinga/icinga2/issues/2577) (Compat): enable\_event\_handlers attribute is missing in status.dat
+* [#2571](https://github.com/icinga/icinga2/issues/2571): Segfault in Checkable::AddNotification
+* [#2561](https://github.com/icinga/icinga2/issues/2561): Scheduling downtime for host and all services only schedules services
+* [#2558](https://github.com/icinga/icinga2/issues/2558) (CLI): Restart of Icinga hangs
+* [#2550](https://github.com/icinga/icinga2/issues/2550) (DB IDO): Crash in DbConnection::ProgramStatusHandler
+* [#2538](https://github.com/icinga/icinga2/issues/2538) (CLI): Restart fails after deleting a Host
+* [#2508](https://github.com/icinga/icinga2/issues/2508) (Compat): Feature statusdata shows wrong host notification options
+* [#2481](https://github.com/icinga/icinga2/issues/2481) (CLI): Satellite doesn't use manually supplied 'local zone name'
+* [#2464](https://github.com/icinga/icinga2/issues/2464): vfork\(\) hangs on OS X
+* [#2256](https://github.com/icinga/icinga2/issues/2256) (Notifications): Un-Bashify mail-{host,service}-notification.sh
+* [#2242](https://github.com/icinga/icinga2/issues/2242): livestatus / nsca / etc submits are ignored during reload
+* [#1893](https://github.com/icinga/icinga2/issues/1893): Configured recurring downtimes not applied on Saturdays
+
+### ITL
+
+* [#2532](https://github.com/icinga/icinga2/issues/2532) (ITL): check\_ssmtp command does NOT support mail\_from
+
+### Documentation
+
+* [#2521](https://github.com/icinga/icinga2/issues/2521) (Documentation): Typos in readme file for windows plugins
+* [#2520](https://github.com/icinga/icinga2/issues/2520) (Documentation): inconsistent URL http\(s\)://www.icinga.org
+* [#2512](https://github.com/icinga/icinga2/issues/2512) (Documentation): Update Icinga Web 2 uri to /icingaweb2
+
+### Support
+
+* [#2517](https://github.com/icinga/icinga2/issues/2517) (Packages): Fix YAJL detection on Debian squeeze
+* [#2462](https://github.com/icinga/icinga2/issues/2462) (Packages): Icinga 2.2.2 build fails on SLES11SP3 because of changed boost dependency
+
+## 2.2.3 (2015-01-12)
+
+### Notes
+
+* Bugfixes
+
+### Bug
+
+* [#2499](https://github.com/icinga/icinga2/issues/2499) (CLI): Segfault on update-config old empty config
+* [#2498](https://github.com/icinga/icinga2/issues/2498) (CLI): icinga2 node update config shows hex instead of human readable names
+* [#2496](https://github.com/icinga/icinga2/issues/2496): Icinga 2.2.2 segfaults on FreeBSD
+* [#2477](https://github.com/icinga/icinga2/issues/2477): DB IDO query queue limit reached on reload
+* [#2473](https://github.com/icinga/icinga2/issues/2473) (CLI): check\_interval must be greater than 0 error on update-config
+* [#2471](https://github.com/icinga/icinga2/issues/2471) (Cluster): Arguments without values are not used on plugin exec
+* [#2470](https://github.com/icinga/icinga2/issues/2470) (Plugins): Windows plugin check\_service.exe can't find service NTDS
+* [#2459](https://github.com/icinga/icinga2/issues/2459) (CLI): Incorrect ticket shouldn't cause "node wizard" to terminate
+* [#2420](https://github.com/icinga/icinga2/issues/2420) (Notifications): Volatile checks trigger invalid notifications on OK-\>OK state changes
+
+### Documentation
+
+* [#2490](https://github.com/icinga/icinga2/issues/2490) (Documentation): Typo in example of StatusDataWriter
+
+### Support
+
+* [#2460](https://github.com/icinga/icinga2/issues/2460) (Packages): Icinga 2.2.2 doesn't build on i586 SUSE distributions
+
+## 2.2.2 (2014-12-18)
+
+### Notes
+
+* Bugfixes
+
+### Bug
+
+* [#2446](https://github.com/icinga/icinga2/issues/2446) (Compat): StatusDataWriter: Wrong export of event\_handler\_enabled
+* [#2444](https://github.com/icinga/icinga2/issues/2444) (CLI): Remove usage info from --version
+* [#2416](https://github.com/icinga/icinga2/issues/2416) (DB IDO): DB IDO: Missing last\_hard\_state column update in {host,service}status tables
+* [#2411](https://github.com/icinga/icinga2/issues/2411): exception during config check
+* [#2394](https://github.com/icinga/icinga2/issues/2394): typeof does not work for numbers
+* [#2381](https://github.com/icinga/icinga2/issues/2381): SIGABRT while evaluating apply rules
+* [#2380](https://github.com/icinga/icinga2/issues/2380) (Configuration): typeof\(\) seems to return null for arrays and dictionaries
+* [#2376](https://github.com/icinga/icinga2/issues/2376) (Configuration): Apache 2.2 fails with new apache conf
+* [#2371](https://github.com/icinga/icinga2/issues/2371) (Configuration): Test Classic UI config file with Apache 2.4
+* [#2370](https://github.com/icinga/icinga2/issues/2370) (Cluster): update\_config not updating configuration
+* [#2360](https://github.com/icinga/icinga2/issues/2360): CLI `icinga2 node update-config` doesn't sync configs from remote clients as expected
+* [#2354](https://github.com/icinga/icinga2/issues/2354) (DB IDO): Improve error reporting when libmysqlclient or libpq are missing
+* [#2350](https://github.com/icinga/icinga2/issues/2350) (Cluster): Segfault on issuing node update-config
+* [#2341](https://github.com/icinga/icinga2/issues/2341) (Cluster): execute checks locally if command\_endpoint == local endpoint
+* [#2283](https://github.com/icinga/icinga2/issues/2283) (Cluster): Cluster heartbeats need to be more aggressive
+* [#2266](https://github.com/icinga/icinga2/issues/2266) (CLI): "node wizard" shouldn't crash when SaveCert fails
+* [#2255](https://github.com/icinga/icinga2/issues/2255) (DB IDO): If a parent host goes down, the child host isn't marked as unreachable in the DB IDO
+* [#2216](https://github.com/icinga/icinga2/issues/2216) (Cluster): Repository does not support services which have a slash in their name
+* [#2202](https://github.com/icinga/icinga2/issues/2202) (Configuration): CPU usage at 100% when check\_interval = 0 in host object definition
+* [#2154](https://github.com/icinga/icinga2/issues/2154) (Cluster): update-config fails to create hosts
+* [#2148](https://github.com/icinga/icinga2/issues/2148) (Compat): Feature `compatlog` should flush output buffer on every new line
+* [#2021](https://github.com/icinga/icinga2/issues/2021): double macros in command arguments seem to lead to an exception
+* [#2016](https://github.com/icinga/icinga2/issues/2016) (Notifications): Docs: Better explanation of dependency state filters
+* [#1947](https://github.com/icinga/icinga2/issues/1947) (Livestatus): Missing host downtimes/comments in Livestatus
+
+### ITL
+
+* [#2430](https://github.com/icinga/icinga2/issues/2430) (ITL): No option to specify timeout to check\_snmp and snmp manubulon commands
+
+### Documentation
+
+* [#2422](https://github.com/icinga/icinga2/issues/2422) (Documentation): Setting a dictionary key to null does not cause the key/value to be removed
+* [#2412](https://github.com/icinga/icinga2/issues/2412) (Documentation): Update host examples in Dependencies for Network Reachability documentation
+* [#2409](https://github.com/icinga/icinga2/issues/2409) (Documentation): Wrong command in documentation for installing Icinga 2 pretty printers.
+* [#2404](https://github.com/icinga/icinga2/issues/2404) (Documentation): Livestatus: Replace unixcat with nc -U
+* [#2180](https://github.com/icinga/icinga2/issues/2180) (Documentation): Documentation: Add note on default notification interval in getting started notifications.conf
+
+### Support
+
+* [#2417](https://github.com/icinga/icinga2/issues/2417) (Tests): Unit tests fail on FreeBSD
+* [#2369](https://github.com/icinga/icinga2/issues/2369) (Packages): SUSE packages %set\_permissions post statement wasn't moved to common
+* [#2368](https://github.com/icinga/icinga2/issues/2368) (Packages): /usr/lib/icinga2 is not owned by a package
+* [#2292](https://github.com/icinga/icinga2/issues/2292) (Tests): The unit tests still crash sometimes
+* [#1942](https://github.com/icinga/icinga2/issues/1942) (Packages): icinga2 init-script doesn't validate configuration on reload action
+
+## 2.2.1 (2014-12-01)
+
+### Notes
+
+* Support arrays in [command argument macros](#command-passing-parameters) #6709
+ * Allows defining multiple parameters for [nrpe -a](#plugin-check-command-nrpe), [nscp -l](#plugin-check-command-nscp), [disk -p](#plugin-check-command-disk), [dns -a](#plugin-check-command-dns); see the sketch below.
+* Bugfixes
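+
+For example, assuming the ITL `nrpe` CheckCommand exposes the `nrpe_command` and `nrpe_arguments` custom attributes (mapped to `-c` and `-a`), an array can now be passed as a whole (hypothetical host and threshold values):
+
+```
+object Service "remote-disk" {
+  host_name = "remote-host"                    // hypothetical client host
+  check_command = "nrpe"
+  vars.nrpe_command = "check_disk"
+  vars.nrpe_arguments = [ "20%", "10%", "/" ]  // the array is expanded into repeated -a parameters
+}
+```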
+
+### Enhancement
+
+* [#2366](https://github.com/icinga/icinga2/issues/2366): Release 2.2.1
+* [#2277](https://github.com/icinga/icinga2/issues/2277) (Configuration): The classicui Apache conf doesn't support Apache 2.4
+* [#1790](https://github.com/icinga/icinga2/issues/1790): Support for arrays in macros
+
+### Bug
+
+* [#2340](https://github.com/icinga/icinga2/issues/2340) (CLI): Segfault in CA handling
+* [#2328](https://github.com/icinga/icinga2/issues/2328) (Cluster): Verify if master radio box is disabled in the Windows wizard
+* [#2311](https://github.com/icinga/icinga2/issues/2311) (Configuration): !in operator returns incorrect result
+* [#2293](https://github.com/icinga/icinga2/issues/2293) (Configuration): Objects created with node update-config can't be seen in Classic UI
+* [#2288](https://github.com/icinga/icinga2/issues/2288) (Cluster): Incorrect error message for localhost
+* [#2282](https://github.com/icinga/icinga2/issues/2282) (Cluster): Icinga2 node add failed with unhandled exception
+* [#2273](https://github.com/icinga/icinga2/issues/2273): Restart Icinga - Error Restoring program state from file '/var/lib/icinga2/icinga2.state'
+* [#2272](https://github.com/icinga/icinga2/issues/2272) (Cluster): Windows wizard is missing --zone argument
+* [#2271](https://github.com/icinga/icinga2/issues/2271) (Cluster): Windows wizard uses incorrect CLI command
+* [#2267](https://github.com/icinga/icinga2/issues/2267) (Cluster): Built-in commands shouldn't be run on the master instance in remote command execution mode
+* [#2207](https://github.com/icinga/icinga2/issues/2207) (Livestatus): Livestatus: submitting a large amount of commands via the unix socket results in broken pipes
+
+### ITL
+
+* [#2285](https://github.com/icinga/icinga2/issues/2285) (ITL): Increase default timeout for NRPE checks
+
+### Documentation
+
+* [#2344](https://github.com/icinga/icinga2/issues/2344) (Documentation): Documentation: Explain how unresolved macros are handled
+* [#2343](https://github.com/icinga/icinga2/issues/2343) (Documentation): Document how arrays in macros work
+* [#2336](https://github.com/icinga/icinga2/issues/2336) (Documentation): Wrong information in section "Linux Client Setup Wizard for Remote Monitoring"
+* [#2275](https://github.com/icinga/icinga2/issues/2275) (Documentation): 2.2.0 has out-of-date icinga2 man page
+* [#2251](https://github.com/icinga/icinga2/issues/2251) (Documentation): object and template with the same name generate duplicate object error
+
+### Support
+
+* [#2363](https://github.com/icinga/icinga2/issues/2363) (Packages): Fix Apache config in the Debian package
+* [#2359](https://github.com/icinga/icinga2/issues/2359) (Packages): Wrong permission in run directory after restart
+* [#2301](https://github.com/icinga/icinga2/issues/2301) (Packages): Move the icinga2-prepare-dirs script elsewhere
+* [#2280](https://github.com/icinga/icinga2/issues/2280) (Packages): Icinga 2.2 misses the build requirement libyajl-devel for SUSE distributions
+* [#2278](https://github.com/icinga/icinga2/issues/2278) (Packages): /usr/sbin/icinga-prepare-dirs conflicts in the bin and common package
+* [#2276](https://github.com/icinga/icinga2/issues/2276) (Packages): Systemd rpm scripts are run in wrong package
+* [#2212](https://github.com/icinga/icinga2/issues/2212) (Packages): icinga2 checkconfig should fail if group given for command files does not exist
+* [#2117](https://github.com/icinga/icinga2/issues/2117) (Packages): Update spec file to use yajl-devel
+* [#1968](https://github.com/icinga/icinga2/issues/1968) (Packages): service icinga2 status gives wrong information when run as unprivileged user
+
+## 2.2.0 (2014-11-17)
+
+### Notes
+
+* DB IDO schema update to version `1.12.0`
+ * schema files in `lib/db_ido_{mysql,pgsql}/schema` (source)
+ * Table `programstatus`: New column `program_version`
+ * Table `customvariables` and `customvariablestatus`: New column `is_json` (required for custom attribute array/dictionary support)
+* New features
+ * [GelfWriter](#gelfwriter): Logging check results, state changes, notifications to GELF (graylog2, logstash) #7619
+ * Agent/Client/Node framework #7249
+ * Windows plugins for the client/agent parts #7242 #7243
+* New CLI commands #7245
+ * `icinga2 feature {enable,disable}` replaces `icinga2-{enable,disable}-feature` script #7250
+ * `icinga2 object list` replaces `icinga2-list-objects` script #7251
+ * `icinga2 pki` replaces `icinga2-build-{ca,key}` scripts #7247
+ * `icinga2 repository` manages `/etc/icinga2/repository.d` which must be included in `icinga2.conf` #7255
+ * `icinga2 node` cli command provides node (master, satellite, agent) setup (wizard) and management functionality #7248
+ * `icinga2 daemon` for existing daemon arguments (`-c`, `-C`). Removed `-u` and `-g` parameters in favor of [init.conf](#init-conf).
+ * bash auto-completion & terminal colors #7396
+* Configuration
+ * Former `localhost` example host is now defined in [hosts.conf](#hosts-conf) #7594
+ * All example services moved into advanced apply rules in [services.conf](#services-conf)
+ * Updated downtimes configuration example in [downtimes.conf](#downtimes-conf) #7472
+ * Updated notification apply example in [notifications.conf](#notifications-conf) #7594
+ * Support for object attribute 'zone' #7400
+ * Support setting [object variables in apply rules](#dependencies-apply-custom-attributes) #7479
+ * Support arrays and dictionaries in [custom attributes](#custom-attributes-apply) #6544 #7560
+ * Add [apply for rules](#using-apply-for) for advanced dynamic object generation #7561 (see the sketch below)
+ * New attribute `accept_commands` for [ApiListener](#objecttype-apilistener) #7559
+ * New [init.conf](#init-conf) file, included first, containing the new constants `RunAsUser` and `RunAsGroup`.
+* Cluster
+ * Add [CSR Auto-Signing support](#csr-autosigning-requirements) using generated ticket #7244
+ * Allow [executing remote commands](#icinga2-remote-monitoring-client-command-execution) on endpoint clients #7559
+* Perfdata
+ * [PerfdataWriter](#writing-performance-data-files): Don't change perfdata, pass through from plugins #7268
+ * [GraphiteWriter](#graphite-carbon-cache-writer): Add warn/crit/min/max perfdata and downtime_depth stats values #7366 #6946
+* Packages
+ * `python-icinga2` package dropped in favor of integrated cli commands #7245
+ * Windows Installer for the agent parts #7243
+
+> **Note**
+>
+> Please remove `conf.d/hosts/localhost*` after verifying your updated configuration!
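+
+For illustration, a minimal example combining the new apply-for rules with dictionaries in custom attributes (host name, interface names and the per-interface settings are placeholders):
+
+```
+object Host "router-01" {
+  check_command = "hostalive"
+  address = "192.0.2.10"
+  vars.interfaces["eth0"] = { ping_packets = 10 }
+  vars.interfaces["eth1"] = { ping_packets = 5 }
+}
+
+apply Service "if-" for (if_name => config in host.vars.interfaces) {
+  check_command = "ping4"
+  vars += config    // merge the per-interface dictionary into the service's custom attributes
+}
+```
+
+Each entry of the `interfaces` dictionary generates one service (`if-eth0`, `if-eth1`) on every host that defines it.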
+
+### Enhancement
+
+* [#2219](https://github.com/icinga/icinga2/issues/2219): Icinga 2 should use less RAM
+* [#2217](https://github.com/icinga/icinga2/issues/2217) (Metrics): Add GelfWriter for writing log events to graylog2/logstash
+* [#2213](https://github.com/icinga/icinga2/issues/2213): Optimize class layout
+* [#2203](https://github.com/icinga/icinga2/issues/2203) (Configuration): Revamp sample configuration: add NodeName host, move services into apply rules schema
+* [#2189](https://github.com/icinga/icinga2/issues/2189) (Configuration): Refactor AST into multiple classes
+* [#2187](https://github.com/icinga/icinga2/issues/2187) (Configuration): Implement support for arbitrarily complex indexers
+* [#2184](https://github.com/icinga/icinga2/issues/2184) (Configuration): Generate objects using apply with foreach in arrays or dictionaries \(key =\> value\)
+* [#2183](https://github.com/icinga/icinga2/issues/2183) (Configuration): Support dictionaries in custom attributes
+* [#2182](https://github.com/icinga/icinga2/issues/2182) (Cluster): Execute remote commands on the agent w/o local objects by passing custom attributes
+* [#2179](https://github.com/icinga/icinga2/issues/2179): Implement keys\(\)
+* [#2178](https://github.com/icinga/icinga2/issues/2178) (CLI): Cli command Node: Disable notifications feature on client nodes
+* [#2161](https://github.com/icinga/icinga2/issues/2161) (CLI): Cli Command: Rename 'agent' to 'node'
+* [#2158](https://github.com/icinga/icinga2/issues/2158) (Cluster): Require --zone to be specified for "node setup"
+* [#2152](https://github.com/icinga/icinga2/issues/2152) (Cluster): Rename --agent to --zone \(for blacklist/whitelist\)
+* [#2140](https://github.com/icinga/icinga2/issues/2140) (CLI): Cli: Use Node Blacklist functionality in 'node update-config'
+* [#2138](https://github.com/icinga/icinga2/issues/2138) (CLI): Find a better name for 'repository commit --clear'
+* [#2131](https://github.com/icinga/icinga2/issues/2131) (Configuration): Set host/service variable in apply rules
+* [#2124](https://github.com/icinga/icinga2/issues/2124) (Configuration): Update downtimes.conf example config
+* [#2119](https://github.com/icinga/icinga2/issues/2119) (Cluster): Remove virtual agent name feature for localhost
+* [#2118](https://github.com/icinga/icinga2/issues/2118) (CLI): Cli command: Node Setup Wizard \(for Satellites and Agents\)
+* [#2115](https://github.com/icinga/icinga2/issues/2115) (CLI): Cli command: Repository remove host should remove host.conf host/ dir with services
+* [#2113](https://github.com/icinga/icinga2/issues/2113) (CLI): validate repository config updates
+* [#2108](https://github.com/icinga/icinga2/issues/2108): Only build YAJL when there's no system-provided version available
+* [#2107](https://github.com/icinga/icinga2/issues/2107): Replace cJSON with a better JSON parser
+* [#2104](https://github.com/icinga/icinga2/issues/2104) (CLI): Use "variable get" for "pki ticket"
+* [#2103](https://github.com/icinga/icinga2/issues/2103) (CLI): Validate number of arguments
+* [#2098](https://github.com/icinga/icinga2/issues/2098) (CLI): Support for writing api.conf
+* [#2096](https://github.com/icinga/icinga2/issues/2096) (CLI): Cli command: pki needs option to define the algorithm
+* [#2092](https://github.com/icinga/icinga2/issues/2092) (CLI): Rename PKI arguments
+* [#2088](https://github.com/icinga/icinga2/issues/2088) (CLI): Cli command: Node Setup
+* [#2087](https://github.com/icinga/icinga2/issues/2087) (CLI): "pki request" should ask user to verify the peer's certificate
+* [#2086](https://github.com/icinga/icinga2/issues/2086) (CLI): Add -h next to --help
+* [#2085](https://github.com/icinga/icinga2/issues/2085) (CLI): Remove "available features" list from "feature list"
+* [#2084](https://github.com/icinga/icinga2/issues/2084) (CLI): Implement "feature disable" for Windows
+* [#2081](https://github.com/icinga/icinga2/issues/2081) (CLI): CLI: List disabled features in feature list too
+* [#2079](https://github.com/icinga/icinga2/issues/2079): Move WSAStartup call to INITIALIZE\_ONCE
+* [#2076](https://github.com/icinga/icinga2/issues/2076) (CLI): Implement field attribute to hide fields in command auto-completion
+* [#2074](https://github.com/icinga/icinga2/issues/2074) (CLI): Add autocomplete to 'host/service add' for object attributes \(e.g. --check\_interval\)
+* [#2073](https://github.com/icinga/icinga2/issues/2073) (Configuration): Remove zone keyword and allow to use object attribute 'zone'
+* [#2071](https://github.com/icinga/icinga2/issues/2071) (Configuration): Move localhost config into repository
+* [#2069](https://github.com/icinga/icinga2/issues/2069) (CLI): Implement generic color support for terminals
+* [#2066](https://github.com/icinga/icinga2/issues/2066) (CLI): Implement support for serial files
+* [#2064](https://github.com/icinga/icinga2/issues/2064) (DB IDO): Add program\_version column to programstatus table
+* [#2062](https://github.com/icinga/icinga2/issues/2062): Release 2.2
+* [#2059](https://github.com/icinga/icinga2/issues/2059) (CLI): Auto-completion for feature enable/disable
+* [#2055](https://github.com/icinga/icinga2/issues/2055) (CLI): Windows support for cli command feature
+* [#2054](https://github.com/icinga/icinga2/issues/2054) (CLI): CLI Commands: Remove timestamp prefix when logging output
+* [#2053](https://github.com/icinga/icinga2/issues/2053) (CLI): autocomplete should support '--key value'
+* [#2050](https://github.com/icinga/icinga2/issues/2050) (CLI): Cli command parser must support unregistered boost::program\_options
+* [#2049](https://github.com/icinga/icinga2/issues/2049) (CLI): CLI command: variable
+* [#2046](https://github.com/icinga/icinga2/issues/2046) (Graphite): GraphiteWriter: Add warn/crit/min/max perfdata values if existing
+* [#2031](https://github.com/icinga/icinga2/issues/2031) (Graphite): GraphiteWriter: Add support for customized metric prefix names
+* [#2003](https://github.com/icinga/icinga2/issues/2003): macro processor needs an array printer
+* [#1999](https://github.com/icinga/icinga2/issues/1999) (CLI): Cli command: Repository
+* [#1997](https://github.com/icinga/icinga2/issues/1997) (CLI): Cli Commands: Node Repository Blacklist & Whitelist
+* [#1996](https://github.com/icinga/icinga2/issues/1996) (CLI): Cli command: SCM
+* [#1995](https://github.com/icinga/icinga2/issues/1995) (CLI): Cli command: Object
+* [#1994](https://github.com/icinga/icinga2/issues/1994) (CLI): Cli command: Feature
+* [#1993](https://github.com/icinga/icinga2/issues/1993) (CLI): Node Repository
+* [#1992](https://github.com/icinga/icinga2/issues/1992) (CLI): Cli command: Node
+* [#1991](https://github.com/icinga/icinga2/issues/1991) (CLI): Cli command: pki
+* [#1990](https://github.com/icinga/icinga2/issues/1990) (CLI): Cli command framework
+* [#1989](https://github.com/icinga/icinga2/issues/1989) (CLI): Cli commands
+* [#1988](https://github.com/icinga/icinga2/issues/1988) (Cluster): CSR auto-signing
+* [#1987](https://github.com/icinga/icinga2/issues/1987) (Plugins): Windows plugins
+* [#1986](https://github.com/icinga/icinga2/issues/1986) (Cluster): Windows Wizard
+* [#1977](https://github.com/icinga/icinga2/issues/1977) (CLI): Cli commands: add filter capability to 'object list'
+* [#1901](https://github.com/icinga/icinga2/issues/1901) (Cluster): Windows installer
+* [#1895](https://github.com/icinga/icinga2/issues/1895) (Graphite): Add downtime depth as statistic metric for GraphiteWriter
+* [#1717](https://github.com/icinga/icinga2/issues/1717) (Configuration): Support for array in custom variable.
+* [#894](https://github.com/icinga/icinga2/issues/894): Add copyright header to .ti files and add support for comments in mkclass
+
+### Bug
+
+* [#2258](https://github.com/icinga/icinga2/issues/2258) (Configuration): Names for nested objects are evaluated at the wrong time
+* [#2257](https://github.com/icinga/icinga2/issues/2257) (Configuration): DebugInfo is missing for nested dictionaries
+* [#2254](https://github.com/icinga/icinga2/issues/2254): CreateProcess fails on Windows 7
+* [#2241](https://github.com/icinga/icinga2/issues/2241) (Cluster): node wizard uses incorrect path for the CA certificate
+* [#2237](https://github.com/icinga/icinga2/issues/2237) (Configuration): Wrong set of dependency state when a host depends on a service
+* [#2235](https://github.com/icinga/icinga2/issues/2235): Unit tests fail to run
+* [#2233](https://github.com/icinga/icinga2/issues/2233): Get rid of static boost::mutex variables
+* [#2222](https://github.com/icinga/icinga2/issues/2222) (DB IDO): IDO module crashes on Windows
+* [#2221](https://github.com/icinga/icinga2/issues/2221): Installation on Windows fails
+* [#2220](https://github.com/icinga/icinga2/issues/2220) (Notifications): Missing state filter 'OK' must not prevent recovery notifications being sent
+* [#2215](https://github.com/icinga/icinga2/issues/2215): mkclass crashes when called without arguments
+* [#2214](https://github.com/icinga/icinga2/issues/2214) (Cluster): Removing multiple services fails
+* [#2206](https://github.com/icinga/icinga2/issues/2206): Plugin execution on Windows does not work
+* [#2205](https://github.com/icinga/icinga2/issues/2205): Compilation Error with boost 1.56 under Windows
+* [#2201](https://github.com/icinga/icinga2/issues/2201): Exception when executing check
+* [#2200](https://github.com/icinga/icinga2/issues/2200) (Configuration): Nested templates do not work \(anymore\)
+* [#2199](https://github.com/icinga/icinga2/issues/2199) (CLI): Typo in output of 'icinga2 object list'
+* [#2197](https://github.com/icinga/icinga2/issues/2197) (Notifications): only notify users on recovery which have been notified before \(not-ok state\)
+* [#2195](https://github.com/icinga/icinga2/issues/2195) (Cluster): Invalid checkresult object causes Icinga 2 to crash
+* [#2177](https://github.com/icinga/icinga2/issues/2177) (CLI): 'pki request' fails with serial permission error
+* [#2172](https://github.com/icinga/icinga2/issues/2172) (Configuration): There is no \_\_name available to nested objects
+* [#2171](https://github.com/icinga/icinga2/issues/2171) (Configuration): Nesting an object in a template causes the template to become non-abstract
+* [#2170](https://github.com/icinga/icinga2/issues/2170) (Configuration): Object list dump erroneously evaluates template definitions
+* [#2166](https://github.com/icinga/icinga2/issues/2166) (Cluster): Error message is always shown even when the host exists
+* [#2165](https://github.com/icinga/icinga2/issues/2165) (Cluster): Incorrect warning message for "node update-config"
+* [#2164](https://github.com/icinga/icinga2/issues/2164) (Cluster): Error in migrate-hosts
+* [#2162](https://github.com/icinga/icinga2/issues/2162) (CLI): Change blacklist/whitelist storage
+* [#2156](https://github.com/icinga/icinga2/issues/2156) (Cluster): Use ScriptVariable::Get for RunAsUser/RunAsGroup
+* [#2155](https://github.com/icinga/icinga2/issues/2155) (Cluster): Agent health check must not have zone attribute
+* [#2153](https://github.com/icinga/icinga2/issues/2153) (Cluster): Misleading error messages for blacklist/whitelist remove
+* [#2142](https://github.com/icinga/icinga2/issues/2142) (Configuration): Icinga2 fails to start due to configuration errors
+* [#2141](https://github.com/icinga/icinga2/issues/2141): Build fails
+* [#2137](https://github.com/icinga/icinga2/issues/2137): Utility::GetFQDN doesn't work on OS X
+* [#2134](https://github.com/icinga/icinga2/issues/2134): Hosts/services should not have themselves as parents
+* [#2133](https://github.com/icinga/icinga2/issues/2133): OnStateLoaded isn't called for objects which don't have any state
+* [#2132](https://github.com/icinga/icinga2/issues/2132) (CLI): cli command 'node setup update-config' overwrites existing constants.conf
+* [#2128](https://github.com/icinga/icinga2/issues/2128) (CLI): Cli: Node Setup/Wizard running as root must chown\(\) generated files to icinga daemon user
+* [#2127](https://github.com/icinga/icinga2/issues/2127) (Configuration): can't assign Service to Host in nested HostGroup
+* [#2125](https://github.com/icinga/icinga2/issues/2125) (Metrics): Performance data via API is broken
+* [#2116](https://github.com/icinga/icinga2/issues/2116) (CLI): Cli command: Repository should validate if object exists before add/remove
+* [#2106](https://github.com/icinga/icinga2/issues/2106) (Cluster): When replaying logs the secobj attribute is ignored
+* [#2091](https://github.com/icinga/icinga2/issues/2091) (CLI): Cli command: pki request throws exception on connection failure
+* [#2083](https://github.com/icinga/icinga2/issues/2083): CMake warnings on OS X
+* [#2077](https://github.com/icinga/icinga2/issues/2077) (CLI): CLI: Auto-completion with colliding arguments
+* [#2070](https://github.com/icinga/icinga2/issues/2070) (DB IDO): CLI / MySQL error during vagrant provisioning
+* [#2068](https://github.com/icinga/icinga2/issues/2068) (CLI): pki new-cert doesn't check whether the files were successfully written
+* [#2065](https://github.com/icinga/icinga2/issues/2065) (DB IDO): Schema upgrade files are missing in /usr/share/icinga2-ido-{mysql,pgsql}
+* [#2063](https://github.com/icinga/icinga2/issues/2063) (CLI): Cli commands: Integers in arrays are printed incorrectly
+* [#2057](https://github.com/icinga/icinga2/issues/2057) (CLI): failed en/disable feature should return error
+* [#2056](https://github.com/icinga/icinga2/issues/2056) (CLI): Commands are auto-completed when they shouldn't be
+* [#2051](https://github.com/icinga/icinga2/issues/2051) (Configuration): custom attribute name 'type' causes empty vars dictionary
+* [#2048](https://github.com/icinga/icinga2/issues/2048) (Compat): Fix reading perfdata in compat/checkresultreader
+* [#2042](https://github.com/icinga/icinga2/issues/2042) (Plugins): Setting snmp\_v2 can cause snmp-manubulon-command derived checks to fail
+* [#2038](https://github.com/icinga/icinga2/issues/2038) (Configuration): snmp-load checkcommand has a wrong "-T" param value
+* [#2034](https://github.com/icinga/icinga2/issues/2034) (Configuration): Importing a CheckCommand in a NotificationCommand results in an exception without stacktrace.
+* [#2029](https://github.com/icinga/icinga2/issues/2029) (Configuration): Error messages for invalid imports missing
+* [#2026](https://github.com/icinga/icinga2/issues/2026) (Configuration): config parser crashes on unknown attribute in assign
+* [#2006](https://github.com/icinga/icinga2/issues/2006) (Configuration): snmp-load checkcommand has wrong threshold syntax
+* [#2005](https://github.com/icinga/icinga2/issues/2005) (Metrics): icinga2 returns exponential perfdata format with check\_nt
+* [#2004](https://github.com/icinga/icinga2/issues/2004) (Metrics): Icinga2 changes perfdata order and removes maximum
+* [#2001](https://github.com/icinga/icinga2/issues/2001) (Notifications): default value for "disable\_notifications" in service dependencies is set to "false"
+* [#1950](https://github.com/icinga/icinga2/issues/1950) (Configuration): Typo for "HTTP Checks" match in groups.conf
+* [#1720](https://github.com/icinga/icinga2/issues/1720) (Notifications): delaying notifications with times.begin should postpone first notification into that window
+
+### ITL
+
+* [#2204](https://github.com/icinga/icinga2/issues/2204) (ITL): Plugin Check Commands: disk is missing '-p', 'x' parameter
+* [#2017](https://github.com/icinga/icinga2/issues/2017) (ITL): ITL: check\_procs and check\_http are missing arguments
+
+### Documentation
+
+* [#2218](https://github.com/icinga/icinga2/issues/2218) (Documentation): Documentation: Update Icinga Web 2 installation
+* [#2191](https://github.com/icinga/icinga2/issues/2191) (Documentation): link missing in documentation about livestatus
+* [#2175](https://github.com/icinga/icinga2/issues/2175) (Documentation): Documentation for arrays & dictionaries in custom attributes and their usage in apply rules for
+* [#2160](https://github.com/icinga/icinga2/issues/2160) (Documentation): Documentation: Explain how to manage agent config in central repository
+* [#2150](https://github.com/icinga/icinga2/issues/2150) (Documentation): Documentation: Move troubleshooting after the getting started chapter
+* [#2143](https://github.com/icinga/icinga2/issues/2143) (Documentation): Documentation: Revamp getting started with 1 host and multiple \(service\) applies
+* [#2130](https://github.com/icinga/icinga2/issues/2130) (Documentation): Documentation: Mention 'icinga2 object list' in config validation
+* [#2129](https://github.com/icinga/icinga2/issues/2129) (Documentation): Fix typos and other small corrections in documentation
+* [#2093](https://github.com/icinga/icinga2/issues/2093) (Documentation): Documentation: 1-about contribute links to non-existing report a bug howto
+* [#2052](https://github.com/icinga/icinga2/issues/2052) (Documentation): Wrong usermod command for external command pipe setup
+* [#2041](https://github.com/icinga/icinga2/issues/2041) (Documentation): Documentation: Cli Commands
+* [#2037](https://github.com/icinga/icinga2/issues/2037) (Documentation): Documentation: Wrong check command for snmp-int\(erface\)
+* [#2033](https://github.com/icinga/icinga2/issues/2033) (Documentation): Docs: Default command timeout is 60s not 5m
+* [#2028](https://github.com/icinga/icinga2/issues/2028) (Documentation): Icinga2 docs: link supported operators from sections about apply rules
+* [#2024](https://github.com/icinga/icinga2/issues/2024) (Documentation): Documentation: Add support for locally-scoped variables for host/service in applied Dependency
+* [#2013](https://github.com/icinga/icinga2/issues/2013) (Documentation): Documentation: Add host/services variables in apply rules
+* [#1998](https://github.com/icinga/icinga2/issues/1998) (Documentation): Documentation: Agent/Satellite Setup
+* [#1972](https://github.com/icinga/icinga2/issues/1972) (Documentation): Document how to use multiple assign/ignore statements with logical "and" & "or"
+
+### Support
+
+* [#2253](https://github.com/icinga/icinga2/issues/2253) (Packages): Conditionally enable MySQL and PostgreSQL, add support for FreeBSD and DragonFlyBSD
+* [#2236](https://github.com/icinga/icinga2/issues/2236) (Packages): Enable parallel builds for the Debian package
+* [#2147](https://github.com/icinga/icinga2/issues/2147) (Packages): Feature `checker` is not enabled when installing Icinga 2 using our latest RPM snapshot packages
+* [#2136](https://github.com/icinga/icinga2/issues/2136) (Packages): Build fails on RHEL 6.6
+* [#2123](https://github.com/icinga/icinga2/issues/2123) (Packages): Post-update script \(migrate-hosts\) isn't run on RPM-based distributions
+* [#2095](https://github.com/icinga/icinga2/issues/2095) (Packages): Unity build fails on RHEL 5
+* [#2058](https://github.com/icinga/icinga2/issues/2058) (Packages): Debian package root permissions interfere with icinga2 cli commands as icinga user
+* [#2007](https://github.com/icinga/icinga2/issues/2007) (Packages): SLES \(SUSE Linux Enterprise Server\) 11 SP3 package dependency failure
+
+## 2.1.1 (2014-09-16)
+
+### Enhancement
+
+* [#1938](https://github.com/icinga/icinga2/issues/1938): Unity builds: Detect whether \_\_COUNTER\_\_ is available
+* [#1933](https://github.com/icinga/icinga2/issues/1933): Implement support for unity builds
+* [#1932](https://github.com/icinga/icinga2/issues/1932): Ensure that namespaces for INITIALIZE\_ONCE and REGISTER\_TYPE are truly unique
+* [#1931](https://github.com/icinga/icinga2/issues/1931): Add include guards for mkclass files
+* [#1797](https://github.com/icinga/icinga2/issues/1797): Change log message for checking/sending notifications
+
+### Bug
+
+* [#1975](https://github.com/icinga/icinga2/issues/1975): fix memory leak ido\_pgsql
+* [#1971](https://github.com/icinga/icinga2/issues/1971) (Livestatus): Livestatus hangs from time to time
+* [#1967](https://github.com/icinga/icinga2/issues/1967) (Plugins): fping4 doesn't work correctly with the shipped command-plugins.conf
+* [#1966](https://github.com/icinga/icinga2/issues/1966) (Cluster): Segfault using cluster in TlsStream::IsEof
+* [#1958](https://github.com/icinga/icinga2/issues/1958) (Configuration): Manubulon-Plugin conf Filename wrong
+* [#1957](https://github.com/icinga/icinga2/issues/1957): Build fails on Haiku
+* [#1955](https://github.com/icinga/icinga2/issues/1955) (Cluster): new SSL Errors with too many queued messages
+* [#1954](https://github.com/icinga/icinga2/issues/1954): Missing differentiation between service and systemctl
+* [#1952](https://github.com/icinga/icinga2/issues/1952) (Metrics): GraphiteWriter should ignore empty perfdata value
+* [#1948](https://github.com/icinga/icinga2/issues/1948): pipe2 returns ENOSYS on GNU Hurd and Debian kfreebsd
+* [#1946](https://github.com/icinga/icinga2/issues/1946): Exit code is not initialized for some failed checks
+* [#1940](https://github.com/icinga/icinga2/issues/1940): icinga2-list-objects complains about Umlauts and stops output
+* [#1935](https://github.com/icinga/icinga2/issues/1935): icinga2-list-objects doesn't work with Python 3
+* [#1934](https://github.com/icinga/icinga2/issues/1934) (Configuration): Remove validator for the Script type
+* [#1930](https://github.com/icinga/icinga2/issues/1930): "Error parsing performance data" in spite of "enable\_perfdata = false"
+* [#1910](https://github.com/icinga/icinga2/issues/1910) (Cluster): SSL errors with interleaved SSL\_read/write
+* [#1862](https://github.com/icinga/icinga2/issues/1862) (Cluster): SSL\_read errors during restart
+* [#1849](https://github.com/icinga/icinga2/issues/1849) (Cluster): Too many queued messages
+* [#1782](https://github.com/icinga/icinga2/issues/1782): make test fails on openbsd
+* [#1522](https://github.com/icinga/icinga2/issues/1522): Link libcJSON against libm
+
+### Documentation
+
+* [#1985](https://github.com/icinga/icinga2/issues/1985) (Documentation): clarify on db ido upgrades
+* [#1962](https://github.com/icinga/icinga2/issues/1962) (Documentation): Extend documentation for icinga-web on Debian systems
+* [#1949](https://github.com/icinga/icinga2/issues/1949) (Documentation): Explain event commands and their integration by a real life example \(httpd restart via ssh\)
+* [#1927](https://github.com/icinga/icinga2/issues/1927) (Documentation): Document how to use @ to escape keywords
+
+### Support
+
+* [#1960](https://github.com/icinga/icinga2/issues/1960) (Packages): GNUInstallDirs.cmake outdated
+* [#1944](https://github.com/icinga/icinga2/issues/1944) (Packages): service icinga2 status - prints cat error if the service is stopped
+* [#1941](https://github.com/icinga/icinga2/issues/1941) (Packages): icinga2 init-script terminates with exit code 0 if $DAEMON is not in place or not executable
+* [#1939](https://github.com/icinga/icinga2/issues/1939) (Packages): Enable unity build for RPM/Debian packages
+* [#1937](https://github.com/icinga/icinga2/issues/1937) (Packages): Figure out a better way to set the version for snapshot builds
+* [#1936](https://github.com/icinga/icinga2/issues/1936) (Packages): Fix rpmlint errors
+* [#1928](https://github.com/icinga/icinga2/issues/1928) (Packages): icinga2.spec: files-attr-not-set for python-icinga2 package
+
+## 2.1.0 (2014-08-29)
+
+### Notes
+
+* DB IDO schema upgrade ([MySQL](#upgrading-mysql-db), [PostgreSQL](#upgrading-postgresql-db)) required!
+ * new schema version: **1.11.7**
+ * RPMs install the schema files into `/usr/share/icinga2-ido*` instead of `/usr/share/doc/icinga2-ido*` #6881
+* [Information for config objects](#list-configuration-objects) using `icinga2-list-objects` script #6702
+* Add Python 2.4 as requirement #6702
+* Add search path: If `-c /etc/icinga2/icinga2.conf` is omitted, use `SysconfDir + "/icinga2/icinga2.conf"` #6874
+* Change log level for failed commands #6751
+* Notifications are load-balanced in a [High Availability cluster setup](#high-availability-notifications) #6203
+ * New config attribute: `enable_ha`
+* DB IDO "run once" or "run everywhere" mode in a [High Availability cluster setup](#high-availability-db-ido) #6203 #6827
+ * New config attributes: `enable_ha` and `failover_timeout` (see the sketch below)
+* RPMs use the `icingacmd` group for /var/{cache,log,run}/icinga2 #6948
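+
+A minimal sketch of the new HA-related IDO attributes (connection details are placeholders):
+
+```
+object IdoMysqlConnection "ido-mysql" {
+  host = "127.0.0.1"
+  database = "icinga"
+  user = "icinga"
+  password = "icinga"
+
+  enable_ha = true          // let the zone decide which endpoint actively writes to the database
+  failover_timeout = 60s    // another endpoint takes over if the active one stops updating for 60 seconds
+}
+```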
+
+### Enhancement
+
+* [#1879](https://github.com/icinga/icinga2/issues/1879): Enhance logging for perfdata/graphitewriter
+* [#1871](https://github.com/icinga/icinga2/issues/1871) (Configuration): add search path for icinga2.conf
+* [#1843](https://github.com/icinga/icinga2/issues/1843) (DB IDO): delay ido connect in ha cluster
+* [#1810](https://github.com/icinga/icinga2/issues/1810): Change log level for failed commands
+* [#1788](https://github.com/icinga/icinga2/issues/1788): Release 2.1
+* [#1786](https://github.com/icinga/icinga2/issues/1786) (Configuration): Information for config objects
+* [#1760](https://github.com/icinga/icinga2/issues/1760) (Plugins): Plugin Check Commands: add manubulon snmp plugins
+* [#1548](https://github.com/icinga/icinga2/issues/1548) (Cluster): Log replay sends messages to instances which shouldn't get those messages
+* [#1546](https://github.com/icinga/icinga2/issues/1546) (Cluster): Better cluster support for notifications / IDO
+* [#1491](https://github.com/icinga/icinga2/issues/1491) (Cluster): Better log messages for cluster changes
+* [#977](https://github.com/icinga/icinga2/issues/977) (Cluster): Cluster support for modified attributes
+
+### Bug
+
+* [#1916](https://github.com/icinga/icinga2/issues/1916): Build fails with Boost 1.56
+* [#1903](https://github.com/icinga/icinga2/issues/1903) (Cluster): Host and service checks stuck in "pending" when hostname = localhost in a parent/satellite setup
+* [#1902](https://github.com/icinga/icinga2/issues/1902): Commands are processed multiple times
+* [#1896](https://github.com/icinga/icinga2/issues/1896): check file permissions in /var/cache/icinga2
+* [#1884](https://github.com/icinga/icinga2/issues/1884): External command pipe: Too many open files
+* [#1819](https://github.com/icinga/icinga2/issues/1819): ExternalCommandListener fails to open pipe: Too many open files
+
+### Documentation
+
+* [#1924](https://github.com/icinga/icinga2/issues/1924) (Documentation): add example selinux policy for external command pipe
+* [#1915](https://github.com/icinga/icinga2/issues/1915) (Documentation): how to add a new cluster node
+* [#1913](https://github.com/icinga/icinga2/issues/1913) (Documentation): Keyword "required" used inconsistently for host and service "icon\_image\*" attributes
+* [#1905](https://github.com/icinga/icinga2/issues/1905) (Documentation): Update command arguments 'set\_if' and beautify error message
+* [#1897](https://github.com/icinga/icinga2/issues/1897) (Documentation): Add documentation for icinga2-list-objects
+* [#1889](https://github.com/icinga/icinga2/issues/1889) (Documentation): Enhance Graphite Writer description
+* [#1881](https://github.com/icinga/icinga2/issues/1881) (Documentation): clarify on which config tools are available
+* [#1872](https://github.com/icinga/icinga2/issues/1872) (Documentation): Wrong parent in Load Distribution
+* [#1868](https://github.com/icinga/icinga2/issues/1868) (Documentation): Wrong object attribute 'enable\_flap\_detection'
+* [#1867](https://github.com/icinga/icinga2/issues/1867) (Documentation): Add systemd options: enable, journal
+* [#1865](https://github.com/icinga/icinga2/issues/1865) (Documentation): add section about disabling re-notifications
+* [#1864](https://github.com/icinga/icinga2/issues/1864) (Documentation): Add section for reserved keywords
+* [#1847](https://github.com/icinga/icinga2/issues/1847) (Documentation): Explain how the order attribute works in commands
+* [#1807](https://github.com/icinga/icinga2/issues/1807) (Documentation): Better explanation for HA config cluster
+* [#1787](https://github.com/icinga/icinga2/issues/1787) (Documentation): Documentation for zones and cluster permissions
+* [#1761](https://github.com/icinga/icinga2/issues/1761) (Documentation): Migration: note on check command timeouts
+
+### Support
+
+* [#1923](https://github.com/icinga/icinga2/issues/1923) (Packages): 64-bit RPMs are not installable
+* [#1888](https://github.com/icinga/icinga2/issues/1888) (Packages): Recommend related packages on SUSE distributions
+* [#1887](https://github.com/icinga/icinga2/issues/1887) (Installation): Clean up spec file
+* [#1885](https://github.com/icinga/icinga2/issues/1885) (Packages): enforce /usr/lib as base for the cgi path on SUSE distributions
+* [#1883](https://github.com/icinga/icinga2/issues/1883) (Installation): use \_rundir macro for configuring the run directory
+* [#1873](https://github.com/icinga/icinga2/issues/1873) (Packages): make install does not install the db-schema
+
+## 2.0.2 (2014-08-07)
+
+### Notes
+
+* DB IDO schema upgrade required (new schema version: 1.11.6)
+
+### Enhancement
+
+* [#1830](https://github.com/icinga/icinga2/issues/1830) (Plugins): Plugin Check Commands: Add timeout option to check\_ssh
+* [#1826](https://github.com/icinga/icinga2/issues/1826): Print application paths for --version
+* [#1785](https://github.com/icinga/icinga2/issues/1785): Release 2.0.2
+* [#1784](https://github.com/icinga/icinga2/issues/1784) (Configuration): Require command to be an array when the arguments attribute is used
+* [#1781](https://github.com/icinga/icinga2/issues/1781) (Plugins): Plugin Check Commands: Add expect option to check\_http
+
+### Bug
+
+* [#1861](https://github.com/icinga/icinga2/issues/1861): write startup error messages to error.log
+* [#1858](https://github.com/icinga/icinga2/issues/1858): event command execution does not call finish handler
+* [#1855](https://github.com/icinga/icinga2/issues/1855): Startup logfile is not flushed to disk
+* [#1853](https://github.com/icinga/icinga2/issues/1853) (DB IDO): exit application if ido schema version does not match
+* [#1852](https://github.com/icinga/icinga2/issues/1852): Error handler for getaddrinfo must use gai\_strerror
+* [#1848](https://github.com/icinga/icinga2/issues/1848): Missing space in error message
+* [#1840](https://github.com/icinga/icinga2/issues/1840): \[Patch\] Fix build issue and crash found on Solaris, potentially other Unix OSes
+* [#1839](https://github.com/icinga/icinga2/issues/1839): Icinga 2 crashes during startup
+* [#1834](https://github.com/icinga/icinga2/issues/1834) (Cluster): High Availablity does not synchronise the data like expected
+* [#1829](https://github.com/icinga/icinga2/issues/1829): Service icinga2 reload command does not cause effect
+* [#1828](https://github.com/icinga/icinga2/issues/1828): Fix notification definition if no host\_name / service\_description given
+* [#1816](https://github.com/icinga/icinga2/issues/1816): Config validation without filename argument fails with unhandled exception
+* [#1813](https://github.com/icinga/icinga2/issues/1813) (Metrics): GraphiteWriter: Malformatted integer values
+* [#1800](https://github.com/icinga/icinga2/issues/1800) (Cluster): TLS Connections still unstable in 2.0.1
+* [#1796](https://github.com/icinga/icinga2/issues/1796): "order" attribute doesn't seem to work as expected
+* [#1792](https://github.com/icinga/icinga2/issues/1792) (Configuration): sample config: add check commands location hint \(itl/plugin check commands\)
+* [#1779](https://github.com/icinga/icinga2/issues/1779) (Configuration): Remove superfluous quotes and commas in dictionaries
+* [#1778](https://github.com/icinga/icinga2/issues/1778): Event Commands are triggered in OK HARD state everytime
+* [#1775](https://github.com/icinga/icinga2/issues/1775): additional group rights missing when Icinga started with -u and -g
+* [#1774](https://github.com/icinga/icinga2/issues/1774) (Cluster): Missing detailed error messages on ApiListener SSL Errors
+* [#1766](https://github.com/icinga/icinga2/issues/1766): RPMLint security warning - missing-call-to-setgroups-before-setuid /usr/sbin/icinga2
+* [#1757](https://github.com/icinga/icinga2/issues/1757) (DB IDO): NULL vs empty string
+* [#1752](https://github.com/icinga/icinga2/issues/1752) (Cluster): Infinite loop in TlsStream::Close
+* [#1744](https://github.com/icinga/icinga2/issues/1744) (DB IDO): Two Custom Variables with same name, but Upper/Lowercase creating IDO duplicate entry
+* [#1741](https://github.com/icinga/icinga2/issues/1741): Command pipe blocks when trying to open it more than once in parallel
+* [#1730](https://github.com/icinga/icinga2/issues/1730): Check and retry intervals are incorrect
+* [#1729](https://github.com/icinga/icinga2/issues/1729): $TOTALHOSTSERVICESWARNING$ and $TOTALHOSTSERVICESCRITICAL$ aren't getting converted
+* [#1728](https://github.com/icinga/icinga2/issues/1728): Service dependencies aren't getting converted properly
+* [#1726](https://github.com/icinga/icinga2/issues/1726): group names quoted twice in arrays
+* [#1723](https://github.com/icinga/icinga2/issues/1723): add log message for invalid performance data
+* [#1722](https://github.com/icinga/icinga2/issues/1722): GraphiteWriter regularly sends empty lines
+* [#1721](https://github.com/icinga/icinga2/issues/1721) (Configuration): Add cmake constant for PluginDir
+* [#1684](https://github.com/icinga/icinga2/issues/1684) (Notifications): Notifications not always triggered
+* [#1674](https://github.com/icinga/icinga2/issues/1674): ipmi-sensors segfault due to stack size
+* [#1666](https://github.com/icinga/icinga2/issues/1666) (DB IDO): objects and their ids are inserted twice
+
+### ITL
+
+* [#1825](https://github.com/icinga/icinga2/issues/1825) (ITL): The "ssl" check command always sets -D
+* [#1821](https://github.com/icinga/icinga2/issues/1821) (ITL): Order doesn't work in check ssh command
+
+### Documentation
+
+* [#1802](https://github.com/icinga/icinga2/issues/1802) (Documentation): wrong path for the file 'localhost.conf'
+* [#1801](https://github.com/icinga/icinga2/issues/1801) (Documentation): Missing documentation about implicit dependency
+* [#1791](https://github.com/icinga/icinga2/issues/1791) (Documentation): icinga Web: wrong path to command pipe
+* [#1789](https://github.com/icinga/icinga2/issues/1789) (Documentation): update installation with systemd usage
+* [#1762](https://github.com/icinga/icinga2/issues/1762) (Documentation): clarify on which features are required for classic ui/web/web2
+
+### Support
+
+* [#1845](https://github.com/icinga/icinga2/issues/1845) (Packages): Remove if\(NOT DEFINED ICINGA2\_SYSCONFIGFILE\) in etc/initsystem/CMakeLists.txt
+* [#1842](https://github.com/icinga/icinga2/issues/1842) (Packages): incorrect sysconfig path on sles11
+* [#1820](https://github.com/icinga/icinga2/issues/1820) (Installation): Repo Error on RHEL 6.5
+* [#1780](https://github.com/icinga/icinga2/issues/1780) (Packages): Rename README to README.md
+* [#1763](https://github.com/icinga/icinga2/issues/1763) (Packages): Build packages for el7
+* [#1754](https://github.com/icinga/icinga2/issues/1754) (Installation): Location of the run directory is hard coded and bound to "local\_state\_dir"
+* [#1699](https://github.com/icinga/icinga2/issues/1699) (Packages): Classic UI Debian/Ubuntu: apache 2.4 requires 'a2enmod cgi' & apacheutils installed
+* [#1338](https://github.com/icinga/icinga2/issues/1338) (Packages): SUSE packages
+
+## 2.0.1 (2014-07-10)
+
+### Notes
+
+Bugfix release
+
+### Enhancement
+
+* [#1713](https://github.com/icinga/icinga2/issues/1713) (Configuration): Add port option to check imap/pop/smtp and a new dig
+* [#1049](https://github.com/icinga/icinga2/issues/1049) (Livestatus): OutputFormat python
+
+### Bug
+
+* [#1773](https://github.com/icinga/icinga2/issues/1773) (Notifications): Problem with enable\_notifications and retained state
+* [#1772](https://github.com/icinga/icinga2/issues/1772) (Notifications): enable\_notifications = false for users has no effect
+* [#1771](https://github.com/icinga/icinga2/issues/1771) (Cluster): Icinga crashes after "Too many queued messages"
+* [#1769](https://github.com/icinga/icinga2/issues/1769): Build fails when MySQL is not installed
+* [#1767](https://github.com/icinga/icinga2/issues/1767): Increase icinga.cmd Limit
+* [#1753](https://github.com/icinga/icinga2/issues/1753) (Configuration): icinga2-sign-key creates ".crt" and ".key" files when the CA passphrase is invalid
+* [#1751](https://github.com/icinga/icinga2/issues/1751) (Configuration): icinga2-build-ca shouldn't prompt for DN
+* [#1749](https://github.com/icinga/icinga2/issues/1749): TLS connections are still unstable
+* [#1745](https://github.com/icinga/icinga2/issues/1745): Icinga stops updating IDO after a while
+* [#1743](https://github.com/icinga/icinga2/issues/1743) (Configuration): Please add --sni option to http check command
+* [#1740](https://github.com/icinga/icinga2/issues/1740) (Notifications): Notifications causing segfault from exim
+* [#1737](https://github.com/icinga/icinga2/issues/1737) (DB IDO): icinga2-ido-pgsql snapshot package missing dependecy dbconfig-common
+* [#1736](https://github.com/icinga/icinga2/issues/1736): Remove line number information from stack traces
+* [#1734](https://github.com/icinga/icinga2/issues/1734): Check command result doesn't match
+* [#1731](https://github.com/icinga/icinga2/issues/1731): Dependencies should cache their parent and child object
+* [#1727](https://github.com/icinga/icinga2/issues/1727): $SERVICEDESC$ isn't getting converted correctly
+* [#1724](https://github.com/icinga/icinga2/issues/1724): Improve systemd service definition
+* [#1716](https://github.com/icinga/icinga2/issues/1716) (Cluster): Icinga doesn't send SetLogPosition messages when one of the endpoints fails to connect
+* [#1712](https://github.com/icinga/icinga2/issues/1712): parsing of double defined command can generate unexpected errors
+* [#1704](https://github.com/icinga/icinga2/issues/1704): Reminder notifications are sent on disabled services
+* [#1698](https://github.com/icinga/icinga2/issues/1698): icinga2 cannot be built with both systemd and init.d files
+* [#1697](https://github.com/icinga/icinga2/issues/1697) (Livestatus): Thruk Panorama View cannot query Host Status
+* [#1695](https://github.com/icinga/icinga2/issues/1695): icinga2.state could not be opened
+* [#1691](https://github.com/icinga/icinga2/issues/1691): build warnings
+* [#1644](https://github.com/icinga/icinga2/issues/1644) (Cluster): base64 on CentOS 5 fails to read certificate bundles
+* [#1639](https://github.com/icinga/icinga2/issues/1639) (Cluster): Deadlock in ApiListener::RelayMessage
+* [#1609](https://github.com/icinga/icinga2/issues/1609): application fails to start on wrong log file permissions but does not tell about it
+* [#1206](https://github.com/icinga/icinga2/issues/1206) (DB IDO): PostgreSQL string escaping
+
+### ITL
+
+* [#1739](https://github.com/icinga/icinga2/issues/1739) (ITL): Add more options to snmp check
+
+### Documentation
+
+* [#1777](https://github.com/icinga/icinga2/issues/1777) (Documentation): event command execution cases are missing
+* [#1765](https://github.com/icinga/icinga2/issues/1765) (Documentation): change docs.icinga.org/icinga2/latest to git master
+* [#1742](https://github.com/icinga/icinga2/issues/1742) (Documentation): Documentation for || and && is missing
+* [#1702](https://github.com/icinga/icinga2/issues/1702) (Documentation): Array section confusing
+
+### Support
+
+* [#1764](https://github.com/icinga/icinga2/issues/1764) (Installation): ICINGA2\_SYSCONFIGFILE should use full path using CMAKE\_INSTALL\_FULL\_SYSCONFDIR
+* [#1709](https://github.com/icinga/icinga2/issues/1709) (Packages): htpasswd should be installed with icinga2-classicui on Ubuntu
+* [#1696](https://github.com/icinga/icinga2/issues/1696) (Packages): Copyright problems
+* [#1655](https://github.com/icinga/icinga2/issues/1655) (Packages): Debian package icinga2-classicui needs versioned dependency of icinga-cgi\*
+
+## 2.0.0 (2014-06-16)
+
+### Notes
+
+First official release
+
+### Enhancement
+
+* [#1600](https://github.com/icinga/icinga2/issues/1600): Prepare 2.0.0 release
+* [#1575](https://github.com/icinga/icinga2/issues/1575) (Cluster): Cluster: global zone for all nodes
+* [#1348](https://github.com/icinga/icinga2/issues/1348): move vagrant box into dedicated demo project
+* [#1341](https://github.com/icinga/icinga2/issues/1341): Revamp migration script
+* [#1322](https://github.com/icinga/icinga2/issues/1322): Update website for release
+* [#1320](https://github.com/icinga/icinga2/issues/1320): Update documentation for 2.0
+
+### Bug
+
+* [#1694](https://github.com/icinga/icinga2/issues/1694): Separate CMakeLists.txt for etc/initsystem
+* [#1682](https://github.com/icinga/icinga2/issues/1682) (Configuration): logrotate.conf file should rotate log files as icinga user
+* [#1680](https://github.com/icinga/icinga2/issues/1680) (Livestatus): Column 'host\_name' does not exist in table 'hosts'
+* [#1678](https://github.com/icinga/icinga2/issues/1678) (Livestatus): Nagvis does not work with livestatus \(invalid format\)
+* [#1673](https://github.com/icinga/icinga2/issues/1673): OpenSUSE Packages do not enable basic features
+* [#1669](https://github.com/icinga/icinga2/issues/1669) (Cluster): Segfault with zones without endpoints on config compile
+* [#1642](https://github.com/icinga/icinga2/issues/1642): Check if host recovery notifications work
+* [#1615](https://github.com/icinga/icinga2/issues/1615) (Cluster): Subdirectories in the zone config are not synced
+* [#1427](https://github.com/icinga/icinga2/issues/1427): fd-handling in Daemonize incorrect
+* [#1312](https://github.com/icinga/icinga2/issues/1312): Permissions error on startup is only logged but not on stderr
+
+### ITL
+
+* [#1690](https://github.com/icinga/icinga2/issues/1690) (ITL): improve predefined command-plugins
+
+### Documentation
+
+* [#1689](https://github.com/icinga/icinga2/issues/1689) (Documentation): explain the icinga 2 reload
+* [#1681](https://github.com/icinga/icinga2/issues/1681) (Documentation): Add instructions to install debug symbols on debian systems
+* [#1675](https://github.com/icinga/icinga2/issues/1675) (Documentation): add a note on no length restrictions for plugin output / perfdata
+* [#1636](https://github.com/icinga/icinga2/issues/1636) (Documentation): Update command definitions to use argument conditions
+* [#1572](https://github.com/icinga/icinga2/issues/1572) (Documentation): change docs.icinga.org/icinga2/snapshot to 'latest'
+* [#1302](https://github.com/icinga/icinga2/issues/1302) (Documentation): Replace Sphinx with Icinga Web 2 Doc Module
+
+### Support
+
+* [#1686](https://github.com/icinga/icinga2/issues/1686) (Installation): man pages for scripts
+* [#1685](https://github.com/icinga/icinga2/issues/1685) (Installation): Cleanup installer for 2.0 supported features
+* [#1683](https://github.com/icinga/icinga2/issues/1683) (Installation): remove 0.0.x schema upgrade files
+* [#1670](https://github.com/icinga/icinga2/issues/1670) (Packages): Ubuntu package Release file lacks 'Suite' line
+* [#1645](https://github.com/icinga/icinga2/issues/1645) (Packages): Packages are not installable on CentOS 5
+* [#1342](https://github.com/icinga/icinga2/issues/1342) (Installation): Less verbose start output using the initscript
+* [#1319](https://github.com/icinga/icinga2/issues/1319) (Tests): Release tests
+* [#907](https://github.com/icinga/icinga2/issues/907) (Packages): icinga2-classicui is not installable on Debian
+* [#788](https://github.com/icinga/icinga2/issues/788) (Packages): add systemd support
+
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..f3cb26f
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,531 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+cmake_minimum_required(VERSION 2.8.12)
+set(BOOST_MIN_VERSION "1.66.0")
+
+if("${CMAKE_VERSION}" VERSION_LESS "3.8") # SLES 12.5
+ if(NOT MSVC)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
+ endif()
+else()
+ set(CMAKE_CXX_STANDARD 17)
+ set(CMAKE_CXX_STANDARD_REQUIRED ON)
+ set(CMAKE_CXX_EXTENSIONS OFF)
+endif()
+
+project(icinga2)
+list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
+list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/third-party/cmake")
+
+if(NOT CMAKE_BUILD_TYPE)
+ set(CMAKE_BUILD_TYPE Release CACHE STRING
+ "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel."
+ FORCE)
+endif()
+
+if(WIN32)
+ set(ICINGA2_MASTER OFF)
+else()
+ set(ICINGA2_MASTER ON)
+endif()
+
+option(ICINGA2_WITH_MYSQL "Build the MySQL IDO module" ${ICINGA2_MASTER})
+option(ICINGA2_WITH_PGSQL "Build the PostgreSQL IDO module" ${ICINGA2_MASTER})
+option(ICINGA2_WITH_CHECKER "Build the checker module" ON)
+option(ICINGA2_WITH_COMPAT "Build the compat module" ${ICINGA2_MASTER})
+option(ICINGA2_WITH_LIVESTATUS "Build the Livestatus module" ${ICINGA2_MASTER})
+option(ICINGA2_WITH_NOTIFICATION "Build the notification module" ON)
+option(ICINGA2_WITH_PERFDATA "Build the perfdata module" ${ICINGA2_MASTER})
+option(ICINGA2_WITH_TESTS "Run unit tests" ON)
+option(ICINGA2_WITH_ICINGADB "Build the IcingaDB module" ${ICINGA2_MASTER})
+
+option(USE_SYSTEMD
+  "Configure icinga as a native systemd service instead of a SysV initscript" OFF)
+
+set(HAVE_SYSTEMD ${USE_SYSTEMD})
+
+include(GNUInstallDirs)
+include(InstallConfig)
+include(SetFullDir)
+
+set(ICINGA2_USER "icinga" CACHE STRING "Icinga 2 user")
+set(ICINGA2_GROUP "icinga" CACHE STRING "Icinga 2 group")
+set(ICINGA2_COMMAND_GROUP "icingacmd" CACHE STRING "Icinga 2 command group")
+set(ICINGA2_PLUGINDIR "/usr/lib/nagios/plugins" CACHE STRING "Path for the check plugins")
+set(ICINGA2_GIT_VERSION_INFO ON CACHE BOOL "Whether to use git describe")
+set(ICINGA2_UNITY_BUILD ON CACHE BOOL "Whether to perform a unity build")
+set(ICINGA2_LTO_BUILD OFF CACHE BOOL "Whether to use LTO")
+
+set(ICINGA2_CONFIGDIR "${CMAKE_INSTALL_SYSCONFDIR}/icinga2" CACHE FILEPATH "Main config directory, e.g. /etc/icinga2")
+set(ICINGA2_CACHEDIR "${CMAKE_INSTALL_LOCALSTATEDIR}/cache/icinga2" CACHE FILEPATH "Directory for cache files, e.g. /var/cache/icinga2")
+set(ICINGA2_DATADIR "${CMAKE_INSTALL_LOCALSTATEDIR}/lib/icinga2" CACHE FILEPATH "Data directory for the daemon, e.g. /var/lib/icinga2")
+set(ICINGA2_LOGDIR "${CMAKE_INSTALL_LOCALSTATEDIR}/log/icinga2" CACHE FILEPATH "Logging directory, e.g. /var/log/icinga2")
+set(ICINGA2_SPOOLDIR "${CMAKE_INSTALL_LOCALSTATEDIR}/spool/icinga2" CACHE FILEPATH "Spooling directory, e.g. /var/spool/icinga2")
+set(ICINGA2_RUNDIR "${CMAKE_INSTALL_LOCALSTATEDIR}/run" CACHE STRING "/run directory (deprecated, please use ICINGA2_INITRUNDIR)")
+set(ICINGA2_INITRUNDIR "${ICINGA2_RUNDIR}/icinga2" CACHE FILEPATH "Runtime data for the init system, e.g. /run/icinga2")
+
+set(ICINGA2_PKGDATADIR "${CMAKE_INSTALL_DATADIR}/icinga2" CACHE FILEPATH "Installed data, e.g. /usr/share/icinga2")
+set(ICINGA2_INCLUDEDIR "${ICINGA2_PKGDATADIR}/include" CACHE FILEPATH "Include directory for the ITL, e.g. /usr/share/icinga2/include")
+
+# ensure absolute paths
+set_full_dir(ICINGA2_FULL_CONFIGDIR "${ICINGA2_CONFIGDIR}")
+set_full_dir(ICINGA2_FULL_CACHEDIR "${ICINGA2_CACHEDIR}")
+set_full_dir(ICINGA2_FULL_DATADIR "${ICINGA2_DATADIR}")
+set_full_dir(ICINGA2_FULL_LOGDIR "${ICINGA2_LOGDIR}")
+set_full_dir(ICINGA2_FULL_SPOOLDIR "${ICINGA2_SPOOLDIR}")
+set_full_dir(ICINGA2_FULL_RUNDIR "${ICINGA2_RUNDIR}")
+set_full_dir(ICINGA2_FULL_INITRUNDIR "${ICINGA2_INITRUNDIR}")
+set_full_dir(ICINGA2_FULL_PKGDATADIR "${ICINGA2_PKGDATADIR}")
+set_full_dir(ICINGA2_FULL_INCLUDEDIR "${ICINGA2_INCLUDEDIR}")
+
+set(LOGROTATE_DIR "${CMAKE_INSTALL_SYSCONFDIR}/logrotate.d" CACHE STRING "Location of logrotate configs, e.g. /etc/logrotate.d")
+set(BASHCOMPLETION_DIR "${CMAKE_INSTALL_SYSCONFDIR}/bash_completion.d" CACHE STRING "Location of bash_completion files, e.g. /etc/bash_completion.d")
+
+if(NOT WIN32)
+	set(ICINGA2_SYSCONFIGFILE "${CMAKE_INSTALL_SYSCONFDIR}/sysconfig/icinga2" CACHE PATH "where to store configuration for the init system, defaults to /etc/sysconfig/icinga2")
+endif()
+
+site_name(ICINGA2_BUILD_HOST_NAME)
+set(ICINGA2_BUILD_COMPILER_NAME "${CMAKE_CXX_COMPILER_ID}")
+
+if(NOT CMAKE_CXX_COMPILER_VERSION)
+ execute_process(COMMAND ${CMAKE_CXX_COMPILER} -dumpversion
+ OUTPUT_VARIABLE CMAKE_CXX_COMPILER_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE)
+endif()
+
+set(ICINGA2_BUILD_COMPILER_VERSION "${CMAKE_CXX_COMPILER_VERSION}")
+
+file(READ "${CMAKE_CURRENT_SOURCE_DIR}/COPYING" ICINGA2_LICENSE_GPL)
+set(ICINGA2_LICENSE "${ICINGA2_LICENSE_GPL}\n\n---\n\n${ICINGA2_LICENSE_ADDITIONS}")
+file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/LICENSE.txt" ${ICINGA2_LICENSE})
+
+file(STRINGS ICINGA2_VERSION SPEC_VERSION REGEX "^Version:")
+string(LENGTH "${SPEC_VERSION}" SPEC_VERSION_LENGTH)
+math(EXPR SPEC_VERSION_LENGTH "${SPEC_VERSION_LENGTH} - 9")
+string(SUBSTRING ${SPEC_VERSION} 9 ${SPEC_VERSION_LENGTH} SPEC_VERSION)
+
+configure_file(icinga-spec-version.h.cmake icinga-spec-version.h)
+
+include(GetGitRevisionDescription)
+git_describe(GIT_VERSION --tags)
+if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/icinga-version.h.force)
+ configure_file(icinga-version.h.force ${CMAKE_CURRENT_BINARY_DIR}/icinga-version.h COPYONLY)
+else()
+ if(NOT ICINGA2_GIT_VERSION_INFO OR GIT_VERSION MATCHES "-NOTFOUND$")
+ file(STRINGS ICINGA2_VERSION SPEC_REVISION REGEX "^Revision: ")
+ string(LENGTH "${SPEC_REVISION}" SPEC_REVISION_LENGTH)
+ math(EXPR SPEC_REVISION_LENGTH "${SPEC_REVISION_LENGTH} - 10")
+ string(SUBSTRING ${SPEC_REVISION} 10 ${SPEC_REVISION_LENGTH} SPEC_REVISION)
+
+ set(GIT_VERSION "r${SPEC_VERSION}-${SPEC_REVISION}")
+ set(ICINGA2_VERSION "${SPEC_VERSION}")
+ else()
+ # use GIT version as ICINGA2_VERSION
+ string(REGEX REPLACE "^[rv]" "" ICINGA2_VERSION "${GIT_VERSION}")
+ endif()
+ configure_file(icinga-version.h.cmake icinga-version.h)
+endif()
+
+# NuGet on Windows requires semantic versioning, example: 2.10.4.123 (only 4 elements, all numeric)
+string(REGEX REPLACE "-([0-9]+).*$" ".\\1" ICINGA2_VERSION_SAFE "${ICINGA2_VERSION}")
+string(REGEX REPLACE "-[^\\.]*(.*)$" "\\1" ICINGA2_VERSION_SAFE "${ICINGA2_VERSION_SAFE}")
+string(REGEX REPLACE "^([0-9]+\\.[0-9]+\\.[0-9]+)[\\.]?[0-9]*" "\\1" CHOCO_VERSION_SHORT "${ICINGA2_VERSION_SAFE}")
+
+message(STATUS "ICINGA2_VERSION_SAFE=${ICINGA2_VERSION_SAFE} CHOCO_VERSION_SHORT=${CHOCO_VERSION_SHORT}")
+
+if(WIN32)
+ set(Boost_USE_STATIC_LIBS ON)
+	# Disabled due to linking issues with newer Boost versions; they link against Windows SDKs
+ #add_definitions(-DBOOST_ALL_NO_LIB)
+
+ # Disable optimization for Boost::context
+ # https://www.boost.org/doc/libs/1_69_0/libs/context/doc/html/context/overview.html
+ # https://docs.microsoft.com/en-us/cpp/build/reference/gl-whole-program-optimization?view=vs-2017
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /bigobj /GL- /EHs")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /bigobj /GL- /EHs")
+
+ # detect if 32-bit target
+ if(CMAKE_VS_PLATFORM_NAME STREQUAL "Win32")
+ # SAFESEH is not supported in Boost on Windows x86
+ # maybe it is when Boost is compiled with it...
+ # https://lists.boost.org/Archives/boost/2013/10/206720.php
+ # https://docs.microsoft.com/en-us/cpp/build/reference/safeseh-image-has-safe-exception-handlers?view=vs-2017
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /SAFESEH:NO")
+ endif()
+endif()
+
+if(NOT DEFINED LOGROTATE_HAS_SU)
+ set(LOGROTATE_HAS_SU OFF)
+ find_program(LOGROTATE_BINARY logrotate)
+ execute_process(COMMAND ${LOGROTATE_BINARY} ERROR_VARIABLE LOGROTATE_OUTPUT)
+ if(LOGROTATE_OUTPUT)
+ string(REGEX REPLACE "^logrotate ([0-9.]*).*" "\\1" LOGROTATE_VERSION
+ ${LOGROTATE_OUTPUT})
+ message(STATUS "Found logrotate (found version \"${LOGROTATE_VERSION}\")")
+ if("${LOGROTATE_VERSION}" VERSION_GREATER "3.7.9")
+ set(LOGROTATE_HAS_SU ON)
+ endif()
+ endif()
+endif()
+if(LOGROTATE_HAS_SU)
+ set(LOGROTATE_USE_SU "\n\tsu ${ICINGA2_USER} ${ICINGA2_GROUP}")
+else()
+ set(LOGROTATE_CREATE "\n\tcreate 644 ${ICINGA2_USER} ${ICINGA2_GROUP}")
+endif()
+
+find_package(Boost ${BOOST_MIN_VERSION} COMPONENTS coroutine context date_time filesystem iostreams thread system program_options regex REQUIRED)
+
+# Boost.Coroutine2 (the successor of Boost.Coroutine)
+# (1) doesn't even exist in old Boost versions and
+# (2) isn't supported by ASIO, yet.
+add_definitions(-DBOOST_COROUTINES_NO_DEPRECATION_WARNING)
+
+add_definitions(-DBOOST_FILESYSTEM_NO_DEPRECATED)
+
+# Required for Boost v1.74+
+add_definitions(-DBOOST_ASIO_USE_TS_EXECUTOR_AS_DEFAULT)
+
+link_directories(${Boost_LIBRARY_DIRS})
+include_directories(${Boost_INCLUDE_DIRS})
+
+find_package(OpenSSL REQUIRED)
+include_directories(${OPENSSL_INCLUDE_DIR})
+
+set(base_DEPS ${CMAKE_DL_LIBS} ${Boost_LIBRARIES} ${OPENSSL_LIBRARIES})
+set(base_OBJS $<TARGET_OBJECTS:mmatch> $<TARGET_OBJECTS:socketpair> $<TARGET_OBJECTS:base>)
+
+# JSON
+find_package(JSON)
+include_directories(${JSON_INCLUDE})
+
+# UTF8CPP
+find_package(UTF8CPP)
+include_directories(${UTF8CPP_INCLUDE})
+
+find_package(Editline)
+set(HAVE_EDITLINE "${EDITLINE_FOUND}")
+
+find_package(Termcap)
+set(HAVE_TERMCAP "${TERMCAP_FOUND}")
+
+include_directories(
+ ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/lib
+ ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR}/lib
+)
+
+if(UNIX OR CYGWIN)
+ list(APPEND base_OBJS $<TARGET_OBJECTS:execvpe>)
+endif()
+
+if(HAVE_SYSTEMD)
+ list(APPEND base_DEPS systemd)
+endif()
+
+if(EDITLINE_FOUND)
+ list(APPEND base_DEPS ${EDITLINE_LIBRARIES})
+ include_directories(${EDITLINE_INCLUDE_DIR})
+endif()
+
+if(TERMCAP_FOUND)
+ list(APPEND base_DEPS ${TERMCAP_LIBRARIES})
+ include_directories(${TERMCAP_INCLUDE_DIR})
+endif()
+
+if(WIN32)
+ list(APPEND base_DEPS ws2_32 dbghelp shlwapi msi)
+endif()
+
+set(CMAKE_MACOSX_RPATH 1)
+set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_RPATH};${CMAKE_INSTALL_FULL_LIBDIR}/icinga2")
+
+if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Qunused-arguments -fcolor-diagnostics -fno-limit-debug-info")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Qunused-arguments -fcolor-diagnostics -fno-limit-debug-info")
+
+ # Clang on Fedora requires -pthread, Apple Clang does not
+ # AppleClang is available since CMake 3.0.0
+ if (NOT CMAKE_CXX_COMPILER_ID MATCHES "AppleClang")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pthread")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
+ endif()
+endif()
+
+if(CMAKE_C_COMPILER_ID STREQUAL "SunPro")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mt")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mt -library=stlport4")
+endif()
+
+if(CMAKE_C_COMPILER_ID STREQUAL "GNU")
+ if(CMAKE_SYSTEM_NAME MATCHES AIX)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -lpthread")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -lpthread")
+ elseif(CMAKE_SYSTEM_NAME MATCHES "kOpenBSD.*|OpenBSD.*")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -pthread")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -pthread")
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -lpthread")
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -lpthread")
+ set(CMAKE_STATIC_LINKER_FLAGS "${CMAKE_STATIC_LINKER_FLAGS} -lpthread")
+ else()
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -pthread")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -pthread")
+ endif()
+endif()
+
+include(CheckCXXCompilerFlag)
+
+function(check_cxx_linker_flag flag var)
+ set(CMAKE_REQUIRED_FLAGS ${flag})
+ set(result 0)
+ check_cxx_compiler_flag(${flag} result)
+ set(${var} ${result} PARENT_SCOPE)
+endfunction()
+
+check_cxx_linker_flag("-Wl,--gc-sections" LD_GC_SECTIONS)
+
+if(LD_GC_SECTIONS)
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gc-sections")
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--gc-sections")
+endif()
+
+check_cxx_linker_flag("-Wl,--no-export-dynamic" LD_NO_EXPORT_DYNAMIC)
+
+if(LD_NO_EXPORT_DYNAMIC)
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-export-dynamic")
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-export-dynamic")
+endif()
+
+check_cxx_linker_flag("-Bsymbolic-functions" LD_SYMBOLIC_FUNCTIONS)
+
+if(LD_SYMBOLIC_FUNCTIONS)
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Bsymbolic-functions")
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Bsymbolic-functions")
+endif()
+
+check_cxx_linker_flag("-Wl,--dynamic-list-cpp-typeinfo" LD_DYNAMIC_LIST_CPP_TYPEINFO)
+
+if(LD_DYNAMIC_LIST_CPP_TYPEINFO)
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--dynamic-list-cpp-typeinfo")
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--dynamic-list-cpp-typeinfo")
+endif()
+
+check_cxx_linker_flag("-Wl,--dynamic-list-data" LD_DYNAMIC_LIST_DATA)
+
+if(LD_DYNAMIC_LIST_DATA)
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--dynamic-list-data")
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--dynamic-list-data")
+endif()
+
+check_cxx_compiler_flag("-Winvalid-pch" CXX_INVALID_PCH)
+
+if(CXX_INVALID_PCH)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Winvalid-pch")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Winvalid-pch")
+endif()
+
+if(ICINGA2_LTO_BUILD)
+ check_cxx_compiler_flag("-flto" CXX_FLAG_LTO)
+
+ if(NOT CXX_FLAG_LTO)
+ message(WARNING "Compiler does not support LTO, falling back to non-LTO build")
+ else()
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -flto")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -flto")
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -flto")
+ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -flto")
+
+ if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9.0) AND NOT OPENBSD)
+ set(CMAKE_AR "gcc-ar")
+ set(CMAKE_RANLIB "gcc-ranlib")
+ endif()
+ endif()
+endif()
+
+if(MSVC)
+ add_definitions(-D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE -D_SCL_SECURE_NO_WARNINGS)
+endif()
+
+set(LIBRARY_OUTPUT_PATH ${CMAKE_BINARY_DIR}/Bin/${CMAKE_BUILD_TYPE} CACHE PATH "Library output path")
+set(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR}/Bin/${CMAKE_BUILD_TYPE} CACHE PATH "Executable output path")
+
+include(CheckSymbolExists)
+include(CheckFunctionExists)
+include(CheckLibraryExists)
+include(CheckIncludeFileCXX)
+
+check_symbol_exists(__COUNTER__ "" HAVE_COUNTER_MACRO)
+
+if(NOT HAVE_COUNTER_MACRO)
+message(FATAL_ERROR "Your C/C++ compiler does not support the __COUNTER__ macro.")
+endif()
+
+set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DI2_DEBUG")
+set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DI2_DEBUG")
+
+check_function_exists(vfork HAVE_VFORK)
+check_function_exists(backtrace_symbols HAVE_BACKTRACE_SYMBOLS)
+check_function_exists(pipe2 HAVE_PIPE2)
+check_function_exists(nice HAVE_NICE)
+check_library_exists(dl dladdr "dlfcn.h" HAVE_DLADDR)
+check_library_exists(execinfo backtrace_symbols "" HAVE_LIBEXECINFO)
+check_include_file_cxx(cxxabi.h HAVE_CXXABI_H)
+
+if(HAVE_LIBEXECINFO)
+ set(HAVE_BACKTRACE_SYMBOLS TRUE)
+ list(APPEND base_DEPS execinfo)
+endif()
+
+if(NOT WIN32)
+ # boost::stacktrace uses _Unwind_Backtrace which is only exposed if _GNU_SOURCE is defined on most systems
+ add_definitions(-D_GNU_SOURCE)
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD")
+ set(ICINGA2_STACKTRACE_USE_BACKTRACE_SYMBOLS TRUE)
+endif()
+
+if(ICINGA2_STACKTRACE_USE_BACKTRACE_SYMBOLS AND NOT HAVE_BACKTRACE_SYMBOLS)
+ message(FATAL_ERROR "ICINGA2_STACKTRACE_USE_BACKTRACE_SYMBOLS is set but backtrace_symbols() was not found")
+endif()
+
+if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+ exec_program(${CMAKE_CXX_COMPILER}
+ ARGS -dumpversion
+ OUTPUT_VARIABLE _ICINGA2_COMPILER_VERSION
+ )
+
+ if("${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS "7.0.0")
+ message(FATAL_ERROR "Your version of GCC (${CMAKE_CXX_COMPILER_VERSION}) is too old for building Icinga 2 (GCC >= 7.0.0 is required).")
+ endif()
+endif()
+
+if(MSVC)
+ if("${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS "19.20")
+ message(FATAL_ERROR "Your version of MSVC (${CMAKE_CXX_COMPILER_VERSION}) is too old for building Icinga 2 (MSVC >= 19.20 from Visual Studio 2019 is required).")
+ endif()
+endif()
+
+if(NOT MSVC)
+ check_cxx_source_compiles("class Base { public: virtual void test(void) { } }; class Derived : public Base { virtual void test(void) override { } }; int main(){}" CXX_FEATURE_OVERRIDE)
+
+ if(NOT CXX_FEATURE_OVERRIDE)
+ add_definitions("-Doverride=")
+ endif()
+endif()
+
+# Architecture specifics
+# - Log the target architecture
+# - ARM needs to link against atomic
+if(NOT MSVC)
+ # inspired by https://github.com/civetweb/civetweb/blob/master/cmake/DetermineTargetArchitecture.cmake
+ execute_process(
+ COMMAND ${CMAKE_C_COMPILER} -dumpmachine
+ RESULT_VARIABLE RESULT
+ OUTPUT_VARIABLE ARCH
+ ERROR_QUIET
+ )
+
+ if (RESULT)
+ message(STATUS "Failed to detect target architecture with compiler ${CMAKE_C_COMPILER}: ${RESULT}")
+ endif()
+
+ string(REGEX MATCH "([^-]+).*" ARCH_MATCH "${ARCH}")
+ if (NOT CMAKE_MATCH_1 OR NOT ARCH_MATCH)
+ message(STATUS "Failed to match the target architecture: ${ARCH}")
+ endif()
+
+ set(ARCH ${CMAKE_MATCH_1})
+
+ message(STATUS "Target architecture - ${ARCH}")
+
+ # ARM settings
+ if("${ARCH}" STREQUAL "arm")
+		check_cxx_source_compiles("#include <atomic>\n#include <cstdint>\nint main() { std::atomic<std::uint_fast64_t> x(0); x.fetch_add(1); x.fetch_sub(1); return 0; }" CXX_ATOMIC)
+ link_libraries(atomic)
+ endif()
+
+else()
+ if("${MSVC_C_ARCHITECTURE_ID}" STREQUAL "X86")
+ set(ARCH "i686")
+ elseif("${MSVC_C_ARCHITECTURE_ID}" STREQUAL "x64")
+ set(ARCH "x86_64")
+ elseif("${MSVC_C_ARCHITECTURE_ID}" STREQUAL "ARM")
+ set(ARCH "arm")
+ else()
+ message(FATAL_ERROR "Failed to determine the MSVC target architecture: ${MSVC_C_ARCHITECTURE_ID}")
+ endif()
+
+ message(STATUS "Target architecture - ${ARCH}")
+endif()
+
+configure_file(config.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/config.h ESCAPE_QUOTES)
+
+install(
+ FILES README.md COPYING AUTHORS CHANGELOG.md NEWS
+ DESTINATION ${CMAKE_INSTALL_DOCDIR}
+)
+
+include(CTest)
+enable_testing()
+
+set_property(GLOBAL PROPERTY USE_FOLDERS ON)
+
+add_subdirectory(third-party)
+add_subdirectory(tools)
+add_subdirectory(lib)
+add_subdirectory(icinga-app)
+add_subdirectory(etc)
+add_subdirectory(itl)
+add_subdirectory(agent)
+add_subdirectory(plugins)
+add_subdirectory(choco)
+
+if(NOT WIN32)
+ add_subdirectory(doc)
+endif()
+
+if(MSVC)
+ add_subdirectory(icinga-installer)
+endif()
+
+if(ICINGA2_WITH_TESTS)
+ add_subdirectory(test)
+endif()
+
+set(CPACK_PACKAGE_NAME "Icinga 2")
+set(CPACK_PACKAGE_VENDOR "Icinga GmbH")
+set(CPACK_PACKAGE_VERSION ${ICINGA2_VERSION_SAFE})
+set(CPACK_PACKAGE_INSTALL_DIRECTORY "ICINGA2")
+set(CPACK_PACKAGE_ICON "${CMAKE_CURRENT_SOURCE_DIR}/icinga-app\\\\icinga.ico")
+set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_BINARY_DIR}/LICENSE.txt")
+
+set(CPACK_PACKAGE_EXECUTABLES "Icinga2SetupAgent;Icinga 2 Agent Wizard")
+set(CPACK_WIX_PRODUCT_ICON "${CMAKE_CURRENT_SOURCE_DIR}/icinga-app\\\\icinga.ico")
+set(CPACK_WIX_UPGRADE_GUID "52F2BEAA-4DF0-4C3E-ABDC-C0F61DE4DF8A")
+set(CPACK_WIX_UI_BANNER "${CMAKE_CURRENT_SOURCE_DIR}/icinga-installer/bannrbmp.bmp")
+set(CPACK_WIX_UI_DIALOG "${CMAKE_CURRENT_SOURCE_DIR}/icinga-installer/dlgbmp.bmp")
+set(CPACK_WIX_PATCH_FILE "${CMAKE_CURRENT_BINARY_DIR}/icinga-installer/icinga2.wixpatch.Debug")
+set(CPACK_WIX_PATCH_FILE "${CMAKE_CURRENT_BINARY_DIR}/icinga-installer/icinga2.wixpatch")
+set(CPACK_WIX_EXTENSIONS "WixUtilExtension" "WixNetFxExtension")
+
+set(CMAKE_INSTALL_SYSTEM_RUNTIME_DESTINATION "sbin")
+set(CMAKE_INSTALL_UCRT_LIBRARIES TRUE)
+include(InstallRequiredSystemLibraries)
+
+if(WIN32)
+ if (CMAKE_VS_PLATFORM_NAME STREQUAL "x64")
+ set(ICINGA2_OPENSSL_DLL_ARCH "-x64")
+ else()
+ set(ICINGA2_OPENSSL_DLL_ARCH "")
+ endif()
+
+ foreach(ICINGA2_OPENSSL_LIB crypto ssl)
+ list(APPEND ICINGA2_OPENSSL_DLLS ${OPENSSL_INCLUDE_DIR}/../bin/lib${ICINGA2_OPENSSL_LIB}-3${ICINGA2_OPENSSL_DLL_ARCH}.dll)
+ endforeach()
+
+ install(
+ PROGRAMS ${ICINGA2_OPENSSL_DLLS}
+ DESTINATION ${CMAKE_INSTALL_SBINDIR}
+ )
+endif()
+
+include(CPack)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..cec5265
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,500 @@
+# <a id="contributing"></a> Contributing
+
+Icinga is an open source project and lives from your ideas and contributions.
+
+There are many ways to contribute, from improving the documentation and submitting
+bug reports and feature requests to writing code that adds enhancements or fixes bugs.
+
+#### Table of Contents
+
+1. [Introduction](#contributing-intro)
+2. [Fork the Project](#contributing-fork)
+3. [Branches](#contributing-branches)
+4. [Commits](#contributing-commits)
+5. [Pull Requests](#contributing-pull-requests)
+6. [Testing](#contributing-testing)
+7. [Source Code Patches](#contributing-patches-source-code)
+8. [Documentation Patches](#contributing-patches-documentation)
+9. [Contribute CheckCommand Definitions](#contributing-patches-itl-checkcommands)
+10. [Review](#contributing-review)
+
+## <a id="contributing-intro"></a> Introduction
+
+Please consider our [roadmap](https://github.com/Icinga/icinga2/milestones) and
+[open issues](https://github.com/icinga/icinga2/issues) when you start contributing
+to the project.
+Issues labeled with [help wanted](https://github.com/Icinga/icinga2/labels/help%20wanted) or
+[good first issue](https://github.com/Icinga/icinga2/labels/good%20first%20issue) will
+help you get started more easily.
+
+Before starting your work on Icinga 2, you should [fork the project](https://help.github.com/articles/fork-a-repo/)
+to your GitHub account. This allows you to freely experiment with your changes.
+When your changes are complete, submit a [pull request](https://help.github.com/articles/using-pull-requests/).
+All pull requests will be reviewed and merged if they follow some general guidelines:
+
+* Changes are located in a topic branch
+* For new functionality, proper tests are written
+* Changes should follow the existing coding style and standards
+
+Please continue reading in the following sections for a step-by-step guide.
+
+## <a id="contributing-fork"></a> Fork the Project
+
+[Fork the project](https://help.github.com/articles/fork-a-repo/) to your GitHub account
+and clone the repository:
+
+```bash
+git clone git@github.com:dnsmichi/icinga2.git
+cd icinga2
+```
+
+Add a new remote `upstream` that points to the main repository.
+
+```bash
+git remote add upstream https://github.com/icinga/icinga2.git
+```
+
+You can pull updates to your fork's master branch:
+
+```bash
+git fetch --all
+git pull upstream HEAD
+```
+
+Please continue to learn about [branches](CONTRIBUTING.md#contributing-branches).
+
+## <a id="contributing-branches"></a> Branches
+
+Choosing a proper name for a branch helps us identify its purpose and possibly
+find an associated bug or feature.
+Generally a branch name should include a topic such as `bugfix` or `feature` followed
+by a description and an issue number if applicable. Branches should have only changes
+relevant to a specific issue.
+
+```bash
+git checkout -b bugfix/service-template-typo-1234
+git checkout -b feature/config-handling-1235
+```
+
+Continue to apply your changes and test them. More details on specific changes:
+
+* [Source Code Patches](#contributing-patches-source-code)
+* [Documentation Patches](#contributing-patches-documentation)
+* [Contribute CheckCommand Definitions](#contributing-patches-itl-checkcommands)
+
+## <a id="contributing-commits"></a> Commits
+
+Once you've finished your work in a branch, make sure to commit
+your changes. A good commit message includes a short topic, an additional body
+and a reference to the issue you wish to solve (if one exists).
+
+Fixes:
+
+```
+Fix problem with notifications in HA cluster
+
+There was a race condition when restarting.
+
+refs #4567
+```
+
+Features:
+
+```
+Add ITL CheckCommand printer
+
+Requires the check_printer plugin.
+
+refs #1234
+```
+
+You can add multiple commits during your journey to finish your patch.
+Don't worry, you can squash those changes into a single commit later on.
+
+Ensure that the name and email address in your commit metadata are correct.
+With your first contribution (PR), also add them to [AUTHORS](./AUTHORS).
+If this metadata has changed since your last successful contribution,
+you should update [AUTHORS](./AUTHORS) and [.mailmap](./.mailmap).
+For the latter see [gitmailmap(5)](https://git-scm.com/docs/gitmailmap).
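+
+For example, you can set this metadata per repository before committing
+(a minimal sketch; the name and address below are placeholders):
+
+```bash
+git config user.name "Jane Doe"
+git config user.email "jane.doe@example.com"
+```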
+
+## <a id="contributing-pull-requests"></a> Pull Requests
+
+Once you've committed your changes, please update your local master
+branch and rebase your bugfix/feature branch against it before submitting a PR.
+
+```bash
+git checkout master
+git pull upstream HEAD
+
+git checkout bugfix/notifications
+git rebase master
+```
+
+Once you've resolved any conflicts, push the branch to your remote repository.
+It might be necessary to force push after rebasing - use with care!
+
+New branch:
+
+```bash
+git push --set-upstream origin bugfix/notifications
+```
+
+Existing branch:
+
+```bash
+git push -f origin bugfix/notifications
+```
+
+You can now either use the [hub](https://hub.github.com) CLI tool to create a PR, or navigate
+to your GitHub repository and create a PR there.
+
+The pull request should again have a descriptive subject and, if applicable,
+a reference with `fixes` to an existing issue id. That allows the referenced
+issue to be resolved automatically once your PR gets merged.
+
+```
+hub pull-request
+
+<a telling subject>
+
+fixes #1234
+```
+
+Thanks a lot for your contribution!
+
+
+### <a id="contributing-rebase"></a> Rebase a Branch
+
+If you accidentally sent in a PR which was not rebased against the upstream master,
+developers might ask you to rebase your PR.
+
+First off, fetch and pull `upstream` master.
+
+```bash
+git checkout master
+git fetch --all
+git pull upstream HEAD
+```
+
+Then change to your working branch and start rebasing it against master:
+
+```bash
+git checkout bugfix/notifications
+git rebase master
+```
+
+If you run into a conflict, the rebase will stop and ask you to fix the problems.
+
+```
+git status
+
+ both modified: path/to/conflict.cpp
+```
+
+Edit the file and search for `>>>`. Fix, build, test and save as needed.
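+
+The conflicting section is enclosed in markers that look roughly like this
+(a generic sketch, not output from this repository):
+
+```
+<<<<<<< HEAD
+code as it currently exists on master
+=======
+code from the commit being replayed
+>>>>>>> Fix notifications
+```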
+
+Add the modified file(s) and continue rebasing.
+
+```bash
+git add path/to/conflict.cpp
+git rebase --continue
+```
+
+Once the rebase has succeeded, make sure to push your rewritten history to your remote branch.
+
+```bash
+git push -f origin bugfix/notifications
+```
+
+
+If you are afraid of breaking things, do the rebase in a backup branch first and replace your current branch afterwards.
+
+```bash
+git checkout bugfix/notifications
+git checkout -b bugfix/notifications-rebase
+
+git rebase master
+
+git branch -D bugfix/notifications
+git checkout -b bugfix/notifications
+
+git push -f origin bugfix/notifications
+```
+
+### <a id="contributing-squash"></a> Squash Commits
+
+> **Note:**
+>
+> Be careful with squashing. This might lead to non-recoverable mistakes.
+>
+> This is for advanced Git users.
+
+Say you want to squash the last 3 commits in your branch into a single one.
+
+Start an interactive (`-i`) rebase from current HEAD minus three commits (`HEAD~3`).
+
+```bash
+git rebase -i HEAD~3
+```
+
+Git opens your preferred editor. Keep `pick` on the first line and change `pick` to `squash` on the other lines.
+
+```
+pick e4bf04e47 Fix notifications
+squash d7b939d99 Tests
+squash b37fd5377 Doc updates
+```
+
+Save and let rebase do its job. Then force push the changes to the remote origin.
+
+```bash
+git push -f origin bugfix/notifications
+```
+
+
+## <a id="contributing-testing"></a> Testing
+
+Please follow the [documentation](https://icinga.com/docs/icinga2/snapshot/doc/21-development/#test-icinga-2)
+for build and test instructions.
+
+You can help test-drive the latest Icinga 2 snapshot packages inside the
+[Icinga 2 Vagrant boxes](https://github.com/icinga/icinga-vagrant).
+
+
+## <a id="contributing-patches-source-code"></a> Source Code Patches
+
+Icinga 2 can be built on Linux/Unix nodes and Windows clients. In order to develop patches for Icinga 2,
+you should prepare your own local build environment and know how to work with C++.
+
+Please follow the [development documentation](https://icinga.com/docs/icinga2/latest/doc/21-development/)
+for development environments, the style guide and more advanced insights.
+
+## <a id="contributing-patches-documentation"></a> Documentation Patches
+
+The documentation is written in GitHub flavored [Markdown](https://guides.github.com/features/mastering-markdown/).
+It is located in the `doc/` directory and can be edited with your preferred editor. You can also
+edit it online on GitHub.
+
+```bash
+vim doc/2-getting-started.md
+```
+
+In order to review and test changes, you can install the [mkdocs](https://www.mkdocs.org) Python library.
+
+```bash
+pip install mkdocs
+```
+
+This allows you to start a local mkdocs viewer instance on http://localhost:8000:
+
+```bash
+mkdocs serve
+```
+
+Changes to the chapter layout can be made in the `mkdocs.yml` file in the main source tree.
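+
+A chapter entry in `mkdocs.yml` follows the standard mkdocs layout, roughly like
+this (an illustrative sketch, not the project's actual file):
+
+```
+nav:
+  - Getting Started: 2-getting-started.md
+  - Icinga Template Library: 10-icinga-template-library.md
+```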
+
+There is also a script that ensures relative URLs to other sections are updated. This script
+also checks for broken URLs.
+
+```bash
+./doc/update-links.py doc/*.md
+```
+
+## <a id="contributing-patches-itl-checkcommands"></a> Contribute CheckCommand Definitions
+
+The Icinga Template Library (ITL) and its plugin check commands provide a variety of CheckCommand
+object definitions which can be included on-demand.
+
+Advantages of sending them upstream:
+
+* Everyone can use and update/fix them.
+* One single place for configuration and documentation.
+* Developers may suggest updates and help with best practices.
+* You don't need to care about copying the command definitions to your satellites and clients.
+
+#### <a id="contributing-itl-checkcommands-start"></a> Where do I start?
+
+Get to know the check plugin and its options. Read the general documentation on how to integrate
+your check plugins and how to create a good CheckCommand definition.
+
+A good command definition uses the following (a short sketch follows this list):
+
+* Command arguments including `value`, `description`, optional: `set_if`, `required`, etc.
+* Comments `/* ... */` to describe difficult parts.
+* Command name as prefix for the custom attributes referenced (e.g. `disk_`)
+* Default values
+ * If `host.address` is involved, set a custom attribute (e.g. `ping_address`) to the default `$address$`. This allows users to override the host's address later on by setting the custom attribute inside the service apply definitions.
+    * If the plugin is also capable of using IPv6, import the `ipv4-or-ipv6` template and use `$check_address$` instead of `$address$`. This allows falling back to IPv6 if only that address is set.
+    * If `set_if` is involved, make sure to specify a sane default value if required.
+* Templates if there are multiple plugins with the same basic behaviour (e.g. ping4 and ping6).
+* Your love and enthusiasm in making it the perfect CheckCommand.
+
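+Putting these points together, a contributed definition might look roughly like
+the following sketch. The `check_printer` plugin, its options and the `printer_`
+custom attributes are made up for illustration and are not part of the ITL:
+
+```
+object CheckCommand "printer" {
+	import "ipv4-or-ipv6"
+
+	command = [ PluginContribDir + "/check_printer" ]
+
+	arguments = {
+		"-H" = {
+			value = "$printer_address$"
+			description = "Hostname or IP address of the printer"
+		}
+		"-w" = {
+			value = "$printer_warning$"
+			description = "Warning threshold for the consumable level"
+		}
+	}
+
+	vars.printer_address = "$check_address$"
+}
+```
+
+Setting `vars.printer_address` to `$check_address$` keeps a sane default while
+still letting users override the address in their host or service definitions.
+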
+#### <a id="contributing-itl-checkcommands-overview"></a> I have created a CheckCommand, what now?
+
+Icinga 2 developers love documentation. This isn't just because we want to annoy anyone sending a patch,
+it's a matter of making your contribution visible to the community.
+
+Your patch should consist of 2 parts:
+
+* The CheckCommand definition.
+* The documentation bits.
+
+[Fork the repository](https://help.github.com/articles/fork-a-repo/) and ensure that the master branch is up-to-date.
+
+Create a new fix or feature branch and start your work.
+
+```bash
+git checkout -b feature/itl-check-printer
+```
+
+#### <a id="contributing-itl-checkcommands-add"></a> Add CheckCommand Definition to Contrib Plugins
+
+There already exists a defined structure for contributed plugins. Navigate to `itl/plugins-contrib.d`
+and check where your command definition fits in.
+
+```bash
+cd itl/plugins-contrib.d/
+ls
+```
+
+If you want to add or modify an existing Monitoring Plugin please use `itl/command-plugins.conf` instead.
+
+```bash
+vim itl/command-plugins.conf
+```
+
+##### Existing Configuration File
+
+Just edit it, and add your CheckCommand definition.
+
+```bash
+vim operating-system.conf
+```
+
+Proceed to the documentation.
+
+##### New type for CheckCommand Definition
+
+Create a new file with a `.conf` suffix.
+
+```bash
+vim printer.conf
+```
+
+Add the file to `itl/CMakeLists.txt` in the FILES line in **alpha-numeric order**.
+This ensures that the installation and packages properly include your newly created file.
+
+```
+vim CMakeLists.txt
+
+-FILES ipmi.conf network-components.conf operating-system.conf virtualization.conf vmware.conf
++FILES ipmi.conf network-components.conf operating-system.conf printer.conf virtualization.conf vmware.conf
+```
+
+Add the newly created file to your git commit.
+
+```bash
+git add printer.conf
+```
+
+Do not commit it yet; finish the documentation first.
+
+#### <a id="contributing-itl-checkcommands-docs"></a> Create CheckCommand Documentation
+
+Edit the documentation file in the `doc/` directory. More details on documentation
+updates can be found [here](CONTRIBUTING.md#contributing-patches-documentation).
+
+```bash
+vim doc/10-icinga-template-library.md
+```
+
+The CheckCommand documentation should be located in the chapter that
+corresponds to the configuration file you have just added/modified.
+
+Create a section for your plugin, add a description and a table of parameters. Each parameter should have at least:
+
+* optional or required
+* description of its purpose
+* the default value, if any
+
+Look at the existing documentation and "copy" the same style and layout.
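+
+For the hypothetical `printer` example above, a parameter table might look
+roughly like this (the attribute names are illustrative):
+
+```
+Name              | Description
+------------------|----------------------------------------------------------------------
+printer_address   | **Required.** The printer's address. Defaults to `$check_address$`.
+printer_warning   | **Optional.** Warning threshold for the consumable level.
+```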
+
+
+#### <a id="contributing-itl-checkcommands-patch"></a> Send a Patch
+
+Commit your changes with a descriptive commit message.
+
+```
+git commit -av
+Add printer CheckCommand definition
+
+Explain its purpose and possible enhancements/shortcomings.
+
+refs #existingticketnumberifany
+```
+
+Push the branch to the remote origin and create a [pull request](https://help.github.com/articles/using-pull-requests/).
+
+```bash
+git push --set-upstream origin feature/itl-check-printer
+hub pull-request
+```
+
+In case developers ask for changes during review, please add them
+to the branch and push those changes.
+
+## <a id="contributing-review"></a> Review
+
+### <a id="contributing-pr-review"></a> Pull Request Review
+
+This is only important for developers who will review pull requests. If you want to join
+the development team, kindly contact us.
+
+- Ensure that the patch follows the style guide.
+- Verify that the patch fixes a problem or linked issue, if any.
+- Discuss new features with team members.
+- Test the patch in your local dev environment.
+
+If there are changes required, kindly ask for an updated patch.
+
+Once the review is completed, merge the PR via GitHub.
+
+#### <a id="contributing-pr-review-fixes"></a> Pull Request Review Fixes
+
+In order to amend the commit message, fix conflicts or add missing changes, you can
+add your changes to the PR.
+
+A PR is just a pointer to a different Git repository and branch.
+By default, pull requests allow pushing into the repository of the PR creator.
+
+Example for [#4956](https://github.com/Icinga/icinga2/pull/4956):
+
+At the bottom it says "Add more commits by pushing to the bugfix/persistent-comments-are-not-persistent branch on TheFlyingCorpse/icinga2."
+
+First off, add the remote repository as an additional remote and fetch its content:
+
+```bash
+git remote add theflyingcorpse https://github.com/TheFlyingCorpse/icinga2
+git fetch --all
+```
+
+Check out the mentioned remote branch into a local branch (Note: `theflyingcorpse` is the name of the remote):
+
+```bash
+git checkout theflyingcorpse/bugfix/persistent-comments-are-not-persistent -b bugfix/persistent-comments-are-not-persistent
+```
+
+Rebase, amend, squash or add your own commits on top.
+
+Once you are satisfied, push the changes to the remote `theflyingcorpse` and its branch `bugfix/persistent-comments-are-not-persistent`.
+The syntax here is `git push <remote> <localbranch>:<remotebranch>`.
+
+```bash
+git push theflyingcorpse bugfix/persistent-comments-are-not-persistent:bugfix/persistent-comments-are-not-persistent
+```
+
+In case you've changed the commit history (rebase, amend, squash), you'll need to force push. Be careful, this can't be reverted!
+
+```bash
+git push -f theflyingcorpse bugfix/persistent-comments-are-not-persistent:bugfix/persistent-comments-are-not-persistent
+```
diff --git a/COPYING b/COPYING
new file mode 100644
index 0000000..d159169
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,339 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/ICINGA2_VERSION b/ICINGA2_VERSION
new file mode 100644
index 0000000..4ddf75a
--- /dev/null
+++ b/ICINGA2_VERSION
@@ -0,0 +1,2 @@
+Version: 2.14.2
+Revision: 1
diff --git a/NEWS b/NEWS
new file mode 100644
index 0000000..3cc501a
--- /dev/null
+++ b/NEWS
@@ -0,0 +1 @@
+News for this application can be found on the project website at https://icinga.com
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..890ebc7
--- /dev/null
+++ b/README.md
@@ -0,0 +1,97 @@
+[![Github Tag](https://img.shields.io/github/tag/Icinga/icinga2.svg)](https://github.com/Icinga/icinga2)
+
+# Icinga 2
+
+![Icinga Logo](https://icinga.com/wp-content/uploads/2014/06/icinga_logo.png)
+
+#### Table of Contents
+
+1. [About][About]
+2. [Installation][Installation]
+3. [Documentation][Documentation]
+4. [Support][Support]
+5. [License][License]
+6. [Contributing][Contributing]
+
+## About
+
+[Icinga](https://icinga.com/products/) is a monitoring system which checks
+the availability of your network resources, notifies users of outages, and generates
+performance data for reporting.
+
+Scalable and extensible, Icinga can monitor large, complex environments across
+multiple locations.
+
+Icinga 2 is the monitoring server and requires [Icinga Web 2](https://icinga.com/products/)
+on top of it in your Icinga stack. The [configuration](https://icinga.com/products/configuration/)
+can be managed with the [Icinga Director](https://icinga.com/docs/director/latest/),
+config management tools, or plain text within the [Icinga DSL](https://icinga.com/docs/icinga2/latest/doc/17-language-reference/).
+
+![Icinga Dashboard](https://icinga.com/wp-content/uploads/2017/12/icingaweb2-2.5.0-dashboard.png)
+
+## Installation
+
+* [Installation](https://icinga.com/docs/icinga2/latest/doc/02-installation/)
+* [Monitoring Basics](https://icinga.com/docs/icinga2/latest/doc/03-monitoring-basics/)
+* [Configuration](https://icinga.com/docs/icinga2/latest/doc/04-configuration/)
+* [Distributed Monitoring](https://icinga.com/docs/icinga2/latest/doc/06-distributed-monitoring/)
+* [Addons, Integrations and Features](https://icinga.com/docs/icinga2/latest/doc/13-addons/)
+* [Troubleshooting](https://icinga.com/docs/icinga2/latest/doc/15-troubleshooting/)
+* [Upgrading](https://icinga.com/docs/icinga2/latest/doc/16-upgrading-icinga-2/)
+
+Once Icinga Server and Web are running in your distributed environment,
+make sure to check out the many [Icinga modules](https://icinga.com/docs/)
+for even better monitoring.
+
+## Documentation
+
+The documentation is available on [icinga.com/docs](https://icinga.com/docs/icinga2/latest/).
+
+## Support
+
+Check the [project website](https://icinga.com) for status updates. Join the
+[community channels](https://icinga.com/community/) for questions
+or ask an Icinga partner for [professional support](https://icinga.com/support/).
+
+## License
+
+Icinga 2 and the Icinga 2 documentation are licensed under the terms of the GNU
+General Public License Version 2. You will find a copy of this license in the
+COPYING file included in the source package.
+
+In addition, as a special exception, the copyright holders give
+permission to link the code of portions of this program with the
+OpenSSL library under certain conditions as described in each
+individual source file, and distribute linked combinations including
+the two.
+
+You must obey the GNU General Public License in all respects for all
+of the code used other than OpenSSL. If you modify file(s) with this
+exception, you may extend this exception to your version of the
+file(s), but you are not obligated to do so. If you do not wish to do
+so, delete this exception statement from your version. If you delete
+this exception statement from all source files in the program, then
+also delete it here.
+
+## Contributing
+
+There are many ways to contribute to Icinga -- whether it be sending patches,
+testing, reporting bugs, or reviewing and updating the documentation. Every
+contribution is appreciated!
+
+Please continue reading in the [contributing chapter](CONTRIBUTING.md).
+
+If you are a packager, please read the [development chapter](https://icinga.com/docs/icinga2/latest/doc/21-development/)
+for more details.
+
+### Security Issues
+
+For reporting security issues please visit [this page](https://icinga.com/contact/security/).
+
+<!-- TOC URLs -->
+[About]: #about
+[License]: #license
+[Installation]: #installation
+[Documentation]: #documentation
+[Support]: #support
+[Contributing]: #contributing
diff --git a/RELEASE.md b/RELEASE.md
new file mode 100644
index 0000000..b529059
--- /dev/null
+++ b/RELEASE.md
@@ -0,0 +1,421 @@
+# Release Workflow <a id="release-workflow"></a>
+
+#### Table of Contents
+
+- [1. Preparations](#preparations)
+ - [1.1. Issues](#issues)
+ - [1.2. Backport Commits](#backport-commits)
+ - [1.3. Windows Dependencies](#windows-dependencies)
+- [2. Version](#version)
+- [3. Changelog](#changelog)
+- [4. Git Tag](#git-tag)
+- [5. Package Builds](#package-builds)
+ - [5.1. RPM Packages](#rpm-packages)
+ - [5.2. DEB Packages](#deb-packages)
+- [6. Build Infrastructure](#build-infrastructure)
+- [7. Release Tests](#release-tests)
+- [8. GitHub Release](#github-release)
+- [9. Docker](#docker)
+- [10. Post Release](#post-release)
+ - [10.1. Online Documentation](#online-documentation)
+ - [10.2. Announcement](#announcement)
+ - [10.3. Project Management](#project-management)
+
+## Preparations <a id="preparations"></a>
+
+Specify the release version.
+
+```bash
+VERSION=2.11.0
+```
+
+Add your signing key to your Git configuration file, if not already there.
+
+```
+vim $HOME/.gitconfig
+
+[user]
+ email = michael.friedrich@icinga.com
+ name = Michael Friedrich
+ signingkey = D14A1F16
+```
+
+### Issues <a id="issues"></a>
+
+Check issues at https://github.com/Icinga/icinga2
+
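+Optionally, the GitHub CLI can list what is still open for the release milestone
+(a sketch; the milestone name is only an example):
+
+```bash
+gh issue list --repo Icinga/icinga2 --milestone "2.11.0" --state open
+```
+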
+### Backport Commits <a id="backport-commits"></a>
+
+For minor versions you need to manually backport any and all commits from the
+master branch which should be part of this release.
+
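+A minimal sketch of the usual approach, assuming the support branch already
+exists and `<commit>` stands for each commit to transfer:
+
+```bash
+git checkout support/2.11
+# -x records the original commit hash in the backported commit's message
+git cherry-pick -x <commit>
+git push origin support/2.11
+```
+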
+### Windows Dependencies <a id="windows-dependencies"></a>
+
+In contrast to Linux, the bundled Windows dependencies
+(at least Boost and OpenSSL) aren't updated automatically.
+(Neither by Icinga administrators, nor at package build time.)
+
+To ensure the upcoming Icinga release ships the latest (i.e. most secure) dependencies on Windows:
+
+#### Update packages.icinga.com
+
+Add the latest Boost and OpenSSL versions to
+https://packages.icinga.com/windows/dependencies/ like this:
+
+```
+localhost:~$ ssh aptly.vm.icinga.com
+aptly:~$ sudo -i
+aptly:~# cd /var/www/html/aptly/public/windows/dependencies
+aptly:dependencies# wget https://master.dl.sourceforge.net/project/boost/boost-binaries/1.76.0/boost_1_76_0-msvc-14.2-64.exe
+aptly:dependencies# wget https://master.dl.sourceforge.net/project/boost/boost-binaries/1.76.0/boost_1_76_0-msvc-14.2-32.exe
+aptly:dependencies# wget https://slproweb.com/download/Win64OpenSSL-1_1_1k.exe
+aptly:dependencies# wget https://slproweb.com/download/Win32OpenSSL-1_1_1k.exe
+```
+
+#### Ensure Compatibility
+
+Preferably on a fresh Windows VM (so as not to accidentally build Icinga against
+old dependency versions), set up a dev environment using the new dependency versions:
+
+1. Download [doc/win-dev.ps1](doc/win-dev.ps1)
+2. Edit your local copy, adjust the dependency versions
+3. Ensure there are 35 GB free space on C:
+4. Run the following in an administrative Powershell:
+ 1. `Enable-WindowsOptionalFeature -FeatureName "NetFx3" -Online`
+ (reboot when asked!)
+ 2. `powershell -NoProfile -ExecutionPolicy Bypass -File "${Env:USERPROFILE}\Downloads\win-dev.ps1"`
+ (will take some time)
+
+Actually clone and build Icinga using the new dependency versions as described
+[here](https://github.com/Icinga/icinga2/blob/master/doc/21-development.md#tldr).
+Fix any incompatibilities.
+
+#### Update Build Server, CI/CD and Documentation
+
+* https://git.icinga.com/infra/ansible-windows-build
+ (don't forget to provision!)
+* [doc/21-development.md](doc/21-development.md)
+* [doc/win-dev.ps1](doc/win-dev.ps1)
+ (also affects CI/CD)
+* [tools/win32/configure.ps1](tools/win32/configure.ps1)
+* [tools/win32/configure-dev.ps1](tools/win32/configure-dev.ps1)
+
+#### Re-provision Build Server
+
+Even if there aren't any new releases of the dependencies whose versions are
+hardcoded in the repos and files listed above (Boost, OpenSSL), there may be new
+build versions of other dependencies (VS, MSVC).
+Our GitHub actions (tests) use the latest ones automatically,
+but the GitLab runner (release packages) doesn't.
+
+
+## Version <a id="version"></a>
+
+Update the version:
+
+```bash
+perl -pi -e "s/Version: .*/Version: $VERSION/g" ICINGA2_VERSION
+```
+
+## Changelog <a id="changelog"></a>
+
+Choose the most important issues and summarize them in multiple groups/paragraphs. Provide links to the mentioned
+issues/PRs. At the start include a link to the milestone's closed issues.
+
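+To collect candidates, it can help to list the commits merged since the previous
+release tag (the tag name below is only an example):
+
+```bash
+git log --oneline --no-merges v2.10.0..HEAD
+```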
+
+## Git Tag <a id="git-tag"></a>
+
+```bash
+git commit -v -a -m "Release version $VERSION"
+```
+
+Create a signed tag (tags/v<VERSION>) on the `master` branch (for major
+releases) or the `support` branch (for minor releases).
+
+```bash
+git tag -s -m "Version $VERSION" v$VERSION
+```
+
+Push the tag:
+
+```bash
+git push origin v$VERSION
+```
+
+**For major releases:** Create a new `support` branch:
+
+```bash
+git checkout master
+git push
+
+git checkout -b support/2.12
+git push -u origin support/2.12
+```
+
+
+## Package Builds <a id="package-builds"></a>
+
+```bash
+mkdir $HOME/dev/icinga/packaging
+cd $HOME/dev/icinga/packaging
+```
+
+### RPM Packages <a id="rpm-packages"></a>
+
+```bash
+git clone git@git.icinga.com:packaging/rpm-icinga2.git && cd rpm-icinga2
+```
+
+### DEB Packages <a id="deb-packages"></a>
+
+```bash
+git clone git@git.icinga.com:packaging/deb-icinga2.git && cd deb-icinga2
+```
+
+### Raspbian Packages
+
+```bash
+git clone git@git.icinga.com:packaging/raspbian-icinga2.git && cd raspbian-icinga2
+```
+
+### Windows Packages
+
+```bash
+git clone git@git.icinga.com:packaging/windows-icinga2.git && cd windows-icinga2
+```
+
+
+### Branch Workflow
+
+For each support branch in this repo (e.g. support/2.12), there exists a corresponding branch in the packaging repos
+(e.g. 2.12). Each package revision is a tagged commit on these branches. When doing a major release, create the new
+branch, otherwise switch to the existing one.
+
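+A short sketch of both cases, using `2.12` as a stand-in branch name:
+
+```bash
+# major release: create the new packaging branch
+git checkout -b 2.12 && git push -u origin 2.12
+
+# minor release: switch to the existing branch
+git checkout 2.12 && git pull
+```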
+
+### Switch Build Type
+
+Ensure that `ICINGA_BUILD_TYPE` is set to `release` in `.gitlab-ci.yml`. This should only be necessary after creating a
+new branch.
+
+```yaml
+variables:
+ ...
+ ICINGA_BUILD_TYPE: release
+ ...
+```
+
+Commit the change.
+
+```bash
+git commit -av -m "Switch build type for 2.13"
+```
+
+#### RPM Release Preparations
+
+Set the `Version`, `revision` and `%changelog` inside the spec file:
+
+```
+perl -pi -e "s/Version:.*/Version: $VERSION/g" icinga2.spec
+
+vim icinga2.spec
+
+%changelog
+* Thu Sep 19 2019 Michael Friedrich <michael.friedrich@icinga.com> 2.11.0-1
+- Update to 2.11.0
+```
+
+#### DEB and Raspbian Release Preparations
+
+Update file `debian/changelog` and add at the beginning:
+
+```
+icinga2 (2.11.0-1) icinga; urgency=medium
+
+ * Release 2.11.0
+
+ -- Michael Friedrich <michael.friedrich@icinga.com> Thu, 19 Sep 2019 10:50:31 +0200
+```
+
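+If the `devscripts` tooling is available, `dch` can prepend such an entry for you
+(a sketch using the same example release; `--force-distribution` is needed because
+`icinga` is not a stock Debian distribution name):
+
+```bash
+dch --newversion "$VERSION-1" --distribution icinga --force-distribution \
+    --urgency medium "Release $VERSION"
+```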
+
+#### Windows Release Preparations
+
+Update the file `.gitlab-ci.yml`:
+
+```
+perl -pi -e "s/^ UPSTREAM_GIT_BRANCH: .*/ UPSTREAM_GIT_BRANCH: v$VERSION/g" .gitlab-ci.yml
+perl -pi -e "s/^ ICINGA_FORCE_VERSION: .*/ ICINGA_FORCE_VERSION: v$VERSION/g" .gitlab-ci.yml
+```
+
+
+### Release Commit
+
+Commit the changes and push the branch.
+
+```bash
+git commit -av -m "Release $VERSION-1"
+git push origin 2.11
+```
+
+GitLab will now build snapshot packages based on the tag `v2.11.0` of Icinga 2.
+
+### Package Tests
+
+In order to test the created packages, you can download a job's artifacts:
+
+1. Visit [git.icinga.com](https://git.icinga.com/packaging/rpm-icinga2)
+   and navigate to the respective pipeline under `CI / CD -> Pipelines`.
+2. Click on the job you want to download packages from.
+3. The job's output appears; browse its artifacts via the right-hand sidebar.
+4. Navigate to `build/RPMS/noarch`, where you'll find the packages.
+
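+Alternatively, artifacts can be fetched via the GitLab job artifacts API instead of
+the web UI (a sketch; the access token, branch and job name are placeholders):
+
+```bash
+curl --header "PRIVATE-TOKEN: <token>" --output artifacts.zip \
+  "https://git.icinga.com/api/v4/projects/packaging%2Frpm-icinga2/jobs/artifacts/2.11/download?job=<job name>"
+```
+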
+### Release Packages
+
+To build release packages and upload them to [packages.icinga.com](https://packages.icinga.com)
+tag the release commit and push it.
+
+RPM/DEB/Raspbian:
+
+```bash
+git tag -s $VERSION-1 -m "Release v$VERSION-1"
+git push origin $VERSION-1
+```
+
+Windows:
+
+```bash
+git tag -s $VERSION -m "Release v$VERSION"
+git push origin $VERSION
+```
+
+
+Now cherry pick the release commit to `master` so that the changes are transferred back to it.
+
+**Attention**: Only the release commit. *NOT* the one switching the build type!
+
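+A minimal sketch (`<release commit>` stands for the hash of the release commit):
+
+```bash
+git checkout master && git pull
+# -x records the original commit hash in the new commit's message
+git cherry-pick -x <release commit>
+git push
+```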
+
+## Build Infrastructure <a id="build-infrastructure"></a>
+
+https://git.icinga.com/packaging/rpm-icinga2/pipelines
+https://git.icinga.com/packaging/deb-icinga2/pipelines
+https://git.icinga.com/packaging/windows-icinga2/pipelines
+https://git.icinga.com/packaging/raspbian-icinga2/pipelines
+
+* Verify package build changes for this version.
+* Test the snapshot packages for all distributions beforehand.
+
+Once the release repository tags are pushed, release builds
+are triggered and automatically published to [packages.icinga.com](https://packages.icinga.com).
+
+## Release Tests <a id="release-tests"></a>
+
+* Test DB IDO with MySQL and PostgreSQL.
+* Provision the vagrant boxes and test the release packages.
+* Test the [setup wizard](https://packages.icinga.com/windows/) inside a Windows VM.
+* Start a new docker container and install/run icinga2.
+
+### CentOS
+
+```bash
+docker run -ti centos:7 bash
+
+yum -y install https://packages.icinga.com/epel/icinga-rpm-release-7-latest.noarch.rpm
+yum -y install epel-release
+yum -y install icinga2
+icinga2 daemon -C
+```
+
+### Ubuntu
+
+```bash
+docker run -ti ubuntu:bionic bash
+
+apt-get update
+apt-get -y install apt-transport-https wget gnupg
+
+wget -O - https://packages.icinga.com/icinga.key | apt-key add -
+
+. /etc/os-release; if [ ! -z ${UBUNTU_CODENAME+x} ]; then DIST="${UBUNTU_CODENAME}"; else DIST="$(lsb_release -c| awk '{print $2}')"; fi; \
+ echo "deb https://packages.icinga.com/ubuntu icinga-${DIST} main" > \
+ /etc/apt/sources.list.d/${DIST}-icinga.list
+ echo "deb-src https://packages.icinga.com/ubuntu icinga-${DIST} main" >> \
+ /etc/apt/sources.list.d/${DIST}-icinga.list
+
+apt-get update
+
+apt-get -y install icinga2
+icinga2 daemon -C
+```
+
+
+## GitHub Release <a id="github-release"></a>
+
+Create a new release for the newly created Git tag: https://github.com/Icinga/icinga2/releases
+
+> Hint: Choose [tags](https://github.com/Icinga/icinga2/tags), pick one to edit and
+> make this a release. You can also create a draft release.
+
+The release body should contain a short changelog, with links
+into the roadmap, changelog and blogpost.
+
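+If you prefer the command line over the web UI, the GitHub CLI can create the
+release as well (a sketch; `release-notes.md` is a hypothetical file holding the
+release body):
+
+```bash
+gh release create "v$VERSION" --title "v$VERSION" --notes-file release-notes.md
+```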
+
+## Post Release <a id="post-release"></a>
+
+### Online Documentation <a id="online-documentation"></a>
+
+> Only required for major releases.
+
+Navigate to `puppet-customer/icinga.git` and do the following steps:
+
+#### Testing
+
+```bash
+git checkout testing && git pull
+vim files/var/www/docs/config/icinga2-latest.yml
+
+git commit -av -m "icinga-web: Update docs for Icinga 2"
+
+git push
+```
+
+SSH into the webserver and do a manual Puppet dry run with the testing environment.
+
+```bash
+puppet agent -t --environment testing --noop
+```
+
+Once that succeeds, continue with the production deployment.
+
+#### Production
+
+```bash
+git checkout master && git pull
+git merge testing
+git push
+```
+
+SSH into the webserver and do a manual Puppet run from the production environment (default).
+
+```bash
+puppet agent -t
+```
+
+#### Manual Generation
+
+SSH into the webserver or ask @bobapple.
+
+```bash
+cd /usr/local/icinga-docs-tools && ./build-docs.rb -c /var/www/docs/config/icinga2-latest.yml
+```
+
+### Announcement <a id="announcement"></a>
+
+* Create a new blog post on [icinga.com/blog](https://icinga.com/blog) including a featured image
+* Create a release topic on [community.icinga.com](https://community.icinga.com)
+* Release email to net-tech & team
+
+### Project Management <a id="project-management"></a>
+
+* Add new minor version on [GitHub](https://github.com/Icinga/icinga2/milestones).
diff --git a/agent/CMakeLists.txt b/agent/CMakeLists.txt
new file mode 100644
index 0000000..59c1e26
--- /dev/null
+++ b/agent/CMakeLists.txt
@@ -0,0 +1,14 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+if(MSVC)
+ include_external_msproject(
+ icinga2setupagent
+ ${CMAKE_CURRENT_SOURCE_DIR}/windows-setup-agent/Icinga2SetupAgent.csproj
+ TYPE FAE04EC0-301F-11D3-BF4B-00C04F79EFBC
+ )
+
+ install(
+ FILES ${CMAKE_CURRENT_SOURCE_DIR}/windows-setup-agent/bin/\${CMAKE_INSTALL_CONFIG_NAME}/Icinga2SetupAgent.exe ${CMAKE_CURRENT_SOURCE_DIR}/windows-setup-agent/bin/\${CMAKE_INSTALL_CONFIG_NAME}/Icinga2SetupAgent.exe.config
+ DESTINATION ${CMAKE_INSTALL_SBINDIR}
+ )
+endif()
diff --git a/agent/windows-setup-agent/.gitignore b/agent/windows-setup-agent/.gitignore
new file mode 100644
index 0000000..8d4a6c0
--- /dev/null
+++ b/agent/windows-setup-agent/.gitignore
@@ -0,0 +1,2 @@
+bin
+obj \ No newline at end of file
diff --git a/agent/windows-setup-agent/App.config b/agent/windows-setup-agent/App.config
new file mode 100644
index 0000000..5669c35
--- /dev/null
+++ b/agent/windows-setup-agent/App.config
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<configuration>
+ <startup>
+ <supportedRuntime version="v4.0" sku=".NETFramework,Version=v4.6"/>
+ </startup>
+</configuration> \ No newline at end of file
diff --git a/agent/windows-setup-agent/EndpointInputBox.Designer.cs b/agent/windows-setup-agent/EndpointInputBox.Designer.cs
new file mode 100644
index 0000000..04eb762
--- /dev/null
+++ b/agent/windows-setup-agent/EndpointInputBox.Designer.cs
@@ -0,0 +1,177 @@
+namespace Icinga
+{
+ partial class EndpointInputBox
+ {
+ /// <summary>
+ /// Required designer variable.
+ /// </summary>
+ private System.ComponentModel.IContainer components = null;
+
+ /// <summary>
+ /// Clean up any resources being used.
+ /// </summary>
+ /// <param name="disposing">true if managed resources should be disposed; otherwise, false.</param>
+ protected override void Dispose(bool disposing)
+ {
+ if (disposing && (components != null)) {
+ components.Dispose();
+ }
+ base.Dispose(disposing);
+ }
+
+ #region Windows Form Designer generated code
+
+ /// <summary>
+ /// Required method for Designer support - do not modify
+ /// the contents of this method with the code editor.
+ /// </summary>
+ private void InitializeComponent()
+ {
+ this.btnOK = new System.Windows.Forms.Button();
+ this.btnCancel = new System.Windows.Forms.Button();
+ this.txtHost = new System.Windows.Forms.TextBox();
+ this.txtPort = new System.Windows.Forms.TextBox();
+ this.label1 = new System.Windows.Forms.Label();
+ this.lblHost = new System.Windows.Forms.Label();
+ this.lblPort = new System.Windows.Forms.Label();
+ this.lblInstanceName = new System.Windows.Forms.Label();
+ this.txtInstanceName = new System.Windows.Forms.TextBox();
+ this.chkConnect = new System.Windows.Forms.CheckBox();
+ this.SuspendLayout();
+ //
+ // btnOK
+ //
+ this.btnOK.Location = new System.Drawing.Point(196, 171);
+ this.btnOK.Name = "btnOK";
+ this.btnOK.Size = new System.Drawing.Size(75, 23);
+ this.btnOK.TabIndex = 4;
+ this.btnOK.Text = "OK";
+ this.btnOK.UseVisualStyleBackColor = true;
+ this.btnOK.Click += new System.EventHandler(this.btnOK_Click);
+ //
+ // btnCancel
+ //
+ this.btnCancel.CausesValidation = false;
+ this.btnCancel.DialogResult = System.Windows.Forms.DialogResult.Cancel;
+ this.btnCancel.Location = new System.Drawing.Point(277, 171);
+ this.btnCancel.Name = "btnCancel";
+ this.btnCancel.Size = new System.Drawing.Size(75, 23);
+ this.btnCancel.TabIndex = 5;
+ this.btnCancel.Text = "Cancel";
+ this.btnCancel.UseVisualStyleBackColor = true;
+ //
+ // txtHost
+ //
+ this.txtHost.Location = new System.Drawing.Point(101, 103);
+ this.txtHost.Name = "txtHost";
+ this.txtHost.Size = new System.Drawing.Size(251, 20);
+ this.txtHost.TabIndex = 2;
+ //
+ // txtPort
+ //
+ this.txtPort.Location = new System.Drawing.Point(101, 134);
+ this.txtPort.Name = "txtPort";
+ this.txtPort.Size = new System.Drawing.Size(100, 20);
+ this.txtPort.TabIndex = 3;
+ this.txtPort.Text = "5665";
+ //
+ // label1
+ //
+ this.label1.AutoSize = true;
+ this.label1.Location = new System.Drawing.Point(12, 9);
+ this.label1.Name = "label1";
+ this.label1.Size = new System.Drawing.Size(276, 13);
+ this.label1.TabIndex = 4;
+ this.label1.Text = "Please enter the connection details for the new endpoint:";
+ //
+ // lblHost
+ //
+ this.lblHost.AutoSize = true;
+ this.lblHost.Location = new System.Drawing.Point(15, 106);
+ this.lblHost.Name = "lblHost";
+ this.lblHost.Size = new System.Drawing.Size(32, 13);
+ this.lblHost.TabIndex = 5;
+ this.lblHost.Text = "Host:";
+ //
+ // lblPort
+ //
+ this.lblPort.AutoSize = true;
+ this.lblPort.Location = new System.Drawing.Point(15, 137);
+ this.lblPort.Name = "lblPort";
+ this.lblPort.Size = new System.Drawing.Size(29, 13);
+ this.lblPort.TabIndex = 6;
+ this.lblPort.Text = "Port:";
+ //
+ // lblInstanceName
+ //
+ this.lblInstanceName.AutoSize = true;
+ this.lblInstanceName.Location = new System.Drawing.Point(15, 41);
+ this.lblInstanceName.Name = "lblInstanceName";
+ this.lblInstanceName.Size = new System.Drawing.Size(82, 13);
+ this.lblInstanceName.TabIndex = 7;
+ this.lblInstanceName.Text = "Instance Name:";
+ //
+ // txtInstanceName
+ //
+ this.txtInstanceName.Location = new System.Drawing.Point(101, 37);
+ this.txtInstanceName.Name = "txtInstanceName";
+ this.txtInstanceName.Size = new System.Drawing.Size(251, 20);
+ this.txtInstanceName.TabIndex = 0;
+ //
+ // chkConnect
+ //
+ this.chkConnect.AutoSize = true;
+ this.chkConnect.Checked = true;
+ this.chkConnect.CheckState = System.Windows.Forms.CheckState.Checked;
+ this.chkConnect.Location = new System.Drawing.Point(18, 73);
+ this.chkConnect.Name = "chkConnect";
+ this.chkConnect.Size = new System.Drawing.Size(141, 17);
+ this.chkConnect.TabIndex = 1;
+ this.chkConnect.Text = "Connect to this endpoint";
+ this.chkConnect.UseVisualStyleBackColor = true;
+ this.chkConnect.CheckedChanged += new System.EventHandler(this.chkConnect_CheckedChanged);
+ //
+ // EndpointInputBox
+ //
+ this.AcceptButton = this.btnOK;
+ this.AutoScaleDimensions = new System.Drawing.SizeF(6F, 13F);
+ this.AutoScaleMode = System.Windows.Forms.AutoScaleMode.Font;
+ this.CancelButton = this.btnCancel;
+ this.ClientSize = new System.Drawing.Size(360, 202);
+ this.Controls.Add(this.chkConnect);
+ this.Controls.Add(this.txtInstanceName);
+ this.Controls.Add(this.lblInstanceName);
+ this.Controls.Add(this.lblPort);
+ this.Controls.Add(this.lblHost);
+ this.Controls.Add(this.label1);
+ this.Controls.Add(this.txtPort);
+ this.Controls.Add(this.txtHost);
+ this.Controls.Add(this.btnCancel);
+ this.Controls.Add(this.btnOK);
+ this.FormBorderStyle = System.Windows.Forms.FormBorderStyle.FixedDialog;
+ this.MaximizeBox = false;
+ this.MinimizeBox = false;
+ this.Name = "EndpointInputBox";
+ this.ShowIcon = false;
+ this.ShowInTaskbar = false;
+ this.StartPosition = System.Windows.Forms.FormStartPosition.CenterParent;
+ this.Text = "Add Endpoint";
+ this.ResumeLayout(false);
+ this.PerformLayout();
+
+ }
+
+ #endregion
+
+ private System.Windows.Forms.Button btnOK;
+ private System.Windows.Forms.Button btnCancel;
+ private System.Windows.Forms.Label label1;
+ private System.Windows.Forms.Label lblHost;
+ private System.Windows.Forms.Label lblPort;
+ public System.Windows.Forms.TextBox txtHost;
+ public System.Windows.Forms.TextBox txtPort;
+ public System.Windows.Forms.TextBox txtInstanceName;
+ private System.Windows.Forms.Label lblInstanceName;
+ public System.Windows.Forms.CheckBox chkConnect;
+ }
+} \ No newline at end of file
diff --git a/agent/windows-setup-agent/EndpointInputBox.cs b/agent/windows-setup-agent/EndpointInputBox.cs
new file mode 100644
index 0000000..62ef8a2
--- /dev/null
+++ b/agent/windows-setup-agent/EndpointInputBox.cs
@@ -0,0 +1,52 @@
+using System;
+using System.Collections.Generic;
+using System.ComponentModel;
+using System.Data;
+using System.Drawing;
+using System.Text;
+using System.Windows.Forms;
+
+namespace Icinga
+{
+ public partial class EndpointInputBox : Form
+ {
+ public EndpointInputBox()
+ {
+ InitializeComponent();
+ }
+
+ private void Warning(string message)
+ {
+ MessageBox.Show(this, message, Text, MessageBoxButtons.OK, MessageBoxIcon.Warning);
+ }
+
+ private void chkConnect_CheckedChanged(object sender, EventArgs e)
+ {
+ txtHost.Enabled = chkConnect.Checked;
+ txtPort.Enabled = chkConnect.Checked;
+ }
+
+ private void btnOK_Click(object sender, EventArgs e)
+ {
+ if (txtInstanceName.Text.Length == 0) {
+ Warning("Please enter an instance name.");
+ return;
+ }
+
+ if (chkConnect.Checked) {
+ if (txtHost.Text.Length == 0) {
+ Warning("Please enter a host name.");
+ return;
+ }
+
+ if (txtPort.Text.Length == 0) {
+ Warning("Please enter a port.");
+ return;
+ }
+ }
+
+ DialogResult = DialogResult.OK;
+ Close();
+ }
+ }
+}
diff --git a/agent/windows-setup-agent/EndpointInputBox.resx b/agent/windows-setup-agent/EndpointInputBox.resx
new file mode 100644
index 0000000..7080a7d
--- /dev/null
+++ b/agent/windows-setup-agent/EndpointInputBox.resx
@@ -0,0 +1,120 @@
+<?xml version="1.0" encoding="utf-8"?>
+<root>
+ <!--
+ Microsoft ResX Schema
+
+ Version 2.0
+
+ The primary goals of this format is to allow a simple XML format
+ that is mostly human readable. The generation and parsing of the
+ various data types are done through the TypeConverter classes
+ associated with the data types.
+
+ Example:
+
+ ... ado.net/XML headers & schema ...
+ <resheader name="resmimetype">text/microsoft-resx</resheader>
+ <resheader name="version">2.0</resheader>
+ <resheader name="reader">System.Resources.ResXResourceReader, System.Windows.Forms, ...</resheader>
+ <resheader name="writer">System.Resources.ResXResourceWriter, System.Windows.Forms, ...</resheader>
+ <data name="Name1"><value>this is my long string</value><comment>this is a comment</comment></data>
+ <data name="Color1" type="System.Drawing.Color, System.Drawing">Blue</data>
+ <data name="Bitmap1" mimetype="application/x-microsoft.net.object.binary.base64">
+ <value>[base64 mime encoded serialized .NET Framework object]</value>
+ </data>
+ <data name="Icon1" type="System.Drawing.Icon, System.Drawing" mimetype="application/x-microsoft.net.object.bytearray.base64">
+ <value>[base64 mime encoded string representing a byte array form of the .NET Framework object]</value>
+ <comment>This is a comment</comment>
+ </data>
+
+ There are any number of "resheader" rows that contain simple
+ name/value pairs.
+
+ Each data row contains a name, and value. The row also contains a
+ type or mimetype. Type corresponds to a .NET class that support
+ text/value conversion through the TypeConverter architecture.
+ Classes that don't support this are serialized and stored with the
+ mimetype set.
+
+ The mimetype is used for serialized objects, and tells the
+ ResXResourceReader how to depersist the object. This is currently not
+ extensible. For a given mimetype the value must be set accordingly:
+
+ Note - application/x-microsoft.net.object.binary.base64 is the format
+ that the ResXResourceWriter will generate, however the reader can
+ read any of the formats listed below.
+
+ mimetype: application/x-microsoft.net.object.binary.base64
+ value : The object must be serialized with
+ : System.Runtime.Serialization.Formatters.Binary.BinaryFormatter
+ : and then encoded with base64 encoding.
+
+ mimetype: application/x-microsoft.net.object.soap.base64
+ value : The object must be serialized with
+ : System.Runtime.Serialization.Formatters.Soap.SoapFormatter
+ : and then encoded with base64 encoding.
+
+ mimetype: application/x-microsoft.net.object.bytearray.base64
+ value : The object must be serialized into a byte array
+ : using a System.ComponentModel.TypeConverter
+ : and then encoded with base64 encoding.
+ -->
+ <xsd:schema id="root" xmlns="" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:msdata="urn:schemas-microsoft-com:xml-msdata">
+ <xsd:import namespace="http://www.w3.org/XML/1998/namespace" />
+ <xsd:element name="root" msdata:IsDataSet="true">
+ <xsd:complexType>
+ <xsd:choice maxOccurs="unbounded">
+ <xsd:element name="metadata">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="value" type="xsd:string" minOccurs="0" />
+ </xsd:sequence>
+ <xsd:attribute name="name" use="required" type="xsd:string" />
+ <xsd:attribute name="type" type="xsd:string" />
+ <xsd:attribute name="mimetype" type="xsd:string" />
+ <xsd:attribute ref="xml:space" />
+ </xsd:complexType>
+ </xsd:element>
+ <xsd:element name="assembly">
+ <xsd:complexType>
+ <xsd:attribute name="alias" type="xsd:string" />
+ <xsd:attribute name="name" type="xsd:string" />
+ </xsd:complexType>
+ </xsd:element>
+ <xsd:element name="data">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
+ <xsd:element name="comment" type="xsd:string" minOccurs="0" msdata:Ordinal="2" />
+ </xsd:sequence>
+ <xsd:attribute name="name" type="xsd:string" use="required" msdata:Ordinal="1" />
+ <xsd:attribute name="type" type="xsd:string" msdata:Ordinal="3" />
+ <xsd:attribute name="mimetype" type="xsd:string" msdata:Ordinal="4" />
+ <xsd:attribute ref="xml:space" />
+ </xsd:complexType>
+ </xsd:element>
+ <xsd:element name="resheader">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
+ </xsd:sequence>
+ <xsd:attribute name="name" type="xsd:string" use="required" />
+ </xsd:complexType>
+ </xsd:element>
+ </xsd:choice>
+ </xsd:complexType>
+ </xsd:element>
+ </xsd:schema>
+ <resheader name="resmimetype">
+ <value>text/microsoft-resx</value>
+ </resheader>
+ <resheader name="version">
+ <value>2.0</value>
+ </resheader>
+ <resheader name="reader">
+ <value>System.Resources.ResXResourceReader, System.Windows.Forms, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
+ </resheader>
+ <resheader name="writer">
+ <value>System.Resources.ResXResourceWriter, System.Windows.Forms, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
+ </resheader>
+</root> \ No newline at end of file
diff --git a/agent/windows-setup-agent/GlobalZonesInputBox.Designer.cs b/agent/windows-setup-agent/GlobalZonesInputBox.Designer.cs
new file mode 100644
index 0000000..beda952
--- /dev/null
+++ b/agent/windows-setup-agent/GlobalZonesInputBox.Designer.cs
@@ -0,0 +1,117 @@
+namespace Icinga
+{
+ partial class GlobalZonesInputBox
+ {
+ /// <summary>
+ /// Required designer variable.
+ /// </summary>
+ private System.ComponentModel.IContainer components = null;
+
+ /// <summary>
+ /// Clean up any resources being used.
+ /// </summary>
+ /// <param name="disposing">true if managed resources should be disposed; otherwise, false.</param>
+ protected override void Dispose(bool disposing)
+ {
+ if (disposing && (components != null))
+ {
+ components.Dispose();
+ }
+ base.Dispose(disposing);
+ }
+
+ #region Windows Form Designer generated code
+
+ /// <summary>
+ /// Required method for Designer support - do not modify
+ /// the contents of this method with the code editor.
+ /// </summary>
+ private void InitializeComponent()
+ {
+ this.btnOK = new System.Windows.Forms.Button();
+ this.btnCancel = new System.Windows.Forms.Button();
+ this.label1 = new System.Windows.Forms.Label();
+ this.lblGlobalZoneName = new System.Windows.Forms.Label();
+ this.txtGlobalZoneName = new System.Windows.Forms.TextBox();
+ this.SuspendLayout();
+ //
+ // btnOK
+ //
+ this.btnOK.DialogResult = System.Windows.Forms.DialogResult.Cancel;
+ this.btnOK.Location = new System.Drawing.Point(191, 76);
+ this.btnOK.Name = "btnOK";
+ this.btnOK.Size = new System.Drawing.Size(75, 23);
+ this.btnOK.TabIndex = 0;
+ this.btnOK.Text = "OK";
+ this.btnOK.UseVisualStyleBackColor = true;
+ this.btnOK.Click += new System.EventHandler(this.btnOK_Click);
+ //
+ // btnCancel
+ //
+ this.btnCancel.DialogResult = System.Windows.Forms.DialogResult.Cancel;
+ this.btnCancel.Location = new System.Drawing.Point(272, 76);
+ this.btnCancel.Name = "btnCancel";
+ this.btnCancel.Size = new System.Drawing.Size(75, 23);
+ this.btnCancel.TabIndex = 1;
+ this.btnCancel.Text = "Cancel";
+ this.btnCancel.UseVisualStyleBackColor = true;
+ //
+ // label1
+ //
+ this.label1.AutoSize = true;
+ this.label1.Location = new System.Drawing.Point(13, 13);
+ this.label1.Name = "label1";
+ this.label1.Size = new System.Drawing.Size(231, 13);
+ this.label1.TabIndex = 2;
+ this.label1.Text = "Please enter the name for the new global Zone:";
+ //
+ // lblGlobalZoneName
+ //
+ this.lblGlobalZoneName.AutoSize = true;
+ this.lblGlobalZoneName.Location = new System.Drawing.Point(16, 46);
+ this.lblGlobalZoneName.Name = "lblGlobalZoneName";
+ this.lblGlobalZoneName.Size = new System.Drawing.Size(68, 13);
+ this.lblGlobalZoneName.TabIndex = 3;
+ this.lblGlobalZoneName.Text = "Global Zone:";
+ //
+ // txtGlobalZoneName
+ //
+ this.txtGlobalZoneName.Location = new System.Drawing.Point(90, 43);
+ this.txtGlobalZoneName.Name = "txtGlobalZoneName";
+ this.txtGlobalZoneName.Size = new System.Drawing.Size(257, 20);
+ this.txtGlobalZoneName.TabIndex = 0;
+ //
+ // GlobalZonesInputBox
+ //
+ this.AcceptButton = this.btnOK;
+ this.AutoScaleDimensions = new System.Drawing.SizeF(6F, 13F);
+ this.AutoScaleMode = System.Windows.Forms.AutoScaleMode.Font;
+ this.CancelButton = this.btnCancel;
+ this.ClientSize = new System.Drawing.Size(359, 111);
+ this.Controls.Add(this.txtGlobalZoneName);
+ this.Controls.Add(this.lblGlobalZoneName);
+ this.Controls.Add(this.label1);
+ this.Controls.Add(this.btnCancel);
+ this.Controls.Add(this.btnOK);
+ this.FormBorderStyle = System.Windows.Forms.FormBorderStyle.FixedDialog;
+ this.MaximizeBox = false;
+ this.MinimizeBox = false;
+ this.Name = "GlobalZonesInputBox";
+ this.ShowIcon = false;
+ this.ShowInTaskbar = false;
+ this.StartPosition = System.Windows.Forms.FormStartPosition.CenterParent;
+ this.Text = "Add Global Zones";
+ this.ResumeLayout(false);
+ this.PerformLayout();
+
+ }
+
+ #endregion
+
+ private System.Windows.Forms.Button btnOK;
+ private System.Windows.Forms.Button btnCancel;
+ private System.Windows.Forms.Label label1;
+ private System.Windows.Forms.Label lblGlobalZoneName;
+ public System.Windows.Forms.TextBox txtGlobalZoneName;
+ }
+} \ No newline at end of file
diff --git a/agent/windows-setup-agent/GlobalZonesInputBox.cs b/agent/windows-setup-agent/GlobalZonesInputBox.cs
new file mode 100644
index 0000000..cd8054d
--- /dev/null
+++ b/agent/windows-setup-agent/GlobalZonesInputBox.cs
@@ -0,0 +1,46 @@
+using System;
+using System.Collections.Generic;
+using System.ComponentModel;
+using System.Data;
+using System.Drawing;
+using System.Text;
+using System.Windows.Forms;
+
+namespace Icinga
+{
+ public partial class GlobalZonesInputBox : Form
+ {
+ private ListView.ListViewItemCollection globalZonesItems;
+
+ public GlobalZonesInputBox(ListView.ListViewItemCollection globalZonesItems)
+ {
+ InitializeComponent();
+
+ this.globalZonesItems = globalZonesItems;
+ }
+
+ private void Warning(string message)
+ {
+ MessageBox.Show(this, message, Text, MessageBoxButtons.OK, MessageBoxIcon.Warning);
+ }
+
+
+ private void btnOK_Click(object sender, EventArgs e)
+ {
+ if (txtGlobalZoneName.Text == "global-templates" || txtGlobalZoneName.Text == "director-global") {
+ Warning("This global zone is configured by default.");
+ return;
+ }
+
+ foreach (ListViewItem lvw in globalZonesItems) {
+ if (txtGlobalZoneName.Text == lvw.Text) {
+ Warning("This global zone is already defined.");
+ return;
+ }
+ }
+
+ DialogResult = DialogResult.OK;
+ Close();
+ }
+ }
+}
diff --git a/agent/windows-setup-agent/GlobalZonesInputBox.resx b/agent/windows-setup-agent/GlobalZonesInputBox.resx
new file mode 100644
index 0000000..7080a7d
--- /dev/null
+++ b/agent/windows-setup-agent/GlobalZonesInputBox.resx
@@ -0,0 +1,120 @@
+<?xml version="1.0" encoding="utf-8"?>
+<root>
+ <!--
+ Microsoft ResX Schema
+
+ Version 2.0
+
+ The primary goals of this format is to allow a simple XML format
+ that is mostly human readable. The generation and parsing of the
+ various data types are done through the TypeConverter classes
+ associated with the data types.
+
+ Example:
+
+ ... ado.net/XML headers & schema ...
+ <resheader name="resmimetype">text/microsoft-resx</resheader>
+ <resheader name="version">2.0</resheader>
+ <resheader name="reader">System.Resources.ResXResourceReader, System.Windows.Forms, ...</resheader>
+ <resheader name="writer">System.Resources.ResXResourceWriter, System.Windows.Forms, ...</resheader>
+ <data name="Name1"><value>this is my long string</value><comment>this is a comment</comment></data>
+ <data name="Color1" type="System.Drawing.Color, System.Drawing">Blue</data>
+ <data name="Bitmap1" mimetype="application/x-microsoft.net.object.binary.base64">
+ <value>[base64 mime encoded serialized .NET Framework object]</value>
+ </data>
+ <data name="Icon1" type="System.Drawing.Icon, System.Drawing" mimetype="application/x-microsoft.net.object.bytearray.base64">
+ <value>[base64 mime encoded string representing a byte array form of the .NET Framework object]</value>
+ <comment>This is a comment</comment>
+ </data>
+
+ There are any number of "resheader" rows that contain simple
+ name/value pairs.
+
+ Each data row contains a name, and value. The row also contains a
+ type or mimetype. Type corresponds to a .NET class that support
+ text/value conversion through the TypeConverter architecture.
+ Classes that don't support this are serialized and stored with the
+ mimetype set.
+
+ The mimetype is used for serialized objects, and tells the
+ ResXResourceReader how to depersist the object. This is currently not
+ extensible. For a given mimetype the value must be set accordingly:
+
+ Note - application/x-microsoft.net.object.binary.base64 is the format
+ that the ResXResourceWriter will generate, however the reader can
+ read any of the formats listed below.
+
+ mimetype: application/x-microsoft.net.object.binary.base64
+ value : The object must be serialized with
+ : System.Runtime.Serialization.Formatters.Binary.BinaryFormatter
+ : and then encoded with base64 encoding.
+
+ mimetype: application/x-microsoft.net.object.soap.base64
+ value : The object must be serialized with
+ : System.Runtime.Serialization.Formatters.Soap.SoapFormatter
+ : and then encoded with base64 encoding.
+
+ mimetype: application/x-microsoft.net.object.bytearray.base64
+ value : The object must be serialized into a byte array
+ : using a System.ComponentModel.TypeConverter
+ : and then encoded with base64 encoding.
+ -->
+ <xsd:schema id="root" xmlns="" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:msdata="urn:schemas-microsoft-com:xml-msdata">
+ <xsd:import namespace="http://www.w3.org/XML/1998/namespace" />
+ <xsd:element name="root" msdata:IsDataSet="true">
+ <xsd:complexType>
+ <xsd:choice maxOccurs="unbounded">
+ <xsd:element name="metadata">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="value" type="xsd:string" minOccurs="0" />
+ </xsd:sequence>
+ <xsd:attribute name="name" use="required" type="xsd:string" />
+ <xsd:attribute name="type" type="xsd:string" />
+ <xsd:attribute name="mimetype" type="xsd:string" />
+ <xsd:attribute ref="xml:space" />
+ </xsd:complexType>
+ </xsd:element>
+ <xsd:element name="assembly">
+ <xsd:complexType>
+ <xsd:attribute name="alias" type="xsd:string" />
+ <xsd:attribute name="name" type="xsd:string" />
+ </xsd:complexType>
+ </xsd:element>
+ <xsd:element name="data">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
+ <xsd:element name="comment" type="xsd:string" minOccurs="0" msdata:Ordinal="2" />
+ </xsd:sequence>
+ <xsd:attribute name="name" type="xsd:string" use="required" msdata:Ordinal="1" />
+ <xsd:attribute name="type" type="xsd:string" msdata:Ordinal="3" />
+ <xsd:attribute name="mimetype" type="xsd:string" msdata:Ordinal="4" />
+ <xsd:attribute ref="xml:space" />
+ </xsd:complexType>
+ </xsd:element>
+ <xsd:element name="resheader">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
+ </xsd:sequence>
+ <xsd:attribute name="name" type="xsd:string" use="required" />
+ </xsd:complexType>
+ </xsd:element>
+ </xsd:choice>
+ </xsd:complexType>
+ </xsd:element>
+ </xsd:schema>
+ <resheader name="resmimetype">
+ <value>text/microsoft-resx</value>
+ </resheader>
+ <resheader name="version">
+ <value>2.0</value>
+ </resheader>
+ <resheader name="reader">
+ <value>System.Resources.ResXResourceReader, System.Windows.Forms, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
+ </resheader>
+ <resheader name="writer">
+ <value>System.Resources.ResXResourceWriter, System.Windows.Forms, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
+ </resheader>
+</root> \ No newline at end of file
diff --git a/agent/windows-setup-agent/Icinga2SetupAgent.csproj b/agent/windows-setup-agent/Icinga2SetupAgent.csproj
new file mode 100644
index 0000000..17fe54f
--- /dev/null
+++ b/agent/windows-setup-agent/Icinga2SetupAgent.csproj
@@ -0,0 +1,262 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="12.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <Import Project="$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props" Condition="Exists('$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props')" />
+ <PropertyGroup>
+ <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
+ <Platform Condition=" '$(Platform)' == '' ">x64</Platform>
+ <ProjectGuid>{A86F1159-66E8-4BDB-BF28-A2BDAF76517C}</ProjectGuid>
+ <OutputType>WinExe</OutputType>
+ <AppDesignerFolder>Properties</AppDesignerFolder>
+ <RootNamespace>Icinga</RootNamespace>
+ <AssemblyName>Icinga2SetupAgent</AssemblyName>
+ <TargetFrameworkVersion>v4.6</TargetFrameworkVersion>
+ <FileAlignment>512</FileAlignment>
+ <TargetFrameworkProfile />
+ <PublishUrl>publish\</PublishUrl>
+ <Install>true</Install>
+ <InstallFrom>Disk</InstallFrom>
+ <UpdateEnabled>false</UpdateEnabled>
+ <UpdateMode>Foreground</UpdateMode>
+ <UpdateInterval>7</UpdateInterval>
+ <UpdateIntervalUnits>Days</UpdateIntervalUnits>
+ <UpdatePeriodically>false</UpdatePeriodically>
+ <UpdateRequired>false</UpdateRequired>
+ <MapFileExtensions>true</MapFileExtensions>
+ <ApplicationRevision>0</ApplicationRevision>
+ <ApplicationVersion>1.0.0.%2a</ApplicationVersion>
+ <IsWebBootstrapper>false</IsWebBootstrapper>
+ <UseApplicationTrust>false</UseApplicationTrust>
+ <BootstrapperEnabled>true</BootstrapperEnabled>
+ </PropertyGroup>
+ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|x86' ">
+ <PlatformTarget>x86</PlatformTarget>
+ <DebugSymbols>true</DebugSymbols>
+ <DebugType>full</DebugType>
+ <Optimize>false</Optimize>
+ <OutputPath>bin\Debug\</OutputPath>
+ <DefineConstants>DEBUG;TRACE</DefineConstants>
+ <ErrorReport>prompt</ErrorReport>
+ <WarningLevel>4</WarningLevel>
+ <Prefer32Bit>false</Prefer32Bit>
+ </PropertyGroup>
+ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|x86' ">
+ <PlatformTarget>x86</PlatformTarget>
+ <DebugType>pdbonly</DebugType>
+ <Optimize>true</Optimize>
+ <OutputPath>bin\Release\</OutputPath>
+ <DefineConstants>TRACE</DefineConstants>
+ <ErrorReport>prompt</ErrorReport>
+ <WarningLevel>4</WarningLevel>
+ <Prefer32Bit>false</Prefer32Bit>
+ </PropertyGroup>
+ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'RelWithDebInfo|x86' ">
+ <PlatformTarget>x86</PlatformTarget>
+ <DebugType>pdbonly</DebugType>
+ <Optimize>true</Optimize>
+ <OutputPath>bin\RelWithDebInfo\</OutputPath>
+ <DefineConstants>TRACE</DefineConstants>
+ <ErrorReport>prompt</ErrorReport>
+ <WarningLevel>4</WarningLevel>
+ <Prefer32Bit>false</Prefer32Bit>
+ </PropertyGroup>
+ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'MinSizeRel|x86' ">
+ <PlatformTarget>x86</PlatformTarget>
+ <DebugType>pdbonly</DebugType>
+ <Optimize>true</Optimize>
+ <OutputPath>bin\MinSizeRel\</OutputPath>
+ <DefineConstants>TRACE</DefineConstants>
+ <ErrorReport>prompt</ErrorReport>
+ <WarningLevel>4</WarningLevel>
+ <Prefer32Bit>false</Prefer32Bit>
+ </PropertyGroup>
+ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|Win32' ">
+ <PlatformTarget>x86</PlatformTarget>
+ <DebugSymbols>true</DebugSymbols>
+ <DebugType>full</DebugType>
+ <Optimize>false</Optimize>
+ <OutputPath>bin\Debug\</OutputPath>
+ <DefineConstants>DEBUG;TRACE</DefineConstants>
+ <ErrorReport>prompt</ErrorReport>
+ <WarningLevel>4</WarningLevel>
+ <Prefer32Bit>false</Prefer32Bit>
+ </PropertyGroup>
+ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|Win32' ">
+ <PlatformTarget>x86</PlatformTarget>
+ <DebugType>pdbonly</DebugType>
+ <Optimize>true</Optimize>
+ <OutputPath>bin\Release\</OutputPath>
+ <DefineConstants>TRACE</DefineConstants>
+ <ErrorReport>prompt</ErrorReport>
+ <WarningLevel>4</WarningLevel>
+ <Prefer32Bit>false</Prefer32Bit>
+ </PropertyGroup>
+ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'RelWithDebInfo|Win32' ">
+ <PlatformTarget>x86</PlatformTarget>
+ <DebugType>pdbonly</DebugType>
+ <Optimize>true</Optimize>
+ <OutputPath>bin\RelWithDebInfo\</OutputPath>
+ <DefineConstants>TRACE</DefineConstants>
+ <ErrorReport>prompt</ErrorReport>
+ <WarningLevel>4</WarningLevel>
+ <Prefer32Bit>false</Prefer32Bit>
+ </PropertyGroup>
+ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'MinSizeRel|Win32' ">
+ <PlatformTarget>x86</PlatformTarget>
+ <DebugType>pdbonly</DebugType>
+ <Optimize>true</Optimize>
+ <OutputPath>bin\MinSizeRel\</OutputPath>
+ <DefineConstants>TRACE</DefineConstants>
+ <ErrorReport>prompt</ErrorReport>
+ <WarningLevel>4</WarningLevel>
+ <Prefer32Bit>false</Prefer32Bit>
+ </PropertyGroup>
+ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|x64' ">
+ <PlatformTarget>x64</PlatformTarget>
+ <DebugSymbols>true</DebugSymbols>
+ <DebugType>full</DebugType>
+ <Optimize>false</Optimize>
+ <OutputPath>bin\Debug\</OutputPath>
+ <DefineConstants>DEBUG;TRACE</DefineConstants>
+ <ErrorReport>prompt</ErrorReport>
+ <WarningLevel>4</WarningLevel>
+ <Prefer32Bit>false</Prefer32Bit>
+ </PropertyGroup>
+ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|x64' ">
+ <PlatformTarget>x64</PlatformTarget>
+ <DebugType>pdbonly</DebugType>
+ <Optimize>true</Optimize>
+ <OutputPath>bin\Release\</OutputPath>
+ <DefineConstants>TRACE</DefineConstants>
+ <ErrorReport>prompt</ErrorReport>
+ <WarningLevel>4</WarningLevel>
+ <Prefer32Bit>false</Prefer32Bit>
+ </PropertyGroup>
+ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'RelWithDebInfo|x64' ">
+ <PlatformTarget>x64</PlatformTarget>
+ <DebugType>pdbonly</DebugType>
+ <Optimize>true</Optimize>
+ <OutputPath>bin\RelWithDebInfo\</OutputPath>
+ <DefineConstants>TRACE</DefineConstants>
+ <ErrorReport>prompt</ErrorReport>
+ <WarningLevel>4</WarningLevel>
+ <Prefer32Bit>false</Prefer32Bit>
+ </PropertyGroup>
+ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'MinSizeRel|x64' ">
+ <PlatformTarget>x64</PlatformTarget>
+ <DebugType>pdbonly</DebugType>
+ <Optimize>true</Optimize>
+ <OutputPath>bin\MinSizeRel\</OutputPath>
+ <DefineConstants>TRACE</DefineConstants>
+ <ErrorReport>prompt</ErrorReport>
+ <WarningLevel>4</WarningLevel>
+ <Prefer32Bit>false</Prefer32Bit>
+ </PropertyGroup>
+ <PropertyGroup>
+ <ApplicationIcon>icinga.ico</ApplicationIcon>
+ </PropertyGroup>
+ <PropertyGroup>
+ <ApplicationManifest>app.manifest</ApplicationManifest>
+ </PropertyGroup>
+ <ItemGroup>
+ <Reference Include="System" />
+ <Reference Include="System.Data" />
+ <Reference Include="System.Drawing" />
+ <Reference Include="System.ServiceProcess" />
+ <Reference Include="System.Windows.Forms" />
+ <Reference Include="System.Xml" />
+ </ItemGroup>
+ <ItemGroup>
+ <Compile Include="GlobalZonesInputBox.cs">
+ <SubType>Form</SubType>
+ </Compile>
+ <Compile Include="GlobalZonesInputBox.Designer.cs">
+ <DependentUpon>GlobalZonesInputBox.cs</DependentUpon>
+ </Compile>
+ <Compile Include="ServiceStatus.cs">
+ <SubType>Form</SubType>
+ </Compile>
+ <Compile Include="ServiceStatus.Designer.cs">
+ <DependentUpon>ServiceStatus.cs</DependentUpon>
+ </Compile>
+ <Compile Include="SetupWizard.cs">
+ <SubType>Form</SubType>
+ </Compile>
+ <Compile Include="SetupWizard.Designer.cs">
+ <DependentUpon>SetupWizard.cs</DependentUpon>
+ </Compile>
+ <Compile Include="EndpointInputBox.cs">
+ <SubType>Form</SubType>
+ </Compile>
+ <Compile Include="EndpointInputBox.Designer.cs">
+ <DependentUpon>EndpointInputBox.cs</DependentUpon>
+ </Compile>
+ <Compile Include="Program.cs" />
+ <Compile Include="Properties\AssemblyInfo.cs" />
+ <EmbeddedResource Include="GlobalZonesInputBox.resx">
+ <DependentUpon>GlobalZonesInputBox.cs</DependentUpon>
+ </EmbeddedResource>
+ <EmbeddedResource Include="ServiceStatus.resx">
+ <DependentUpon>ServiceStatus.cs</DependentUpon>
+ </EmbeddedResource>
+ <EmbeddedResource Include="SetupWizard.resx">
+ <DependentUpon>SetupWizard.cs</DependentUpon>
+ </EmbeddedResource>
+ <EmbeddedResource Include="EndpointInputBox.resx">
+ <DependentUpon>EndpointInputBox.cs</DependentUpon>
+ </EmbeddedResource>
+ <EmbeddedResource Include="Properties\Resources.resx">
+ <Generator>ResXFileCodeGenerator</Generator>
+ <LastGenOutput>Resources.Designer.cs</LastGenOutput>
+ <SubType>Designer</SubType>
+ </EmbeddedResource>
+ <Compile Include="Properties\Resources.Designer.cs">
+ <AutoGen>True</AutoGen>
+ <DependentUpon>Resources.resx</DependentUpon>
+ <DesignTime>True</DesignTime>
+ </Compile>
+ <None Include="app.manifest" />
+ <None Include="Properties\Settings.settings">
+ <Generator>SettingsSingleFileGenerator</Generator>
+ <LastGenOutput>Settings.Designer.cs</LastGenOutput>
+ </None>
+ <Compile Include="Properties\Settings.Designer.cs">
+ <AutoGen>True</AutoGen>
+ <DependentUpon>Settings.settings</DependentUpon>
+ <DesignTimeSharedInput>True</DesignTimeSharedInput>
+ </Compile>
+ </ItemGroup>
+ <ItemGroup>
+ <None Include="App.config" />
+ </ItemGroup>
+ <ItemGroup>
+ <None Include="icinga-banner.png" />
+ </ItemGroup>
+ <ItemGroup>
+ <Content Include="icinga.ico" />
+ </ItemGroup>
+ <ItemGroup>
+ <BootstrapperPackage Include=".NETFramework,Version=v4.5">
+ <Visible>False</Visible>
+ <ProductName>Microsoft .NET Framework 4.5 %28x86 and x64%29</ProductName>
+ <Install>true</Install>
+ </BootstrapperPackage>
+ <BootstrapperPackage Include="Microsoft.Net.Client.3.5">
+ <Visible>False</Visible>
+ <ProductName>.NET Framework 3.5 SP1 Client Profile</ProductName>
+ <Install>false</Install>
+ </BootstrapperPackage>
+ <BootstrapperPackage Include="Microsoft.Net.Framework.3.5.SP1">
+ <Visible>False</Visible>
+ <ProductName>.NET Framework 3.5 SP1</ProductName>
+ <Install>false</Install>
+ </BootstrapperPackage>
+ </ItemGroup>
+ <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
+ <!-- To modify your build process, add your task inside one of the targets below and uncomment it.
+ Other similar extension points exist, see Microsoft.Common.targets.
+ <Target Name="BeforeBuild">
+ </Target>
+ <Target Name="AfterBuild">
+ </Target>
+ -->
+</Project> \ No newline at end of file
diff --git a/agent/windows-setup-agent/Program.cs b/agent/windows-setup-agent/Program.cs
new file mode 100644
index 0000000..b22b042
--- /dev/null
+++ b/agent/windows-setup-agent/Program.cs
@@ -0,0 +1,109 @@
+using System;
+using System.IO;
+using System.Windows.Forms;
+using Microsoft.Win32;
+using System.Runtime.InteropServices;
+using System.Text;
+
+namespace Icinga
+{
+ internal static class NativeMethods
+ {
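+		// P/Invoke declarations for the Windows Installer API (msi.dll) used to
+		// look up where the Icinga 2 MSI package is installed.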
+ [DllImport("msi.dll", CharSet = CharSet.Unicode)]
+ internal static extern int MsiEnumProducts(int iProductIndex, StringBuilder lpProductBuf);
+
+ [DllImport("msi.dll", CharSet = CharSet.Unicode)]
+ internal static extern Int32 MsiGetProductInfo(string product, string property, [Out] StringBuilder valueBuf, ref Int32 len);
+ }
+
+ static class Program
+ {
+ public static string Icinga2InstallDir
+ {
+ get
+ {
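+				// Walk all installed MSI products and return the InstallLocation of the
+				// one whose ProductName is "Icinga 2"; product codes are 38-character
+				// GUID strings, hence the 39-character buffer below.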
+ StringBuilder szProduct;
+
+ for (int index = 0; ; index++) {
+ szProduct = new StringBuilder(39);
+ if (NativeMethods.MsiEnumProducts(index, szProduct) != 0)
+ break;
+
+ int cbName = 128;
+ StringBuilder szName = new StringBuilder(cbName);
+
+ if (NativeMethods.MsiGetProductInfo(szProduct.ToString(), "ProductName", szName, ref cbName) != 0)
+ continue;
+
+ if (szName.ToString() != "Icinga 2")
+ continue;
+
+ int cbLocation = 1024;
+ StringBuilder szLocation = new StringBuilder(cbLocation);
+ if (NativeMethods.MsiGetProductInfo(szProduct.ToString(), "InstallLocation", szLocation, ref cbLocation) == 0)
+ return szLocation.ToString();
+ }
+
+ return "";
+ }
+ }
+
+ public static string Icinga2DataDir
+ {
+ get
+ {
+ return Environment.GetFolderPath(Environment.SpecialFolder.CommonApplicationData) + "\\icinga2";
+ }
+ }
+
+ public static string Icinga2User
+ {
+ get
+ {
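+				// An optional "user" file under the data directory overrides the account
+				// the service runs as; otherwise fall back to NT AUTHORITY\NetworkService.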
+ if (!File.Exists(Icinga2DataDir + "\\etc\\icinga2\\user"))
+ return "NT AUTHORITY\\NetworkService";
+ System.IO.StreamReader file = new System.IO.StreamReader(Icinga2DataDir + "\\etc\\icinga2\\user");
+ string line = file.ReadLine();
+ file.Close();
+
+ if (line != null)
+ return line;
+ else
+ return "NT AUTHORITY\\NetworkService";
+ }
+ }
+
+
+ public static void FatalError(Form owner, string message)
+ {
+ MessageBox.Show(owner, message, "Icinga 2 Setup Wizard", MessageBoxButtons.OK, MessageBoxIcon.Error);
+ Application.Exit();
+ }
+
+ /// <summary>
+ /// The main entry point for the application.
+ /// </summary>
+ [STAThread]
+ static void Main()
+ {
+ Application.EnableVisualStyles();
+ Application.SetCompatibleTextRenderingDefault(false);
+
+ string installDir = Program.Icinga2InstallDir;
+
+ if (installDir == "") {
+ FatalError(null, "Icinga 2 does not seem to be installed properly.");
+ return;
+ }
+
+ Form form;
+
+ if (File.Exists(Program.Icinga2DataDir + "\\etc\\icinga2\\features-enabled\\api.conf"))
+ form = new ServiceStatus();
+ else
+ form = new SetupWizard();
+
+ Application.Run(form);
+ }
+ }
+}
diff --git a/agent/windows-setup-agent/Properties/AssemblyInfo.cs b/agent/windows-setup-agent/Properties/AssemblyInfo.cs
new file mode 100644
index 0000000..44d894c
--- /dev/null
+++ b/agent/windows-setup-agent/Properties/AssemblyInfo.cs
@@ -0,0 +1,36 @@
+using System.Reflection;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+
+// General Information about an assembly is controlled through the following
+// set of attributes. Change these attribute values to modify the information
+// associated with an assembly.
+[assembly: AssemblyTitle("Icinga 2 Agent Wizard")]
+[assembly: AssemblyDescription("")]
+[assembly: AssemblyConfiguration("")]
+[assembly: AssemblyCompany("Icinga GmbH")]
+[assembly: AssemblyProduct("Icinga 2")]
+[assembly: AssemblyCopyright("Copyright © 2019 Icinga GmbH")]
+[assembly: AssemblyTrademark("")]
+[assembly: AssemblyCulture("")]
+
+// Setting ComVisible to false makes the types in this assembly not visible
+// to COM components. If you need to access a type in this assembly from
+// COM, set the ComVisible attribute to true on that type.
+[assembly: ComVisible(false)]
+
+// The following GUID is for the ID of the typelib if this project is exposed to COM
+[assembly: Guid("51f4fcaf-8cf8-4d1c-9fde-61526c17a0d8")]
+
+// Version information for an assembly consists of the following four values:
+//
+// Major Version
+// Minor Version
+// Build Number
+// Revision
+//
+// You can specify all the values or you can default the Build and Revision Numbers
+// by using the '*' as shown below:
+// [assembly: AssemblyVersion("1.0.*")]
+[assembly: AssemblyVersion("1.0.0.0")]
+[assembly: AssemblyFileVersion("1.0.0.0")]
diff --git a/agent/windows-setup-agent/Properties/Resources.Designer.cs b/agent/windows-setup-agent/Properties/Resources.Designer.cs
new file mode 100644
index 0000000..737e2d3
--- /dev/null
+++ b/agent/windows-setup-agent/Properties/Resources.Designer.cs
@@ -0,0 +1,73 @@
+//------------------------------------------------------------------------------
+// <auto-generated>
+// This code was generated by a tool.
+// Runtime Version:4.0.30319.42000
+//
+// Changes to this file may cause incorrect behavior and will be lost if
+// the code is regenerated.
+// </auto-generated>
+//------------------------------------------------------------------------------
+
+namespace Icinga.Properties {
+ using System;
+
+
+ /// <summary>
+ /// A strongly-typed resource class, for looking up localized strings, etc.
+ /// </summary>
+ // This class was auto-generated by the StronglyTypedResourceBuilder
+ // class via a tool like ResGen or Visual Studio.
+ // To add or remove a member, edit your .ResX file then rerun ResGen
+ // with the /str option, or rebuild your VS project.
+ [global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "15.0.0.0")]
+ [global::System.Diagnostics.DebuggerNonUserCodeAttribute()]
+ [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()]
+ internal class Resources {
+
+ private static global::System.Resources.ResourceManager resourceMan;
+
+ private static global::System.Globalization.CultureInfo resourceCulture;
+
+ [global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")]
+ internal Resources() {
+ }
+
+ /// <summary>
+ /// Returns the cached ResourceManager instance used by this class.
+ /// </summary>
+ [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)]
+ internal static global::System.Resources.ResourceManager ResourceManager {
+ get {
+ if (object.ReferenceEquals(resourceMan, null)) {
+ global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("Icinga.Properties.Resources", typeof(Resources).Assembly);
+ resourceMan = temp;
+ }
+ return resourceMan;
+ }
+ }
+
+ /// <summary>
+ /// Overrides the current thread's CurrentUICulture property for all
+ /// resource lookups using this strongly typed resource class.
+ /// </summary>
+ [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)]
+ internal static global::System.Globalization.CultureInfo Culture {
+ get {
+ return resourceCulture;
+ }
+ set {
+ resourceCulture = value;
+ }
+ }
+
+ /// <summary>
+ /// Looks up a localized resource of type System.Drawing.Bitmap.
+ /// </summary>
+ internal static System.Drawing.Bitmap icinga_banner {
+ get {
+ object obj = ResourceManager.GetObject("icinga_banner", resourceCulture);
+ return ((System.Drawing.Bitmap)(obj));
+ }
+ }
+ }
+}
diff --git a/agent/windows-setup-agent/Properties/Resources.resx b/agent/windows-setup-agent/Properties/Resources.resx
new file mode 100644
index 0000000..864cbe1
--- /dev/null
+++ b/agent/windows-setup-agent/Properties/Resources.resx
@@ -0,0 +1,124 @@
+<?xml version="1.0" encoding="utf-8"?>
+<root>
+ <!--
+ Microsoft ResX Schema
+
+ Version 2.0
+
+    The primary goal of this format is to allow a simple XML format
+ that is mostly human readable. The generation and parsing of the
+ various data types are done through the TypeConverter classes
+ associated with the data types.
+
+ Example:
+
+ ... ado.net/XML headers & schema ...
+ <resheader name="resmimetype">text/microsoft-resx</resheader>
+ <resheader name="version">2.0</resheader>
+ <resheader name="reader">System.Resources.ResXResourceReader, System.Windows.Forms, ...</resheader>
+ <resheader name="writer">System.Resources.ResXResourceWriter, System.Windows.Forms, ...</resheader>
+ <data name="Name1"><value>this is my long string</value><comment>this is a comment</comment></data>
+ <data name="Color1" type="System.Drawing.Color, System.Drawing">Blue</data>
+ <data name="Bitmap1" mimetype="application/x-microsoft.net.object.binary.base64">
+ <value>[base64 mime encoded serialized .NET Framework object]</value>
+ </data>
+ <data name="Icon1" type="System.Drawing.Icon, System.Drawing" mimetype="application/x-microsoft.net.object.bytearray.base64">
+ <value>[base64 mime encoded string representing a byte array form of the .NET Framework object]</value>
+ <comment>This is a comment</comment>
+ </data>
+
+ There are any number of "resheader" rows that contain simple
+ name/value pairs.
+
+ Each data row contains a name, and value. The row also contains a
+    type or mimetype. Type corresponds to a .NET class that supports
+ text/value conversion through the TypeConverter architecture.
+ Classes that don't support this are serialized and stored with the
+ mimetype set.
+
+ The mimetype is used for serialized objects, and tells the
+ ResXResourceReader how to depersist the object. This is currently not
+ extensible. For a given mimetype the value must be set accordingly:
+
+ Note - application/x-microsoft.net.object.binary.base64 is the format
+ that the ResXResourceWriter will generate, however the reader can
+ read any of the formats listed below.
+
+ mimetype: application/x-microsoft.net.object.binary.base64
+ value : The object must be serialized with
+ : System.Runtime.Serialization.Formatters.Binary.BinaryFormatter
+ : and then encoded with base64 encoding.
+
+ mimetype: application/x-microsoft.net.object.soap.base64
+ value : The object must be serialized with
+ : System.Runtime.Serialization.Formatters.Soap.SoapFormatter
+ : and then encoded with base64 encoding.
+
+ mimetype: application/x-microsoft.net.object.bytearray.base64
+ value : The object must be serialized into a byte array
+ : using a System.ComponentModel.TypeConverter
+ : and then encoded with base64 encoding.
+ -->
+ <xsd:schema id="root" xmlns="" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:msdata="urn:schemas-microsoft-com:xml-msdata">
+ <xsd:import namespace="http://www.w3.org/XML/1998/namespace" />
+ <xsd:element name="root" msdata:IsDataSet="true">
+ <xsd:complexType>
+ <xsd:choice maxOccurs="unbounded">
+ <xsd:element name="metadata">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="value" type="xsd:string" minOccurs="0" />
+ </xsd:sequence>
+ <xsd:attribute name="name" use="required" type="xsd:string" />
+ <xsd:attribute name="type" type="xsd:string" />
+ <xsd:attribute name="mimetype" type="xsd:string" />
+ <xsd:attribute ref="xml:space" />
+ </xsd:complexType>
+ </xsd:element>
+ <xsd:element name="assembly">
+ <xsd:complexType>
+ <xsd:attribute name="alias" type="xsd:string" />
+ <xsd:attribute name="name" type="xsd:string" />
+ </xsd:complexType>
+ </xsd:element>
+ <xsd:element name="data">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
+ <xsd:element name="comment" type="xsd:string" minOccurs="0" msdata:Ordinal="2" />
+ </xsd:sequence>
+ <xsd:attribute name="name" type="xsd:string" use="required" msdata:Ordinal="1" />
+ <xsd:attribute name="type" type="xsd:string" msdata:Ordinal="3" />
+ <xsd:attribute name="mimetype" type="xsd:string" msdata:Ordinal="4" />
+ <xsd:attribute ref="xml:space" />
+ </xsd:complexType>
+ </xsd:element>
+ <xsd:element name="resheader">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
+ </xsd:sequence>
+ <xsd:attribute name="name" type="xsd:string" use="required" />
+ </xsd:complexType>
+ </xsd:element>
+ </xsd:choice>
+ </xsd:complexType>
+ </xsd:element>
+ </xsd:schema>
+ <resheader name="resmimetype">
+ <value>text/microsoft-resx</value>
+ </resheader>
+ <resheader name="version">
+ <value>2.0</value>
+ </resheader>
+ <resheader name="reader">
+ <value>System.Resources.ResXResourceReader, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
+ </resheader>
+ <resheader name="writer">
+ <value>System.Resources.ResXResourceWriter, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
+ </resheader>
+ <assembly alias="System.Windows.Forms" name="System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089" />
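+  <!-- File reference: the banner bitmap (..\icinga-banner.png) exposed to code as Resources.icinga_banner. -->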
+ <data name="icinga_banner" type="System.Resources.ResXFileRef, System.Windows.Forms">
+ <value>..\icinga-banner.png;System.Drawing.Bitmap, System.Drawing, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a</value>
+ </data>
+</root> \ No newline at end of file
diff --git a/agent/windows-setup-agent/Properties/Settings.Designer.cs b/agent/windows-setup-agent/Properties/Settings.Designer.cs
new file mode 100644
index 0000000..ad169da
--- /dev/null
+++ b/agent/windows-setup-agent/Properties/Settings.Designer.cs
@@ -0,0 +1,26 @@
+//------------------------------------------------------------------------------
+// <auto-generated>
+// This code was generated by a tool.
+// Runtime Version:4.0.30319.42000
+//
+// Changes to this file may cause incorrect behavior and will be lost if
+// the code is regenerated.
+// </auto-generated>
+//------------------------------------------------------------------------------
+
+namespace Icinga.Properties {
+
+
+ [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()]
+ [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.Editors.SettingsDesigner.SettingsSingleFileGenerator", "15.9.0.0")]
+ internal sealed partial class Settings : global::System.Configuration.ApplicationSettingsBase {
+
+ private static Settings defaultInstance = ((Settings)(global::System.Configuration.ApplicationSettingsBase.Synchronized(new Settings())));
+
+ public static Settings Default {
+ get {
+ return defaultInstance;
+ }
+ }
+ }
+}
diff --git a/agent/windows-setup-agent/Properties/Settings.settings b/agent/windows-setup-agent/Properties/Settings.settings
new file mode 100644
index 0000000..3964565
--- /dev/null
+++ b/agent/windows-setup-agent/Properties/Settings.settings
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='utf-8'?>
+<SettingsFile xmlns="http://schemas.microsoft.com/VisualStudio/2004/01/settings" CurrentProfile="(Default)">
+ <Profiles>
+ <Profile Name="(Default)" />
+ </Profiles>
+ <Settings />
+</SettingsFile>
diff --git a/agent/windows-setup-agent/ServiceStatus.Designer.cs b/agent/windows-setup-agent/ServiceStatus.Designer.cs
new file mode 100644
index 0000000..cda0fd4
--- /dev/null
+++ b/agent/windows-setup-agent/ServiceStatus.Designer.cs
@@ -0,0 +1,132 @@
+namespace Icinga
+{
+ partial class ServiceStatus
+ {
+ /// <summary>
+ /// Required designer variable.
+ /// </summary>
+ private System.ComponentModel.IContainer components = null;
+
+ /// <summary>
+ /// Clean up any resources being used.
+ /// </summary>
+ /// <param name="disposing">true if managed resources should be disposed; otherwise, false.</param>
+ protected override void Dispose(bool disposing)
+ {
+ if (disposing && (components != null))
+ {
+ components.Dispose();
+ }
+ base.Dispose(disposing);
+ }
+
+ #region Windows Form Designer generated code
+
+ /// <summary>
+ /// Required method for Designer support - do not modify
+ /// the contents of this method with the code editor.
+ /// </summary>
+ private void InitializeComponent()
+ {
+ System.ComponentModel.ComponentResourceManager resources = new System.ComponentModel.ComponentResourceManager(typeof(ServiceStatus));
+ this.picBanner = new System.Windows.Forms.PictureBox();
+ this.lblStatus = new System.Windows.Forms.Label();
+ this.txtStatus = new System.Windows.Forms.TextBox();
+ this.btnReconfigure = new System.Windows.Forms.Button();
+ this.btnOK = new System.Windows.Forms.Button();
+ this.btnOpenConfigDir = new System.Windows.Forms.Button();
+ ((System.ComponentModel.ISupportInitialize)(this.picBanner)).BeginInit();
+ this.SuspendLayout();
+ //
+ // picBanner
+ //
+ this.picBanner.Image = global::Icinga.Properties.Resources.icinga_banner;
+ this.picBanner.Location = new System.Drawing.Point(0, 0);
+ this.picBanner.Name = "picBanner";
+ this.picBanner.Size = new System.Drawing.Size(625, 77);
+ this.picBanner.TabIndex = 2;
+ this.picBanner.TabStop = false;
+ //
+ // lblStatus
+ //
+ this.lblStatus.AutoSize = true;
+ this.lblStatus.Location = new System.Drawing.Point(12, 105);
+ this.lblStatus.Name = "lblStatus";
+ this.lblStatus.Size = new System.Drawing.Size(79, 13);
+ this.lblStatus.TabIndex = 3;
+ this.lblStatus.Text = "Service Status:";
+ //
+ // txtStatus
+ //
+ this.txtStatus.Location = new System.Drawing.Point(97, 102);
+ this.txtStatus.Name = "txtStatus";
+ this.txtStatus.ReadOnly = true;
+ this.txtStatus.Size = new System.Drawing.Size(278, 20);
+ this.txtStatus.TabIndex = 3;
+ //
+ // btnReconfigure
+ //
+ this.btnReconfigure.Location = new System.Drawing.Point(195, 143);
+ this.btnReconfigure.Name = "btnReconfigure";
+ this.btnReconfigure.Size = new System.Drawing.Size(89, 23);
+ this.btnReconfigure.TabIndex = 1;
+ this.btnReconfigure.Text = "Reconfigure";
+ this.btnReconfigure.UseVisualStyleBackColor = true;
+ this.btnReconfigure.Click += new System.EventHandler(this.btnReconfigure_Click);
+ //
+ // btnOK
+ //
+ this.btnOK.DialogResult = System.Windows.Forms.DialogResult.Cancel;
+ this.btnOK.Location = new System.Drawing.Point(290, 143);
+ this.btnOK.Name = "btnOK";
+ this.btnOK.Size = new System.Drawing.Size(89, 23);
+ this.btnOK.TabIndex = 0;
+ this.btnOK.Text = "OK";
+ this.btnOK.UseVisualStyleBackColor = true;
+ this.btnOK.Click += new System.EventHandler(this.btnOK_Click);
+ //
+ // btnOpenConfigDir
+ //
+ this.btnOpenConfigDir.DialogResult = System.Windows.Forms.DialogResult.Cancel;
+ this.btnOpenConfigDir.Location = new System.Drawing.Point(100, 143);
+ this.btnOpenConfigDir.Name = "btnOpenConfigDir";
+ this.btnOpenConfigDir.Size = new System.Drawing.Size(89, 23);
+ this.btnOpenConfigDir.TabIndex = 2;
+ this.btnOpenConfigDir.Text = "Examine Config";
+ this.btnOpenConfigDir.UseVisualStyleBackColor = true;
+ this.btnOpenConfigDir.Click += new System.EventHandler(this.btnOpenConfigDir_Click);
+ //
+ // ServiceStatus
+ //
+ this.AcceptButton = this.btnOK;
+ this.AutoScaleDimensions = new System.Drawing.SizeF(6F, 13F);
+ this.AutoScaleMode = System.Windows.Forms.AutoScaleMode.Font;
+ this.CancelButton = this.btnOK;
+ this.ClientSize = new System.Drawing.Size(391, 186);
+ this.Controls.Add(this.btnOpenConfigDir);
+ this.Controls.Add(this.btnOK);
+ this.Controls.Add(this.btnReconfigure);
+ this.Controls.Add(this.txtStatus);
+ this.Controls.Add(this.lblStatus);
+ this.Controls.Add(this.picBanner);
+ this.FormBorderStyle = System.Windows.Forms.FormBorderStyle.FixedSingle;
+ this.Icon = ((System.Drawing.Icon)(resources.GetObject("$this.Icon")));
+ this.MaximizeBox = false;
+ this.Name = "ServiceStatus";
+ this.Text = "Icinga Windows Agent Service Status";
+ ((System.ComponentModel.ISupportInitialize)(this.picBanner)).EndInit();
+ this.ResumeLayout(false);
+ this.PerformLayout();
+
+ }
+
+ #endregion
+
+ private System.Windows.Forms.PictureBox picBanner;
+ private System.Windows.Forms.Label lblStatus;
+ private System.Windows.Forms.TextBox txtStatus;
+ private System.Windows.Forms.Button btnReconfigure;
+ private System.Windows.Forms.Button btnOK;
+ private System.Windows.Forms.Button btnOpenConfigDir;
+ }
+} \ No newline at end of file
diff --git a/agent/windows-setup-agent/ServiceStatus.cs b/agent/windows-setup-agent/ServiceStatus.cs
new file mode 100644
index 0000000..c1272a9
--- /dev/null
+++ b/agent/windows-setup-agent/ServiceStatus.cs
@@ -0,0 +1,37 @@
+using System;
+using System.Windows.Forms;
+using System.ServiceProcess;
+using System.Diagnostics;
+
+namespace Icinga
+{
+ public partial class ServiceStatus : Form
+ {
+ public ServiceStatus()
+ {
+ InitializeComponent();
+
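+			// Query the state of the local "icinga2" service; an InvalidOperationException
+			// means the service is not installed on this machine.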
+ try {
+ ServiceController sc = new ServiceController("icinga2");
+
+ txtStatus.Text = sc.Status.ToString();
+ } catch (InvalidOperationException) {
+ txtStatus.Text = "Not Available";
+ }
+ }
+
+ private void btnReconfigure_Click(object sender, EventArgs e)
+ {
+ new SetupWizard().ShowDialog(this);
+ }
+
+ private void btnOK_Click(object sender, EventArgs e)
+ {
+ Close();
+ }
+
+ private void btnOpenConfigDir_Click(object sender, EventArgs e) {
+ Process.Start("explorer.exe", Program.Icinga2DataDir);
+ }
+ }
+}
diff --git a/agent/windows-setup-agent/ServiceStatus.resx b/agent/windows-setup-agent/ServiceStatus.resx
new file mode 100644
index 0000000..0eff4d7
--- /dev/null
+++ b/agent/windows-setup-agent/ServiceStatus.resx
@@ -0,0 +1,580 @@
+<?xml version="1.0" encoding="utf-8"?>
+<root>
+ <!--
+ Microsoft ResX Schema
+
+ Version 2.0
+
+    The primary goal of this format is to allow a simple XML format
+ that is mostly human readable. The generation and parsing of the
+ various data types are done through the TypeConverter classes
+ associated with the data types.
+
+ Example:
+
+ ... ado.net/XML headers & schema ...
+ <resheader name="resmimetype">text/microsoft-resx</resheader>
+ <resheader name="version">2.0</resheader>
+ <resheader name="reader">System.Resources.ResXResourceReader, System.Windows.Forms, ...</resheader>
+ <resheader name="writer">System.Resources.ResXResourceWriter, System.Windows.Forms, ...</resheader>
+ <data name="Name1"><value>this is my long string</value><comment>this is a comment</comment></data>
+ <data name="Color1" type="System.Drawing.Color, System.Drawing">Blue</data>
+ <data name="Bitmap1" mimetype="application/x-microsoft.net.object.binary.base64">
+ <value>[base64 mime encoded serialized .NET Framework object]</value>
+ </data>
+ <data name="Icon1" type="System.Drawing.Icon, System.Drawing" mimetype="application/x-microsoft.net.object.bytearray.base64">
+ <value>[base64 mime encoded string representing a byte array form of the .NET Framework object]</value>
+ <comment>This is a comment</comment>
+ </data>
+
+ There are any number of "resheader" rows that contain simple
+ name/value pairs.
+
+ Each data row contains a name, and value. The row also contains a
+    type or mimetype. Type corresponds to a .NET class that supports
+ text/value conversion through the TypeConverter architecture.
+ Classes that don't support this are serialized and stored with the
+ mimetype set.
+
+ The mimetype is used for serialized objects, and tells the
+ ResXResourceReader how to depersist the object. This is currently not
+ extensible. For a given mimetype the value must be set accordingly:
+
+ Note - application/x-microsoft.net.object.binary.base64 is the format
+ that the ResXResourceWriter will generate, however the reader can
+ read any of the formats listed below.
+
+ mimetype: application/x-microsoft.net.object.binary.base64
+ value : The object must be serialized with
+ : System.Runtime.Serialization.Formatters.Binary.BinaryFormatter
+ : and then encoded with base64 encoding.
+
+ mimetype: application/x-microsoft.net.object.soap.base64
+ value : The object must be serialized with
+ : System.Runtime.Serialization.Formatters.Soap.SoapFormatter
+ : and then encoded with base64 encoding.
+
+ mimetype: application/x-microsoft.net.object.bytearray.base64
+ value : The object must be serialized into a byte array
+ : using a System.ComponentModel.TypeConverter
+ : and then encoded with base64 encoding.
+ -->
+ <xsd:schema id="root" xmlns="" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:msdata="urn:schemas-microsoft-com:xml-msdata">
+ <xsd:import namespace="http://www.w3.org/XML/1998/namespace" />
+ <xsd:element name="root" msdata:IsDataSet="true">
+ <xsd:complexType>
+ <xsd:choice maxOccurs="unbounded">
+ <xsd:element name="metadata">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="value" type="xsd:string" minOccurs="0" />
+ </xsd:sequence>
+ <xsd:attribute name="name" use="required" type="xsd:string" />
+ <xsd:attribute name="type" type="xsd:string" />
+ <xsd:attribute name="mimetype" type="xsd:string" />
+ <xsd:attribute ref="xml:space" />
+ </xsd:complexType>
+ </xsd:element>
+ <xsd:element name="assembly">
+ <xsd:complexType>
+ <xsd:attribute name="alias" type="xsd:string" />
+ <xsd:attribute name="name" type="xsd:string" />
+ </xsd:complexType>
+ </xsd:element>
+ <xsd:element name="data">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
+ <xsd:element name="comment" type="xsd:string" minOccurs="0" msdata:Ordinal="2" />
+ </xsd:sequence>
+ <xsd:attribute name="name" type="xsd:string" use="required" msdata:Ordinal="1" />
+ <xsd:attribute name="type" type="xsd:string" msdata:Ordinal="3" />
+ <xsd:attribute name="mimetype" type="xsd:string" msdata:Ordinal="4" />
+ <xsd:attribute ref="xml:space" />
+ </xsd:complexType>
+ </xsd:element>
+ <xsd:element name="resheader">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
+ </xsd:sequence>
+ <xsd:attribute name="name" type="xsd:string" use="required" />
+ </xsd:complexType>
+ </xsd:element>
+ </xsd:choice>
+ </xsd:complexType>
+ </xsd:element>
+ </xsd:schema>
+ <resheader name="resmimetype">
+ <value>text/microsoft-resx</value>
+ </resheader>
+ <resheader name="version">
+ <value>2.0</value>
+ </resheader>
+ <resheader name="reader">
+ <value>System.Resources.ResXResourceReader, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
+ </resheader>
+ <resheader name="writer">
+ <value>System.Resources.ResXResourceWriter, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
+ </resheader>
+ <assembly alias="System.Drawing" name="System.Drawing, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a" />
+ <data name="$this.Icon" type="System.Drawing.Icon, System.Drawing" mimetype="application/x-microsoft.net.object.bytearray.base64">
+ <value>
+ AAABAAUAEBAAAAEAIABoBAAAVgAAABgYAAABACAAiAkAAL4EAAAgIAAAAQAgAKgQAABGDgAAMDAAAAEA
+ IACoJQAA7h4AAAAAAAABACAA6iUAAJZEAAAoAAAAEAAAACAAAAABACAAAAAAAAAEAADDDgAAww4AAAAA
+ AAAAAAAAv5UA/7+VAP+/lQD/v5UB/7+WAv+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+UAf++kwD/v5YB/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/76TAP/Alwv/xZ8Z/72SAP+/lQD/v5UA/7+WAf+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAf++kwD/7eG3///////Orj3/vJAA/8CWA/+9kgD/v5QA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAf+/lQD/v5QA//Hoyf//////0rVQ/7yQAP/AlwX/yKMj/8CX
+ Bf+/lAD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UB/76TAP/DnBb/zaw2/8WfHP+7jgD/vJAB/9a7
+ Xf/BmQn/vpQA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/vpQD/7qMAP/LqTT/7N+y/+na
+ p//EnSD/vZEA/8CWAv/AlgL/wJYD/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/8CXBP+9kgD/7N+z////
+ ////////5dSX/7uPAP+/lQP/vJAA/7uOAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lAL/uYsA/+3h
+ uP///////////+fYov/EnRX/xqEd/82tOv/Vulr/vpQA/7+VAP+/lQD/v5UA/7+VAf++kwD/xJ0S/8+v
+ Qf/NrTr/696v/+zgtP/Fnxv/v5UA/72SAP/cxXP/9e7X/76TAP+/lQD/v5UB/7+VAP+/lgL/vJEA/8uo
+ L//j0ZD/u48A/7uOAP/Algv/wZgQ/7uOAP+/lQD/vpMB/7+VBP+/lAD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP++kwD/vI8A/8CWA//AlgL/wJYE/+nbqf/awWr/vJEA/8CWA/+/lQH/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UB/8CWAv+/lQH/vpQA/8GYCf/7+fD/696w/7uPAP/AlgP/v5UB/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP++kwD/w5sS/8GYC/++lAD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/76TAP++lAH/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lgH/v5UB/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+ AAAAAAAAAAAAAAAAAAAoAAAAGAAAADAAAAABACAAAAAAAAAJAADDDgAAww4AAAAAAAAAAAAAv5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlgP/wJcE/8CWAv+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ Af+7jwD/uo0A/7yRAP+/lgD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/72SAP/Yv2j/6t2s/82tPf+8kQD/wJYC/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP/AlgP/u48A/9K0S/////////////n16P/DnBL/vpMA/7+WAf+/lQD/v5YC/7+WAf+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlwT/u44A/9W6Wf////////////v5
+ 8f/Fnxn/vpMA/7+VAf+/lgD/vZEA/72SAP+/lgD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQH/v5UA/7+VBP/k05b/9e/Z/9/Kgv++kwH/wJYE/8GYCP+7jwD/3cd6/9W6
+ Wf+7jgD/wJYD/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/76U
+ AP+8kAH/vJEA/8KaFv/Mqjb/uowA/7uOAf+7jgD/17xh/8ikJP+9kgD/v5YB/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAf/AlgP/wJYD/72RAP/JpS3/3sl9/+PR
+ kf/bxHD/wpoc/7yPAP/AlgH/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5YB/72SAP/o2qb///////79+v//////3cd7/7yQAP/AlwT/v5YB/7+V
+ AP+/lQH/v5YC/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlgL/vJEA/8uo
+ L////////v37//38+f//////+PTl/8KZDP+9kQD/vpMB/76UAP+/lAH/vZIA/7+VAf+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlgP/vZIA/8upMP///////v37//79+f//////+vbr/86u
+ Pf/Kpiz/x6Ig/8KZC//Alwr/x6Mj/76TAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP++kwH/uo0A/7+VBf/x58b///////79+v//////4MuF/7yQAP/EnRL/yKMk/8ejI//q3Kv//////9K0
+ TP+7jwD/wJYD/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UB/76TAP/Fnhf/2L5k/86tPP/EnRb/3MZ2/+jZ
+ pP/fyoH/v5UJ/7+VAP++lAD/vpMD/7qNAP/eyHz//Pr1/82sOf+8kAD/wJYD/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/wJYD/7uPAP/StEv//////8mlKf+7jwD/vI8D/7mLAP/EnR7/yKUp/7yQAP/AlwP/v5UB/7+V
+ Af+9kgL/wJYH/72SAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlgT/xJ4Y/7+U
+ Af+/lgH/wJcE/8CXBP+/lAL/zKo3/8SdFv+9kQD/v5UA/7+VAP+/lQH/v5QB/7+WAf+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/vZIA/7+VAP+/lQD/v5UB/7+VAP+/lAD/696x////
+ ///VuVf/vJAA/8CWA/+/lQD/v5UB/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5YC/7+VAP+/lQD/v5YB/76TAP/EnhX/+vfs///////l1Jn/u44A/8CXBP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+W
+ Av+9kQD/1rtd/+japv/GoR//vZIA/7+WAf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/vJAA/7qNAP+9kgD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/wJYD/8CXBP+/lgL/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKAAAACAA
+ AABAAAAAAQAgAAAAAAAAEAAAww4AAMMOAAAAAAAAAAAAAL+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQH/wJYD/7+VAf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/wJYC/76TAf+8kAD/vpQB/8CWAv+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+WAf+8kAD/w5wV/86tOv/CmhH/vJEA/7+W
+ Af+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlgL/vJEA/86tPv/69+3///////j0
+ 5f/KqDH/vZEA/8CWAv+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+WAf+9kgD/7+XC////
+ ///9/Pj//////+vfsv+8kAD/wJYC/7+VAP+/lQD/v5UA/7+WAf/AlgP/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/76U
+ AP/z69H///////z79f//////7+XC/72RAP+/lgL/v5UA/7+VAP+/lQD/vZIA/7yPAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP/AlgP/u48A/9m/af///////v37///////Wu2H/u44A/8CXA/+/lQD/v5UB/76TAP/HoyL/0LJG/76U
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQH/vZEA/8+wQ//dyHr/0LFF/8+vQf++lAT/wJYC/8CXBP/BmAj/uo0A/9zF
+ eP/38uD/vpMA/7+VAP+/lQH/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lgL/vJAA/7qOAf+6jAD/yqcv/86uP/+6jQD/u44C/7uO
+ AP+9kQD/0rVP/8SeF/++kwD/v5UB/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlgP/wJcE/8CXBP+9kQD/zKo3/9Cy
+ R//awm7/17xf/8+wQv/Ioyj/vJAA/8CWAv+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/wJYC/7yQ
+ AP/eyX////////7+/P///////v37/9CySf+8kAD/wJYC/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/8CW
+ A/+8kAD/0LFF///////+/vz//v37//79+v//////+PTm/8OcFP++lAD/wJYD/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/wJcE/7uOAP/k0pP///////79+/////////////79+///////0rRP/7mLAP++kwP/vpQA/7+V
+ Af/AlgL/wJYD/7+VAf/AlgL/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP/AlwT/u44A/+XTl////////v37/////////////v78///////fyYD/yqcu/8il
+ J//Cmw7/vpQA/72RAP+8kAD/vpQB/7yRAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UB/8GYB/+8kAD/z7BI///////+/vz//v36//79+v//////+vbq/8Wg
+ HP/EnRT/y6ky/8+vQf/Qskf/y6gx/93GeP/48+L/171g/7yRAP/AlgL/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP++kwD/uo0A/7yQAP/LqC//6Nqn/////////v7///////37
+ 9v/Tt1X/u48A/76UAv+9kQD/vZIA/7+WAv/HoiH/+PTm///////z69D/vpQA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lgH/vpMA/8ahHf/gy4P/1rte/8qnL/+7jwD/zq8+/+DL
+ g//cxnX/1rth/7yRA/+/lgD/v5UB/7+WAv+/lgL/v5UC/7uPAP/eyX7//Pr0/9rDb/+8kQD/wJYC/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/8CXBP+7jgD/2cFr///////hzoz/uYsA/8GY
+ Bv+8kAD/u44C/7iJAP/JpjL/y6gy/72SAP/AlwT/v5UA/7+VAP+/lQD/v5UB/72RAf/Alwj/vZEA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UB/76TAP/EnRX/3MV2/8ah
+ H/++kwD/v5YB/8CWA//AlwX/wJYC/8CXCf/Rs0z/u44A/7yQAP+/lgH/v5UA/7+VAP+/lQD/v5YC/7+U
+ AP/AlgL/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/76T
+ AP+6jQD/vpMA/7+VAP+/lQD/v5UA/7+VAP/AlgL/u48A/8yrQP/eyX3/zKo2/72RAP+/lgH/v5UA/7+V
+ AP+/lQD/v5UB/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UB/8CXBP+/lgH/v5UA/7+VAP+/lQD/v5UB/7+VAP+/lQL/7uO+////////////0LJG/7yQ
+ AP/AlgP/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lgL/vZIA/8agHP/9+/b//v35////
+ ///fy4L/uo0A/8CXBP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lgH/vZIA/+fX
+ of//////+vfr/8qnLv+9kQD/wJYC/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP++lAD/vpQF/8yrNf/Fnhr/vZEA/7+WAf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQL/vJAA/76TAf/AlgH/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlgP/v5YB/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP8AAAAAAAAAAAAA
+ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACgA
+ AAAwAAAAYAAAAAEAIAAAAAAAACQAAMMOAADDDgAAAAAAAAAAAAC/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+WAv/AlgT/wJYD/7+VAf+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/wJYD/72SAP+7jgD/vJAA/7+V
+ Av+/lgL/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQL/vI8A/8ij
+ JP/Tt1T/zq49/76UBf+9kgD/wJYB/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+W
+ Af+9kQD/38uE///+/f///v7///////HoyP/Fnx3/vZIA/7+WAf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/wJYD/7uPAP/Wu1////////7+/P/+/vz//v37///////t4rv/vZIA/7+VAf+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/wJYC/7yRAP/u4rv///////79+//////////+///+/f/+/v3/yaYs/72R
+ AP/AlgL/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UB/72SAP/x6Mj///////7+/f/////////////+
+ /v//////zKs4/7yQAP/AlgP/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/wJcE/8CWA/+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/wJcE/7uOAP/izoz///////38
+ +P/+/vz//v36///////48+P/wpoO/76UAP+/lQH/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lAD/uo4A/7yQ
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5YB/76T
+ AP/DnBb/9O3W/////////v3//v78///////XvWj/uo0A/8CXA/+/lQD/v5UA/7+VAP+/lQD/v5UB/7+U
+ AP/AlwX/3MV1/9GzSf+9kgD/v5YC/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/8CWAf+9kgD/wpoT/9/Jf//s4LX/5tWc/9GySf/XvWT/v5UH/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/wJYD/7yQAP/NrDr///////Pr0f++lAD/v5UA/7+VAf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlgH/vpQB/7qOAP+8jwD/u48C/7mLAP/OrkH/1blc/7uO
+ AP/AlwP/v5UB/8CWAv/AlgL/wJYC/72SAP/FnyD/7eG5/9O2Uf+9kgD/v5YC/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UB/8CXBP/AlgP/wJYE/8CX
+ Bf+7jwD/2L5m/8qoM/+8kQD/vpQC/7yQAP+9kQD/v5YC/7yQAP/XvGP/xqAl/7mMAP/AlgH/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAf++lAD/wZgP/9rCcP++lAf/w5wT/8yqNP/Kpy3/vZIA/8ijLP/WvGH/vJAA/8CX
+ Bf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlwP/uo0A/86tSP/y6cv/+vbq////////////9e7X/+nb
+ qv/Alxb/vpMA/7+WAf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/8CWAv+9kgD/x6Il//fy4P/////////+////
+ //////////7+///////l05j/vZEA/7+WAf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAf+9kgD/7eK8////
+ ///+/fr///7+/////////////v79//79+///////1bpe/7uPAP/AlgP/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/wJYD/7yQ
+ AP/Nqzj///////7+/f////////////////////////////7+/P//////8ObD/76TAP/AlgP/v5UB/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/wJcE/7uOAP/ZwGj///////79+//////////////////////////////+/v//////+fbq/8CX
+ Ef+8jwD/v5QB/7+WAv/AlgP/v5YB/7+VAP+/lQD/v5UA/7+VAf/AlgT/wJYC/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/wJcE/7uOAP/ZwWr///////79+///////////////////////////////
+ /v///////Pr0/9W5WP/IpCb/wpkL/72RAP+8jwD/vZEA/76UAP+/lgH/wJcD/76UAf+7jgD/vZEA/8CW
+ Av+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/wJYD/7yQAP/Orj7///////7+/f//////////////
+ //////////////7+/f//////8unM/8qnLv/Ut1T/2L5l/9e8Yv/Rs0v/yaYr/8KaDP+9kgD/u44A/8Kb
+ E//UuFP/yqcu/7yRAP+/lgH/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UB/7+WAf++kwD/7uS/////
+ ///+/fn////+///////////////+//79+v//////2L9q/7iKAP+9kQT/vpMA/8OcEv/LqTL/07ZS/9i+
+ Zf/XvWH/0rVP//Tt1P///////////9W5WP+8kAD/wJYD/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/8CWAv+/lQH/wJcD/76U
+ Av+8kQD/3cd5//z79v/////////+//7+/P///v3//v79///////q3K3/vZIA/8CWAv/AlgP/v5UB/76T
+ AP+8kAD/vI8A/76UA/+/lgL/4MyH///////8+vT///////Dnxf+9kgD/v5YB/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5YA/72R
+ AP++kwH/u48A/8KaEv/cxnf/yaYt/8eiJP/u5L7//v79///////+/v3//v38/+TSk/+9kgL/v5QB/7+W
+ Af+/lQD/v5UA/7+VAf+/lgL/wJYD/8CXBP+6jQD/0bNP///////+/fr//////+rcrf+8kAD/wJYC/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP/AlgL/vZEA/8yqNf/z69D/6duq/9vDcP/GoCD/vJAA/72SAf+8kQH/yKUm/9GzSv/OrTv/yKQk/9W5
+ Xf+9kgD/wJYC/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQH/vpMA/+DMiP/59en/7+S//8ah
+ Iv+9kgD/v5YC/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP/AlgP/vI8A/+rdrv///////////9S4XP+5iwD/wZgF/7+WAv/AlgL/vZEA/7uP
+ AP+9kQP/uo0A/9W6Xv/KqDT/vJEA/7+WAv+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UB/7yQ
+ AP/BmAf/vZIC/72SAP+/lgD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlgP/vI8A/9zEdP//////+/jv/8qnLf+9kgD/v5YC/7+V
+ AP+/lQD/v5YC/8CWA//AlgT/vpMA/8SdGv/Zv23/vZEA/8CXA/+/lQH/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UB/8CWA/+/lAD/v5YC/7+WAv+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/72SAf/NrDn/x6Mk/72R
+ AP+/lgH/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/wJYC/7uPAP/Wu2H/yaYv/7qNAP+/lQL/wJYC/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQH/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ Af+8kAD/vZIB/7+WAf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UB/76TAP/Cmxn/171k/8ah
+ Hf/CmQ//vJAA/7+WAf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP/AlgP/v5YC/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/wJYC/72R
+ AP/LqDP/+PPj///////59ef/0rVS/7yQAP/AlgL/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQH/v5UA/8CWBf/07NP///////7+/P//////+/jv/8agHP+9kgD/v5YC/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lgL/vZEA/8ilKP/9/Pn///79/////v/+/fn//////9K0TP+7jwD/wJYD/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQH/vpQA/8KbD//48+T///////79+v///////v38/8mm
+ K/+9kQD/wJYC/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/wJYD/7yQAP/VuVz//v37////
+ /v//////3sh9/7yQAP/AlgL/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+W
+ Af+8kAD/x6Ij/9S4Vv/Kpy7/vJAA/7+VAf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP/AlgL/vZIA/7uOAP+9kQD/wJYC/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5YC/8CWBP/AlgL/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACJUE5HDQoaCgAAAA1JSERSAAABAAAA
+ AQAIAgAAANMQPzEAACWxSURBVHja7Z15fF1Xde9/a+9z7qDharImS/I8JrZjO4mHzCEUAiGEkhJCBwih
+ rw2EqZQOD0qB5qWE1xI+vEfyQmlLKVAIJKShJcEhITij49gZPM+2bGuer3Sv7r3n7L3eH1uSHQ+JbEu6
+ 5/rs78efxNZH0j1XWt9z9rD2WoT7nobFElZEvi/AYsknVgBLqLECWEKNFcASaqwAllBjBbCEGiuAJdRY
+ ASyhxgpgCTVWAEuosQJYQo0VwBJqrACWUGMFsIQaK4Al1FgBLKHGCmAJNVYAS6ixAlhCjRXAEmqsAJZQ
+ YwWwhBorgCXUWAEsocYKYAk1VgBLqLECWEKNFcASaqwAllBjBbCEGiuAJdRYASyhxgpgCTVWAEuosQJY
+ Qo0VwBJqrACWUGMFsIQaK4Al1FgBLKHGCmAJNVYAS6ixAlhCjZPvC7CcFkEQRACUZvMRIhIEZmhmzvfl
+ nR9YAQIHAZJIg7Wvta8BwBEgAsBaaV9DCjhCCsGAtiKcG1aAYCGIwOxnPUixeFrJmtrEBZXF88rikghA
+ Mudv60293jP0YnuyP5khKaQr7dPgXLACBAhJpDw/IuV7F9ffvrj+7Y0VrqATPocBAlpS2R/v7fzXnW07
+ O5LCdUiQfRScHYT7ns73NVgAE/1Zb0ld2f1XL7iyvowZRNDMms3wBwBMkEtBAAjIKv21Vw7fs+lQ1tfC
+ kdaBs8AKEAgkQWXVx1c03XvFvJgUihmAIKLTfL5maLAACcLLnYO3rtt+oDclo87YdNkyTuwyaP6RRCqr
+ PnfprPuvXhARQjFLInn66AcgCA4REXzNl9aUrnvvRXMri1VOiTf7IsspsALkGTPy+cSKpm9cPtfc+OW4
+ g5gAR5CveV5Z/LEblzWURrXSVoEzwgqQTwSR8vzldWX/+/K5AAgkzjx8jQMLyou+eeV8aJ3v91RgWAHy
+ iWaGlPdeOb/YkYr5LKLf4AhSzB+YV3Pr4nrOqfE/QyxWgLwhiZDzb54z7dqGcs18jlFrvvgLK2e4UUdp
+ tgaMEytA3tBgSPHhRXUAzn3tRhAxY2lVyfVNlfDsbHi8WAHygyCwrxdWlbxzRiWACYlXDQZw6/waCOIJ
+ cCoUWAHygyCCr1fXlEal0DwxIxYCAbisriwWdbQdBY0PK0A+ubCqBMBEbV6Zp0h9caShJArNdhA0HqwA
+ +UExwxFzEjEAExWpBDAQlWJ+WRyaCdaAt8YKkE/Oet3zzbHLoOPHCpAnTFrb5ESqnf+OHytAfhBE0NyT
+ 8TGa43nu8OhuwEDOB1kNxoUVID8QAUrv6EtN+Hfuy/o7+9IQwp6TGQ9WgPzADEixpScFwJmgqYCJ+F19
+ 6d6MN1nTi/MOK0B+0MxwxIvtAweSGUzQSqj5Jk8c6UXOd4js/X88WAHyAwNSiqGh7IN7OzERZ9sZcARl
+ lP7Rng5Ioe0UYHxYAfIGM8MR39/dlsz5jjjXG7Y5C/bw/q693UPCkfZk2DixAuQNzZCO3N05ePfmZhxX
+ /OfsvpUjqD2d+9uXDkCQXQgdP1aAfKKYKeL8wyuH17f2m3MtZ/FNGDCpb1/YcOBAd0q69vZ/BlgB8gwR
+ seY/emLHjr60I8g7w+DVDHOW4O83N39vW4uIufZc/BlhBcgzmlk48khy+F2/eG17X9oV5I+v0hWbhCJA
+ Ev39K81ffG6frYxyFlgB8o9mlq5zOJm59uev/GhPhyNG0tg0s3FhLKh5NO7HQr83633kqV1ffG6fcKVN
+ gD4LJG64Ld/XYAEDQorhnHp4T8fr/em1tYnyqMMgUxpoLLDN34lIEGV8/ZO9nR9ct/3ZQz0y6rKd+Z4V
+ tjRiUNDMQgoI+u99XXetmg3gpY5ksSsWVRRHRrd1NaMv6+3uH368ueeHezsO9aRIkIzbcf/ZYwUIEIKg
+ s/5ty5uWVBYfHcre9NjWrmFvdllsTiIuCJKoN+Pv7k8nsz7nfDhCRByAbfSfC1aAoECAr9iNuXdcOB3A
+ d3e0dQ0My5h7sC99sCd17JOEgCAn5mqwnfKeO1aAoCCJ/Jz3ewumX1xd2jmc+5cdrXAlA2JsUmwmwcwA
+ fBv6E4QVIBAQ4DOLiPPJJQ0AvrezvaUvfdzg3ob7ZGGXQQOBIELOf9/saZfVl/Xn/H/a3grHJvRPBVaA
+ QKCY4co7lzUA+Pdd7Qd6hqRNaJsSzhMBRhfIC7IQgiRCTt0ws+ptDRVpXz+wtRVS2OCfGgpeAEEwucSs
+ mRUzsykaXkCFETQzHLpzaQOAH+3p2NmVlDapYaoobAEkkfa1P+wJoCzu1hRHSyIOmP1hj9W5lpudsrfA
+ nnp7U+W7ZlZ5mu/fchTC3v6njkJdBSJAEFTWm11VctviuutnVM5LxGOOTPtqW0/qv5t7/m1nW89gVkYd
+ FexbqQaD6BNLGwD8ZG/Hax1JEXHs7X/KKNQeYQLQvv7kiqa7V89ORBw+LmHG/L0tlbvzmT2P7OkQkeDO
+ JgWRzvlXNFY8e/NKDVz2s80vtfUL1wowdRTkEEgQtK/+4cr5//fK+SWu4+tjWZOmd7Riri+O/PxdS+5Y
+ 3qQD3jmLcOeyRgAP7eu00T/1FJ4Akkhn1ceWNn5+RZM5QuWIkZZyNNpmXRJpZmbcf/WCa2dW6ZwfwPmA
+ INKef2l92S3zagDct6XF7ndNPQUmABGU0tMSsa+umgVA0GmbagkiDSbg79fOIVeqCSpBPsEwPrm0URAe
+ Pdj9TEsfReziz1RTYAJIInjqQ/NrGoqjvn6LplqSiIE1tYm3N1YErWmKINKeWlabuHVBLYD7t7RAsyjI
+ bYzCpsAEMFXFr2kox/iqiptcmt9pqgxavXwCoPUnljZEBK073PvEkV7zmMr3dYWOQhKAANZwXbmgvAij
+ DVHe4ksIAOaWxeCI4ISXIChfLawu/cOFtQDu29oCXwfqARUeCkkAAAALopg8s8uOSQEKULlkAkHpO5ZM
+ L3bkb1v6/+tQNyL29p8fCk0AopzS3RkPwHiShE1QdWU8KB2Q7AgiKF/Nqiz+yKJ6mNu/Zzv75o1CEoDN
+ vDanXusewpkUlN3ak4LSAamXbNrj/cmF0yuizovtyZ8f7ELBbv2aRWdHkCAigiA4RIWViFVIAoxAeGR/
+ F8bRWtTUi+3L+j/Z0xGQICOC8vX0iqKPLq4HcN+2Fp0tyErOJvSZWWV9P53Tns++1p72M54/7LGvBVFB
+ zGoKLBdIMZMrnzjc+6vDvdfPqPQ0u6e/sZuSaYIwv7yopS9N0iXK8yETQaQ8//bF9XVFkVe7hn66rxNu
+ 0LOVTvkuWGuV8+PxyNtmTVtZXbqgPF7kSE/r9nRuc+fg+raBw6ZiRUTqINx4Tk+BCYCRWoL6c8/uveh3
+ V9QXRTzNzkm3GmZosCRSzGUR5/Ebl3362b3ffe2IjDhMNIG/ERq5pOP/xYyR6Qmf9MnK11WJ2B9fUA/g
+ /u0t3rDnxN2zKwmaL6QglfVLYu6nLp71xxfUzUnET07EGvLUQ/u7/vG1w9vbkjLm6AC37S68wlimsn7n
+ YOaZ9uS7ZlVVRB0wGOwzmEcmBgQSRBs7B9/72Na6osiSyuIbZ01LxCO/bu7RzFKKc1GAAEEkBQHEAJjZ
+ 1GobqeTG0GyO58iRTxvBEaSz/icuarplXs2O3tSnn93nBWhpalxIIpX1Lmuq/MV7lt46v6Y86gJQmk2I
+ 8+jELCLFimklty2qG2RsaO0XIrgnlQpPABgHHHm0P/3jfZ0VMXdRRVFECkEkiUzVtL6s943Xj/7pb3Yd
+ 7ks9fKCrOOpeVpdYW5e4uLbsicO9qWHPOasSypJImCwjX+msz2AhZYkrG0pjjaWxWWVFDaWxRMyJudJn
+ 9jVrT2lPMQBBriBfcWnc/c41C6ti7l2bmp9v7nEiTgHd/SWRyvnvmV/76LuXmp14GslGecMfMwVWml1B
+ 755ZVVkUefxAtzjnBgiTRKGmQ8OMRJVmX8+aVnxdQ8WiiqJiVw5k/a09qSeO9HYnh8mVQgpoVjn/Y8ub
+ /s8V84ocuaMv/aF127e0DThxV417MCSJGKxzCpqjRZEV00rW1CUurSldUFY0szSWiEhB5BAB8Ji15p6s
+ dyCZ2d2f3tiRfL49ubs3hZxSvvr4qln3X7Vgf3L4kp9u7s96VDjTXxP9lzVWPHnT8rgjFL/1eSNTudoR
+ dPem5r95fl8wzzkUsAAYaTRN2lfwNQAQQWsQwZVSCpMjTQQBUhnvqllVP/ydC5pKor1Z749/s/uRXe0y
+ 5ui36lJq1jp0zocUq6aXf2Bu9XtnTVtQHh//RSrm17uHfrq/61eHe//9ukXLqkr+54YD92w44MQKZvRP
+ ADOXRpxn37/ioqqS8US/gQFmCMLbH33tqUM9MhK4GX9hC2AQNLIkyjwyHz351u4I8jPejMriH7/jwsvq
+ Er7mL208dM/GA0IKCHHKO5N5vqucL6S4fva0zy5rvK6xYmxIr5jNr/bUi94MBmsG0bFu2DmlI1J0Zbyl
+ P9jQkc45UWf8j6D8IolUxvvrtXO+tmaOr/mM2loaWzZ2JNc+/Io2LuX77bzhrRXiHOAEeORpe+wvJ6MZ
+ 0pX96dxP9nY0lcZXVJe8vbFiTnnxukM9OV9J58RpsSAihs75FzdU/PAdF3zx4plzyuJEJrvObPqMjHfH
+ ziG84Q+N1HAWRKacmwYcIQDEpZxeGnu1N9U7mJFSBH8URIBmLolHvnPNwsqYC5zZPpf5CTSWRDe0D+zr
+ SUlHBur9ng8CjBNmCCl8rR/Z25FlumZ6+YrqkmubKp5s6e8fzB4/LZaCtKfijrjnqgXfvWbh3LK4GU2N
+ Bv2Zve5YQXPzdxCWVZXcvrh+GHixtR8MEexT8Gb3/XdmVX5yaSPjbBoQK82CKKv5F/u7RMAqvhTgTvA5
+ oJlZkHTlPS/uv2Xd9t6sd1ld2XPvX3nVzEp/2JNERJCCVMa/oLpk/c0rP3dRIxEUs1liOsdXp9H/KuZi
+ V37z8nmP3risuiiiPV8GJE/jlJdNAPPq2jKcbSc/8+Yuri41c4BAvdVwCQCM7BU4cfeRXe3XPvLajr5U
+ U0n08RuXfWxFk8p6kqGGvXfPq37h5pWX1JT6emJC/wTMAU5f83tnTXvx91YuqUmojDdR/eInHM0MKeaV
+ xTG+MxgnYx6asxOx8qgzMT3BJ47QCQCAAV+zE3e3dCSv/vmrv2zuLXLkP1+z8N5rF/lK/9HShv9899Ky
+ iKP4FHvMEwURTFvIOYn4b25afuWMKj8T0OcAM0CIn2EK+sm4QsSlADhQm2JhFMDga5ZRp3vYu+m/Xr/3
+ 9aMA/uyixg0fvOTbVy1wBWnGFKQoO4IUc3Xc/cV7lq5uLFcZL5gOADj3qNXMGaWBYO1+h1cAmMmZFBD0
+ 57/dffvTu9K+Xl2TKHHF2U31zg6TsFQecR5+15JF1aUqYEVcCHAFQXPzYAZvtWdyWhgA+rJ+2teBuv0j
+ 5ALADHAFQdCzh/sySgM424Hu2WMcaCiO/sc7L0xEHa11fhUwGyAmy5+Zta+Rzr3ensTZ3hc0GMDLncl0
+ 1qOA5USEXQAyWSuO+N47LqiMOoo5L2MQSeRrXjGt5B+umAdfT33u2IlB7yl/OKdzvivF4uqS29fMvmFu
+ teYz3AJ4I+tbB+DroJ19K7x06IlFCFIZ74uXz7uivmz8O/yTgZkP/MmF0399tO+hnW1y8nu+j25QQDNr
+ zexr7Ss4Ihp1F1cXralNrKopvaK+fG5ZfOymcBYXZM7xtaVzP97TgeBVvgi1AIJI5dTS2sTnVzRhHEfM
+ Jhtz479r9exfH+4dyPmTsUl8UtAr7Ws4MhZzLqwpXlNngr5sdiI+9rMY9vWr3YPrWwba0tm7Vs8ue2Ml
+ 1rdEaXYEfXtLS1dyWEZdK0CAML+Kv1s9p9iRPrOTbwEEQTEvKi/63PKmLz+/T0xQuJjhjdnR4+OCPh5z
+ ltaWralNrKotvaK+bGZpbOxLUr7a1Dn4YntyY2fypY7BtlRW5JTy/GJXjqUDjeeHZY7srW8d+PorzQE5
+ lXoC4RXAVGa+vLHifXOmAch79I9dFYA7ljQ8sK21LZUlSWcXM8eCXjMzK09BabiyOOYuqypeU5tYVZO4
+ or6ssSQ69iWDnnq5M/lie3Jjx+DGzmRHKsc5HwQ4EoIo5rhR556XDzWVRD+xpMFnNseCT3cBzFDMrqBd
+ /enbfr1DKS0C2fUjvAIYTFuu/I7+j8ckStTE3Y8srrtnwwEhXTXugfcbgl6zUiNBXxJ3l08vWVObWFWb
+ uLyubHpxZOxLkjn/pY7ki+3JjZ2DL3cOdqazyKmxoHfirjlcCoavmQgkxCef3jPk6b9c0QRAM2vGG/Kj
+ eGTNR4AcQRs6kh94fNvRZEa4QYx+nB/p0GeBIGhfz6ko3nbrpXFHnNGgdrLRDEHY05++6MFNGV+/eUWv
+ E4IeSkNpRJxEzF0xbTTo6xO18WNB35f1N3QkN7QnN3YmN3UOdg/nkFNmaxqCHEFjQX/y6xJBMFTOv35e
+ zdfXzllWVXLyj858ZCDnf/3VI9/Y3JwL6r3fENIngCDSvr5p9rRxHm6a2msDAwvKi66ZXv6r/V3ipCY3
+ p7jTa4Yry+PuxdWlq2pK19QmLqsvmxZzx76kJ+O92J7c0JF8qSO5uWuwb9iDNxL0JEiO3umZ8eZndJih
+ ARF1f7W/68nDvTfMnva7s6ddUlM6KxEjkADah3M7+9KPN/c8uK+zc2BYuE6Qox/hFMAkoiEib5pdle9r
+ OTVm5eSmOdN+tb8Lo0nUAscFva/ADFdWFkUuqS5ZVZtYU5tYW5uoPC7ou4a9F9oHTNC/0j00YIJeEORx
+ Qc9sMqPGf23meIPJ63x0d/ujezrcqFMedWJSMNCd8TI5BU/BlTLqauaAdzsOowBmoN2YiF1aU4oArH6e
+ jLmktzWUx+JuTjMAVqx8HwxE5LTiyKXVpatqE2trE6trE+XRY7/E9nTu+faBlzoGX+oYeK07lRzOwVcQ
+ 4hyD/mRMVrOMugA8xV2p3EiahCDzQqZQRr5/kG9NGAUQIOX7a2sTRY40A+6gYZSclYjPKors6kiKokh1
+ cXRVbemqmtK1dWWrakoTkWO/uJZU9vm25MbO5Ib25Os9Q0MZD56CFJCCpJCunKigPwE21erN1RKNXDRj
+ wl9oUgmjAESA5pXVpQA0cwCfAObgbETQncub2tK5dzZVrqwuKXHl2CccGco+1zZggn5Lbyo9FvSOIGcS
+ g/6U8Bv+V2CEUQDNDFfOScQw9Ylv48Zc1yeXNph/MnBoMPNc28DGjuSGjuS23tRwxoev4AjINwY9wy+E
+ sUdACJ0AZNYZHTnXHHHK9/WcDrOY2Jv1/+tQtwn67b3pbNYEvYQk4QrTAZZt0J8DoRMAAJijjqiOuxhf
+ m5m8YCr7buocvO3x7WAmKfiNQW/KTuX7MgueUAoAOEQjQ+qAxv8IRY4QUSmIzPDGBv2EE0oBGBEpIiLQ
+ ZyGMmEWOJJCpwmkDfzIIdBBMHqa5fL6vYhzXyUEuLX4+EEoBCD5zRp2qhn9gMNc1mFO6cNbUC5FQCgB4
+ mvuzHoIb/yMMegp2/DOZhE4As3WZ89VImYOghpYZoB0azATwHO35ROgEgDnG4euDySzOus7HFEAAcCiZ
+ AQerktR5RhgFIAKU3tGXQoB3gs0Jte29KZxbQyfLmxNGATQDjnyxPWnKvwUwuswl9WS8V7oGIUVBLFgV
+ KGEUgJkhaVtvqiWVBYI4wTQRv7Ej2TscuEpS5xmhFACQQqTTuceaezCa0xtA1h3pg6eCWy30vCCMAozx
+ 6MFuAEGLMFNJKu3rRw92w7Hjn8klpAKYjOgnj/Zt6RkiBKtmvYn4x5t7DvWlyRGBurbzj5AKwIAjyRv2
+ vrO9FQHbDTAHdB7Y1gJmYVdAJ5mQCgDT7Scif7inY39yWBIF5EZrzto+daTvySN9Aaykef4RXgHMVDg5
+ lP3a5sMIzEPA3PLv3nwIym4ATwXhFQBm/Sfi/MuO1vWt/aZIf36vx1Rf+9edbU8f6hXB6yl9XhJqAWAG
+ 3Er/xXP7Ur7K76aYZnYEHUxm/nbDQTgiIE+k856wC6CZZcR5uaX/r17YD+TtvBUDJvvn4+v3tAykT27c
+ bZkkwi4ATKewmHPfa0e/t6vddGrJzzUQvvTSwXX7O2V00ltjWMawAgDmBizFHb/Z9cvmXtO9dCpf2pTb
+ //bWlv+14YCM2qH/lGIFAABmCEE5zX+wbtv61n7jwBSEIfNIGdDv727/1Po95Eqd7x9F2LACjKCZhRQD
+ OfWeX7z+nwe7HUFK86Q+CTRDgx1B39py9LYndggiUIDPJ5ynWAGOYRxI+fqWx7Z+e2uLI86lJ+JbYFoR
+ a8bnX9j/2ad3SylY2OjPAxI33JbvawgQDJAQAH65r2v3YObahvIiR2pmPUFFpBkjJTsF0e7+9Psf3/7g
+ jlYZdbS99+cJ+wQ4EWbWgBNzfrKl5d2/2OJrFkSSyJQfPOsoNT2zcFxfra3dQ88f6nZirmYb/XnDCnAK
+ 2JwVdsUfLqp1BO0bGH7iSB8RHCLTw8vX46rXw4BmU4MIRJBEmvkHu9s/vn5PTunfm1dz0+J6P+MFsDx1
+ eAhlZbi3QgpSGe/mRXWfWtYI4C9f2P/Irva3za3+H4vrr59RWR51xnI0x+7rAEaaefGxbzJaN58I6Ejn
+ HjnYff+21q0dSXj+yurSOy6c/uVLZz15uDflq8loCWwZDyFtkvcmCCLtqxmlsfU3r5xVGrt/a8udT+2S
+ UUfnfAZqy+I3zqy6trFibW1iZmn0zW/enua9/ekNHYOPH+759ZG+gaEsBMmIVL5uKI6+8IGLZ5RE/+al
+ g3e/sF/G7eZXfrACnIggaE8/dMPSm+dWv9YzdO3Dr/Z7PgkSIAa00vAUJMWi7oLy+PyyorllsbqiaIkr
+ S1ypmNO+GsypI0PZ5sHMzr70vuSwyvqmn5cUwkyCpSA17P3pyqYHrl7Yn/Uvf2jzjt5UwJvJna9YAd6A
+ Cc07Vsz4f9csUMzX/edr6w/3yuMSM0eH8tBaQzG0hhngC9MjiEcG/swQApIghBkLqTdOoAlgxm/et/za
+ hvKf7eu85bGtwg1iI/XzHjsJPoYgUjm1pDZx99rZAO7e1Ly+ueeE3AQemdQyEQlXyqjrFEWcuCsjjnCF
+ cB0ZcZy46xRFZNQRjiCCYj55+UgQwVdf3XgQwAfm1bxnbo3O+vYAwNRjBRiBAGYWUnzr6gWVUXd968Bd
+ Lx9CxDldbVozmDErQr5mNVq8/4SPnO6WrphFxFnf3Pudba0AvrpqVlHcVVpbA6YYK8AIQhDn/C9cMvNt
+ DeX9Of8zz+zxfSUmc3GGwXDE3ZsOHRnKrqwu/exFjcgpEbASFec9VgAAkEQq61/RVPmlS2cB+NKGg6+3
+ DcjI5A7KmSEdcaQv/bXNzQA+v2LG4ppS5Sm7LTCVWAFABKV1Iu5+66oFEUGPHuz+9utHRMyZgnVJpRlR
+ 54Htretb+yuizldWz7Z7wlOMFQACBE/dtWbOyuqSI0PZP3tmL4Apax4midhTX3npIIBb5tXcMLfazoan
+ krALIAWprHfj/NpPL2sE8BfP7zvYl5Lu1C3Jm9nwb5t7/2m7mQ3PjsddpdkaMDWEWgBBUL6enojfe8U8
+ AP+yo+3BXe1TfyKRwZB098uHjqayF1eXfuaiRuR8OxueGkItAEDQ+h+vnD+vLL69N/XXL+yDFHrKyzEw
+ Q7ry8Ohs+C9WzFhYXap85QiSRI6dFE8m4RVACtIZ7/YlDR+aXwPgM8/u6x7K5ascw8hseFvrM639lWY2
+ nFUq56uc7w97zGwtmCRCeiBGEGlPLaou+eE7Lix25T2bm7/7+hEZy2dGmiTSvtqbzPzRwrolVcWN5UXv
+ nVP9/vk1RTHn6FBuOOsJKe0K0YQT0nRoZoagb121oCbuvtA+8JWNh+A6+W1IqgEQ7esbbk/nmkqiH1tc
+ LwgM3Lao7shQ9g9+vePZw71ikrcmQsh5MgSi0T/jQQrirP9XF894R1PFkKc+88zerOfLvDZiIYA1R135
+ H9df0FQSVcya2desNCvmppLouhsvunZmlc7ZFdIJpoAFEASHyBFk7pQ8ehZFjHzw1JFiNn1XN1Z8+dLZ
+ AL688eCmln6Z70KcUhBy/ueWN72tocLXbOa+5o+p1RV3xN+tnm3qRVsDJpDCE4AAszCife1nPT+d0/5o
+ DhkzmHXO99M57fnMLImOnz4SoLQuijrfunpB3BGPNffc+8phEZ2KTd83f0e+Zhlzb55bDeDk9U9HkGZc
+ UV/2zqZK2FyJCaWQ5gBkMpa19od9OGLhtJKLp5UuKI9fUFncVBL1NeeU9pnb07lNnYMbOwe39aZSqSyE
+ EK6EqXpCpHL+3102b3VNaVs691mz6Ut5LslABPb1rMriCyuLAZzy0cVggOaWxaGZbOf4iaNgBBBEYFZZ
+ L1EcvXFh3YcX1l7bUOGeZrfowwvrGGhP5R7c3/nA9tbdnYNEFIk6ueHc9XNr/nxFE4C/emH/3u6hIJxF
+ NM3rM75+kwkuM0CoiDqwQ6AJpTAEkETKU0LQHStn/O3FM+uLowyY3l6amWhk+mvCh0dLMNQXRz67rPGO
+ C6c/erD7rk3N21v66sqLv3nlPAD/vrv9B9tbRV7XPY/BAKE/6w/mVJEjT/kpZtiztScFYbuGTSQFIIAk
+ Ujn/gprSb1+14NqG8uNrSwk6Vb0qAkZLmzA4KsUH59XcMLPqzmf2Xj29bFF50Z7+4b98bh9kUGrwm141
+ qeHcg/s6P72s0dd8wpNNMUuiw0PZp1r64Nq+kRNJ0AVwiPyM94EL6r9/3eK4IxQzgcYzCzQlSQBiQDEX
+ u/L71y0C4Gn+1LN7OoayeV/5OR4GQ9C9rx65ZV5NXVHE0ywINLI6OiL5VzYeTKVzditgYgn0KpAk8rPe
+ h5c1/vgdF5jol0RnmiRGphgbwxQ9PziYebF1AFIEqg6zZghHNg8Mv/u/txxIZlxBBBIEMrlAhC9sOPC9
+ rS0iYitHTDDBTYWQglTW/+iyxn+7bpG5kZ/LHhARBJFmnhZzl9eUPryvy9dMQdKfASFF28Dwzw50dWb8
+ uCMU48DA8JNH+z7y1K6H97SLiLSj/wknoGVRzLj/kvqy9e9fWeQIzZio7GDzGPnezrbbn9gRwOGEIGKl
+ OefLqOs4Iqc15xQItmjKJBGke+AoZrsqHnXuu2ZhkSMU8wTmxptukB9dXP++BbU66wUts0AzQ5JTFNGE
+ rK+YISKOjf7JI4gCCCJ4+kuXzlpVU2pu2BP7/c2q6dfXzq0qjSmlA6bASOkhYKSoqGa20T95BE4AIihf
+ NVUU3bGkARNUlf/E90zwNS8oj39ueRN8Jabq+O8ZcXx2k2XyCJwAkgi+/vCiuoqo40/arqd5qvz+gtqS
+ 4qgtRxVmgiUAAb7SpSXRjy6ug6nXMEkvRNDMs0pjN86ssullYSZYAggi+PryurK5iThjwlZ+TokZXXxw
+ fg2k3VsNL8ESgAhQ+tLaUphjspP6zokAXFZXVhpz2WaYhZVgCaA0I+JcUlOKkUSGSaci6swqjUHxJHaE
+ tASYAAlg6jPHI3L5tBJM5gRg7OU0wxF0QWUx7Dw4rARIAABgOEQJ1wGmojahGfrPK4tDT+Rem6WACJgA
+ gOlJOpWvaCM/zARJAAKAqJzqooAZpWG3nMJKkARgwOz8T9krEgD4dg00xARJAABE/Vn/UDIDTMU5dfPm
+ B7L+FC05WYJHgARgQAj4ntrem8JIHYRJfjkiX/OmrkFIe9A2pARIAIz2TtwzkMYUPAEYAI4MZff2D0MS
+ 24FQKAmWAMyAFM+0DgBwJnkubMqgv9SRzGR9kde6iJY8EiwBNDNc+duWvle6BjG6Tj9JmFMBjx7qhtZk
+ 10LDSrAEYMARpDL+j/d2Api8cbk5Y7mtN/Xz/d2w561CTLAEAKCY4cqf7O3sGvacSRuZmIj/5x1tueGc
+ I+34J7wETgBmSCmO9qXu3nwIkzMK0syOoB196X/b1YaIDERxOEueCJwAABQzRZxvvX706ZZ+Uxx8Ar85
+ A2YD7NPP7BkYykkhbPiHmSAKAFMhWfGfP7u3P+c7gibwOaA0C8Ldm5qfOtgtowEqDmfJCwEVQDNLV77a
+ kbx13fa0rwXRuUcqA75mR9BD+7u+/NJBCl5RIMvUE1ABYCpYRZ11+7p+/4ntOc3y3BzQzEqzI+hHezs+
+ 9KttCgyyCXCWAAsAQGmWcffRPZ03/nLL0VTWOHCmGpjiuARyBD2wvfXD63YogER+2qFagkagBYBxIOo8
+ sb975YObHjrQZVoeaYZifvOyOSbujS2S6Egqe/O67R9/aqcpqW4THyyG4BbHHYMB6cpU1v/pno5dyeFZ
+ pbHGkqjpgWe6a2k+VkZKj2yfjZRWFkQpT313R9uHntj+Sku/jLjapv5bjiOgxXFPxmQG6awvXXndzKqP
+ Xzj9qullFVH3dDkMmvnV7qEf7O742f7O1v5hkkI4wi75W06gYAQwSCLNzDkFQWXFkYuqilfXli2uKIpK
+ coWISTHkqZ196V19qR196d29KT/rw5VCCtNpwmI5gQITwCCJGKwVQ2koDSlg+oQRQTN8BUGQgqSQgjTb
+ 0LeclqC3SDolZmpLgoR0TI9Tc3qGGSRJRKT5yFiZZYvldBSkAAazznPClJZ5cpOoLecZQV8GtVgmFSuA
+ JdRYASyhxgpgCTVWAEuosQJYQo0VwBJqrACWUGMFsIQaK4Al1FgBLKHGCmAJNVYAS6ixAlhCjRXAEmqs
+ AJZQYwWwhBorgCXUWAEsocYKYAk1VgBLqLECWEKNFcASaqwAllBjBbCEGiuAJdRYASyhxgpgCTVWAEuo
+ sQJYQo0VwBJqrACWUGMFsIQaK4Al1FgBLKHGCmAJNVYAS6ixAlhCjRXAEmqsAJZQYwWwhBorgCXUWAEs
+ oeb/A5fj85sn5OS0AAAAAElFTkSuQmCC
+</value>
+ </data>
+</root>
\ No newline at end of file
diff --git a/agent/windows-setup-agent/SetupWizard.Designer.cs b/agent/windows-setup-agent/SetupWizard.Designer.cs
new file mode 100644
index 0000000..dc36ac1
--- /dev/null
+++ b/agent/windows-setup-agent/SetupWizard.Designer.cs
@@ -0,0 +1,818 @@
+namespace Icinga
+{
+ partial class SetupWizard
+ {
+ /// <summary>
+ /// Required designer variable.
+ /// </summary>
+ private System.ComponentModel.IContainer components = null;
+
+ /// <summary>
+ /// Clean up any resources being used.
+ /// </summary>
+ /// <param name="disposing">true if managed resources should be disposed; otherwise, false.</param>
+ protected override void Dispose(bool disposing)
+ {
+ if (disposing && (components != null))
+ {
+ components.Dispose();
+ }
+ base.Dispose(disposing);
+ }
+
+ #region Windows Form Designer generated code
+
+ /// <summary>
+ /// Required method for Designer support - do not modify
+ /// the contents of this method with the code editor.
+ /// </summary>
+ private void InitializeComponent()
+ {
+ System.ComponentModel.ComponentResourceManager resources = new System.ComponentModel.ComponentResourceManager(typeof(SetupWizard));
+ this.btnBack = new System.Windows.Forms.Button();
+ this.btnNext = new System.Windows.Forms.Button();
+ this.btnCancel = new System.Windows.Forms.Button();
+ this.tabFinish = new System.Windows.Forms.TabPage();
+ this.lblSetupCompleted = new System.Windows.Forms.Label();
+ this.tabConfigure = new System.Windows.Forms.TabPage();
+ this.lblConfigStatus = new System.Windows.Forms.Label();
+ this.prgConfig = new System.Windows.Forms.ProgressBar();
+ this.tabParameters = new System.Windows.Forms.TabPage();
+ this.groupBox4 = new System.Windows.Forms.GroupBox();
+ this.btnEditGlobalZone = new System.Windows.Forms.Button();
+ this.btnRemoveGlobalZone = new System.Windows.Forms.Button();
+ this.btnAddGlobalZone = new System.Windows.Forms.Button();
+ this.lvwGlobalZones = new System.Windows.Forms.ListView();
+ this.colGlobalZoneName = ((System.Windows.Forms.ColumnHeader)(new System.Windows.Forms.ColumnHeader()));
+ this.introduction1 = new System.Windows.Forms.Label();
+ this.groupBox3 = new System.Windows.Forms.GroupBox();
+ this.chkDisableConf = new System.Windows.Forms.CheckBox();
+ this.txtUser = new System.Windows.Forms.TextBox();
+ this.chkRunServiceAsThisUser = new System.Windows.Forms.CheckBox();
+ this.chkAcceptConfig = new System.Windows.Forms.CheckBox();
+ this.chkAcceptCommands = new System.Windows.Forms.CheckBox();
+ this.txtTicket = new System.Windows.Forms.TextBox();
+ this.lblTicket = new System.Windows.Forms.Label();
+ this.txtInstanceName = new System.Windows.Forms.TextBox();
+ this.lblInstanceName = new System.Windows.Forms.Label();
+ this.groupBox2 = new System.Windows.Forms.GroupBox();
+ this.rdoNoListener = new System.Windows.Forms.RadioButton();
+ this.txtListenerPort = new System.Windows.Forms.TextBox();
+ this.lblListenerPort = new System.Windows.Forms.Label();
+ this.rdoListener = new System.Windows.Forms.RadioButton();
+ this.groupBox1 = new System.Windows.Forms.GroupBox();
+ this.btnEditEndpoint = new System.Windows.Forms.Button();
+ this.btnRemoveEndpoint = new System.Windows.Forms.Button();
+ this.btnAddEndpoint = new System.Windows.Forms.Button();
+ this.lvwEndpoints = new System.Windows.Forms.ListView();
+ this.colInstanceName = ((System.Windows.Forms.ColumnHeader)(new System.Windows.Forms.ColumnHeader()));
+ this.colHost = ((System.Windows.Forms.ColumnHeader)(new System.Windows.Forms.ColumnHeader()));
+ this.colPort = ((System.Windows.Forms.ColumnHeader)(new System.Windows.Forms.ColumnHeader()));
+ this.tbcPages = new System.Windows.Forms.TabControl();
+ this.tabRetrieveCertificate = new System.Windows.Forms.TabPage();
+ this.lblRetrieveCertificate = new System.Windows.Forms.Label();
+ this.prgRetrieveCertificate = new System.Windows.Forms.ProgressBar();
+ this.tabVerifyCertificate = new System.Windows.Forms.TabPage();
+ this.grpX509Fields = new System.Windows.Forms.GroupBox();
+ this.txtX509Field = new System.Windows.Forms.TextBox();
+ this.lvwX509Fields = new System.Windows.Forms.ListView();
+ this.colField = ((System.Windows.Forms.ColumnHeader)(new System.Windows.Forms.ColumnHeader()));
+ this.colValue = ((System.Windows.Forms.ColumnHeader)(new System.Windows.Forms.ColumnHeader()));
+ this.txtX509Subject = new System.Windows.Forms.TextBox();
+ this.txtX509Issuer = new System.Windows.Forms.TextBox();
+ this.lblX509Subject = new System.Windows.Forms.Label();
+ this.lblX509Issuer = new System.Windows.Forms.Label();
+ this.lblX509Prompt = new System.Windows.Forms.Label();
+ this.tabError = new System.Windows.Forms.TabPage();
+ this.txtError = new System.Windows.Forms.TextBox();
+ this.lblError = new System.Windows.Forms.Label();
+ this.picBanner = new System.Windows.Forms.PictureBox();
+ this.linkLabelDocs = new System.Windows.Forms.LinkLabel();
+ this.tabFinish.SuspendLayout();
+ this.tabConfigure.SuspendLayout();
+ this.tabParameters.SuspendLayout();
+ this.groupBox4.SuspendLayout();
+ this.groupBox3.SuspendLayout();
+ this.groupBox2.SuspendLayout();
+ this.groupBox1.SuspendLayout();
+ this.tbcPages.SuspendLayout();
+ this.tabRetrieveCertificate.SuspendLayout();
+ this.tabVerifyCertificate.SuspendLayout();
+ this.grpX509Fields.SuspendLayout();
+ this.tabError.SuspendLayout();
+ ((System.ComponentModel.ISupportInitialize)(this.picBanner)).BeginInit();
+ this.SuspendLayout();
+ //
+ // btnBack
+ //
+ this.btnBack.Enabled = false;
+ this.btnBack.Location = new System.Drawing.Point(376, 587);
+ this.btnBack.Name = "btnBack";
+ this.btnBack.Size = new System.Drawing.Size(75, 23);
+ this.btnBack.TabIndex = 1;
+ this.btnBack.Text = "< &Back";
+ this.btnBack.UseVisualStyleBackColor = true;
+ this.btnBack.Click += new System.EventHandler(this.btnBack_Click);
+ //
+ // btnNext
+ //
+ this.btnNext.Location = new System.Drawing.Point(457, 587);
+ this.btnNext.Name = "btnNext";
+ this.btnNext.Size = new System.Drawing.Size(75, 23);
+ this.btnNext.TabIndex = 2;
+ this.btnNext.Text = "&Next >";
+ this.btnNext.UseVisualStyleBackColor = true;
+ this.btnNext.Click += new System.EventHandler(this.btnNext_Click);
+ //
+ // btnCancel
+ //
+ this.btnCancel.DialogResult = System.Windows.Forms.DialogResult.Cancel;
+ this.btnCancel.Location = new System.Drawing.Point(538, 587);
+ this.btnCancel.Name = "btnCancel";
+ this.btnCancel.Size = new System.Drawing.Size(75, 23);
+ this.btnCancel.TabIndex = 3;
+ this.btnCancel.Text = "Cancel";
+ this.btnCancel.UseVisualStyleBackColor = true;
+ this.btnCancel.Click += new System.EventHandler(this.btnCancel_Click);
+ //
+ // tabFinish
+ //
+ this.tabFinish.Controls.Add(this.lblSetupCompleted);
+ this.tabFinish.Location = new System.Drawing.Point(4, 5);
+ this.tabFinish.Name = "tabFinish";
+ this.tabFinish.Padding = new System.Windows.Forms.Padding(3);
+ this.tabFinish.Size = new System.Drawing.Size(617, 495);
+ this.tabFinish.TabIndex = 5;
+ this.tabFinish.Text = "Finish";
+ this.tabFinish.UseVisualStyleBackColor = true;
+ //
+ // lblSetupCompleted
+ //
+ this.lblSetupCompleted.AutoSize = true;
+ this.lblSetupCompleted.Location = new System.Drawing.Point(34, 35);
+ this.lblSetupCompleted.Name = "lblSetupCompleted";
+ this.lblSetupCompleted.Size = new System.Drawing.Size(252, 13);
+ this.lblSetupCompleted.TabIndex = 0;
+ this.lblSetupCompleted.Text = "The Icinga Windows agent was set up successfully.";
+ //
+ // tabConfigure
+ //
+ this.tabConfigure.Controls.Add(this.lblConfigStatus);
+ this.tabConfigure.Controls.Add(this.prgConfig);
+ this.tabConfigure.Location = new System.Drawing.Point(4, 5);
+ this.tabConfigure.Name = "tabConfigure";
+ this.tabConfigure.Padding = new System.Windows.Forms.Padding(3);
+ this.tabConfigure.Size = new System.Drawing.Size(617, 495);
+ this.tabConfigure.TabIndex = 4;
+ this.tabConfigure.Text = "Configure Icinga 2";
+ this.tabConfigure.UseVisualStyleBackColor = true;
+ //
+ // lblConfigStatus
+ //
+ this.lblConfigStatus.AutoSize = true;
+ this.lblConfigStatus.Location = new System.Drawing.Point(184, 204);
+ this.lblConfigStatus.Name = "lblConfigStatus";
+ this.lblConfigStatus.Size = new System.Drawing.Size(141, 13);
+ this.lblConfigStatus.TabIndex = 1;
+ this.lblConfigStatus.Text = "Updating the configuration...";
+ //
+ // prgConfig
+ //
+ this.prgConfig.Location = new System.Drawing.Point(184, 223);
+ this.prgConfig.Name = "prgConfig";
+ this.prgConfig.Size = new System.Drawing.Size(289, 23);
+ this.prgConfig.TabIndex = 0;
+ //
+ // tabParameters
+ //
+ this.tabParameters.Controls.Add(this.linkLabelDocs);
+ this.tabParameters.Controls.Add(this.groupBox4);
+ this.tabParameters.Controls.Add(this.introduction1);
+ this.tabParameters.Controls.Add(this.groupBox3);
+ this.tabParameters.Controls.Add(this.txtTicket);
+ this.tabParameters.Controls.Add(this.lblTicket);
+ this.tabParameters.Controls.Add(this.txtInstanceName);
+ this.tabParameters.Controls.Add(this.lblInstanceName);
+ this.tabParameters.Controls.Add(this.groupBox2);
+ this.tabParameters.Controls.Add(this.groupBox1);
+ this.tabParameters.Location = new System.Drawing.Point(4, 5);
+ this.tabParameters.Name = "tabParameters";
+ this.tabParameters.Padding = new System.Windows.Forms.Padding(3);
+ this.tabParameters.Size = new System.Drawing.Size(617, 495);
+ this.tabParameters.TabIndex = 3;
+ this.tabParameters.Text = "Agent Parameters";
+ this.tabParameters.UseVisualStyleBackColor = true;
+ //
+ // groupBox4
+ //
+ this.groupBox4.Controls.Add(this.btnEditGlobalZone);
+ this.groupBox4.Controls.Add(this.btnRemoveGlobalZone);
+ this.groupBox4.Controls.Add(this.btnAddGlobalZone);
+ this.groupBox4.Controls.Add(this.lvwGlobalZones);
+ this.groupBox4.Location = new System.Drawing.Point(8, 210);
+ this.groupBox4.Name = "groupBox4";
+ this.groupBox4.Size = new System.Drawing.Size(601, 110);
+ this.groupBox4.TabIndex = 9;
+ this.groupBox4.TabStop = false;
+ this.groupBox4.Text = "Global Zones";
+ //
+ // btnEditGlobalZone
+ //
+ this.btnEditGlobalZone.Enabled = false;
+ this.btnEditGlobalZone.Location = new System.Drawing.Point(520, 48);
+ this.btnEditGlobalZone.Name = "btnEditGlobalZone";
+ this.btnEditGlobalZone.Size = new System.Drawing.Size(75, 23);
+ this.btnEditGlobalZone.TabIndex = 7;
+ this.btnEditGlobalZone.Text = "Edit";
+ this.btnEditGlobalZone.UseVisualStyleBackColor = true;
+ this.btnEditGlobalZone.Click += new System.EventHandler(this.btnEditGlobalZone_Click);
+ //
+ // btnRemoveGlobalZone
+ //
+ this.btnRemoveGlobalZone.Enabled = false;
+ this.btnRemoveGlobalZone.Location = new System.Drawing.Point(520, 77);
+ this.btnRemoveGlobalZone.Name = "btnRemoveGlobalZone";
+ this.btnRemoveGlobalZone.Size = new System.Drawing.Size(75, 23);
+ this.btnRemoveGlobalZone.TabIndex = 6;
+ this.btnRemoveGlobalZone.Text = "Remove";
+ this.btnRemoveGlobalZone.UseVisualStyleBackColor = true;
+ this.btnRemoveGlobalZone.Click += new System.EventHandler(this.btnRemoveGlobalZone_Click);
+ //
+ // btnAddGlobalZone
+ //
+ this.btnAddGlobalZone.Location = new System.Drawing.Point(520, 19);
+ this.btnAddGlobalZone.Name = "btnAddGlobalZone";
+ this.btnAddGlobalZone.Size = new System.Drawing.Size(75, 23);
+ this.btnAddGlobalZone.TabIndex = 5;
+ this.btnAddGlobalZone.Text = "Add";
+ this.btnAddGlobalZone.UseVisualStyleBackColor = true;
+ this.btnAddGlobalZone.Click += new System.EventHandler(this.btnAddGlobalZone_Click);
+ //
+ // lvwGlobalZones
+ //
+ this.lvwGlobalZones.Columns.AddRange(new System.Windows.Forms.ColumnHeader[] {
+ this.colGlobalZoneName});
+ this.lvwGlobalZones.FullRowSelect = true;
+ this.lvwGlobalZones.Location = new System.Drawing.Point(6, 19);
+ this.lvwGlobalZones.Name = "lvwGlobalZones";
+ this.lvwGlobalZones.Size = new System.Drawing.Size(500, 81);
+ this.lvwGlobalZones.TabIndex = 4;
+ this.lvwGlobalZones.UseCompatibleStateImageBehavior = false;
+ this.lvwGlobalZones.View = System.Windows.Forms.View.Details;
+ this.lvwGlobalZones.SelectedIndexChanged += new System.EventHandler(this.lvwGlobalZones_SelectedIndexChanged);
+ //
+ // colGlobalZoneName
+ //
+ this.colGlobalZoneName.Text = "Zone Name";
+ this.colGlobalZoneName.Width = 496;
+ //
+ // introduction1
+ //
+ this.introduction1.AutoSize = true;
+ this.introduction1.Location = new System.Drawing.Point(11, 3);
+ this.introduction1.Name = "introduction1";
+ this.introduction1.Size = new System.Drawing.Size(262, 13);
+ this.introduction1.TabIndex = 6;
+ this.introduction1.Text = "Welcome to the Icinga Windows Agent Setup Wizard!";
+ //
+ // groupBox3
+ //
+ this.groupBox3.Controls.Add(this.chkDisableConf);
+ this.groupBox3.Controls.Add(this.txtUser);
+ this.groupBox3.Controls.Add(this.chkRunServiceAsThisUser);
+ this.groupBox3.Controls.Add(this.chkAcceptConfig);
+ this.groupBox3.Controls.Add(this.chkAcceptCommands);
+ this.groupBox3.Location = new System.Drawing.Point(308, 326);
+ this.groupBox3.Name = "groupBox3";
+ this.groupBox3.Size = new System.Drawing.Size(301, 163);
+ this.groupBox3.TabIndex = 5;
+ this.groupBox3.TabStop = false;
+ this.groupBox3.Text = "Advanced Options";
+ //
+ // chkDisableConf
+ //
+ this.chkDisableConf.AutoSize = true;
+ this.chkDisableConf.Checked = true;
+ this.chkDisableConf.CheckState = System.Windows.Forms.CheckState.Checked;
+ this.chkDisableConf.Location = new System.Drawing.Point(9, 137);
+ this.chkDisableConf.Name = "chkDisableConf";
+ this.chkDisableConf.Size = new System.Drawing.Size(211, 17);
+ this.chkDisableConf.TabIndex = 9;
+ this.chkDisableConf.Text = "Disable including local \'conf.d\' directory";
+ this.chkDisableConf.UseVisualStyleBackColor = true;
+ //
+ // txtUser
+ //
+ this.txtUser.Enabled = false;
+ this.txtUser.Location = new System.Drawing.Point(28, 88);
+ this.txtUser.Name = "txtUser";
+ this.txtUser.Size = new System.Drawing.Size(178, 20);
+ this.txtUser.TabIndex = 8;
+ this.txtUser.Text = "NT AUTHORITY\\NetworkService";
+ //
+ // chkRunServiceAsThisUser
+ //
+ this.chkRunServiceAsThisUser.AutoSize = true;
+ this.chkRunServiceAsThisUser.Location = new System.Drawing.Point(9, 65);
+ this.chkRunServiceAsThisUser.Name = "chkRunServiceAsThisUser";
+ this.chkRunServiceAsThisUser.Size = new System.Drawing.Size(183, 17);
+ this.chkRunServiceAsThisUser.TabIndex = 7;
+ this.chkRunServiceAsThisUser.Text = "Run Icinga 2 service as this user:";
+ this.chkRunServiceAsThisUser.UseVisualStyleBackColor = true;
+ this.chkRunServiceAsThisUser.CheckedChanged += new System.EventHandler(this.chkRunServiceAsThisUser_CheckedChanged);
+ //
+ // chkAcceptConfig
+ //
+ this.chkAcceptConfig.AutoSize = true;
+ this.chkAcceptConfig.Location = new System.Drawing.Point(9, 42);
+ this.chkAcceptConfig.Name = "chkAcceptConfig";
+ this.chkAcceptConfig.Size = new System.Drawing.Size(284, 17);
+ this.chkAcceptConfig.TabIndex = 1;
+ this.chkAcceptConfig.Text = "Accept config updates from master/satellite instance(s)";
+ this.chkAcceptConfig.UseVisualStyleBackColor = true;
+ //
+ // chkAcceptCommands
+ //
+ this.chkAcceptCommands.AutoSize = true;
+ this.chkAcceptCommands.Location = new System.Drawing.Point(9, 19);
+ this.chkAcceptCommands.Name = "chkAcceptCommands";
+ this.chkAcceptCommands.Size = new System.Drawing.Size(265, 17);
+ this.chkAcceptCommands.TabIndex = 0;
+ this.chkAcceptCommands.Text = "Accept commands from master/satellite instance(s)";
+ this.chkAcceptCommands.UseVisualStyleBackColor = true;
+ //
+ // txtTicket
+ //
+ this.txtTicket.Location = new System.Drawing.Point(164, 56);
+ this.txtTicket.Name = "txtTicket";
+ this.txtTicket.Size = new System.Drawing.Size(350, 20);
+ this.txtTicket.TabIndex = 1;
+ //
+ // lblTicket
+ //
+ this.lblTicket.AutoSize = true;
+ this.lblTicket.Location = new System.Drawing.Point(9, 59);
+ this.lblTicket.Name = "lblTicket";
+ this.lblTicket.Size = new System.Drawing.Size(149, 13);
+ this.lblTicket.TabIndex = 4;
+ this.lblTicket.Text = "CSR Signing Ticket (optional):";
+ //
+ // txtInstanceName
+ //
+ this.txtInstanceName.Location = new System.Drawing.Point(164, 27);
+ this.txtInstanceName.Name = "txtInstanceName";
+ this.txtInstanceName.Size = new System.Drawing.Size(350, 20);
+ this.txtInstanceName.TabIndex = 0;
+ //
+ // lblInstanceName
+ //
+ this.lblInstanceName.AutoSize = true;
+ this.lblInstanceName.Location = new System.Drawing.Point(11, 30);
+ this.lblInstanceName.Name = "lblInstanceName";
+ this.lblInstanceName.Size = new System.Drawing.Size(121, 13);
+ this.lblInstanceName.TabIndex = 3;
+ this.lblInstanceName.Text = "Instance Name (FQDN):";
+ //
+ // groupBox2
+ //
+ this.groupBox2.Controls.Add(this.rdoNoListener);
+ this.groupBox2.Controls.Add(this.txtListenerPort);
+ this.groupBox2.Controls.Add(this.lblListenerPort);
+ this.groupBox2.Controls.Add(this.rdoListener);
+ this.groupBox2.Location = new System.Drawing.Point(8, 326);
+ this.groupBox2.Name = "groupBox2";
+ this.groupBox2.Size = new System.Drawing.Size(298, 163);
+ this.groupBox2.TabIndex = 2;
+ this.groupBox2.TabStop = false;
+ this.groupBox2.Text = "TCP Listener";
+ //
+ // rdoNoListener
+ //
+ this.rdoNoListener.AutoSize = true;
+ this.rdoNoListener.Checked = true;
+ this.rdoNoListener.Location = new System.Drawing.Point(11, 82);
+ this.rdoNoListener.Name = "rdoNoListener";
+ this.rdoNoListener.Size = new System.Drawing.Size(163, 17);
+ this.rdoNoListener.TabIndex = 9;
+ this.rdoNoListener.TabStop = true;
+ this.rdoNoListener.Text = "Do not listen for connections.";
+ this.rdoNoListener.UseVisualStyleBackColor = true;
+ this.rdoNoListener.CheckedChanged += new System.EventHandler(this.RadioListener_CheckedChanged);
+ //
+ // txtListenerPort
+ //
+ this.txtListenerPort.Enabled = false;
+ this.txtListenerPort.Location = new System.Drawing.Point(66, 47);
+ this.txtListenerPort.Name = "txtListenerPort";
+ this.txtListenerPort.Size = new System.Drawing.Size(84, 20);
+ this.txtListenerPort.TabIndex = 8;
+ this.txtListenerPort.Text = "5665";
+ //
+ // lblListenerPort
+ //
+ this.lblListenerPort.AutoSize = true;
+ this.lblListenerPort.Location = new System.Drawing.Point(31, 51);
+ this.lblListenerPort.Name = "lblListenerPort";
+ this.lblListenerPort.Size = new System.Drawing.Size(29, 13);
+ this.lblListenerPort.TabIndex = 1;
+ this.lblListenerPort.Text = "Port:";
+ //
+ // rdoListener
+ //
+ this.rdoListener.AutoSize = true;
+ this.rdoListener.Location = new System.Drawing.Point(11, 24);
+ this.rdoListener.Name = "rdoListener";
+ this.rdoListener.Size = new System.Drawing.Size(283, 17);
+ this.rdoListener.TabIndex = 7;
+ this.rdoListener.Text = "Listen for connections from master/satellite instance(s):";
+ this.rdoListener.UseVisualStyleBackColor = true;
+ this.rdoListener.CheckedChanged += new System.EventHandler(this.RadioListener_CheckedChanged);
+ //
+ // groupBox1
+ //
+ this.groupBox1.Controls.Add(this.btnEditEndpoint);
+ this.groupBox1.Controls.Add(this.btnRemoveEndpoint);
+ this.groupBox1.Controls.Add(this.btnAddEndpoint);
+ this.groupBox1.Controls.Add(this.lvwEndpoints);
+ this.groupBox1.Location = new System.Drawing.Point(8, 94);
+ this.groupBox1.Name = "groupBox1";
+ this.groupBox1.Size = new System.Drawing.Size(601, 110);
+ this.groupBox1.TabIndex = 1;
+ this.groupBox1.TabStop = false;
+ this.groupBox1.Text = "Parent master/satellite instance(s) for this agent";
+ //
+ // btnEditEndpoint
+ //
+ this.btnEditEndpoint.Enabled = false;
+ this.btnEditEndpoint.Location = new System.Drawing.Point(520, 48);
+ this.btnEditEndpoint.Name = "btnEditEndpoint";
+ this.btnEditEndpoint.Size = new System.Drawing.Size(75, 23);
+ this.btnEditEndpoint.TabIndex = 7;
+ this.btnEditEndpoint.Text = "Edit";
+ this.btnEditEndpoint.UseVisualStyleBackColor = true;
+ this.btnEditEndpoint.Click += new System.EventHandler(this.btnEditEndpoint_Click);
+ //
+ // btnRemoveEndpoint
+ //
+ this.btnRemoveEndpoint.Enabled = false;
+ this.btnRemoveEndpoint.Location = new System.Drawing.Point(520, 77);
+ this.btnRemoveEndpoint.Name = "btnRemoveEndpoint";
+ this.btnRemoveEndpoint.Size = new System.Drawing.Size(75, 23);
+ this.btnRemoveEndpoint.TabIndex = 6;
+ this.btnRemoveEndpoint.Text = "Remove";
+ this.btnRemoveEndpoint.UseVisualStyleBackColor = true;
+ this.btnRemoveEndpoint.Click += new System.EventHandler(this.btnRemoveEndpoint_Click);
+ //
+ // btnAddEndpoint
+ //
+ this.btnAddEndpoint.Location = new System.Drawing.Point(520, 19);
+ this.btnAddEndpoint.Name = "btnAddEndpoint";
+ this.btnAddEndpoint.Size = new System.Drawing.Size(75, 23);
+ this.btnAddEndpoint.TabIndex = 5;
+ this.btnAddEndpoint.Text = "Add";
+ this.btnAddEndpoint.UseVisualStyleBackColor = true;
+ this.btnAddEndpoint.Click += new System.EventHandler(this.btnAddEndpoint_Click);
+ //
+ // lvwEndpoints
+ //
+ this.lvwEndpoints.Columns.AddRange(new System.Windows.Forms.ColumnHeader[] {
+ this.colInstanceName,
+ this.colHost,
+ this.colPort});
+ this.lvwEndpoints.FullRowSelect = true;
+ this.lvwEndpoints.Location = new System.Drawing.Point(6, 19);
+ this.lvwEndpoints.Name = "lvwEndpoints";
+ this.lvwEndpoints.Size = new System.Drawing.Size(500, 81);
+ this.lvwEndpoints.TabIndex = 4;
+ this.lvwEndpoints.UseCompatibleStateImageBehavior = false;
+ this.lvwEndpoints.View = System.Windows.Forms.View.Details;
+ this.lvwEndpoints.SelectedIndexChanged += new System.EventHandler(this.lvwEndpoints_SelectedIndexChanged);
+ //
+ // colInstanceName
+ //
+ this.colInstanceName.Text = "Instance Name";
+ this.colInstanceName.Width = 200;
+ //
+ // colHost
+ //
+ this.colHost.Text = "Host";
+ this.colHost.Width = 200;
+ //
+ // colPort
+ //
+ this.colPort.Text = "Port";
+ this.colPort.Width = 80;
+ //
+ // tbcPages
+ //
+ this.tbcPages.Appearance = System.Windows.Forms.TabAppearance.FlatButtons;
+ this.tbcPages.Controls.Add(this.tabParameters);
+ this.tbcPages.Controls.Add(this.tabRetrieveCertificate);
+ this.tbcPages.Controls.Add(this.tabVerifyCertificate);
+ this.tbcPages.Controls.Add(this.tabConfigure);
+ this.tbcPages.Controls.Add(this.tabFinish);
+ this.tbcPages.Controls.Add(this.tabError);
+ this.tbcPages.ItemSize = new System.Drawing.Size(0, 1);
+ this.tbcPages.Location = new System.Drawing.Point(0, 80);
+ this.tbcPages.Margin = new System.Windows.Forms.Padding(0);
+ this.tbcPages.Name = "tbcPages";
+ this.tbcPages.SelectedIndex = 0;
+ this.tbcPages.Size = new System.Drawing.Size(625, 504);
+ this.tbcPages.SizeMode = System.Windows.Forms.TabSizeMode.Fixed;
+ this.tbcPages.TabIndex = 0;
+ this.tbcPages.SelectedIndexChanged += new System.EventHandler(this.tbcPages_SelectedIndexChanged);
+ //
+ // tabRetrieveCertificate
+ //
+ this.tabRetrieveCertificate.Controls.Add(this.lblRetrieveCertificate);
+ this.tabRetrieveCertificate.Controls.Add(this.prgRetrieveCertificate);
+ this.tabRetrieveCertificate.Location = new System.Drawing.Point(4, 5);
+ this.tabRetrieveCertificate.Name = "tabRetrieveCertificate";
+ this.tabRetrieveCertificate.Padding = new System.Windows.Forms.Padding(3);
+ this.tabRetrieveCertificate.Size = new System.Drawing.Size(617, 495);
+ this.tabRetrieveCertificate.TabIndex = 7;
+ this.tabRetrieveCertificate.Text = "Checking Certificate";
+ this.tabRetrieveCertificate.UseVisualStyleBackColor = true;
+ //
+ // lblRetrieveCertificate
+ //
+ this.lblRetrieveCertificate.AutoSize = true;
+ this.lblRetrieveCertificate.Location = new System.Drawing.Point(164, 229);
+ this.lblRetrieveCertificate.Name = "lblRetrieveCertificate";
+ this.lblRetrieveCertificate.Size = new System.Drawing.Size(110, 13);
+ this.lblRetrieveCertificate.TabIndex = 3;
+ this.lblRetrieveCertificate.Text = "Checking certificate...";
+ //
+ // prgRetrieveCertificate
+ //
+ this.prgRetrieveCertificate.Location = new System.Drawing.Point(164, 248);
+ this.prgRetrieveCertificate.Name = "prgRetrieveCertificate";
+ this.prgRetrieveCertificate.Size = new System.Drawing.Size(289, 23);
+ this.prgRetrieveCertificate.TabIndex = 2;
+ //
+ // tabVerifyCertificate
+ //
+ this.tabVerifyCertificate.Controls.Add(this.grpX509Fields);
+ this.tabVerifyCertificate.Controls.Add(this.txtX509Subject);
+ this.tabVerifyCertificate.Controls.Add(this.txtX509Issuer);
+ this.tabVerifyCertificate.Controls.Add(this.lblX509Subject);
+ this.tabVerifyCertificate.Controls.Add(this.lblX509Issuer);
+ this.tabVerifyCertificate.Controls.Add(this.lblX509Prompt);
+ this.tabVerifyCertificate.Location = new System.Drawing.Point(4, 5);
+ this.tabVerifyCertificate.Name = "tabVerifyCertificate";
+ this.tabVerifyCertificate.Padding = new System.Windows.Forms.Padding(3);
+ this.tabVerifyCertificate.Size = new System.Drawing.Size(617, 495);
+ this.tabVerifyCertificate.TabIndex = 6;
+ this.tabVerifyCertificate.Text = "Verify Certificate";
+ this.tabVerifyCertificate.UseVisualStyleBackColor = true;
+ //
+ // grpX509Fields
+ //
+ this.grpX509Fields.Controls.Add(this.txtX509Field);
+ this.grpX509Fields.Controls.Add(this.lvwX509Fields);
+ this.grpX509Fields.Location = new System.Drawing.Point(11, 115);
+ this.grpX509Fields.Name = "grpX509Fields";
+ this.grpX509Fields.Size = new System.Drawing.Size(598, 369);
+ this.grpX509Fields.TabIndex = 8;
+ this.grpX509Fields.TabStop = false;
+ this.grpX509Fields.Text = "X509 Fields";
+ //
+ // txtX509Field
+ //
+ this.txtX509Field.Font = new System.Drawing.Font("Courier New", 9.75F, System.Drawing.FontStyle.Regular, System.Drawing.GraphicsUnit.Point, ((byte)(0)));
+ this.txtX509Field.Location = new System.Drawing.Point(6, 197);
+ this.txtX509Field.Multiline = true;
+ this.txtX509Field.Name = "txtX509Field";
+ this.txtX509Field.ReadOnly = true;
+ this.txtX509Field.ScrollBars = System.Windows.Forms.ScrollBars.Vertical;
+ this.txtX509Field.Size = new System.Drawing.Size(586, 166);
+ this.txtX509Field.TabIndex = 9;
+ //
+ // lvwX509Fields
+ //
+ this.lvwX509Fields.Columns.AddRange(new System.Windows.Forms.ColumnHeader[] {
+ this.colField,
+ this.colValue});
+ this.lvwX509Fields.Location = new System.Drawing.Point(6, 19);
+ this.lvwX509Fields.Name = "lvwX509Fields";
+ this.lvwX509Fields.Size = new System.Drawing.Size(586, 172);
+ this.lvwX509Fields.TabIndex = 8;
+ this.lvwX509Fields.UseCompatibleStateImageBehavior = false;
+ this.lvwX509Fields.View = System.Windows.Forms.View.Details;
+ this.lvwX509Fields.SelectedIndexChanged += new System.EventHandler(this.lvwX509Fields_SelectedIndexChanged);
+ //
+ // colField
+ //
+ this.colField.Text = "Field";
+ this.colField.Width = 200;
+ //
+ // colValue
+ //
+ this.colValue.Text = "Value";
+ this.colValue.Width = 350;
+ //
+ // txtX509Subject
+ //
+ this.txtX509Subject.Location = new System.Drawing.Point(71, 73);
+ this.txtX509Subject.Name = "txtX509Subject";
+ this.txtX509Subject.ReadOnly = true;
+ this.txtX509Subject.Size = new System.Drawing.Size(532, 20);
+ this.txtX509Subject.TabIndex = 4;
+ //
+ // txtX509Issuer
+ //
+ this.txtX509Issuer.Location = new System.Drawing.Point(71, 47);
+ this.txtX509Issuer.Name = "txtX509Issuer";
+ this.txtX509Issuer.ReadOnly = true;
+ this.txtX509Issuer.Size = new System.Drawing.Size(532, 20);
+ this.txtX509Issuer.TabIndex = 3;
+ //
+ // lblX509Subject
+ //
+ this.lblX509Subject.AutoSize = true;
+ this.lblX509Subject.Location = new System.Drawing.Point(8, 77);
+ this.lblX509Subject.Name = "lblX509Subject";
+ this.lblX509Subject.Size = new System.Drawing.Size(46, 13);
+ this.lblX509Subject.TabIndex = 2;
+ this.lblX509Subject.Text = "Subject:";
+ //
+ // lblX509Issuer
+ //
+ this.lblX509Issuer.AutoSize = true;
+ this.lblX509Issuer.Location = new System.Drawing.Point(8, 50);
+ this.lblX509Issuer.Name = "lblX509Issuer";
+ this.lblX509Issuer.Size = new System.Drawing.Size(38, 13);
+ this.lblX509Issuer.TabIndex = 1;
+ this.lblX509Issuer.Text = "Issuer:";
+ //
+ // lblX509Prompt
+ //
+ this.lblX509Prompt.AutoSize = true;
+ this.lblX509Prompt.Location = new System.Drawing.Point(8, 15);
+ this.lblX509Prompt.Name = "lblX509Prompt";
+ this.lblX509Prompt.Size = new System.Drawing.Size(241, 13);
+ this.lblX509Prompt.TabIndex = 0;
+ this.lblX509Prompt.Text = "Please verify the master/satellite\'s SSL certificate:";
+ //
+ // tabError
+ //
+ this.tabError.Controls.Add(this.txtError);
+ this.tabError.Controls.Add(this.lblError);
+ this.tabError.Location = new System.Drawing.Point(4, 5);
+ this.tabError.Name = "tabError";
+ this.tabError.Padding = new System.Windows.Forms.Padding(3);
+ this.tabError.Size = new System.Drawing.Size(617, 495);
+ this.tabError.TabIndex = 8;
+ this.tabError.Text = "Error";
+ this.tabError.UseVisualStyleBackColor = true;
+ //
+ // txtError
+ //
+ this.txtError.Font = new System.Drawing.Font("Courier New", 9.75F, System.Drawing.FontStyle.Regular, System.Drawing.GraphicsUnit.Point, ((byte)(0)));
+ this.txtError.Location = new System.Drawing.Point(11, 38);
+ this.txtError.Multiline = true;
+ this.txtError.Name = "txtError";
+ this.txtError.ReadOnly = true;
+ this.txtError.ScrollBars = System.Windows.Forms.ScrollBars.Vertical;
+ this.txtError.Size = new System.Drawing.Size(598, 397);
+ this.txtError.TabIndex = 1;
+ //
+ // lblError
+ //
+ this.lblError.AutoSize = true;
+ this.lblError.Location = new System.Drawing.Point(8, 12);
+ this.lblError.Name = "lblError";
+ this.lblError.Size = new System.Drawing.Size(209, 13);
+ this.lblError.TabIndex = 0;
+ this.lblError.Text = "An error occurred while setting up Icinga 2:";
+ //
+ // picBanner
+ //
+ this.picBanner.Image = ((System.Drawing.Image)(resources.GetObject("picBanner.Image")));
+ this.picBanner.Location = new System.Drawing.Point(0, 0);
+ this.picBanner.Name = "picBanner";
+ this.picBanner.Size = new System.Drawing.Size(625, 77);
+ this.picBanner.TabIndex = 1;
+ this.picBanner.TabStop = false;
+ //
+ // linkLabelDocs
+ //
+ this.linkLabelDocs.AutoSize = true;
+ this.linkLabelDocs.LinkColor = System.Drawing.Color.FromArgb(((int)(((byte)(0)))), ((int)(((byte)(149)))), ((int)(((byte)(191)))));
+ this.linkLabelDocs.Location = new System.Drawing.Point(525, 3);
+ this.linkLabelDocs.Name = "linkLabelDocs";
+ this.linkLabelDocs.Size = new System.Drawing.Size(79, 13);
+ this.linkLabelDocs.TabIndex = 10;
+ this.linkLabelDocs.TabStop = true;
+ this.linkLabelDocs.Text = "Documentation";
+ this.linkLabelDocs.VisitedLinkColor = System.Drawing.Color.FromArgb(((int)(((byte)(0)))), ((int)(((byte)(149)))), ((int)(((byte)(191)))));
+ this.linkLabelDocs.LinkClicked += new System.Windows.Forms.LinkLabelLinkClickedEventHandler(this.linkLabelDocs_LinkClicked);
+ //
+ // SetupWizard
+ //
+ this.AcceptButton = this.btnNext;
+ this.AutoScaleDimensions = new System.Drawing.SizeF(6F, 13F);
+ this.AutoScaleMode = System.Windows.Forms.AutoScaleMode.Font;
+ this.CancelButton = this.btnCancel;
+ this.ClientSize = new System.Drawing.Size(625, 622);
+ this.Controls.Add(this.btnCancel);
+ this.Controls.Add(this.btnNext);
+ this.Controls.Add(this.btnBack);
+ this.Controls.Add(this.picBanner);
+ this.Controls.Add(this.tbcPages);
+ this.FormBorderStyle = System.Windows.Forms.FormBorderStyle.FixedSingle;
+ this.Icon = ((System.Drawing.Icon)(resources.GetObject("$this.Icon")));
+ this.MaximizeBox = false;
+ this.Name = "SetupWizard";
+ this.Text = "Icinga Windows Agent Setup Wizard";
+ this.Load += new System.EventHandler(this.SetupWizard_Load);
+ this.tabFinish.ResumeLayout(false);
+ this.tabFinish.PerformLayout();
+ this.tabConfigure.ResumeLayout(false);
+ this.tabConfigure.PerformLayout();
+ this.tabParameters.ResumeLayout(false);
+ this.tabParameters.PerformLayout();
+ this.groupBox4.ResumeLayout(false);
+ this.groupBox3.ResumeLayout(false);
+ this.groupBox3.PerformLayout();
+ this.groupBox2.ResumeLayout(false);
+ this.groupBox2.PerformLayout();
+ this.groupBox1.ResumeLayout(false);
+ this.tbcPages.ResumeLayout(false);
+ this.tabRetrieveCertificate.ResumeLayout(false);
+ this.tabRetrieveCertificate.PerformLayout();
+ this.tabVerifyCertificate.ResumeLayout(false);
+ this.tabVerifyCertificate.PerformLayout();
+ this.grpX509Fields.ResumeLayout(false);
+ this.grpX509Fields.PerformLayout();
+ this.tabError.ResumeLayout(false);
+ this.tabError.PerformLayout();
+ ((System.ComponentModel.ISupportInitialize)(this.picBanner)).EndInit();
+ this.ResumeLayout(false);
+
+ }
+
+ #endregion
+
+ private System.Windows.Forms.PictureBox picBanner;
+ private System.Windows.Forms.Button btnBack;
+ private System.Windows.Forms.Button btnNext;
+ private System.Windows.Forms.Button btnCancel;
+ private System.Windows.Forms.TabPage tabFinish;
+ private System.Windows.Forms.Label lblSetupCompleted;
+ private System.Windows.Forms.TabPage tabConfigure;
+ private System.Windows.Forms.Label lblConfigStatus;
+ private System.Windows.Forms.ProgressBar prgConfig;
+ private System.Windows.Forms.TabPage tabParameters;
+ private System.Windows.Forms.TextBox txtInstanceName;
+ private System.Windows.Forms.Label lblInstanceName;
+ private System.Windows.Forms.GroupBox groupBox2;
+ private System.Windows.Forms.RadioButton rdoNoListener;
+ private System.Windows.Forms.TextBox txtListenerPort;
+ private System.Windows.Forms.Label lblListenerPort;
+ private System.Windows.Forms.RadioButton rdoListener;
+ private System.Windows.Forms.GroupBox groupBox1;
+ private System.Windows.Forms.Button btnRemoveEndpoint;
+ private System.Windows.Forms.Button btnAddEndpoint;
+ private System.Windows.Forms.ListView lvwEndpoints;
+ private System.Windows.Forms.ColumnHeader colHost;
+ private System.Windows.Forms.ColumnHeader colPort;
+ private System.Windows.Forms.TabControl tbcPages;
+ private System.Windows.Forms.TabPage tabVerifyCertificate;
+ private System.Windows.Forms.Label lblX509Prompt;
+ private System.Windows.Forms.TextBox txtX509Subject;
+ private System.Windows.Forms.TextBox txtX509Issuer;
+ private System.Windows.Forms.Label lblX509Subject;
+ private System.Windows.Forms.Label lblX509Issuer;
+ private System.Windows.Forms.GroupBox grpX509Fields;
+ private System.Windows.Forms.ListView lvwX509Fields;
+ private System.Windows.Forms.ColumnHeader colField;
+ private System.Windows.Forms.ColumnHeader colValue;
+ private System.Windows.Forms.TextBox txtX509Field;
+ private System.Windows.Forms.TabPage tabRetrieveCertificate;
+ private System.Windows.Forms.Label lblRetrieveCertificate;
+ private System.Windows.Forms.ProgressBar prgRetrieveCertificate;
+ private System.Windows.Forms.TabPage tabError;
+ private System.Windows.Forms.TextBox txtError;
+ private System.Windows.Forms.Label lblError;
+ private System.Windows.Forms.TextBox txtTicket;
+ private System.Windows.Forms.Label lblTicket;
+ private System.Windows.Forms.ColumnHeader colInstanceName;
+ private System.Windows.Forms.GroupBox groupBox3;
+ private System.Windows.Forms.CheckBox chkAcceptConfig;
+ private System.Windows.Forms.CheckBox chkAcceptCommands;
+ private System.Windows.Forms.TextBox txtUser;
+ private System.Windows.Forms.CheckBox chkRunServiceAsThisUser;
+ private System.Windows.Forms.Button btnEditEndpoint;
+ private System.Windows.Forms.Label introduction1;
+ private System.Windows.Forms.GroupBox groupBox4;
+ private System.Windows.Forms.Button btnEditGlobalZone;
+ private System.Windows.Forms.Button btnRemoveGlobalZone;
+ private System.Windows.Forms.Button btnAddGlobalZone;
+ private System.Windows.Forms.ListView lvwGlobalZones;
+ private System.Windows.Forms.ColumnHeader colGlobalZoneName;
+ private System.Windows.Forms.CheckBox chkDisableConf;
+ private System.Windows.Forms.LinkLabel linkLabelDocs;
+ }
+}
+
diff --git a/agent/windows-setup-agent/SetupWizard.cs b/agent/windows-setup-agent/SetupWizard.cs
new file mode 100644
index 0000000..327611c
--- /dev/null
+++ b/agent/windows-setup-agent/SetupWizard.cs
@@ -0,0 +1,574 @@
+using System;
+using System.IO;
+using System.Text;
+using System.Windows.Forms;
+using System.Security.Cryptography.X509Certificates;
+using System.Threading;
+using System.Net.NetworkInformation;
+using System.Diagnostics;
+using System.Security.AccessControl;
+
+namespace Icinga
+{
+ public partial class SetupWizard : Form
+ {
+ private string _TrustedFile;
+ private string Icinga2User;
+
+ public SetupWizard()
+ {
+ InitializeComponent();
+
+ txtInstanceName.Text = Icinga2InstanceName;
+
+ Icinga2User = Program.Icinga2User;
+ txtUser.Text = Icinga2User;
+ }
+
+ private void Warning(string message)
+ {
+ MessageBox.Show(this, message, Text, MessageBoxButtons.OK, MessageBoxIcon.Warning);
+ }
+
+ private string Icinga2InstanceName
+ {
+ get
+ {
+ IPGlobalProperties props = IPGlobalProperties.GetIPGlobalProperties();
+
+ string fqdn = props.HostName;
+
+ if (props.DomainName != "")
+ fqdn += "." + props.DomainName;
+
+ return fqdn.ToLower();
+ }
+ }
+
+ private bool GetMasterHostPort(out string host, out string port)
+ {
+ foreach (ListViewItem lvi in lvwEndpoints.Items) {
+ if (lvi.SubItems.Count > 1) {
+ host = lvi.SubItems[1].Text.Trim();
+ port = lvi.SubItems[2].Text.Trim();
+ return true;
+ }
+ }
+
+ host = null;
+ port = null;
+ return false;
+ }
+
+ private void EnableFeature(string feature)
+ {
+ FileStream fp = null;
+ try {
+ fp = File.Open(Program.Icinga2DataDir + String.Format("\\etc\\icinga2\\features-enabled\\{0}.conf", feature), FileMode.Create);
+ using (StreamWriter sw = new StreamWriter(fp, Encoding.ASCII)) {
+ fp = null;
+ sw.Write(String.Format("include \"../features-available/{0}.conf\"\n", feature));
+ }
+ } finally {
+ if (fp != null)
+ fp.Dispose();
+ }
+ }
+
+ private void SetRetrievalStatus(int pct)
+ {
+ if (InvokeRequired) {
+ Invoke((MethodInvoker)delegate { SetRetrievalStatus(pct); });
+ return;
+ }
+
+ prgRetrieveCertificate.Value = pct;
+ }
+
+ private void SetConfigureStatus(int pct, string message)
+ {
+ if (InvokeRequired) {
+ Invoke((MethodInvoker)delegate { SetConfigureStatus(pct, message); });
+ return;
+ }
+
+ prgConfig.Value = pct;
+ lblConfigStatus.Text = message;
+ }
+
+ private void ShowErrorText(string text)
+ {
+ if (InvokeRequired) {
+ Invoke((MethodInvoker)delegate { ShowErrorText(text); });
+ return;
+ }
+
+ txtError.Text = text;
+ tbcPages.SelectedTab = tabError;
+ }
+
+ private bool RunProcess(string filename, string arguments, out string output)
+ {
+ ProcessStartInfo psi = new ProcessStartInfo();
+ psi.FileName = filename;
+ psi.Arguments = arguments;
+ psi.CreateNoWindow = true;
+ psi.UseShellExecute = false;
+ psi.RedirectStandardOutput = true;
+ psi.RedirectStandardError = true;
+
+ String result = "";
+
+ using (Process proc = Process.Start(psi)) {
+ proc.ErrorDataReceived += delegate (object sender, DataReceivedEventArgs args)
+ {
+ result += args.Data + "\r\n";
+ };
+ proc.OutputDataReceived += delegate (object sender, DataReceivedEventArgs args)
+ {
+ result += args.Data + "\r\n";
+ };
+ proc.BeginOutputReadLine();
+ proc.BeginErrorReadLine();
+ proc.WaitForExit();
+
+ output = result;
+
+ if (proc.ExitCode != 0)
+ return false;
+ }
+
+ return true;
+ }
+
+ private void VerifyCertificate(string host, string port)
+ {
+ SetRetrievalStatus(25);
+
+ string pathPrefix = Program.Icinga2DataDir + "\\etc\\icinga2\\pki\\" + txtInstanceName.Text;
+ string processArguments = "pki new-cert --cn \"" + txtInstanceName.Text + "\" --key \"" + pathPrefix + ".key\" --cert \"" + pathPrefix + ".crt\"";
+ string output;
+
+ if (!File.Exists(pathPrefix + ".crt")) {
+ if (!RunProcess(Program.Icinga2InstallDir + "\\sbin\\icinga2.exe",
+ processArguments,
+ out output)) {
+ ShowErrorText("Running command 'icinga2.exe " + processArguments + "' produced the following output:\n" + output);
+ return;
+ }
+ }
+
+ SetRetrievalStatus(50);
+
+ _TrustedFile = Path.GetTempFileName();
+
+ processArguments = "pki save-cert --host \"" + host + "\" --port \"" + port + "\" --trustedcert \"" + _TrustedFile + "\"";
+ if (!RunProcess(Program.Icinga2InstallDir + "\\sbin\\icinga2.exe",
+ processArguments,
+ out output)) {
+ ShowErrorText("Running command 'icinga2.exe " + processArguments + "' produced the following output:\n" + output);
+ return;
+ }
+
+ SetRetrievalStatus(100);
+ try {
+ X509Certificate2 cert = new X509Certificate2(_TrustedFile);
+ Invoke((MethodInvoker)delegate { ShowCertificatePrompt(cert); });
+ } catch (Exception e) {
+ ShowErrorText("Failed to receive certificate: " + e.Message);
+ }
+ }
+
+ private void ConfigureService()
+ {
+ SetConfigureStatus(0, "Updating configuration files...");
+
+ string output;
+
+ string args = "";
+
+ Invoke((MethodInvoker)delegate
+ {
+ string master_host, master_port;
+ GetMasterHostPort(out master_host, out master_port);
+
+ args += " --master_host " + master_host + "," + master_port;
+
+ foreach (ListViewItem lvi in lvwEndpoints.Items) {
+ args += " --endpoint " + lvi.SubItems[0].Text.Trim();
+
+ if (lvi.SubItems.Count > 1)
+ args += "," + lvi.SubItems[1].Text.Trim() + "," + lvi.SubItems[2].Text.Trim();
+ }
+ });
+
+ if (rdoListener.Checked)
+ args += " --listen ::," + txtListenerPort.Text.Trim();
+
+ if (chkAcceptConfig.Checked)
+ args += " --accept-config";
+
+ if (chkAcceptCommands.Checked)
+ args += " --accept-commands";
+
+ string ticket = txtTicket.Text.Trim();
+
+ if (ticket.Length > 0)
+ args += " --ticket \"" + ticket + "\"";
+
+ args += " --trustedcert \"" + _TrustedFile + "\"";
+ args += " --cn \"" + txtInstanceName.Text.Trim() + "\"";
+ args += " --zone \"" + txtInstanceName.Text.Trim() + "\"";
+
+ foreach (ListViewItem lvi in lvwGlobalZones.Items) {
+ args += " --global_zones " + lvi.SubItems[0].Text.Trim();
+ }
+
+ if (chkDisableConf.Checked)
+ args += " --disable-confd";
+
+ if (!RunProcess(Program.Icinga2InstallDir + "\\sbin\\icinga2.exe",
+ "node setup" + args,
+ out output)) {
+ ShowErrorText("Running command 'icinga2.exe " + "node setup" + args + "' produced the following output:\n" + output);
+ return;
+ }
+
+ SetConfigureStatus(50, "Setting ACLs for the Icinga 2 directory...");
+
+ string serviceUser = txtUser.Text.Trim();
+
+ DirectoryInfo di = new DirectoryInfo(Program.Icinga2InstallDir);
+ DirectorySecurity ds = di.GetAccessControl();
+ FileSystemAccessRule rule = new FileSystemAccessRule(serviceUser,
+ FileSystemRights.Modify,
+ InheritanceFlags.ObjectInherit | InheritanceFlags.ContainerInherit, PropagationFlags.None, AccessControlType.Allow);
+ try {
+ ds.AddAccessRule(rule);
+ di.SetAccessControl(ds);
+ } catch (System.Security.Principal.IdentityNotMappedException) {
+				ShowErrorText("Could not set ACLs for user \"" + serviceUser + "\". Identity is not mapped.\n");
+ return;
+ }
+
+ SetConfigureStatus(75, "Installing the Icinga 2 service...");
+
+ RunProcess(Program.Icinga2InstallDir + "\\sbin\\icinga2.exe",
+ "--scm-uninstall",
+ out output);
+
+ if (!RunProcess(Program.Icinga2InstallDir + "\\sbin\\icinga2.exe",
+ "daemon --validate",
+ out output)) {
+ ShowErrorText("Running command 'icinga2.exe daemon --validate' produced the following output:\n" + output);
+ return;
+ }
+
+ if (!RunProcess(Program.Icinga2InstallDir + "\\sbin\\icinga2.exe",
+ "--scm-install --scm-user \"" + serviceUser + "\" daemon",
+ out output)) {
+ ShowErrorText("\nRunning command 'icinga2.exe --scm-install --scm-user \"" +
+ serviceUser + "\" daemon' produced the following output:\n" + output);
+ return;
+ }
+
+ SetConfigureStatus(100, "Finished.");
+
+ // Override the completed text
+ lblSetupCompleted.Text = "The Icinga Windows agent was set up successfully.";
+
+ // Add a note for the user for ticket-less signing
+ if (ticket.Length == 0) {
+ lblSetupCompleted.Text += "\n\nTicket was not specified. Please sign the certificate request on the Icinga 2 master node (requires v2.8+).";
+ }
+
+ FinishConfigure();
+ }
+
+ private void FinishConfigure()
+ {
+ if (InvokeRequired) {
+ Invoke((MethodInvoker)FinishConfigure);
+ return;
+ }
+
+ tbcPages.SelectedTab = tabFinish;
+ }
+
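+		// Going back from the verification page also skips the automatic
+		// certificate retrieval page; the error page returns to the first page.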
+ private void btnBack_Click(object sender, EventArgs e)
+ {
+ if (tbcPages.SelectedTab == tabError) {
+ tbcPages.SelectedIndex = 0;
+ return;
+ }
+
+ int offset = 1;
+
+ if (tbcPages.SelectedTab == tabVerifyCertificate)
+ offset++;
+
+ tbcPages.SelectedIndex -= offset;
+ }
+
+ private void btnNext_Click(object sender, EventArgs e)
+ {
+ if (tbcPages.SelectedTab == tabParameters) {
+ if (txtInstanceName.Text.Length == 0) {
+ Warning("Please enter an instance name.");
+ return;
+ }
+
+ if (lvwEndpoints.Items.Count == 0) {
+ Warning("You need to add at least one master/satellite endpoint.");
+ return;
+ }
+
+ string host, port;
+ if (!GetMasterHostPort(out host, out port)) {
+ Warning("Please enter a remote host and port for at least one of your endpoints.");
+ return;
+ }
+
+ if (rdoListener.Checked && (txtListenerPort.Text == "")) {
+ Warning("You need to specify a listener port.");
+ return;
+ }
+
+ if (txtUser.Text.Length == 0) {
+ Warning("Icinga 2 service user may not be empty.");
+ return;
+ }
+ }
+
+ if (tbcPages.SelectedTab == tabFinish || tbcPages.SelectedTab == tabError)
+ Application.Exit();
+
+ tbcPages.SelectedIndex++;
+ }
+
+ private void btnCancel_Click(object sender, EventArgs e)
+ {
+ Application.Exit();
+ }
+
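+		// Drives the wizard: enables the navigation buttons per page and starts the
+		// background threads for certificate retrieval and service configuration.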
+ private void tbcPages_SelectedIndexChanged(object sender, EventArgs e)
+ {
+ Refresh();
+
+ btnBack.Enabled = (tbcPages.SelectedTab == tabVerifyCertificate || tbcPages.SelectedTab == tabError);
+ btnNext.Enabled = (tbcPages.SelectedTab == tabParameters || tbcPages.SelectedTab == tabVerifyCertificate || tbcPages.SelectedTab == tabFinish);
+
+ if (tbcPages.SelectedTab == tabFinish) {
+ btnNext.Text = "&Finish >";
+ btnCancel.Enabled = false;
+ }
+
+ if (tbcPages.SelectedTab == tabRetrieveCertificate) {
+ ListViewItem lvi = lvwEndpoints.Items[0];
+
+ string master_host, master_port;
+ GetMasterHostPort(out master_host, out master_port);
+
+ Thread thread = new Thread((ThreadStart)delegate { VerifyCertificate(master_host, master_port); });
+ thread.Start();
+ }
+
+ if (tbcPages.SelectedTab == tabConfigure) {
+ Thread thread = new Thread(ConfigureService);
+ thread.Start();
+ }
+ }
+
+ private void RadioListener_CheckedChanged(object sender, EventArgs e)
+ {
+ txtListenerPort.Enabled = rdoListener.Checked;
+ }
+
+ private void AddCertificateField(string name, string shortValue, string longValue = null)
+ {
+ ListViewItem lvi = new ListViewItem();
+ lvi.Text = name;
+ lvi.SubItems.Add(shortValue);
+ if (longValue == null)
+ longValue = shortValue;
+ lvi.Tag = longValue;
+ lvwX509Fields.Items.Add(lvi);
+ }
+
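+		// Inserts a space after every two characters, used to display the
+		// certificate thumbprint in byte groups.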
+ private string PadText(string input)
+ {
+ string output = "";
+
+ for (int i = 0; i < input.Length; i += 2) {
+ if (output != "")
+ output += " ";
+
+ int len = 2;
+ if (input.Length - i < 2)
+ len = input.Length - i;
+ output += input.Substring(i, len);
+ }
+
+ return output;
+ }
+
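+		// Populates the verification page with the retrieved certificate's details
+		// and switches to it.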
+ private void ShowCertificatePrompt(X509Certificate2 certificate)
+ {
+ txtX509Issuer.Text = certificate.Issuer;
+ txtX509Subject.Text = certificate.Subject;
+
+ lvwX509Fields.Items.Clear();
+
+ AddCertificateField("Version", "V" + certificate.Version.ToString());
+ AddCertificateField("Serial number", certificate.SerialNumber);
+ AddCertificateField("Signature algorithm", certificate.SignatureAlgorithm.FriendlyName);
+ AddCertificateField("Valid from", certificate.NotBefore.ToString());
+ AddCertificateField("Valid to", certificate.NotAfter.ToString());
+
+ string pkey = BitConverter.ToString(certificate.PublicKey.EncodedKeyValue.RawData).Replace("-", " ");
+ AddCertificateField("Public key", certificate.PublicKey.Oid.FriendlyName + " (" + certificate.PublicKey.Key.KeySize + " bits)", pkey);
+
+ string thumbprint = PadText(certificate.Thumbprint);
+ AddCertificateField("Thumbprint", thumbprint);
+
+ tbcPages.SelectedTab = tabVerifyCertificate;
+ }
+
+ private void btnAddEndpoint_Click(object sender, EventArgs e)
+ {
+ EndpointInputBox eib = new EndpointInputBox();
+
+ if (eib.ShowDialog(this) == DialogResult.Cancel)
+ return;
+
+ ListViewItem lvi = new ListViewItem();
+ lvi.Text = eib.txtInstanceName.Text;
+
+ if (eib.chkConnect.Checked) {
+ lvi.SubItems.Add(eib.txtHost.Text);
+ lvi.SubItems.Add(eib.txtPort.Text);
+ }
+
+ lvwEndpoints.Items.Add(lvi);
+ }
+
+ private void lvwEndpoints_SelectedIndexChanged(object sender, EventArgs e)
+ {
+ btnRemoveEndpoint.Enabled = lvwEndpoints.SelectedItems.Count > 0;
+ btnEditEndpoint.Enabled = lvwEndpoints.SelectedItems.Count > 0;
+ }
+
+ private void lvwX509Fields_SelectedIndexChanged(object sender, EventArgs e)
+ {
+ if (lvwX509Fields.SelectedItems.Count == 0)
+ return;
+
+ ListViewItem lvi = lvwX509Fields.SelectedItems[0];
+
+ txtX509Field.Text = Convert.ToString(lvi.Tag);
+ }
+
+ private void btnRemoveEndpoint_Click(object sender, EventArgs e)
+ {
+ while (lvwEndpoints.SelectedItems.Count > 0) {
+ lvwEndpoints.Items.Remove(lvwEndpoints.SelectedItems[0]);
+ }
+ }
+
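+		// Toggles the custom service user text box; when it is disabled again, the
+		// default service account is restored.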
+ private void chkRunServiceAsThisUser_CheckedChanged(object sender, EventArgs e)
+ {
+ txtUser.Enabled = !txtUser.Enabled;
+ if (!txtUser.Enabled)
+ txtUser.Text = Icinga2User;
+ }
+
+ private void btnEditEndpoint_Click(object sender, EventArgs e)
+ {
+ ListViewItem lvi = lvwEndpoints.SelectedItems[0];
+ EndpointInputBox eib = new EndpointInputBox();
+
+ eib.Text = "Edit Endpoint";
+ eib.txtInstanceName.Text = lvi.SubItems[0].Text;
+
+ if (lvi.SubItems.Count >= 2) {
+ eib.txtHost.Text = lvi.SubItems[1].Text;
+ eib.txtPort.Text = lvi.SubItems[2].Text;
+ eib.chkConnect.Checked = true;
+ }
+
+ if (eib.ShowDialog(this) == DialogResult.Cancel)
+ return;
+
+ lvwEndpoints.Items.Remove(lvi);
+
+ ListViewItem lvi2 = new ListViewItem();
+ lvi2.Text = eib.txtInstanceName.Text;
+
+ if (eib.chkConnect.Checked) {
+ lvi2.SubItems.Add(eib.txtHost.Text);
+ lvi2.SubItems.Add(eib.txtPort.Text);
+ }
+
+ lvwEndpoints.Items.Add(lvi2);
+ }
+
+ private void btnAddGlobalZone_Click(object sender, EventArgs e)
+ {
+ GlobalZonesInputBox gzib = new GlobalZonesInputBox(lvwGlobalZones.Items);
+
+ if (gzib.ShowDialog(this) == DialogResult.Cancel)
+ return;
+
+ ListViewItem lvi = new ListViewItem();
+ lvi.Text = gzib.txtGlobalZoneName.Text;
+
+ lvwGlobalZones.Items.Add(lvi);
+ }
+
+ private void btnRemoveGlobalZone_Click(object sender, EventArgs e)
+ {
+ while (lvwGlobalZones.SelectedItems.Count > 0) {
+ lvwGlobalZones.Items.Remove(lvwGlobalZones.SelectedItems[0]);
+ }
+ }
+
+ private void lvwGlobalZones_SelectedIndexChanged(object sender, EventArgs e)
+ {
+ btnEditGlobalZone.Enabled = lvwGlobalZones.SelectedItems.Count > 0;
+ btnRemoveGlobalZone.Enabled = lvwGlobalZones.SelectedItems.Count > 0;
+ }
+
+ private void btnEditGlobalZone_Click(object sender, EventArgs e)
+ {
+ ListViewItem lvi = lvwGlobalZones.SelectedItems[0];
+ GlobalZonesInputBox gzib = new GlobalZonesInputBox(lvwGlobalZones.Items);
+
+ gzib.Text = "Edit Global Zone";
+ gzib.txtGlobalZoneName.Text = lvi.SubItems[0].Text;
+
+ if (gzib.ShowDialog(this) == DialogResult.Cancel)
+ return;
+
+ lvwGlobalZones.Items.Remove(lvi);
+
+ ListViewItem lvi2 = new ListViewItem();
+ lvi2.Text = gzib.txtGlobalZoneName.Text;
+
+ lvwGlobalZones.Items.Add(lvi2);
+ }
+
+ private void SetupWizard_Load(object sender, EventArgs e)
+ {
+ this.MinimumSize = this.Size;
+ this.MaximumSize = this.Size;
+ }
+
+ private void linkLabelDocs_LinkClicked(object sender, LinkLabelLinkClickedEventArgs e)
+ {
+ linkLabelDocs.LinkVisited = true;
+
+ Process.Start("https://icinga.com/docs/icinga2/latest/");
+ }
+ }
+}
+
diff --git a/agent/windows-setup-agent/SetupWizard.resx b/agent/windows-setup-agent/SetupWizard.resx
new file mode 100644
index 0000000..9d11b6c
--- /dev/null
+++ b/agent/windows-setup-agent/SetupWizard.resx
@@ -0,0 +1,1432 @@
+<?xml version="1.0" encoding="utf-8"?>
+<root>
+ <!--
+ Microsoft ResX Schema
+
+ Version 2.0
+
+ The primary goals of this format is to allow a simple XML format
+ that is mostly human readable. The generation and parsing of the
+ various data types are done through the TypeConverter classes
+ associated with the data types.
+
+ Example:
+
+ ... ado.net/XML headers & schema ...
+ <resheader name="resmimetype">text/microsoft-resx</resheader>
+ <resheader name="version">2.0</resheader>
+ <resheader name="reader">System.Resources.ResXResourceReader, System.Windows.Forms, ...</resheader>
+ <resheader name="writer">System.Resources.ResXResourceWriter, System.Windows.Forms, ...</resheader>
+ <data name="Name1"><value>this is my long string</value><comment>this is a comment</comment></data>
+ <data name="Color1" type="System.Drawing.Color, System.Drawing">Blue</data>
+ <data name="Bitmap1" mimetype="application/x-microsoft.net.object.binary.base64">
+ <value>[base64 mime encoded serialized .NET Framework object]</value>
+ </data>
+ <data name="Icon1" type="System.Drawing.Icon, System.Drawing" mimetype="application/x-microsoft.net.object.bytearray.base64">
+ <value>[base64 mime encoded string representing a byte array form of the .NET Framework object]</value>
+ <comment>This is a comment</comment>
+ </data>
+
+ There are any number of "resheader" rows that contain simple
+ name/value pairs.
+
+ Each data row contains a name, and value. The row also contains a
+ type or mimetype. Type corresponds to a .NET class that support
+ text/value conversion through the TypeConverter architecture.
+ Classes that don't support this are serialized and stored with the
+ mimetype set.
+
+ The mimetype is used for serialized objects, and tells the
+ ResXResourceReader how to depersist the object. This is currently not
+ extensible. For a given mimetype the value must be set accordingly:
+
+ Note - application/x-microsoft.net.object.binary.base64 is the format
+ that the ResXResourceWriter will generate, however the reader can
+ read any of the formats listed below.
+
+ mimetype: application/x-microsoft.net.object.binary.base64
+ value : The object must be serialized with
+ : System.Runtime.Serialization.Formatters.Binary.BinaryFormatter
+ : and then encoded with base64 encoding.
+
+ mimetype: application/x-microsoft.net.object.soap.base64
+ value : The object must be serialized with
+ : System.Runtime.Serialization.Formatters.Soap.SoapFormatter
+ : and then encoded with base64 encoding.
+
+ mimetype: application/x-microsoft.net.object.bytearray.base64
+ value : The object must be serialized into a byte array
+ : using a System.ComponentModel.TypeConverter
+ : and then encoded with base64 encoding.
+ -->
+ <xsd:schema id="root" xmlns="" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:msdata="urn:schemas-microsoft-com:xml-msdata">
+ <xsd:import namespace="http://www.w3.org/XML/1998/namespace" />
+ <xsd:element name="root" msdata:IsDataSet="true">
+ <xsd:complexType>
+ <xsd:choice maxOccurs="unbounded">
+ <xsd:element name="metadata">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="value" type="xsd:string" minOccurs="0" />
+ </xsd:sequence>
+ <xsd:attribute name="name" use="required" type="xsd:string" />
+ <xsd:attribute name="type" type="xsd:string" />
+ <xsd:attribute name="mimetype" type="xsd:string" />
+ <xsd:attribute ref="xml:space" />
+ </xsd:complexType>
+ </xsd:element>
+ <xsd:element name="assembly">
+ <xsd:complexType>
+ <xsd:attribute name="alias" type="xsd:string" />
+ <xsd:attribute name="name" type="xsd:string" />
+ </xsd:complexType>
+ </xsd:element>
+ <xsd:element name="data">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
+ <xsd:element name="comment" type="xsd:string" minOccurs="0" msdata:Ordinal="2" />
+ </xsd:sequence>
+ <xsd:attribute name="name" type="xsd:string" use="required" msdata:Ordinal="1" />
+ <xsd:attribute name="type" type="xsd:string" msdata:Ordinal="3" />
+ <xsd:attribute name="mimetype" type="xsd:string" msdata:Ordinal="4" />
+ <xsd:attribute ref="xml:space" />
+ </xsd:complexType>
+ </xsd:element>
+ <xsd:element name="resheader">
+ <xsd:complexType>
+ <xsd:sequence>
+ <xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
+ </xsd:sequence>
+ <xsd:attribute name="name" type="xsd:string" use="required" />
+ </xsd:complexType>
+ </xsd:element>
+ </xsd:choice>
+ </xsd:complexType>
+ </xsd:element>
+ </xsd:schema>
+ <resheader name="resmimetype">
+ <value>text/microsoft-resx</value>
+ </resheader>
+ <resheader name="version">
+ <value>2.0</value>
+ </resheader>
+ <resheader name="reader">
+ <value>System.Resources.ResXResourceReader, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
+ </resheader>
+ <resheader name="writer">
+ <value>System.Resources.ResXResourceWriter, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
+ </resheader>
+ <assembly alias="System.Drawing" name="System.Drawing, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a" />
+ <data name="picBanner.Image" type="System.Drawing.Bitmap, System.Drawing" mimetype="application/x-microsoft.net.object.bytearray.base64">
+ <value>
+ iVBORw0KGgoAAAANSUhEUgAAAnIAAABOCAYAAACkCftqAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8
+ YQUAAAAJcEhZcwAACxMAAAsTAQCanBgAAAAHdElNRQfiChgPIB4skikEAADGP0lEQVR4XuydB3gVV5K2
+ PeNAMjkKISQQIucMTjjngAPGOIExOeecFVBEOeeIkIQkEAgQOWdwnvGMZzy7k2cn7+yE3f3rr6/61tVR
+ qyWEjT32rO7zfM/t2923b4dzTr23zqk6t33nxU10K/XdlzbX1Ctbauj2SVtr6pUAS6/W1h2TA92687Wg
+ GrpryvZauvON4BvqrjdDqMlboaKmb4dVa1q4qNk7EbXUdKqlZtN2uNX8nUi3mk6vrWbvRrnVfEZ0DbWY
+ GSO6e1asWy1mW7p7Tly9au5Sy7nxorvnJdRSy/mJbrVakFJLLRdaarUotZZaL06rpTZL0mup7dIMt9ot
+ y3RU++VZog4rsmuo7cpqtVuV46j2q3Pd6rAmr4Y6rs13VId1ljquL6ilTht21qkOG3ZRx41Fok6biutU
+ 580ltdRly+5a6rzVUpdtpfXKw79M1DWg3KW9DtJt5eQZWCHqGrSnWvhsk2fQPkvb94q6bd/vlm4z13kF
+ V9aSZ0htdQs9cFPyCjt4y9Q9/FCD5BVRLad19m0NldM53ayc7lF9cnoG5jMyn2F9cpcHlatcQI7lx0HO
+ ZdOSlmOnMq51QeqDQ11ROdUtlVNd1PoKof5CqMudNxZSl027RFq/tf532VBInrxft02F1J2399hSJO+d
+ 1hW425Uua/LJY22BvNdoW1xtD9ojp/bKbNMgs73TdhByaidNme2qU7urbbJT263S9l3aeHe7n+QW7ILd
+ ZrRgWwKJTTHsTV1Se1VDLrumUntn2kInW2naUlOws2J3nWyyy16bNlztOgQ7r7IzwB2vbxc5MoSNMyCT
+ Q2pxivKLjWvs3GPnIid2+jJqBDlVI8iJTIBTOTUoZoPj1CBBjSBX27CZ+meCnAkETvJ0gAgn2KhPTjDz
+ ReUEV05ygjVznX2bd0SVW7ruRnI6v4bI6R7VJ6dnYD4j87nWJ3d5UH2NIGfKY6slxzrjULdUTnXRCeQU
+ 4jw2F9ULcp4bdwrA+fJ+PTYXk8c6bgNcbYuCHCRtitHeKPA5wZzZpkFme9cIcjcPci2m8/5sZ5u8XYdN
+ bgS5GrqlIGee6O0vM7SxzIuxX6xc8CT/2jeHVRfAqcybrw/G/sBU5kM1H7ZZCBxBzgC3hsCbyiy49QGc
+ U2VwqjSqVkaFs1dEqOX8ZJFZaU2Ac8uh8kO3At5UZuMFmQ2bvRE0VRe82aWNa8e13FCzOq9nGKsH3uyf
+ ZV0D4A1yMi4qtyEyPQ8ORsxJTkZRVW1E97jlaHQdjLOuczLqKhMIPF2QcCOYsG9zgpWGygmQ6pMJXG7w
+ 2lFbTrDmuM34jlu2/aFax7qBnK61obLf3xtJn4v5LFXms3aXESe5yky3YC5L/F7jzwGWXboZoDPVdRu/
+ K8BxmTfridQVA+bcdclWx5zqpCmtx6Y3Dsv2+i6Qt3EXdd1URN14fx9+99lSTF7YVyCNhT+C3K50ccEc
+ wM5sb/C58+pqmHOCOsgEOrMddGorVWb7Cjm1wWYb7dSGQ84gVy39kw+bYbcn7s8O9seUo+2CTTNADjLt
+ n2kXVXbbadpYu/01bbMd5Ey7Dpk234kJTJksobLzRr0wBzmxjck+ykMGI0FOHPVF1AhyjSDnltlIODUi
+ ZiPj1BCZMhsv6P86yDkaOQdjqPqmgFx9coKRhsoJgupTQ0HOO/JwtWz7mdvM77i313Ncp3NyktO1NlRO
+ 97g+6XMzn6WT9Jm7y4opV5n5JoGcSuuYU500pfVYAU5lr+8KchA8cwA4gJw3f4a3DiDXbr3Lq89ti90z
+ BwHiFO6gbzvI2dUIcpbsvPF/AuTsJycyLsKUXKS6Ix26VM2bZr+ZkONNd3g4KvOBqkyAazLVgjizkLgL
+ i6s7Fe9mAYPsBRDSwqmF1izEZuF2AjmnymKXCXJ1wVxd8FajcjtU/voADmoIvEFmo2XCmzZskFPDpwBX
+ H8hVw5shG7yZsjfkkDb6ppyMgwlrdtUwNmqQbOBmVw3jZnSXWlJjiO5TG7QZxtRtVBXYIMMom6BmyjTq
+ ni7jb8oJEpzkBB/1yQ03pifMtU6hyZR7HyeQ+rbJdZ2mnO5RfXJ6Bk5yeqbmM3cqEyqz/NgFuHOXO1d5
+ rP6DAaizl+NqmWXdqT6o6gM6p7pn1k9d1nqs3jgFOnvdV5DDO0AOy923FFGPrcXUfQMfg/8Q6vAMqMva
+ QjfMqScObRXaInzWdZ1W5VLHlTUBTpe1/TPbRcip7VTdDNBh2ak9d8vBFsBGqL2or5tVh/E42SLItF81
+ ZNg6SG2g2keVkw2121lIbXAt+6wwVwfQ4bPafCc2gBraxXpDkDOZxgZ0kJuBbJzkxFNfRI0g1whyokaQ
+ qym7ATFVw9g0glydcgNMI8iJnO5RfXJ6Bk5yeqbmM3cqEyqz/Nj1fwHkEPjgwzAHr5x0rxogB7DTLlaF
+ OW2bAHCmFORMNYJcI8ip3Axk4yQnnvoiagS5RpATNYJcTdkNiKkaxuYbDHKmMVd5Ohh9JzhwkhNsqJxg
+ xb2ugSDnCETfVrmusy6Z96kuOT0DJzk9U/OZ28uFKbP82PVtATnVFwE5BD4A5Hpu5WO6ulWrxe0FtzGA
+ uK68DKEdQnsFcAPAqWcOXjltu7RNawS5RpBTuRnIxklOPPVFdMvGyJknZ46Nc7qoGhdtuyH2m2a/oU43
+ vSHj48yHrA9d5CoI9kIi4gIEORUuewG0F1KVE8Q5Fn6WU2WBTHiD7BXPrIx2kKsP3kxpw+AEc9qgODU6
+ KrNxcgI4bdycZAIcVDfAoWF1yQHcVPYGHDIjU+uDOLvhgGoZGJfxcQI4NVwmuFkGzYI1D382im5wYxAT
+ o2iDtzrkNrIGyDkZZ9OIQ56GgXcCASc5wYQJGVANMLGBmsgJcL5iOY2DM9fZt32tMu+Ng+z310nm82io
+ 7OUBspcZE+BU3QxVA5xVVk2Z5dkuez2w1xfIrE9Sp7SeOdRFlb3OemDd5l2iDgxn7vpttAEKcoA8gThE
+ rLre0b0KuINXrrq9YZhztT+mZw4A59ReYb12s0Jm+2e2iTeCORPi7EJ7bLbVppzadXf7b9gEsQvzk2vZ
+ DhVsjGZFuFEUq6MdczkqxNYZtk+BzoQ6ux1VmbZW7bACnQl1dpCDnEDuRjCnUqawc4fJInZmgTDWX2Wy
+ jhvglIds4+ScWMrUnS9uZE4KoqYT1ztuVzWC3FcEcmahNQvzvzLIqcxGqxHkLFUbtq8G5JyMtcrzS4Bc
+ faoBIk6wYkLMl5QThH2RdfZtX6vMe+Mgp3tsl9OzupGcyoQJcZAJcCoT5LxQxrCfAXDVsiDPLNcqez2w
+ 1xfIrE9Sp7SeOdRFlb3OAuQ6b+E6vanQktZvow1wAjkEOWDZmwGwO47rAHLt11ptUOc11QEQsp7bKrMN
+ 025WSGGuEeQaQc7UzYLcXZM307iVMXTbs+sct6tuadeqnqQJcu4LMC8OF2u7EXWBm116o5u8HuyW08OB
+ 9AHqQ3UCuVpdq0ahUZmFyqnQNQTgVCbIOVUMu1CR1MV9I5CzV1ZV64W1K7i9EagL3lRmA2NvdOwQp42X
+ Cg2dvttVF8CZjalKuzzQKDcE4DS/lBPE2Q1BfTIhTmQzQuZnqNpw1e+tcBvAIN4PwrJNTgbWlBpi00h7
+ 3gS0OcGCk2oAx1fkfXMCrm+DnK6loarRtaz31+H+1yen5+okLRdmWXGCOHSpioL4j4JLus2pjLrLr0MZ
+ N6X14ma7W53qpF1al+31XIU2ACCH1CMAOQgpSOCF0+UeW0ukq7XzWm6LXN2qVnvkSk3CwjqP9RbQqQdO
+ gc5sx0zvnL1NNNtLyN6m2qXtrr2Ntrfh9jbelAJdTajDn35LTkCnctsdwy456YuAHORkU02ba9piO8ip
+ 3EBnQJ3ae6g+kDOlfKGy84dyisrOMSKTdewcxGxkhzkVWOq2iRstvbCB99tE3jMjaXFyGfWZE0bffWGj
+ HMPOX/I9p5U3Kz2RRpBrBLlGkPt6QE4B7qsCOTtU6GdZ1whyNeR0LQ2VE8ipzOdRn5yeq5O0XJhlBbKX
+ pf+LIId3pCJBFCtAreP6QqvNcQC5LuvyBOR0fBzaLnt7VpdnDjLbS8jeptql7a69jba34fY23lQjyH07
+ QA7vALnbX9xAo9dmkMebW+m+LTtp/MYcuvulDbzN2YvXCHKNICeyNxLaeKjMBsXeyJiNUiPIffUgpwbX
+ NMKmYfb8loGcT9QRkRMkfRvkdE0NlRPI6WfzedQnp+fqJC0XZllRaTkS/YuDHN7RPpgg15XBDSDns62E
+ um5Ae+IMcvgMj512sUJou9CWaRtngpx65cz20GwvIXubape2u/Y22t6G29t4U40g980HOXjhAHHt+Z4M
+ WBRLd/HnOycF0JOBeXTby0H0aGAx89ZX5JGrdUJ6wrYLkc91jIuz3xzIvHmANfsN1j5tx4dhAJwJcm4Z
+ D10LgbtAaEGxAZzKXuDMAmkWVBPcTHgz5VQZTLnHKNgqFSqdygQ2JwHinEAOMhsBeyOhjYfKqYExGyMA
+ nMpstExoc5Id4FRYbwc4J3gzZQc5syE3wU1Vl0GoAW2mHIwOBOOk75ahcjZkkMKbAJxNJqypahhYlml0
+ TUPs6TLSkJMRVzmBgJNqQJoNMJyApKFyAqEvKgXAWyWn37gVcroP9cnxPruegdOzcpLTs9fy0T3kgFta
+ fuoqYyqnsukuu0EWyJnj5pzKvgp/cFT2OmTKDnQqs+7is9ZbJ5ltgB3kzICHLhuKyHszZnookpQk9rFy
+ kL1NkgAIVxcstqm0XYPUK2d2s5oy21CnNhbSNtjeRmu7bbbjkFNb7wxy1VJ7YtoaldohdShATvYKqmXj
+ 6oA5025Cdrtqt7tqj02oEzUQ5JQFVE7cANk5w+QQlZ1V7Dyj7OMkNw8ZMHfbS8xSL2+muyauI9/5ceQx
+ LYi+88Iaun3iJtE9GzIY5LZR+zf9ZR/ZH+z1wkY3hzWCXCPIieyNhAlxkFMDYzZCZuPUCHLOBgz6vwhy
+ TnBzK+QEY19GTr9xK+R0T+rTVwVyqkaQqw1y2N5Vc8ptda1ztT8qs01CAASCHwBz1vRe1ZGsZvsGeLN7
+ 58y20mxDndpYSNtgexut7bbZjkNObX0jyH1zQe72F9dR74Xx5D03mm57djV9l6ENXagANoDc/Zuz6Tuv
+ +FPTF7fQg1vyqBl6NPn73+HjKIfdUpCTkzPBzS4XyNm7Vs2bojfKvHE3A3Lmw7I/SEgesFPuOFsBsRck
+ lVnYFN60MGohhczC+0VBDtAmlcaoVApyUsHqAjmHiqyyV3yzYbgRuNllNkKQNk5mQ+YkbfDs8KayGkv8
+ 03XJAdpUdniDtOF2athVagjsshsNMRRqUGyGBmoIvEFuA2eAm8rJSNoNKdQ12JKnAW0qJ4OtcjL0TjKh
+ rQZENEBOAHOzcoIrL15vV3de/3XJ6Tft5+h0LTeS0z1skMxn5JLTs3SSU9lQKdCZMsueUxlVOZVphbua
+ dYDXG58hrT9OdUul9c+EOq2rmPrLrL+o2/a6b7YHADkdJ4fJ9bEMaNNxc5ZXDvtVe+VqeuesNslsr7SL
+ Fct2kFMB4iD10Jkgh7bUXK5PZhtttt32Nt2p7XfLwWbYIc78DBskdseQk72Catk4dWQY9tC0k6YNNW2r
+ KbW9dhstqgfkTJlcANm5QWXnDBm6ZbAIZPKKyuQZt8PKxj/KRfC+3TmJOYmBrcmkTTR4WSq1nuLvyFcA
+ ufs2ZdNtL2+l217cSK1e20zj1qXz+i10x4vV4+UaQa4R5NwyGwmnRsQuE+KgRpCrW40gd2PZAQkyAeqb
+ Ivs5Ol3LjeR0Dxsk8xm55PQsneRUNlQ3AjmVU1l1KtNfNchBMn8r6qdRf1G3Gwpy6qGD1EvntXmXRK8i
+ 6KHzem5PGOJuBHIa9IDt+OwEc4A3SL1yKgW4RpD7vwFy0B2vbqO7Jm+nDm8H0fDlCdKNeserzmPfLI9c
+ joAc+OrOlzdR3/kx1OGd7dT0OYZC136NINcIcm6ZjYRTI2KXCXHaGDWCnLMaQe7GsgMS5ARS/2zZz9Hp
+ Wm4kp3vYIJnPyCWnZ+kkp7Kh+raBnE7Eb6+/qNsNBTkIy5JPzgVyXTYUiFcOQo65hoAcvGwyuwPvp+vs
+ bR7aRQU5O8xp24l3p3bWlNlGm223vU13avvdcrAZjSDn4gcbZ3wVIIcu0xErEqnbtO30nZe2iaftzsl1
+ e+Qe2JJL33mF95NgCOasSZvpBf9Cum1y9X5fCuRMiGsQyJkXa8h+U+w3DtIbWxfAqfRB2eHNVA2A04Lg
+ GhenqgveTNnhzZS74NoKtsqpEkCoMHaZFcyUHeTcYyBsFReV2/5ZK7wps5FwakRMmeCmqm9cnMKbXWaD
+ qKpuNF1yADioLoAzG267dL3Z+JvQpqphPGyGRY1PTYCrPU2RjoUTA+Ygu0G0G00YUydwczLEppyMuJMU
+ BHozjAyIO0L944+Qb6TD+CybnMCkoVL4cQIkJ3lHHxU1dN3NqqHHMPezy6nb98uAntM9d5J7LN1NQp1T
+ mTGl5aw+qKtRfjVBNS+7y3eQC+RMOdQRO9Cp7HUOUpBTiHPyyKns9V5leuAAcjpODuvQhsALh2AHmUh/
+ C4IguK3hNghjdNvLMiDOWqfSdgswZ49kdZICnAl1CnIqextstst4t7fdkLbrpsx2X1XXWDlNEuwktUWA
+ OnEu1CNHe1cH0KkTROVkZ01bLPbYZavl3cmW24DOZIEbgZzKhDmVnUlMZrEzjTirDPaBFw7dqT4zwqjv
+ smQZF4c8ceheFW4yOAqRq8pYt720QcbI3fai4X3D+LlnltNz24qoKViLv//1gNzLvA3rJ21z1O2vwkNX
+ rTsmA+6qhZvWf1EcRe05Rdd/8DmtSt9LbabyDReo4xtq6M43AHl849/kh+FSk7cAdtVqOo0fsE1N8NCn
+ 8sN3qdk0LhQuNeHC4aSm0xn63gXQ1VbzGYA81iwGOwe1mA3Iq63mc2JrqcVcAF5t3T0P3jmEjlu6e4Gl
+ lguTa6jVIlTcmp+h1osBddVqswRwZ6nt0vQaDYldjSD37QY5NfwAgQFxx+je9DN0T8YJ6htbRT0YXHpE
+ HhH57GD4cskJOm5WTiBXHyyZMPVNkf3czPNV/auBnErLZo3y+zWCnAlxtxLkIHwGrGF8XO/AMhoUvIf8
+ AkqpO38PgCcBV3WAHNoy9cx5ri90r3NqC9FWmiBnjptrBLl/TZBrOcWfhixPo/ZTQyx2QkDDiy5WugHI
+ PbAlRzx3uu6OV7ZSUz5+y7cDqOUkPtYL/B3d2FDhR8wfVeFk6gQ5l+xdqpB5M8ybpDcO72NWJtLuU9fo
+ f/73f+m//+d/6A9//gsdu/wBb2PIM2++6yFB+uD0YcoDdepSVTl44twFx1aw7IUPsnvi3AXXqVA7FH7I
+ dF1Ddm+c6e62V0KpiFpBHSqvWbmdGgFtKLSxcJI2MioT3uqDOMiEN8gEN3yubhitrouGgBuEeRVV9QGc
+ U2OvulmAq4a3ugDOoRvVZcxMaDOXTZmGEwZV5WR0nWQ33DUMvSsHnAogAE/cuNQzND7lDPWLqSK/6CPU
+ M+oo+UYfE5jTZbxDCnmmFEKcAMZc5wQ9JiR9G1TfeZvXZZd5H/TeNFR2eKtL5rOW520rC3Y5lR+VWfbs
+ IKfSMmyXG96MOuD+U2OrP1adqq4/9cEc6qhCnL3uSv211W2zDUDb4LHZ6k4FyGEqL8CbeuSwDMEbNyKs
+ gh6Jr6L7oytpUMg+Br7qNskCumqIM9sxvKOLFRPsA9Dw2ak9RFupQIf9IEAd2lWnttcus60223GznVc5
+ 2QORgw0x7Y1dCnMQYK5ZHbbM0ebVAXJ2qT017a2TTVZ77ZZhy+sDOXw2OQEyGUKlPX8myCmTmDLZRZnm
+ u8w6iCgV7uHPrd8Kpv5LUuguOKqUhVyc5ARykPLW7RO30kMb8uiuF2qzGPRIQA7d8eLWrwfkdB36eQFz
+ pvTiITvU6c1bkX2I/vGPf5D5+utf/0YTVsVQ0zfhgbNuvPkg9CGZD/Gut/ndBXO1xHSvpG9Xk3dqyixk
+ qrqArvms2nIq6JC9QqCymJXHrEz492TX3QssKdCZMiuwU2U3G4O6gK4R5L65IOdkpFU1DLsL4BQM4Gnr
+ HVVFfWOOsY5Qn+jDDHJVNYBNAQ4wd2MdcUs8ezY5wc8/Uz583XY5bTfXNUS3GuTcMqHNAe7MZy3P26E8
+ mHIqSyqz7P0rgBzUdYsr5chmbisMkIMAd123FNOA7eX0RGIVvZ53kl7LPkmPJxymXv6WV07HzKmq261q
+ zxyWtZsVy1hnbw8V5FRO3az29tdUI8jZ7PQ3COTumhJIt728mdq+FULD12ZT69eYj17dJt2rXwTkHt6Y
+ T03qmNHhzudW0T2IanXaWJ/sP2jCm5yUAXAiBDe8zAAH1dOtau9O1S5V3LzA3afof/7nf+h///f/uTCO
+ 6P/9v/9H9y/aTk3f4H3kYQDkLKH71LFbdSpAjh9mA7tU6+padepSdXel1tOl6tSVqmrm0t1zUDksOXWn
+ tpwPzxxU3aXq1kJAXG1pV6ops0sV3aj2blV716od4FQ3gjgFN8iEtxpy5Ykz1WCQM2ZtsDfapuwNPORu
+ /OuBNxgWXXYGOWsQt8JbLYBzyex+UtmN4s12ozoZZlNqzO2GXpe1y7RHJMCNjVXUIYE4CCDms+NQjS5V
+ hRCFMhPgesUcJ7/YE/K9XjFHeRnrjrp03C3dX+EQUtBxAiKVCUQqE76+TjmdX0Nknrs5pk6X7ZBnejzl
+ OeD5ueDtpnQLoc78k2EHO7Nsq9xAZ8isK9VgV/PPkNY1ATquowpvXbdx/WNp3YXqq+OmOm22QK3blhJ5
+ hxdOu1ZlHX/utqWU+geVCby9lXeKXs89TU8mHZZuVkSydsMMERsKa7ZNLohToNPPAD3AHKAOn7VNBKzZ
+ pe2pAh2W269kYFtutcMmuOmyHeTw2Q50jgDHqvVH3w1zSW5wM3uAWjC86bJ2r8o6A+CcVAvqDJCzw5zp
+ EDFBTvVFQc6EOTvIOUEcZAc4lQlxkIIcOAgBC+KQesWfBi5OoJ4L4gTebpvEgGfjohrMVA/IfXfianp4
+ Uw7d/uKaGhymwvF9Zkd+XSDnb8nmjTM9cnpDTOnNe2pLBr3/6Y9dCGe9PvnBZ9RlGu8HarY9EDwwfVD6
+ AEVTq7tXVU3eBsBZ3rib8chpgVOZBVJ0E544CP9u7P9wUFHsQuWyxJBml61yqpwqsvmPzWwAtEHQRkLV
+ CHL/+iCn7wJ1DGDiVQNEuOBCQcNJCnc9Y46IfGMZ4OIY5lgAvPrkG3ucv+MMSiZAfVNknt/N6MuAnK5z
+ BLUb6VsOcnaIg/DZhDmpxw51vIYYxDy3lgi0wSPnHifHy+qVA8z1DSij+6IP0nNpR+mp5CM0LqpS4A5p
+ SRDNKhPrr7dk98qZIId3dLFKNyuiWjEWzgVq6n2zgxyWdVuHVdzeMsxpG6xtsrbP+tlst3W5EeS+XpC7
+ 7aWtdOergdSceaT/Cr7vrzP3vLLJgjZmHzsX1WCm+jxyDHAAuTteWluDw1ToIb3jpfXfbJDDjcLNazkl
+ gN4KzqL3v/dD+s3v/0iHL71PrwRmVt904wFAeGCNIGdUUoeK3AhyjSBnBzkTyhTm4EXDZxMwVAogCiG6
+ nwV0x9wyPXJOIKfqFVdbCk89GPbsMuHKSQ3d72blBGkNkQlypuwgp2oEuWqQA7x5+vM2A+TscqrjptQj
+ B2DTrlVI1jPAWTC3i3z9d9PgkL0McPtoWPge6rltt0CcL7cPPV3RrJinFfOz6swOTkCHtk27WAFzWIYU
+ 5EyZUKfrOq/GZ6vNNdtibZ+1jTbbbV1uBLmvF+S++1ogecyKoKGr06kJkv3yZ0lBwmxzmzE2TlWDmeoB
+ OQAcQO6uV9bX4LBqWSlJvjTIiYyTkhMzT5hhDWPjbneCNtfnWutdEpCD+Ab2nhVKCyIyaVnGAbp/ZZzc
+ bBkX5+rLrvUgFORc8GaqoSBnBziFOAjLdoirAXQ3AXEo8Fo5AHMmuJkVCLLDW33j4iCz8prwphVepQ2D
+ uWw2HibIQfXBm+qGEPclAU5VF8g5NebmP3i3DHBzkmlU7AbHgrja4KYSY+Z6h0zDZwpG0fMWAZx9HJwp
+ E+B0Wce/ARiq4Q7j3I5R72gLwABkdoAzYcQJXJykcAcvnEqhrXfcSZFffP3S/U2oc4I2+zpzvy8j8zfs
+ crrm+gToVdlBzpTeeyc5PWdHfQmg03JpBznIsTy71A1l3pD9T4/KrFPdtpdS+00MTgF7BKoU3gTkeDvq
+ YY066lCnneo+2oSuvM0Ncq52owvWM8hhwvwe/Hu+/qXUm3+nf9Ae6oXf523oTsU7ulcx8wM8c0gejFQl
+ 6G4Vr9t6jO+1AM5s97BO05JoUINdJshVA122wBzeAXQW1GWI2i1Dl2v18BcT4tosQQYCVe1xc6ZNgMRO
+ GCCnMGeCHPRdtm9vpRygkIrTtKbgEtu1yFrgZlctW3eDsXJqUyET4iAT5EzZQc6U2HjYfBvQQfWBnMoO
+ clDTN0KEV8AngLTWb/rTiPV5Epmq7KJM43ZYmUxk5yUHkFPd+fI6um9NWo2uVXjhVPiurNONDZXTj9U6
+ MfOk9UL4oppOCSSvWTvIe3YktXyTQQwXbQCcSbomxLV5K5hGLo2h4UtiJUGfJOmz33AXuLkBTgncgDZV
+ jQdtgzezgNgLEmQWMi18ZoF0F1B7AXYo5CqNUIUAc2blgaq9cLUTADsBmymzAldX9GpYs0ObuWzqVoKc
+ +x/r/3GQMw0h3tVYQk7GVOVkfGuoHpBTSDNBDsAGmNNtst0FEOpRA9ApbJgAd7MgV5dMUDIhT+Gtd8Kp
+ WuoTXy3drycDnso85q2UE8CpnK6tPn2TQc5xnavMmjLLtOrLgJzX1jJ6LPkwxZ77IcWcfL8GyNWCOMih
+ TjvVfQU5HSOnbQZArjtvB8D5BlpdqL34uH2CyqlnANd/QJ+rHcKYOi/e3w5ykCcLk+jDS6dAB6H9A8Dp
+ eDkncDN7OdxalSUCwAHmoA4r0BY7g1y10MtiyWzzVaZtcNsKt02pG+QmxRZR4N6LtHrvR7T50FmanXvW
+ 0Z6ZqmUH/wVADqyCd3CNz4IEGrw6g+56mXnnVXCM5aW7VSAHgLtnVYqMlTM5TCHuuxPX092vfkmQw4Hk
+ YPYTwwnDrYhuVb6I77yCiwyg4UvjKar8JGUcPEsPb0hzXbBT3jhAHd8sF8j5Tg+i3jN4HW6ueOD4Zjnk
+ j6udO04jVa0AB3ugg6iOAAfIHuQAIdDBHuxgBjm0mAmY40J5E4EOEPLFIdjBDGwwc8ghZxzeGxrgANUV
+ 3KByCnAwZQc5s5GpD+RMgHOCOCsPk/Xv1Q50dojTBtSu+gAOsjfijo19PV2qJrxVA5x2p1YbHXeXkQFv
+ 7i4kGC6Xahk6lwGsYRBdxrIumYbVlBhnF7xB2nVqGnCFNjfEuQBAPW8AOawDNChUyD78DphD16cd5pzA
+ xJQT6KicAKl+Acws+TKwQQpvdYGe7ufkwbsVsOfN982HIRfAifuC+4T13WOcr928Nwpv5jqVwrF6PiGF
+ Nke5nndDpUBnyqlcmdIy6DThfo0ybCvn7iEFxrACd50xpH+IoN5BFeR/8aeUceWHdPJHv6Yu/rw9eDd5
+ BvD3/fdSN4a6jpsZwLie1qjDTnXc1g5A0r3KsIYxchrs0J2PBWiDMGk+vHN+gQxy2ywPnnSdutoijKsD
+ wHXn73vz97vxOk/eJjC3vtAteOnQzrVfnS8gZ4c5bUO1HcW72cZC+scZyxbwWSCn76bM9touE+TactsP
+ tWG7oMut2V5AgLjW4jRgG8JqITaG95sVTxEVFyjp2If0/uf/TrkXfkjbDr5HzebHUCuGvNazoCRqMzue
+ Ws1JZJsGcEPwXk2guxHImbLDnBPEQaYjxg51NwI5U3aIg0yAazKFeWVKEN32egjd8cpmGrUylTryMbFN
+ nVEqBTmFOeWhumBOxEwFQMM7ukzBW/j+wOXJdNuL2Gbx1u28rd2bAdR9Zhj1XRxPXWdHfH0gh/03ZFbQ
+ 7/70nxKssKP4MN3+/BrXhZsgV+2Zww1q9uoW6jN1M/m5QM4J4qCaIGdBXFNEr75tda82glwjyNVq5E0j
+ YEDcvxrI6Wc7yEHarWoHORPYsKzj2/CO/euCEFNOMKOyQ9GNVQ1yus70vim0merFMAf1STwt0s+Q+V2V
+ HvdmzxMwpwLU+cTVPIbK6R45SUHOlBvaHITnaZc+cyfZIU7Kj0O5MuVUHj0dulvt5fyLgFy3gD00a/dF
+ yjn3CV37+e9oxZ5r9GD6Beq9HWNWuX4GWF451Eu83yzIAeIgeNrQXQoPnRd/3w5yvfh37CCn7VHn9fkC
+ cIA5CCCnY+EgN9TxOzx02qWKd4Ac3tE2or1sKMhZwgT7aJMtj5zpfXOCNicpvCnMQQpxLRcny+eOS1Oo
+ 67I08uM2e+jWnTQysJimJpZT7MEL9OG//5LSj12ndbtP0NANhTR4SxF5LEuk1vNiqTmS1M9mezUbXryk
+ fymQwxCuJswa7adtp8Hrc6jZpM283uIVE+KgLwJySPSrbIWJ9cFWt03aQoMWRVPLVwPJY1ow+c6Lop6z
+ Iqnz1GBqOXkrfef5DdZ+TrBWn/SHoJsCOX6fE7OLfvLL/xCQiyiqojueXem6cJcnjpdxU5rwTYNw8+5f
+ uoNGLIyQm3inDd5M2UGuHV8ovHhtpmySh9kIco0gV6uRdxmAf3WQU+Pu7jbFZxccAMpuBHJ4x3ZAXN+Y
+ E9SHhe84QYgTwDjJCYjqlwFy8ZZMgNJtJpgp0KmHzgQ5hTvTg6cyYdAOeabg5cMxhiafpOGpp2lYyinq
+ l3hG1jtds9P9cpId4vQZ1CV9rqZMcLPLDnFSfhzKlSktg55h1eoaWimqUYZt5fyLgNyEbL5X/nsY3Iro
+ 6bgyejb/LC0qvUgPpZ6gkSEldF/yGeoevE/qZY067FTHbe0A1I3hDALIAeBUGB9nghwgzgnkLGEe1kLy
+ WLeLvDZa3jkdKwdQq4Y6zPRQM9hBvXJo+9A+NgTkILTDgDh0tapHru2SrGq5ulad4M0cYqPet3bc7rfn
+ dr7Dsgzy4GN6rs6hPgwofdflkC+fDyDOZ3kmteV9u6/ZSS0W59LC/BOUcPhjGr0xl7xWpNE9gXyPl6XS
+ nYuSqN3COOq/KZeGBxZRDz5W50Vp/1Ig14pBaujaDGqHXj7kigOTuBxOtwLklKmQOLgTM4b3nAjyeDeC
+ Ri2Po/ZvIegzkL77gjXrgzAYL9/x0kaJkG0wyJkA55bTybBMkLMuBPni+P2FdfS6fwrdv3A79X1zDY2Y
+ E0St37C8bwC5znyjHlibTFN4n8n+qXTPqkQaviCCuk4FqPGNtXWrat44U3hAw1ck0M4j5wUYf/Yff6DH
+ N6XwA7ceqgAdHqorb1xDQE7h7UYQZwc5AbR6IM7MGWdCmylrGi6MV1A1HOQgO8gB0EyAqwvk2nHlNhsR
+ E95UdoAz5QRx1TBndTlAaAxrNpCWnOBNwc2uG0Ec5NTI2+HNBDgT1uwgZxodO7yJUTIADhKDZixDMHqe
+ X3A8XA0D7II3LLsNtc0Tp8ZdPDUuL44AHkOAeuTEw+YCOVnP4GH3uul4OeSIg8T75AIUO7RATvDjLBeg
+ OcgHUPQF1TMOIHbSfaz6IA8ClPVNOuOGPMiEP5V+tw/D5Ii0M/T4zkv0dMFleqLwMk3IuUhDkvl7/Bv2
+ 63S6R+b9VSlIO0mfD+S0Tp+vKS0PTjLLj7tMOZQ5szya5RPSclwv0KH8G0BX448Pq3PgXvIJKqPHMs/Q
+ 0Mj9XOf28OcKGhZVRXPLLtCQkP30dMphmll0XurjwJAyujf5MI2LOSD7tt+21/LUcb0267u9HYAAcV7+
+ pTUgDh46n62lAnOQghzGy3kx2Ekgg0M7Bal3DkCH1CSYn1WjWJF2BB45RLhCGEMHuIMAdNgu7aQDwJnL
+ dlV3rVqAZwe4anBD25/C61KpI0NZR27bO63Ioq6AN17uzu1xzw2F1GNtLn9OFSjrytvbsc1os4DhjNWd
+ 23N0t97vv5P6r0ilYetTqc/ydPJenU0t50TTsMASGsL3oO1cK6tCqznx1ImXe67IoBHbdtKAdZnUZWUq
+ NZkTKd46jAlvigDAG0AcZAYY1gV1CnAY747gxWaw6bDtbO8V5hTkFOYAbnaYMwEO4/ABaBbMBZLnnB3U
+ d1kK3fXaNuEVu+oDOeiOSbyehewdd0xi8HqNNXmreNyav7KJOk0LJa950dRtbhS1fyOAWkzaKMETzZhp
+ /BZG0XefXyfMha5Xk8vgxXsscNfXA3KmmvNNacLEeSer/ZQt5PPGBuo3J5Re2ppGn//iN/SrP/xZul9/
+ +8f/pN//51/pxNWP6bXgXGrxtutGg4JdN9t8CCo8nHnJe+l///d/BeSQOPizn/2SWs3gB84PVR5sPQEO
+ prTQaKFSmQXO/GcBuf9x4N+H65+I/iuxq77puFRWgIM1ANUSg5kpHaxah7RCawXXSm+Xet5UtRuORpD7
+ MiBn19cBck6eONPAw/CrRw5AoOsgJ5BTyEB+OYAc4KcuQIHsIFO3asKbKSdAa6jsINcjFufrEoMYpPtY
+ qt1F6wRyCK6A521Y2ml6JP8SvVpylV7ffZ2msF4qukZjM8/JvbF78pzukSnzHtclfT6Q+Rzt60yZ4GbX
+ PxvktI70DK6gJ7Mu0KCIcgEywFl3BrnBDHKPpp2iaeUfUq+IAzQq/jjN3vuBBE505H3QDXtvfBXdl3BY
+ vGkY99ZpS/0gB3Dz5jqNLlWVE8ghOrUhIAcJoDHAiXeOwQifAWlWD4TVEwG4w3HgoZMxdfwZANiZIQrd
+ rgpuCm/msqq6jU7nzxnuQAlsQ1ep6WUDsHXmdhrLHRjkOvG2brzOb+Mu6rmRf5vb6bYLk2Sb19o86rqK
+ 2/RFydR6XrKAHKDPk7/fYj7bohnhNDX7JB+Df3dZGk2M2UtN56Tyd9Oo+dxY8lqcSGOCSwXsAH0YN9dq
+ Xjw1m8M2am4SefPvDOX7g25YnxV8frxfS7alluOjpg21bCtsrTpM6g98UIhz23M4Ym4ByEHN3wyigasy
+ qMfcGGr6Rih9d3JtiGsIyIGFAHHgo7te2UwdGdB6MgP0nBVFHaeGULOXN1JTBrrWbwRSq6nbqdnkbQJt
+ t728lbrPDrfmU7VBHNR/XhS1YY66qa7VukBO3YXqMqwP5CC9WNyAbtOCaG1qKf30178T6LK/MKPD9/7t
+ FzQztpjumMI3DTfaBXPmQ2gxNYzavulPfWYEUFLFSde3rdenP/xRg0HOXmCgG4GcCW92OQGcCvCmcgI4
+ fXcCufqiVav/jdWEOFVd8GbKhDxtUBoCcXXBm0q7UqUB5GWRqzE0ZQKcPTq1LnizQ1wNaFMZ0FZ/lypD
+ mQvcFN7wDgNiQptpjOqTGrQaxs5mEE3ZDanKNLgiB+OsUohzMu4w+gADc8yb2ZWnIAeZIAcBOuBx6htr
+ pQsBsAiIMNyZ4FKtajBTOYGXezsD1DdFvgxtlqrhDpAGkBuTfo6e3XWZ3tjzAb2z5yN6s/wDmlL2Ht2f
+ e1E8e7h2q9u3bpkgp7Lfb5X5fEzdCORUTmVEJX8AXBCnZcte9iCnsqrydAEdZAe6GnUhaC95MNB1Dizn
+ z/upR+AeejjtGPXZYf0h0kjW3gx1A6L3UY+oSnoo5QiNCdlDvuGV4o1bXnyGRsXw8QMqqOOWMvJkiBsS
+ cZgeTD5JQwN2WbnmNpWR9yau5/6l1JGBTNsEQJwP/zHTLlbx0HFbYI2VK5auVXjjkGoEkazd+F1nc3D/
+ qbS1V7LNBWoIoJCuVv7cZS1/j/+wmm0gEgOLl24DPHkF1H0jf4dBSqJSV/I+quXclrLaLuP2djm3r6w2
+ DGKtl1f/0e7Cn61u0Szy4WN7rMiV7W25DW+zJIW6MMT1WJtNfXE+a/hYy9OpFWzC/BSBOvkOA5vYlPnJ
+ 0o0KAOvJENqZ17dcmEhNFxfQU1Hl0h2LLle8byg+S83nJ1Enl+cONgxptsYGFlOHxQkCeW3fjZDgB8nE
+ 4FKzOTHUnm1aL76+EVsLqc9KBs+5vJ6B7q65KdR2RozY6rvfiaLWM2Kp1btsX99l6OP1LaYz+KnQGyY9
+ ZJbdbjEtUsCw3axI6sBw1Bq23gVz1hh5a9kJ6FRgCqQWufP1ELqdIa4jA9UItk8t3rLGx5ndrCoT5MA2
+ WG72Kn+etJ2+8+p2WYfEwC3e3EYecyKo74J48prJ3DElmBlqq8wC0ZaP3R6/j55LjHdzBTyIJvlTt5mh
+ dMeLRpLgSYA61osbaez6DP69m+hahWpAHNRAkMPJmFKYw5i4hzdl0tXvV8/aAJgzhRe8a+c//hE9sTXL
+ TcxQO745D6+JoxFzA6nrm5up3ZTN1H16ED3vn0m7jl2gzz//nM5eukovBqTzw3dBHB7sF8wd5wR2plfO
+ njsOcoI7leaOg0yogwBx+g6Yq1ayqKHzqtYHdias2QWY08aiEeS+GMipETP1dYCcGu4vA3L4XJdXDsAG
+ 4ADMQYA5p67EahmQ5tK3BeT0nEzPHa4JkbKDU07Tw3kX6c2K92n63g/prb3v0+Td79F9OZeoX/JZl0cO
+ cronluwQB9W63y6Zz8fUtxHkIMDakOjDNC6Jyxl/B92rnVGfXCDnF15GA2P20yAux/dmnqZXcs5Rn+17
+ qGfYfuobfJxGRpTRuyXXyTewhDwYCjsF7LGiWbmeD4zYRxOSTlF/eNI3cXuxlWEP3jUGM4AcolRNkKv2
+ zhWRjJUD3G3eZb0z2AHOzDbJqc3S9gxAhyCH7twewePWZV11+4f2sbqtzBZvnBXxannqAHSaCNgp/UiX
+ VQxerihYtM3tWGivu6zIJA9e7sVta1++jn7ctgHQ2i9GZCoDGdsBeOw8GPIAdFjfcmES24dktgku5wDb
+ lXZLU8lzJQMjw5k6EfosS6DRgUXyuQ9fG2xS+JEPZUwc4AyevtawW7PiqRmDG7po7+X9O8xiwIOt4/UK
+ ck0Y2CRXKgvdq4C0VgxeXZcmU/+NOTRwXTZ1XcBg904I3e2yny35O61c+7o1I0oEoGvFNrgDb++xOIlG
+ bMgWeS1KoLthrxXgGgxy26n11FAauCqdPOfG0J0uiLv9NWeQgwBv8NTdzlDWbEoQtWCu6T4rmnouiCXv
+ udESYfqdlxnOmIvuenEbteTvtJ0aTK3eDKRmkxncXmZIA0sxU7njDlyMBY+cx/Tt7rFxEODvuxPX0n1b
+ 8um7fDx0r35NIFd7flWAXKu3QmhecgX97W9/E2BzeinM/edf/0ZpVRfJb1Yw9Zi6mfrM2k7eb2+mwfNC
+ qee7geKNa/Z6ADV/K5haTwulwcvi6K0dBfTw+iRq9laQ60GCzKvHyAmxM8g5eeScAhzqGiNXV4ADQE4L
+ sMqCOBRQBj3X2Din8XEa3IBK4+SRu5uXofrGxdnHx1kCyFmyj4szZY6RawS5Lw9yJsC5jZyDMVQ5GVHI
+ DXAqJ6PsMtxfFch1j3N53+ItkINnDt2NdcOcAk21vi0gpx65nnGn3cK5Yh1gbXzmWXq++CpNKrpOE4uu
+ 0tM7L9KorPONIKdl3AHkvLZznfEvpyF8D8fEVUn3KepS56AS3s6ygdyAkP003L+CuoWX0mu7zlNHroeD
+ Qguob8Qx6hu6h97ZfZUGRRyQOu+xqZS6bi6nLlv3SBetT9AueiTpJD3AwAjPHwAOEg+cDeQQ1ACQQxct
+ PHEAOXStdt/Gx20AyJnrNRAC3azSfbqev8cQhDawHYOUiNtLTNkFKNOgCEjH0PnwdyHAHWAO3anqkYPH
+ DQL0+W4uov58jt4b0M2KP+Hcti9Pp7u5jW+12Opi9VqbI2Pf2s5HUEIa3c1/+NssShN40/eOy7MJ4+HE
+ 5rCdgBMA69/KOsrvbDsWJksABOyQf8VFhi/L0dCGhTF3ALZW82IF6pqx7RoVyL/HIIOuV7f9czk8LDto
+ zWYE26mAdifb1Hb8nQErkmnYpnzqsZQhkfcRaDM8cvgMwGvLdrQjH7fbgkTJ5/ZwQAE94J9HvZayrWTA
+ u1mQ67YwlgavYhv6JiJU0fsXQk2nhNAdU4LrhDjs14qPjZxyvRbFU4fpvP+rW2QeVOSVg2eu+aQg6jR1
+ B7WbFkJNpliTJKhTCxGqGqVqQhyEtCOdpwVKdKpy2G3PbaRO74ZS77cYAp9bQbc9ve7WgJwd5kR8gion
+ kMMNaM9QtS7vsHSf3uj1j//+byo9c52Gzg0WgOvNQNfh7UC6yxXgoHI/JMOdCk9cjYhVDIg0ulXvnhZB
+ bd/lmzwjUjx3Frwp1MErZ8Fccy5kWFfDC8cFSmFOAhsgVyGFWvLntkz27bgwt5ptbUOBlnFxNwA5hbiW
+ 8xJl0GkH/geFithyHv5JWd62mwE5eOG029TyyDkLEKcgpxAH2aHNLkCcyg5w1ZGqlm5mbJwJcnaAM2VC
+ nB3kNLKtfnjDWDiFt+oxchAMi47rMWUaKFN2iPM0jJzKbgjtRrOWMXWNZ8KyfTycqfrGxUEKcVbwghXs
+ 4AQOkBNomAKMoLsRIIcuR4U5a5sFMnZgg5ygqT71xJizf6JqnAufv9XFeoKGpp6ie7MvisYxwI1MP0OD
+ kk/RgORz0v2K/eDFwzXbx8zVJfs9VgHanJ6RE9TZn7kppzJTQy6QM1WjHLpkL7+mtIybMIcp6LoFM5Cx
+ xiadouHRB6h/JO9rr1tBe8Rj1yuslIbEVIp3rXvAHhodf1TmPfWSGR54e/BeGhheyfuV0+Sci/RE5mmr
+ 7m8rl3ouY+V4GdGpPtuK6Z7oIzSB728fPo9Om3ZbdZ4BCCAnk+i78ssh7YjMKLGpyB3BCpCTSFVuj5za
+ LZHRvqlnDl2rADnLO2elIkF7iDZS5l11dbNqXjoBP3j0+PtdGPaw3RpDh3FwGeTBf6J78n5+W/nc+DsI
+ YMCYOHjVxEPHIIcxbBA8b93xp5nX4Q+8vXfG+sz2gW1GNz4njIlz9+zMT6TmDHhtFybQUP9C8eh1YBvU
+ ZUkK26s4mhhfKQEMGrzQbkGq2Ch45JB6pCVyyTHYDd+cSwNWJcnQI3jjTNsow5FcdtQU7Cvs7N0z2CbP
+ iiTfJXE0aEMm9V6dTl0Y8tBl25ntqQf/XleGJ08+/6683HNJskSVDl/P4Mq2ssU7bOPVYWMDOWUFgbi3
+ Iqj5m8HUd3kiec2PEyeTgt1dDHAYGwePXBN45V4LoKav+VPrt4Jl3158P7qy/W75dpBwD+BNulr5velr
+ gdSGeaMdnEeY9cEFd3ZWcjOUC+RqeuS2UcvJm+lO5JF7fr1sw3i6+zZl020vrHOz2T8V5DpMC6WNBUdd
+ qFb/629//wdlVF2kFnxD5AYbJC0pR2SdlQgY+ePazwinvotjqPs8LhQMdQpyzQByri7VzjMj6eEt2bQ0
+ 6xCt4/PYUHicluceptdjSqknk3lTDJp8F5EzvD8KBbpSGeg85sXQkJVJ1Jn/fbTgQuf2yrkgDv84ui6M
+ owf9c2gJH28tHxdakX+UXoosoZ7LrPw66DpVmKsBcgA9rkgduBKN35pDs9IP0tqiU7S++DStLjxBU5P3
+ 07CNXLEF1LgySRdrHSDHlRIQ15YrrveKNHoyvJj/VTGoadeqA9A1gty3G+QU4m4FyDUEMLANwNI3oTrS
+ 0/rOvybI4Vpxjf2TTtOg1HMMbWdEfRlOAG8YH+fnioAdmHhW7ovOOmHez7rkdI+hrwvk3GXrFoOcd8h+
+ 6hVaQaOTjtPIuGPku90KDMIMEDXqVZAFcvDIDY3lehF8gHry5wERB8gvrJLeLr7IdZXrsf8e8g4sp8FR
+ h6gTg9oIhrxXc87RqGius1znZTaIgFKBwO7+FfIOcMNcqcPCK+i+uCoasL1cPHT1gRy6WqV7ltsgtEdO
+ 7ZaoRvvmAjaGNHStdmfo8uXf7sW/gVkh4HHTthI9HHiHZ85MGNyVAa43f29ocDkNCNpNvTZz+7gyUwIY
+ 2nN7jNxxLZfyH3R44HgZnjdv/n53/j6AD4EPADtswx93O8gB4jDGDkEPbRYkSv44dLl25P27LU+m7vwb
+ 7+bwn7S12TSAQfbBkCIa7b+LnojdT2nvf0r++9+jhxiih/O6EYG76f6wPTRy2y7qvSaLfFemU4/lqdRl
+ sRXYMGILA+s8Bjy2edozVR/IiZeO7Wgr3hceuc7zksiDbaLngjjyW55CY/l3xmzJlyhawB28b+hKbT83
+ QbpVOwIWGwByCIzoszSJ/JYlW5Gqb4dKVyoYA2r2ZqjAmMescPJdmkKebJ9b8nfvnOwv+8NTJ+DH70hJ
+ guCItu+EUwtmkCbS3coMJD2TYB8bI7l0I5Br9vJ6ATl8RjfqA1sz6M7nN9JtL1WzWb0gV2PQnSH3j9UD
+ cuo2FJDTuVYNAeTaTg2hlVkH6R//+IcL1+p+/e0f/03Jh6/QHUzEcpORfgQDEAFwhvBwWjEgPh2QRXlV
+ 52jLzipq9Q4eGsQPkgGuBT+8UWvTaWXmfjr34Q9q/f5vf/d7iiw7Ro8wiLVn8se/A42e8VoQS3MSS6ng
+ yHlalFZBLeEmBsQB6KTgxdJopuU1eQfpzAef0t///nfXUa2xfp/9+88puPQ4jdmUJfsC5qRgu3Q3PnOB
+ 9+aCNSt1H+059x7913/9l7uLGe9//NOfKOfoJXo9fg914Yos/6AY5gBtqtaotC6hEqNyrs2vorMffI+2
+ FbMB58ppeecY3gBzhtphjBxLu1br607VxsiUHeBE6xjI3HLuToXsECcNZx3dqaq64K2GbPCmMiGuJsgZ
+ YsNSl5zADYIBM6WGDXIyfE6GUuSCNyeDawrAplKIU8Otxl2lEIf0IXWBnAkQChd24FBQg3Q6LU3boVGb
+ SI5bY3yZC4ZM2WHJBKhvpPi6AG1DU87RwJTzAmz9k84K3EH4jOuHsDyIYQ6AZ92Tk+Qdw9cJwJVAiOp7
+ aL+/5jMwZX9WpuxAB5kQZ5dTWXLrBiAHOZVlSMs6ZoDw2L5PlnuHVopXbVhsFfUMszxzWn/cEa0sHUPX
+ M7yUBsVy3QqoIN8dXKcC99JIhrbhCSfo8ZRj1In3wR8zv5AK6h/OMMjANTz6EE3MOk2Ppp0QMOsWWCYe
+ N4yN07ZAo1YRlTooZC89mHSExkUfJo+NO6XLV8fIYTJ97IuuVgW5DrxPe6QXQbvkarPwud16DBNBF+ou
+ 6rKhSMbIYWJ9BElAHrwdQnoSL14P7xy8dmgvtZu1LUOaDwNg/0C+bj5vPwbOzhhHx20vIK0L79OV21Sd
+ f1XG0PEf7i68Dzxv2E8iV13JguHBw3ewHmPoZBwdxtvx76ALdSAff9DWQuq7IVeWod6bCsh7dSZ5rUii
+ 8QHF9GDobklN0nVlBo3g6+uyPImmZ/P9DyuhxTu5DeFjTQgro+FbdlLbmTtkvBzSabWbE09efC5eDD89
+ lqfL2Lfn4w7S03GHaFTALhrlX0L912dTr1UZ5MPX4MkQiS7Slu/uoNYzo6j9rDj+nCAAh23d2U514+N1
+ ZtvYjo/fhiEQArD5LU+j/msyyY+P1XlOFHWbF0ud+FgKcHdhSBTGwr8dwZ9DBdbw+e63g2kYn1cHeACZ
+ G777pjVLAyCsy6xoy9s2O4LaTefvvRFijZWbEkTNXg+huxjStHu15Vvh1I6P15qhrzlDHcAOMp1ZbinY
+ maxkcBSk3axgrdte2SCpSJq8xMw0cRsNX5lEbV7fao25e4nfldVMcLPrloGcywtnCiDXnG/MW5HF9Ic/
+ /tENKU7C61e//yOtzatyQRyI2VJ1EmBLeCDdmJ7DSo7I95DKpPtseOEsTxy6U8etTaXiU9fcAIeu3V/9
+ 7g/0i//4Pf3pz9bME3h98OOf0vNhhdSCCxf+JQDmJmzOpKufWsEZf/7zn6kt4IsL1N34N8GF6oFtuVR2
+ /kP3sf/7v/+bfs3HxvH/8pf/knUAuoOXPqR7t2RZXjmR5Y1ryQXVc1kqBRcf4XP5s+yP8/vtH/9Ev/zt
+ H+gPfH6aWgXHnZNcTggft3vizO5UaNTWfPc5/eW//kqvxJRSB4Y7N8wZMj1yjSD37QE50wsHqbE2jTr0
+ VYAcPgNwFGAU5rRrsSEg920Rrm1wylmGOAa05LP1ghyk2/smWBGvGF9X895V30NT5jMwZX9Wpr5pINcl
+ mOtG6H4JUBiZcJwGRFVSn6gqATYBvHpArkfYbhocx4DGdbA3gxrq69jE4/xeTq/mctkJrJA6C88culn7
+ RR6kLgG7xdP3KAPDC+l8v7mOAsbgiUM74LHVCnaA0PWK73dGSoxtxXRf9EG6N/Ywg1QJ+WAWCYY3jJ0D
+ 2GH8HOZkdbdL6DYFoG205ljFXK0YUwdQQxoSQJq0f9zWiXgZbSPWC+TxPhq1ijFuA7eXU2+GR0+GKgCa
+ HJ8BD5639gxLUKc1Fsh1X5tDPvzdwUHl1G/rLuq1uVCS+QLSOkv3K4PRamtcXJ+NhdQPY+h4v35bGNo2
+ 7yQk6+3H547vtF6IHp00/vOfyH/iU6gb2wOJWF2ZSzPyT0t3KxwLXvz7IxjAno2ppAFb02lcwH4KO/ID
+ 6sG/1Xx+GnksSaDx4fsYjNKp48Jkasog1mJBupVeazYvz0sRh0XvJUkCcR3mx8p6wFiH+ZHktSSeQSyN
+ Bq3LopFbd9KEYMsDCN0bkE8jNzAMrkgn74UAPIYwhqvm0sO2g1pOj6bmU/mdl72XpgqAjtrCwIuxebD7
+ zAR3AehYd6LHjrmhC8PgSL6HTd4OohZvBkgPns+ieBlb13lmOLWYGkzffd1yGgHgzDFyd74WILDXBkOy
+ 3tlBLcAk6E5leMO7LtcAONXNghxDHDxytz8P79xa6j03StKX3D4RfGaMmzPBza5bBXKQUqpKL7jH7B2U
+ efCcwEpdEIf3tANn6I4XVlsgx9JuVXN8nLpLvWaGUvDOA/Ld3/zxL9RzLkjceuht+AGlHDgnMIMX4ObY
+ 5fdp4tod9PCyMApI20m///NfZBteZScv0cjVSRbITY+kR/1z3VG2//W3f1Bbhi8BOVbLt/wpveq8+7z/
+ k8Gt6vxVmrQ5hp5eG0kpJfvoP/7wJ9mGV+rBs9R7meWBU3VcnExr8g4JAOL1v//7/+js9Q9pcUwOPb56
+ B21MK6Lv/+hz92/89W9/p2eC8/ifGOCt2m2uAAf5rc+lgtPvu+8pQDD1+Hs0iP9FAeTsUoBrBLlvD8gp
+ xCHBr0Acusdcxto06tBXAXLaZarzngJe0KWI5VsNcr4MUnWpofs77eckp/0BZUNcXaqAOhPkAK66Du8Q
+ 1sEjNzAJ7xYIyr0w7p+lmvfXfAam7M/K1DcN5DADhN+Og5L/bSC/I3DBO5SBK6jC2t4QkGNg6x3K+yBY
+ IcCalqtX+GF6c+c5ao9IV3/U4z00KIABjI+LsWyINH004yS9mH1GAiE6bGNg4/YBIIeuVICclW6E6/zm
+ MvLcVEYdNu8m762FNDaygp5J5j8lOEcGOIAc9vXg71vfAdhhnlZX8l+0UWi/1uyiTmuLqPO6Ylt7t1O6
+ WNsxIGHZhwFxcMheGs33Yhhfb891eQJ1OJbn2p3SPYpcb+iOBbi5vWr8jnxvgLHeG/Olq7UvXwu8d4MD
+ i/n6i3hbPvms5/3WZFLnZckyVg6RqNYUXpaXzot/B+DWZnEidVuRRj4rUiUNSZclbDt4X0zTNXRznnQ3
+ NpuXKF7Cjgti6LXEKhq+JVvG1PVfmk0bCy/wMfn7y1KoxZwMKyfdolR6NLhYulvb834IesCYOnSPtuTl
+ OzHGm4Hqnm1F1JVhrtO8WGq7IJ66LEqmnovTyZPPs+sSdMlmMuDxec9JotYMhQiEaDsjSjxtGA/Xh8Fv
+ 0KpUGrI2mUbzuSJSdRhfNzxzA9dnybi8PktieTsvr84mT/wuQx1+e/D6bBqyOoW8FsVJYmNPvhet3oli
+ WAunJm+G0e1vhcu4OIyJu+NNC+QwNq45r2vL0Njx3R3U6u0QuuO1rbIPvHSy7yQGv1cttjHZp4ZuFuQm
+ b6a7Jq4RkHt0TZaMs7vtxS2SjgTj7dysZoKbXXaQuxG8ifREMahPVNsj556SywVzPd/dTsevfChgorCh
+ oILXrqPn3XOtmrM41OWR85oZRiGFB+W7v/nTX6jHHAQ8WN2q0+LL6Yc/t6YJQ7RscOEhiXa1Ahus4IYB
+ s4PEO6fnsCqviiHOGiP36LYcuvy9H8n6v/z17wJy8MRBwRUX6Pf/aXnd4DkLLz1OLaaHyz8RHRfw1OYk
+ +tHPfiX7ACI3l54RgENhx7sP/8P55W+t38b2pL3HuLCZ0arJNG5zFh28+L4cA/td+cFPJMs2AM7JGzdy
+ awF98rPf1Livn/zbL+il2D3UHuMoGN5qe+MwHYw1Nu5mQK4mxCFKlRs5lv4zdTd6dUjhzYxSdYI3lQlx
+ KoW3+sbFQdUAx1DmBjcdF8dGhQ2IU9Jft/ExhO12mPNUY8ZyMnZOhtEtF8DpeCUZs+RgbNUTB4BTb5wa
+ aTXkatxVMPiIVrVmaTghUId1JizYoaLm+C47gFSDHJYBKb0Y4gTmWAJzCQC2amhrKMiZ8PXPVM/4M3JN
+ vfl6BqahS/WsjIMDpAHi+qecc4MchG2Quc434Tj1Sz7N+1v3Rr9ffW9r3tPq+10t8xmpTJBT2Z85VB/Y
+ OZWtGrpJqMNnjIkbm8h/GsKraEQ8lymGMXS11qgjrvqDcXKmEB3uE1rKIIe6t496bC+X76P+3cf3zSuo
+ nIZE7qUXdl4mj4AK6QZFPe4TUkGDdvBx/fdKHR/N1zshpoqeyTzD8FRKQUc/o+AzP6Y5hddk/FwXBj4I
+ 34cHDkmB4cXrH7KHevkX01j+zWcz+Hlt53Zic6HVLnE7hYnzuzJoIcoUnj3vjdb4OoCfF7dLELpWkXgX
+ 3rZhoWXi6eu2Ht45buukTcwlzAqBJMKAK3jnsH+fbcXU179EPG4jGGjH8rkMD9pN/Tcx7K3IEi+Zdp9q
+ WhIIsAfBo4excRg7hxkdMBYOaUcGbttNA/nPu/f6POrMn1uzbUD0KgIZdPzc3fPjyYth79nEAzLjA8bM
+ eSxJohdSqsh3aZJ0uw7h+9pteTbdG2rBWqe5ieSxiiFwfqK7h6njkmTquDCeJkRUkB9/B8DWcnaCTNvV
+ kaGq66JE6rMmgx4LraDuDIxIP9JuXhx1YHBsA1uJ4EEGNx3SBFXbaFO83zu8z4xYQqowpCNpMzuSuiyI
+ lbFy6Grtz/fiUYbczflH6MOf/pRGrYyVMXxDGfJGbimi4RtyaTAD4ICVaeTHcIuUIV3nRJPH7Cjq/G4E
+ dZmxgzq+EyawBlBTz5xEtapjaTKvcwmRqrfzfnBmQVaE6lZJMeIEcpDJVaKX/ek25i1Mnt/i5U3Uj+9v
+ m9eZo4w0JDVYzWml6isHOZdnDgP4fN9YR5n7T9KHP/opfcag8+Nf/IYufPIjSireR/eviJabZQ1C/IIg
+ xySOQYtxFafp967u06SKU9T29a1SEBDQ4M4bx8sPrYykn/3md7JfQMkJajfTilZ9ZFu2I8i1nR1NGcev
+ 01///g+BpcT9Z6j5W4FugINkeUYkrc7eR79gWMOr/PwHNGxNshT+NnNj6JktyRKhi2N88tNf0YiNWW6I
+ Qyi4jIdj4HtgaxYD4S/lGOhi7bE2xxHkunBlfjd5L/3+j39yQxxegNgVhSfIc6XlhWsEuW8nyKknTiHu
+ nw5yDCYq9czBGwUvFKDGCdbscgKpL6Je/NuQ07abVa+EswxxFrAhwAHLCmgmyAFWJS0LLyvM4d5Bvomu
+ mSF4HTxzGGeHZXgxrWhfu2o+A8h8RioT4FT2Zw59nSCHCNWRyScZ3A7SiLjD1H8Hg13oQfKw1xFX/QG8
+ dQ+uFNlBDkmDvQNLpXsW9XJk1GHqwmDVa/tBmlZwgl7L5OMzcPluKyRvBpWB20toRBR/n4EIiX/7Be+l
+ B1OOUMTpzyn+zEcUeuRjCq265s4nByE1iV/gXhkb5xdYzjCHIIgS8QD2Cd5NI8P3SRqT8dGHyBftCUPX
+ M8nH6Y2so+IdQ3vVaV0ew9hO6sEw14+PMWD7XhmHhy7SbhsZ8ngfjI/rw5A2MHiPeNIGBJUJ6CF9SK8t
+ VhJhBTJEpfoAUJEseLXVnkreOPScuEBNu13NfHNIM4IABu+1+eTLoAhh7Bvgrd0SbtvxPT4OAA+55O5e
+ kCggB9sCGzIti+/vinRqzlCFsdWPJxyiXgw+/TftlEnxh2zKJW/+bofFSeQ1jyGLbRu6dDuyjYIda8dg
+ h5kdAG2AM5/lSfRAaAkN25TD0JYqMztg/Bwm5e+yIF629V2b6Q52aMYQV0MIMsQ4NgeQk6wT/A5PXbdF
+ SeTDQIgccOM2ZNOjQYV0j38ejWIb+mr8PgG2sVtyGfriqckbAcIKUDNwA7MEJsSXYIWpwTJdVvf5MeQx
+ K0KmzfJdkihTdA1iKB3Bx4GGwPvH8NePQdRzDsPjzHDqOD1Evt+Sj3/Xq0j4C3baKCx0MyCHYIqh8yNp
+ 0+7zFFR6nt4MSaPvvLBOcsg5sprTSpVj16rtB+0nVA1y1TKjVVV3ujxyTacEUddp22nA9C1Cvz4MM94z
+ Q8l7Vjh58Q1sMy2URizeQSOWWjAnQQ51gJwVmcrfm+XkkYsgX6btEx9aEPa73/2OFqbskYLQfJqVYkSF
+ AtKFST6k5ChVnLlKz27Pc3etPhZQs2u1HfK9cUH2XhBFxWfeky7i3zE0rcg5aHniBOIwfs4S1j0WlEdX
+ Pv2JdHH+8Bf/QdNTK8Ub57s6g0rOfeD2nB28/gPqzYBmBTNYeeNaLcCEx6nkxZVhQ0EVVV37Hq3LOygV
+ SUHOgjkmeAY5uMhLLnws54tjHjh/nf6bzxGvXSev0Jht+W6QU4izg5wTwJmqG+KsAcCWsOxSHfCmwr9d
+ hbj6YK4ugHPLgDa7AGsKctUQZ0ngLciCNpEL5ExwU5lGCWpIN6rKNIJiIF3wBjkaUxXvC7AzvXFqpE2p
+ AfeCoeZ3GHh3tCqDg0qBwAkc7JJ98R2AhwvYTNWAMpdnDvACTxbeAT49Emvup7Bk/3wjKajVgLVkXr4J
+ mceodSxDfonnqF8SIO4CDUy7YAFakrUNENeXJev0Gvg6cc0YQ9eL9+vB4Ib7g/3xGxbMnaEBCQbk4p7K
+ fcYzsd4bCnUNATmVE8iZcixzLLN8iozyCyF4oXs4l0sENTCkem3fI564gTGHGey4/rjqBOqIWWekHgXt
+ 5f0rXLK8cr0iK2lYIsBuD3mF7KVeXGeQGw5wNTz6MN2Teoz8D1ylqs9+RasPXqNpO0/TnLKrNLv0Cq3c
+ /x5tOvEDWl35KW2q+oy2Hf8xXf71X+jDX/6RfvCr/6STP/wVreLvLiu/SPOKztH0vHP0asYZeib1CD2V
+ XEWPxu2n8ZF7aVgwQ0ZkEZ/DTmq/JVs8Z8ND9vDvXKboYx/T9qqrFHj4Q+rL5zg+rMrdXTqIQW1I2D7q
+ w0AIcOuxBWlIMC6OgWoNt4Grcqktw1n71fzndrU1Hg4BC738d0t0qzfaRVdAA6JZEcmKthjQJpGo3Ga3
+ WWrNumO13xgXlyOpSTBuDt21CGjotiabevFvYm5WdK9KF+sSaxovBEaIzVhopR1B123PVZk0PrxUen7g
+ OHh4Rzndzcd/nK+l3eJ4cSj0WFNAXvhjz8B2z2YAYZpM4+W9km3V3DiJgEWqkq5sf9BN2m1ZOnVbjjla
+ c+iJiD107/Zi8sAYOd4XgRGAN4DioHXZ4oWDrb1jxg6BuLtnMXRNZ4jDDA9sf9vxOszjCpDstzqLeixj
+ cEV6rndDyHtRrHjWRqzLov4rGBLnRko+uomR5eJVa/NuJI1ZmyvwdgcAbmo4c0OIjJu7e2oEtXknitoh
+ AwUiTt+ycsypx82UeuMQ0dqUeQTzoTZlaGsxJYABzsrGgd/rwTDYa2Ec9V6cQANx/WuyaNCKFF5Opn5L
+ kshvQRx5MwBiYvx2DJB3vxZATSZtkYnw73h+DW3OP8iqopzj12lL0Sm6YxID4QtrnVnNaaXqqwQ5jJO7
+ i0Guy9TtNHBmALV/kwnW5bKUwYXifbNuXOepgeQ3fRvfqK3WjZQbfPMg14dp/dz3/03Wf/+Hn9ErwQxo
+ AnLVEKdCgfJeGE99+Ka3nw03L0BuR50g57comkrPWiD301//lual73d546ohTkFuyKokOnjlYxkH95Pf
+ /J7mZB4SkOu3IYcu/9jysgG6EivPUg+uWKhkmgAYIIfK1IYroffKDBq8LoO6M9Ths0IcBIhrx5X1jZQD
+ 9Iv/sKY/+8Uvf0lDVyfR2fes3/7rX/9K83KOSENgqRHkvg0gZ0Kc2xvHBvqrBjkFjYaCnH4WLxQDDcAG
+ 0ALPlLmfCUx21QdXus2UE6zVJ8djOPxGn+Rz0qU6OO28XAu23QjkcN0AOV1v3jN47nAvtPsZ4wmrYc5J
+ tZ9HfSCn0jJgygneTDmVOcgsnyKj/ELeO/jYIRU0OpWvN6yChjDM9ebfkzFvXN61TjiBHACue7D1Dm8c
+ PHMKcuhaxdg5P4YJTKyPiNV3y6+Qr385TS+9JtD2fMZJmpx/mR5MO03eAUUy/Zcf0p2E7aV+O/bR+PST
+ lHzxR3TmJ7+l9377B9r/6c9pIP9e7+2V0pU6KKSIRkXvpcfiK+mJxIP0ZNIhmphylF5IPkIvZxyjSdkn
+ 6cWs4/RmwUl6p+A0HfrJn+jD//gD/dsvfktl732Pnk47SWOi9lLfgALqvj7TmgwfAMVw1m5DkUS1Ijq1
+ 85pd0lai3UTXKiJVkZ7EZ0uRC+Bck+rzdwF3HVej29RKTSLzsTIswSuHaFWMn4PHDkKqEqyzxsFhO6bt
+ yhWwa7883ZowH2069sFwGhbgDt20nRkCvdHNy8v3he+hzosZjJZk0ePRe8h7uTXN15jteVaKEoYm2B8v
+ BiUENEzPOSbdttiGIAdfhisPhizAmyf/LlKReC1F/rk06sg2qsWCWDn+0K07BWyQ1LfpvDhqzuq0NIFG
+ bs2nVmx/W7wdIWPhfBgEe/JvoSsU4+faz2M7yHCHwIXWbJcR/DCE7SYiYwF4SA+G7BHe8+NpesZBmhDA
+ AMyfEbEKr93otQUCcE2mhkiwBLZ1mhUrU3opwJns4SRJU8KcIr2JLlapMf4f4+RYt78WIsmAb3ulOvDh
+ u5MsYTou1d2vbaN2rweS1zvB5MdgN4w55f616ZRx8Dxln3yPwoqO0KaC49Rk4nK68yV0tzqwmtNK1Rfp
+ WtUgB32Xi9N3vVC+AXITJgdQl7cDqN/M6lBeuUG2G4c5Vp8LyKLEipM0bm2KrLPyxtkCHiRfXBh5zQ6v
+ CXJzrem5enOBOvPJ57L+w+99ShMD+R8AgxyCILRb1epaxQwO1X3zgDgREz1A7oqra9UEud6LY6j83PsW
+ yP3mdzQ/o9IFctXdqhCieAYsj6d9Fz4QmPr817+nWVzgAHIDN+bR1Z/8Wo4N8Irec1LGzFkeOf6HBIhz
+ jWewC/+KJN0IPHEub1znxUm0POeQeP7+6+//oOCSI5JeZGZyOf3id1akcPyBc+TL/xIU5jTAARnEG9Kl
+ aoc3uwBxdeWMg+wAZ3ap3gzEmSBXX5eqwpslhjQHiKvRnerSjeDNlBosqD54U2mXVQ05GFJIjC2DHOAN
+ QQoKbVhvBzgnw66GXwIdGBrw7gQJChAKbZB0D7pAxC4FGCdhO2DIzwVzABtACwSos0OTE3A5qVfK2XpV
+ 3344F7vMY5uCRw3dqUPSL0miX79EXg84c4HcgNTzAnICYnrNLlhFF6xCrNO9wr1Q75xGwOIe4153jztO
+ 3vF879Fd20CgUzk9exPm3GXEADiVU7lTaTc/pEDnE3aIuoYfpJHxB2l4zHHqsuMgDYvl42M8nKvcm3XC
+ rCvuOhTCEMeSlCTby2UZwRFj4w8JwI2N3kfPZp2he5OPSooQz8376Z28ffRY1lV6t/QqvZB5hpBceELc
+ IRoRv49mFV2l13eepjExh2g0H2dSJj+TqEoKO/kjSjz9E9p08BotrvyEhmwvo/abdnObgqTA1fOwIqCh
+ V1A5+SKQgIFvROQeGpO0nyalHqeH06voCQbE97jt/uTf/0jrjn0sXanoUvXYWExd1pdIHjoEWUyI3U/P
+ px6iZ5Iq6cHIchq1HRGuBdR1fQ715OvATBRteH8AXxt0q67JlG2IXvVkwENqkS6IVuVlr407pftVomJd
+ k+VX/wlnod1emiXBDQioANRJrlCX5w6BDp0REcvwCGjrziCHFCeIcsX3+q5LowdDSqjToiR6mp8fbAjm
+ RQWMYUxcUwYr9BphnlbAXXsGs5CjH/L3s0WAuz4MlH3WpAvcYR/PRWnkuSCD7p6fTk1mx1HTuUnUZE6i
+ qNO8aOkGfYOf0VP8u178vS4L08iPIfQe/wJqMpNt63RmA0S9su3FuDV0bY7dWihpTTC7A+wyxr4D3pq+
+ Eyv55F6O4+fF96/3ilSGtxiZCaId23MkGH5gEwPx7Ehq8y7v/7aVT84uMAXSjUieuDerha5XCJ635q/5
+ y8T2SEOCsXMyRRcCH1i3vR5Ct/H7XQxzTfm9OX/2nBFGPuiinR8n6s723oMhtj3zCqJe74KDi/kKYIcp
+ u257fi0F7L1Cm3Yepq35R2l90Rm6/ZkV1HRSoDOrOa1U3TKQc+neVYn0btQums4avTyOevBDGjQnmFq/
+ 6QI4BTylXIOM1+RVSU62HaXH6G7sbwCc9nXfCOT8llSD3Eef/pBe2p7jBjmz3x0C0OkASzfM8bvpkcMY
+ uXZuj1yMeOQAZxhbNz/zgBvkFOD0feCKBDfI/fhXv6sFcoA4KLbidDXIudRzVQY9G1ZIU2J306vRJTQ5
+ Zjc9FrzTAjkD4qDHworp3Ec/kHP9w1/+SkP5nyIq/LDN+XT9xz+X9f/2i1/TWymVsl4hDpVeIa4R5L55
+ IOfUpSrrXctuI+1gzNXQf50gBymoiVfOBTYCdC4Y+qaCHM4H5wuQQ5cqlvUcAXJYNkFOrwXbcN2APlwr
+ JOuNeyL3he+deOaM4BAFXb23uPffZJDz4DI8MP449Ys5Ql0YcIbHHKXBcVzWGPC03Jt1wqwr7jpkgBw8
+ cN4MgWN2VNJLuSfp4dSzkh8OAQ99w63vdAkup2UHvk+vFl2jSXln6e1dF+iepGP0dPpZmpx/kQZH7yff
+ bUX00s5ztLDiCo0LLZN246mYvTQ6upKmFJyixXsu09Si8zQq3JoJAkmGJYdcQKm8I3CiX1AJDYsoo8eT
+ T9IYBkp4yZB8eMflH9OzSVUUVHVdpsbCoH4k7R0QXCpj9IaEVtCIHQcY5g5SL4y927SLfDftFHAcvaOc
+ 7o2rpCdSGAgTq2QM2riIShrozxDI4AbYkmTA23ZT/6A95MuACO8b2mZ0t7q7WfmzCXKYVgtj67pv4HZU
+ ulItiFMB5DA+DgETSD/iu57bXF7XiYEPUaOTMo7zn/9kejKB//AvxHjsJOrD4INgB4AfAidGwNvJx+rO
+ 5+jJvxVw8D3qu36XpCxB6hJ43bqvhBMhmdoyBGICfgQydOdjdF+ZKmPhMENDL14GMCKwoSkDXds5UfRg
+ wB7qvz6L7p4dJVNv3bstR5IAD12XTUMYzDCuDilGYK9ht6W79d1YunNquMzF6rMwTsbCtZ8RSl7wAM6O
+ pi7z4iS1SB+GW8wSMW5TjqQcuWsaf78ekMPUXHYpkzR7k3njDd6X14FFkGcOY+m8F8RSz8UJ0pWKblXM
+ t9p5aqh425ogLuAVjJljTmKOwrys7ihXhjfdrtN1tXs7lJq/vIae9s+ip4MKqflzKyRi9faXqyNVa7Ca
+ 00rVLQM5PtG5CWV0+PKH9PNf/or+/ee/oKrLH9Gq5BIaPNvliXNBnBPI3cEU/NSWdNpZdYYWJ5dKwl8F
+ uZvxyJkg9+H3f1AD5EyPnEqhriEgB49cXSBn98gNXFHtkQPIzWwgyLVekEjTU/bT+z/8nL7343+jD/gd
+ Ovn+pzRqI/9TcnWpQggRX198SiJf/+uvf6P8oxekcqLCd16SQokHz9Mf/tNKs5J56gMZeKogBzV65L55
+ IAcjC1Azu1RNA/xNBjmVeKAYhgA+gBuAnAKcyg5TdckJ0BqqhoIcPGoAOICcwFqKtb4nHwPvdYGc7MOf
+ NdJVAdB+PzBeEN+DtKtVPXP4bu2cc7WflQlwKqdnbwKcu4wY5UflVPZUTiA3PO4YDYg7IstDYo9S/3he
+ DuO6wGVby71ZJ+z1RWSAnC/D2vjk4/R46gmamGtFqHoGMNwxEPXiuoEZG55MOUG9ww/QwvJLNLX0Oi2o
+ fJ/eKrjI9bGUevL+E1L5PjKEPZd9VoIOXtp5hd7NPU/3hO1icNtHE9MqacC2QklhMm3neYaqI5JuBEET
+ CHToxTA3NraK7k84RoPD9shsEB6B3MZsLJUu1FdSD9PY8P209OA1yR03El3J4XslChXpT9C2dWF4gxfN
+ b8tOiVhFypH+QWU0gtuQ0QyqI8MPWusCi2jQ1gKBzUcYQJ+IP0RPJ2Gs3hF6OGY/71vK51ok+eMsD53V
+ 1Wr3yGH8Hqb2Uk8colIV4sQmLE4RSPRZz+3y8myrW9XV5Xpf+G56IHg3PR13mDosSaYWDGLdlzJ0cbvc
+ l68D3aXwsD3A19id2/Uea7IE5l5PKKc+/JuYKL8T2x90ocIzN9zfSjTca022NU5uOR+T7WDzmWwTMYn+
+ 7ASZsgvZGjCMCd661vMTyGdRIj0dXkovxO0XgLs/iAF4Kab+4u/OsKCt5QzLywa7jLQkLd4Jp4e25tMj
+ Ibup9fQdEkDRbsYO8luWJLNACNDNjaY7GI6GrclgFgiju95x9sgpT2DKLqQr6TA9jDrP3CHeQHTX9liY
+ SL58bzCvqicfF7M4iGPJ5ZmTyFaXkHsOnrrvMKzdMUkV4IY4XYY3DjAnaUeYoVrzNTYDS720VVKNIFDi
+ tonMYC9YHObIak4rVSbEOYGcI8wxvJkCyN2/OkGS59pfH3/+c3pyU1p1l6oL5JCXxQ1yLNzcO17eQM2f
+ X0ptMJYON5uFdxPkZAoO+xi5P2KMnJUIuDf/M6jLI+cEcApx1SBXd9eqH4Ncmatr1QnkpMDis8sjt//i
+ h3WCHF5OINeRC9H28jPyG5D5WhC3kytTNciN4H8eRWesFCVIRrwiuYiejSyl56LK6MHAPNq++5i7e/Wj
+ z39GbyVWSMPQ4QuAnB3ozPFwGq1qBziVCXLQjSBOoQ3LCm61ZIM3E+DMZTvEieoBuPpADgZKZYKbKTvA
+ QTeCN5VAmssTJzAH48xGtyHwZkrzxyFSsjpaUqGgGhgU3mrAWpwrjYZr+UYQ54YaQxbYWB65fgxAABw/
+ gSbeLu815QRhX1RO8Oa0nwrb+zPEoUsVMGeBZ/U5yfnxZwU57O90zfDK1eg2dbhXEGBOu1oV6vCO5yQA
+ bXtG9YGcKadyYMKclBsbzNlllkX5zOUV4+HGpJ8jHwYqfB6XeYF6MoRhGWXbqQ6IXHVGZ3zAuDjkmhub
+ cpqGRjLgxB6mwTFHaGhEBd2XdFzSmEgwBINcHwafUQyOT6cfpWHRh2nZgY/o2ZRjNDqUQSrqAA2IPkKe
+ W4skh9wzmScl6KBH6F7pLp2YeYym5Zyi2YXH6DEGlvGJR6kHQ96UjLMCjC9lHqXHk47So8kMptuRjqRQ
+ JtHHNF3ISwch0GH9oQ/osagqAbm30s5KUAOCF4by++CwMsn/hvQjvQL2yLvVbZonastC1yiiWRH9ii5Q
+ gFk7Bh7IY02OABkiVuFRHBS4m4YFl9K48D30QMQ+mhC5lx6NqaTnEw/QMwkMfYlH6B4GwPsj9lIPTMzP
+ 59FuaTa1WZpDLZfxMrfl8NR5cxuN7W0XJknPDda1W55EmOKr7doCejVuN72YzmVzFQMhAyNShoxisEOe
+ uZHbS/j6kqnnigwa659PA9al02C+joHcXiNn2+uJ+8hvRTy1Z9ABjCGAoSXbPE8GxKazYti28e+w3UOq
+ khZzE92OjeazIq3xchuzadS2Auq3ms9tQQzdOZNt77RQ8lmWRuOCdtPgtZY9az87iu58l7/LkNZkOtvV
+ OTskJ9wrkYdk0np47LwZTjFDBAIbOsxPps4LUkUt3o2g5m9vlzQeADtMswkPGhIA92bI7LU0kXoujhcP
+ HiCwLX9fxsy5ulPvmmKlHdHxcXYpu6jMnkjIzkVILwIhnkA8dYhqZZDrNCOS7hbPW7VMBnPiNOhrAbmD
+ Fz+gv//DSnALQNEXlvNPvkd38nGsm4Hxc3zRfNPghVNhPJyMi1N4k+VqYX5VkVOwA0ButoJcEp3+2GmM
+ XM1AB+SSq+GJU/HDfdS/Oo9czWAHyyNXV7BD9eT4NwY59cjVGCPHwj4vRe2m8nPv0eHr36c/uHLW4Te3
+ 5HODuNKCOGhp3mH6wx/+INv//t//Q5/8+6/o2g9+Qtd/+BO68unn9NkvfiPTnuGF80g89p4FcuKyz2SQ
+ y2wEuW8YyJndqWpgv3Ug54Ih9czVhDlLfqkMRS6ZYPVl1VCAg/DbffjcBmdcEvXhz4DNHja4xLFMkNPr
+ M68Z3ay4PgRF4LplndP94nsJmEPaFoU5lXXPq5+PJX1u1fo6QK5b+EH5UzE2/axEqGI7Zm3ADA5Yr2Xd
+ Xv5VXUMtzxsADuA1JvmUjCcbEX+ERiVW55sbuGO/gBzmVx3E5zyAf6df2H6avfc6DYo8SIMjK+mxpCoa
+ F8/1LqCEnik4S89mnaJnC69Sn6BSSQ6M7tYB4RWSguT53FP0VNw+ejiBzz2yguYVX5YuzUERpTQ58yyt
+ 2f8BTc7lMsnn1SWQQXNrqYyVQ1errz8vbyykjYc/YmjcQ2MYQh+MPUrjow/QuKiD3NblyuT3DzNUjuTf
+ 9mTQ0fYP+en8uM2C+mwqIQ8kCeZ2st0qtJcFkiy408p8UbvVO91Cu4pgCPyxRmoRTNeFKFR41DAeD8mA
+ x4eU0gM7GHgZ9J6I2UMTk6vo5Yyj9HzyIXoiag+NDy6je0LKJUEwZm6Alw49N4hYHc/PIvDAB5R85D2a
+ llZJmPGh97pcmegek+KP9t9Jo4L4viLoJLCExm0vZYjLpF6rMR47gZovQDL7JFpddk6SDwPikLIENg7g
+ hpkb4KHDZ9jCDgsSqAPbSqQgGbR5Fw3buJN6LkqipjOQwiSRmsxKplbvxojHTSbYn8kQyNswzu2B7UX0
+ bBSXr0WJDFkIUthBr/DncWszJZrWiwG148wokSc8e0tSqS+f04j1GfRAYD71ZnDtyeeAmSMw6X6rqdvd
+ Q7K++9YOt9Cjhym87norjO54w+INq1ewboiDbhbkNChUvHRI08Zs1eFtpC0JsjjqqwI590GdIE4jVSHb
+ CeMiMNhe4USF11//hi6/y9RlqhXsoDfB9Mi5PXMumFO5oc4lcYvyA6jVtWp45Prwg3V75FxdqwA2J28c
+ IK4TQ5rX4iSZhsvsWlWPnOSRm68gh6jV6+6u1Xm2YAdxDeOzC+T2Mdxi31rBDp//yn2P4vZVe+RkDtVF
+ qdQG2bPnx9E4/wL68N8t6APIrU4v439AaQJxfbmiFZ//SM5RjwWZL/v6C5/+Gz0VtsuCN5c3zgngTJkw
+ Z8k1i4Mr4SXUEIhDd6qqPpBTiDNlAlxd3akmvKE79aa7VPlfu8gGce5uIZaTwbLLDnHaNaVdVaaxVGE9
+ jCMkSX9hiA0DqxGpYowNQ20aayybhh0GX7081mcLDBTe7ABnygIOS3YQkW2ADpsUbgRwXLCGZYUqeL0A
+ c70FjmqC0q3SzUBhz5TzAl4DMs7T0IzLNCj9Ip+bBWoCa8a+OGd469weOdf12YXrxT7SxYr7hC5T1/20
+ C88GMAfoU+EzpEBnPVOF7/qBziwTprR8QPqHwPyTYKobl0WvMP6zEXGUfBmiHsk8J7niuoVX0phEPt8o
+ bLeVbxb+2Mgy15GuDGeY6QGfkVsOud0QyDBkRyXdw9fovZ3hKZihiEHKJ2wP3Zt4hCbuuij54/qF7KPh
+ cUcloAHj1by37WPIO0oj+VyeY/jzidpLLySfoxm7r9JIBhiMb8Pcrr4MXUOiD9ObuSclKnVy8hEBt7Gh
+ e+mlhEM0t+x9mlZwgV5Kr6JeW0poVvo5mrLzGIMiX6v/Puq4eTf5BXHbsymfppecpedyLlP/rfkSFHFP
+ XCX1Cz1MT0TvI79Abn8xlm3zTpqQeJyGMyAisa/kkdvEbZtrhgdTAnrcbko7upbBDuPvVnPbuIrXrwTo
+ oWckw90WYz9MvdV9bRZ1XZVNffB7DJAdBfYY0BDosCyTWq/h9pm/N4Tv4+jgchoXVsFgd4JeTTtGU7NP
+ 0kuJB2li9B5KPfshJZ76kE7/+OcUdvh9BtQyidh9MaWKHowollkckLS3D9phPvbYoN3ksSJVEga3W5RM
+ dzOYdVqWQUFHvk8eDJ4Y84aZHdBzhPQj7RYkky+fd3cGrb7rcmgcP8N7GSoR0dphkTV3KsbmYf5URKh2
+ 5+9itoduC+P59xjM+B3j87ozgPZcHEsD16bTu1lHaH3xSVq08xQN3pxP/ddmsN3MFe+dJ/+ueNTmMEQy
+ CKLbtePsSEl10nrqDrr97UBJOIzACQy1qqtr1ZSdO+xcoiAHyDNBDnICOV0GvCFXHPgJLIX0I/hs8pUw
+ 1jcJ5M5c/YD+YXjkVP/1t79TetUlavWG5basyyMHmbnj7GlH1Ct319vwyoVRdxvI+bg9csl0yuWRQx65
+ RWn7XCBX0xsHiOs8L5Z27DlNZz/6IZP/7mqQ868L5OCRcwI5yyPXzOWVqwY5yyNnglxvrqAVV74n9wbw
+ e+i9z6jv+lyXRw6DT62EwAC6AZsL6MqPfyH74t5O3p5OHTF4VLxxR+jn/2ElM/7b3/9Ov/ndH+jnv/4N
+ /eI3/+EWPv+S98EUX3j98S9/pR2HrkqjYXWrcqNgQJuTGkHuqwc5hTgnbxxUH8iZMg07DP03CeQAOPDM
+ 4d2CuVurm/XuYV+cz+CMC+KNs87ry4OcjpUDmKGL2en+QXgukHa1miBnQSA/CwE267mZAKcyn7dTeYBu
+ BuSw3nPHAerHADYm8Qx5RR2QdcNTT9GQeAZL15g4uzwZ3lC2uzJUIQ/ciPjjNDqJyx1DYN/o0zQu+bTM
+ veoRvl/qWJ8IRLwepb4MgCMjK+n5vPM0KPaIdIGie/W++KPktaOcv3+Ans45RwNC99HjGafoIYaqd0ou
+ M/BV0pOph2S9T9Ae8cp5bC6iSYUX6IWMs/RMGkNK/D6amHGGXiw4S8MjK2RC/d4MYksrPyTfHfvoyZj9
+ NIuBcCADUM+A3dQtYA+9nHuGFu48KuPvxkRVUPctRfR42nHqwyD3Ujr/wdpaIgmFEemKY2FcHMbcdV1f
+ aEEbt3doCwFsGEMHL5wpbTfbrykUb1zbVbzMbau0swA1lpX015pvdcDmQuqxNlumzxJxO4qACnjrBm7J
+ p7Fh5dIlOnB7KQ1hmB0RWkrDQ3bTqPC9ksftxZhyOvrRv9OnP/0dffL5z+jYT35DIQc/oE2V12lb5TWa
+ lnmCXo7fT4+GFdNo/100GNcbuZeGMtCiy7XfuiyZpB4zPfjvv0KDGGBl3eps6rk8iQauTqXx2/Lp0cgy
+ eiS0kHrxtXgsTiOf5ZkCd5gIH+lEevDnIevSaMSmLBrvXyDTez0VUU6vxvAzYth8OryEnggppvu25NLQ
+ LRl037Y0mpq+j+56J8ry1rGdbj2Tbes7ruTADG+IXAWsYbnrgjjx3rXl/ZtM3S5zuwrEYRjWPwHkVIA2
+ wBzUhrml7dRQN9RhnZuxvkkgd9/yaPr9n61uQIU4vDAJ/Nqcg+6boTfgVnrkfv3H/6Qec6xIlzbvhFHc
+ 3lP0+z9ZMztElp2gJlO21fDKqTfu0c3p9Pmvfiv7Be0+Se1nRoqrF3Ot1g1y1cEON/LIVVyo7ZFrx4Xy
+ 5W0pAl+4Rx//9Nc0aK2VRw7wVv2eTPcF5NNnv/yt7Acg7jk3jLdZszrEV12W80OQQ/qRy9Rh9g7+B5VC
+ 7Rn0VB34nxD+vUUfuizRwAiKKDn/EfXifzjilWv0yH0jQE6DGsQbx+92A6sAVxfIqSE3DTsM/dcBcibA
+ qUywUTAC4ACEFIhMUPoy6p12XtQn/YK8O+3jJHSjDs44R0Mzra5fwNqtADlAGPaTfTWFie3+QXqvFeRw
+ fxTkzO5W6d7+mkAOZXEgQ9Sg2GPUKeoQ+YYdpgEJRxnOuC7wNinL9vLN6rx9Dw1KOk7jGfh6hu2VujI2
+ oZKGxh2TXHE9GNyG8PliMn3P0L2SugRdqKNiDtKYmMM0IZ2vJ+SQ1DeAIPK1Dd5xSMDv5ZyL4jl7KvcY
+ zS+6SB47jvG2PTSUofGR9FPkHczHCyijvqEVMon+q9knaSXDih+m+9q2hx6L20uPZx7nY3Ad5vbFl4Ht
+ 8ZxDdD9D4KTs0xR+4hNKu/gZZZ//AQVWXKVx4VU0jK/z+fRjDIrl9DSfW5ctZTSt+LLM2NB+o9XewbPW
+ G9Ns8bUjGbDnxgKZVN+b26ge2xjyAsupD7cng0IsYYzdmJAKGhe2X7ptRzB8DQ0opiGYAWLLLgk28OP2
+ 0pchEBGsmCS/89pcmQWi+4Yi+W1PhsOuG/KpV2AJdVyNblgrPQny0Hmsz5cAhcHbdtFD0ZU0OY2vIbKY
+ lhaeo9Of/IxyL/2QBq9Joh6rdtLgwP3UZl4ytVjE8Lg0XcbMIWL0EYapt5NKaE5GFa0uOk9zs47Tq4kH
+ aDTDYtnlTyiq6gI9HVHE8FVGT0WV0oNBhfR01D56Wj6X07PRe2nMtjzqtz6X+vG5eS/LorvnplHTuSnU
+ bG4sNZ0TQ3fOjqK7GLQgGWc3M1rAq/28GPJaFCszRPgsT6R+m7Il+W+H2Wx7Gdhgs5ELFmrJthuS1CQM
+ dpgHthum6prBtvddKwkwGAH6Z4CcBoFiPByA7W5mluavbHJ76Ey+Esb6siBnftkOcO4faSDI3TU5gNYk
+ 7qRPf/Iz+v0f/ki/+e3v6Qf/9nNanblfZniwIM7yxkF2b9wt8chNtbxyU7gAf/YLa67VP//nXyi49CRv
+ swAQBaLNjB00Zm2KJOrFC7C1OvcgF4oIF8jZ5lqtAXJ1e+TMMXL9kUfO8MjNTD8gINdqQRL5rcmi3/ze
+ CkIAYCUeukTDNuW6c8i1nJ9I9/jnU8m5D91AfOnjH5A3Vzh44+ZnV7mv7+cMyuuLTwiwIXJJo5tUGHvx
+ evwe+uDHP5X9f8TnMq/ghEBca7jPGwRv1aoeF4cGzVJ9AKeqC9zqgjdTJsgptDnBXL0gx//c7XLDmwvg
+ VHaQQ9cR1CBwY+l4OICbgpwd3iDTC6cQp4bW9MKpnAy1acjxrkYeXXOAAvXs1AdvTqABiJOISoYJO7jp
+ uwk1Jvg4CZCE8WiAJvXMAab8Up33b6jgWQPA4d0up/3Rpdo77Rz1Y4gblMWQkM7nwpBW1/4QQA5dw31t
+ oGhCnPs+AMhYGFOnnjnz/jnda0juKb9DCnQQPlvj5qrhTZf1s70c1CctSybQSVnkZQQ1IMUIyqVXxAEa
+ nnCCBjCMWV2uB6U8e7rKNNKOdEfwQtJpGhfL5xjOQBe+j4YlnpJpu/pG7BcoHJzIfyYiD5JHcLl0sQ7j
+ 4w+KPizeuh5cp5C646FM3j+EzyFyPz2fw88jhCGM62QfBo4n88/RlF3XpPt09u73ZWL8p7LPCyCOTTpM
+ A0PKaHzMEZqx+z0aHlYukPNGwVmakHiUYaqQ4SpPZnB4LPmwRKrid3sFFdD4+GPUwX8/xV/+KZ379z/S
+ 9V/8no589mt6Of8SvZxexQBYQffuKKcpBWfo3vgqCqi8Ts8BVmP20bjoChobtZdGBu+hZxKO0fgdFXRP
+ 7GEJugDUAfB8NxeJ90zVYyMCG0qo59Zi8t64SwIgIOzXa0sxydg6Brhh3Fb153au16bd5Lcxn/ptKaDB
+ 3NYN37abHtyxjwGwhPpvK6IBW0tp0BaGQT7m0O276f6wMpqcfIjeYmh9OrKcHoook3QpqxhA7wvK5nUl
+ NGxrCQ3ZtpPGhHIbifF43P4jF1z/DXkykf+4IIbFdTnUfX029d3KAMmQ13ddHkNvAV3+7LdUefH74q1r
+ w3YM4+aaz0lgOIu3IG0e2z54zxbx+nnxkvOt8+JUsYumswO6m8ENgRJt2U4C4vA7nReyXUS07oJ4Seo7
+ YE0GjWKYg71WwbbDAweAg0cOUIdo1s78+13gJGEoxLyr8Map4JVToFPZgc4OcnY1FORUiEC9g4HtOy9t
+ pVb8e8gjB4hzs5RLbsb6JoEc1PatYBoyP4Je2JhAT6+NpSELo6jttDDrRsiF32qQOyBwApDrORceOQvk
+ 2kwNpsxD52WuUbx+/+e/UOXlj2lp2h6aGrVLZlNQEMJr/9lrNH59mkAcgh0eD8yja59a3bN/+8f/uEGu
+ jyQErg52qE4IXH+wA4BxtmtmB4BcBy7gyzL2CcSJt+3v/6CTH/2INhUeoXcSSimk/BS99/kv3IEKf/rT
+ n+j5EP4XuAQDUFMo8eh1+svf/iHbSs6+T14LogXa2vB2TMNlCiDXdkECRVackXP5y9/+TjvPfUTeXGFb
+ cyVuBLl/PsippIvVZWD/1UBOBbgByMHDhXcL5uoGL1O6382qxjFSLlhRqpkXaWAmw1m6BZnmPnbVBXLm
+ b+h9QMoSpFvBdQJWAXXm/XO61yrtaq1LJtCpzGevy/VJy5KWs+74AxHBUJTBzySGt0dwGQw/QCMzGXQZ
+ uLwxjsxVVqVMh+ynPlxWR/O1jeVz8gyroC4MaP0YjB7ka0ZAxOC44wxvXPa3W9GkQ3j/EXEMawx73YMq
+ yCf0oIBgr/DDvK2KHsw4QX23H6Aewbvp5cKrkprEm8FvUHwFrdn3KY1kkOrkv4+ez+Oyw+sfTT0k3bIv
+ ZF+kt3e9R/0Z5u7P4vKNAAoGm7eLLlGPLUU0OtKaEmw8n+cgPuZz6SfoHoaue2OO07tFF2hVxXXKf/8z
+ OvWDn9FP//Rnyj3/PRrPMHp/8gnpQvVg4IInrcPG3TSv5Dr5bdlDHTaVUPuNxZY2F1Kbtdk0gu8BpsXC
+ pPuYh9XqsbDaSsyfCmEWCARKQDKn6kpuZxF0xvCCYIfWmB2BAQ2ettYrM6jNav6TvbqQv4fu213UJ6CQ
+ em3Nl+E2mBTfb1sB9WbYGhZaIulMnkusolHBReS3KZcGM4R7rcmmB8PL6c3UIwJso8J2cpufzeBXJCmq
+ JLqV7RCCIvpsKhAHwaiAneS1JIPhrYDuCeFnuiGb4SqZnmUon5lyiCZE76LxEXsZwKKpFYNbq7kxMvNC
+ Wwa69nOsAAhEsbbm91YueGvHNs+EOAgBEu0YADF+rjOGFDGAAeZavruDevC1I5q0x6I4GsfQieFOCnJI
+ RyLi7QpynefHSToSj7lxMusDJtH/Z4Mcol8xgX5rZpj2DJpIP2ICnMrNWF8G5MwvumUAnPtH9IcbAHKi
+ V/2pxWvbJCMyZnXAxeMm4KLRnaqyuy8h941z3WC94SJ+GPJA+MHgAXWbHUFhJUcEZv7r7/9NXjNC3SAH
+ d+vg5YlUevqaTFGFF8aj/YyBCl2pf3RFguL13g9+TM9tz6OWTPkCcvw+YVMGXf/UlUfuL3+htq6o1Z4L
+ Iqnq6iey/nd//DMtyWY44wKraUe0axXA1ndpLB177/uy769+/yeam3lQIM5SskT1bN1ZJd2deAHosN9P
+ fv07+u0frW5hvH7729/SosxK6rTUmiDfgwvq3qufyjZ8N3jvWW5ELE+c5hNSqUcOmhRTKh5SvL7/s1/T
+ 8MAiR4iDFOScYE5BTrtVRTcAOft0XHaQU9UJb6bqhDcL4Cxw00nxy8kzkIGNZe9OdXep1gFvppwAzpQd
+ 4uqDNxU8ITCOOiYO0i4uy8DWDXBqtCG7YYexh9CtanXN3RzAQQocgDWFOU2MKyDngpabATgV4E88c+kW
+ UInnSmDIBmoMXFDv1Itu6TZznV36vRrfdQEYvgsYG5hxiYZmXbECMPj39bj2c1XhOwPSL7qh02kfFa5P
+ pV2sbiDGPcO76/7WJUCbjpvDsgIe3hXM61J9QFejTO3AnwdeH1ZJYzIYVhni8MejF0PO2ORjAmaAOt8w
+ ywvXdcch6sPHeCT9OA2Itspwl/CTNBQZ+3PPSbdq3xj+AxJuRbz6hVRI1yrGy8Fb1yPsgByrZ+gh6hFS
+ HSDRP/YwPZvB9yXhPL1efIF8AneTV8AB8crN3neBBsYepMH8fdTt8YmH6fmM/RK1+kDCEa63u2kcvz/K
+ GsLQNiCM6+/WvTRt33vUN3SPAN7rBSdoUfF5ejf/Ar2dd07ytvWLOiCANC6hSiJpP/oF0Se//BMtLrlA
+ z8ccoEcTD5FvYKlEj/oFlFK7DQU0s/R98t6KLs5C8kAbhUS7vAy15T/FQxlQ228uov7c5nTeaLWB8kfX
+ 1n5C1jysgDwL+AB/6E5F3jgBvjW5/Nt51Iu/PxiRuehOZWjsv3UnjdxeTA9F76UnWY9HltMzcftpBN8z
+ vw1FMi5tcOAuhsQ06rwugybxdQzYmE+dV6TQA9t30qCgYv68i//Ysy1hO9RrbSG1WxhDgxmYEFDwUOR+
+ GYeHCfn7r8+WgIVHIvbRCN4+PngX+TKE9lieSoO3MMCyjbtrJlKRWJPnQ7CFiGTtyjAmXjNe12kpg5oA
+ XIy1z6xY6sogiZymbWbzd2dEUs+VKeKdgyeuE9tGryUMZ2yLuy5iOJwa7AY5HSMHkJPuVWaBznOS5Le6
+ zubvL4yV+U4xhZeygls2iINuFuRMOUGcrgMTYT7VFm+HCtBBJsBBNRjLBnKQE7N9rSCnpKpSkIOcoK3G
+ DbMBnEnR5kMByLV+N5wmBmVT8eHTFLjzILV+J0wgTkEOGr02jdZm7aMz1z+qlePuV7/6FcWXH6MnA3Oo
+ w1wuaFxwVD6L4mlJSintPnKaVmbs5X8X1j+J1rMiaU7ibio6fIr8dx6QWRTEI8eFENLxcQC5tnzMBcml
+ tKvqFG0uOEADuGJVg1yKyHtpMs3nhunAuau1zu/3v/897Tx6jt5JriAPhj5AHNR2cQrNS91LJXxuEbuP
+ 0HiuuAptdinEQb3W5tDitD1UceIcbS3if9frETHVCHL/DJBTWHN74nj5/xLIwQOG7k0AErxyFlBZXaQq
+ J5BzbzfXNUQ4Hh8f4+gAcUMyL8s7fl+32WWe8xcFuWqv3NmbBjlTCnDmOjxX+7OHGgpy6C7tHX9U8sJJ
+ uY06LoEOIzL5GqO4XPH+gDJ43+7jZ4QuUinbDH6Y1/TR7FP0cO4ZGf8G4EN5Bwii63Rkwgnqz6AIgPNh
+ AJT0JSH73V5t/F4vBihoUGwlPZd9jh6IPiRQ1inA6iodFr2PJqQdlXlUJyScpIdSTtGTGcfpwfgq8o2s
+ pHcLr0vQweDQClpUdpaeTj5AU3JO0ws5l2jbiU9pTOQB8aohGMI3sIQm8nUNDmXoYaAcEXeIum0skYTA
+ RZc+pZCLH9Ly8isyp+uTDKYvpJ+U9CV9/POpN7cx3dbn08vpJ2R/pChBIEOnddVtXed1iFwtopEMqG03
+ 5Mpcql6bGahsbae7TV2HSNYc6sTCBPo4PoTcclB/hkhElw7030V9tu2SHHNjcB927KeHd+yV+WEfiqyQ
+ btK+/Lvd1+XyeyF58bGR+BdAODSwQGalaLUoWdYhyADzonquypQxcYhS7ccg12d9Dg3fspO816RTxyXJ
+ 5LGC91mZLSlKHo+ppB4MWQ9HVdKAzWwvFqVQP5zn4mRxBMAb12IegxrbPBXsYbOZUYTJ/O+eZdlTJBJu
+ w6AlE+wDzlw9WC1nMsQxtLZBdykLY93ggfNblSaT6APQBq7LlvHskibMZdcBcgA9pDlpMc0KZOyMNCiL
+ 42kEQ/U/E+TAQ63e3E5tpkVYAPfqNvqOyVEu1WCsbxLImcum9KLlIm+BRw5Sl2mnmeE0cEGEzHEGgMN8
+ bDInmwFzbaaF0sObM2hNdiUF5O+X7tiNuZU0P2UP+WA2CC4gAnCIgoF4+W5+7zYvmkYsj6G2M8KpBQqk
+ /KvgfxPzo2kYr+++IEoKraNHzgVz3fgfwrAVcdSd3/EZEIfxb63mW5PiQxjwed+WTFqSdYC2Fx+l0OIq
+ CuR35KgbsS5NghsU4nRqLp/lKQypyeS3IrmWF86UCXQYFNuT/6mN35BKvqvT6g12aAS5rw7kFNQg8cYZ
+ 3an/F0BOAwp803jZBXNI/QGYc4Ma4OoWgpzCWb9MyxM3LPOKeAT7pJ+nXvgt13bz981z/jIgJ93Hyfwd
+ vW8NADk7uNk/6/g5+7OHGgpyA/h496SfI18GLs8I/KGoopEpp6U7tRtvH8jP+wHcr0TeHnOAekbzH1cG
+ v4ezztF9aQxw4bxfmDVWbiAD3LjUMzQ83oIznfHBAr+D4vVDd63pkZO0Jlx3+sVU0fN8zGkV70u06FM5
+ V2gsH2cw14t3ys7R8/knGVyOUy8Gtnv4+oYyUI1POkdvlpynV/N5e/oRmr/7Ko2KOkSj+TfaBpXSrPKP
+ qcfWYvLcWkKDNu+XtgFj4jCt15A4htPg3TScrxHj3BaVXaa1FZ/S60Vn6ZXMSppZwNfG8DQ+ooyeTuDz
+ 3babOm3Np2fiD1L/kAqGuTJu3yyQ0/YNYNYFULQlh/psLZeuWES1eiAwwqHthDcPCYUHBmNqrkIZIwf1
+ Yfgbzm3VMIakfgxpg5AfLnyvzPqAgAOcz7jgYvJdz7DIx0H0Krx1QwN2M2BxO7zSmkR/RFAxPRCO2Rhy
+ qSVDl8fSdHo2toJ68L5+fO6YYeLupbBBSBicQh0WJ9D94RUyt2n7xSkMfKn0WFS5dHti5oeBazNpaPBe
+ STI8eFsxeS9PI4+FcfI7gDT1yinIwZmB6bQwK0Q7JOtdmCTTdbWbG23ZyZlWdywgDbB2O9tpn2UZhGwS
+ LeckUn++PgRDYJYHRN8KxKEr1WXTJVp1eqR49ppPjaIm/LnjLKQxSaAxmwr+aSAH/mn+Vhh1mR4uCYDd
+ EFfP+DhhrK8K5Ow/Kj88yd8thTYT3sxllQKcygneTLlvnOsG14A5F8ThoTSDGOZUgDcT5MQrZ8Ccumab
+ vraVmkzaQE3eDrEKhksAOC0sWIb7F1J4gzB4825+V3hzi9cB4CD1zIlXDoIrGQWcC7qAHANcS0Py2RWl
+ Ct09cwfdPTWIWvI/HZ1XVcTbdFlhrga02T+zanjmWJoIGFJgcwI5Z4irHalaDXSsekDOKVLVCeKgukBO
+ I1VvHK2KBMDVEGeBnCtSlf+ZO46Nc8kOcvAeqOzgBpnwpkZLBc+Eyg5xWAcvnAQ3sHFViFPVBW+QCXAq
+ uyFXkAPEwdhjuT54M6FCIcMUAARQYgKcE8DcSApLdmiClwwQB/UDLPFnX2xXEHPtJ3CVdvELyvqdgVmX
+ JcAB3jiNcoUUGk3peWM7QA7nh3fzmuoT7pMFc1aaE2usnAuOZbluoDOflcKbvZtVl+X52soAZJYRLT/w
+ wmHb4OQTNCztNJdFBDAcJM8dR2lEykmGHAa4+OPS1YouT8zgMIivY0jSCRqdcILGZZzjMnuIukVWUo8o
+ K4XImGQ+Ly7zADUPV/4479CD1HPHIeoVdVgkXrtQrlch++Qdwj4IVNhx5jPK//jX9Gb2KXp6z3s0fEc5
+ g9oZeoDP55WM0zQ2rpJeyz9NE/PO0VuF52lyyWV6ltd39d9ND8TtpwGRR2l07CF6Kv2kJAxG+pJZFdfJ
+ m7fLxPsMRt0ZvoYziPYK2k2PpxyjQSH7yYdhLfDkD2nNwffpufRTtHLvdZpXfomm7bpE3oGFki/uSQbI
+ x5KPytizoaG7ZG5XgJxAHEOXtnFuAcQi+BoZ4Dqv3yXdspg5ArM8eHKb6L25iHx4nyEMm8NC9lB/f4Y/
+ /xKGuF0SsDAmrJwGMzSODNsjU4wNCSoSiMNMFGOCywXgMM9qF26n0Y4jYGEgXzNmacByl1V5/LlUxtFN
+ yToh+w/l9nPcjjIaG8B/5hcnib3BXKyYdL8FcsCtzSPMw4pAiE6rM6g3PHDhZTIbQzf+HYyT67ggjvz4
+ NzAvay++Fp9VvG4xputKkGTCmM0B4+4AdLCJCIBA12u3Jcni/WvL65FzDmPnMD4O49t6r8mRfVuz3VRP
+ HQAPY+CwrRlDHGzwgLVZMnZO7LvLjjeTMXKR4pFTG49jAjzv3ZorkCec4HL4mCB3szAHLnGCODfTMOdg
+ Hd4x5ZeMiTPYCBJmsvNUPSBnchqmUHUvmxtU5hfd+hIg5wRvtW6Aw40yVQveWO4H4HogJsCpFN5U+nBV
+ TY2UIxCWa4DcTIyNs97FI+cAclANgDOkHjkFN7vcIOfywplCxTJhTlUD5FzwZqo+cBN4M7pV1SNnBzkn
+ NYLcVwtyOi4OMGd64FQmuNllGmeV3YB/G0EOnrG+GRdoQPol6o8uzwwGujSGLZbuBzlD2o2F4w/IvCQQ
+ NyCLgYx/q/q3bV4/l/Q8sV3H890syMm9S7TSrgDmkIC4ISCnwvOye+NMmdvsQGeWES0/PaIP06iscwxX
+ h6yuUy6D8Bw/tfMiPZB5kQZJlOoRBrdT4rHrGXFYvHSjUvjYkVXUa8dBGhF/jEYlclmKqJTyrWUf+/bi
+ Mu4XeYj68b59+bcwwT7Um8u1H5dz6WoNraTuwfvIl3839tKPKfni53T6Jz+jio9+RCsrP6KXCi7Rkzln
+ 6OG045LXrQ8DzUBXIuJeEftpQiIfh6EHARQYK/dQ+hmpvw8nHZYExIC2WQyEmKu1i/9ebhN2y+wNSAky
+ ir8/gM/7weQDFHTkE5pZdJ5m7LKCNqbnnqaN+9+jRxIP0TsF16njpny6N/m4AN2IHRU0MfMoDQ3fLZGn
+ fRgOO26q2esAdeO2DVOHDWdgxJi2nogoDUBet33UL6CY+gWW0ig+FqJa/RicemzcRYMCy2hMqNWVisn2
+ e/P3AWf3hO+RwAxAmhe3seiGRX45pByB5w0RrRjPhoCJTivgicuiEfw9DLV5q+A83Ru6R6APueH8NmRR
+ r43WfKsdl6ZR2+Vp1G9LoczJOnDbbgl46L0ik8GxhIYwXAKkEHTQbEks3c/3DtDmuYi/t9iaKB/zqmId
+ HBbobsVvYA5WjG/rwrYLiXsxF2vrebGSV67t/CSxnwA+ePAwJZh45fj77ecmSPJhtcuw3x1msZ3GnKvT
+ ImUcHKbY0u0Kch4MhohYlflY+TuANyQCvm9bnkDd1wVy+AzuafpGCLV8K9jNQyYn3XKQM1eaX3SrHpDT
+ kzJlApxd5sWq6upSVeHG6Y1VuW+64ZFzy+WBM6UwZ87kYOaPg7CsUAeZUCeaxWDnIDvYQSbImR45U4A4
+ vJveONXdXPgV5kzVB3V2sLNDnRPYQUgCrLJ3rZoAZwe5GtCmIOcAb5C9cVOIcwI3XQ9w03cT4NxygDeo
+ NsBZEaranSpdqi54q6tL1d61qt2pADhVQ0BOVE+XqnShspzGxJldXnV54yDTOJtGG1KAg0zDjzxkPkgP
+ gWVDdohQsHBLoc0FcionaLFLIchchpfLBKUaoIXuRxYADt2fAzIvu7o+re9gH9/06v39GPgaIl8GOAvE
+ GFL4mIhUBSwi3Yge60Zyg5zLi6fXdTNCl6yAIAIfXEBsBzqV+UxM2YFOP6vQlQ5hJoheiSf4mR+Rqdn6
+ xKK8HCff6CN0b9Yl6pd4SgIdMI3biLQz9HLJNZlNYQTuUcIxBq6j5Mf7jk0/TROyTotnri9vH556mgYn
+ cjmLQjnGn5WDDGxHaGTcceofd4z6xR6lPgxvmE+1d/h+gTxAW0+uK32jD9FQ/t69yUcZ0s7SS4WXaUbF
+ +3Tih3+ko5//ByEcrfSjz+m5RC77wVyPuY72jz4oU29hrldM1QVvXs/gPTQ6sYoBbQ89nHiMhkUep8eS
+ TpAHgIlh7/7kU9IOzC+/Tp4BpVz/y6x3eMYY6B5JP0F9o47Ts6mnaHPFRZpRdk0iPmV2icwLtOXQR/Rq
+ PkNkDK+LPUD3xR+nThvzBASHRpTRxKQ9NIIBC92m3bfvkdkckIi426ZCSR4McMOYt74Mk48lH5Oxbr25
+ /RocVE5DtxfTML62XpuKxGPXe2uhTP01Npyvlb/Xc+su6sfHujf8AA0PKaNu63PJY22egFunVQxrhgB7
+ GFPXcUUGYd5WjJMbxBA3mgG318YcejashHx5O2aGQLoSdG322ZApOdpgBzyR4201pgHj723Jlxkc7gku
+ pX4bC6jdkgzquDCZBmzIo+Hrc2SWB8zh2gaT1M9Npg4MbJjrtOuyDMsDNy9JUo4M3JxH3ssSZaJ9TNnV
+ lrfBA4dxct34PNFl2m5eHA3iY6I7tT1vhxevI0Ne+7lsO11OFEShihOFYU3t8biAQpniSz+7u1WnW04b
+ dd50mJ1A921nm8MgiIjWO128INxwC0EOAs/gHayDCfU9ZkbWYCKTk5xADjJBTmHODnVfO8jZoc1+4XLR
+ LlirSzXAzSX3A3DBWw1vnAvaTLm9cC5oM6XghmUtFJAWIrccgA2ye+IgjAswgc1JN/LImdDmBG8mtDnB
+ mx3YILtHDlJ4u1GXaiPIfTUgp8ENCnDfRJBzA4YL5EyIu1mQg9SbVh/IqXcMIAcB5OBBU88coKwXg5Qd
+ 1G4sC+LQlTo464q8a/et+fv16VaAHNKXVHvlqr11pm4EcvpcVXaYU5DzZaDCcXwSTpIXL3fFd7m8jMu5
+ JCltAHQjMs7SAxlnrGjTzPMCYYC3njFHJAHwuPTLDGVcpvh7Y1LP0qCkI+Kt68/7DWPYgwbFVFG/uCoa
+ mHiURvH2cSnH6cGsc/Ro3iV6LPc8PZl2gl7MO0evFpynNwsv0cuF5+nRjNM0hgFzCMMeAiS2X/iMsi78
+ G334mz/S+5//lgaH7qL74k7R41mnaGL2cbovmesHw8+wGK5zDHFewWU0JMqqv4hkfSj5OE0uOEteDEq+
+ DFb9+bjwFsIjhwn0uzLAdWPoggBzmEB/HEPlwrLrtBBzuW4toJfyuPzx9rkFp+m5tNO0uuoq+QQfpHfy
+ jhFy0XVg6OrKbZA3Q9mzDI+Y3/R5vo7uW3P5ewxO/vC0lVN/bmP6B5WJ4HnDOLf7oyvJh7+LXHHoSvXd
+ Vijj7+7dUUnjd+yn/pvyyQ/JgP2L6L6wvTRmx17qsSGbuiIYwuV9g9A+I5IVADaAQRHf6cdtHqAQEabw
+ sqHLs9vaTHot/Qz125ZPHtzGowsXwQvd2B4M4fMdsInhcDUfe5n1HczSgLlXJ0Tsob783S58LHjN0KWK
+ OVOfjT9GnVam00AG1eZzY6nNPIaz+Ql8rGLyXonxb0kCcwA339VZNCxgF7VbmCDrAXx4B9Ah8K/36kzq
+ tSZb7Cg8euh6xTg6T7ZfyBShttd3ZSbdMXUHtZgeKrYZtnoEg6vYe5f9RsQqcsYBCE2Qazsjlu4JzJd5
+ W1u8s+MrBTlNRQKI68jnLcl++XMjyLEaQa4R5P5VQU4hDkAn8Mbrvi0gh89fBuQEgOoBOYyJ03FsgDdA
+ E0CufxYDxZcAORxrYMZlGpp1iYZkX3V32wLwzN+vT7cC5NClisAHeOXsIKf3V+55PSCnz1jBzQnkJAgi
+ DjN6HKetJz6noMu/oWfyrjHE8T2PPETjsy/QI7lXGbpO0mM7r9GwRCvXIDx3/ZPO0oO5V+heBoF7kVR2
+ 5wUZTzYu8xQ9wRD8DMPYs7su08MMf/dnnKR7Uo/RyMRD1C/mkBx7KB/joZzr9FLRNXqRv/tQzlkaHM9l
+ PqJSpu7qHsL1I4RBJWIfeYXvpQFBxdSbl5fvv0pVn/yM4fEwhZz4MQ1mKOzM9fH+5GP0HIPcE5lnafKu
+ a5KbDl6w4fGHyIPreLfNe+jVXefptTwG0OAK2QYP3PiE4zS1hJ8z6q4L4hTkEKzwUvYpWlh+lR6KP0CP
+ pfN1hBbTcznnaVrmMXoi9gA9kXGEXss9Td22lNAr+Vwn+LhIOYLI1yfTjtF9/L1XGVIn8vkO3l5Cw0IZ
+ 3hjSAHWIcO3J38NYOOSgG7i9nIZH7hcvXZ+gSnok+gA9yvcLyXYBgD7bSmh0xH4awQDkAQ/aup3kuYrb
+ TeSeW5Ut8AavHH4fQQ2AP3jcuq7NltQlrZdkytyrSCzst7mY+q7fRROiSmnAljJqvzhJxr+1W5ZOnrzf
+ SP888b4BEFstZpvB8NVxeS49GVFCj0ftZxhLYcCzcsgh7QgSBQ9cmyUpWEZvL+PvpMm8qkj2O4zPvSfb
+ EHjluq7IJi+2LS3nJIuXzYeBrRXvA4DDuDgAHY49eF2ejINrtTBRjg+vW9vZyEMX6548H+rD19Z0VgK1
+ QQAj22rYZ0yS32pmhNuGt5sTL7njxGtngFzr6dE0PiBPEgR/1SAHxmn2Zih1nRUlszXc+XpIDR4yOemW
+ g5wpc2f3geoBOQ2ltXTzEAfVB3J6Q82bDNUHcmZX6o1ATiHODnI1AI7lNDauLohT1TdGrjrlSHXakbq6
+ UJ0gzgQ5QFtDIE4FeDPHxtUFcSbIKbyZ+jIgB1gzpRAHmSCnMgGurrFxJsAhUrU6WtUB5FzgVgPidGyc
+ AXAmxEFO8Aaos8ObOTbODm8CaxG1IU6lXaw3gri64M1S9dgoBbnqHGSnZZwVvDN2MFBoUICDFDJUJoxI
+ yhB0DRrrTJnwpqoL3OzS8XCqvumXpYsVIAeAwjLgywnW6hOgbVD2Fcsb5zqWgqGTHM9NIO6iePJwPDMQ
+ Q6/T6X7Yhf3QpQvPnDlWzlR9IFdDLqCzw5ys52MHnP05RV79GWVd+zHt/ujntP349yWZ7vMFV+hZhrkZ
+ lT+kV0reo+d2XqJXd1+kFwrP0WNZxwXQBscymEUdY0Dj34ngMhleRZ5RB6nvjkMCfo/zMV4ofI9eZT2a
+ c4ZGxByUCe+RXgRlHSlMPPnPjL3uILgBARGYeB9j7TCODnVuTNJhWrH/Q3q3/Br58e+9tesizSi9KHnp
+ ZPYHhrBB4fvIN2QfDQyvpKdST9DDKackYvWldIbF0BIaEm3lmescuJf3q6TF+95nECuSOV29A8vJi9d3
+ Y9B6kq9v7eGPaVL2SfLeWkzTyq5RX4ZAJBl+OfM0jeTfmVH6Pr2TUUXP5V+kFYe+TwMj9tIYvrahDKsz
+ yq7QBOTYCyim+xkoH2XoHBtZIbM1oGu1KwMVIlI9GMoGcNszjiF2VMRueiruCA2NqiC/gBLqvamAJkRb
+ c8x231wgoAahnUUOOc91ReS9Pk+8bgP53HrxPn39S6g7vHSrqv+Uy1hnBiKkK8HUXm1WptNEBk2/DTmE
+ Se6HMbR2WZEpgREeS5Np2KYcGsLtaHvM5rMkg7osT6En4/bJXKpjgndLN2cX/h7SgXRZkU4PxOyT8XDw
+ 3A3330U9GfA6L00XD1yP5ek0FDDKkAdvHAAPzgoI3aR+G/KoFQNO67kJksYE860iNYgvn2PnudES8NBl
+ caIEOWCsnNhsDF3iZYyvgx3W+dBFbOtH+RfRXdPY5jMDAAjhjVOQE3vPLIB1Y7fwOc2LE6+dMIKLG0wp
+ WyhzmHJiE3CLsAuzDcbCgWWavh5EbaaF1eAeJ4hzg5xm/FCecrFWLRlcBjWCXCPINYLcNwDkFOIaQa5u
+ 2UEO+wPeIIxpG8gghuXeGZdFJnzZ1+lnCF20w7KvCsTJuDs+xj8L5CDcP3jl0NX6VYAc1CPuNIVc+gkl
+ n/8x/fQv/0N7v/9zCr/2CxqdyEY+7hCNzjlNfRKO0KiMc/RQ3lXx0g3BpPYxJ6g3Zl/gcjYIk+anHBPv
+ 27NFV+iZnVfogazzkn6kB0Mb0osgRQm6R7vtOCqfpT4w9PVkISWJ1g/UG4li5fWAuN6RSDnCdSWY62TI
+ AXog7QTNLXuflpTz/eX6geAGdMkuP/gxg+Q+STvSM4jrNdfdzv6lNDDmMA2POSKBEI/EH6YXii6KB240
+ n3ef7XsY3PbS8soPqR+DDCbTR5coJsQfH8NwlnWC1jE09gvbJyD1UGIV9QsqkahXeLuezjxHc/Zep3d2
+ n6UpGefpmcQD9FbhBfIJKZdjzS29SiOjD0hKkv58/piS64GYSoY5htjNu2SeVczugICGwfwdHHsEH/dB
+ hl2MicMYubFhB6jH1jzpTu28Mle6UWXGBwYoBEAMQnLfrbxtdY5Eofbm3+rAQNaa4anjKm5/V1oRq+2W
+ W0EPyCHXY701tVY/FsbPDQooYvDKk67YNmwj4BUbsimXhnLbCU8cZnoYv303PbK9RCJY4alrvzhVvHAe
+ DHmdliaR9/IkSUWCIIb+m3aSF9sNQBkmzsdyX/4tjH0D2OG77WHL2N4B0hTmkKYEXrm2vI/n4iSZ5cGP
+ zxndtj1XZEmUKexn61lx4pVrNydevGmWva622XfNiKERGzLpTl7XbmYstZ1jzbNqBznMEDF8HV/H/HjJ
+ M4cxdCbAqZQtTIBTObGJghy6UyF44jrPjKrFPspD/xyQczoYy4Q4+WGcjO0EbxbknG4SZN5IE+IgE+RM
+ iHMCOX2oTiBnFY7aIKcwB4BTfVGQg8wuVvxD+TIgpwBnglxDAA6yQ5yCXH0QZ6omyLngTeQCOgeIg6oh
+ bhd1rgPgVCbA3QzI1YS5miBnj1B1A5zCmwvgVHaIA6yZqg/kakCcCXK8DO+EDPh2jY0zAU5lApwd5BTe
+ TNUGOAviFODUwGuqCkcIYJnwJgBRB7zZhYAEO8y5wc3oOgUA1QVGELaZ4GbClKo3QxMEAAPIDULXKAOZ
+ 3TNnBzmrC/YCQ9tF8cQNzrkqx8B3cTzzuw0R4E2+7+paRRewXoder0AdX7/cA2w37o9d6KbVJMH2e28+
+ F8jp2ZmyQxwEr9+U3e9Tyfs/pZ1Xv0/X/uM/6aXckzSGf+/pwiv0eN4FmlT2ET3HgIaozHFJx+iJ/Is0
+ sfQDeomBCtA2If2kjIPrF83wF+WKenWVZ/NPSq3y75JVN6xIU8zZivQjmBUCYAZPnKQhYYhDvXsi8zS9
+ U3JZPGxYxn7D447R2sqP6Q2GNEBbj+1WGhF0qQ6PPkQTGEiHRB2l3qH76J6UUzS54Dz1CGd4YrgaGXuY
+ 1h34SFJ2eAcBuCrJb0clPZJ6khbs+YBeyztDntsO0Ys5x2lI8G4BuIfiD0nC3Ufiq2he8UWaWXiWXss+
+ JfOs3h+9Xyai9+a2ZmoR/7Hgc+nJEDaEobELt2HIV4dI10cSD8uxxuzYR8NCd9Pw0L2SrLjHxgJZnpxz
+ SjxtXTYUWjM5MBwBuJB6BOPqfBiQEMAgY+G4HfVYmyvbAGuAN5W247ABODaiaJuuLKBJDKWIYu0vwFZC
+ SCkCj5zYiyVJNHRzHvXh3+i6NJnGhTKYrs6UuVp783cwhg6zMQDIEI0KsLt7cTa1n5dEdy+Mo56r0qnH
+ WgY5tgWAs+4YN8fH8+PP8MZBADmMr8MYOtjLPmuyaMSWAhknB5jThMEYZ+e3Jp28liaQF/9WWwY/BETA
+ M9eR7Wcb2FH+DBvuBrl3oshnYSrDXrxAWot3LWjTMXJi7wFyU0Oo55JE2ac9b2+lnKAAN9UVMOliCjvM
+ ObEJpPwCb1zz1wOp65zoaqYxuOdmQK4GW5nsZUCcSpmtEeRcagS5RpD7OkDODnF2T9y3GeTMKa1q6CsC
+ OXjRAGEDstE9esUFZPUL3zG7VNUbZ0Kgub/5u3bdapBDBCuCHvqm8H5fEuTMZ65C2fHgMuF/5Hu0+PAP
+ 6c2ia5R05Wc0e+9FmrfvfVp06BOaWn6VXiu9LmPeJmSfo3uyzskMD+g67Rtzgo9xgrz4OF7R1py/8Boj
+ ZQ7UK7JaprdZpWUfY0UlCTCDHKJZ4ckbwMcTgAvnuuMCuRd3XqJXck9Jt+nkwnPkHbxXPH8Ts07TixkX
+ aEbxeQmQQAQskggPYliazEAFIATcIV/cE6nHyTe4nMGuknpzPUS+uUeTquiBpMP0dPpRmlZyhablHqWF
+ u8/JPKp9gkolAGNc1D56Ou0kPZ5wiO6POSDHeYqh7pHYffTGzvN0f0QpPZZ4hKYUvyft5Ms5pyXi1Gdb
+ sUDjwO2lMhcrptF6Pv0YPZt2lB6I2i9A14chbWz4fhoduYd8thZS1y3FEqUKmOvH14iACHSbwlMHjxza
+ VJ2LFSAH7xzGyWFMWw2QY6F990IXLuYlXcpgtSqJno6slOAGQCO6TtHtCk+egtwwhrN+m7JpTGCJzPAA
+ b1qXNTk0mq+j05I06s/nB5BD9+pj8ZhOMoU6sjozuPXB1GFL0wXsEOGKBMPD/YvJdwVvX5bqBjl4/jot
+ jBdvHYCxC++LKb1gI2UsOdtReOz8GCK9+ZyQekRSmfB2RLj2XJEmgIYEv7Dhaq+RZQJRqGCALmxTJXGw
+ A8i1mhZK3RfECsh1xG+5mOHLghyYBl64u17dSl3mxFOTKbWZB/raQA5uOvtO6FJVmQes9WP1gNyNYE5d
+ k3V1rZo30w5vpuoCObzrA3U/WBvEQfWBnFPXqh3Y7NIgBxPeFOBuBcjdCObsQIfPOoZCdStArj3GcGCM
+ hiEniIMU5G6UO64+gHPLBm8mwCm4OcoFbyIX0JnwZgKcCXGQCW521QlwLim83QjgILNLVeHNlDPEKbxV
+ yzTkWAbA2UFOocAOCg2BN1PwxtUCORfAmKCmMoHINxWJfi/cAN5qAhnUJ/OKW4C4gTnoKr0igGXBXvV+
+ eEeOOEAcghsAcg0BP5XTOVkgxzCZzufgOm+na3UDHd8Tp3unwnZ4zSyY4/uXzOtcAQ/6PKSL2/as3PDm
+ kuYFVCGoxTfeClx4eOeH9O7eD6l/0mnqHX+aHmZIO/ab/6Lpe75PTxVdpodzLjG8XaB7M85L2pF+8ehW
+ PU794k6TX9wpGpBQRcOQhDbtFD2QfI7G87GRhsQ38qDMBCHlHGU3AmWdAS7sEAMb8sRZXjpAHMANACdj
+ 4qIOSbBCD96uQs45zL8KKHs27Th1DNlPM8o/ksCDQQwlYxP5esKqxEv3bOZJernoggQ0dI/YT88UnJWU
+ Jt2CK8gnpIIe4PN8NO0EdeG67Mv18/Wi69Q7eI8c650S/n7KMdp86H2ZZ/WZ1CO0svIKvVZ4kV7IOkov
+ 5GJGh3P0YFwl3RO5j0ZFV9BT2Wcld90rWWdoQiwDXtphmpx5nu5NrqABYaX0SMQR8guqoGERlTQyai8N
+ gzcwbI98/430E/RcxhEBqq7rC2WaLYAewM17U6F0zQLa0MZ68h9j5IdDBCpy0CFPJxIEo1sV4+FMgBOI
+ 4za84+psOU7ntTk0AJP4M5C9XXCc2/E8GhtSRq0XVQ+/wVRd6JLFfKePhZdIIAPyw6F7tdcathn8jhxw
+ 6F7FGDkfbutHbckhv7VZ5MXbvVYzVPJxWvM2CPtAsGsdGMAw5+pAbuvhhcN6j4UJEgiBd3j8OjLUwRuH
+ +V4BcE1nxZHHskTxuMF+IqEwcsx5L2Wbx8vDN+RJ9yrywCHFiNprsbWzY2jsthwraTDLDnJQu+kR1O7d
+ MJmztQvbX0y+32xqTZgTGSBXF8wpu2h36p2TtlBnBlnhGeYdvNuZx+QhOyvVBXImc4lsfObYtdoIco0g
+ 1whyjSDn1tcIcg2RE1zZQc70zPVhwLLvB+hCdyoEoPuyIAdPngRKNBDkJNXKDWAO9xAgp+lIbiXIDeHv
+ jsq8SG/t+YQGpSO1yHF6tegKvZRzmoo//QW9Xvoe3Z97kUYkn5JZHAbGH6GxDDL3Zl8UjUo7TQMSuQzF
+ cpljEPOJPCDqG3WURjBcjUw6RaNTztDwJIY7BjS/qAMy4T7y1UkKk6jD0j06kNdhEn3fmCoaEn9MPHMY
+ K6cCxGGcHLxwmEEB9QvghZkURjIQjUCXKsMggikmMIA9n32aJhe/R/3Cy+n+lMPUmYGnWxC8cPtoWMwh
+ ejH/IvXYvpcGcZ17o/gK+TEYwsP2cMoJWld5jabtukiPpjOghR+g2TtP032hDHq8HdN7IYXJQylHaGDQ
+ XurLx3ucz8dnSzGNjCinB2IO0KvZJ2lh0Xl6NuEwjQwvpXvjD1LfzWXSdQpgu5fPFx69+7he+/rn0ZNJ
+ hxnqAIaVAnAy08PaXJkRAuPfAGKY7QFRqh7r82V6rgEBuyS6Fd44wJomALaDHLpevbg97rEph7oxaA0P
+ KqOh24pk0n2vNdnUZkUOtXXZArUJQ7buohei98jYuB4bcmXdoK0Mg8uSre5RBrk2DGZIBPxMfBWN5vay
+ w7x4tj8pErUK24WkwfDYKchh3RB+Bn58rt34nDAurt+GQuowP5aBEAEWqRKtCpCDdw4RtAiAwHJTBjmM
+ ifPkYyIFSftFidSONZCBD0l9AXPocoU9hr0Wmz4rmh6PLKc7GcycPHIQolvbTg+lDnxMgBy6V+HJ+6Ig
+ B4BDd2qzN7ZLShMT3sxllclDdlZqBDlDjSDXCHKNIPfPBznAilsugHGCGzsUCQRlWHICKHOdygQ5QJWC
+ 3KDca9RXYK56e98syws3hLcNzAHEXZJ9nI7rJPM8VTcLcuh+VZhTOd1HQJymI7mVIPdg/nXyjT1MT+Zd
+ pf78O/fnXqbJ5R/Q6MzzNGnXZYq5/Dk9tfOyeNswTdcQdAfzd5EzDnnkMKUWvG9DE46LxqSfo7EZ52WG
+ hwFxALJDMv0WZnqAx20En+/otLNWt+wOJP+1vHBIOwJ4w3cwowO6UxEEoYJHDuunFF2gJ9NOUu+wwzSK
+ AWli7ll6PuccDcSxWP0ZIofsYCiKqaSn04/L9FuPpcGrhwjVvTQs9oikMXkl94zM3DAm4QS9XXaVBoRX
+ yLHm7b5E66o+lqCDsVEHaWzsQZq684RA1iPw0KXzfQgtk+UnE4/Rw3GH6LnEKln3cPwBGsOAhvxzc8su
+ UuCR9yj49Me0rPgiea0vlmAJ5Iq7L/ogjeLzwTRg/RkGvRjcHuTjjIzYK+PcMFWXJ6/rsi6POq7KlZke
+ PBngOjLcAeiwP5IHIwAC29uv5m2r8qkTL0MmyCFCFhGtGFPXenk2Tc84Qe2WZtMEhpx23N6rHVDbgNki
+ +q3LpLGBuyS3XF/+HrYhp1z/bXky8wLsCgIvRmzbKePnuq/l31qWIePlkKRYPXKYxQGCLWs2N0HSiPjw
+ OWBs3OiA3ZKkF12zbRYwjPE+CJRAVCu6XXssS6ERDI8ANESztmJYg+3FNF6tEMCwKI4hz4I4CN2ueEfQ
+ Qqt3GaKmR9Dw1cnU5O1wSS3iBHJd5sRS62nB1I7Xe7DtRfeqTtdVQy6+qA/k3BCHwIa5sdTktWqeUdmZ
+ x+QhOyvdUpCz72SHN1WNH9MTsJ2YedKQeUH2C74ZiKsL5OwQJ3IYG6dyAjgT3uwA5wRyN4I5HcBZP8hZ
+ 8GaqPohDpaoL5LSCmqoP5BTeTNUFcia4Vcs1NZcL3gBoTvCmUoiT/eoAOFVdIFffuDjkiqsGOeSNqy2N
+ VFXZAa4uiPOsY1ycwptddoBTKcCp7ACHMUfQjQAOyxa8mTIgzmXAFeIQrQhjDoiD7CCgAjC4ZQOMGwlQ
+ AkhB1yDezfFidpkwZEKSHZ5MmVBWlwBr0sXKwDaYlwdkAeiuUt9sBi6Gt6E51wTkAHzqtbMfo67fsp8P
+ JFN78bH6ZVyR89d8dk7XbJd6K/0cgA6fMeWXBD4Y6yEFOoFsAJ7x/OxAp8JUYCMzz9GgZHjTTsgyIG54
+ 6il6be/HDHWnJPL0ibyztOn4j+j13VdpfMYFiV7tF3+MesUclVx0fROP8/JxGQ+H8ov18LJ5xxymri7v
+ nF90lczmMCKhisannpBZH+ChG8wg2Ad50mKraGTCUbo37ZzAYH/8wcD4Udc8qxAmzkeX7POFp2lECv82
+ 1wm/sEp6u/Q6TSq+Itu6cV1F4EPPsArxlCGg4N2979Or2cdoyi5+XoFlNATnwuf0YPwReqPwFI1POEnT
+ Ci7RxPwrMj8spgEDkD3CwDYq6gCNitknMzu8mHdKxry9wfs9wICHQAfAF/K+AQgnpRygidlH6PX8U/Rc
+ wiGaU3yBzn/+H1T4/i8p69L3qfTDn9LD/Nu9QneTF9ozjFljIENaE8zH2nVDvnSTPppyWOCp+zYrvxxy
+ wgHgxkczEG/Mox5bisjT1dUqU28FlLjaZMAct8sugIOXDuPuuvA79sc+CGzowu3+6Ig95L0m3QVy/L4s
+ TYBqbChD5ZpM8lyVKTMeSFdqUKlAWe+NuTITBCa277mhkGEwme7ne4y0JL7rcqjtnBTJUTfUn7/nAjhT
+ LeZaMzdgBog+6xH0kCRj77ryb6tdRD45eOQ6LkyUKbW6LYmnIRsLJEEwYA4Rr5jZoftSPudZkdRhDttl
+ dLuyDUYkazsGM0vxMjYO3rvey605WQFxZtQq5LckTt6RYw4euY6zo2WcnMkRIgPilDdMkMN78zdCmWUC
+ qNOMSIE4gJ2dayCTe1TKRMpK6gSTd5OtXKrBXzZGgxpBrhHkGkHOBnFfJci54c0Au28KyNUAOJUNIG4k
+ wMdXAXJOUKUSSHOpN8Nbn+yrvL46AAJdqIC3oflXaXg+v+ddF6CDJ+6bDHIQAh8G8r3UsXK6/ouAXI+4
+ w/TQrvekTAxkqBqecZGe2HmVBiedosd3XqKhSRhXdpjuZciZXPI+LT70EU3d96F0pcLjNjzptDWdF5dL
+ QBy8dP0YDtFtCi9dn8hjEgjhE8lllIX0I5hsvxuvx/yqfaKqCLnmRjK43cfnjXF4wxkKMbH+AN4+iH97
+ KEPWkPgTMl5uGMMi9n8s8xANwzypiScZ9o7IXKvPZjJIBnP9DK6gkQxy8M5hntWJuy7TQ4kH6fVdlyRp
+ 7+QiBvfw/TIlGMbKTSu6Svfw78/f/Z54wZ5OrqIH+Bwm5fE1xh6kQREHZIL8exNP0JTC83RPVAXdk1RF
+ o2P30/0Jx+jFrJMyNReSCs8ovUz38nWOidovs0lkXvsJnfzsl/SXv/+DSq79gMKO/ZA8N5VRF3RRcjvn
+ sbHYmrJr4y6Zsgsw58nQ0mV1ukTZ9tyMHHF5NGD7XurFbRmWn0o6LulDkD8O4IY2F9MnIhIV7TLaa3jk
+ kGcOEaqYcxXXhSCG5guzGUJPkQ9D2IiQfdR+JdsAbvMR/ACQG4fEy2uyCVN4Ia3IuO2F1J6BafC2YskN
+ N3h9tkwPhshWAB5mXuiyMpntRrpEvXZgW9RyUToN2pJnpRcxBJCTMXGLU+gRvq/dl2G9BXftFyVKgmDM
+ o9oM9nN+PLVn9V3LMLqEv8NA139DAbVmyJLgCQAXgx6CH5Bbru0ctqFsgwFrADckDIZ6sn3D8e8NKhBv
+ nJNHrt+KJOmdQ7QqxsnBk9YG4+T4s8kS9YGcckizt4LIc35C9Rg5sIqNayCTe1TKRMpKXwvIfedlXnYC
+ OJUL5Oxdq04QZ16YebH1gRz0dYGcHeYAbnagu3Vdq0n8nlwnyDlBHGSCnAlxKieAcwI5QNyNQE67UesC
+ OXukKsZ4OAEcZELcjUDODnF1gZwd5gBqCnLVMFfdpdqQCfJhHEyYs4OcCXH1wVxdIOcEbyqzO9UJ5BTi
+ aoOcAXAsABsMtnanmVCHyfIBcjDuJgAovLkBwQCK+qQQAmn3ISBOuxAVWhDIoLDmFNCgYOQETSZMuQVg
+ s8u1zS+Hv+fqNgW43V/8Hj3OxvsRfh+R/750hfbLvsbC/nwu/N2+DHsQvmeXXyZDFwvHtp/TgOzL0l1r
+ nq9eE+Z/VZkAp3LfL0Pu+8rgZnax9uZnI12qtmejz8sJ5PQz3h8qYIjjsuKXwqCSdY0mFX5EvinH6Z60
+ 4/RA3hXJFYdZHAB2EzLPS0LgWXvep7n7r9MAhs1xuWdpPEPdwAQuY5FHXJPqW384+nAZw1g7AJ2Oh4PQ
+ xYp1AD+Ue0Sy+oZXynReZv3APkhlgumzxjIwIpBhXMJB6Up9Kv8ITcg+wvDGz4xB6z6+lqmFF+lBjFnj
+ Y+LzcIbAvlEnaHLBGXoy5YTA3JtlVxm+DtJLJdcIMzf04no6IukETUjYR+8Un6ERvC3s5KfUb8c+msJA
+ i0CHIWEH6LXC05Inb0r+eXqWfx8g8lAqw2XIQXpjJz9bdMEmHqU1FR/S9OwTNJsh+AUGY3jmPvz5r+mn
+ v/srJZ37mI69/zk9zLCERMNo0+BpA8h1YIjrtHkXdV6fT1783mMrt20Ma/cknqLhYbtlyq5hgSU0PGQP
+ eazbKTM7tF9TSK3XWkAn8MbwhblXOzKcoesVXaI4FpL7wkMH6Bu8NZ2GbyqiCaF7qcd6q5sVueXarUin
+ e7bvIR8k8MUk+bx/m0VpNDawWMbOYVqw4f6FNHST5aHD2DfM2vAQP7cWCxG0kCDTenWUAIYkGe+mXatQ
+ B/4O1J3BC3nlMMUXPHGwm+rYALwBHhEc0JzBDMDXhgEOgQ3t5zDs8XbM4AAb22RWlKQcGboWEasRAoXo
+ TgWkAeAAc23mRJHHPIbMeVE0iu9D63fCqM0sgFxN2z90fSbdOXWHfLcDf9eDfwdTeYEVTJawgxwE/gCL
+ oDu1yRR/8pqXQs0wW4OLW/BuMo1KeQcymQhSVhJ2UoeYA2OZIAcmMxmtwV2rjSDXCHKNIPfFQU4hrhHk
+ vnqQq/bMWRAHj9yYne/TsxWf0NRDP6TXD31K9xR/KJ45c5wcgA771yXLawfVPg8FOSzr+eo1mdBm/wy5
+ 75ch9311gRy8cYA5TOGFz+a9h+oDOZU3l5GxfK0oGz5JZ+mZne/ROIbLPmmn6Jld12kkZkAAKHL56Bt3
+ nO7LuUT3ZZ2hSWXvSyDECwWnaBA/3yEpJ+neDCsNCeZk9Wb4Qpm1ym6VrEN0K2AOXaEIagDgoesVgQ7o
+ PkWXK8DNBLnumFs1nOsSxspxffIOq6TeYXtksv6HC47ShLxT9Nauq/RgxglJB+J/+jN6bTc/26TDktOu
+ X9QBGhBRSfcmVckk/Q9lnqXHU0/R87mXaND2Cnou9xz1DOK2gOvxjsM/oMk7T9E7DKavlZ6n+xjonkk5
+ TPMYWp9JPUbTdp/n3zhFk7NOyxRf2C7j8rLOkf/BH9Drqadlxoa3809S5y2FNDr+MD2WeYqW7b1Gxz/9
+ FV372R8o7tRnNCGugu6P2ktL975HL6QeJs8NDFobuL3jtg/drJ03cpu4Lk9grWdAKXXjtvN+vl8P8++h
+ OxWeNa/NhTJHqvdGBkC0tzI+LtcdmYqEwm0ZkjDtF7pM4clrzYDUZnEyvZN/hXw25pKPP7e1K7NkfddV
+ 8LxxGyfzpjJIoc1nuwBYGxu0U4IuRjB49t2QTUO375ZxcAhygDdtAG+TdCUMb0hH4svnBODC7A41QI6/
+ g+m5MME+ZmzAhPnIK4cxbwpykoaL7SfGx3VakChTeWE9ulPhhWvL9tFraYrMq9rKZUf7r04lH6RbmRMt
+ 4/bgqQPgQUgmjJkaWs0OJy8GTKQYkZxzDHMtpkeJYPMV5NDl2l4CHuLkHaxgskRdIAc1mxpKHnPjBOLA
+ LrdPYZ5hRmkEOVYjyDWCnKl/JZDT7lRVI8g1DOScZHajOoGc5WWzPG2D867T0Lz3aELJhzSl8ns098hn
+ NPXg9+nhsk9oXOE1GlFwlYYXvC8eu2EMe0Pyr9Og3KsSJGEXgBAyf19/CzCIVCZyTq7zdF+L6zohBbyG
+ ghygDTAHgAPIAeg0Iti8//WBHD7juaOr2weD/pMvUDeGqQXHP+dycoqB6zA9upOvMe0MDUnm3+Dnjy7T
+ QXy8B7Iv0UO5F2hi/hl6Ou88Td97me5niOuTeJJGJZ+me3j70GQuUwxz8MyhbOPdO9IV6BB5REAO4980
+ zcgwRLsy0HULMyCO5Qa40INWmhJe7hNtBSbcl3OYhqZU0ZMA6aiDMh/qtN2XaRAf+770E/RE1imJUp3P
+ APYkQ/WjmZdpfOJheiL7NI1JPEgj4vfT0B1HaHjyPlq46xoFHr9MC0ov0Jqqj+jeuCM0a/dFvrartLbi
+ Mm0/+j1aeeA6rWQoW1BygabvPkMjGSh7Be+lXuFH6IW0czQqsErA6cnkAzI919MMl29mHaAFZVdozaHv
+ 0YZDn9Cyysv0YCxf87bddG8E1+Nt+2Qe1VllH9ADfC981pdSOwY45NL03FAs3a09t5TKhP0e6zJlmjEP
+ bnORJ85n8y4aFYyJ8Mukje3ObaEVscpt+MocGh25zxofx+0dQAtDXSbEchu0KZ+GYA7YFbnS9nfj9rov
+ gikwXm1JliQXBgRignxMr/XEjlK2H6nUi89r4JadNCriACGFCGzEwC351GxJInVblCNj7iQ6lSFt4KYC
+ ajOb4Yztmar7Gj7HZYkCXR0Bdmyz4F3ryPYKMztArecx9IkNTCCfZSkS0NB8nmUnAXwIjuiMLtrZoeTH
+ 96HN7ETqszKN7p4dJfu2YRusEIcuWi+GS4zHazErgtpOj6Z7t7Md4W3wuiF4AvsD9IZtyKIm77CdZzaA
+ Jw6RpgA+gJ3JEnaQU/bAem/+LeEY5pLb32BuedNilG8UyDlFrZoHkQPZf0hPwIA4SE/WvBCnC4VMaLNL
+ Ic4EOb2p7gR+Jsi54M1UXQB3UyDngjdTTvCmQoGtH+R0eq6aAKdygjjICd4gBbi6IA6yQ9wXAznXuDhj
+ bFzDQW6XWwpyDYE5hTg7yJkQpyBnl8KbAJyDAG0qhTc7xEF2eAPU1QVvKkCbOzqVl90JgQ14U2k3lTk+
+ zoQ5BbhqeDNlAZwPG02VQhwkRl2WT1pddInVhh/SLrsvBHCucV4CIQakAF76ptYPbaZMYLNL4cmEtmqg
+ uiKC56w/csgJwDGc5V2lkTvfowcZ5Cbu/Zgm7/8evbzvezSh6CMau/M6gxwr/33RsPz3RPiepiUBwElO
+ Onx2LetvQRhfNxiBE3wuGIOnsIf1TurHIALpMvLP6TqAlgrj4iBAsIjvtXav4h3PSp+BG+Lw3ADhxjOF
+ eiScI9+E4/RowWUX3J2UCNLni65LxOqwlJP0YM55GphynqHshOSY8+XyNCCBwY63Tci+QMP5d9/e9zG9
+ kH9BolsREOHHANQ//giNYAAck8plAd45Lrf4I4JACMCddL+ysA3dpoA4pBnBPKraBSvRrREHRb4Mf+6x
+ dAw7A6OO0aM55+jx7GM0ILqcnuTzxGwSGBOHnHAYF/cUg+ao+OPkFVROD2XxfglHZTqtB/jcEbzwdvF5
+ mph7mt7dfY5eL7hArxZclIn3UxDgkHCY5pacp+CTH1PMxR9TwNH3KebcZ/Ry1hHJK3dP3AFJGeIdUES+
+ /uU0OLKSXsg8xW0Br+N26K2iC/Ry9llJDry47DK9zFD5wI4yiYJdvueSfL87t0lIQfIAX1uHdWhHi2lQ
+ aDm9ln+anoyrpO4bCqmLf5EkLR7uXyFdp5iTFTD0YNIRhsgKaXPh/Xo0iaHYv5DarCyQtrkNJs1fW0De
+ 3LYODymT6bYQydp2ZSZNTONnw/cH6zy4TceMDQP8iwX4ZKJ9BkF4ztDlitke4GG7J6hQQBAJfEcyCHkj
+ iGIx26jFGXRvWLl47SB40dRu3RdaSi0XpIm9unteCvVYk8Xfy5LjYfJ7ABmWh20rJK/lbJv4swDc/Hh5
+ hxcO+7RkW+q1Ik3SkbSbZ0WtiubFSqTrQLYx8M4h0AHbIXj6ms2MknQkgDkZK8efEQQxLoCvhfeFt83U
+ GAbS5jNi2c5Hytg4gBzUmrlBEgqrg8glN2swXzSZGiLJfptOsZjEzikNATmVyUZuZqoD5Oz8ZeezRpBr
+ BLlGkGN91SDn9sL9HwU5THAP3QqQM7s7qz1iFmjBA4fuUoUydJ8qyD1e+iE9Wf4xPVH2Cd1T9AGNYZBD
+ 8MOQXMtrB4AT+HPBHI6D4+G4KjvIAdowLRjAz4yC7eeCM6gukLPvh3laq8XAxpKIVRVD18CMSzQI2xnm
+ AHcQJtcXsHM9UxXm0LXm0T1BY7MvUn8GHF8GqYFJx+mRnZdpXMYZCaJ4aOdVSfzbP+msgBz2R3mC1xZd
+ pIN53YScizSEwW1y+TV6LPMkvclAPJZBaQADFLpihyafpvEpfLwELnMMaCbEyUwPvA7dqZijVRL98mfk
+ j+vP78glh9kcBsUC1qx1mJGhNwPdgMijAnKP5JygPjv2CLwNi62i+xgeh8YcpsfT+NhcDxH1OZiPPSbm
+ pHjYEDwB2BvN5zeCl2W8XdxhWrr3As3YeZK2n/mUfvCnf9Daysu0cs9lWlZxnRaWXadpxZdpxb7r9ELO
+ KSs6NfoAw1AF9WFY6bGlnHwDyyTXHtqgCalnaPouTNN1kiZmH6c3Cy/QrAK+JwG7aHLOSXon5xg9knBQ
+ 2rkuSB/CYDSC24sBATvJy7+U27QyGszwOYV/6/WsM/QgQ2KHTTlWu8jtKLph4YkbHLibHk48JrnkOjHI
+ IA0LwA1A1m5VlnTBSiTr1iLqG1Amy0+lHBBo67QakLeTj1EsINeF4RDbu65lkON3pD1BcANSk7RdnErj
+ thewbcgQL9zo4CIBJPT6+K7PpZ5rs2VfqNuKLLFbGFc3fGuBdAmjd6nPJis5MGZ4kJ6mRehtwkwPKdR3
+ baZMpI80IxgHJ/aOoQi55ABnsKVtGKgwxyqADsmBZU5WtpOIWvViGByywUr222xGhHTZwqPWdkE8deXf
+ AJghCXCrmQxkDGpe8+Oo7bsR1HaWNQYOEAfv3D3+BRIs0e7dGGqLcXJzoiXgAYmBkbJEkgMjD50d5N7Y
+ Th4Mli3etIIe1Atn6hsNcuLGMw5i/5E7Xt0mbkGVHeTsFwA5XqztpphSiDNvrNxc3Gx1hZogZ+omQc4O
+ cRC6Us3uVFNOAKcyQc6EOOhGIOcEcNqdWle36o1ATiHOCeRMeLPLGeYs2btWRTaAUwHkkDvOzB8HYLO/
+ 1wVwKieAg6q7U7UrVWHO6FJ1UF0gB3gDrJkyQc4OcSoFOIU4hbe6AA5eDLNLVcHNlEJcXSCn8KZdqJDm
+ j9NuNnxGlypADkYeAKfdqdql6gRsdvkZAGeHNxPiBORcEFcfyDkDW00ptLm9ceIBu8yAZUGUSoFseMEH
+ AmUjdr1PE3Z/QBOKPxCgg8YXfijj5mScXP518eINzgO48fcLqkFO3yH1zPVFtKuKYQzrdB8T0HAtuDa9
+ dr0Xdd2Tfmn8PZfUM6dgpzAHuBuMWSnSrc/quXMU4I6feb/kk5I3zi/1LHkzxD2Wf5keKLgqCX4Bck/s
+ ukbDGZB6M4QhUbBZpvox+KGLFXnixqWeov7J52kKAzHGn71SfJHuSb0gYIhxcYA4pDIZzZ8RWdojio8Z
+ baUpQaoRBD7A42Z54g7zZ/4OQxwiVBGpiim3BscepiH8u/DGYYYIv7gqeiznMk3KPS+zHdyXfEpSjTyR
+ cYSG8nHe2nmFBkeeoBlF12h08mGJOB244yANiDpIY+IO0iNpx+hR/g66f98puSjRqS/kXqDDP/gDVX7/
+ ZxR9/Do9V3ieFpd9QAv3vUeL9lyh5Qx12w5+Ru/uOk338TEwv+pjSZXkEbCf2oYV0YyCiwK4SGUys+gi
+ rahkuE2qohHBu2g5HwPdsA8l7qXnMw7RGwX8DIK4PdrIIOi/W9rGfsGlMh2Xz7YS6SIeFFxGI8Iwl2sV
+ TWIoHLWd28b1edR27S4Grl00dEsxDdy2mx6L3i8zNeDP89PJB8mToU/mWWV46sNgCLBDNGt/Pu6LaUeo
+ N9rJtVk0fAuD45ps6rqpiG1EqqQN6b2lRGaMgE2ArYDt6Mjw9UjkHpmEvwMD10MRe+gujH1bninBHrAZ
+ mNYLwRHeqzMt+zU/nnpsLKSu+N0NeTL+DnnlxI4xyGEfFUCsPwMhxtMB5lovTLVyyvF6HTcHSbcr74+A
+ BiT4hf3FZ3TTdlmaQN0Wxsp6eOJ6rMghzwXWBPzY7/+3d25PVZVhGL/CtEzTLDuMiuggsFEgUhCMEkhM
+ bahpshvRMWLEQ2liOnnIQ5KDoignZbM5KLgVt6AgHkaJ4iCZOgqC03T4C/onnp7n3Szcbmmm6cLxgotn
+ 9tqLdfrWYn/fb953fe9jY3nOUY7VmvxQhudzCzFuBcWxXJMjlF6dv4/3lmBozg7rig34pn113GBOJUnG
+ rFJUzs8aft4owHOEt6mEUkFcSNbhYRlF+q8g50CcFMhLkjFUEGNJjzFYUGpVGgG5QY2A3JMA52gE5P4f
+ yAWnU591kBsO2oaTQZz0FEFOEsgJuiQXwUngJRBz4E1ygExSNG7B+QGk+AaQ2NA/tDyPsKa/xTdQZ/u5
+ be9QlE0SnGmdc3xHgkadW9voMxjkAlOrKkUSqMD74Nwbaeh+8D46cu7vUGq1SsuEMwKcy3PLX45E6/Us
+ LCL3pPQ8NQM17Vw/ppe183s3MrwPMIswk3GO7eaxYghdirYl1PF4FV2I47EFf45k36WJDwYuZ+4juaob
+ sfIVJfgkE+5WNvcjvrITSbWESsKXzPPn8FyL6nuQeuqWlRaZV9Fp54kmqElzuE7wJqkQcFRpOyJLfoSr
+ +AZmE+5UmkRSujTO044PCU6fnua1ymievxV5pb5fTUDk70/RwdjSa0jhNS339do+qiUXoaK/hLA07p/u
+ 6UZMyWUr2pt+8mcUdPxhadWiW38R2Ppwoe9vpLr979pl1t1EOj8XlF8z8EsgSCaWtyGzth0uAlkqj/dl
+ 622sbuwzE/39nX8i29tlhXaXea5ine9XzC1qRWKRD2nuK0h337DJCHr/TbXjZIY/meAzlQAmy66ogy3m
+ 5qBi6krVyjs1/tAFfMTzLK9h2/KbrH9WlkPFfdPYBhXvFbAls43R+RcM3CZvY5++lX38t14kHWzEgqOt
+ ePkbDxYRaMN2qJ+uQ9huFRRu9Pf3ei+O+zjjgsaSN9m3pxw8b+OIZrMuZvte3FKD8O01Vipk3NdKq1YZ
+ yIURBpViHb/RXxdO9ebC2e9P0DYbBsexIJBT9G022yCgE5hNWlcBzWbVepnkOzXjBHNOhE7v1ykipzIi
+ mjgxdm0pQvOqLMUqU/wxa0usZMmEXH/WTBqd4y/mP/aLYqtHp5St3n9zNGd7NV5YU44JOSWEt3JM21SJ
+ t3bWIYGSmf747GMICfBaHb3ysEXixmT5OSQk69CwjCKNgJwaO8yNcTQCciMgNwJyTwfkBBDB4BYsBzSe
+ dkTOVd/3GFwNQZvAy5EBGKGO4JGkaNz5fiRwWcCWfO6haW7DA4O4OL0nR7hR9C7qdB8i63st0udE/zSJ
+ QfCmczwCRb9mE9yCQU7LDsi5agh91Vyu5bX/C9AF3hPHZ1by19570vlBUDfLrRmsj0BOgOY8P6VYHel5
+ zyQoqdhvGLeLKu8243s9+6W+PpuB+k79TSRW9yC+lpB4guBQ+Yv9jziSBZesuGL5jOMIY2neHkTzfG9X
+ 3zQj+4RTd7G+dcBcDbSsdGkMjysf0qyLvWb1lVFHoHN32CxXScV/ZfsliBP8ORLQmeNDSZt9uvi7iCFg
+ LfHewSdewuuRSxaFk+F9yonrCCtsQ3RpM96t72QburHizD0ke3rgklPEYEQuiQCXeeo2MhvuYObRq1jv
+ u48DHXex5acB5Lfew56O37Dj8h2sanpo0THZfyUTxGTxpTImcn6Ysb+JYNSCD9jmWPYJ3135HeEEnvfK
+ 2pBV2WozTJdUd2FN431k81o+86qAcJdNxlAx4djCZkz/4aKVBpnCfmvaPh9C9/rgOnQJkQXNmLTjLF6y
+ CQgyw29A2N5mROxvxFxew7Kq61jquW4Ru1e2ec2XdWGhDxnH2zD/cIsBYUxBE2bs8Vl6U9vkNt7Fq3nV
+ +JgAO3On+us6xOzyIXJvA8eMSkzczH6c/b9SrIFjw2uEw9QjF22cCWf/LFP8sTzOwtIWwpnfZ1VRNG03
+ XfC0yWP+q3H5DXh9s9siZor4qY7bcCCn7xE7TyKM++v7lI0cswhiVm9uQwUM5rheY6Yf5oo55sq9ocze
+ m9OkCtWcU5pVkyNC82psm1CeW1ZhOobG7xCO+TLQlxNE6oFmjF4fUBg4uwgReW6MyuH4/Pkxe0dOadn4
+ XbVI+b4eUVvdlm41i69BkHsjtxwTVxciZCUhjhq16lkGud34B+aXO5I36oUUAAAAAElFTkSuQmCC
+</value>
+ </data>
+ <data name="$this.Icon" type="System.Drawing.Icon, System.Drawing" mimetype="application/x-microsoft.net.object.bytearray.base64">
+ <value>
+ AAABAAUAEBAAAAEAIABoBAAAVgAAABgYAAABACAAiAkAAL4EAAAgIAAAAQAgAKgQAABGDgAAMDAAAAEA
+ IACoJQAA7h4AAAAAAAABACAA6iUAAJZEAAAoAAAAEAAAACAAAAABACAAAAAAAAAEAADDDgAAww4AAAAA
+ AAAAAAAAv5UA/7+VAP+/lQD/v5UB/7+WAv+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+UAf++kwD/v5YB/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/76TAP/Alwv/xZ8Z/72SAP+/lQD/v5UA/7+WAf+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAf++kwD/7eG3///////Orj3/vJAA/8CWA/+9kgD/v5QA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAf+/lQD/v5QA//Hoyf//////0rVQ/7yQAP/AlwX/yKMj/8CX
+ Bf+/lAD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UB/76TAP/DnBb/zaw2/8WfHP+7jgD/vJAB/9a7
+ Xf/BmQn/vpQA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/vpQD/7qMAP/LqTT/7N+y/+na
+ p//EnSD/vZEA/8CWAv/AlgL/wJYD/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/8CXBP+9kgD/7N+z////
+ ////////5dSX/7uPAP+/lQP/vJAA/7uOAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lAL/uYsA/+3h
+ uP///////////+fYov/EnRX/xqEd/82tOv/Vulr/vpQA/7+VAP+/lQD/v5UA/7+VAf++kwD/xJ0S/8+v
+ Qf/NrTr/696v/+zgtP/Fnxv/v5UA/72SAP/cxXP/9e7X/76TAP+/lQD/v5UB/7+VAP+/lgL/vJEA/8uo
+ L//j0ZD/u48A/7uOAP/Algv/wZgQ/7uOAP+/lQD/vpMB/7+VBP+/lAD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP++kwD/vI8A/8CWA//AlgL/wJYE/+nbqf/awWr/vJEA/8CWA/+/lQH/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UB/8CWAv+/lQH/vpQA/8GYCf/7+fD/696w/7uPAP/AlgP/v5UB/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP++kwD/w5sS/8GYC/++lAD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/76TAP++lAH/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lgH/v5UB/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+ AAAAAAAAAAAAAAAAAAAoAAAAGAAAADAAAAABACAAAAAAAAAJAADDDgAAww4AAAAAAAAAAAAAv5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlgP/wJcE/8CWAv+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ Af+7jwD/uo0A/7yRAP+/lgD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/72SAP/Yv2j/6t2s/82tPf+8kQD/wJYC/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP/AlgP/u48A/9K0S/////////////n16P/DnBL/vpMA/7+WAf+/lQD/v5YC/7+WAf+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlwT/u44A/9W6Wf////////////v5
+ 8f/Fnxn/vpMA/7+VAf+/lgD/vZEA/72SAP+/lgD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQH/v5UA/7+VBP/k05b/9e/Z/9/Kgv++kwH/wJYE/8GYCP+7jwD/3cd6/9W6
+ Wf+7jgD/wJYD/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/76U
+ AP+8kAH/vJEA/8KaFv/Mqjb/uowA/7uOAf+7jgD/17xh/8ikJP+9kgD/v5YB/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAf/AlgP/wJYD/72RAP/JpS3/3sl9/+PR
+ kf/bxHD/wpoc/7yPAP/AlgH/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5YB/72SAP/o2qb///////79+v//////3cd7/7yQAP/AlwT/v5YB/7+V
+ AP+/lQH/v5YC/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlgL/vJEA/8uo
+ L////////v37//38+f//////+PTl/8KZDP+9kQD/vpMB/76UAP+/lAH/vZIA/7+VAf+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlgP/vZIA/8upMP///////v37//79+f//////+vbr/86u
+ Pf/Kpiz/x6Ig/8KZC//Alwr/x6Mj/76TAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP++kwH/uo0A/7+VBf/x58b///////79+v//////4MuF/7yQAP/EnRL/yKMk/8ejI//q3Kv//////9K0
+ TP+7jwD/wJYD/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UB/76TAP/Fnhf/2L5k/86tPP/EnRb/3MZ2/+jZ
+ pP/fyoH/v5UJ/7+VAP++lAD/vpMD/7qNAP/eyHz//Pr1/82sOf+8kAD/wJYD/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/wJYD/7uPAP/StEv//////8mlKf+7jwD/vI8D/7mLAP/EnR7/yKUp/7yQAP/AlwP/v5UB/7+V
+ Af+9kgL/wJYH/72SAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlgT/xJ4Y/7+U
+ Af+/lgH/wJcE/8CXBP+/lAL/zKo3/8SdFv+9kQD/v5UA/7+VAP+/lQH/v5QB/7+WAf+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/vZIA/7+VAP+/lQD/v5UB/7+VAP+/lAD/696x////
+ ///VuVf/vJAA/8CWA/+/lQD/v5UB/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5YC/7+VAP+/lQD/v5YB/76TAP/EnhX/+vfs///////l1Jn/u44A/8CXBP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+W
+ Av+9kQD/1rtd/+japv/GoR//vZIA/7+WAf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/vJAA/7qNAP+9kgD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/wJYD/8CXBP+/lgL/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKAAAACAA
+ AABAAAAAAQAgAAAAAAAAEAAAww4AAMMOAAAAAAAAAAAAAL+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQH/wJYD/7+VAf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/wJYC/76TAf+8kAD/vpQB/8CWAv+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+WAf+8kAD/w5wV/86tOv/CmhH/vJEA/7+W
+ Af+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlgL/vJEA/86tPv/69+3///////j0
+ 5f/KqDH/vZEA/8CWAv+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+WAf+9kgD/7+XC////
+ ///9/Pj//////+vfsv+8kAD/wJYC/7+VAP+/lQD/v5UA/7+WAf/AlgP/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/76U
+ AP/z69H///////z79f//////7+XC/72RAP+/lgL/v5UA/7+VAP+/lQD/vZIA/7yPAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP/AlgP/u48A/9m/af///////v37///////Wu2H/u44A/8CXA/+/lQD/v5UB/76TAP/HoyL/0LJG/76U
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQH/vZEA/8+wQ//dyHr/0LFF/8+vQf++lAT/wJYC/8CXBP/BmAj/uo0A/9zF
+ eP/38uD/vpMA/7+VAP+/lQH/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lgL/vJAA/7qOAf+6jAD/yqcv/86uP/+6jQD/u44C/7uO
+ AP+9kQD/0rVP/8SeF/++kwD/v5UB/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlgP/wJcE/8CXBP+9kQD/zKo3/9Cy
+ R//awm7/17xf/8+wQv/Ioyj/vJAA/8CWAv+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/wJYC/7yQ
+ AP/eyX////////7+/P///////v37/9CySf+8kAD/wJYC/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/8CW
+ A/+8kAD/0LFF///////+/vz//v37//79+v//////+PTm/8OcFP++lAD/wJYD/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/wJcE/7uOAP/k0pP///////79+/////////////79+///////0rRP/7mLAP++kwP/vpQA/7+V
+ Af/AlgL/wJYD/7+VAf/AlgL/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP/AlwT/u44A/+XTl////////v37/////////////v78///////fyYD/yqcu/8il
+ J//Cmw7/vpQA/72RAP+8kAD/vpQB/7yRAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UB/8GYB/+8kAD/z7BI///////+/vz//v36//79+v//////+vbq/8Wg
+ HP/EnRT/y6ky/8+vQf/Qskf/y6gx/93GeP/48+L/171g/7yRAP/AlgL/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP++kwD/uo0A/7yQAP/LqC//6Nqn/////////v7///////37
+ 9v/Tt1X/u48A/76UAv+9kQD/vZIA/7+WAv/HoiH/+PTm///////z69D/vpQA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lgH/vpMA/8ahHf/gy4P/1rte/8qnL/+7jwD/zq8+/+DL
+ g//cxnX/1rth/7yRA/+/lgD/v5UB/7+WAv+/lgL/v5UC/7uPAP/eyX7//Pr0/9rDb/+8kQD/wJYC/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/8CXBP+7jgD/2cFr///////hzoz/uYsA/8GY
+ Bv+8kAD/u44C/7iJAP/JpjL/y6gy/72SAP/AlwT/v5UA/7+VAP+/lQD/v5UB/72RAf/Alwj/vZEA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UB/76TAP/EnRX/3MV2/8ah
+ H/++kwD/v5YB/8CWA//AlwX/wJYC/8CXCf/Rs0z/u44A/7yQAP+/lgH/v5UA/7+VAP+/lQD/v5YC/7+U
+ AP/AlgL/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/76T
+ AP+6jQD/vpMA/7+VAP+/lQD/v5UA/7+VAP/AlgL/u48A/8yrQP/eyX3/zKo2/72RAP+/lgH/v5UA/7+V
+ AP+/lQD/v5UB/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UB/8CXBP+/lgH/v5UA/7+VAP+/lQD/v5UB/7+VAP+/lQL/7uO+////////////0LJG/7yQ
+ AP/AlgP/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lgL/vZIA/8agHP/9+/b//v35////
+ ///fy4L/uo0A/8CXBP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lgH/vZIA/+fX
+ of//////+vfr/8qnLv+9kQD/wJYC/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP++lAD/vpQF/8yrNf/Fnhr/vZEA/7+WAf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQL/vJAA/76TAf/AlgH/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlgP/v5YB/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP8AAAAAAAAAAAAA
+ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACgA
+ AAAwAAAAYAAAAAEAIAAAAAAAACQAAMMOAADDDgAAAAAAAAAAAAC/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+WAv/AlgT/wJYD/7+VAf+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/wJYD/72SAP+7jgD/vJAA/7+V
+ Av+/lgL/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQL/vI8A/8ij
+ JP/Tt1T/zq49/76UBf+9kgD/wJYB/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+W
+ Af+9kQD/38uE///+/f///v7///////HoyP/Fnx3/vZIA/7+WAf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/wJYD/7uPAP/Wu1////////7+/P/+/vz//v37///////t4rv/vZIA/7+VAf+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/wJYC/7yRAP/u4rv///////79+//////////+///+/f/+/v3/yaYs/72R
+ AP/AlgL/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UB/72SAP/x6Mj///////7+/f/////////////+
+ /v//////zKs4/7yQAP/AlgP/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/wJcE/8CWA/+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/wJcE/7uOAP/izoz///////38
+ +P/+/vz//v36///////48+P/wpoO/76UAP+/lQH/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lAD/uo4A/7yQ
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5YB/76T
+ AP/DnBb/9O3W/////////v3//v78///////XvWj/uo0A/8CXA/+/lQD/v5UA/7+VAP+/lQD/v5UB/7+U
+ AP/AlwX/3MV1/9GzSf+9kgD/v5YC/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/8CWAf+9kgD/wpoT/9/Jf//s4LX/5tWc/9GySf/XvWT/v5UH/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/wJYD/7yQAP/NrDr///////Pr0f++lAD/v5UA/7+VAf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlgH/vpQB/7qOAP+8jwD/u48C/7mLAP/OrkH/1blc/7uO
+ AP/AlwP/v5UB/8CWAv/AlgL/wJYC/72SAP/FnyD/7eG5/9O2Uf+9kgD/v5YC/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UB/8CXBP/AlgP/wJYE/8CX
+ Bf+7jwD/2L5m/8qoM/+8kQD/vpQC/7yQAP+9kQD/v5YC/7yQAP/XvGP/xqAl/7mMAP/AlgH/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAf++lAD/wZgP/9rCcP++lAf/w5wT/8yqNP/Kpy3/vZIA/8ijLP/WvGH/vJAA/8CX
+ Bf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlwP/uo0A/86tSP/y6cv/+vbq////////////9e7X/+nb
+ qv/Alxb/vpMA/7+WAf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/8CWAv+9kgD/x6Il//fy4P/////////+////
+ //////////7+///////l05j/vZEA/7+WAf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAf+9kgD/7eK8////
+ ///+/fr///7+/////////////v79//79+///////1bpe/7uPAP/AlgP/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/wJYD/7yQ
+ AP/Nqzj///////7+/f////////////////////////////7+/P//////8ObD/76TAP/AlgP/v5UB/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/wJcE/7uOAP/ZwGj///////79+//////////////////////////////+/v//////+fbq/8CX
+ Ef+8jwD/v5QB/7+WAv/AlgP/v5YB/7+VAP+/lQD/v5UA/7+VAf/AlgT/wJYC/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/wJcE/7uOAP/ZwWr///////79+///////////////////////////////
+ /v///////Pr0/9W5WP/IpCb/wpkL/72RAP+8jwD/vZEA/76UAP+/lgH/wJcD/76UAf+7jgD/vZEA/8CW
+ Av+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/wJYD/7yQAP/Orj7///////7+/f//////////////
+ //////////////7+/f//////8unM/8qnLv/Ut1T/2L5l/9e8Yv/Rs0v/yaYr/8KaDP+9kgD/u44A/8Kb
+ E//UuFP/yqcu/7yRAP+/lgH/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UB/7+WAf++kwD/7uS/////
+ ///+/fn////+///////////////+//79+v//////2L9q/7iKAP+9kQT/vpMA/8OcEv/LqTL/07ZS/9i+
+ Zf/XvWH/0rVP//Tt1P///////////9W5WP+8kAD/wJYD/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/8CWAv+/lQH/wJcD/76U
+ Av+8kQD/3cd5//z79v/////////+//7+/P///v3//v79///////q3K3/vZIA/8CWAv/AlgP/v5UB/76T
+ AP+8kAD/vI8A/76UA/+/lgL/4MyH///////8+vT///////Dnxf+9kgD/v5YB/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5YA/72R
+ AP++kwH/u48A/8KaEv/cxnf/yaYt/8eiJP/u5L7//v79///////+/v3//v38/+TSk/+9kgL/v5QB/7+W
+ Af+/lQD/v5UA/7+VAf+/lgL/wJYD/8CXBP+6jQD/0bNP///////+/fr//////+rcrf+8kAD/wJYC/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP/AlgL/vZEA/8yqNf/z69D/6duq/9vDcP/GoCD/vJAA/72SAf+8kQH/yKUm/9GzSv/OrTv/yKQk/9W5
+ Xf+9kgD/wJYC/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQH/vpMA/+DMiP/59en/7+S//8ah
+ Iv+9kgD/v5YC/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP/AlgP/vI8A/+rdrv///////////9S4XP+5iwD/wZgF/7+WAv/AlgL/vZEA/7uP
+ AP+9kQP/uo0A/9W6Xv/KqDT/vJEA/7+WAv+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UB/7yQ
+ AP/BmAf/vZIC/72SAP+/lgD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP/AlgP/vI8A/9zEdP//////+/jv/8qnLf+9kgD/v5YC/7+V
+ AP+/lQD/v5YC/8CWA//AlgT/vpMA/8SdGv/Zv23/vZEA/8CXA/+/lQH/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UB/8CWA/+/lAD/v5YC/7+WAv+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/72SAf/NrDn/x6Mk/72R
+ AP+/lgH/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/wJYC/7uPAP/Wu2H/yaYv/7qNAP+/lQL/wJYC/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQH/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ Af+8kAD/vZIB/7+WAf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UB/76TAP/Cmxn/171k/8ah
+ Hf/CmQ//vJAA/7+WAf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP/AlgP/v5YC/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/wJYC/72R
+ AP/LqDP/+PPj///////59ef/0rVS/7yQAP/AlgL/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQH/v5UA/8CWBf/07NP///////7+/P//////+/jv/8agHP+9kgD/v5YC/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lgL/vZEA/8ilKP/9/Pn///79/////v/+/fn//////9K0TP+7jwD/wJYD/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQH/vpQA/8KbD//48+T///////79+v///////v38/8mm
+ K/+9kQD/wJYC/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/wJYD/7yQAP/VuVz//v37////
+ /v//////3sh9/7yQAP/AlgL/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+W
+ Af+8kAD/x6Ij/9S4Vv/Kpy7/vJAA/7+VAf+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP/AlgL/vZIA/7uOAP+9kQD/wJYC/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5YC/8CWBP/AlgL/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+VAP+/lQD/v5UA/7+V
+ AP+/lQD/v5UA/7+VAP8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACJUE5HDQoaCgAAAA1JSERSAAABAAAA
+ AQAIAgAAANMQPzEAACWxSURBVHja7Z15fF1Xde9/a+9z7qDharImS/I8JrZjO4mHzCEUAiGEkhJCBwih
+ rw2EqZQOD0qB5qWE1xI+vEfyQmlLKVAIJKShJcEhITij49gZPM+2bGuer3Sv7r3n7L3eH1uSHQ+JbEu6
+ 5/rs78efxNZH0j1XWt9z9rD2WoT7nobFElZEvi/AYsknVgBLqLECWEKNFcASaqwAllBjBbCEGiuAJdRY
+ ASyhxgpgCTVWAEuosQJYQo0VwBJqrACWUGMFsIQaK4Al1FgBLKHGCmAJNVYAS6ixAlhCjRXAEmqsAJZQ
+ YwWwhBorgCXUWAEsocYKYAk1VgBLqLECWEKNFcASaqwAllBjBbCEGiuAJdRYASyhxgpgCTVWAEuosQJY
+ Qo0VwBJqrACWUGMFsIQaK4Al1FgBLKHGCmAJNVYAS6ixAlhCjZPvC7CcFkEQRACUZvMRIhIEZmhmzvfl
+ nR9YAQIHAZJIg7Wvta8BwBEgAsBaaV9DCjhCCsGAtiKcG1aAYCGIwOxnPUixeFrJmtrEBZXF88rikghA
+ Mudv60293jP0YnuyP5khKaQr7dPgXLACBAhJpDw/IuV7F9ffvrj+7Y0VrqATPocBAlpS2R/v7fzXnW07
+ O5LCdUiQfRScHYT7ns73NVgAE/1Zb0ld2f1XL7iyvowZRNDMms3wBwBMkEtBAAjIKv21Vw7fs+lQ1tfC
+ kdaBs8AKEAgkQWXVx1c03XvFvJgUihmAIKLTfL5maLAACcLLnYO3rtt+oDclo87YdNkyTuwyaP6RRCqr
+ PnfprPuvXhARQjFLInn66AcgCA4REXzNl9aUrnvvRXMri1VOiTf7IsspsALkGTPy+cSKpm9cPtfc+OW4
+ g5gAR5CveV5Z/LEblzWURrXSVoEzwgqQTwSR8vzldWX/+/K5AAgkzjx8jQMLyou+eeV8aJ3v91RgWAHy
+ iWaGlPdeOb/YkYr5LKLf4AhSzB+YV3Pr4nrOqfE/QyxWgLwhiZDzb54z7dqGcs18jlFrvvgLK2e4UUdp
+ tgaMEytA3tBgSPHhRXUAzn3tRhAxY2lVyfVNlfDsbHi8WAHygyCwrxdWlbxzRiWACYlXDQZw6/waCOIJ
+ cCoUWAHygyCCr1fXlEal0DwxIxYCAbisriwWdbQdBY0PK0A+ubCqBMBEbV6Zp0h9caShJArNdhA0HqwA
+ +UExwxFzEjEAExWpBDAQlWJ+WRyaCdaAt8YKkE/Oet3zzbHLoOPHCpAnTFrb5ESqnf+OHytAfhBE0NyT
+ 8TGa43nu8OhuwEDOB1kNxoUVID8QAUrv6EtN+Hfuy/o7+9IQwp6TGQ9WgPzADEixpScFwJmgqYCJ+F19
+ 6d6MN1nTi/MOK0B+0MxwxIvtAweSGUzQSqj5Jk8c6UXOd4js/X88WAHyAwNSiqGh7IN7OzERZ9sZcARl
+ lP7Rng5Ioe0UYHxYAfIGM8MR39/dlsz5jjjXG7Y5C/bw/q693UPCkfZk2DixAuQNzZCO3N05ePfmZhxX
+ /OfsvpUjqD2d+9uXDkCQXQgdP1aAfKKYKeL8wyuH17f2m3MtZ/FNGDCpb1/YcOBAd0q69vZ/BlgB8gwR
+ seY/emLHjr60I8g7w+DVDHOW4O83N39vW4uIufZc/BlhBcgzmlk48khy+F2/eG17X9oV5I+v0hWbhCJA
+ Ev39K81ffG6frYxyFlgB8o9mlq5zOJm59uev/GhPhyNG0tg0s3FhLKh5NO7HQr83633kqV1ffG6fcKVN
+ gD4LJG64Ld/XYAEDQorhnHp4T8fr/em1tYnyqMMgUxpoLLDN34lIEGV8/ZO9nR9ct/3ZQz0y6rKd+Z4V
+ tjRiUNDMQgoI+u99XXetmg3gpY5ksSsWVRRHRrd1NaMv6+3uH368ueeHezsO9aRIkIzbcf/ZYwUIEIKg
+ s/5ty5uWVBYfHcre9NjWrmFvdllsTiIuCJKoN+Pv7k8nsz7nfDhCRByAbfSfC1aAoECAr9iNuXdcOB3A
+ d3e0dQ0My5h7sC99sCd17JOEgCAn5mqwnfKeO1aAoCCJ/Jz3ewumX1xd2jmc+5cdrXAlA2JsUmwmwcwA
+ fBv6E4QVIBAQ4DOLiPPJJQ0AvrezvaUvfdzg3ob7ZGGXQQOBIELOf9/saZfVl/Xn/H/a3grHJvRPBVaA
+ QKCY4co7lzUA+Pdd7Qd6hqRNaJsSzhMBRhfIC7IQgiRCTt0ws+ptDRVpXz+wtRVS2OCfGgpeAEEwucSs
+ mRUzsykaXkCFETQzHLpzaQOAH+3p2NmVlDapYaoobAEkkfa1P+wJoCzu1hRHSyIOmP1hj9W5lpudsrfA
+ nnp7U+W7ZlZ5mu/fchTC3v6njkJdBSJAEFTWm11VctviuutnVM5LxGOOTPtqW0/qv5t7/m1nW89gVkYd
+ FexbqQaD6BNLGwD8ZG/Hax1JEXHs7X/KKNQeYQLQvv7kiqa7V89ORBw+LmHG/L0tlbvzmT2P7OkQkeDO
+ JgWRzvlXNFY8e/NKDVz2s80vtfUL1wowdRTkEEgQtK/+4cr5//fK+SWu4+tjWZOmd7Riri+O/PxdS+5Y
+ 3qQD3jmLcOeyRgAP7eu00T/1FJ4Akkhn1ceWNn5+RZM5QuWIkZZyNNpmXRJpZmbcf/WCa2dW6ZwfwPmA
+ INKef2l92S3zagDct6XF7ndNPQUmABGU0tMSsa+umgVA0GmbagkiDSbg79fOIVeqCSpBPsEwPrm0URAe
+ Pdj9TEsfReziz1RTYAJIInjqQ/NrGoqjvn6LplqSiIE1tYm3N1YErWmKINKeWlabuHVBLYD7t7RAsyjI
+ bYzCpsAEMFXFr2kox/iqiptcmt9pqgxavXwCoPUnljZEBK073PvEkV7zmMr3dYWOQhKAANZwXbmgvAij
+ DVHe4ksIAOaWxeCI4ISXIChfLawu/cOFtQDu29oCXwfqARUeCkkAAAALopg8s8uOSQEKULlkAkHpO5ZM
+ L3bkb1v6/+tQNyL29p8fCk0AopzS3RkPwHiShE1QdWU8KB2Q7AgiKF/Nqiz+yKJ6mNu/Zzv75o1CEoDN
+ vDanXusewpkUlN3ak4LSAamXbNrj/cmF0yuizovtyZ8f7ELBbv2aRWdHkCAigiA4RIWViFVIAoxAeGR/
+ F8bRWtTUi+3L+j/Z0xGQICOC8vX0iqKPLq4HcN+2Fp0tyErOJvSZWWV9P53Tns++1p72M54/7LGvBVFB
+ zGoKLBdIMZMrnzjc+6vDvdfPqPQ0u6e/sZuSaYIwv7yopS9N0iXK8yETQaQ8//bF9XVFkVe7hn66rxNu
+ 0LOVTvkuWGuV8+PxyNtmTVtZXbqgPF7kSE/r9nRuc+fg+raBw6ZiRUTqINx4Tk+BCYCRWoL6c8/uveh3
+ V9QXRTzNzkm3GmZosCRSzGUR5/Ebl3362b3ffe2IjDhMNIG/ERq5pOP/xYyR6Qmf9MnK11WJ2B9fUA/g
+ /u0t3rDnxN2zKwmaL6QglfVLYu6nLp71xxfUzUnET07EGvLUQ/u7/vG1w9vbkjLm6AC37S68wlimsn7n
+ YOaZ9uS7ZlVVRB0wGOwzmEcmBgQSRBs7B9/72Na6osiSyuIbZ01LxCO/bu7RzFKKc1GAAEEkBQHEAJjZ
+ 1GobqeTG0GyO58iRTxvBEaSz/icuarplXs2O3tSnn93nBWhpalxIIpX1Lmuq/MV7lt46v6Y86gJQmk2I
+ 8+jELCLFimklty2qG2RsaO0XIrgnlQpPABgHHHm0P/3jfZ0VMXdRRVFECkEkiUzVtL6s943Xj/7pb3Yd
+ 7ks9fKCrOOpeVpdYW5e4uLbsicO9qWHPOasSypJImCwjX+msz2AhZYkrG0pjjaWxWWVFDaWxRMyJudJn
+ 9jVrT2lPMQBBriBfcWnc/c41C6ti7l2bmp9v7nEiTgHd/SWRyvnvmV/76LuXmp14GslGecMfMwVWml1B
+ 755ZVVkUefxAtzjnBgiTRKGmQ8OMRJVmX8+aVnxdQ8WiiqJiVw5k/a09qSeO9HYnh8mVQgpoVjn/Y8ub
+ /s8V84ocuaMv/aF127e0DThxV417MCSJGKxzCpqjRZEV00rW1CUurSldUFY0szSWiEhB5BAB8Ji15p6s
+ dyCZ2d2f3tiRfL49ubs3hZxSvvr4qln3X7Vgf3L4kp9u7s96VDjTXxP9lzVWPHnT8rgjFL/1eSNTudoR
+ dPem5r95fl8wzzkUsAAYaTRN2lfwNQAQQWsQwZVSCpMjTQQBUhnvqllVP/ydC5pKor1Z749/s/uRXe0y
+ 5ui36lJq1jp0zocUq6aXf2Bu9XtnTVtQHh//RSrm17uHfrq/61eHe//9ukXLqkr+54YD92w44MQKZvRP
+ ADOXRpxn37/ioqqS8US/gQFmCMLbH33tqUM9MhK4GX9hC2AQNLIkyjwyHz351u4I8jPejMriH7/jwsvq
+ Er7mL208dM/GA0IKCHHKO5N5vqucL6S4fva0zy5rvK6xYmxIr5jNr/bUi94MBmsG0bFu2DmlI1J0Zbyl
+ P9jQkc45UWf8j6D8IolUxvvrtXO+tmaOr/mM2loaWzZ2JNc+/Io2LuX77bzhrRXiHOAEeORpe+wvJ6MZ
+ 0pX96dxP9nY0lcZXVJe8vbFiTnnxukM9OV9J58RpsSAihs75FzdU/PAdF3zx4plzyuJEJrvObPqMjHfH
+ ziG84Q+N1HAWRKacmwYcIQDEpZxeGnu1N9U7mJFSBH8URIBmLolHvnPNwsqYC5zZPpf5CTSWRDe0D+zr
+ SUlHBur9ng8CjBNmCCl8rR/Z25FlumZ6+YrqkmubKp5s6e8fzB4/LZaCtKfijrjnqgXfvWbh3LK4GU2N
+ Bv2Zve5YQXPzdxCWVZXcvrh+GHixtR8MEexT8Gb3/XdmVX5yaSPjbBoQK82CKKv5F/u7RMAqvhTgTvA5
+ oJlZkHTlPS/uv2Xd9t6sd1ld2XPvX3nVzEp/2JNERJCCVMa/oLpk/c0rP3dRIxEUs1liOsdXp9H/KuZi
+ V37z8nmP3risuiiiPV8GJE/jlJdNAPPq2jKcbSc/8+Yuri41c4BAvdVwCQCM7BU4cfeRXe3XPvLajr5U
+ U0n08RuXfWxFk8p6kqGGvXfPq37h5pWX1JT6emJC/wTMAU5f83tnTXvx91YuqUmojDdR/eInHM0MKeaV
+ xTG+MxgnYx6asxOx8qgzMT3BJ47QCQCAAV+zE3e3dCSv/vmrv2zuLXLkP1+z8N5rF/lK/9HShv9899Ky
+ iKP4FHvMEwURTFvIOYn4b25afuWMKj8T0OcAM0CIn2EK+sm4QsSlADhQm2JhFMDga5ZRp3vYu+m/Xr/3
+ 9aMA/uyixg0fvOTbVy1wBWnGFKQoO4IUc3Xc/cV7lq5uLFcZL5gOADj3qNXMGaWBYO1+h1cAmMmZFBD0
+ 57/dffvTu9K+Xl2TKHHF2U31zg6TsFQecR5+15JF1aUqYEVcCHAFQXPzYAZvtWdyWhgA+rJ+2teBuv0j
+ 5ALADHAFQdCzh/sySgM424Hu2WMcaCiO/sc7L0xEHa11fhUwGyAmy5+Zta+Rzr3ensTZ3hc0GMDLncl0
+ 1qOA5USEXQAyWSuO+N47LqiMOoo5L2MQSeRrXjGt5B+umAdfT33u2IlB7yl/OKdzvivF4uqS29fMvmFu
+ teYz3AJ4I+tbB+DroJ19K7x06IlFCFIZ74uXz7uivmz8O/yTgZkP/MmF0399tO+hnW1y8nu+j25QQDNr
+ zexr7Ss4Ihp1F1cXralNrKopvaK+fG5ZfOymcBYXZM7xtaVzP97TgeBVvgi1AIJI5dTS2sTnVzRhHEfM
+ Jhtz479r9exfH+4dyPmTsUl8UtAr7Ws4MhZzLqwpXlNngr5sdiI+9rMY9vWr3YPrWwba0tm7Vs8ue2Ml
+ 1rdEaXYEfXtLS1dyWEZdK0CAML+Kv1s9p9iRPrOTbwEEQTEvKi/63PKmLz+/T0xQuJjhjdnR4+OCPh5z
+ ltaWralNrKotvaK+bGZpbOxLUr7a1Dn4YntyY2fypY7BtlRW5JTy/GJXjqUDjeeHZY7srW8d+PorzQE5
+ lXoC4RXAVGa+vLHifXOmAch79I9dFYA7ljQ8sK21LZUlSWcXM8eCXjMzK09BabiyOOYuqypeU5tYVZO4
+ or6ssSQ69iWDnnq5M/lie3Jjx+DGzmRHKsc5HwQ4EoIo5rhR556XDzWVRD+xpMFnNseCT3cBzFDMrqBd
+ /enbfr1DKS0C2fUjvAIYTFuu/I7+j8ckStTE3Y8srrtnwwEhXTXugfcbgl6zUiNBXxJ3l08vWVObWFWb
+ uLyubHpxZOxLkjn/pY7ki+3JjZ2DL3cOdqazyKmxoHfirjlcCoavmQgkxCef3jPk6b9c0QRAM2vGG/Kj
+ eGTNR4AcQRs6kh94fNvRZEa4QYx+nB/p0GeBIGhfz6ko3nbrpXFHnNGgdrLRDEHY05++6MFNGV+/eUWv
+ E4IeSkNpRJxEzF0xbTTo6xO18WNB35f1N3QkN7QnN3YmN3UOdg/nkFNmaxqCHEFjQX/y6xJBMFTOv35e
+ zdfXzllWVXLyj858ZCDnf/3VI9/Y3JwL6r3fENIngCDSvr5p9rRxHm6a2msDAwvKi66ZXv6r/V3ipCY3
+ p7jTa4Yry+PuxdWlq2pK19QmLqsvmxZzx76kJ+O92J7c0JF8qSO5uWuwb9iDNxL0JEiO3umZ8eZndJih
+ ARF1f7W/68nDvTfMnva7s6ddUlM6KxEjkADah3M7+9KPN/c8uK+zc2BYuE6Qox/hFMAkoiEib5pdle9r
+ OTVm5eSmOdN+tb8Lo0nUAscFva/ADFdWFkUuqS5ZVZtYU5tYW5uoPC7ou4a9F9oHTNC/0j00YIJeEORx
+ Qc9sMqPGf23meIPJ63x0d/ujezrcqFMedWJSMNCd8TI5BU/BlTLqauaAdzsOowBmoN2YiF1aU4oArH6e
+ jLmktzWUx+JuTjMAVqx8HwxE5LTiyKXVpatqE2trE6trE+XRY7/E9nTu+faBlzoGX+oYeK07lRzOwVcQ
+ 4hyD/mRMVrOMugA8xV2p3EiahCDzQqZQRr5/kG9NGAUQIOX7a2sTRY40A+6gYZSclYjPKors6kiKokh1
+ cXRVbemqmtK1dWWrakoTkWO/uJZU9vm25MbO5Ib25Os9Q0MZD56CFJCCpJCunKigPwE21erN1RKNXDRj
+ wl9oUgmjAESA5pXVpQA0cwCfAObgbETQncub2tK5dzZVrqwuKXHl2CccGco+1zZggn5Lbyo9FvSOIGcS
+ g/6U8Bv+V2CEUQDNDFfOScQw9Ylv48Zc1yeXNph/MnBoMPNc28DGjuSGjuS23tRwxoev4AjINwY9wy+E
+ sUdACJ0AZNYZHTnXHHHK9/WcDrOY2Jv1/+tQtwn67b3pbNYEvYQk4QrTAZZt0J8DoRMAAJijjqiOuxhf
+ m5m8YCr7buocvO3x7WAmKfiNQW/KTuX7MgueUAoAOEQjQ+qAxv8IRY4QUSmIzPDGBv2EE0oBGBEpIiLQ
+ ZyGMmEWOJJCpwmkDfzIIdBBMHqa5fL6vYhzXyUEuLX4+EEoBCD5zRp2qhn9gMNc1mFO6cNbUC5FQCgB4
+ mvuzHoIb/yMMegp2/DOZhE4As3WZ89VImYOghpYZoB0azATwHO35ROgEgDnG4euDySzOus7HFEAAcCiZ
+ AQerktR5RhgFIAKU3tGXQoB3gs0Jte29KZxbQyfLmxNGATQDjnyxPWnKvwUwuswl9WS8V7oGIUVBLFgV
+ KGEUgJkhaVtvqiWVBYI4wTQRv7Ej2TscuEpS5xmhFACQQqTTuceaezCa0xtA1h3pg6eCWy30vCCMAozx
+ 6MFuAEGLMFNJKu3rRw92w7Hjn8klpAKYjOgnj/Zt6RkiBKtmvYn4x5t7DvWlyRGBurbzj5AKwIAjyRv2
+ vrO9FQHbDTAHdB7Y1gJmYVdAJ5mQCgDT7Scif7inY39yWBIF5EZrzto+daTvySN9Aaykef4RXgHMVDg5
+ lP3a5sMIzEPA3PLv3nwIym4ATwXhFQBm/Sfi/MuO1vWt/aZIf36vx1Rf+9edbU8f6hXB6yl9XhJqAWAG
+ 3Er/xXP7Ur7K76aYZnYEHUxm/nbDQTgiIE+k856wC6CZZcR5uaX/r17YD+TtvBUDJvvn4+v3tAykT27c
+ bZkkwi4ATKewmHPfa0e/t6vddGrJzzUQvvTSwXX7O2V00ltjWMawAgDmBizFHb/Z9cvmXtO9dCpf2pTb
+ //bWlv+14YCM2qH/lGIFAABmCEE5zX+wbtv61n7jwBSEIfNIGdDv727/1Po95Eqd7x9F2LACjKCZhRQD
+ OfWeX7z+nwe7HUFK86Q+CTRDgx1B39py9LYndggiUIDPJ5ynWAGOYRxI+fqWx7Z+e2uLI86lJ+JbYFoR
+ a8bnX9j/2ad3SylY2OjPAxI33JbvawgQDJAQAH65r2v3YObahvIiR2pmPUFFpBkjJTsF0e7+9Psf3/7g
+ jlYZdbS99+cJ+wQ4EWbWgBNzfrKl5d2/2OJrFkSSyJQfPOsoNT2zcFxfra3dQ88f6nZirmYb/XnDCnAK
+ 2JwVdsUfLqp1BO0bGH7iSB8RHCLTw8vX46rXw4BmU4MIRJBEmvkHu9s/vn5PTunfm1dz0+J6P+MFsDx1
+ eAhlZbi3QgpSGe/mRXWfWtYI4C9f2P/Irva3za3+H4vrr59RWR51xnI0x+7rAEaaefGxbzJaN58I6Ejn
+ HjnYff+21q0dSXj+yurSOy6c/uVLZz15uDflq8loCWwZDyFtkvcmCCLtqxmlsfU3r5xVGrt/a8udT+2S
+ UUfnfAZqy+I3zqy6trFibW1iZmn0zW/enua9/ekNHYOPH+759ZG+gaEsBMmIVL5uKI6+8IGLZ5RE/+al
+ g3e/sF/G7eZXfrACnIggaE8/dMPSm+dWv9YzdO3Dr/Z7PgkSIAa00vAUJMWi7oLy+PyyorllsbqiaIkr
+ S1ypmNO+GsypI0PZ5sHMzr70vuSwyvqmn5cUwkyCpSA17P3pyqYHrl7Yn/Uvf2jzjt5UwJvJna9YAd6A
+ Cc07Vsz4f9csUMzX/edr6w/3yuMSM0eH8tBaQzG0hhngC9MjiEcG/swQApIghBkLqTdOoAlgxm/et/za
+ hvKf7eu85bGtwg1iI/XzHjsJPoYgUjm1pDZx99rZAO7e1Ly+ueeE3AQemdQyEQlXyqjrFEWcuCsjjnCF
+ cB0ZcZy46xRFZNQRjiCCYj55+UgQwVdf3XgQwAfm1bxnbo3O+vYAwNRjBRiBAGYWUnzr6gWVUXd968Bd
+ Lx9CxDldbVozmDErQr5mNVq8/4SPnO6WrphFxFnf3Pudba0AvrpqVlHcVVpbA6YYK8AIQhDn/C9cMvNt
+ DeX9Of8zz+zxfSUmc3GGwXDE3ZsOHRnKrqwu/exFjcgpEbASFec9VgAAkEQq61/RVPmlS2cB+NKGg6+3
+ DcjI5A7KmSEdcaQv/bXNzQA+v2LG4ppS5Sm7LTCVWAFABKV1Iu5+66oFEUGPHuz+9utHRMyZgnVJpRlR
+ 54Htretb+yuizldWz7Z7wlOMFQACBE/dtWbOyuqSI0PZP3tmL4Apax4midhTX3npIIBb5tXcMLfazoan
+ krALIAWprHfj/NpPL2sE8BfP7zvYl5Lu1C3Jm9nwb5t7/2m7mQ3PjsddpdkaMDWEWgBBUL6enojfe8U8
+ AP+yo+3BXe1TfyKRwZB098uHjqayF1eXfuaiRuR8OxueGkItAEDQ+h+vnD+vLL69N/XXL+yDFHrKyzEw
+ Q7ry8Ohs+C9WzFhYXap85QiSRI6dFE8m4RVACtIZ7/YlDR+aXwPgM8/u6x7K5ascw8hseFvrM639lWY2
+ nFUq56uc7w97zGwtmCRCeiBGEGlPLaou+eE7Lix25T2bm7/7+hEZy2dGmiTSvtqbzPzRwrolVcWN5UXv
+ nVP9/vk1RTHn6FBuOOsJKe0K0YQT0nRoZoagb121oCbuvtA+8JWNh+A6+W1IqgEQ7esbbk/nmkqiH1tc
+ LwgM3Lao7shQ9g9+vePZw71ikrcmQsh5MgSi0T/jQQrirP9XF894R1PFkKc+88zerOfLvDZiIYA1R135
+ H9df0FQSVcya2desNCvmppLouhsvunZmlc7ZFdIJpoAFEASHyBFk7pQ8ehZFjHzw1JFiNn1XN1Z8+dLZ
+ AL688eCmln6Z70KcUhBy/ueWN72tocLXbOa+5o+p1RV3xN+tnm3qRVsDJpDCE4AAszCife1nPT+d0/5o
+ DhkzmHXO99M57fnMLImOnz4SoLQuijrfunpB3BGPNffc+8phEZ2KTd83f0e+Zhlzb55bDeDk9U9HkGZc
+ UV/2zqZK2FyJCaWQ5gBkMpa19od9OGLhtJKLp5UuKI9fUFncVBL1NeeU9pnb07lNnYMbOwe39aZSqSyE
+ EK6EqXpCpHL+3102b3VNaVs691mz6Ut5LslABPb1rMriCyuLAZzy0cVggOaWxaGZbOf4iaNgBBBEYFZZ
+ L1EcvXFh3YcX1l7bUOGeZrfowwvrGGhP5R7c3/nA9tbdnYNEFIk6ueHc9XNr/nxFE4C/emH/3u6hIJxF
+ NM3rM75+kwkuM0CoiDqwQ6AJpTAEkETKU0LQHStn/O3FM+uLowyY3l6amWhk+mvCh0dLMNQXRz67rPGO
+ C6c/erD7rk3N21v66sqLv3nlPAD/vrv9B9tbRV7XPY/BAKE/6w/mVJEjT/kpZtiztScFYbuGTSQFIIAk
+ Ujn/gprSb1+14NqG8uNrSwk6Vb0qAkZLmzA4KsUH59XcMLPqzmf2Xj29bFF50Z7+4b98bh9kUGrwm141
+ qeHcg/s6P72s0dd8wpNNMUuiw0PZp1r64Nq+kRNJ0AVwiPyM94EL6r9/3eK4IxQzgcYzCzQlSQBiQDEX
+ u/L71y0C4Gn+1LN7OoayeV/5OR4GQ9C9rx65ZV5NXVHE0ywINLI6OiL5VzYeTKVzditgYgn0KpAk8rPe
+ h5c1/vgdF5jol0RnmiRGphgbwxQ9PziYebF1AFIEqg6zZghHNg8Mv/u/txxIZlxBBBIEMrlAhC9sOPC9
+ rS0iYitHTDDBTYWQglTW/+iyxn+7bpG5kZ/LHhARBJFmnhZzl9eUPryvy9dMQdKfASFF28Dwzw50dWb8
+ uCMU48DA8JNH+z7y1K6H97SLiLSj/wknoGVRzLj/kvqy9e9fWeQIzZio7GDzGPnezrbbn9gRwOGEIGKl
+ OefLqOs4Iqc15xQItmjKJBGke+AoZrsqHnXuu2ZhkSMU8wTmxptukB9dXP++BbU66wUts0AzQ5JTFNGE
+ rK+YISKOjf7JI4gCCCJ4+kuXzlpVU2pu2BP7/c2q6dfXzq0qjSmlA6bASOkhYKSoqGa20T95BE4AIihf
+ NVUU3bGkARNUlf/E90zwNS8oj39ueRN8Jabq+O8ZcXx2k2XyCJwAkgi+/vCiuoqo40/arqd5qvz+gtqS
+ 4qgtRxVmgiUAAb7SpSXRjy6ug6nXMEkvRNDMs0pjN86ssullYSZYAggi+PryurK5iThjwlZ+TokZXXxw
+ fg2k3VsNL8ESgAhQ+tLaUphjspP6zokAXFZXVhpz2WaYhZVgCaA0I+JcUlOKkUSGSaci6swqjUHxJHaE
+ tASYAAlg6jPHI3L5tBJM5gRg7OU0wxF0QWUx7Dw4rARIAABgOEQJ1wGmojahGfrPK4tDT+Rem6WACJgA
+ gOlJOpWvaCM/zARJAAKAqJzqooAZpWG3nMJKkARgwOz8T9krEgD4dg00xARJAABE/Vn/UDIDTMU5dfPm
+ B7L+FC05WYJHgARgQAj4ntrem8JIHYRJfjkiX/OmrkFIe9A2pARIAIz2TtwzkMYUPAEYAI4MZff2D0MS
+ 24FQKAmWAMyAFM+0DgBwJnkubMqgv9SRzGR9kde6iJY8EiwBNDNc+duWvle6BjG6Tj9JmFMBjx7qhtZk
+ 10LDSrAEYMARpDL+j/d2Api8cbk5Y7mtN/Xz/d2w561CTLAEAKCY4cqf7O3sGvacSRuZmIj/5x1tueGc
+ I+34J7wETgBmSCmO9qXu3nwIkzMK0syOoB196X/b1YaIDERxOEueCJwAABQzRZxvvX706ZZ+Uxx8Ar85
+ A2YD7NPP7BkYykkhbPiHmSAKAFMhWfGfP7u3P+c7gibwOaA0C8Ldm5qfOtgtowEqDmfJCwEVQDNLV77a
+ kbx13fa0rwXRuUcqA75mR9BD+7u+/NJBCl5RIMvUE1ABYCpYRZ11+7p+/4ntOc3y3BzQzEqzI+hHezs+
+ 9KttCgyyCXCWAAsAQGmWcffRPZ03/nLL0VTWOHCmGpjiuARyBD2wvfXD63YogER+2qFagkagBYBxIOo8
+ sb975YObHjrQZVoeaYZifvOyOSbujS2S6Egqe/O67R9/aqcpqW4THyyG4BbHHYMB6cpU1v/pno5dyeFZ
+ pbHGkqjpgWe6a2k+VkZKj2yfjZRWFkQpT313R9uHntj+Sku/jLjapv5bjiOgxXFPxmQG6awvXXndzKqP
+ Xzj9qullFVH3dDkMmvnV7qEf7O742f7O1v5hkkI4wi75W06gYAQwSCLNzDkFQWXFkYuqilfXli2uKIpK
+ coWISTHkqZ196V19qR196d29KT/rw5VCCtNpwmI5gQITwCCJGKwVQ2koDSlg+oQRQTN8BUGQgqSQgjTb
+ 0LeclqC3SDolZmpLgoR0TI9Tc3qGGSRJRKT5yFiZZYvldBSkAAazznPClJZ5cpOoLecZQV8GtVgmFSuA
+ JdRYASyhxgpgCTVWAEuosQJYQo0VwBJqrACWUGMFsIQaK4Al1FgBLKHGCmAJNVYAS6ixAlhCjRXAEmqs
+ AJZQYwWwhBorgCXUWAEsocYKYAk1VgBLqLECWEKNFcASaqwAllBjBbCEGiuAJdRYASyhxgpgCTVWAEuo
+ sQJYQo0VwBJqrACWUGMFsIQaK4Al1FgBLKHGCmAJNVYAS6ixAlhCjRXAEmqsAJZQYwWwhBorgCXUWAEs
+ oeb/A5fj85sn5OS0AAAAAElFTkSuQmCC
+</value>
+ </data>
+</root> \ No newline at end of file
diff --git a/agent/windows-setup-agent/app.manifest b/agent/windows-setup-agent/app.manifest
new file mode 100644
index 0000000..ff3d8ae
--- /dev/null
+++ b/agent/windows-setup-agent/app.manifest
@@ -0,0 +1,58 @@
+<?xml version="1.0" encoding="utf-8"?>
+<asmv1:assembly manifestVersion="1.0" xmlns="urn:schemas-microsoft-com:asm.v1" xmlns:asmv1="urn:schemas-microsoft-com:asm.v1" xmlns:asmv2="urn:schemas-microsoft-com:asm.v2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <assemblyIdentity version="1.0.0.0" name="MyApplication.app"/>
+ <trustInfo xmlns="urn:schemas-microsoft-com:asm.v2">
+ <security>
+ <requestedPrivileges xmlns="urn:schemas-microsoft-com:asm.v3">
+ <!-- UAC Manifest Options
+ If you want to change the Windows User Account Control level replace the
+ requestedExecutionLevel node with one of the following.
+
+ <requestedExecutionLevel level="asInvoker" uiAccess="false" />
+ <requestedExecutionLevel level="requireAdministrator" uiAccess="false" />
+ <requestedExecutionLevel level="highestAvailable" uiAccess="false" />
+
+ Specifying requestedExecutionLevel node will disable file and registry virtualization.
+ If you want to utilize File and Registry Virtualization for backward
+ compatibility then delete the requestedExecutionLevel node.
+ -->
+ <requestedExecutionLevel level="requireAdministrator" uiAccess="false" />
+ </requestedPrivileges>
+ </security>
+ </trustInfo>
+
+ <compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1">
+ <application>
+ <!-- A list of all Windows versions that this application is designed to work with.
+ Windows will automatically select the most compatible environment.-->
+
+ <!-- If your application is designed to work with Windows Vista, uncomment the following supportedOS node-->
+ <!--<supportedOS Id="{e2011457-1546-43c5-a5fe-008deee3d3f0}"></supportedOS>-->
+
+ <!-- If your application is designed to work with Windows 7, uncomment the following supportedOS node-->
+ <!--<supportedOS Id="{35138b9a-5d96-4fbd-8e2d-a2440225f93a}"/>-->
+
+ <!-- If your application is designed to work with Windows 8, uncomment the following supportedOS node-->
+ <!--<supportedOS Id="{4a2f28e3-53b9-4441-ba9c-d69d4a4a6e38}"></supportedOS>-->
+
+ <!-- If your application is designed to work with Windows 8.1, uncomment the following supportedOS node-->
+ <!--<supportedOS Id="{1f676c76-80e1-4239-95bb-83d0f6d0da78}"/>-->
+
+ </application>
+ </compatibility>
+
+ <!-- Enable themes for Windows common controls and dialogs (Windows XP and later) -->
+ <!-- <dependency>
+ <dependentAssembly>
+ <assemblyIdentity
+ type="win32"
+ name="Microsoft.Windows.Common-Controls"
+ version="6.0.0.0"
+ processorArchitecture="*"
+ publicKeyToken="6595b64144ccf1df"
+ language="*"
+ />
+ </dependentAssembly>
+ </dependency>-->
+
+</asmv1:assembly>
diff --git a/agent/windows-setup-agent/icinga-banner.png b/agent/windows-setup-agent/icinga-banner.png
new file mode 100644
index 0000000..2451495
--- /dev/null
+++ b/agent/windows-setup-agent/icinga-banner.png
Binary files differ
diff --git a/agent/windows-setup-agent/icinga.ico b/agent/windows-setup-agent/icinga.ico
new file mode 100644
index 0000000..9be324c
--- /dev/null
+++ b/agent/windows-setup-agent/icinga.ico
Binary files differ
diff --git a/choco/CMakeLists.txt b/choco/CMakeLists.txt
new file mode 100644
index 0000000..fb147a1
--- /dev/null
+++ b/choco/CMakeLists.txt
@@ -0,0 +1,6 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+if(WIN32)
+ configure_file(icinga2.nuspec.cmake icinga2.nuspec)
+ configure_file(chocolateyInstall.ps1.template.cmake chocolateyInstall.ps1.template)
+endif()
diff --git a/choco/chocolateyInstall.ps1.template.cmake b/choco/chocolateyInstall.ps1.template.cmake
new file mode 100644
index 0000000..424a737
--- /dev/null
+++ b/choco/chocolateyInstall.ps1.template.cmake
@@ -0,0 +1,20 @@
+$packageName= 'icinga2'
+$toolsDir = "$(Split-Path -Parent $MyInvocation.MyCommand.Definition)"
+$url = 'https://packages.icinga.com/windows/Icinga2-v${CHOCO_VERSION_SHORT}-x86.msi'
+$url64 = 'https://packages.icinga.com/windows/Icinga2-v${CHOCO_VERSION_SHORT}-x86_64.msi'
+
+$packageArgs = @{
+ packageName = $packageName
+ fileType = 'msi'
+ url = $url
+ url64bit = $url64
+ silentArgs = "/qn /norestart"
+ validExitCodes= @(0)
+ softwareName = 'Icinga 2*'
+ checksum = '%CHOCO_32BIT_CHECKSUM%'
+ checksumType = 'sha256'
+ checksum64 = '%CHOCO_64BIT_CHECKSUM%'
+ checksumType64= 'sha256'
+}
+
+Install-ChocolateyPackage @packageArgs \ No newline at end of file
diff --git a/choco/chocolateyUninstall.ps1 b/choco/chocolateyUninstall.ps1
new file mode 100644
index 0000000..a41b351
--- /dev/null
+++ b/choco/chocolateyUninstall.ps1
@@ -0,0 +1,20 @@
+$packageName = "Icinga 2";
+$fileType = 'msi';
+$silentArgs = '/qr /norestart'
+$validExitCodes = @(0)
+
+$packageGuid = Get-ChildItem HKLM:\SOFTWARE\Classes\Installer\Products |
+ Get-ItemProperty -Name 'ProductName' |
+ ? { $_.ProductName -like $packageName + "*"} |
+ Select -ExpandProperty PSChildName -First 1
+
+$properties = Get-ItemProperty HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Installer\UserData\S-1-5-18\Products\$packageGuid\InstallProperties
+
+$file = $properties.LocalPackage
+
+# Would like to use the following, but looks like there is a bug in this method when uninstalling MSI's
+# Uninstall-ChocolateyPackage $packageName $fileType $silentArgs $file -validExitCodes $validExitCodes
+
+# Use this instead
+$msiArgs = "/x $file $silentArgs";
+Start-ChocolateyProcessAsAdmin "$msiArgs" 'msiexec' -validExitCodes $validExitCodes
diff --git a/choco/icinga2.nuspec.cmake b/choco/icinga2.nuspec.cmake
new file mode 100755
index 0000000..d0699f2
--- /dev/null
+++ b/choco/icinga2.nuspec.cmake
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Do not remove this test for UTF-8: if "Ω" doesn't appear as greek uppercase omega letter enclosed in quotation marks, you should use an editor that supports UTF-8, not this one. -->
+<!--package xmlns="http://schemas.microsoft.com/packaging/2010/07/nuspec.xsd"-->
+<package xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
+ <metadata>
+ <!-- Read this before publishing packages to chocolatey.org: https://github.com/chocolatey/chocolatey/wiki/CreatePackages -->
+ <id>icinga2</id>
+ <title>Icinga 2</title>
+ <version>${ICINGA2_VERSION_SAFE}</version>
+ <authors>Icinga GmbH</authors>
+ <owners>Icinga GmbH</owners>
+ <summary>icinga2 - Monitoring Agent for Windows</summary>
+ <description>Icinga is an open source monitoring platform which notifies users about host and service outages.</description>
+ <projectUrl>https://icinga.com/</projectUrl>
+ <tags>icinga2 icinga agent monitoring admin</tags>
+ <licenseUrl>https://github.com/Icinga/icinga2/blob/master/COPYING</licenseUrl>
+ <releaseNotes>https://github.com/Icinga/icinga2/blob/master/ChangeLog</releaseNotes>
+ <docsUrl>https://icinga.com/docs/icinga2/latest/</docsUrl>
+ <bugTrackerUrl>https://github.com/Icinga/icinga2/issues</bugTrackerUrl>
+ <packageSourceUrl>https://github.com/Icinga/icinga2</packageSourceUrl>
+ <projectSourceUrl>https://github.com/Icinga/icinga2</projectSourceUrl>
+ <requireLicenseAcceptance>false</requireLicenseAcceptance>
+ <iconUrl>https://raw.githubusercontent.com/Icinga/icinga2/master/icinga-app/icinga.ico</iconUrl>
+ <dependencies>
+ <dependency id='netfx-4.6.2' />
+ </dependencies>
+ </metadata>
+ <files>
+ <file src="${CMAKE_CURRENT_BINARY_DIR}/chocolateyInstall.ps1" target="tools" />
+ <file src="${CMAKE_CURRENT_SOURCE_DIR}/chocolateyUninstall.ps1" target="tools" />
+ </files>
+</package>
diff --git a/cmake/FindJSON.cmake b/cmake/FindJSON.cmake
new file mode 100644
index 0000000..b7d5d79
--- /dev/null
+++ b/cmake/FindJSON.cmake
@@ -0,0 +1,9 @@
+FIND_PATH (JSON_INCLUDE json.hpp HINTS "${PROJECT_SOURCE_DIR}/third-party/nlohmann_json")
+
+if (JSON_INCLUDE)
+ set(JSON_BuildTests OFF CACHE INTERNAL "")
+
+ message(STATUS "Found JSON: ${JSON_INCLUDE}" )
+else ()
+ message(FATAL_ERROR "Unable to include json.hpp")
+endif ()
diff --git a/cmake/FindUTF8CPP.cmake b/cmake/FindUTF8CPP.cmake
new file mode 100644
index 0000000..b000353
--- /dev/null
+++ b/cmake/FindUTF8CPP.cmake
@@ -0,0 +1,7 @@
+FIND_PATH (UTF8CPP_INCLUDE utf8.h HINTS "${PROJECT_SOURCE_DIR}/third-party/utf8cpp/source")
+
+if (UTF8CPP_INCLUDE)
+ message(STATUS "Found UTF8CPP: ${UTF8CPP_INCLUDE}" )
+else ()
+ message(FATAL_ERROR "Unable to include utf8.h")
+endif ()
diff --git a/cmake/InstallConfig.cmake b/cmake/InstallConfig.cmake
new file mode 100644
index 0000000..70eae91
--- /dev/null
+++ b/cmake/InstallConfig.cmake
@@ -0,0 +1,47 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+#
+# Install $src into directory $dest - usually only used for config files
+#
+# * similar to install() a non absolute path is prefixed with CMAKE_INSTALL_PREFIX on runtime
+# * in case of CPack path with be prefixed with share/skel/
+# * DESTDIR is prefixed as well
+#
+# also see https://cmake.org/cmake/help/latest/command/install.html
+
+function(install_if_not_exists src dest)
+ if(NOT IS_ABSOLUTE "${src}")
+ set(src "${CMAKE_CURRENT_SOURCE_DIR}/${src}")
+ endif()
+
+ get_filename_component(src_name "${src}" NAME)
+
+ install(CODE "
+ set(dest \"${dest}\")
+
+ if (\"\${CMAKE_INSTALL_PREFIX}\" MATCHES .*/_CPack_Packages/.*)
+ set(dest \"share/skel/\${dest}\")
+ set(force_overwrite TRUE)
+ else()
+ set(force_overwrite FALSE)
+ endif()
+
+ if(NOT IS_ABSOLUTE \"\${dest}\")
+ set(dest \"\${CMAKE_INSTALL_PREFIX}/\${dest}\")
+ endif()
+
+ set(full_dest \"\$ENV{DESTDIR}\${dest}/${src_name}\")
+
+ if(force_overwrite OR NOT EXISTS \"\${full_dest}\")
+ message(STATUS \"Installing: ${src} into \${full_dest}\")
+
+ execute_process(COMMAND \${CMAKE_COMMAND} -E copy \"${src}\" \"\${full_dest}\"
+ RESULT_VARIABLE copy_result
+ ERROR_VARIABLE error_output)
+ if(copy_result)
+ message(FATAL_ERROR \${error_output})
+ endif()
+ else()
+ message(STATUS \"Skipping : \${full_dest}\")
+ endif()
+ ")
+endfunction(install_if_not_exists)
diff --git a/cmake/SetFullDir.cmake b/cmake/SetFullDir.cmake
new file mode 100644
index 0000000..8dce669
--- /dev/null
+++ b/cmake/SetFullDir.cmake
@@ -0,0 +1,11 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+#
+# Ensures a directory is absolute by prefixing CMAKE_INSTALL_PREFIX if it is not
+# similar to CMAKE_INSTALL_FULL_... https://cmake.org/cmake/help/latest/module/GNUInstallDirs.html
+function(set_full_dir var path)
+ if(NOT IS_ABSOLUTE "${path}")
+ message(STATUS "Prefixing in ${var} \"${path}\" with ${CMAKE_INSTALL_PREFIX}")
+ set(path "${CMAKE_INSTALL_PREFIX}/${path}")
+ endif()
+ set(${var} "${path}" PARENT_SCOPE)
+endfunction(set_full_dir)
diff --git a/config.h.cmake b/config.h.cmake
new file mode 100644
index 0000000..3ed2ae4
--- /dev/null
+++ b/config.h.cmake
@@ -0,0 +1,37 @@
+#ifndef CONFIG_H
+#define CONFIG_H
+
+#cmakedefine HAVE_BACKTRACE_SYMBOLS
+#cmakedefine HAVE_PIPE2
+#cmakedefine HAVE_VFORK
+#cmakedefine HAVE_DLADDR
+#cmakedefine HAVE_LIBEXECINFO
+#cmakedefine HAVE_CXXABI_H
+#cmakedefine HAVE_NICE
+#cmakedefine HAVE_EDITLINE
+#cmakedefine HAVE_SYSTEMD
+
+#cmakedefine ICINGA2_UNITY_BUILD
+#cmakedefine ICINGA2_STACKTRACE_USE_BACKTRACE_SYMBOLS
+
+#define ICINGA_CONFIGDIR "${ICINGA2_FULL_CONFIGDIR}"
+#define ICINGA_DATADIR "${ICINGA2_FULL_DATADIR}"
+#define ICINGA_LOGDIR "${ICINGA2_FULL_LOGDIR}"
+#define ICINGA_CACHEDIR "${ICINGA2_FULL_CACHEDIR}"
+#define ICINGA_SPOOLDIR "${ICINGA2_FULL_SPOOLDIR}"
+#define ICINGA_INITRUNDIR "${ICINGA2_FULL_INITRUNDIR}"
+#define ICINGA_INCLUDECONFDIR "${ICINGA2_FULL_INCLUDEDIR}"
+#define ICINGA_USER "${ICINGA2_USER}"
+#define ICINGA_GROUP "${ICINGA2_GROUP}"
+#define ICINGA_BUILD_HOST_NAME "${ICINGA2_BUILD_HOST_NAME}"
+#define ICINGA_BUILD_COMPILER_NAME "${ICINGA2_BUILD_COMPILER_NAME}"
+#define ICINGA_BUILD_COMPILER_VERSION "${ICINGA2_BUILD_COMPILER_VERSION}"
+
+// Deprecated options?
+#define ICINGA_PKGDATADIR "${ICINGA2_FULL_PKGDATADIR}"
+#define ICINGA_PREFIX "${CMAKE_INSTALL_PREFIX}"
+#define ICINGA_SYSCONFDIR "${CMAKE_INSTALL_FULL_SYSCONFDIR}"
+#define ICINGA_RUNDIR "${ICINGA2_FULL_RUNDIR}"
+#define ICINGA_LOCALSTATEDIR "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}"
+
+#endif /* CONFIG_H */
diff --git a/doc/.gitignore b/doc/.gitignore
new file mode 100644
index 0000000..e60e194
--- /dev/null
+++ b/doc/.gitignore
@@ -0,0 +1,2 @@
+build
+*.rst
diff --git a/doc/01-about.md b/doc/01-about.md
new file mode 100644
index 0000000..4a66553
--- /dev/null
+++ b/doc/01-about.md
@@ -0,0 +1,70 @@
+# About Icinga 2 <a id="about-icinga2"></a>
+
+## What is Icinga 2? <a id="what-is-icinga2"></a>
+
+[Icinga](https://icinga.com/products/) is a monitoring system which checks
+the availability of your network resources, notifies users of outages, and generates
+performance data for reporting.
+
+Scalable and extensible, Icinga can monitor large, complex environments across
+multiple locations. This includes your data center as well as your private, public, or hybrid clouds.
+
+Icinga 2 is the monitoring server and requires [Icinga Web 2](https://icinga.com/products/)
+on top in your Icinga Stack. The [configuration](https://icinga.com/products/configuration/)
+can be easily managed with either the [Icinga Director](https://icinga.com/docs/director/latest/),
+config management tools or plain text within the [Icinga DSL](04-configuration.md#configuration).
+
+
+![Icinga 2 Distributed Master and Satellites with Agents](images/distributed-monitoring/icinga2_distributed_monitoring_scenarios_master_satellites_agents.png)
+
+## Start with Icinga <a id="start-icinga"></a>
+
+* [Installation](02-installation.md#installation)
+* [Monitoring Basics](03-monitoring-basics.md#monitoring-basics)
+* [Configuration](04-configuration.md#configuration)
+* [Distributed Monitoring](06-distributed-monitoring.md#distributed-monitoring)
+* [Addons, Integrations and Features](13-addons.md#addons)
+* [Troubleshooting](15-troubleshooting.md#troubleshooting)
+* [Upgrading](16-upgrading-icinga-2.md#upgrading-icinga-2)
+
+Once Icinga Server and Web are running in your distributed environment,
+make sure to check out the many [Icinga modules](https://icinga.com/docs/)
+for even better monitoring.
+
+## What's New <a id="whats-new"></a>
+
+You can follow the development and release milestones on [GitHub](https://github.com/icinga/icinga2/issues).
+Please follow our release announcements on [icinga.com](https://icinga.com/blog/) too.
+
+## Support <a id="support"></a>
+
+Check the project website at [icinga.com](https://icinga.com) for status updates. Join the
+[community channels](https://icinga.com/community/) for questions
+or get in touch for [professional support](https://icinga.com/subscription/).
+
+## Contribute <a id="contribute"></a>
+
+There are many ways to contribute to Icinga -- whether it be sending patches,
+testing, reporting bugs or reviewing and updating the documentation. Every
+contribution is appreciated!
+
+Please continue reading in the [Contributing chapter](https://github.com/Icinga/icinga2/blob/master/CONTRIBUTING.md).
+
+### Security Issues <a id="security"></a>
+
+For reporting security issues please visit [this page](https://icinga.com/contact/security/).
+
+### Icinga 2 Development <a id="development-info"></a>
+
+The Git repository is located on [GitHub](https://github.com/Icinga/icinga2).
+
+Icinga 2 is written in C++ and can be built on Linux/Unix and Windows.
+Read more about development builds in the [development chapter](21-development.md#development).
+
+
+## License <a id="license"></a>
+
+Icinga 2 and the Icinga 2 documentation are licensed under the terms of the GNU
+General Public License Version 2. You will find a copy of this license in the
+LICENSE file included in the source package.
+
diff --git a/doc/02-installation.md b/doc/02-installation.md
new file mode 100644
index 0000000..42d4a74
--- /dev/null
+++ b/doc/02-installation.md
@@ -0,0 +1,672 @@
+<!-- {% if index %} -->
+# Installation <a id="installation"></a>
+
+This tutorial is a step-by-step introduction to installing Icinga 2.
+It assumes that you are familiar with the operating system you're using to install Icinga 2.
+
+Please follow the steps listed for your operating system. Packages for distributions other than the ones
+listed here may also be available. Please refer to [icinga.com/get-started/download](https://icinga.com/get-started/download/#community)
+for a full list of available community repositories.
+
+## Upgrade <a id="upgrade"></a>
+
+In case you are upgrading an existing setup, please make sure to
+follow the [upgrade documentation](16-upgrading-icinga-2.md#upgrading-icinga-2).
+<!-- {% else %} -->
+
+## Add Icinga Package Repository <a id="add-icinga-package-repository"></a>
+
+We recommend using our official repositories. Here's how to add the Icinga repository to your system:
+
+<!-- {% if debian %} -->
+
+### Debian Repository <a id="debian-repository"></a>
+
+```bash
+apt update
+apt -y install apt-transport-https wget gnupg
+
+wget -O - https://packages.icinga.com/icinga.key | gpg --dearmor -o /usr/share/keyrings/icinga-archive-keyring.gpg
+
+DIST=$(awk -F"[)(]+" '/VERSION=/ {print $2}' /etc/os-release); \
+ echo "deb [signed-by=/usr/share/keyrings/icinga-archive-keyring.gpg] https://packages.icinga.com/debian icinga-${DIST} main" > \
+ /etc/apt/sources.list.d/${DIST}-icinga.list
+ echo "deb-src [signed-by=/usr/share/keyrings/icinga-archive-keyring.gpg] https://packages.icinga.com/debian icinga-${DIST} main" >> \
+ /etc/apt/sources.list.d/${DIST}-icinga.list
+
+apt update
+```
+
+#### Debian Backports Repository <a id="debian-backports-repository"></a>
+
+This repository is required for Debian Stretch since Icinga v2.11.
+
+Debian Stretch:
+
+```bash
+DIST=$(awk -F"[)(]+" '/VERSION=/ {print $2}' /etc/os-release); \
+ echo "deb https://deb.debian.org/debian ${DIST}-backports main" > \
+ /etc/apt/sources.list.d/${DIST}-backports.list
+
+apt update
+```
+
+<!-- {% endif %} -->
+
+<!-- {% if ubuntu %} -->
+### Ubuntu Repository <a id="ubuntu-repository"></a>
+
+```bash
+apt update
+apt -y install apt-transport-https wget gnupg
+
+wget -O - https://packages.icinga.com/icinga.key | gpg --dearmor -o /usr/share/keyrings/icinga-archive-keyring.gpg
+
+. /etc/os-release; if [ ! -z ${UBUNTU_CODENAME+x} ]; then DIST="${UBUNTU_CODENAME}"; else DIST="$(lsb_release -c| awk '{print $2}')"; fi; \
+ echo "deb [signed-by=/usr/share/keyrings/icinga-archive-keyring.gpg] https://packages.icinga.com/ubuntu icinga-${DIST} main" > \
+ /etc/apt/sources.list.d/${DIST}-icinga.list
+ echo "deb-src [signed-by=/usr/share/keyrings/icinga-archive-keyring.gpg] https://packages.icinga.com/ubuntu icinga-${DIST} main" >> \
+ /etc/apt/sources.list.d/${DIST}-icinga.list
+
+apt update
+```
+<!-- {% endif %} -->
+
+<!-- {% if raspbian %} -->
+### Raspbian Repository <a id="raspbian-repository"></a>
+
+```bash
+apt update
+apt -y install apt-transport-https wget gnupg
+
+wget -O - https://packages.icinga.com/icinga.key | gpg --dearmor -o /usr/share/keyrings/icinga-archive-keyring.gpg
+
+DIST=$(awk -F"[)(]+" '/VERSION=/ {print $2}' /etc/os-release); \
+ echo "deb [signed-by=/usr/share/keyrings/icinga-archive-keyring.gpg] https://packages.icinga.com/raspbian icinga-${DIST} main" > \
+ /etc/apt/sources.list.d/icinga.list
+ echo "deb-src [signed-by=/usr/share/keyrings/icinga-archive-keyring.gpg] https://packages.icinga.com/raspbian icinga-${DIST} main" >> \
+ /etc/apt/sources.list.d/icinga.list
+
+apt update
+```
+<!-- {% endif %} -->
+
+<!-- {% if centos %} -->
+### CentOS Repository <a id="centos-repository"></a>
+
+```bash
+rpm --import https://packages.icinga.com/icinga.key
+wget https://packages.icinga.com/centos/ICINGA-release.repo -O /etc/yum.repos.d/ICINGA-release.repo
+```
+
+The packages for CentOS depend on other packages which are distributed
+as part of the [EPEL repository](https://fedoraproject.org/wiki/EPEL):
+
+```bash
+yum install epel-release
+```
+<!-- {% endif %} -->
+
+<!-- {% if rhel %} -->
+### RHEL Repository <a id="rhel-repository"></a>
+
+!!! info
+
+ A paid repository subscription is required for RHEL repositories. Get more information on
+ [icinga.com/subscription](https://icinga.com/subscription)
+
+ Don't forget to fill in the username and password section with your credentials in the local .repo file.
+
+```bash
+rpm --import https://packages.icinga.com/icinga.key
+wget https://packages.icinga.com/subscription/rhel/ICINGA-release.repo -O /etc/yum.repos.d/ICINGA-release.repo
+```
+
+If you are using RHEL you need to additionally enable the `codeready-builder`
+repository before installing the [EPEL rpm package](https://fedoraproject.org/wiki/EPEL#How_can_I_use_these_extra_packages.3F).
+
+#### RHEL 8 or Later
+
+```bash
+ARCH=$(/bin/arch)
+OSVER=$(. /etc/os-release; echo "${VERSION_ID%%.*}")
+
+subscription-manager repos --enable "codeready-builder-for-rhel-${OSVER}-${ARCH}-rpms"
+
+dnf install https://dl.fedoraproject.org/pub/epel/epel-release-latest-${OSVER}.noarch.rpm
+```
+
+#### RHEL 7
+
+```bash
+subscription-manager repos --enable rhel-7-server-optional-rpms
+
+yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+```
+<!-- {% endif %} -->
+
+
+<!-- {% if fedora %} -->
+### Fedora Repository <a id="fedora-repository"></a>
+
+```bash
+rpm --import https://packages.icinga.com/icinga.key
+dnf install -y 'dnf-command(config-manager)'
+dnf config-manager --add-repo https://packages.icinga.com/fedora/$(. /etc/os-release; echo "$VERSION_ID")/release
+```
+<!-- {% endif %} -->
+
+<!-- {% if sles %} -->
+### SLES Repository <a id="sles-repository"></a>
+
+!!! info
+
+ A paid repository subscription is required for SLES repositories. Get more information on
+ [icinga.com/subscription](https://icinga.com/subscription)
+
+ Don't forget to fill in the username and password section with your credentials in the local .repo file.
+
+```bash
+rpm --import https://packages.icinga.com/icinga.key
+
+zypper ar https://packages.icinga.com/subscription/sles/ICINGA-release.repo
+zypper ref
+```
+
+You need to additionally add the `PackageHub` repository to fulfill dependencies:
+
+```bash
+source /etc/os-release
+
+SUSEConnect -p PackageHub/$VERSION_ID/x86_64
+```
+<!-- {% endif %} -->
+
+<!-- {% if opensuse %} -->
+### openSUSE Repository <a id="opensuse-repository"></a>
+
+```bash
+rpm --import https://packages.icinga.com/icinga.key
+
+zypper ar https://packages.icinga.com/openSUSE/ICINGA-release.repo
+zypper ref
+```
+
+You need to additionally add the `server:monitoring` repository to fulfill dependencies:
+
+```bash
+zypper ar https://download.opensuse.org/repositories/server:/monitoring/15.3/server:monitoring.repo
+```
+<!-- {% endif %} -->
+
+<!-- {% if amazon_linux %} -->
+### Amazon Linux Repository <a id="amazon-linux-2-repository"></a>
+
+!!! info
+
+ A paid repository subscription is required for Amazon Linux repositories. Get more information on
+ [icinga.com/subscription](https://icinga.com/subscription)
+
+ Don't forget to fill in the username and password section with your credentials in the local .repo file.
+
+```bash
+rpm --import https://packages.icinga.com/icinga.key
+wget https://packages.icinga.com/subscription/amazon/ICINGA-release.repo -O /etc/yum.repos.d/ICINGA-release.repo
+```
+
+The packages for **Amazon Linux 2** depend on other packages which are distributed
+as part of the [EPEL repository](https://fedoraproject.org/wiki/EPEL).
+
+```bash
+yum install epel-release
+```
+
+The packages for newer versions of Amazon Linux don't require additional repositories.
+<!-- {% endif %} -->
+
+<!-- {% if windows %} -->
+### Icinga for Windows Repository <a id="icinga-for-windows-repository"></a>
+[Icinga for Windows](https://icinga.com/docs/icinga-for-windows/latest/doc/000-Introduction/) is the recommended
+way to install and update Icinga 2 on Windows.
+
+We provide a dedicated repository for Windows to simplify the installation. Please refer to the official
+[Icinga for Windows installation docs](https://icinga.com/docs/icinga-for-windows/latest/doc/110-Installation/01-Getting-Started/)
+<!-- {% else %} -->
+
+## Install Icinga 2 <a id="installing-icinga2"></a>
+
+You can install Icinga 2 by using your distribution's package manager
+to install the `icinga2` package. The following commands must be executed
+with `root` permissions unless noted otherwise.
+
+<!-- {% if centos or rhel or fedora or amazon_linux %} -->
+!!! tip
+
+ If you have [SELinux](22-selinux.md) enabled, the package `icinga2-selinux` is also required.
+<!-- {% endif %} -->
+
+<!-- {% if debian or ubuntu or raspbian %} -->
+<!-- {% if not icingaDocs %} -->
+#### Debian / Ubuntu / Raspbian
+<!-- {% endif %} -->
+```bash
+apt install icinga2
+```
+<!-- {% endif %} -->
+
+<!-- {% if centos %} -->
+<!-- {% if not icingaDocs %} -->
+#### CentOS
+<!-- {% endif %} -->
+!!! info
+
+ Note that installing Icinga 2 is only supported on CentOS 7 as CentOS 8 is EOL.
+
+```bash
+yum install icinga2
+systemctl enable icinga2
+systemctl start icinga2
+```
+<!-- {% endif %} -->
+
+<!-- {% if rhel %} -->
+#### RHEL 8 or Later
+
+```bash
+dnf install icinga2
+systemctl enable icinga2
+systemctl start icinga2
+```
+
+#### RHEL 7
+
+```bash
+yum install icinga2
+systemctl enable icinga2
+systemctl start icinga2
+```
+<!-- {% endif %} -->
+
+<!-- {% if fedora %} -->
+<!-- {% if not icingaDocs %} -->
+#### Fedora
+<!-- {% endif %} -->
+```bash
+dnf install icinga2
+systemctl enable icinga2
+systemctl start icinga2
+```
+<!-- {% endif %} -->
+
+<!-- {% if sles or opensuse %} -->
+<!-- {% if not icingaDocs %} -->
+#### SLES / openSUSE
+<!-- {% endif %} -->
+```bash
+zypper install icinga2
+```
+<!-- {% endif %} -->
+
+<!-- {% if amazon_linux %} -->
+<!-- {% if not icingaDocs %} -->
+#### Amazon Linux
+<!-- {% endif %} -->
+```bash
+yum install icinga2
+systemctl enable icinga2
+systemctl start icinga2
+```
+<!-- {% endif %} -->
+
+### Systemd Service <a id="systemd-service"></a>
+
+The majority of supported distributions use systemd. The Icinga 2 packages automatically install the necessary
+systemd unit files.
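+
+On these systems the service is managed with `systemctl` like any other unit. For example, to make sure
+Icinga 2 is enabled at boot and currently running:
+
+```bash
+systemctl enable --now icinga2
+systemctl status icinga2
+```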
+
+If you're stuck with configuration errors, you can manually invoke the
+[configuration validation](11-cli-commands.md#config-validation).
+
+```bash
+icinga2 daemon -C
+```
+
+!!! tip
+
+ If you are running into fork errors with systemd enabled distributions,
+ please check the [troubleshooting chapter](15-troubleshooting.md#check-fork-errors).
+
+
+## Set up Check Plugins <a id="setting-up-check-plugins"></a>
+
+Without plugins Icinga 2 does not know how to check external services. The
+[Monitoring Plugins Project](https://www.monitoring-plugins.org/) provides
+an extensive set of plugins which can be used with Icinga 2 to check whether
+services are working properly.
+
+These plugins are required to make the [example configuration](04-configuration.md#configuring-icinga2-overview)
+work out-of-the-box.
+
+Depending on which directory your plugins are installed into, you may need to
+update the global `PluginDir` constant in your [Icinga 2 configuration](04-configuration.md#constants-conf).
+This constant is used by the check command definitions contained in the Icinga Template Library
+to determine where to find the plugin binaries.
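+
+For example, you can quickly check which path is currently configured by looking at the constant
+(the path in the comment below is only an illustration and varies by distribution):
+
+```bash
+# Typically prints something like: const PluginDir = "/usr/lib/nagios/plugins"
+grep PluginDir /etc/icinga2/constants.conf
+```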
+
+!!! tip
+
+ Please refer to the [service monitoring](05-service-monitoring.md#service-monitoring-plugins) chapter for details about how to integrate
+ additional check plugins into your Icinga 2 setup.
+
+
+<!-- {% if debian or ubuntu or raspbian %} -->
+<!-- {% if not icingaDocs %} -->
+#### Debian / Ubuntu / Raspbian
+<!-- {% endif %} -->
+```bash
+apt install monitoring-plugins
+```
+<!-- {% endif %} -->
+
+<!-- {% if centos %} -->
+<!-- {% if not icingaDocs %} -->
+#### CentOS
+<!-- {% endif %} -->
+The packages for CentOS depend on other packages which are distributed as part of the EPEL repository.
+
+```bash
+yum install nagios-plugins-all
+```
+<!-- {% endif %} -->
+
+<!-- {% if rhel %} -->
+<!-- {% if not icingaDocs %} -->
+#### RHEL
+<!-- {% endif %} -->
+The packages for RHEL depend on other packages which are distributed as part of the EPEL repository.
+
+#### RHEL 8 or Later
+
+```bash
+dnf install nagios-plugins-all
+```
+
+#### RHEL 7
+
+```bash
+yum install nagios-plugins-all
+```
+<!-- {% endif %} -->
+
+<!-- {% if fedora %} -->
+<!-- {% if not icingaDocs %} -->
+#### Fedora
+<!-- {% endif %} -->
+```bash
+dnf install nagios-plugins-all
+```
+<!-- {% endif %} -->
+
+<!-- {% if sles or opensuse %} -->
+<!-- {% if not icingaDocs %} -->
+#### SLES / openSUSE
+<!-- {% endif %} -->
+The packages depend on other packages which are distributed
+as part of the [server:monitoring repository](https://build.opensuse.org/project/repositories/server:monitoring).
+Please make sure to enable this repository beforehand.
+
+```bash
+zypper install --recommends monitoring-plugins-all
+```
+<!-- {% endif %} -->
+
+<!-- {% if amazon_linux %} -->
+<!-- {% if not icingaDocs %} -->
+#### Amazon Linux
+<!-- {% endif %} -->
+The packages for **Amazon Linux 2** depend on other packages which are distributed as part of the EPEL repository.
+
+```bash
+amazon-linux-extras install epel
+
+yum install nagios-plugins-all
+```
+
+Unfortunately, newer versions of Amazon Linux don't provide those plugins yet.
+<!-- {% endif %} -->
+
+## Set up Icinga 2 API <a id="set-up-icinga2-api"></a>
+
+Almost every Icinga 2 setup requires the Icinga 2 API as Icinga Web connects to it, Icinga DB requires it,
+and it enables cluster communication functionality for highly available and distributed setups.
+
+!!! info
+
+ If you set up a highly available and/or distributed Icinga monitoring environment, please read the
+ [Distributed Monitoring](06-distributed-monitoring.md#distributed-monitoring) chapter as
+ the commands to set up the API are different from setting up a single node setup.
+
+See the [API](12-icinga2-api.md#icinga2-api-setup) chapter for details,
+or follow the steps below to set up the API quickly:
+
+Run the following command to:
+
+* enable the `api` feature,
+* set up certificates, and
+* add the API user `root` with an auto-generated password in the configuration file
+ `/etc/icinga2/conf.d/api-users.conf`.
+
+```bash
+icinga2 api setup
+```
+
+Restart Icinga 2 for these changes to take effect.
+
+```bash
+systemctl restart icinga2
+```
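+
+To verify that the API is reachable, you can, for example, query the API root with `curl`
+(the default port is 5665; replace the password with the one generated in `/etc/icinga2/conf.d/api-users.conf`):
+
+```bash
+curl -k -s -S -i -u root:<password> 'https://localhost:5665/v1'
+```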
+
+<!-- {% if amazon_linux or centos or debian or rhel or sles or ubuntu %} -->
+## Set up Icinga DB <a id="set-up-icinga-db"></a>
+
+Icinga DB is a set of components for publishing, synchronizing and
+visualizing monitoring data in the Icinga ecosystem, consisting of:
+
+* Icinga 2 with its `icingadb` feature enabled,
+ responsible for publishing monitoring data to a Redis server, i.e. configuration and its runtime updates,
+ check results, state changes, downtimes, acknowledgements, notifications, and other events such as flapping
+* The [Icinga DB daemon](https://icinga.com/docs/icinga-db),
+ which synchronizes the data between the Redis server and a database
+* And Icinga Web with the
+ [Icinga DB Web](https://icinga.com/docs/icinga-db-web) module enabled,
+ which connects to both Redis and the database to display and work with the most up-to-date data
+
+![Icinga DB Architecture](images/icingadb/icingadb-architecture.png)
+
+!!! info
+
+ Setting up Icinga 2's Icinga DB feature is only required for Icinga 2 master nodes or single-node setups.
+
+### Set up Redis Server <a id="set-up-redis-server"></a>
+
+A Redis server from version 6.2 is required.
+
+!!! info
+
+ This guide sets up the `icingadb-redis` package provided by Icinga,
+ which ships a current Redis Server version and is preconfigured for the Icinga DB components.
+    Using your own Redis server setup is supported as long as the version requirements are met.
+
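+If you plan to reuse an existing Redis installation instead of the `icingadb-redis` package,
+you can, for example, check its version first:
+
+```bash
+redis-server --version
+```
+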
+![Icinga DB Redis](images/icingadb/icingadb-redis.png)
+
+!!! tip
+
+ Although the Redis server can run anywhere in an Icinga environment,
+    we recommend installing it where the corresponding Icinga 2 node is running to
+ keep latency between the components low.
+
+#### Install Icinga DB Redis Package <a id="install-icinga-db-redis-package"></a>
+
+Use your distribution's package manager to install the `icingadb-redis` package as follows:
+
+<!-- {% if amazon_linux %} -->
+<!-- {% if not icingaDocs %} -->
+##### Amazon Linux
+<!-- {% endif %} -->
+```bash
+yum install icingadb-redis
+```
+<!-- {% endif %} -->
+
+<!-- {% if centos %} -->
+<!-- {% if not icingaDocs %} -->
+##### CentOS
+<!-- {% endif %} -->
+
+!!! info
+
+ Note that installing Icinga DB Redis is only supported on CentOS 7 as CentOS 8 is EOL.
+
+```bash
+yum install icingadb-redis
+```
+<!-- {% endif %} -->
+
+<!-- {% if debian or ubuntu %} -->
+<!-- {% if not icingaDocs %} -->
+##### Debian / Ubuntu
+<!-- {% endif %} -->
+```bash
+apt install icingadb-redis
+```
+<!-- {% endif %} -->
+
+<!-- {% if rhel %} -->
+##### RHEL 8 or Later
+
+```bash
+dnf install icingadb-redis
+```
+
+##### RHEL 7
+
+```bash
+yum install icingadb-redis
+```
+<!-- {% endif %} -->
+
+<!-- {% if sles %} -->
+<!-- {% if not icingaDocs %} -->
+##### SLES
+<!-- {% endif %} -->
+```bash
+zypper install icingadb-redis
+```
+<!-- {% endif %} -->
+
+#### Run Icinga DB Redis <a id="run-icinga-db-redis"></a>
+
+The `icingadb-redis` package automatically installs the necessary systemd unit files to run Icinga DB Redis.
+Please run the following command to enable and start its service:
+
+```bash
+systemctl enable --now icingadb-redis
+```
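+
+To confirm the server is answering, you can, for example, send a `PING` with the bundled CLI.
+The command name and port below assume the package defaults (adjust them if your configuration differs):
+
+```bash
+icingadb-redis-cli -p 6380 ping
+```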
+
+#### Enable Remote Redis Connections <a id="enable-remote-redis-connections"></a>
+
+By default, `icingadb-redis` only listens on `127.0.0.1`. If Icinga Web or Icinga 2 is running on another node,
+remote access to the Redis server must be allowed. This requires the following directives to be set in
+the `/etc/icingadb-redis/icingadb-redis.conf` configuration file:
+
+* Set `protected-mode` to `no`, i.e. `protected-mode no`
+* Set `bind` to the desired binding interface or bind all interfaces, e.g. `bind 0.0.0.0`
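+
+As an illustration, both directives could be applied non-interactively like this
+(a minimal sketch that assumes the stock configuration file shipped by the package; review the file afterwards):
+
+```bash
+sed -i -e 's/^protected-mode yes/protected-mode no/' -e 's/^bind .*/bind 0.0.0.0/' /etc/icingadb-redis/icingadb-redis.conf
+```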
+
+!!! warning
+
+ By default, Redis has no authentication preventing others from accessing it.
+ When opening Redis to an external interface, make sure to set a password, set up appropriate firewall rules,
+ or configure TLS with certificate authentication on Redis and its consumers,
+ i.e. Icinga 2, Icinga DB and Icinga Web.
+
+Restart Icinga DB Redis for these changes to take effect:
+
+```bash
+systemctl restart icingadb-redis
+```
+
+### Enable Icinga DB Feature <a id="enable-icinga-db-feature"></a>
+
+With the [Icinga DB feature](14-features.md#icinga-db) enabled,
+Icinga 2 publishes all of its monitoring data to the Redis server. This includes configuration and
+its runtime updates via the Icinga 2 API, check results, state changes, downtimes, acknowledgments, notifications and
+other events such as flapping.
+
+![Icinga DB Icinga 2](images/icingadb/icingadb-icinga2.png)
+
+Icinga 2 installs the feature configuration file to `/etc/icinga2/features-available/icingadb.conf`,
+pre-configured for a local setup.
+Update this file in case Redis is running on a different host or to set credentials.
+All available settings are explained in the [Icinga DB object](09-object-types.md#icingadb) chapter.
+
+!!! important
+
+ For single-node and high-availability setups, please read the note about the
+ [environment ID](https://icinga.com/docs/icinga-db/latest/doc/05-Distributed-Setups/#environment-id),
+ which is common to all Icinga DB components and generated by the Icinga DB feature.
+
+To enable the `icingadb` feature use the following command:
+
+```bash
+icinga2 feature enable icingadb
+```
+
+Restart Icinga 2 for these changes to take effect:
+
+```bash
+systemctl restart icinga2
+```
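+
+Afterwards you can double-check that the feature is enabled, for example with:
+
+```bash
+icinga2 feature list
+```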
+
+### Install Icinga DB Daemon <a id="install-icinga-db-daemon"></a>
+
+After installing Icinga 2, setting up a Redis server and enabling the `icingadb` feature,
+the Icinga DB daemon that synchronizes monitoring data between the Redis server and a database can now be set up.
+
+![Icinga DB Daemon](images/icingadb/icingadb-daemon.png)
+
+!!! tip
+
+ Although the Icinga DB daemon can run anywhere in an Icinga environment,
+    we recommend installing it where the corresponding Icinga 2 node and Redis server are running to
+ keep latency between the components low.
+
+The Icinga DB daemon package is also included in the Icinga repository, and since that repository is already set up,
+you have completed the instructions here and can proceed to
+<!-- {% if amazon_linux %} -->
+[install the Icinga DB daemon on Amazon Linux](https://icinga.com/docs/icinga-db/latest/doc/02-Installation/01-Amazon-Linux/#installing-icinga-db-package),
+<!-- {% endif %} -->
+<!-- {% if centos %} -->
+[install the Icinga DB daemon on CentOS](https://icinga.com/docs/icinga-db/latest/doc/02-Installation/02-CentOS/#installing-icinga-db-package),
+<!-- {% endif %} -->
+<!-- {% if debian %} -->
+[install the Icinga DB daemon on Debian](https://icinga.com/docs/icinga-db/latest/doc/02-Installation/03-Debian/#installing-icinga-db-package),
+<!-- {% endif %} -->
+<!-- {% if rhel %} -->
+[install the Icinga DB daemon on RHEL](https://icinga.com/docs/icinga-db/latest/doc/02-Installation/04-RHEL/#installing-icinga-db-package),
+<!-- {% endif %} -->
+<!-- {% if sles %} -->
+[install the Icinga DB daemon on SLES](https://icinga.com/docs/icinga-db/latest/doc/02-Installation/05-SLES/#installing-icinga-db-package),
+<!-- {% endif %} -->
+<!-- {% if ubuntu %} -->
+[install the Icinga DB daemon on Ubuntu](https://icinga.com/docs/icinga-db/latest/doc/02-Installation/06-Ubuntu/#installing-icinga-db-package),
+<!-- {% endif %} -->
+which will also guide you through the setup of the database and Icinga DB Web.
+<!-- {% endif %} -->
+
+## Backup <a id="install-backup"></a>
+
+Make sure to include the following in your backups (an example archive command is shown below the list):
+
+* Configuration files in `/etc/icinga2`
+* Certificate files in `/var/lib/icinga2/ca` (Master CA key pair) and `/var/lib/icinga2/certs` (node certificates)
+* Runtime files in `/var/lib/icinga2`
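+
+As an illustration, the directories above could be captured with a single archive command
+(a minimal sketch; adapt the target path and retention to your backup strategy):
+
+```bash
+tar czf /root/icinga2-backup-$(date +%F).tar.gz /etc/icinga2 /var/lib/icinga2
+```
+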
+<!-- {% endif %} --><!-- {# end windows else #} -->
+<!-- {% endif %} --><!-- {# end index else #} -->
diff --git a/doc/02-installation.md.d/01-Debian.md b/doc/02-installation.md.d/01-Debian.md
new file mode 100644
index 0000000..d3e3143
--- /dev/null
+++ b/doc/02-installation.md.d/01-Debian.md
@@ -0,0 +1,3 @@
+# Install Icinga 2 on Debian
+<!-- {% set debian = True %} -->
+<!-- {% include "02-installation.md" %} -->
diff --git a/doc/02-installation.md.d/02-Ubuntu.md b/doc/02-installation.md.d/02-Ubuntu.md
new file mode 100644
index 0000000..aa099d8
--- /dev/null
+++ b/doc/02-installation.md.d/02-Ubuntu.md
@@ -0,0 +1,3 @@
+# Install Icinga 2 on Ubuntu
+<!-- {% set ubuntu = True %} -->
+<!-- {% include "02-installation.md" %} -->
diff --git a/doc/02-installation.md.d/03-Raspbian.md b/doc/02-installation.md.d/03-Raspbian.md
new file mode 100644
index 0000000..fc48d6c
--- /dev/null
+++ b/doc/02-installation.md.d/03-Raspbian.md
@@ -0,0 +1,3 @@
+# Install Icinga 2 on Raspbian
+<!-- {% set raspbian = True %} -->
+<!-- {% include "02-installation.md" %} -->
diff --git a/doc/02-installation.md.d/04-Fedora.md b/doc/02-installation.md.d/04-Fedora.md
new file mode 100644
index 0000000..2bfbf53
--- /dev/null
+++ b/doc/02-installation.md.d/04-Fedora.md
@@ -0,0 +1,3 @@
+# Install Icinga 2 on Fedora
+<!-- {% set fedora = True %} -->
+<!-- {% include "02-installation.md" %} -->
diff --git a/doc/02-installation.md.d/05-CentOS.md b/doc/02-installation.md.d/05-CentOS.md
new file mode 100644
index 0000000..4d766b2
--- /dev/null
+++ b/doc/02-installation.md.d/05-CentOS.md
@@ -0,0 +1,3 @@
+# Install Icinga 2 on CentOS
+<!-- {% set centos = True %} -->
+<!-- {% include "02-installation.md" %} -->
diff --git a/doc/02-installation.md.d/06-RHEL.md b/doc/02-installation.md.d/06-RHEL.md
new file mode 100644
index 0000000..568251a
--- /dev/null
+++ b/doc/02-installation.md.d/06-RHEL.md
@@ -0,0 +1,3 @@
+# Install Icinga 2 on RHEL
+<!-- {% set rhel = True %} -->
+<!-- {% include "02-installation.md" %} -->
diff --git a/doc/02-installation.md.d/07-OpenSUSE.md b/doc/02-installation.md.d/07-OpenSUSE.md
new file mode 100644
index 0000000..347831e
--- /dev/null
+++ b/doc/02-installation.md.d/07-OpenSUSE.md
@@ -0,0 +1,3 @@
+# Install Icinga 2 on openSUSE
+<!-- {% set opensuse = True %} -->
+<!-- {% include "02-installation.md" %} -->
diff --git a/doc/02-installation.md.d/08-SLES.md b/doc/02-installation.md.d/08-SLES.md
new file mode 100644
index 0000000..aa2646d
--- /dev/null
+++ b/doc/02-installation.md.d/08-SLES.md
@@ -0,0 +1,3 @@
+# Install Icinga 2 on SLES
+<!-- {% set sles = True %} -->
+<!-- {% include "02-installation.md" %} -->
diff --git a/doc/02-installation.md.d/09-Amazon-Linux.md b/doc/02-installation.md.d/09-Amazon-Linux.md
new file mode 100644
index 0000000..ec1d986
--- /dev/null
+++ b/doc/02-installation.md.d/09-Amazon-Linux.md
@@ -0,0 +1,3 @@
+# Install Icinga 2 on Amazon Linux
+<!-- {% set amazon_linux = True %} -->
+<!-- {% include "02-installation.md" %} -->
diff --git a/doc/02-installation.md.d/10-Windows.md b/doc/02-installation.md.d/10-Windows.md
new file mode 100644
index 0000000..64bc687
--- /dev/null
+++ b/doc/02-installation.md.d/10-Windows.md
@@ -0,0 +1,3 @@
+# Install Icinga 2 on Windows
+<!-- {% set windows = True %} -->
+<!-- {% include "02-installation.md" %} -->
diff --git a/doc/03-monitoring-basics.md b/doc/03-monitoring-basics.md
new file mode 100644
index 0000000..06ea0c1
--- /dev/null
+++ b/doc/03-monitoring-basics.md
@@ -0,0 +1,3305 @@
+# Monitoring Basics <a id="monitoring-basics"></a>
+
+This part of the Icinga 2 documentation provides an overview of all the basic
+monitoring concepts you need to know to run Icinga 2.
+Keep in mind that these examples are written for a Linux server. If you are
+using Windows, you will need to change the services accordingly. See the
+[ITL reference](10-icinga-template-library.md#windows-plugins) for further information.
+
+## Attribute Value Types <a id="attribute-value-types"></a>
+
+The Icinga 2 configuration uses different value types for attributes.
+
+ Type | Example
+ -------------------------------------------------------|---------------------------------------------------------
+ [Number](17-language-reference.md#numeric-literals) | `5`
+ [Duration](17-language-reference.md#duration-literals) | `1m`
+ [String](17-language-reference.md#string-literals) | `"These are notes"`
+ [Boolean](17-language-reference.md#boolean-literals) | `true`
+ [Array](17-language-reference.md#array) | `[ "value1", "value2" ]`
+ [Dictionary](17-language-reference.md#dictionary) | `{ "key1" = "value1", "key2" = false }`
+
+It is important to use the correct value type for object attributes
+as otherwise the [configuration validation](11-cli-commands.md#config-validation) will fail.
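+
+You can run the validation at any time to catch such mistakes early, for example:
+
+```bash
+icinga2 daemon -C
+```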
+
+## Hosts and Services <a id="hosts-services"></a>
+
+Icinga 2 can be used to monitor the availability of hosts and services. Hosts
+and services can be virtually anything which can be checked in some way:
+
+* Network services (HTTP, SMTP, SNMP, SSH, etc.)
+* Printers
+* Switches or routers
+* Temperature sensors
+* Other local or network-accessible services
+
+Host objects provide a mechanism to group services that are running
+on the same physical device.
+
+Here is an example of a host object which defines two child services:
+
+```
+object Host "my-server1" {
+ address = "10.0.0.1"
+ check_command = "hostalive"
+}
+
+object Service "ping4" {
+ host_name = "my-server1"
+ check_command = "ping4"
+}
+
+object Service "http" {
+ host_name = "my-server1"
+ check_command = "http"
+}
+```
+
+The example creates two services `ping4` and `http` which belong to the
+host `my-server1`.
+
+It also specifies that the host should perform its own check using the `hostalive`
+check command.
+
+The `address` attribute is used by check commands to determine which network
+address is associated with the host object.
+
+Details on troubleshooting check problems can be found [here](15-troubleshooting.md#troubleshooting).
+
+### Host States <a id="host-states"></a>
+
+Hosts can be in any one of the following states:
+
+ Name | Description
+ ------------|--------------
+ UP | The host is available.
+ DOWN | The host is unavailable.
+
+### Service States <a id="service-states"></a>
+
+Services can be in any one of the following states:
+
+ Name | Description
+ ------------|--------------
+ OK | The service is working properly.
+ WARNING | The service is experiencing some problems but is still considered to be in working condition.
+ CRITICAL | The check successfully determined that the service is in a critical state.
+ UNKNOWN | The check could not determine the service's state.
+
+### Check Result State Mapping <a id="check-result-state-mapping"></a>
+
+[Check plugins](05-service-monitoring.md#service-monitoring-plugins) return
+with an exit code which is converted into a state number.
+Services map the resulting state directly, while hosts, for example, treat both `0` and `1` as `UP`.
+
+ Value | Host State | Service State
+ ------|------------|--------------
+ 0 | Up | OK
+ 1 | Up | Warning
+ 2 | Down | Critical
+ 3 | Down | Unknown
+
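+
+If you want to see this mapping in action, you can run a plugin manually and inspect its exit code.
+The plugin path and thresholds below are only illustrative:
+
+```bash
+/usr/lib/nagios/plugins/check_ping -H 127.0.0.1 -w 100,20% -c 200,60%
+echo $?
+```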
+### Hard and Soft States <a id="hard-soft-states"></a>
+
+When detecting a problem with a host/service, Icinga re-checks the object a number of
+times (based on the `max_check_attempts` and `retry_interval` settings) before sending
+notifications. This ensures that no unnecessary notifications are sent for
+transient failures. During this time the object is in a `SOFT` state.
+
+After all re-checks have been executed and the object is still in a non-OK
+state, the host/service switches to a `HARD` state and notifications are sent.
+
+ Name | Description
+ ------------|--------------
+ HARD | The host/service's state hasn't recently changed. `check_interval` applies here.
+ SOFT | The host/service has recently changed state and is being re-checked with `retry_interval`.
+
+### Host and Service Checks <a id="host-service-checks"></a>
+
+Hosts and services determine their state by running checks at regular intervals.
+
+```
+object Host "router" {
+ check_command = "hostalive"
+ address = "10.0.0.1"
+}
+```
+
+The `hostalive` command is one of several built-in check commands. It sends ICMP
+echo requests to the IP address specified in the `address` attribute to determine
+whether a host is online.
+
+> **Tip**
+>
+> `hostalive` is the same as `ping` but with different default thresholds.
+> Both use the `ping` CLI command to execute sequential checks.
+>
+> If you need faster ICMP checks, look into the [icmp](10-icinga-template-library.md#plugin-check-command-icmp) CheckCommand.
+
+A number of other [built-in check commands](10-icinga-template-library.md#icinga-template-library) are also
+available. In addition to these commands the next few chapters will explain in
+detail how to set up your own check commands.
+
+#### Host Check Alternatives <a id="host-check-alternatives"></a>
+
+If the host is not reachable with ICMP, HTTP, etc. you can
+also use the [dummy](10-icinga-template-library.md#itl-dummy) CheckCommand to set a default state.
+
+```
+object Host "dummy-host" {
+ check_command = "dummy"
+ vars.dummy_state = 0 //Up
+ vars.dummy_text = "Everything OK."
+}
+```
+
+This method is also used when you send in [external check results](08-advanced-topics.md#external-check-results).
+
+A more advanced technique is to calculate an overall state
+based on all services. This is described [here](08-advanced-topics.md#access-object-attributes-at-runtime-cluster-check).
+
+
+## Templates <a id="object-inheritance-using-templates"></a>
+
+Templates may be used to apply a set of identical attributes to more than one
+object:
+
+```
+template Service "generic-service" {
+ max_check_attempts = 3
+ check_interval = 5m
+ retry_interval = 1m
+ enable_perfdata = true
+}
+
+apply Service "ping4" {
+ import "generic-service"
+
+ check_command = "ping4"
+
+ assign where host.address
+}
+
+apply Service "ping6" {
+ import "generic-service"
+
+ check_command = "ping6"
+
+ assign where host.address6
+}
+```
+
+
+In this example the `ping4` and `ping6` services inherit properties from the
+template `generic-service`.
+
+Objects as well as templates themselves can import an arbitrary number of
+other templates. Attributes inherited from a template can be overridden in the
+object if necessary.
+
+You can also import existing non-template objects.
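+
+For example, a template can import another template, and an object can import an
+existing object (the names below are purely illustrative):
+
+```
+template Service "generic-service-extended" {
+  import "generic-service"   // a template importing another template
+
+  enable_flapping = true
+}
+
+object Host "gateway1" {
+  check_command = "hostalive"
+  address = "192.0.2.1"
+}
+
+object Host "gateway2" {
+  import "gateway1"          // importing an existing non-template object
+
+  address = "192.0.2.2"
+}
+```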
+
+> **Note**
+>
+> Templates and objects share the same namespace, i.e. you can't define a template
+> that has the same name as an object.
+
+
+### Multiple Templates <a id="object-inheritance-using-multiple-templates"></a>
+
+The following example uses [custom variables](03-monitoring-basics.md#custom-variables) which
+are provided in each template. The `web-server` template is used as the
+base template for any host providing web services. In addition to that it
+specifies the custom variable `webserver_type`, e.g. `apache`. Since this
+template is also the base template, we import the `generic-host` template here.
+This provides the `check_command` attribute by default and we don't need
+to set it anywhere later on.
+
+```
+template Host "web-server" {
+ import "generic-host"
+ vars = {
+ webserver_type = "apache"
+ }
+}
+```
+
+The `wp-server` host template specifies a Wordpress instance and sets
+the `application_type` custom variable. Please note the `+=` [operator](17-language-reference.md#dictionary-operators)
+which adds [dictionary](17-language-reference.md#dictionary) items,
+but does not override any previous `vars` attribute.
+
+```
+template Host "wp-server" {
+ vars += {
+ application_type = "wordpress"
+ }
+}
+```
+
+The final host object imports both templates. The order is important here:
+First the base template `web-server` is added to the object, then additional
+attributes are imported from the `wp-server` template.
+
+```
+object Host "wp.example.com" {
+ import "web-server"
+ import "wp-server"
+
+ address = "192.168.56.200"
+}
+```
+
+If you want to override specific attributes inherited from templates, you can
+specify them on the host object.
+
+```
+object Host "wp1.example.com" {
+ import "web-server"
+ import "wp-server"
+
+ vars.webserver_type = "nginx" //overrides attribute from base template
+
+ address = "192.168.56.201"
+}
+```
+
+<!-- Keep this for compatibility -->
+<a id="custom-attributes"></a>
+
+## Custom Variables <a id="custom-variables"></a>
+
+In addition to built-in object attributes you can define your own custom
+attributes inside the `vars` attribute.
+
+> **Tip**
+>
+> This is called `custom variables` throughout the documentation, backends and web interfaces.
+>
+> Older documentation versions referred to this as `custom attribute`.
+
+The following example specifies the key `ssh_port` as custom
+variable and assigns an integer value.
+
+```
+object Host "localhost" {
+ check_command = "ssh"
+ vars.ssh_port = 2222
+}
+```
+
+`vars` is a [dictionary](17-language-reference.md#dictionary) where you
+can set specific keys to values. The example above uses the shorter
+[indexer](17-language-reference.md#indexer) syntax.
+
+An alternative representation can be written like this:
+
+```
+ vars = {
+ ssh_port = 2222
+ }
+```
+
+or
+
+```
+ vars["ssh_port"] = 2222
+```
+
+### Custom Variable Values <a id="custom-variables-values"></a>
+
+Valid values for custom variables include:
+
+* [Strings](17-language-reference.md#string-literals), [numbers](17-language-reference.md#numeric-literals) and [booleans](17-language-reference.md#boolean-literals)
+* [Arrays](17-language-reference.md#array) and [dictionaries](17-language-reference.md#dictionary)
+* [Functions](03-monitoring-basics.md#custom-variables-functions)
+
+You can also define nested values such as dictionaries in dictionaries.
+
+This example defines the custom variable `disks` as a dictionary.
+Its first key, `disk /`, is itself set to a dictionary
+with one key-value pair.
+
+```
+ vars.disks["disk /"] = {
+ disk_partitions = "/"
+ }
+```
+
+This can be written as resolved structure like this:
+
+```
+ vars = {
+ disks = {
+ "disk /" = {
+ disk_partitions = "/"
+ }
+ }
+ }
+```
+
+Keep this in mind when trying to access specific sub-keys
+in apply rules or functions.
+
+Another example which is shown in the example configuration:
+
+```
+ vars.notification["mail"] = {
+ groups = [ "icingaadmins" ]
+ }
+```
+
+This defines the `notification` custom variable as dictionary
+with the key `mail`. Its value is a dictionary with the key `groups`
+which itself has an array as value. Note: This array is exactly
+what the `user_groups` attribute for [notification apply rules](03-monitoring-basics.md#using-apply-notifications)
+expects.
+
+```
+ vars.notification = {
+ mail = {
+ groups = [
+ "icingaadmins"
+ ]
+ }
+ }
+```
+
+<!-- Keep this for compatibility -->
+<a id="custom-attributes-functions"></a>
+
+### Functions as Custom Variables <a id="custom-variables-functions"></a>
+
+Icinga 2 lets you specify [functions](17-language-reference.md#functions) for custom variables.
+The special case here is that whenever Icinga 2 needs the value for such a custom variable it runs
+the function and uses whatever value the function returns:
+
+```
+object CheckCommand "random-value" {
+ command = [ PluginDir + "/check_dummy", "0", "$text$" ]
+
+ vars.text = {{ Math.random() * 100 }}
+}
+```
+
+This example uses the [abbreviated lambda syntax](17-language-reference.md#nullary-lambdas).
+
+These functions have access to a number of variables:
+
+ Variable | Description
+ -------------|---------------
+ user | The User object (for notifications).
+ service | The Service object (for service checks/notifications/event handlers).
+ host | The Host object.
+ command | The command object (e.g. a CheckCommand object for checks).
+
+Here's an example:
+
+```
+vars.text = {{ host.check_interval }}
+```
+
+In addition to these variables the [macro](18-library-reference.md#scoped-functions-macro) function can be used to retrieve the
+value of arbitrary macro expressions:
+
+```
+vars.text = {{
+ if (macro("$address$") == "127.0.0.1") {
+ log("Running a check for localhost!")
+ }
+
+ return "Some text"
+}}
+```
+
+The `resolve_arguments` function can be used to resolve a command and its arguments much in
+the same fashion Icinga does this for the `command` and `arguments` attributes for
+commands. The `by_ssh` command uses this functionality to let users specify a
+command and arguments that should be executed via SSH:
+
+```
+arguments = {
+ "-C" = {{
+ var command = macro("$by_ssh_command$")
+ var arguments = macro("$by_ssh_arguments$")
+
+ if (typeof(command) == String && !arguments) {
+ return command
+ }
+
+ var escaped_args = []
+ for (arg in resolve_arguments(command, arguments)) {
+ escaped_args.add(escape_shell_arg(arg))
+ }
+ return escaped_args.join(" ")
+ }}
+ ...
+}
+```
+
+Accessing object attributes at runtime inside these functions is described in the
+[advanced topics](08-advanced-topics.md#access-object-attributes-at-runtime) chapter.
+
+
+## Runtime Macros <a id="runtime-macros"></a>
+
+Macros can be used to access other objects' attributes and [custom variables](03-monitoring-basics.md#custom-variables)
+at runtime. For example they are used in command definitions to figure out
+which IP address a check should be run against:
+
+```
+object CheckCommand "my-ping" {
+ command = [ PluginDir + "/check_ping" ]
+
+ arguments = {
+ "-H" = "$ping_address$"
+ "-w" = "$ping_wrta$,$ping_wpl$%"
+ "-c" = "$ping_crta$,$ping_cpl$%"
+ "-p" = "$ping_packets$"
+ }
+
+ // Resolve from a host attribute, or custom variable.
+ vars.ping_address = "$address$"
+
+ // Default values
+ vars.ping_wrta = 100
+ vars.ping_wpl = 5
+
+ vars.ping_crta = 250
+ vars.ping_cpl = 10
+
+ vars.ping_packets = 5
+}
+
+object Host "router" {
+ check_command = "my-ping"
+ address = "10.0.0.1"
+}
+```
+
+In this example we are using the `$address$` macro to refer to the host's `address`
+attribute.
+
+We can also directly refer to custom variables, e.g. by using `$ping_wrta$`. Icinga
+automatically tries to find the closest match for the attribute you specified. The
+exact rules for this are explained in the next section.
+
+> **Note**
+>
+> When using the `$` sign as single character you must escape it with an
+> additional dollar character (`$$`).
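+
+A small illustrative sketch (the custom variable name is made up for this example):
+
+```
+object Host "billing-host" {
+  check_command = "hostalive"
+  address = "192.0.2.20"
+
+  // "$$" is resolved to a single literal "$" during macro evaluation
+  vars.cost_note = "Hosting costs $$25 per month"
+}
+```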
+
+
+### Evaluation Order <a id="macro-evaluation-order"></a>
+
+When executing commands Icinga 2 checks the following objects in this order to look
+up macros and their respective values:
+
+1. User object (only for notifications)
+2. Service object
+3. Host object
+4. Command object
+5. Global custom variables in the `Vars` constant
+
+This execution order allows you to define default values for custom variables
+in your command objects.
+
+Here's how you can override the custom variable `ping_packets` from the previous
+example:
+
+```
+object Service "ping" {
+ host_name = "localhost"
+ check_command = "my-ping"
+
+ vars.ping_packets = 10 // Overrides the default value of 5 given in the command
+}
+```
+
+If a custom variable isn't defined anywhere, an empty value is used and a warning is
+written to the Icinga 2 log.
+
+You can also directly refer to a specific attribute -- thereby ignoring these evaluation
+rules -- by specifying the full attribute name:
+
+```
+$service.vars.ping_wrta$
+```
+
+This retrieves the value of the `ping_wrta` custom variable for the service. This
+returns an empty value if the service does not have such a custom variable, no matter
+whether another object, such as the host, has this attribute.
+
+
+### Host Runtime Macros <a id="host-runtime-macros"></a>
+
+The following host custom variables are available in all commands that are executed for
+hosts or services:
+
+ Name | Description
+ -----------------------------|--------------
+ host.name | The name of the host object.
+ host.display\_name | The value of the `display_name` attribute.
+ host.state | The host's current state. Can be one of `UNREACHABLE`, `UP` and `DOWN`.
+ host.state\_id | The host's current state. Can be one of `0` (up), `1` (down) and `2` (unreachable).
+ host.state\_type | The host's current state type. Can be one of `SOFT` and `HARD`.
+ host.check\_attempt | The current check attempt number.
+ host.max\_check\_attempts | The maximum number of checks which are executed before changing to a hard state.
+ host.last\_state | The host's previous state. Can be one of `UNREACHABLE`, `UP` and `DOWN`.
+ host.last\_state\_id | The host's previous state. Can be one of `0` (up), `1` (down) and `2` (unreachable).
+ host.last\_state\_type | The host's previous state type. Can be one of `SOFT` and `HARD`.
+ host.last\_state\_change | The last state change's timestamp.
+ host.downtime\_depth | The number of active downtimes.
+ host.duration\_sec | The time since the last state change.
+ host.latency | The host's check latency.
+ host.execution\_time | The host's check execution time.
+ host.output | The last check's output.
+ host.perfdata | The last check's performance data.
+ host.last\_check | The timestamp when the last check was executed.
+ host.check\_source | The monitoring instance that performed the last check.
+ host.num\_services | Number of services associated with the host.
+ host.num\_services\_ok | Number of services associated with the host which are in an `OK` state.
+ host.num\_services\_warning | Number of services associated with the host which are in a `WARNING` state.
+ host.num\_services\_unknown | Number of services associated with the host which are in an `UNKNOWN` state.
+ host.num\_services\_critical | Number of services associated with the host which are in a `CRITICAL` state.
+
+In addition to these specific runtime macros [host object](09-object-types.md#objecttype-host)
+attributes can be accessed too.
+
+### Service Runtime Macros <a id="service-runtime-macros"></a>
+
+The following service macros are available in all commands that are executed for
+services:
+
+ Name | Description
+ -----------------------------|--------------
+ service.name | The short name of the service object.
+ service.display\_name | The value of the `display_name` attribute.
+ service.check\_command | The short name of the command along with any arguments to be used for the check.
+ service.state | The service's current state. Can be one of `OK`, `WARNING`, `CRITICAL` and `UNKNOWN`.
+ service.state\_id | The service's current state. Can be one of `0` (ok), `1` (warning), `2` (critical) and `3` (unknown).
+ service.state\_type | The service's current state type. Can be one of `SOFT` and `HARD`.
+ service.check\_attempt | The current check attempt number.
+ service.max\_check\_attempts | The maximum number of checks which are executed before changing to a hard state.
+ service.last\_state | The service's previous state. Can be one of `OK`, `WARNING`, `CRITICAL` and `UNKNOWN`.
+ service.last\_state\_id | The service's previous state. Can be one of `0` (ok), `1` (warning), `2` (critical) and `3` (unknown).
+ service.last\_state\_type | The service's previous state type. Can be one of `SOFT` and `HARD`.
+ service.last\_state\_change | The last state change's timestamp.
+ service.downtime\_depth | The number of active downtimes.
+ service.duration\_sec | The time since the last state change.
+ service.latency | The service's check latency.
+ service.execution\_time | The service's check execution time.
+ service.output | The last check's output.
+ service.perfdata | The last check's performance data.
+ service.last\_check | The timestamp when the last check was executed.
+ service.check\_source | The monitoring instance that performed the last check.
+
+In addition to these specific runtime macros [service object](09-object-types.md#objecttype-service)
+attributes can be accessed too.
+
+### Command Runtime Macros <a id="command-runtime-macros"></a>
+
+The following custom variables are available in all commands:
+
+ Name | Description
+ -----------------------|--------------
+ command.name | The name of the command object.
+
+### User Runtime Macros <a id="user-runtime-macros"></a>
+
+The following custom variables are available in all commands that are executed for
+users:
+
+ Name | Description
+ -----------------------|--------------
+ user.name | The name of the user object.
+ user.display\_name | The value of the `display_name` attribute.
+
+In addition to these specific runtime macros [user object](09-object-types.md#objecttype-user)
+attributes can be accessed too.
+
+### Notification Runtime Macros <a id="notification-runtime-macros"></a>
+
+ Name | Description
+ -----------------------|--------------
+ notification.type | The type of the notification.
+ notification.author | The author of the notification comment if existing.
+ notification.comment | The comment of the notification if existing.
+
+In addition to these specific runtime macros [notification object](09-object-types.md#objecttype-notification)
+attributes can be accessed too.
+
+### Global Runtime Macros <a id="global-runtime-macros"></a>
+
+The following macros are available in all executed commands:
+
+ Name | Description
+ -------------------------|--------------
+ icinga.timet | Current UNIX timestamp.
+ icinga.long\_date\_time | Current date and time including timezone information. Example: `2014-01-03 11:23:08 +0000`
+ icinga.short\_date\_time | Current date and time. Example: `2014-01-03 11:23:08`
+ icinga.date | Current date. Example: `2014-01-03`
+ icinga.time | Current time including timezone information. Example: `11:23:08 +0000`
+ icinga.uptime | Current uptime of the Icinga 2 process.
+
+The following macros provide global statistics:
+
+ Name | Description
+ ------------------------------------|------------------------------------
+ icinga.num\_services\_ok | Current number of services in state 'OK'.
+ icinga.num\_services\_warning | Current number of services in state 'Warning'.
+ icinga.num\_services\_critical | Current number of services in state 'Critical'.
+ icinga.num\_services\_unknown | Current number of services in state 'Unknown'.
+ icinga.num\_services\_pending | Current number of pending services.
+ icinga.num\_services\_unreachable | Current number of unreachable services.
+ icinga.num\_services\_flapping | Current number of flapping services.
+ icinga.num\_services\_in\_downtime | Current number of services in downtime.
+ icinga.num\_services\_acknowledged | Current number of acknowledged service problems.
+ icinga.num\_hosts\_up | Current number of hosts in state 'Up'.
+ icinga.num\_hosts\_down | Current number of hosts in state 'Down'.
+ icinga.num\_hosts\_unreachable | Current number of unreachable hosts.
+ icinga.num\_hosts\_pending | Current number of pending hosts.
+ icinga.num\_hosts\_flapping | Current number of flapping hosts.
+ icinga.num\_hosts\_in\_downtime | Current number of hosts in downtime.
+ icinga.num\_hosts\_acknowledged | Current number of acknowledged host problems.
+
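+As an illustrative sketch, such macros can be passed to a notification script via
+the `env` attribute. The script path and environment variable names below are
+placeholders, not part of the shipped configuration:
+
+```
+object NotificationCommand "mail-with-stats" {
+  command = [ ConfigDir + "/scripts/mail-with-stats.sh" ]
+
+  env = {
+    NOTIFICATION_DATETIME = "$icinga.long_date_time$"
+    ICINGA_UPTIME = "$icinga.uptime$"
+    NUM_HOSTS_DOWN = "$icinga.num_hosts_down$"
+  }
+}
+```
+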
+### Environment Variable Runtime Macros <a id="env-runtime-macros"></a>
+
+All environment variables of the Icinga process are available as runtime macros
+named `env.<env var name>`, e.g. `$env.ProgramFiles$` for `ProgramFiles`, which is
+especially useful on Windows. In contrast to the other runtime macros, environment
+variables require the `env.` prefix.
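+
+A minimal sketch of how this could look in a command definition (the executable
+path is purely hypothetical):
+
+```
+object CheckCommand "check-example-win" {
+  // "$env.ProgramFiles$" is replaced with the ProgramFiles environment variable at runtime
+  command = [ "$env.ProgramFiles$\\Example\\check_example.exe" ]
+}
+```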
+
+
+## Apply Rules <a id="using-apply"></a>
+
+Several object types require an object relation, e.g. [Service](09-object-types.md#objecttype-service),
+[Notification](09-object-types.md#objecttype-notification), [Dependency](09-object-types.md#objecttype-dependency),
+[ScheduledDowntime](09-object-types.md#objecttype-scheduleddowntime) objects. The
+object relations are documented in the linked chapters.
+
+If you for example create a service object you have to specify the [host_name](09-object-types.md#objecttype-service)
+attribute and reference an existing host attribute.
+
+```
+object Service "ping4" {
+ check_command = "ping4"
+ host_name = "icinga2-agent1.localdomain"
+}
+```
+
+This isn't comfortable when managing a huge set of configuration objects which could
+[match](03-monitoring-basics.md#using-apply-expressions) on a common pattern.
+
+Instead you want to use **[apply](17-language-reference.md#apply) rules**.
+
+If you want basic monitoring for all your hosts, add a `ping4` service apply rule
+for all hosts which have the `address` attribute specified. Just one rule for 1000 hosts
+instead of 1000 service objects. Apply rules will automatically generate them for you.
+
+```
+apply Service "ping4" {
+ check_command = "ping4"
+ assign where host.address
+}
+```
+
+More explanations on assign where expressions can be found [here](03-monitoring-basics.md#using-apply-expressions).
+
+### Apply Rules: Prerequisites <a id="using-apply-prerquisites"></a>
+
+Before you start with apply rules keep the following in mind:
+
+* Define the best match.
+ * A set of unique [custom variables](03-monitoring-basics.md#custom-variables) for these hosts/services?
+ * Or [group](03-monitoring-basics.md#groups) memberships, e.g. a host being a member of a hostgroup which should have a service set?
+ * A generic pattern [match](18-library-reference.md#global-functions-match) on the host/service name?
+ * [Multiple expressions combined](03-monitoring-basics.md#using-apply-expressions) with `&&` or `||` [operators](17-language-reference.md#expression-operators)
+* All expressions must return a boolean value (e.g. an empty string evaluates to `false`)
+
+More specific object type requirements are described in these chapters:
+
+* [Apply services to hosts](03-monitoring-basics.md#using-apply-services)
+* [Apply notifications to hosts and services](03-monitoring-basics.md#using-apply-notifications)
+* [Apply dependencies to hosts and services](03-monitoring-basics.md#using-apply-dependencies)
+* [Apply scheduled downtimes to hosts and services](03-monitoring-basics.md#using-apply-scheduledowntimes)
+
+### Apply Rules: Usage Examples <a id="using-apply-usage-examples"></a>
+
+You can set/override object attributes in apply rules using the objects that are
+available in that scope (host and/or service objects).
+
+```
+vars.application_type = host.vars.application_type
+```
+
+[Custom variables](03-monitoring-basics.md#custom-variables) can also store
+nested dictionaries and arrays. That way you can not only match
+on their existence or values in apply expressions, but also assign
+("inherit") their values into the objects generated from apply rules.
+
+Remember the examples shown for [custom variable values](03-monitoring-basics.md#custom-variables-values):
+
+```
+ vars.notification["mail"] = {
+ groups = [ "icingaadmins" ]
+ }
+```
+
+You can do two things here:
+
+* Check for the existence of the `notification` custom variable and its nested dictionary key `mail`.
+If this is boolean true, the notification object will be generated.
+* Assign the value of the `groups` key to the `user_groups` attribute.
+
+```
+apply Notification "mail-icingaadmin" to Host {
+ [...]
+
+ user_groups = host.vars.notification.mail.groups
+
+ assign where host.vars.notification.mail
+}
+```
+
+A more advanced example is to use [apply rules with for loops on arrays or
+dictionaries](03-monitoring-basics.md#using-apply-for) provided by
+[custom variables](03-monitoring-basics.md#custom-variables) or groups.
+
+Remember the examples shown for [custom variable values](03-monitoring-basics.md#custom-variables-values):
+
+```
+ vars.disks["disk /"] = {
+ disk_partitions = "/"
+ }
+```
+
+You can iterate over all dictionary keys defined in `disks`.
+You can optionally use the value to specify additional object attributes.
+
+```
+apply Service for (disk => config in host.vars.disks) {
+ [...]
+
+ vars.disk_partitions = config.disk_partitions
+}
+```
+
+Please read the [apply for chapter](03-monitoring-basics.md#using-apply-for)
+for more specific insights.
+
+
+> **Tip**
+>
+> Building configuration in such a dynamic way requires detailed knowledge
+> of the generated objects. Use the `object list` [CLI command](11-cli-commands.md#cli-command-object)
+> after successful [configuration validation](11-cli-commands.md#config-validation).
+
+
+### Apply Rules Expressions <a id="using-apply-expressions"></a>
+
+You can use simple or advanced combinations of apply rule expressions. Each
+expression must evaluate into the boolean `true` value. An empty string
+will, for instance, be interpreted as `false`. In a similar fashion, undefined
+attributes will return `false`.
+
+Returns `false`:
+
+```
+assign where host.vars.attribute_does_not_exist
+```
+
+Multiple `assign where` condition rows are evaluated as `OR` condition.
+
+You can combine multiple expressions for matching only a subset of objects. In some cases,
+you want to be able to add more than one assign/ignore where expression which matches
+a specific condition. To achieve this you can use the logical `and` and `or` operators.
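+
+The sketch below illustrates both cases; the custom variables used here are only
+examples:
+
+```
+apply Service "backup" {
+  check_command = "dummy"
+
+  // Multiple assign where lines are evaluated as a logical OR
+  assign where host.vars.backup_type == "full"
+  assign where host.vars.backup_type == "incremental"
+
+  // Combine expressions with && and || for a more specific match
+  ignore where host.vars.no_backup == true || match("*-test", host.name)
+}
+```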
+
+#### Apply Rules Expressions Examples <a id="using-apply-expressions-examples"></a>
+
+Assign a service to a specific host in a host group [array](18-library-reference.md#array-type) using the [in operator](17-language-reference.md#expression-operators):
+
+```
+assign where "hostgroup-dev" in host.groups
+```
+
+Assign an object when a custom variable is [equal](17-language-reference.md#expression-operators) to a value:
+
+```
+assign where host.vars.application_type == "database"
+
+assign where service.vars.sms_notify == true
+```
+
+Assign an object if a dictionary [contains](18-library-reference.md#dictionary-contains) a given key:
+
+```
+assign where host.vars.app_dict.contains("app")
+```
+
+Match the host name using a [case insensitive match](18-library-reference.md#global-functions-match):
+
+```
+assign where match("webserver*", host.name)
+```
+
+Match the host name by using a [regular expression](18-library-reference.md#global-functions-regex). Please note the [escaped](17-language-reference.md#string-literals-escape-sequences) backslash character:
+
+```
+assign where regex("^webserver-[\\d+]", host.name)
+```
+
+[Match](18-library-reference.md#global-functions-match) all host names against the `*mysql*` pattern and (`&&`) require that the custom variable `prod_mysql_db`
+matches the `db-*` pattern. All hosts with the custom variable `test_server` set to `true`,
+or with a host name matching the `*internal` pattern, are ignored.
+
+```
+object HostGroup "mysql-server" {
+ display_name = "MySQL Server"
+
+ assign where match("*mysql*", host.name) && match("db-*", host.vars.prod_mysql_db)
+ ignore where host.vars.test_server == true
+ ignore where match("*internal", host.name)
+}
+```
+
+A similar example for advanced notification apply rule filters: The notification is assigned if the service
+attribute `notes` [matches](18-library-reference.md#global-functions-match) the `has gold support 24x7` string `AND` one of the
+two following conditions passes: either the `customer` host custom variable is set to `customer-xy`
+`OR` the host custom variable `always_notify` is set to `true`.
+
+The notification is ignored for services whose host name ends with `*internal`
+`OR` the `priority` custom variable is [less than](17-language-reference.md#expression-operators) `2`.
+
+```
+template Notification "cust-xy-notification" {
+ users = [ "noc-xy", "mgmt-xy" ]
+ command = "mail-service-notification"
+}
+
+apply Notification "notify-cust-xy-mysql" to Service {
+ import "cust-xy-notification"
+
+ assign where match("*has gold support 24x7*", service.notes) && (host.vars.customer == "customer-xy" || host.vars.always_notify == true)
+ ignore where match("*internal", host.name) || (service.vars.priority < 2 && host.vars.is_clustered == true)
+}
+```
+
+More advanced examples are covered [here](08-advanced-topics.md#use-functions-assign-where).
+
+### Apply Services to Hosts <a id="using-apply-services"></a>
+
+The sample configuration already includes a detailed example in [hosts.conf](04-configuration.md#hosts-conf)
+and [services.conf](04-configuration.md#services-conf) for this use case.
+
+The example for `ssh` applies a service object to all hosts with the `address`
+attribute being defined and the custom variable `os` set to the string `Linux` in `vars`.
+
+```
+apply Service "ssh" {
+ import "generic-service"
+
+ check_command = "ssh"
+
+ assign where host.address && host.vars.os == "Linux"
+}
+```
+
+Other detailed examples are used in their respective chapters, for example
+[apply services with custom command arguments](03-monitoring-basics.md#command-passing-parameters).
+
+### Apply Notifications to Hosts and Services <a id="using-apply-notifications"></a>
+
+Notifications are applied to specific targets (`Host` or `Service`) and work in a similar
+manner:
+
+```
+apply Notification "mail-noc" to Service {
+ import "mail-service-notification"
+
+ user_groups = [ "noc" ]
+
+ assign where host.vars.notification.mail
+}
+```
+
+In this example the `mail-noc` notification will be created as an object for all services that have the
+`notification.mail` custom variable defined. The notification command is set to `mail-service-notification`
+and all members of the user group `noc` will get notified.
+
+It is also possible to generally apply a notification template and dynamically overwrite values from
+the template by checking for custom variables. This can be achieved by using [conditional statements](17-language-reference.md#conditional-statements):
+
+```
+apply Notification "host-mail-noc" to Host {
+ import "mail-host-notification"
+
+ // replace interval inherited from `mail-host-notification` template with new notification interval set by a host custom variable
+ if (host.vars.notification_interval) {
+ interval = host.vars.notification_interval
+ }
+
+ // same with notification period
+ if (host.vars.notification_period) {
+ period = host.vars.notification_period
+ }
+
+ // Send SMS instead of email if the host's custom variable `notification_type` is set to `sms`
+ if (host.vars.notification_type == "sms") {
+ command = "sms-host-notification"
+ } else {
+ command = "mail-host-notification"
+ }
+
+ user_groups = [ "noc" ]
+
+ assign where host.address
+}
+```
+
+In the example above the notification template `mail-host-notification`
+contains all relevant notification settings.
+The apply rule is applied on all host objects where the `host.address` is defined.
+
+If the host object has a specific custom variable set, its value is inherited
+into the local notification object scope, e.g. `host.vars.notification_interval`,
+`host.vars.notification_period` and `host.vars.notification_type`.
+This overwrites attributes already specified in the imported `mail-host-notification`
+template.
+
+The corresponding host object could look like this:
+
+```
+object Host "host1" {
+ import "host-linux-prod"
+ display_name = "host1"
+ address = "192.168.1.50"
+ vars.notification_interval = 1h
+ vars.notification_period = "24x7"
+ vars.notification_type = "sms"
+}
+```
+
+### Apply Dependencies to Hosts and Services <a id="using-apply-dependencies"></a>
+
+Detailed examples can be found in the [dependencies](03-monitoring-basics.md#dependencies) chapter.
+
+### Apply Recurring Downtimes to Hosts and Services <a id="using-apply-scheduledowntimes"></a>
+
+The sample configuration includes an example in [downtimes.conf](04-configuration.md#downtimes-conf).
+
+Detailed examples can be found in the [recurring downtimes](08-advanced-topics.md#recurring-downtimes) chapter.
+
+
+### Using Apply For Rules <a id="using-apply-for"></a>
+
+Next to the standard way of using [apply rules](03-monitoring-basics.md#using-apply)
+there is the requirement of applying objects based on a set (array or
+dictionary) using [apply for](17-language-reference.md#apply-for) expressions.
+
+The sample configuration already includes a detailed example in [hosts.conf](04-configuration.md#hosts-conf)
+and [services.conf](04-configuration.md#services-conf) for this use case.
+
+Take the following example: A host provides the SNMP OIDs for different service check
+types. The configuration could look like this:
+
+```
+object Host "router-v6" {
+ check_command = "hostalive"
+ address6 = "2001:db8:1234::42"
+
+ vars.oids["if01"] = "1.1.1.1.1"
+ vars.oids["temp"] = "1.1.1.1.2"
+ vars.oids["bgp"] = "1.1.1.1.5"
+}
+```
+
+The idea is to create service objects for `if01` and `temp` but not `bgp`.
+The oid value should also be used as service custom variable `snmp_oid`.
+This is the command argument required by the [snmp](10-icinga-template-library.md#plugin-check-command-snmp)
+check command.
+The service's `display_name` should be set to the identifier inside the dictionary,
+e.g. `if01`.
+
+```
+apply Service for (identifier => oid in host.vars.oids) {
+ check_command = "snmp"
+ display_name = identifier
+ vars.snmp_oid = oid
+
+ ignore where identifier == "bgp" //don't generate service for bgp checks
+}
+```
+
+Icinga 2 evaluates the `apply for` rule for all objects with the custom variable
+`oids` set.
+It iterates over all dictionary items inside the `for` loop and evaluates the
+`assign/ignore where` expressions. You can access the loop variable
+in these expressions, e.g. to ignore specific values.
+
+In this example the `bgp` identifier is ignored. This avoids generating
+unwanted services. A different approach would be, for example, to match the `oid` value against a
+[regex](18-library-reference.md#global-functions-regex)/[wildcard match](18-library-reference.md#global-functions-match) pattern.
+
+```
+ ignore where regex("^\d.\d.\d.\d.5$", oid)
+```
+
+> **Note**
+>
+> You don't need an `assign where` expression which checks for the existence of the
+> `oids` custom variable.
+
+This method saves you from creating multiple apply rules. It also moves
+the attribute specification logic from the service to the host.
+
+<!-- Keep this for compatibility -->
+<a id="using-apply-for-custom-attribute-override"></a>
+
+#### Apply For and Custom Variable Override <a id="using-apply-for-custom-variable-override"></a>
+
+Imagine a different, more advanced example: You are monitoring your network device (host)
+with many interfaces (services). The following requirements/problems apply:
+
+* Each interface service should be named with a prefix and a name defined in your host object (which could be generated from your CMDB, etc.)
+* Each interface has its own VLAN tag
+* Some interfaces have QoS enabled
+* Additional attributes such as `display_name` or `notes`, `notes_url` and `action_url` must be
+dynamically generated.
+
+
+> **Tip**
+>
+> Define the SNMP community as global constant in your [constants.conf](04-configuration.md#constants-conf) file.
+
+```
+const IftrafficSnmpCommunity = "public"
+```
+
+Define the `interfaces` [custom variable](03-monitoring-basics.md#custom-variables)
+on the `cisco-catalyst-6509-34` host object and add three example interfaces as dictionary keys.
+
+Specify additional attributes inside the nested dictionary
+as learned with [custom variable values](03-monitoring-basics.md#custom-variables-values):
+
+```
+object Host "cisco-catalyst-6509-34" {
+ import "generic-host"
+ display_name = "Catalyst 6509 #34 VIE21"
+ address = "127.0.1.4"
+
+ /* "GigabitEthernet0/2" is the interface name,
+ * and key name in service apply for later on
+ */
+ vars.interfaces["GigabitEthernet0/2"] = {
+ /* define all custom variables with the
+ * same name required for command parameters/arguments
+ * in service apply (look into your CheckCommand definition)
+ */
+ iftraffic_units = "g"
+ iftraffic_community = IftrafficSnmpCommunity
+ iftraffic_bandwidth = 1
+ vlan = "internal"
+ qos = "disabled"
+ }
+ vars.interfaces["GigabitEthernet0/4"] = {
+ iftraffic_units = "g"
+ //iftraffic_community = IftrafficSnmpCommunity
+ iftraffic_bandwidth = 1
+ vlan = "remote"
+ qos = "enabled"
+ }
+ vars.interfaces["MgmtInterface1"] = {
+ iftraffic_community = IftrafficSnmpCommunity
+ vlan = "mgmt"
+ interface_address = "127.99.0.100" #special management ip
+ }
+}
+```
+
+Start with the apply for definition and iterate over `host.vars.interfaces`.
+This is a dictionary, so use the variable `interface_name` as the key
+and `interface_config` as the value in each generated object's scope.
+
+`"if-"` specifies the object name prefix for each service which results
+in `if-<interface_name>` for each iteration.
+
+```
+/* loop over the host.vars.interfaces dictionary
+ * for (key => value in dict) means `interface_name` as key
+ * and `interface_config` as value. Access config attributes
+ * with the indexer (`.`) character.
+ */
+apply Service "if-" for (interface_name => interface_config in host.vars.interfaces) {
+```
+
+Import the `generic-service` template, assign the [iftraffic](10-icinga-template-library.md#plugin-contrib-command-iftraffic)
+`check_command`. Use the dictionary key `interface_name` to set a proper `display_name`
+string for external interfaces.
+
+```
+ import "generic-service"
+ check_command = "iftraffic"
+ display_name = "IF-" + interface_name
+```
+
+The dictionary key stored in `interface_name` is the same string used as command parameter for
+`iftraffic`:
+
+```
+ /* use the key as command argument (no duplication of values in host.vars.interfaces) */
+ vars.iftraffic_interface = interface_name
+```
+
+Remember that `interface_config` is a nested dictionary. In the first iteration it looks
+like this:
+
+```
+interface_config = {
+ iftraffic_units = "g"
+ iftraffic_community = IftrafficSnmpCommunity
+ iftraffic_bandwidth = 1
+ vlan = "internal"
+ qos = "disabled"
+}
+```
+
+Access the dictionary keys with the [indexer](17-language-reference.md#indexer) syntax
+and assign them to custom variables used as command parameters for the `iftraffic`
+check command.
+
+```
+ /* map the custom variables as command arguments */
+ vars.iftraffic_units = interface_config.iftraffic_units
+ vars.iftraffic_community = interface_config.iftraffic_community
+```
+
+If you just want to inherit all attributes specified inside the `interface_config`
+dictionary, add it to the generated service custom variables like this:
+
+```
+ /* the above can be achieved in a shorter fashion if the names inside host.vars.interfaces
+ * are exactly the same as the command parameters required by the check command
+ * definition.
+ */
+ vars += interface_config
+```
+
+If the user did not specify default values for required service custom variables,
+add them here. This also helps to avoid unwanted configuration validation errors or
+runtime failures. Please read more about conditional statements [here](17-language-reference.md#conditional-statements).
+
+```
+ /* set a default value for units and bandwidth */
+ if (interface_config.iftraffic_units == "") {
+ vars.iftraffic_units = "m"
+ }
+ if (interface_config.iftraffic_bandwidth == "") {
+ vars.iftraffic_bandwidth = 1
+ }
+ if (interface_config.vlan == "") {
+ vars.vlan = "not set"
+ }
+ if (interface_config.qos == "") {
+ vars.qos = "not set"
+ }
+```
+
+If the host object did not specify a custom SNMP community,
+set a default value specified by the [global constant](17-language-reference.md#constants) `IftrafficSnmpCommunity`.
+
+```
+ /* set the global constant if not explicitly
+ * provided by the `interfaces` dictionary on the host
+ */
+ if (len(interface_config.iftraffic_community) == 0 || len(vars.iftraffic_community) == 0) {
+ vars.iftraffic_community = IftrafficSnmpCommunity
+ }
+```
+
+Use the provided values to [calculate](17-language-reference.md#expression-operators)
+more object attributes which can be e.g. seen in external interfaces.
+
+```
+ /* Calculate some additional object attributes after populating the `vars` dictionary */
+ notes = "Interface check for " + interface_name + " (units: '" + interface_config.iftraffic_units + "') in VLAN '" + vars.vlan + "' with ' QoS '" + vars.qos + "'"
+ notes_url = "https://foreman.company.com/hosts/" + host.name
+ action_url = "https://snmp.checker.company.com/" + host.name + "/if-" + interface_name
+}
+```
+
+> **Tip**
+>
+> Building configuration in such a dynamic way requires detailed knowledge
+> of the generated objects. Use the `object list` [CLI command](11-cli-commands.md#cli-command-object)
+> after successful [configuration validation](11-cli-commands.md#config-validation).
+
+Verify that the apply-for-rule successfully created the service objects with the
+inherited custom variables:
+
+```
+# icinga2 daemon -C
+# icinga2 object list --type Service --name *catalyst*
+
+Object 'cisco-catalyst-6509-34!if-GigabitEthernet0/2' of type 'Service':
+......
+ * vars
+ % = modified in '/etc/icinga2/conf.d/iftraffic.conf', lines 59:3-59:26
+ * iftraffic_bandwidth = 1
+ * iftraffic_community = "public"
+ % = modified in '/etc/icinga2/conf.d/iftraffic.conf', lines 53:3-53:65
+ * iftraffic_interface = "GigabitEthernet0/2"
+ % = modified in '/etc/icinga2/conf.d/iftraffic.conf', lines 49:3-49:43
+ * iftraffic_units = "g"
+ % = modified in '/etc/icinga2/conf.d/iftraffic.conf', lines 52:3-52:57
+ * qos = "disabled"
+ * vlan = "internal"
+
+
+Object 'cisco-catalyst-6509-34!if-GigabitEthernet0/4' of type 'Service':
+...
+ * vars
+ % = modified in '/etc/icinga2/conf.d/iftraffic.conf', lines 59:3-59:26
+ * iftraffic_bandwidth = 1
+ * iftraffic_community = "public"
+ % = modified in '/etc/icinga2/conf.d/iftraffic.conf', lines 53:3-53:65
+ % = modified in '/etc/icinga2/conf.d/iftraffic.conf', lines 79:5-79:53
+ * iftraffic_interface = "GigabitEthernet0/4"
+ % = modified in '/etc/icinga2/conf.d/iftraffic.conf', lines 49:3-49:43
+ * iftraffic_units = "g"
+ % = modified in '/etc/icinga2/conf.d/iftraffic.conf', lines 52:3-52:57
+ * qos = "enabled"
+ * vlan = "remote"
+
+Object 'cisco-catalyst-6509-34!if-MgmtInterface1' of type 'Service':
+...
+ * vars
+ % = modified in '/etc/icinga2/conf.d/iftraffic.conf', lines 59:3-59:26
+ * iftraffic_bandwidth = 1
+ % = modified in '/etc/icinga2/conf.d/iftraffic.conf', lines 66:5-66:32
+ * iftraffic_community = "public"
+ % = modified in '/etc/icinga2/conf.d/iftraffic.conf', lines 53:3-53:65
+ * iftraffic_interface = "MgmtInterface1"
+ % = modified in '/etc/icinga2/conf.d/iftraffic.conf', lines 49:3-49:43
+ * iftraffic_units = "m"
+ % = modified in '/etc/icinga2/conf.d/iftraffic.conf', lines 52:3-52:57
+ % = modified in '/etc/icinga2/conf.d/iftraffic.conf', lines 63:5-63:30
+ * interface_address = "127.99.0.100"
+ * qos = "not set"
+ % = modified in '/etc/icinga2/conf.d/iftraffic.conf', lines 72:5-72:24
+ * vlan = "mgmt"
+```
+
+### Use Object Attributes in Apply Rules <a id="using-apply-object-attributes"></a>
+
+Since apply rules are evaluated after the generic objects, you
+can reference existing host and/or service object attributes as
+values for any object attribute specified in that apply rule.
+
+```
+object Host "opennebula-host" {
+ import "generic-host"
+ address = "10.1.1.2"
+
+ vars.hosting["cust1"] = {
+ http_uri = "/shop"
+ customer_name = "Customer 1"
+ customer_id = "7568"
+ support_contract = "gold"
+ }
+ vars.hosting["cust2"] = {
+ http_uri = "/"
+ customer_name = "Customer 2"
+ customer_id = "7569"
+ support_contract = "silver"
+ }
+}
+```
+
+`hosting` is a custom variable with the Dictionary value type.
+This is required in order to iterate with the `key => value` notation
+in the apply for rule below.
+
+```
+apply Service for (customer => config in host.vars.hosting) {
+ import "generic-service"
+ check_command = "ping4"
+
+ vars.qos = "disabled"
+
+ vars += config
+
+ vars.http_uri = "/" + customer + "/" + config.http_uri
+
+ display_name = "Shop Check for " + vars.customer_name + "-" + vars.customer_id
+
+ notes = "Support contract: " + vars.support_contract + " for Customer " + vars.customer_name + " (" + vars.customer_id + ")."
+
+ notes_url = "https://foreman.company.com/hosts/" + host.name
+ action_url = "https://snmp.checker.company.com/" + host.name + "/" + vars.customer_id
+}
+```
+
+Each loop iteration has different values for `customer` and `config`
+in the local scope.
+
+1.
+
+```
+customer = "cust 1"
+config = {
+ http_uri = "/shop"
+ customer_name = "Customer 1"
+ customer_id = "7568"
+ support_contract = "gold"
+}
+```
+
+2.
+
+```
+customer = "cust2"
+config = {
+ http_uri = "/"
+ customer_name = "Customer 2"
+ customer_id = "7569"
+ support_contract = "silver"
+}
+```
+
+You can now add the `config` dictionary into `vars`.
+
+```
+vars += config
+```
+
+Now it looks like the following in the first iteration:
+
+```
+customer = "cust 1"
+vars = {
+ http_uri = "/shop"
+ customer_name = "Customer 1"
+ customer_id = "7568"
+ support_contract = "gold"
+}
+```
+
+Remember, you already know this structure. Custom
+variables can also be accessed by using the [indexer](17-language-reference.md#indexer)
+syntax.
+
+```
+ vars.http_uri = ... + config.http_uri
+```
+
+can also be written as
+
+```
+ vars += config
+ vars.http_uri = ... + vars.http_uri
+```
+
+
+## Groups <a id="groups"></a>
+
+A group is a collection of similar objects. Groups are primarily used as a
+visualization aid in web interfaces.
+
+Group membership is defined at the respective object itself. If,
+for example, you have a host group named `windows` and want to assign
+specific hosts to this group so you can later view the group on your
+alert dashboard, first create a HostGroup object:
+
+```
+object HostGroup "windows" {
+ display_name = "Windows Servers"
+}
+```
+
+Then add your hosts to this group:
+
+```
+template Host "windows-server" {
+ groups += [ "windows" ]
+}
+
+object Host "mssql-srv1" {
+ import "windows-server"
+
+ vars.mssql_port = 1433
+}
+
+object Host "mssql-srv2" {
+ import "windows-server"
+
+ vars.mssql_port = 1433
+}
+```
+
+This can be done for service and user groups the same way:
+
+```
+object UserGroup "windows-mssql-admins" {
+ display_name = "Windows MSSQL Admins"
+}
+
+template User "generic-windows-mssql-users" {
+ groups += [ "windows-mssql-admins" ]
+}
+
+object User "win-mssql-noc" {
+ import "generic-windows-mssql-users"
+
+ email = "noc@example.com"
+}
+
+object User "win-mssql-ops" {
+ import "generic-windows-mssql-users"
+
+ email = "ops@example.com"
+}
+```
+
+### Group Membership Assign <a id="group-assign-intro"></a>
+
+Instead of manually assigning each object to a group you can also assign objects
+to a group based on their attributes:
+
+```
+object HostGroup "prod-mssql" {
+ display_name = "Production MSSQL Servers"
+
+ assign where host.vars.mssql_port && host.vars.prod_mysql_db
+ ignore where host.vars.test_server == true
+ ignore where match("*internal", host.name)
+}
+```
+
+In this example all hosts with the custom variables `mssql_port` and `prod_mysql_db`
+set will be added as members to the host group `prod-mssql`. However, all
+hosts [matching](18-library-reference.md#global-functions-match) the `*internal` pattern
+or with the `test_server` attribute set to `true` are **not** added to this group.
+
+Details on the `assign where` syntax can be found in the
+[Language Reference](17-language-reference.md#apply).
+
+## Notifications <a id="alert-notifications"></a>
+
+Notifications for service and host problems are an integral part of your
+monitoring setup.
+
+When a host or service is in a downtime, a problem has been acknowledged, or
+the dependency logic has determined that the host/service is unreachable, no
+notifications are sent. You can configure additional type and state filters
+to refine which notifications are actually sent.
+
+There are many ways of sending notifications, e.g. by email, XMPP,
+IRC, Twitter, etc. On its own Icinga 2 does not know how to send notifications.
+Instead it relies on external mechanisms such as shell scripts to notify users.
+More notification methods are listed in the [addons and plugins](13-addons.md#notification-scripts-interfaces)
+chapter.
+
+A notification specification requires one or more users (and/or user groups)
+who will be notified in case of problems. These users must have all custom
+attributes defined which will be used in the `NotificationCommand` on execution.
+
+The user `icingaadmin` in the example below will get notified only on `Warning` and
+`Critical` problems. In addition to that `Recovery` notifications are sent (they require
+the `OK` state).
+
+```
+object User "icingaadmin" {
+ display_name = "Icinga 2 Admin"
+ enable_notifications = true
+ states = [ OK, Warning, Critical ]
+ types = [ Problem, Recovery ]
+ email = "icinga@localhost"
+}
+```
+
+If you don't set the `states` and `types` configuration attributes for the `User`
+object, notifications for all states and types will be sent.
+
+Details on troubleshooting notification problems can be found [here](15-troubleshooting.md#troubleshooting).
+
+> **Note**
+>
+> Make sure that the [notification](11-cli-commands.md#enable-features) feature is enabled
+> in order to execute notification commands.
+
+You should decide which information you (and your notified users) need in
+case of an emergency, and also which information does not provide any value to you and
+your environment.
+
+An example notification command is explained [here](03-monitoring-basics.md#notification-commands).
+
+You can add all shared attributes to a `Notification` template which is imported
+by the defined notifications. That way you avoid duplicating attributes in each
+`Notification` object. Attributes can be overridden locally.
+
+```
+template Notification "generic-notification" {
+ interval = 15m
+
+ command = "mail-service-notification"
+
+ states = [ Warning, Critical, Unknown ]
+ types = [ Problem, Acknowledgement, Recovery, Custom, FlappingStart,
+ FlappingEnd, DowntimeStart, DowntimeEnd, DowntimeRemoved ]
+
+ period = "24x7"
+}
+```
+
+The time period `24x7` is included as example configuration with Icinga 2.
+
+Use the `apply` keyword to create `Notification` objects for your services:
+
+```
+apply Notification "notify-cust-xy-mysql" to Service {
+ import "generic-notification"
+
+ users = [ "noc-xy", "mgmt-xy" ]
+
+ assign where match("*has gold support 24x7*", service.notes) && (host.vars.customer == "customer-xy" || host.vars.always_notify == true)
+ ignore where match("*internal", host.name) || (service.vars.priority < 2 && host.vars.is_clustered == true)
+}
+```
+
+
+Instead of assigning users to notifications, you can also add the `user_groups`
+attribute with a list of user groups to the `Notification` object. Icinga 2 will
+send notifications to all group members.
+
+> **Note**
+>
+> Only users who have been notified of a problem before (`Warning`, `Critical`, `Unknown`
+> states for services, `Down` for hosts) will receive `Recovery` notifications.
+
+Icinga 2 v2.10 allows you to configure a `User` object with `Acknowledgement` and/or `Recovery`
+without a `Problem` notification. These notifications will be sent without
+any problem notifications beforehand, and can be used for e.g. ticket systems.
+
+```
+object User "ticketadmin" {
+ display_name = "Ticket Admin"
+ enable_notifications = true
+ states = [ OK, Warning, Critical ]
+ types = [ Acknowledgement, Recovery ]
+ email = "ticket@localhost"
+}
+```
+
+### Notifications: Users from Host/Service <a id="alert-notifications-users-host-service"></a>
+
+A common pattern is to store the users and user groups
+on the host or service objects instead of the notification
+object itself.
+
+The sample configuration provided in [hosts.conf](04-configuration.md#hosts-conf) and [notifications.conf](04-configuration.md#notifications-conf)
+already provides an example for this use case.
+
+> **Tip**
+>
+> Please make sure to read the [apply](03-monitoring-basics.md#using-apply) and
+> [custom variable values](03-monitoring-basics.md#custom-variables-values) chapter to
+> fully understand these examples.
+
+
+Specify the user and groups as nested custom variable on the host object:
+
+```
+object Host "icinga2-agent1.localdomain" {
+ [...]
+
+ vars.notification["mail"] = {
+ groups = [ "icingaadmins" ]
+ users = [ "icingaadmin" ]
+ }
+ vars.notification["sms"] = {
+ users = [ "icingaadmin" ]
+ }
+}
+```
+
+As you can see, there is the option to use two different notification
+apply rules here: One for `mail` and one for `sms`.
+
+This example assigns the `users` and `groups` nested keys from the `notification`
+custom variable to the actual notification object attributes.
+
+Since errors are hard to debug if host objects don't specify the required
+configuration attributes, you can add a safety condition which logs which
+host object is affected.
+
+```
+critical/config: Host 'icinga2-client3.localdomain' does not specify required user/user_groups configuration attributes for notification 'mail-icingaadmin'.
+```
+
+You can also use the [script debugger](20-script-debugger.md#script-debugger) for more advanced insights.
+
+```
+apply Notification "mail-host-notification" to Host {
+ [...]
+
+ /* Log which host does not specify required user/user_groups attributes. This will fail immediately during config validation and help a lot. */
+ if (len(host.vars.notification.mail.users) == 0 && len(host.vars.notification.mail.user_groups) == 0) {
+ log(LogCritical, "config", "Host '" + host.name + "' does not specify required user/user_groups configuration attributes for notification '" + name + "'.")
+ }
+
+ users = host.vars.notification.mail.users
+ user_groups = host.vars.notification.mail.groups
+
+ assign where host.vars.notification.mail && typeof(host.vars.notification.mail) == Dictionary
+}
+
+apply Notification "sms-host-notification" to Host {
+ [...]
+
+ /* Log which host does not specify required user/user_groups attributes. This will fail immediately during config validation and help a lot. */
+ if (len(host.vars.notification.sms.users) == 0 && len(host.vars.notification.sms.user_groups) == 0) {
+ log(LogCritical, "config", "Host '" + host.name + "' does not specify required user/user_groups configuration attributes for notification '" + name + "'.")
+ }
+
+ users = host.vars.notification.sms.users
+ user_groups = host.vars.notification.sms.groups
+
+ assign where host.vars.notification.sms && typeof(host.vars.notification.sms) == Dictionary
+}
+```
+
+The example above uses [typeof](18-library-reference.md#global-functions-typeof) as a safety function to ensure that
+the `mail` key really provides a dictionary as value. Otherwise
+the configuration validation could fail if an admin adds something
+like this on another host:
+
+```
+ vars.notification.mail = "yes"
+```
+
+
+You can also do a more fine granular assignment on the service object:
+
+```
+apply Service "http" {
+ [...]
+
+ vars.notification["mail"] = {
+ groups = [ "icingaadmins" ]
+ users = [ "icingaadmin" ]
+ }
+
+ [...]
+}
+```
+
+This notification apply rule is different from the one above. The service
+notification users and groups are taken from the service and, if not set,
+from the host object. A default user is set as fallback, too.
+
+```
+apply Notification "mail-service-notification" to Service {
+ [...]
+
+ if (service.vars.notification.mail.users) {
+ users = service.vars.notification.mail.users
+ } else if (host.vars.notification.mail.users) {
+ users = host.vars.notification.mail.users
+ } else {
+ /* Default user who receives everything. */
+ users = [ "icingaadmin" ]
+ }
+
+ if (service.vars.notification.mail.groups) {
+ user_groups = service.vars.notification.mail.groups
+ } else if (host.vars.notification.mail.groups) {
+ user_groups = host.vars.notification.mail.groups
+ }
+
+ assign where ( host.vars.notification.mail && typeof(host.vars.notification.mail) == Dictionary ) || ( service.vars.notification.mail && typeof(service.vars.notification.mail) == Dictionary )
+}
+```
+
+### Notification Escalations <a id="notification-escalations"></a>
+
+When a problem notification is sent and a problem still exists at the time of re-notification
+you may want to escalate the problem to the next support level. A different approach
+is to configure the default notification by email, and escalate the problem via SMS
+if not already solved.
+
+You can define notification start and end times as additional configuration
+attributes making the `Notification` object a so-called `notification escalation`.
+Using templates you can share the basic notification attributes such as users or the
+`interval` (and then override them for the escalation).
+
+Using the example from above, you can define additional users who are escalated via SMS
+notifications between the start and end times.
+
+```
+object User "icinga-oncall-2nd-level" {
+ display_name = "Icinga 2nd Level"
+
+ vars.mobile = "+1 555 424642"
+}
+
+object User "icinga-oncall-1st-level" {
+ display_name = "Icinga 1st Level"
+
+ vars.mobile = "+1 555 424642"
+}
+```
+
+Define an additional [NotificationCommand](03-monitoring-basics.md#notification-commands) for SMS notifications.
+
+> **Note**
+>
+> The example is not complete as there are many different SMS providers.
+> Please note that sending SMS notifications will require an SMS provider
+> or local hardware with an active SIM card.
+
+```
+object NotificationCommand "sms-notification" {
+ command = [
+ PluginDir + "/send_sms_notification",
+ "$mobile$",
+ "..."
+  ]
+}
+```
+
+The two new notification escalations are added onto the local host
+and its service `ping4` using the `generic-notification` template.
+The user `icinga-oncall-2nd-level` will get notified by SMS (`sms-notification`
+command) after `30m` until `1h`.
+
+> **Note**
+>
+> The `interval` was set to 15m in the `generic-notification`
+> template example. Lower that value in your escalations by using a secondary
+> template or by overriding the attribute directly in the `notifications` array
+> position for `escalation-sms-2nd-level`.
+
+If the problem is neither resolved nor acknowledged (which would prevent further notifications),
+the `escalation-sms-1st-level` user will be escalated `1h` after the initial problem was
+notified, but only for one hour (`2h` as `end` key for the `times` dictionary).
+
+```
+apply Notification "mail" to Service {
+ import "generic-notification"
+
+ command = "mail-notification"
+ users = [ "icingaadmin" ]
+
+ assign where service.name == "ping4"
+}
+
+apply Notification "escalation-sms-2nd-level" to Service {
+ import "generic-notification"
+
+ command = "sms-notification"
+ users = [ "icinga-oncall-2nd-level" ]
+
+ times = {
+ begin = 30m
+ end = 1h
+ }
+
+ assign where service.name == "ping4"
+}
+
+apply Notification "escalation-sms-1st-level" to Service {
+ import "generic-notification"
+
+ command = "sms-notification"
+ users = [ "icinga-oncall-1st-level" ]
+
+ times = {
+ begin = 1h
+ end = 2h
+ }
+
+ assign where service.name == "ping4"
+}
+```
+
+### Notification Delay <a id="notification-delay"></a>
+
+Sometimes the problem in question should not be announced when the notification is due
+(the object reaching the `HARD` state), but only after a certain period. In Icinga 2
+you can use the `times` dictionary and set its `begin` key to `15m` if you want to
+postpone the notification window by 15 minutes. Leave out the `end` key -- if not set,
+Icinga 2 will not check against any end time for this notification.
+
+> **Note**
+>
+> Setting the `end` key to `0` closes the notification window immediately
+> when a problem occurs, effectively disabling the notification.
+
+Make sure to specify a relatively low notification `interval` to get notified soon enough again.
+
+```
+apply Notification "mail" to Service {
+ import "generic-notification"
+
+ command = "mail-notification"
+ users = [ "icingaadmin" ]
+
+ interval = 5m
+
+ times.begin = 15m // delay notification window
+
+ assign where service.name == "ping4"
+}
+```
+
+Also note that this mechanism doesn't take downtimes etc. into account, only
+the `HARD` state change time matters. E.g. for a problem which occurred at 3 PM, in the
+middle of a downtime from 2 PM to 4 PM, `times.begin = 2h` means 5 PM, not 6 PM.
+
+### Disable Re-notifications <a id="disable-renotification"></a>
+
+If you prefer to be notified only once, you can disable re-notifications by setting the
+`interval` attribute to `0`.
+
+```
+apply Notification "notify-once" to Service {
+ import "generic-notification"
+
+ command = "mail-notification"
+ users = [ "icingaadmin" ]
+
+ interval = 0 // disable re-notification
+
+ assign where service.name == "ping4"
+}
+```
+
+### Notification Filters by State and Type <a id="notification-filters-state-type"></a>
+
+If there are no notification state and type filter attributes defined at the `Notification`
+or `User` object, Icinga 2 assumes that all states and types are being notified.
+
+Available state and type filters for notifications are:
+
+```
+template Notification "generic-notification" {
+
+ states = [ OK, Warning, Critical, Unknown ]
+ types = [ Problem, Acknowledgement, Recovery, Custom, FlappingStart,
+ FlappingEnd, DowntimeStart, DowntimeEnd, DowntimeRemoved ]
+}
+```
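+
+State and type filters can also be restricted on the `User` object. As a hedged sketch
+(the user name and attribute values are examples), the following user would only receive
+problem and recovery notifications for `Critical` states:
+
+```
+object User "oncall-sms" {
+  display_name = "On-Call SMS User"
+  vars.mobile = "+1 555 424642"
+
+  // Only notify this user about critical problems and their recoveries.
+  states = [ Critical ]
+  types = [ Problem, Recovery ]
+}
+```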
+
+
+## Commands <a id="commands"></a>
+
+Icinga 2 uses three different command object types to specify how
+checks should be performed, notifications should be sent, and
+events should be handled.
+
+### Check Commands <a id="check-commands"></a>
+
+[CheckCommand](09-object-types.md#objecttype-checkcommand) objects define the command line
+used to execute a check.
+
+[CheckCommand](09-object-types.md#objecttype-checkcommand) objects are referenced by
+[Host](09-object-types.md#objecttype-host) and [Service](09-object-types.md#objecttype-service) objects
+using the `check_command` attribute.
+
+> **Note**
+>
+> Make sure that the [checker](11-cli-commands.md#enable-features) feature is enabled in order to
+> execute checks.
+
+#### Integrate the Plugin with a CheckCommand Definition <a id="command-plugin-integration"></a>
+
+Unless you have done so already, download your check plugin and put it
+into the [PluginDir](04-configuration.md#constants-conf) directory. The following example uses the
+`check_mysql` plugin contained in the Monitoring Plugins package.
+
+The plugin path and all command arguments are specified as a list of
+double-quoted string arguments for proper shell escaping.
+
+Call the `check_mysql` plugin with the `--help` parameter to see
+all available options. Our example defines warning (`-w`) and
+critical (`-c`) thresholds.
+
+```
+icinga@icinga2 $ /usr/lib64/nagios/plugins/check_mysql --help
+...
+This program tests connections to a MySQL server
+
+Usage:
+check_mysql [-d database] [-H host] [-P port] [-s socket]
+[-u user] [-p password] [-S] [-l] [-a cert] [-k key]
+[-C ca-cert] [-D ca-dir] [-L ciphers] [-f optfile] [-g group]
+```
+
+The next step is to understand how [command parameters](03-monitoring-basics.md#command-passing-parameters)
+are being passed from a host or service object, and add a [CheckCommand](09-object-types.md#objecttype-checkcommand)
+definition based on these required parameters and/or default values.
+
+Please continue reading in the [plugins section](05-service-monitoring.md#service-monitoring-plugins) for additional integration examples.
+
+#### Passing Check Command Parameters from Host or Service <a id="command-passing-parameters"></a>
+
+Check command parameters are defined as custom variables which can be accessed as runtime macros
+by the executed check command.
+
+The check command parameters for ITL provided plugin check command definitions are documented
+[here](10-icinga-template-library.md#icinga-template-library), for example
+[disk](10-icinga-template-library.md#plugin-check-command-disk).
+
+In order to practice passing command parameters you should [integrate your own plugin](03-monitoring-basics.md#command-plugin-integration).
+
+The following example will use `check_mysql` provided by the [Monitoring Plugins](https://www.monitoring-plugins.org/).
+
+Define the default check command custom variables, for example `mysql_user` and `mysql_password`
+(freely definable naming schema) and optionally their default threshold values. You can
+then use these custom variables as runtime macros for [command arguments](03-monitoring-basics.md#command-arguments)
+on the command line.
+
+> **Tip**
+>
+> Use the command name as a common prefix for your command arguments to increase
+> readability. `mysql_user` conveys the context better than just
+> `user` as argument.
+
+The default custom variables can be overridden by the custom variables
+defined in the host or service using the check command `my-mysql`. The custom variables
+can also be inherited from a parent template using additive inheritance (`+=`).
+
+```
+# vim /etc/icinga2/conf.d/commands.conf
+
+object CheckCommand "my-mysql" {
+ command = [ PluginDir + "/check_mysql" ] //constants.conf -> const PluginDir
+
+ arguments = {
+ "-H" = "$mysql_host$"
+ "-u" = {
+ required = true
+ value = "$mysql_user$"
+ }
+ "-p" = "$mysql_password$"
+ "-P" = "$mysql_port$"
+ "-s" = "$mysql_socket$"
+ "-a" = "$mysql_cert$"
+ "-d" = "$mysql_database$"
+ "-k" = "$mysql_key$"
+ "-C" = "$mysql_ca_cert$"
+ "-D" = "$mysql_ca_dir$"
+ "-L" = "$mysql_ciphers$"
+ "-f" = "$mysql_optfile$"
+ "-g" = "$mysql_group$"
+ "-S" = {
+ set_if = "$mysql_check_slave$"
+ description = "Check if the slave thread is running properly."
+ }
+ "-l" = {
+ set_if = "$mysql_ssl$"
+ description = "Use ssl encryption"
+ }
+ }
+
+ vars.mysql_check_slave = false
+ vars.mysql_ssl = false
+ vars.mysql_host = "$address$"
+}
+```
+
+The check command definition also sets `mysql_host` to the `$address$` default value. You can override
+this command parameter if, for example, your MySQL host is not running at the same server's IP address.
+
+Make sure to pass all required command parameters, such as `mysql_user`, `mysql_password` and `mysql_database`.
+`MysqlUsername` and `MysqlPassword` are specified as [global constants](04-configuration.md#constants-conf)
+in this example.
+
+```
+# vim /etc/icinga2/conf.d/services.conf
+
+apply Service "mysql-icinga-db-health" {
+ import "generic-service"
+
+ check_command = "my-mysql"
+
+ vars.mysql_user = MysqlUsername
+ vars.mysql_password = MysqlPassword
+
+ vars.mysql_database = "icinga"
+ vars.mysql_host = "192.168.33.11"
+
+ assign where match("icinga2*", host.name)
+ ignore where host.vars.no_health_check == true
+}
+```
+
+
+Take a different example: The example host configuration in [hosts.conf](04-configuration.md#hosts-conf)
+also applies an `ssh` service check. Your host's ssh port is not the default `22`, but set to `2022`.
+You can pass the command parameter as custom variable `ssh_port` directly inside the service apply rule
+inside [services.conf](04-configuration.md#services-conf):
+
+```
+apply Service "ssh" {
+ import "generic-service"
+
+ check_command = "ssh"
+ vars.ssh_port = 2022 //custom command parameter
+
+ assign where (host.address || host.address6) && host.vars.os == "Linux"
+}
+```
+
+If you prefer this being configured at the host instead of the service, modify the host configuration
+object instead. The runtime macro resolving order is described [here](03-monitoring-basics.md#macro-evaluation-order).
+
+```
+object Host "icinga2-agent1.localdomain {
+...
+ vars.ssh_port = 2022
+}
+```
+
+#### Passing Check Command Parameters Using Apply For <a id="command-passing-parameters-apply-for"></a>
+
+The host `my-server` with the generated services from the `basic-partitions` dictionary (see
+[apply for](03-monitoring-basics.md#using-apply-for) for details) checks a basic set of disk partitions
+with modified custom variables (warning thresholds at `10%`, critical thresholds at `5%`
+free disk space).
+
+The custom variable `disk_partitions` can either hold a single string or an array of
+string values for passing multiple partitions to the `check_disk` check plugin.
+
+```
+object Host "my-server" {
+ import "generic-host"
+ address = "127.0.0.1"
+ address6 = "::1"
+
+ vars.local_disks["basic-partitions"] = {
+ disk_partitions = [ "/", "/tmp", "/var", "/home" ]
+ }
+}
+
+apply Service for (disk => config in host.vars.local_disks) {
+ import "generic-service"
+ check_command = "my-disk"
+
+ vars += config
+
+ vars.disk_wfree = "10%"
+ vars.disk_cfree = "5%"
+}
+```
+
+
+More details on using arrays in custom variables can be found in
+[this chapter](03-monitoring-basics.md#custom-variables).
+
+
+#### Command Arguments <a id="command-arguments"></a>
+
+In addition to the short `command` array specified in the command object,
+it is advised to define plugin/script parameters in the `arguments`
+dictionary attribute.
+
+The value of each `--parameter` key itself is a dictionary with additional
+keys. They allow you to create generic command objects and also serve documentation
+purposes, e.g. by copying the plugin's help text into the `description` field.
+The Icinga Director uses this field to show the argument's purpose when selecting it.
+
+```
+ arguments = {
+ "--parameter" = {
+ description = "..."
+ value = "..."
+ }
+ }
+```
+
+Each argument is optional by default and is omitted if
+the value is not set.
+
+Learn more about integrating plugins with CheckCommand
+objects in [this chapter](05-service-monitoring.md#service-monitoring-plugin-checkcommand).
+
+There are additional possibilities for creating a command only once,
+with different parameters and arguments, shown below.
+
+##### Command Arguments: Value <a id="command-arguments-value"></a>
+
+In order to find out about the command arguments, call the plugin's help
+or consult its README.
+
+```
+./check_systemd.py --help
+
+...
+
+ -u UNIT, --unit UNIT Name of the systemd unit that is beeing tested.
+```
+
+Whenever the long parameter name is available, prefer this over the short one.
+
+```
+ arguments = {
+ "--unit" = {
+
+ }
+ }
+```
+
+Define a unique `prefix` for the command's specific arguments. Best practice is to follow this schema:
+
+```
+<command name>_<parameter name>
+```
+
+Therefore use `systemd_` as prefix, and use the long plugin parameter name `unit` inside the [runtime macro](03-monitoring-basics.md#runtime-macros)
+syntax.
+
+```
+ arguments = {
+ "--unit" = {
+ value = "$systemd_unit$"
+ }
+ }
+```
+
+In order to specify a default value, specify
+a [custom variable](03-monitoring-basics.md#custom-variables) inside
+the CheckCommand object.
+
+```
+ vars.systemd_unit = "icinga2"
+```
+
+This value can be overridden from the host/service
+object as command parameters.
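+
+As a hedged sketch, assuming a `systemd` CheckCommand built from the `--unit` argument
+above, a service could override the default like this (the service name and assign filter
+are examples):
+
+```
+apply Service "systemd-sshd" {
+  import "generic-service"
+
+  check_command = "systemd"   // assumed CheckCommand using the "--unit" argument above
+  vars.systemd_unit = "sshd"  // overrides the default "icinga2" from the CheckCommand
+
+  assign where host.vars.os == "Linux"
+}
+```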
+
+
+##### Command Arguments: Description <a id="command-arguments-description"></a>
+
+Best practice, also inside the [ITL](10-icinga-template-library.md#icinga-template-library), is to always
+copy the command parameter help output into the `description`
+field of your check command.
+
+Learn more about integrating plugins with CheckCommand
+objects in [this chapter](05-service-monitoring.md#service-monitoring-plugin-checkcommand).
+
+With the [example above](03-monitoring-basics.md#command-arguments-value),
+inspect the parameter's help text.
+
+```
+./check_systemd.py --help
+
+...
+
+ -u UNIT, --unit UNIT Name of the systemd unit that is beeing tested.
+```
+
+Copy this into the command arguments `description` entry.
+
+```
+ arguments = {
+ "--unit" = {
+ value = "$systemd_unit$"
+ description = "Name of the systemd unit that is beeing tested."
+ }
+ }
+```
+
+##### Command Arguments: Required <a id="command-arguments-required"></a>
+
+Specifies whether this command argument is required or not. By
+default, all arguments are optional.
+
+> **Tip**
+>
+> Good plugins provide optional parameters in square brackets, e.g. `[-w SECONDS]`.
+
+The `required` field can be toggled with a [boolean](17-language-reference.md#boolean-literals) value.
+
+```
+ arguments = {
+ "--host" = {
+ value = "..."
+ description = "..."
+ required = true
+ }
+ }
+```
+
+Whenever the check is executed and a required argument is missing, Icinga
+logs an error. This allows you to debug configuration errors more easily,
+instead of having to interpret sometimes unreadable plugin errors caused by
+missing parameters.
+
+##### Command Arguments: Skip Key <a id="command-arguments-skip-key"></a>
+
+The `arguments` attribute requires a key, empty values are not allowed.
+To overcome this for parameters which don't need the name in front of
+the value, use the `skip_key` [boolean](17-language-reference.md#boolean-literals) toggle.
+
+```
+ command = [ PrefixDir + "/bin/icingacli", "businessprocess", "process", "check" ]
+
+ arguments = {
+ "--process" = {
+ value = "$icingacli_businessprocess_process$"
+ description = "Business process to monitor"
+ skip_key = true
+ required = true
+ order = -1
+ }
+ }
+```
+
+The service specifies the [custom variable](03-monitoring-basics.md#custom-variables) `icingacli_businessprocess_process`.
+
+```
+ vars.icingacli_businessprocess_process = "bp-shop-web"
+```
+
+This results in this command line without the `--process` parameter:
+
+```bash
+'/bin/icingacli' 'businessprocess' 'process' 'check' 'bp-shop-web'
+```
+
+You can use this method to put everything into the `arguments` attribute
+in a defined order and without keys. This avoids additional entries in the
+`command` attribute too.
+
+
+##### Command Arguments: Set If <a id="command-arguments-set-if"></a>
+
+This can be used for the following scenarios:
+
+**Parameters without value, e.g. `--sni`.**
+
+```
+ command = [ PluginDir + "/check_http"]
+
+ arguments = {
+ "--sni" = {
+ set_if = "$http_sni$"
+ }
+ }
+```
+
+Whenever a host/service object sets the `http_sni` [custom variable](03-monitoring-basics.md#custom-variables)
+to `true`, the parameter is added to the command line.
+
+```bash
+'/usr/lib64/nagios/plugins/check_http' '--sni'
+```
+
+[Numeric](17-language-reference.md#numeric-literals) values are allowed too.
+
+**Parameters with value, but additionally controlled with an extra custom variable boolean flag.**
+
+The following example is taken from the [postgres](10-icinga-template-library.md#plugin-contrib-command-postgres) CheckCommand. The host
+parameter should use a `value`, but only when the `postgres_unixsocket`
+[custom variable](03-monitoring-basics.md#custom-variables) is set to `false`.
+
+Note: `set_if` is using a runtime lambda function because the value
+is evaluated at runtime. This is explained in [this chapter](08-advanced-topics.md#use-functions-object-config).
+
+```
+ command = [ PluginContribDir + "/check_postgres.pl" ]
+
+ arguments = {
+ "-H" = {
+ value = "$postgres_host$"
+ set_if = {{ macro("$postgres_unixsocket$") == false }}
+ description = "hostname(s) to connect to; defaults to none (Unix socket)"
+ }
+ }
+```
+
+An executed check for this host and its services ...
+
+```
+object Host "postgresql-cluster" {
+ // ...
+
+ vars.postgres_host = "192.168.56.200"
+ vars.postgres_unixsocket = false
+}
+```
+
+... uses the following command line:
+
+```bash
+'/usr/lib64/nagios/plugins/check_postgres.pl' '-H' '192.168.56.200'
+```
+
+Host/service objects which set `postgres_unixsocket` to `true` don't add the `-H` parameter
+and its value to the command line.
+
+References: [abbreviated lambda syntax](17-language-reference.md#nullary-lambdas), [macro](18-library-reference.md#scoped-functions-macro).
+
+##### Command Arguments: Order <a id="command-arguments-order"></a>
+
+Plugins may require parameters in a specific order: one after the other,
+or e.g. one parameter always in the first position.
+
+```
+ arguments = {
+ "--first" = {
+ value = "..."
+ description = "..."
+ order = -5
+ }
+ "--second" = {
+ value = "..."
+ description = "..."
+ order = -4
+ }
+ "--last" = {
+ value = "..."
+ description = "..."
+ order = 99
+ }
+ }
+```
+
+Keep in mind that positional arguments need to be tested thoroughly.
+
+##### Command Arguments: Repeat Key <a id="command-arguments-repeat-key"></a>
+
+Parameters can use [Array](17-language-reference.md#array) as value type. Whenever Icinga encounters
+an array, it repeats the parameter key and each value element by default.
+
+```
+ command = [ NscpPath + "\\nscp.exe", "client" ]
+
+ arguments = {
+ "-a" = {
+ value = "$nscp_arguments$"
+ description = "..."
+ repeat_key = true
+ }
+ }
+```
+
+On a host/service object, specify the `nscp_arguments` [custom variable](03-monitoring-basics.md#custom-variables)
+as an array.
+
+```
+ vars.nscp_arguments = [ "exclude=sppsvc", "exclude=ShellHWDetection" ]
+```
+
+This translates into the following command line:
+
+```
+nscp.exe 'client' '-a' 'exclude=sppsvc' '-a' 'exclude=ShellHWDetection'
+```
+
+If the plugin requires you to pass the list without repeating the key,
+set `repeat_key = false` in the argument definition.
+
+```
+ command = [ NscpPath + "\\nscp.exe", "client" ]
+
+ arguments = {
+ "-a" = {
+ value = "$nscp_arguments$"
+ description = "..."
+ repeat_key = false
+ }
+ }
+```
+
+This translates into the following command line:
+
+```
+nscp.exe 'client' '-a' 'exclude=sppsvc' 'exclude=ShellHWDetection'
+```
+
+
+##### Command Arguments: Key <a id="command-arguments-key"></a>
+
+The `arguments` attribute requires unique keys. Sometimes you'll
+need the resulting command line to repeat the same key name.
+Therefore you can explicitly override an argument's key.
+
+```
+arguments = {
+ "--key1" = {
+ value = "..."
+ key = "-specialkey"
+ }
+ "--key2" = {
+ value = "..."
+ key = "-specialkey"
+ }
+}
+```
+
+This results in the following command line:
+
+```
+ '-specialkey' '...' '-specialkey' '...'
+```
+
+#### Environment Variables <a id="command-environment-variables"></a>
+
+The `env` command object attribute specifies a list of environment variables with values calculated
+from custom variables which should be exported as environment variables prior to executing the command.
+
+This is useful for example for hiding sensitive information on the command line output
+when passing credentials to database checks:
+
+```
+object CheckCommand "mysql" {
+ command = [ PluginDir + "/check_mysql" ]
+
+ arguments = {
+ "-H" = "$mysql_address$"
+ "-d" = "$mysql_database$"
+ }
+
+ vars.mysql_address = "$address$"
+ vars.mysql_database = "icinga"
+ vars.mysql_user = "icinga_check"
+ vars.mysql_pass = "password"
+
+ env.MYSQLUSER = "$mysql_user$"
+ env.MYSQLPASS = "$mysql_pass$"
+}
+```
+
+The executed command line visible with `ps` or `top` looks like this and hides
+the database credentials in the user's environment.
+
+```bash
+/usr/lib/nagios/plugins/check_mysql -H 192.168.56.101 -d icinga
+```
+
+> **Note**
+>
+> If the CheckCommand also supports setting the parameter on the command line,
+> make sure to use a different name for the custom variable. Otherwise Icinga 2
+> adds the command line parameter as well.
+
+If a specific CheckCommand object provided with the [Icinga Template Library](10-icinga-template-library.md#icinga-template-library)
+needs additional environment variables, you can import it into a new custom
+CheckCommand object and add additional `env` keys. Example for the [mysql_health](10-icinga-template-library.md#plugin-contrib-command-mysql_health)
+CheckCommand:
+
+```
+object CheckCommand "mysql_health_env" {
+ import "mysql_health"
+
+ // https://labs.consol.de/nagios/check_mysql_health/
+ env.NAGIOS__SERVICEMYSQL_USER = "$mysql_health_env_username$"
+ env.NAGIOS__SERVICEMYSQL_PASS = "$mysql_health_env_password$"
+}
+```
+
+Specify the custom variables `mysql_health_env_username` and `mysql_health_env_password`
+in the service object then.
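+
+A sketch of such a service definition could look like the following; the mode, credentials
+and assign filter are example values:
+
+```
+apply Service "mysql-health" {
+  import "generic-service"
+
+  check_command = "mysql_health_env"
+  vars.mysql_health_mode = "connection-time"
+
+  // Picked up by the env.* entries of the custom CheckCommand above.
+  vars.mysql_health_env_username = "icinga_check"
+  vars.mysql_health_env_password = "secret"
+
+  assign where host.vars.mysql_server == true
+}
+```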
+
+> **Note**
+>
+> Keep in mind that the values are still visible with the [debug console](11-cli-commands.md#cli-command-console)
+> and the inspect mode in the [Icinga Director](https://icinga.com/docs/director/latest/).
+
+You can also set global environment variables in the application's
+sysconfig configuration file, e.g. `HOME` or specific library paths
+for Oracle. Beware that these environment variables can be used
+by any CheckCommand object and executed plugin and can leak sensitive
+information.
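+
+As a hedged example, on RHEL-based systems this sysconfig file is typically `/etc/sysconfig/icinga2`
+(on Debian/Ubuntu `/etc/default/icinga2`); the Oracle paths below are placeholders:
+
+```
+# /etc/sysconfig/icinga2 (or /etc/default/icinga2 on Debian/Ubuntu)
+# Example library path for Oracle based plugins -- adjust to your installation.
+ORACLE_HOME=/usr/lib/oracle/19.6/client64
+LD_LIBRARY_PATH=/usr/lib/oracle/19.6/client64/lib
+```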
+
+### Notification Commands <a id="notification-commands"></a>
+
+[NotificationCommand](09-object-types.md#objecttype-notificationcommand)
+objects define how notifications are delivered to external interfaces
+(email, XMPP, IRC, Twitter, etc.).
+[NotificationCommand](09-object-types.md#objecttype-notificationcommand)
+objects are referenced by [Notification](09-object-types.md#objecttype-notification)
+objects using the `command` attribute.
+
+> **Note**
+>
+> Make sure that the [notification](11-cli-commands.md#enable-features) feature is enabled
+> in order to execute notification commands.
+
+While it's possible to specify an entire notification command right
+in the NotificationCommand object, it is generally advisable to create a
+shell script in the `/etc/icinga2/scripts` directory and have the
+NotificationCommand object refer to that.
+
+A fresh Icinga 2 install comes with two example scripts for host
+and service notifications by email. Based on the Icinga 2 runtime macros
+(such as `$service.output$` for the current check output) it's possible
+to send email to the user(s) associated with the notification itself
+(`$user.email$`). Feel free to take these scripts as a starting point
+for your own individual notification solution - and keep in mind that
+nearly everything is technically possible.
+
+Information needed to generate notifications is passed to the scripts as
+arguments. The NotificationCommand objects `mail-host-notification` and
+`mail-service-notification` correspond to the shell scripts
+`mail-host-notification.sh` and `mail-service-notification.sh` in
+`/etc/icinga2/scripts` and define default values for arguments. These
+defaults can always be overwritten locally.
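+
+For instance, a sketch of an apply rule overriding some of these defaults locally; it assumes
+the `mail-service-notification` command shown above, and the values are examples only:
+
+```
+apply Notification "mail-service-custom" to Service {
+  command = "mail-service-notification"
+  users = [ "icingaadmin" ]
+
+  // Override the script defaults locally.
+  vars.notification_from = "Icinga 2 Monitoring <icinga@example.com>"
+  vars.notification_icingaweb2url = "https://icinga.example.com/icingaweb2"
+  vars.notification_logtosyslog = true
+
+  assign where service.name == "ping4"
+}
+```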
+
+> **Note**
+>
+> This example requires the `mail` binary installed on the Icinga 2
+> master.
+>
+> Depending on the distribution, you need a local mail transfer
+> agent (MTA) such as Postfix, Exim or Sendmail in order
+> to send emails.
+>
+> These tools typically provide the `mail` binary executed
+> by the notification scripts below.
+
+#### mail-host-notification <a id="mail-host-notification"></a>
+
+The `mail-host-notification` NotificationCommand object uses the
+example notification script located in `/etc/icinga2/scripts/mail-host-notification.sh`.
+
+Here is a quick overview of the arguments that can be used. See also [host runtime
+macros](03-monitoring-basics.md#host-runtime-macros) for further
+information.
+
+ Name | Description
+ -------------------------------|---------------------------------------
+ `notification_date` | **Required.** Date and time. Defaults to `$icinga.long_date_time$`.
+ `notification_hostname` | **Required.** The host's `FQDN`. Defaults to `$host.name$`.
+ `notification_hostdisplayname` | **Required.** The host's display name. Defaults to `$host.display_name$`.
+ `notification_hostoutput` | **Required.** Output from host check. Defaults to `$host.output$`.
+ `notification_useremail` | **Required.** The notification's recipient(s). Defaults to `$user.email$`.
+ `notification_hoststate` | **Required.** Current state of host. Defaults to `$host.state$`.
+ `notification_type` | **Required.** Type of notification. Defaults to `$notification.type$`.
+ `notification_address` | **Optional.** The host's IPv4 address. Defaults to `$address$`.
+ `notification_address6` | **Optional.** The host's IPv6 address. Defaults to `$address6$`.
+ `notification_author` | **Optional.** Comment author. Defaults to `$notification.author$`.
+ `notification_comment` | **Optional.** Comment text. Defaults to `$notification.comment$`.
+ `notification_from` | **Optional.** Define a valid From: string (e.g. `"Icinga 2 Host Monitoring <icinga@example.com>"`). Requires `GNU mailutils` (Debian/Ubuntu) or `mailx` (RHEL/SUSE).
+ `notification_icingaweb2url` | **Optional.** Define URL to your Icinga Web 2 (e.g. `"https://www.example.com/icingaweb2"`)
+ `notification_logtosyslog` | **Optional.** Set `true` to log notification events to syslog; useful for debugging. Defaults to `false`.
+
+#### mail-service-notification <a id="mail-service-notification"></a>
+
+The `mail-service-notification` NotificationCommand object uses the
+example notification script located in `/etc/icinga2/scripts/mail-service-notification.sh`.
+
+Here is a quick overview of the arguments that can be used. See also [service runtime
+macros](03-monitoring-basics.md#service-runtime-macros) for further
+information.
+
+ Name | Description
+ ----------------------------------|---------------------------------------
+ `notification_date` | **Required.** Date and time. Defaults to `$icinga.long_date_time$`.
+ `notification_hostname` | **Required.** The host's `FQDN`. Defaults to `$host.name$`.
+ `notification_servicename` | **Required.** The service name. Defaults to `$service.name$`.
+ `notification_hostdisplayname` | **Required.** Host display name. Defaults to `$host.display_name$`.
+ `notification_servicedisplayname` | **Required.** Service display name. Defaults to `$service.display_name$`.
+ `notification_serviceoutput` | **Required.** Output from service check. Defaults to `$service.output$`.
+ `notification_useremail` | **Required.** The notification's recipient(s). Defaults to `$user.email$`.
+ `notification_servicestate`       | **Required.** Current state of the service. Defaults to `$service.state$`.
+ `notification_type` | **Required.** Type of notification. Defaults to `$notification.type$`.
+ `notification_address` | **Optional.** The host's IPv4 address. Defaults to `$address$`.
+ `notification_address6` | **Optional.** The host's IPv6 address. Defaults to `$address6$`.
+ `notification_author` | **Optional.** Comment author. Defaults to `$notification.author$`.
+ `notification_comment` | **Optional.** Comment text. Defaults to `$notification.comment$`.
+ `notification_from` | **Optional.** Define a valid From: string (e.g. `"Icinga 2 Host Monitoring <icinga@example.com>"`). Requires `GNU mailutils` (Debian/Ubuntu) or `mailx` (RHEL/SUSE).
+ `notification_icingaweb2url` | **Optional.** Define URL to your Icinga Web 2 (e.g. `"https://www.example.com/icingaweb2"`)
+ `notification_logtosyslog` | **Optional.** Set `true` to log notification events to syslog; useful for debugging. Defaults to `false`.
+
+### Event Commands <a id="event-commands"></a>
+
+Unlike notifications, event commands for hosts/services are called on every
+check execution if one of these conditions matches:
+
+* The host/service is in a [soft state](03-monitoring-basics.md#hard-soft-states)
+* The host/service state changes into a [hard state](03-monitoring-basics.md#hard-soft-states)
+* The host/service state recovers from a [soft or hard state](03-monitoring-basics.md#hard-soft-states) to [OK](03-monitoring-basics.md#service-states)/[Up](03-monitoring-basics.md#host-states)
+
+[EventCommand](09-object-types.md#objecttype-eventcommand) objects are referenced by
+[Host](09-object-types.md#objecttype-host) and [Service](09-object-types.md#objecttype-service) objects
+with the `event_command` attribute.
+
+Therefore the `EventCommand` object should define a command line
+evaluating the current service state and other service runtime attributes
+available through runtime variables. Runtime macros such as `$service.state_type$`
+and `$service.state$` will be processed by Icinga 2 and allow for fine-grained
+control over the triggered events.
+
+If the host/service is located on a client as [command endpoint](06-distributed-monitoring.md#distributed-monitoring-top-down-command-endpoint)
+the event command will be executed on the client itself (similar to the check
+command).
+
+A common use case scenario is a failing HTTP check which requires an immediate
+restart via event command. Another example would be an application that is not
+responding and therefore requires a restart. You can also use event handlers
+to forward more details on state changes and events than the typical notification
+alerts provide.
+
+#### Use Event Commands to Send Information from the Master <a id="event-command-send-information-from-master"></a>
+
+This example sends a web request from the master node to an external tool
+for every event triggered on a `businessprocess` service.
+
+Define an [EventCommand](09-object-types.md#objecttype-eventcommand)
+object `send_to_businesstool` which sends state changes to the external tool.
+
+```
+object EventCommand "send_to_businesstool" {
+ command = [
+ "/usr/bin/curl",
+ "-s",
+ "-X PUT"
+ ]
+
+ arguments = {
+ "-H" = {
+ value ="$businesstool_url$"
+ skip_key = true
+ }
+ "-d" = "$businesstool_message$"
+ }
+
+ vars.businesstool_url = "http://localhost:8080/businesstool"
+ vars.businesstool_message = "$host.name$ $service.name$ $service.state$ $service.state_type$ $service.check_attempt$"
+}
+```
+
+Set the `event_command` attribute to `send_to_businesstool` on the Service.
+
+```
+object Service "businessprocess" {
+ host_name = "businessprocess"
+
+ check_command = "icingacli-businessprocess"
+ vars.icingacli_businessprocess_process = "icinga"
+ vars.icingacli_businessprocess_config = "training"
+
+ event_command = "send_to_businesstool"
+}
+```
+
+In order to test this scenario you can run:
+
+```bash
+nc -l 8080
+```
+
+This allows you to catch the web request. You can also enable the [debug log](15-troubleshooting.md#troubleshooting-enable-debug-output)
+and search for the event command execution log message.
+
+```bash
+tail -f /var/log/icinga2/debug.log | grep EventCommand
+```
+
+Feed in a check result via REST API action [process-check-result](12-icinga2-api.md#icinga2-api-actions-process-check-result)
+or via Icinga Web 2.
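+
+For example, a CRITICAL result for the `businessprocess` service can be submitted with `curl`;
+the API credentials and endpoint are assumptions and need to match your API user configuration:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/process-check-result' \
+ -d '{ "type": "Service", "filter": "host.name==\"businessprocess\" && service.name==\"businessprocess\"", "exit_status": 2, "plugin_output": "Simulated CRITICAL state", "pretty": true }'
+```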
+
+Expected Result:
+
+```
+# nc -l 8080
+PUT /businesstool HTTP/1.1
+User-Agent: curl/7.29.0
+Host: localhost:8080
+Accept: */*
+Content-Length: 47
+Content-Type: application/x-www-form-urlencoded
+
+businessprocess businessprocess CRITICAL SOFT 1
+```
+
+#### Use Event Commands to Restart Service Daemon via Command Endpoint on Linux <a id="event-command-restart-service-daemon-command-endpoint-linux"></a>
+
+This example triggers a restart of the `httpd` service on the local system
+when the `procs` service check executed via Command Endpoint fails. It only
+triggers if the service state is `Critical` and attempts to restart the
+service before a notification is sent.
+
+Requirements:
+
+* Icinga 2 as client on the remote node
+* icinga user with sudo permissions to the httpd daemon
+
+Example on CentOS 7:
+
+```
+# visudo
+icinga ALL=(ALL) NOPASSWD: /usr/bin/systemctl restart httpd
+```
+
+Note: Distributions might use a different name. On Debian/Ubuntu the service is called `apache2`.
+
+Define an [EventCommand](09-object-types.md#objecttype-eventcommand) object `restart_service`
+which allows to trigger local service restarts. Put it into a [global zone](06-distributed-monitoring.md#distributed-monitoring-global-zone-config-sync)
+to sync its configuration to all clients.
+
+```
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.d/global-templates/eventcommands.conf
+
+object EventCommand "restart_service" {
+ command = [ PluginDir + "/restart_service" ]
+
+ arguments = {
+ "-s" = "$service.state$"
+ "-t" = "$service.state_type$"
+ "-a" = "$service.check_attempt$"
+ "-S" = "$restart_service$"
+ }
+
+ vars.restart_service = "$procs_command$"
+}
+```
+
+This event command triggers the following script which restarts the service.
+The script only restarts the service if the service state is `CRITICAL`. Warning and Unknown states
+are ignored as they do not indicate an immediate failure.
+
+```
+[root@icinga2-agent1.localdomain /]# vim /usr/lib64/nagios/plugins/restart_service
+
+#!/bin/bash
+
+while getopts "s:t:a:S:" opt; do
+ case $opt in
+ s)
+ servicestate=$OPTARG
+ ;;
+ t)
+ servicestatetype=$OPTARG
+ ;;
+ a)
+ serviceattempt=$OPTARG
+ ;;
+ S)
+ service=$OPTARG
+ ;;
+ esac
+done
+
+if ( [ -z $servicestate ] || [ -z $servicestatetype ] || [ -z $serviceattempt ] || [ -z $service ] ); then
+ echo "USAGE: $0 -s servicestate -z servicestatetype -a serviceattempt -S service"
+ exit 3;
+else
+ # Only restart on the third attempt of a critical event
+ if ( [ $servicestate == "CRITICAL" ] && [ $servicestatetype == "SOFT" ] && [ $serviceattempt -eq 3 ] ); then
+ sudo /usr/bin/systemctl restart $service
+ fi
+fi
+
+[root@icinga2-agent1.localdomain /]# chmod +x /usr/lib64/nagios/plugins/restart_service
+```
+
+Add a service on the master node which is executed via command endpoint on the client.
+Set the `event_command` attribute to `restart_service`, the name of the previously defined
+EventCommand object.
+
+```
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.d/master/icinga2-agent1.localdomain.conf
+
+object Service "Process httpd" {
+ check_command = "procs"
+ event_command = "restart_service"
+ max_check_attempts = 4
+
+ host_name = "icinga2-agent1.localdomain"
+ command_endpoint = "icinga2-agent1.localdomain"
+
+ vars.procs_command = "httpd"
+ vars.procs_warning = "1:10"
+ vars.procs_critical = "1:"
+}
+```
+
+In order to test this configuration just stop the `httpd` on the remote host `icinga2-agent1.localdomain`.
+
+```
+[root@icinga2-agent1.localdomain /]# systemctl stop httpd
+```
+
+You can enable the [debug log](15-troubleshooting.md#troubleshooting-enable-debug-output) and search for the
+executed command line.
+
+```
+[root@icinga2-agent1.localdomain /]# tail -f /var/log/icinga2/debug.log | grep restart_service
+```
+
+#### Use Event Commands to Restart Service Daemon via Command Endpoint on Windows <a id="event-command-restart-service-daemon-command-endpoint-windows"></a>
+
+This example triggers a restart of the `httpd` service on the remote system
+when the `service-windows` service check executed via Command Endpoint fails.
+It only triggers if the service state is `Critical` and attempts to restart the
+service before a notification is sent.
+
+Requirements:
+
+* Icinga 2 as client on the remote node
+* Icinga 2 service with permissions to execute PowerShell scripts (which is the default)
+
+Define an [EventCommand](09-object-types.md#objecttype-eventcommand) object `restart_service-windows`
+which allows to trigger local service restarts. Put it into a [global zone](06-distributed-monitoring.md#distributed-monitoring-global-zone-config-sync)
+to sync its configuration to all clients.
+
+```
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.d/global-templates/eventcommands.conf
+
+object EventCommand "restart_service-windows" {
+ command = [
+ "C:\\Windows\\SysWOW64\\WindowsPowerShell\\v1.0\\powershell.exe",
+ PluginDir + "/restart_service.ps1"
+ ]
+
+ arguments = {
+ "-ServiceState" = "$service.state$"
+ "-ServiceStateType" = "$service.state_type$"
+ "-ServiceAttempt" = "$service.check_attempt$"
+ "-Service" = "$restart_service$"
+ "; exit" = {
+ order = 99
+ value = "$$LASTEXITCODE"
+ }
+ }
+
+ vars.restart_service = "$service_win_service$"
+}
+```
+
+This event command triggers the following script which restarts the service.
+The script only restarts the service if the service state is `CRITICAL`. Warning and Unknown states
+are ignored as they do not indicate an immediate failure.
+
+Add the `restart_service.ps1` PowerShell script into `C:\Program Files\Icinga2\sbin`:
+
+```
+param(
+ [string]$Service = '',
+ [string]$ServiceState = '',
+ [string]$ServiceStateType = '',
+ [int]$ServiceAttempt = ''
+ )
+
+if (!$Service -Or !$ServiceState -Or !$ServiceStateType -Or !$ServiceAttempt) {
+ $scriptName = GCI $MyInvocation.PSCommandPath | Select -Expand Name;
+ Write-Host "USAGE: $scriptName -ServiceState servicestate -ServiceStateType servicestatetype -ServiceAttempt serviceattempt -Service service" -ForegroundColor red;
+ exit 3;
+}
+
+# Only restart on the third attempt of a critical event
+if ($ServiceState -eq "CRITICAL" -And $ServiceStateType -eq "SOFT" -And $ServiceAttempt -eq 3) {
+ Restart-Service $Service;
+}
+
+exit 0;
+```
+
+Add a service on the master node which is executed via command endpoint on the client.
+Set the `event_command` attribute to `restart_service-windows`, the name of the previously defined
+EventCommand object.
+
+```
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.d/master/icinga2-agent2.localdomain.conf
+
+object Service "Service httpd" {
+ check_command = "service-windows"
+ event_command = "restart_service-windows"
+ max_check_attempts = 4
+
+ host_name = "icinga2-agent2.localdomain"
+ command_endpoint = "icinga2-agent2.localdomain"
+
+ vars.service_win_service = "httpd"
+}
+```
+
+In order to test this configuration, just stop the `httpd` service on the remote host `icinga2-agent2.localdomain`.
+
+```
+C:> net stop httpd
+```
+
+You can enable the [debug log](15-troubleshooting.md#troubleshooting-enable-debug-output) and search for the
+executed command line in `C:\ProgramData\icinga2\var\log\icinga2\debug.log`.
+
+
+#### Use Event Commands to Restart Service Daemon via SSH <a id="event-command-restart-service-daemon-ssh"></a>
+
+This example triggers a restart of the `httpd` daemon
+via SSH when the `http` service check fails.
+
+Requirements:
+
+* SSH connection allowed (firewall, packet filters)
+* icinga user with public key authentication
+* icinga user with sudo permissions to restart the httpd daemon.
+
+Example on Debian:
+
+```
+# ls /home/icinga/.ssh/
+authorized_keys
+
+# visudo
+icinga ALL=(ALL) NOPASSWD: /etc/init.d/apache2 restart
+```
+
+Define a generic [EventCommand](09-object-types.md#objecttype-eventcommand) object `event_by_ssh`
+which can be used for all event commands triggered using SSH:
+
+```
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.d/master/local_eventcommands.conf
+
+/* pass event commands through ssh */
+object EventCommand "event_by_ssh" {
+ command = [ PluginDir + "/check_by_ssh" ]
+
+ arguments = {
+ "-H" = "$event_by_ssh_address$"
+ "-p" = "$event_by_ssh_port$"
+ "-C" = "$event_by_ssh_command$"
+ "-l" = "$event_by_ssh_logname$"
+ "-i" = "$event_by_ssh_identity$"
+ "-q" = {
+ set_if = "$event_by_ssh_quiet$"
+ }
+ "-w" = "$event_by_ssh_warn$"
+ "-c" = "$event_by_ssh_crit$"
+ "-t" = "$event_by_ssh_timeout$"
+ }
+
+ vars.event_by_ssh_address = "$address$"
+ vars.event_by_ssh_quiet = false
+}
+```
+
+The actual event command only passes the `event_by_ssh_command` attribute.
+The `event_by_ssh_service` custom variable takes care of passing the correct
+daemon name, while `test $service.state_id$ -gt 0` makes sure that the daemon
+is only restarted when the service is not in an `OK` state.
+
+```
+object EventCommand "event_by_ssh_restart_service" {
+ import "event_by_ssh"
+
+ //only restart the daemon if state > 0 (not-ok)
+ //requires sudo permissions for the icinga user
+ vars.event_by_ssh_command = "test $service.state_id$ -gt 0 && sudo systemctl restart $event_by_ssh_service$"
+}
+```
+
+
+Now set the `event_command` attribute to `event_by_ssh_restart_service` and tell it
+which service should be restarted using the `event_by_ssh_service` attribute.
+
+```
+apply Service "http" {
+ import "generic-service"
+ check_command = "http"
+
+ event_command = "event_by_ssh_restart_service"
+ vars.event_by_ssh_service = "$host.vars.httpd_name$"
+
+ //vars.event_by_ssh_logname = "icinga"
+ //vars.event_by_ssh_identity = "/home/icinga/.ssh/id_rsa.pub"
+
+ assign where host.vars.httpd_name
+}
+```
+
+Specify the `httpd_name` custom variable on the host to assign the
+service and set the event handler service.
+
+```
+object Host "remote-http-host" {
+ import "generic-host"
+ address = "192.168.1.100"
+
+ vars.httpd_name = "apache2"
+}
+```
+
+In order to test this configuration, just stop the `apache2` daemon on the remote host `remote-http-host`.
+
+```
+[root@remote-http-host /]# systemctl stop apache2
+```
+
+You can enable the [debug log](15-troubleshooting.md#troubleshooting-enable-debug-output) and search for the
+executed command line.
+
+```
+[root@icinga2-agent1.localdomain /]# tail -f /var/log/icinga2/debug.log | grep by_ssh
+```
+
+
+## Dependencies <a id="dependencies"></a>
+
+Icinga 2 uses host and service [Dependency](09-object-types.md#objecttype-dependency) objects
+for determining their network reachability.
+
+A service can depend on a host, and vice versa. A service has an implicit
+dependency (parent) on its host. A host-to-host dependency acts implicitly
+as a host parent relation.
+When dependencies are calculated, not only the immediate parent is taken into
+account but all parents are inherited.
+
+The `parent_host_name` and `parent_service_name` attributes are mandatory for
+service dependencies, `parent_host_name` is required for host dependencies.
+[Apply rules](03-monitoring-basics.md#using-apply) will allow you to
+[determine these attributes](03-monitoring-basics.md#dependencies-apply-custom-variables) in a more
+dynamic fashion if required.
+
+```
+parent_host_name = "core-router"
+parent_service_name = "uplink-port"
+```
+
+Notifications are suppressed by default if a host or service becomes unreachable.
+You can control that option by defining the `disable_notifications` attribute.
+
+```
+disable_notifications = false
+```
+
+If the dependency should be triggered in the parent object's soft state, you
+need to set `ignore_soft_states` to `false`.
+
+The dependency state filter must be defined based on the parent object being
+either a host (`Up`, `Down`) or a service (`OK`, `Warning`, `Critical`, `Unknown`).
+
+The following example will make the dependency fail and trigger it if the parent
+object is **not** in one of these states:
+
+```
+states = [ OK, Critical, Unknown ]
+```
+
+> **In other words**
+>
+> If the parent service object changes into the `Warning` state, this
+> dependency will fail and render all child objects (hosts or services) unreachable.
+
+You can determine the child's reachability by querying the `last_reachable` attribute
+via the [REST API](12-icinga2-api.md#icinga2-api).
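+
+A hedged `curl` example for such a query; the API credentials and object names are placeholders:
+
+```bash
+curl -k -s -S -u root:icinga \
+ 'https://localhost:5665/v1/objects/services/google-dns!ping4?attrs=last_reachable&pretty=1'
+```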
+
+> **Note**
+>
+> Reachability calculation depends on fresh and processed check results. If dependencies
+> disable checks for child objects, this won't work reliably.
+
+### Implicit Dependencies for Services on Host <a id="dependencies-implicit-host-service"></a>
+
+Icinga 2 automatically adds an implicit dependency for services on their host. That way
+service notifications are suppressed when a host is `DOWN` or `UNREACHABLE`. This dependency
+does not overwrite other dependencies and implicitly sets `disable_notifications = true` and
+`states = [ Up ]` for all service objects.
+
+Service checks are still executed. If you want to prevent them from happening, you can
+apply the following dependency to all services setting their host as `parent_host_name`
+and disabling the checks. `assign where true` matches on all `Service` objects.
+
+```
+apply Dependency "disable-host-service-checks" to Service {
+ disable_checks = true
+ assign where true
+}
+```
+
+### Dependencies for Network Reachability <a id="dependencies-network-reachability"></a>
+
+A common scenario is the Icinga 2 server behind a router. Checking internet
+access by pinging the Google DNS server `google-dns` is a common method, but
+will fail in case the `dsl-router` host is down. Therefore the example below
+defines a host dependency which acts implicitly as parent relation too.
+
+Furthermore the host may be reachable but ping probes are dropped by the
+router's firewall. In case the `dsl-router`'s `ping4` service check fails, all
+further checks for the `ping4` service on host `google-dns` should
+be suppressed. This is achieved by setting the `disable_checks` attribute to `true`.
+
+```
+object Host "dsl-router" {
+ import "generic-host"
+ address = "192.168.1.1"
+}
+
+object Host "google-dns" {
+ import "generic-host"
+ address = "8.8.8.8"
+}
+
+apply Service "ping4" {
+ import "generic-service"
+
+ check_command = "ping4"
+
+ assign where host.address
+}
+
+apply Dependency "internet" to Host {
+ parent_host_name = "dsl-router"
+ disable_checks = true
+ disable_notifications = true
+
+ assign where host.name != "dsl-router"
+}
+
+apply Dependency "internet" to Service {
+ parent_host_name = "dsl-router"
+ parent_service_name = "ping4"
+ disable_checks = true
+
+ assign where host.name != "dsl-router"
+}
+```
+
+### Redundancy Groups <a id="dependencies-redundancy-groups"></a>
+
+Sometimes you want dependencies to accumulate,
+i.e. to consider the parent reachable only if no dependency is violated.
+Sometimes you want them to be regarded as redundant,
+i.e. to consider the parent unreachable only if no dependency is fulfilled.
+Think of a host connected to both a network and a storage switch vs. a host connected to redundant routers.
+
+Sometimes you even want a mixture of both.
+Think of a service like SSH depending on both LDAP and DNS to function,
+while operating redundant LDAP servers as well as redundant DNS resolvers.
+
+Before v2.12, Icinga regarded all dependencies as cumulative.
+In v2.12 and v2.13, Icinga regarded all dependencies as redundant.
+The latter led to unrelated services inadvertently being regarded as redundant to each other.
+
+v2.14 restored the former behavior and allows you to override it.
+I.e. all dependencies are regarded as essential for the parent by default.
+Specifying the `redundancy_group` attribute with the same value for two dependencies of a child object
+causes them to be regarded as redundant (only inside that redundancy group).
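+
+A sketch of two redundant router dependencies sharing one redundancy group; the object names
+and the assign filter are examples:
+
+```
+apply Dependency "router-r1" to Host {
+  parent_host_name = "router-r1"
+  redundancy_group = "routers"
+
+  assign where host.vars.behind_redundant_routers == true
+}
+
+apply Dependency "router-r2" to Host {
+  parent_host_name = "router-r2"
+  redundancy_group = "routers"
+
+  assign where host.vars.behind_redundant_routers == true
+}
+```
+
+With both dependencies in the `routers` group, the child host is only considered unreachable
+if both routers are down.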
+
+<!-- Keep this for compatibility -->
+<a id="dependencies-apply-custom-attríbutes"></a>
+
+### Apply Dependencies based on Custom Variables <a id="dependencies-apply-custom-variables"></a>
+
+You can use [apply rules](03-monitoring-basics.md#using-apply) to set parent or
+child attributes, e.g. `parent_host_name` to other objects'
+attributes.
+
+A common example is virtual machines hosted on a master. The object
+name of that master is auto-generated from your CMDB or VMware inventory
+into the host's custom variables (or a generic template for your
+cloud).
+
+Define your master host object:
+
+```
+/* your master */
+object Host "master.example.com" {
+ import "generic-host"
+}
+```
+
+Add a generic template defining all common host attributes:
+
+```
+/* generic template for your virtual machines */
+template Host "generic-vm" {
+ import "generic-host"
+}
+```
+
+Add a template for all hosts on your example.com cloud setting
+custom variable `vm_parent` to `master.example.com`:
+
+```
+template Host "generic-vm-example.com" {
+ import "generic-vm"
+ vars.vm_parent = "master.example.com"
+}
+```
+
+Define your guest hosts:
+
+```
+object Host "www.example1.com" {
+ import "generic-vm-master.example.com"
+}
+
+object Host "www.example2.com" {
+ import "generic-vm-master.example.com"
+}
+```
+
+Apply the host dependency to all child hosts importing the
+`generic-vm` template and set the `parent_host_name`
+to the previously defined custom variable `host.vars.vm_parent`.
+
+```
+apply Dependency "vm-host-to-parent-master" to Host {
+ parent_host_name = host.vars.vm_parent
+ assign where "generic-vm" in host.templates
+}
+```
+
+You can extend this example, and make your services depend on the
+`master.example.com` host too. Their local scope allows you to use
+`host.vars.vm_parent` similar to the example above.
+
+```
+apply Dependency "vm-service-to-parent-master" to Service {
+ parent_host_name = host.vars.vm_parent
+ assign where "generic-vm" in host.templates
+}
+```
+
+That way you don't need to wait for your guest hosts becoming
+unreachable when the master host goes down. Instead the services
+will detect their reachability immediately when executing checks.
+
+> **Note**
+>
+> This method with setting locally scoped variables only works in
+> apply rules, but not in object definitions.
+
+
+### Dependencies for Agent Checks <a id="dependencies-agent-checks"></a>
+
+Another good example is agent-based checks. You would define a health check
+for the agent daemon responding to your requests, and make all other services
+querying that daemon depend on that health check.
+
+```
+apply Service "agent-health" {
+ check_command = "cluster-zone"
+
+ display_name = "cluster-health-" + host.name
+
+ /* This follows the convention that the agent zone name is the FQDN which is the same as the host object name. */
+ vars.cluster_zone = host.name
+
+ assign where host.vars.agent_endpoint
+}
+```
+
+Now, make all other agent based checks dependent on the OK state of the `agent-health`
+service.
+
+```
+apply Dependency "agent-health-check" to Service {
+ parent_service_name = "agent-health"
+
+ states = [ OK ] // Fail if the parent service state switches to NOT-OK
+ disable_notifications = true
+
+ assign where host.vars.agent_endpoint // Automatically assigns all agent endpoint checks as child services on the matched host
+ ignore where service.name == "agent-health" // Avoid a self reference from child to parent
+}
+
+```
+
+This is described in detail in [this chapter](06-distributed-monitoring.md#distributed-monitoring-health-checks).
diff --git a/doc/04-configuration.md b/doc/04-configuration.md
new file mode 100644
index 0000000..e16c210
--- /dev/null
+++ b/doc/04-configuration.md
@@ -0,0 +1,737 @@
+# Configuration <a id="configuration"></a>
+
+The Icinga [configuration](https://icinga.com/products/configuration/)
+can be easily managed with either the [Icinga Director](https://icinga.com/docs/director/latest/),
+config management tools or plain text within the [Icinga DSL](04-configuration.md#configuration).
+
+Before looking into web based configuration or any sort of automation,
+we recommend to start with the configuration files and fully understand
+the possibilities of the Icinga DSL (Domain Specific Language).
+
+The package installation provides example configuration which already
+monitors the local Icinga server. You can view the monitoring details
+in Icinga Web.
+
+![Icinga Web Local Server](images/configuration/icinga_web_local_server.png)
+
+The [Language Reference](17-language-reference.md#language-reference) chapter explains details
+on value types (string, number, dictionaries, etc.) and the general configuration syntax.
+
+## Configuration Best Practice <a id="configuration-best-practice"></a>
+
+If you are ready to configure additional hosts, services, notifications,
+dependencies, etc., you should think about the requirements first and then
+decide for a possible strategy.
+
+There are many ways of creating Icinga 2 configuration objects:
+
+* The [Icinga Director](https://icinga.com/docs/director/latest/) as web based and/or automation configuration interface
+ * [Monitoring Automation with Icinga - The Director](https://icinga.com/2019/04/23/monitoring-automation-with-icinga-the-director/)
+* Manually with your preferred editor, for example vi(m), nano, notepad, etc.
+* Generated by a [configuration management tool](13-addons.md#configuration-tools) such as Puppet, Chef, Ansible, etc.
+* A custom exporter script from your CMDB or inventory tool
+* etc.
+
+Find the best strategy for your own configuration and ask yourself the following questions:
+
+* Do your hosts share a common group of services (for example linux hosts with disk, load, etc. checks)?
+* Does only a small set of users receive notifications and escalations for all hosts/services?
+
+If you can at least answer one of these questions with yes, look for the
+[apply rules](03-monitoring-basics.md#using-apply) logic instead of defining objects on a per
+host and service basis.
+
+* Are you required to define specific configuration for each host/service?
+* Does your configuration generation tool already know about the host-service-relationship?
+
+Then you should look for the object specific configuration setting `host_name` etc. accordingly.
+
+You decide on the "best" layout for configuration files and directories. Ensure that
+the [icinga2.conf](04-configuration.md#icinga2-conf) configuration file includes them.
+
+Consider these ideas:
+
+* tree-based on locations, host groups, specific host attributes with sub levels of directories.
+* flat `hosts.conf`, `services.conf`, etc. files for rule based configuration.
+* generated configuration with one file per host and a global configuration for groups, users, etc.
+* one big file generated from an external application (probably a bad idea for maintaining changes).
+* your own.
+
+Whichever strategy you choose, you should additionally check the following:
+
+* Are there any specific attributes describing the host/service you could set as `vars` custom variables?
+You can later use them for applying assign/ignore rules, or export them into external interfaces.
+* Put hosts into hostgroups, services into servicegroups and use these attributes for your apply rules.
+* Use templates to store generic attributes for your objects and apply rules making your configuration more readable.
+Details can be found in the [using templates](03-monitoring-basics.md#object-inheritance-using-templates) chapter.
+* Apply rules may overlap. Keep a central place (for example, [services.conf](04-configuration.md#services-conf) or [notifications.conf](04-configuration.md#notifications-conf)) storing
+the configuration instead of defining apply rules deep in your configuration tree.
+* Every plugin used as check, notification or event command requires a `Command` definition.
+Further details can be looked up in the [check commands](03-monitoring-basics.md#check-commands) chapter.
+
+If you are planning to use a distributed monitoring setup with master, satellite and client installations
+take the configuration location into account too. Should everything be configured on the master and
+synced to all other nodes? Or is there any specific local configuration (e.g. health checks)?
+
+There is a detailed chapter on [distributed monitoring scenarios](06-distributed-monitoring.md#distributed-monitoring-scenarios).
+Please make sure to read the [introduction](06-distributed-monitoring.md#distributed-monitoring) first.
+
+If you happen to have further questions, do not hesitate to join the
+[community forum](https://community.icinga.com)
+and ask community members for their experience and best practices.
+
+## Your Configuration <a id="your-configuration"></a>
+
+If you prefer to organize your own local object tree, you can also remove
+`include_recursive "conf.d"` from your icinga2.conf file.
+
+Create a new configuration directory, e.g. `objects.d` and include it
+in your icinga2.conf file.
+
+```
+[root@icinga2-master1.localdomain /]# mkdir -p /etc/icinga2/objects.d
+
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/icinga2.conf
+
+/* Local object configuration on our master instance. */
+include_recursive "objects.d"
+```
+
+This approach is used by the [Icinga 2 Puppet module](https://icinga.com/products/integrations/puppet/).
+
+If you plan to set up a distributed setup with HA clusters and clients, please refer to [this chapter](06-distributed-monitoring.md#distributed-monitoring-top-down)
+for examples with `zones.d` as configuration directory.
+
+## Configuration Overview <a id="configuring-icinga2-overview"></a>
+
+### icinga2.conf <a id="icinga2-conf"></a>
+
+An example configuration file is installed for you in `/etc/icinga2/icinga2.conf`.
+
+Here's a brief description of the example configuration:
+
+```
+/**
+* Icinga 2 configuration file
+* -- this is where you define settings for the Icinga application including
+* which hosts/services to check.
+*
+* For an overview of all available configuration options please refer
+* to the documentation that is distributed as part of Icinga 2.
+*/
+```
+
+Icinga 2 supports [C/C++-style comments](17-language-reference.md#comments).
+
+```
+/**
+* The constants.conf defines global constants.
+*/
+include "constants.conf"
+```
+
+The `include` directive can be used to include other files.
+
+```
+/**
+* The zones.conf defines zones for a cluster setup.
+* Not required for single instance setups.
+*/
+include "zones.conf"
+```
+
+The [Icinga Template Library](10-icinga-template-library.md#icinga-template-library) provides a set of common templates
+and [CheckCommand](03-monitoring-basics.md#check-commands) definitions.
+
+```
+/**
+* The Icinga Template Library (ITL) provides a number of useful templates
+* and command definitions.
+* Common monitoring plugin command definitions are included separately.
+*/
+include <itl>
+include <plugins>
+include <plugins-contrib>
+include <manubulon>
+
+/**
+* This includes the Icinga 2 Windows plugins. These command definitions
+* are required on a master node when a client is used as command endpoint.
+*/
+include <windows-plugins>
+
+/**
+* This includes the NSClient++ check commands. These command definitions
+* are required on a master node when a client is used as command endpoint.
+*/
+include <nscp>
+
+/**
+* The features-available directory contains a number of configuration
+* files for features which can be enabled and disabled using the
+* icinga2 feature enable / icinga2 feature disable CLI commands.
+* These commands work by creating and removing symbolic links in
+* the features-enabled directory.
+*/
+include "features-enabled/*.conf"
+```
+
+This `include` directive takes care of including the configuration files for all
+the features which have been enabled with `icinga2 feature enable`. See
+[Enabling/Disabling Features](11-cli-commands.md#enable-features) for more details.
+
+```
+/**
+* Although in theory you could define all your objects in this file
+* the preferred way is to create separate directories and files in the conf.d
+* directory. Each of these files must have the file extension ".conf".
+*/
+include_recursive "conf.d"
+```
+
+You can put your own configuration files in the [conf.d](04-configuration.md#conf-d) directory. This
+directive makes sure that all of your own configuration files are included.
+
+### constants.conf <a id="constants-conf"></a>
+
+The `constants.conf` configuration file can be used to define global constants.
+
+By default, you need to set these constants:
+
+* The `PluginDir` constant must be set to the path where the [Monitoring Plugins project](https://www.monitoring-plugins.org/) plugins are installed.
+This constant is used by a number of
+[built-in check command definitions](10-icinga-template-library.md#icinga-template-library).
+* The `NodeName` constant defines your local node name. It should be set to the FQDN, which is the default
+if not set explicitly. This constant is required for local host configuration, monitoring remote clients and
+cluster setups.
+
+Example:
+
+```
+/* The directory which contains the plugins from the Monitoring Plugins project. */
+const PluginDir = "/usr/lib64/nagios/plugins"
+
+/* The directory which contains the Manubulon plugins.
+* Check the documentation, chapter "SNMP Manubulon Plugin Check Commands", for details.
+*/
+const ManubulonPluginDir = "/usr/lib64/nagios/plugins"
+
+/* Our local instance name. By default this is the server's hostname as returned by `hostname --fqdn`.
+* This should be the common name from the API certificate.
+*/
+//const NodeName = "localhost"
+
+/* Our local zone name. */
+const ZoneName = NodeName
+
+/* Secret key for remote node tickets */
+const TicketSalt = ""
+```
+
+The `ZoneName` and `TicketSalt` constants are required for remote client
+and distributed setups. The `node setup/wizard` CLI tools take care of
+populating these values.
+
+### zones.conf <a id="zones-conf"></a>
+
+This file can be used to specify the required [Zone](09-object-types.md#objecttype-zone)
+and [Endpoint](09-object-types.md#objecttype-endpoint) configuration object for
+[distributed monitoring](06-distributed-monitoring.md#distributed-monitoring).
+
+By default the `NodeName` and `ZoneName` [constants](04-configuration.md#constants-conf) will be used.
+
+It also contains several [global zones](06-distributed-monitoring.md#distributed-monitoring-global-zone-config-sync)
+for distributed monitoring environments.
+
+Please make sure to modify this configuration with real names, i.e. use the FQDN
+mentioned in [this chapter](06-distributed-monitoring.md#distributed-monitoring-conventions)
+for your `Zone` and `Endpoint` object names.
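+
+A rough sketch of what such a configuration could look like (hypothetical FQDN and zone names; see the
+distributed monitoring chapter for complete scenarios):
+
+```
+object Endpoint "icinga2-master1.localdomain" {
+}
+
+object Zone "master" {
+  endpoints = [ "icinga2-master1.localdomain" ]
+}
+
+object Zone "global-templates" {
+  global = true
+}
+```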
+
+### The conf.d Directory <a id="conf-d"></a>
+
+This directory contains **example configuration** which should help you get started
+with monitoring the local host and its services. It is included in the
+[icinga2.conf](04-configuration.md#icinga2-conf) configuration file by default.
+
+It can be used as a reference example for your own configuration strategy.
+Just keep in mind to include the main directories in the
+[icinga2.conf](04-configuration.md#icinga2-conf) file.
+
+> **Note**
+>
+> You can remove the include directive in [icinga2.conf](04-configuration.md#icinga2-conf)
+> if you prefer your own way of deploying Icinga 2 configuration.
+
+Further details on configuration best practice and how to build your
+own strategy is described in [this chapter](04-configuration.md#configuration-best-practice).
+
+Available configuration files which are installed by default:
+
+* [hosts.conf](04-configuration.md#hosts-conf)
+* [services.conf](04-configuration.md#services-conf)
+* [users.conf](04-configuration.md#users-conf)
+* [notifications.conf](04-configuration.md#notifications-conf)
+* [commands.conf](04-configuration.md#commands-conf)
+* [groups.conf](04-configuration.md#groups-conf)
+* [templates.conf](04-configuration.md#templates-conf)
+* [downtimes.conf](04-configuration.md#downtimes-conf)
+* [timeperiods.conf](04-configuration.md#timeperiods-conf)
+* [api-users.conf](04-configuration.md#api-users-conf)
+* [app.conf](04-configuration.md#app-conf)
+
+#### hosts.conf <a id="hosts-conf"></a>
+
+The `hosts.conf` file contains an example host based on your
+`NodeName` setting in [constants.conf](04-configuration.md#constants-conf). You
+can use global constants for your object names instead of string
+values.
+
+The `import` keyword is used to import the `generic-host` template which
+takes care of setting up the host check command to `hostalive`. If you
+require a different check command, you can override it in the object definition.
+
+The `vars` attribute can be used to define custom variables which are available
+for check and notification commands. Most of the [Plugin Check Commands](10-icinga-template-library.md#icinga-template-library)
+in the Icinga Template Library require an `address` attribute.
+
+The custom variable `os` is evaluated by the `linux-servers` group in
+[groups.conf](04-configuration.md#groups-conf) making the local host a member.
+
+The example host will show you how to:
+
+* define http vhost attributes for the `http` service apply rule defined
+in [services.conf](04-configuration.md#services-conf).
+* define disks (all, specific `/`) and their attributes for the `disk`
+service apply rule defined in [services.conf](04-configuration.md#services-conf).
+* define notification types (`mail`) and set the groups attribute. This
+will be used by notification apply rules in [notifications.conf](04-configuration.md#notifications-conf).
+
+If you've installed [Icinga Web 2](https://icinga.com/docs/icinga-web-2/latest/doc/02-Installation/), you can
+uncomment the http vhost attributes and reload Icinga 2. The apply
+rules in [services.conf](04-configuration.md#services-conf) will automatically
+generate a new service checking the `/icingaweb2` URI using the `http`
+check.
+
+```
+/*
+* Host definitions with object attributes
+* used for apply rules for Service, Notification,
+* Dependency and ScheduledDowntime objects.
+*
+* Tip: Use `icinga2 object list --type Host` to
+* list all host objects after running
+* configuration validation (`icinga2 daemon -C`).
+*/
+
+/*
+ * This is an example host based on your
+ * local host's FQDN. Specify the NodeName
+ * constant in `constants.conf` or use your
+ * own description, e.g. "db-host-1".
+ */
+
+object Host NodeName {
+ /* Import the default host template defined in `templates.conf`. */
+ import "generic-host"
+
+ /* Specify the address attributes for checks e.g. `ssh` or `http`. */
+ address = "127.0.0.1"
+ address6 = "::1"
+
+ /* Set custom variable `os` for hostgroup assignment in `groups.conf`. */
+ vars.os = "Linux"
+
+ /* Define http vhost attributes for service apply rules in `services.conf`. */
+ vars.http_vhosts["http"] = {
+ http_uri = "/"
+ }
+ /* Uncomment if you've successfully installed Icinga Web 2. */
+ //vars.http_vhosts["Icinga Web 2"] = {
+ // http_uri = "/icingaweb2"
+ //}
+
+ /* Define disks and attributes for service apply rules in `services.conf`. */
+ vars.disks["disk"] = {
+ /* No parameters. */
+ }
+ vars.disks["disk /"] = {
+ disk_partitions = "/"
+ }
+
+ /* Define notification mail attributes for notification apply rules in `notifications.conf`. */
+ vars.notification["mail"] = {
+ /* The UserGroup `icingaadmins` is defined in `users.conf`. */
+ groups = [ "icingaadmins" ]
+ }
+}
+```
+
+This is only the host object definition. Now we'll need to make sure that this
+host and your additional hosts are getting [services](04-configuration.md#services-conf) applied.
+
+> **Tip**
+>
+> If you don't understand all the attributes and how to use [apply rules](17-language-reference.md#apply),
+> don't worry -- the [monitoring basics](03-monitoring-basics.md#monitoring-basics) chapter will explain
+> that in detail.
+
+#### services.conf <a id="services-conf"></a>
+
+These service [apply rules](17-language-reference.md#apply) will show you how to monitor
+the local host, but also allow you to re-use or modify them for
+your own requirements.
+
+You should define all your service apply rules in `services.conf`
+or any other central location keeping them organized.
+
+By default, the local host will be monitored by the following services:
+
+Service(s) | Applied on host(s)
+--------------------------------------------|------------------------
+`load`, `procs`, `swap`, `users`, `icinga` | The `NodeName` host only.
+`ping4`, `ping6` | All hosts with an `address` or `address6` attribute.
+`ssh` | All hosts with an `address` attribute and `vars.os` set to `Linux`.
+`http`, optional: `Icinga Web 2` | All hosts with custom variable `http_vhosts` defined as dictionary.
+`disk`, `disk /` | All hosts with custom variable `disks` defined as dictionary.
+
+The Debian packages also include an additional `apt` service check applied to the local host.
+
+The command object `icinga` for the embedded health check is provided by the
+[Icinga Template Library (ITL)](10-icinga-template-library.md#icinga-template-library) while `http_ip`, `ssh`, `load`, `processes`,
+`users` and `disk` are all provided by the [Plugin Check Commands](10-icinga-template-library.md#icinga-template-library)
+which we enabled earlier by including the `itl` and `plugins` configuration files.
+
+
+Example `load` service apply rule:
+
+```
+apply Service "load" {
+import "generic-service"
+
+check_command = "load"
+
+/* Used by the ScheduledDowntime apply rule in `downtimes.conf`. */
+vars.backup_downtime = "02:00-03:00"
+
+assign where host.name == NodeName
+}
+```
+
+The `apply` keyword can be used to create new objects which are associated with
+another group of objects. You can `import` existing templates and define (custom)
+attributes.
+
+The custom variable `backup_downtime` is set to a specific time range string.
+This variable value will be used for applying a `ScheduledDowntime` object to
+these services in [downtimes.conf](04-configuration.md#downtimes-conf).
+
+In this example the `assign where` condition is a boolean expression which is
+evaluated for all objects of type `Host` and a new service with name "load"
+is created for each matching host. [Expression operators](17-language-reference.md#expression-operators)
+may be used in `assign where` conditions.
+
+Multiple `assign where` conditions can be combined with `AND` using the `&&` operator
+as shown in the `ssh` example:
+
+```
+apply Service "ssh" {
+ import "generic-service"
+
+ check_command = "ssh"
+
+ assign where host.address && host.vars.os == "Linux"
+}
+```
+
+In this example, the service `ssh` is applied to all hosts having the `address`
+attribute defined `AND` having the custom variable `os` set to the string
+`Linux`.
+You can modify this condition to match multiple expressions by combining `AND`
+and `OR` using `&&` and `||` [operators](17-language-reference.md#expression-operators), for example
+`assign where host.address && (vars.os == "Linux" || vars.os == "Windows")`.
+
+
+A more advanced example is shown by the `http` and `disk` service apply
+rules. While one `apply` rule for `ssh` will only create a service for matching
+hosts, you can go one step further: Generate apply rules based on array items
+or dictionary key-value pairs.
+
+The idea is simple: Your host in [hosts.conf](04-configuration.md#hosts-conf) defines the
+`disks` dictionary as custom variable in `vars`.
+
+Remember the example from [hosts.conf](04-configuration.md#hosts-conf):
+
+```
+...
+ /* Define disks and attributes for service apply rules in `services.conf`. */
+ vars.disks["disk"] = {
+ /* No parameters. */
+ }
+ vars.disks["disk /"] = {
+    disk_partitions = "/"
+ }
+...
+```
+
+This dictionary contains multiple service names we want to monitor. `disk`
+should just check all available disks, while `disk /` will pass an additional
+parameter `disk_partitions` to the check command.
+
+You'll recognize that the naming is important -- it is the very same name
+that is passed from the service to the check command argument. Read about services
+and passing check command parameters in [this chapter](03-monitoring-basics.md#command-passing-parameters).
+
+Using `apply Service for` omits an explicit service name; instead, the key stored in
+the `disk` loop variable of `key => config` is used as the new service object name.
+
+The `for` keyword expects a loop definition, for example `key => value in dictionary`
+as known from Perl and other scripting languages.
+
+Once defined like this, the `apply` rule defined below will do the following:
+
+* only match hosts with `host.vars.disks` defined through the `assign where` condition
+* loop through all entries in the `host.vars.disks` dictionary. That's `disk` and `disk /` as keys.
+* call `apply` on each, and set the service object name from the provided key
+* inside apply, the `generic-service` template is imported
+* defining the [disk](10-icinga-template-library.md#plugin-check-command-disk) check command requiring command arguments like `disk_partitions`
+* adding the `config` dictionary items to `vars`. Simply said, there's now `vars.disk_partitions` defined for the
+generated service
+
+Configuration example:
+
+```
+apply Service for (disk => config in host.vars.disks) {
+ import "generic-service"
+
+ check_command = "disk"
+
+ vars += config
+}
+```
+
+A similar example is used for the `http` services. That way you can make your
+host the information provider for all apply rules. Define them once, and only
+manage your hosts.
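+
+A sketch of such an `http` apply rule (the shipped `services.conf` contains a similar rule; attribute
+details may differ):
+
+```
+apply Service for (http_vhost => config in host.vars.http_vhosts) {
+  import "generic-service"
+
+  check_command = "http"
+
+  vars += config
+}
+```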
+
+Look into [notifications.conf](04-configuration.md#notifications-conf) to see how this technique is used
+for applying notifications to hosts and services using their type and user
+attributes.
+
+Don't forget to install the check plugins required by the hosts and services and their check commands.
+
+Further details on the monitoring configuration can be found in the
+[monitoring basics](03-monitoring-basics.md#monitoring-basics) chapter.
+
+#### users.conf <a id="users-conf"></a>
+
+Defines the `icingaadmin` User and the `icingaadmins` UserGroup. The latter is used in
+[hosts.conf](04-configuration.md#hosts-conf) for defining a custom host attribute later used in
+[notifications.conf](04-configuration.md#notifications-conf) for notification apply rules.
+
+```
+object User "icingaadmin" {
+ import "generic-user"
+
+ display_name = "Icinga 2 Admin"
+ groups = [ "icingaadmins" ]
+
+ email = "icinga@localhost"
+}
+
+object UserGroup "icingaadmins" {
+ display_name = "Icinga 2 Admin Group"
+}
+```
+
+#### notifications.conf <a id="notifications-conf"></a>
+
+Notifications for check alerts are an integral part of your
+Icinga 2 monitoring stack.
+
+The examples in this file define two notification apply rules for hosts and services.
+Both `apply` rules match on the same condition: They are only applied if the
+nested dictionary attribute `notification.mail` is set.
+
+Please note that the `to` keyword is important in [notification apply rules](03-monitoring-basics.md#using-apply-notifications)
+defining whether these notifications are applied to hosts or services.
+The `import` keyword imports the specific mail templates defined in [templates.conf](04-configuration.md#templates-conf).
+
+The `interval` attribute is not explicitly set -- it [defaults to 30 minutes](09-object-types.md#objecttype-notification).
+
+By setting the `user_groups` to the value provided by the
+respective [host.vars.notification.mail](04-configuration.md#hosts-conf) attribute we'll
+implicitly use the `icingaadmins` UserGroup defined in [users.conf](04-configuration.md#users-conf).
+
+```
+apply Notification "mail-icingaadmin" to Host {
+ import "mail-host-notification"
+
+ user_groups = host.vars.notification.mail.groups
+ users = host.vars.notification.mail.users
+
+ assign where host.vars.notification.mail
+}
+
+apply Notification "mail-icingaadmin" to Service {
+ import "mail-service-notification"
+
+ user_groups = host.vars.notification.mail.groups
+ users = host.vars.notification.mail.users
+
+ assign where host.vars.notification.mail
+}
+```
+
+More details on defining notifications and their additional attributes such as
+filters can be read in [this chapter](03-monitoring-basics.md#alert-notifications).
+
+#### commands.conf <a id="commands-conf"></a>
+
+This is the place where your own command configuration can be defined. By default,
+it only contains the notification commands used by the notification templates defined in [templates.conf](04-configuration.md#templates-conf).
+
+You can freely customize these notification commands, and adapt them for your needs.
+Read more on that topic [here](03-monitoring-basics.md#notification-commands).
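+
+If you add your own check commands here, a minimal sketch could look like this (plugin and parameter
+names are hypothetical):
+
+```
+object CheckCommand "my_plugin" {
+  command = [ PluginDir + "/check_my_plugin" ]
+
+  arguments = {
+    "-w" = "$my_plugin_warning$"
+    "-c" = "$my_plugin_critical$"
+  }
+}
+```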
+
+#### groups.conf <a id="groups-conf"></a>
+
+The example host defined in [hosts.conf](04-configuration.md#hosts-conf) already has the
+custom variable `os` set to `Linux` and is therefore automatically
+a member of the host group `linux-servers`.
+
+This is done by using the [group assign](17-language-reference.md#group-assign) expressions similar
+to previously seen [apply rules](03-monitoring-basics.md#using-apply).
+
+```
+object HostGroup "linux-servers" {
+ display_name = "Linux Servers"
+
+ assign where host.vars.os == "Linux"
+}
+
+object HostGroup "windows-servers" {
+ display_name = "Windows Servers"
+
+ assign where host.vars.os == "Windows"
+}
+```
+
+Services can be grouped into service groups by similar pattern matches.
+The [match function](18-library-reference.md#global-functions-match) expects a wildcard match string
+and the attribute string to match with.
+
+```
+object ServiceGroup "ping" {
+ display_name = "Ping Checks"
+
+ assign where match("ping*", service.name)
+}
+
+object ServiceGroup "http" {
+ display_name = "HTTP Checks"
+
+ assign where match("http*", service.check_command)
+}
+
+object ServiceGroup "disk" {
+ display_name = "Disk Checks"
+
+ assign where match("disk*", service.check_command)
+}
+```
+
+#### templates.conf <a id="templates-conf"></a>
+
+Most of the example configuration objects use generic global templates by
+default:
+
+```
+template Host "generic-host" {
+ max_check_attempts = 5
+ check_interval = 1m
+ retry_interval = 30s
+
+ check_command = "hostalive"
+}
+
+template Service "generic-service" {
+ max_check_attempts = 3
+ check_interval = 1m
+ retry_interval = 30s
+}
+```
+
+The `hostalive` check command is part of the
+[Plugin Check Commands](10-icinga-template-library.md#icinga-template-library).
+
+```
+template Notification "mail-host-notification" {
+ command = "mail-host-notification"
+
+ states = [ Up, Down ]
+ types = [ Problem, Acknowledgement, Recovery, Custom,
+ FlappingStart, FlappingEnd,
+ DowntimeStart, DowntimeEnd, DowntimeRemoved ]
+
+ period = "24x7"
+}
+
+template Notification "mail-service-notification" {
+ command = "mail-service-notification"
+
+ states = [ OK, Warning, Critical, Unknown ]
+ types = [ Problem, Acknowledgement, Recovery, Custom,
+ FlappingStart, FlappingEnd,
+ DowntimeStart, DowntimeEnd, DowntimeRemoved ]
+
+ period = "24x7"
+}
+```
+
+More details on `Notification` object attributes can be found [here](09-object-types.md#objecttype-notification).
+
+
+#### downtimes.conf <a id="downtimes-conf"></a>
+
+The `load` service apply rule defined in [services.conf](04-configuration.md#services-conf) defines
+the `backup_downtime` custom variable.
+
+The ScheduledDowntime apply rule uses this attribute to define the default value
+for the time ranges required for recurring downtime slots.
+
+Learn more about downtimes in [this chapter](08-advanced-topics.md#downtimes).
+
+```
+apply ScheduledDowntime "backup-downtime" to Service {
+ author = "icingaadmin"
+ comment = "Scheduled downtime for backup"
+
+ ranges = {
+ monday = service.vars.backup_downtime
+ tuesday = service.vars.backup_downtime
+ wednesday = service.vars.backup_downtime
+ thursday = service.vars.backup_downtime
+ friday = service.vars.backup_downtime
+ saturday = service.vars.backup_downtime
+ sunday = service.vars.backup_downtime
+ }
+
+ assign where service.vars.backup_downtime != ""
+}
+```
+
+#### timeperiods.conf <a id="timeperiods-conf"></a>
+
+This file contains the default timeperiod definitions for `24x7`, `9to5`
+and `never`. TimePeriod objects are referenced by the `check_period` and `period`
+attributes of objects such as hosts, services or notifications.
+
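+A sketch of such a definition (the shipped file contains similar objects):
+
+```
+object TimePeriod "9to5" {
+  ranges = {
+    monday = "09:00-17:00"
+    tuesday = "09:00-17:00"
+    wednesday = "09:00-17:00"
+    thursday = "09:00-17:00"
+    friday = "09:00-17:00"
+  }
+}
+```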
+
+#### api-users.conf <a id="api-users-conf"></a>
+
+Provides the default [ApiUser](09-object-types.md#objecttype-apiuser) object
+named "root" for the [API authentication](12-icinga2-api.md#icinga2-api-authentication).
+
+#### app.conf <a id="app-conf"></a>
+
+Provides the default [IcingaApplication](09-object-types.md#objecttype-icingaapplication)
+object named "app" for additional settings such as disabling notifications
+globally, etc.
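+
+A sketch of such an object, e.g. for disabling notifications globally:
+
+```
+object IcingaApplication "app" {
+  //enable_notifications = false
+}
+```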
diff --git a/doc/05-service-monitoring.md b/doc/05-service-monitoring.md
new file mode 100644
index 0000000..097fb11
--- /dev/null
+++ b/doc/05-service-monitoring.md
@@ -0,0 +1,1007 @@
+# Service Monitoring <a id="service-monitoring"></a>
+
+The power of Icinga 2 lies in its modularity. There are thousands of
+community plugins available next to the standard plugins provided by
+the [Monitoring Plugins project](https://www.monitoring-plugins.org).
+
+Start your research on [Icinga Exchange](https://exchange.icinga.com)
+and check which services are already [covered](05-service-monitoring.md#service-monitoring-overview).
+
+The [requirements chapter](05-service-monitoring.md#service-monitoring-requirements) guides you
+through the plugin setup, tests and their integration with an [existing](05-service-monitoring.md#service-monitoring-plugin-checkcommand)
+or [new](05-service-monitoring.md#service-monitoring-plugin-checkcommand-new) CheckCommand object
+and host/service objects inside the [Director](05-service-monitoring.md#service-monitoring-plugin-checkcommand-integration-director)
+or [Icinga config files](05-service-monitoring.md#service-monitoring-plugin-checkcommand-integration-config-files).
+It also adds hints on [modifying](05-service-monitoring.md#service-monitoring-plugin-checkcommand-modify) existing commands.
+
+Plugins follow the [Plugin API specification](05-service-monitoring.md#service-monitoring-plugin-api)
+which is enriched with examples and code samples to get you started with
+[your own plugin](05-service-monitoring.md#service-monitoring-plugin-new).
+
+
+
+## Requirements <a id="service-monitoring-requirements"></a>
+
+### Plugins <a id="service-monitoring-plugins"></a>
+
+All existing Icinga or Nagios plugins work with Icinga 2. Community
+plugins can be found for example on [Icinga Exchange](https://exchange.icinga.com).
+
+The recommended way of setting up these plugins is to copy them
+into the `PluginDir` directory.
+
+If you have plugins with many dependencies, consider creating a
+custom RPM/DEB package which handles the required libraries and binaries.
+
+Configuration management tools such as Puppet, Ansible, Chef or Saltstack
+also help with automatically installing the plugins on different
+operating systems. They can also help with installing the required
+dependencies, e.g. Python libraries, Perl modules, etc.
+
+### Plugin Setup <a id="service-monitoring-plugins-setup"></a>
+
+Good plugins provide installation and configuration instructions
+in their docs and/or README on GitHub.
+
+Sometimes dependencies are not listed, or your distribution differs from the one
+described. Try running the plugin after setup and [ensure it works](05-service-monitoring.md#service-monitoring-plugins-it-works).
+
+#### Ensure it works <a id="service-monitoring-plugins-it-works"></a>
+
+Prior to using the check plugin with Icinga 2 you should ensure that it is working properly
+by trying to run it on the console using whichever user Icinga 2 is running as:
+
+RHEL/CentOS/Fedora
+
+```bash
+sudo -u icinga /usr/lib64/nagios/plugins/check_mysql_health --help
+```
+
+Debian/Ubuntu
+
+```bash
+sudo -u nagios /usr/lib/nagios/plugins/check_mysql_health --help
+```
+
+Additional libraries may be required for some plugins. Please consult the plugin
+documentation and/or the included README file for installation instructions.
+Sometimes plugins contain hard-coded paths to other components. Instead of changing
+the plugin it might be easier to create a symbolic link to make sure it doesn't get
+overwritten during the next update.
+
+Sometimes there are plugins which do not exactly fit your requirements.
+In that case you can modify an existing plugin or just write your own.
+
+#### Plugin Dependency Errors <a id="service-monitoring-plugins-setup-dependency-errors"></a>
+
+Plugins can be scripts (Shell, Python, Perl, Ruby, PHP, etc.)
+or compiled binaries (C, C++, Go).
+
+These scripts/binaries may require additional libraries
+which must be installed on every system they are executed on.
+
+> **Tip**
+>
+> Don't test the plugins on your master instance, instead
+> do that on the satellites and clients which execute the
+> checks.
+
+There are errors, now what? Typical errors are missing libraries,
+binaries or packages.
+
+##### Python Example
+
+Example for a Python plugin which uses the `tinkerforge` module
+to query a network service:
+
+```
+ImportError: No module named tinkerforge.ip_connection
+```
+
+Its [documentation](https://github.com/NETWAYS/check_tinkerforge#installation)
+points to installing the `tinkerforge` Python module.
+
+##### Perl Example
+
+Example for a Perl plugin which uses SNMP:
+
+```
+Can't locate Net/SNMP.pm in @INC (you may need to install the Net::SNMP module)
+```
+
+Prior to installing the Perl module via CPAN, look for a distribution
+specific package, e.g. `libnet-snmp-perl` on Debian/Ubuntu or `perl-Net-SNMP`
+on RHEL/CentOS.
+
+
+#### Optional: Custom Path <a id="service-monitoring-plugins-custom-path"></a>
+
+If you are not using the default `PluginDir` directory, you
+can create a custom plugin directory and constant
+and reference this in the created CheckCommand objects.
+
+Create a common directory e.g. `/opt/monitoring/plugins`
+and install the plugin there.
+
+```bash
+mkdir -p /opt/monitoring/plugins
+cp check_snmp_int.pl /opt/monitoring/plugins
+chmod +x /opt/monitoring/plugins/check_snmp_int.pl
+```
+
+Next create a new global constant, e.g. `CustomPluginDir`
+in your [constants.conf](04-configuration.md#constants-conf)
+configuration file:
+
+```
+vim /etc/icinga2/constants.conf
+
+const PluginDir = "/usr/lib/nagios/plugins"
+const CustomPluginDir = "/opt/monitoring/plugins"
+```
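+
+The new constant can then be referenced from your CheckCommand objects, e.g. (a sketch with a hypothetical
+command name for the copied plugin):
+
+```
+object CheckCommand "snmp_int_custom" {
+  command = [ CustomPluginDir + "/check_snmp_int.pl" ]
+}
+```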
+
+### CheckCommand Definition <a id="service-monitoring-plugin-checkcommand"></a>
+
+Each plugin requires a [CheckCommand](09-object-types.md#objecttype-checkcommand) object in your
+configuration which can be used in the [Service](09-object-types.md#objecttype-service) or
+[Host](09-object-types.md#objecttype-host) object definition.
+
+Please check if the Icinga 2 package already provides an
+[existing CheckCommand definition](10-icinga-template-library.md#icinga-template-library).
+
+If that's the case, thoroughly check the required parameters and integrate the check command
+into your host and service objects. Best practice is to run the plugin on the CLI
+with the required parameters first.
+
+Example for database size checks with [check_mysql_health](10-icinga-template-library.md#plugin-contrib-command-mysql_health).
+
+```bash
+/usr/lib64/nagios/plugins/check_mysql_health --hostname '127.0.0.1' --username root --password icingar0xx --mode sql --name 'select sum(data_length + index_length) / 1024 / 1024 from information_schema.tables where table_schema = '\''icinga'\'';' '--name2' 'db_size' --units 'MB' --warning 4096 --critical 8192
+```
+
+The parameter names inside the ITL commands follow the
+`<command name>_<parameter name>` schema.
+
+#### Icinga Director Integration <a id="service-monitoring-plugin-checkcommand-integration-director"></a>
+
+Navigate into `Commands > External Commands` and search for `mysql_health`.
+Select `mysql_health` and navigate into the `Fields` tab.
+
+In order to access the parameters, the Director requires you to first
+define the needed custom data fields:
+
+* `mysql_health_hostname`
+* `mysql_health_username` and `mysql_health_password`
+* `mysql_health_mode`
+* `mysql_health_name`, `mysql_health_name2` and `mysql_health_units`
+* `mysql_health_warning` and `mysql_health_critical`
+
+Create a new host template and object where you'll store generic
+settings like `mysql_health_hostname` (if it differs from the host's
+`address` attribute), `mysql_health_username` and `mysql_health_password`.
+
+Create a new service template for `mysql-health` and set `mysql_health`
+as the check command. You can also define a default for `mysql_health_mode`.
+
+Next, create a service apply rule or a new service set which gets assigned
+to matching host objects.
+
+
+#### Icinga Config File Integration <a id="service-monitoring-plugin-checkcommand-integration-config-files"></a>
+
+Create or modify a host object which stores
+the generic database defaults and prepares details
+for a service `apply for` rule.
+
+```
+object Host "icinga2-master1.localdomain" {
+ check_command = "hostalive"
+ address = "..."
+
+ // Database listens locally, not external
+ vars.mysql_health_hostname = "127.0.0.1"
+
+ // Basic database size checks for Icinga DBs
+ vars.databases["icinga"] = {
+ mysql_health_warning = 4096 //MB
+ mysql_health_critical = 8192 //MB
+ }
+ vars.databases["icingaweb2"] = {
+ mysql_health_warning = 4096 //MB
+ mysql_health_critical = 8192 //MB
+ }
+}
+```
+
+The host object already prepares the database details and thresholds
+for advanced [apply for](03-monitoring-basics.md#using-apply-for) rules. The apply rule below uses
+conditions to fetch host-specified values, or to set default values.
+
+```
+apply Service "db-size-" for (db_name => config in host.vars.databases) {
+ check_interval = 1m
+ retry_interval = 30s
+
+ check_command = "mysql_health"
+
+ if (config.mysql_health_username) {
+ vars.mysql_health_username = config.mysql_health_username
+ } else {
+ vars.mysql_health_username = "root"
+ }
+ if (config.mysql_health_password) {
+ vars.mysql_health_password = config.mysql_health_password
+ } else {
+ vars.mysql_health_password = "icingar0xx"
+ }
+
+ vars.mysql_health_mode = "sql"
+ vars.mysql_health_name = "select sum(data_length + index_length) / 1024 / 1024 from information_schema.tables where table_schema = '" + db_name + "';"
+ vars.mysql_health_name2 = "db_size"
+ vars.mysql_health_units = "MB"
+
+ if (config.mysql_health_warning) {
+ vars.mysql_health_warning = config.mysql_health_warning
+ }
+ if (config.mysql_health_critical) {
+ vars.mysql_health_critical = config.mysql_health_critical
+ }
+
+ vars += config
+}
+```
+
+#### New CheckCommand <a id="service-monitoring-plugin-checkcommand-new"></a>
+
+This chapter describes how to add a new CheckCommand object for a plugin.
+
+Please make sure to follow these conventions when adding a new command object definition:
+
+* Use [command arguments](03-monitoring-basics.md#command-arguments) whenever possible. The `command` attribute
+must be an array in `[ ... ]` for shell escaping.
+* Define a unique `prefix` for the command's specific arguments. Best practice is to follow this schema:
+
+```
+<command name>_<parameter name>
+```
+
+That way you can safely set them on host/service level and you'll always know which command they control.
+* Use command argument default values, e.g. for thresholds.
+* Use [advanced conditions](09-object-types.md#objecttype-checkcommand) like `set_if` definitions.
+
+Before starting with the CheckCommand definition, please check
+the existing objects available inside the ITL. They follow best
+practices and are maintained by developers and our community.
+
+This example picks a new plugin called [check_systemd](https://exchange.icinga.com/joseffriedrich/check_systemd)
+uploaded to Icinga Exchange in June 2019.
+
+First, [install](05-service-monitoring.md#service-monitoring-plugins-setup) the plugin and ensure
+that [it works](05-service-monitoring.md#service-monitoring-plugins-it-works). Then run it with the
+`--help` parameter to see the actual parameters (docs might be outdated).
+
+```
+./check_systemd.py --help
+
+usage: check_systemd.py [-h] [-c SECONDS] [-e UNIT | -u UNIT] [-v] [-V]
+ [-w SECONDS]
+
+...
+
+optional arguments:
+ -h, --help show this help message and exit
+ -c SECONDS, --critical SECONDS
+ Startup time in seconds to result in critical status.
+ -e UNIT, --exclude UNIT
+ Exclude a systemd unit from the checks. This option
+ can be applied multiple times. For example: -e mnt-
+ data.mount -e task.service.
+ -u UNIT, --unit UNIT Name of the systemd unit that is beeing tested.
+ -v, --verbose Increase output verbosity (use up to 3 times).
+ -V, --version show program's version number and exit
+ -w SECONDS, --warning SECONDS
+ Startup time in seconds to result in warning status.
+```
+
+The argument description is important; based on this, you need to create the
+command arguments.
+
+> **Tip**
+>
+> When you are using the Director, you can prepare the commands as files
+> e.g. inside the `global-templates` zone. Then run the kickstart wizard
+> again to import the commands as external reference.
+>
+> If you prefer to use the Director GUI/CLI, please apply the steps
+> in the `Add Command` form.
+
+Start with the basic plugin call without any parameters.
+
+```
+object CheckCommand "systemd" { // Plugin name without 'check_' prefix
+ command = [ PluginContribDir + "/check_systemd.py" ] // Use the 'PluginContribDir' constant, see the contributed ITL commands
+}
+```
+
+Run a configuration validation to see if that works: `icinga2 daemon -C`.
+
+Next, analyse the plugin parameters. Plugins with a good help output show
+optional parameters in square brackets. This is the case for all parameters
+for this plugin. If there are required parameters, use the `required` key
+inside the argument.
+
+The `arguments` attribute is a dictionary which takes the parameters as keys.
+
+```
+ arguments = {
+ "--unit" = { ... }
+ }
+```
+
+If long parameter names are available, prefer them. This increases
+readability in both the configuration and the executed command line.
+
+The argument value itself is a sub dictionary which has additional keys:
+
+* `value` which references the runtime macro string
+* `description` where you copy the plugin parameter help text into
+* `required`, `set_if`, etc. for advanced parameters, check the [CheckCommand object](09-object-types.md#objecttype-checkcommand) chapter.
+
+The runtime macro syntax is required to allow value extraction when
+the command is executed.
+
+> **Tip**
+>
+> Inside the Director, store the new command first in order to
+> unveil the `Arguments` tab.
+
+Best practice is to use the command name as prefix, in this specific
+case e.g. `systemd_unit`.
+
+```
+ arguments = {
+ "--unit" = {
+ value = "$systemd_unit$" // The service parameter would then be defined as 'vars.systemd_unit = "icinga2"'
+ description = "Name of the systemd unit that is beeing tested."
+ }
+ "--warning" = {
+ value = "$systemd_warning$"
+ description = "Startup time in seconds to result in warning status."
+ }
+ "--critical" = {
+ value = "$systemd_critical$"
+ description = "Startup time in seconds to result in critical status."
+ }
+ }
+```
+
+This may take a while -- validate the configuration in between, until
+the CheckCommand definition is done.
+
+Then test and integrate it into your monitoring configuration.
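+
+As a sketch, the finished command could be used in a service object like this (unit name and thresholds
+are placeholders):
+
+```
+object Service "systemd" {
+  host_name = NodeName
+  check_command = "systemd"
+
+  vars.systemd_unit = "icinga2"
+  vars.systemd_warning = 5
+  vars.systemd_critical = 10
+}
+```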
+
+Remember: Do it once and right, and never touch the CheckCommand again.
+Optional arguments allow different use cases and scenarios.
+
+
+Once you have created your really good CheckCommand, please consider
+sharing it with our community by creating a new PR on [GitHub](https://github.com/Icinga/icinga2/blob/master/CONTRIBUTING.md).
+_Please also update the documentation for the ITL._
+
+
+> **Tip**
+>
+> Inside the Director, you can render the configuration in the Deployment
+> section. Extract the static configuration object and use that as a source
+> for sending it upstream.
+
+
+
+#### Modify Existing CheckCommand <a id="service-monitoring-plugin-checkcommand-modify"></a>
+
+Sometimes an existing CheckCommand inside the ITL is missing a parameter,
+or you don't want a default parameter value to be set.
+
+Instead of copying the entire configuration object, you can import
+an object into another new object.
+
+```
+object CheckCommand "http-custom" {
+ import "http" // Import existing http object
+
+ arguments += { // Use additive assignment to add missing parameters
+ "--key" = {
+ value = "$http_..." // Keep the parameter name the same as with http
+ }
+ }
+
+ // Override default parameters
+ vars.http_address = "..."
+}
+```
+
+This CheckCommand can then be referenced in your host/service object
+definitions.
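+
+For example (the host name is hypothetical):
+
+```
+object Service "http-custom" {
+  host_name = "icinga2-master1.localdomain"
+  check_command = "http-custom"
+}
+```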
+
+
+### Plugin API <a id="service-monitoring-plugin-api"></a>
+
+Icinga 2 supports the native plugin API specification from the Monitoring Plugins project.
+It is defined in the [Monitoring Plugins](https://www.monitoring-plugins.org) guidelines.
+
+The Icinga documentation revamps the specification into our
+own guideline enriched with examples and best practices.
+
+#### Output <a id="service-monitoring-plugin-api-output"></a>
+
+The output should be as short and as detailed as possible. The
+most common cases include:
+
+- Viewing a problem list in Icinga Web and dashboards
+- Getting paged about a problem
+- Receiving the alert on the CLI or forwarding it to external (ticket) systems
+
+Examples:
+
+```
+<STATUS>: <A short description what happened>
+
+OK: MySQL connection time is fine (0.0002s)
+WARNING: MySQL connection time is slow (0.5s > 0.1s threshold)
+CRITICAL: MySQL connection time is causing degraded performance (3s > 0.5s threshold)
+```
+
+Icinga supports reading multi-line output where Icinga Web
+only shows the first line in the listings and everything in the detail view.
+
+Example for an end-to-end check with many smaller test cases integrated:
+
+```
+OK: Online banking works.
+Testcase 1: Site reached.
+Testcase 2: Attempted login, JS loads.
+Testcase 3: Login succeeded.
+Testcase 4: View current state works.
+Testcase 5: Transactions fine.
+```
+
+If the extended output shouldn't be visible in your monitoring, but only for testing,
+it is recommended to implement the `--verbose` plugin parameter to allow
+developers and users to debug further. Check [here](05-service-monitoring.md#service-monitoring-plugin-api-verbose)
+for more implementation tips.
+
+> **Tip**
+>
+> More debug output also helps when implementing your plugin.
+>
+> Best practice is to have the plugin parameter and handling implemented first,
+> then add it anywhere you want to see more, e.g. from initial database connections
+> to actual query results.
+
+
+#### Status <a id="service-monitoring-plugin-api-status"></a>
+
+Value | Status | Description
+------|-----------|-------------------------------
+0 | OK | The check went fine and everything is considered working.
+1 | Warning | The check is above the given warning threshold, or anything else is suspicious requiring attention before it breaks.
+2 | Critical | The check exceeded the critical threshold, or something really is broken and will harm the production environment.
+3 | Unknown | Invalid parameters, low level resource errors (IO device busy, no fork resources, TCP sockets, etc.) preventing the actual check. Higher level errors such as DNS resolving, TCP connection timeouts should be treated as `Critical` instead. Whenever the plugin reaches its timeout (best practice) it should also terminate with `Unknown`.
+
+Keep in mind that these are service states. Icinga automatically maps
+the [host state](03-monitoring-basics.md#check-result-state-mapping) from the returned plugin states.
+
+#### Thresholds <a id="service-monitoring-plugin-api-thresholds"></a>
+
+A plugin calculates specific values and may decide about the exit state on its own.
+This is done with thresholds -- warning and critical values which are compared with
+the actual value. Based on this logic, the exit state is determined.
+
+Imagine the following value and defined thresholds:
+
+```
+ptc_value = 57.8
+
+warning = 50
+critical = 60
+```
+
+Whenever `ptc_value` is higher than warning or critical, it should return
+the appropriate [state](05-service-monitoring.md#service-monitoring-plugin-api-status).
+
+The threshold evaluation order is also important:
+
+* Critical thresholds are evaluated first and supersede everything else.
+* Warning thresholds are evaluated second.
+* If no threshold is matched, return the OK state.
+
+Avoid using hardcoded threshold values in your plugins, always
+add them to the argument parser.
+
+Example for Python:
+
+```python
+import argparse
+import signal
+import sys
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument("-w", "--warning", help="Warning threshold. Single value or range, e.g. '20:50'.")
+ parser.add_argument("-c", "--critical", help="Critical threshold. Single vluae or range, e.g. '25:45'.")
+
+ args = parser.parse_args()
+```
+
+Users might call plugins only with the critical threshold parameter,
+leaving out the warning parameter. Keep this in mind when evaluating
+the thresholds; always check if the parameters have been defined before.
+
+```python
+ if args.critical:
+ if ptc_value > args.critical:
+ print("CRITICAL - ...")
+ sys.exit(2) # Critical
+
+ if args.warning:
+ if ptc_value > args.warning:
+ print("WARNING - ...")
+ sys.exit(1) # Warning
+
+ print("OK - ...")
+ sys.exit(0) # OK
+```
+
+The above is a simplified example for printing the [output](05-service-monitoring.md#service-monitoring-plugin-api-output)
+and using the [state](05-service-monitoring.md#service-monitoring-plugin-api-status)
+as exit code.
+
+Before diving into the implementation, learn more about required
+[performance data metrics](05-service-monitoring.md#service-monitoring-plugin-api-performance-data-metrics)
+and more best practices below.
+
+##### Threshold Ranges <a id="service-monitoring-plugin-api-thresholds-ranges"></a>
+
+Threshold ranges can be used to specify an alert window, e.g. whenever a calculated
+value is between a lower and higher critical threshold.
+
+The schema for threshold ranges looks as follows. The `@` character in square brackets
+is optional.
+
+```
+[@]start:end
+```
+
+There are a few requirements for ranges:
+
+* `start <= end`. Add a check in your code and let the user know about problematic values.
+
+```
+10:20 # OK
+
+30:10 # Error
+```
+
+* `start:` can be omitted if its value is 0. This is the default handling for single threshold values too.
+
+```
+10 # Every value > 10 and < 0, outside of 0..10
+```
+
+* If `end` is omitted, assume end is infinity.
+
+```
+10: # < 10, outside of 10..∞
+```
+
+* In order to specify negative infinity, use the `~` character.
+
+```
+~:10 # > 10, outside of -∞..10
+```
+
+* Raise alert if value is outside of the defined range.
+
+```
+10:20 # < 10 or > 20, outside of 10..20
+```
+
+* Start with `@` to raise an alert if the value is **inside** the defined range, inclusive start/end values.
+
+```
+@10:20 # >= 10 and <= 20, inside of 10..20
+```
+
+Best practice is to either implement single threshold values, or fully support ranges.
+This requires parsing the input parameter values, therefore look for existing libraries
+already providing this functionality.
+
+[check_tinkerforge](https://github.com/NETWAYS/check_tinkerforge/blob/master/check_tinkerforge.py)
+implements a simple parser to avoid dependencies.
+
+
+#### Performance Data Metrics <a id="service-monitoring-plugin-api-performance-data-metrics"></a>
+
+Performance data metrics must be appended to the plugin output with a preceding `|` character.
+The schema is as follows:
+
+```
+<output> | 'label'=value[UOM];[warn];[crit];[min];[max]
+```
+
+The label should be enclosed in single quotes. Avoid spaces or special characters such
+as `%` in there, as this could lead to problems with metric receivers such as Graphite.
+
+Labels must not include `'` and `=` characters. Keep the label length as short and unique as possible.
+
+Example:
+
+```
+'load1'=4.7
+```
+
+Values must respect the C/POSIX locale and not implement e.g. German locale for floating point numbers with `,`.
+Icinga sets `LC_NUMERIC=C` to enforce this locale on plugin execution.
+
+##### Unit of Measurement (UOM) <a id="service-monitoring-plugin-api-performance-data-metrics-uom"></a>
+
+```
+'rta'=12.445000ms 'pl'=0%
+```
+
+The UoMs are written as-is into the [core backends](14-features.md#core-backends)
+(IDO, API). I.e. 12.445000ms remain 12.445000ms.
+
+In contrast, the [metric backends](14-features.md#metrics)
+(Graphite, InfluxDB, etc.) get perfdata (including warn, crit, min, max)
+normalized by Icinga. E.g. 12.445000ms become 0.012445 seconds.
+
+Some plugins change the UoM for different sizing, e.g. returning the disk usage in MB and later GB
+for the same performance data label. The normalization ensures that graphs always look the same in spite of this.
+
+[Icinga DB](14-features.md#core-backends-icingadb) gets both the as-is and the normalized perfdata.
+
+What metric backends get... | ... from which perfdata UoMs (case-insensitive if possible)
+----------------------------|---------------------------------------
+bytes (B) | B, KB, MB, ..., YB, KiB, MiB, ..., YiB
+bits (b) | b, kb, mb, ..., yb, kib, mib, ..., yib
+packets | packets
+seconds (s) | ns, us, ms, s, m, h, d
+percent | %
+amperes (A) | nA, uA, mA, A, kA, MA, GA, ..., YA
+ohms (O) | nO, uO, mO, O, kO, MO, GO, ..., YO
+volts (V) | nV, uV, mV, V, kV, MV, GV, ..., YV
+watts (W) | nW, uW, mW, W, kW, MW, GW, ..., YW
+ampere seconds (As) | nAs, uAs, mAs, As, kAs, MAs, GAs, ..., YAs
+ampere seconds | nAm, uAm, mAm, Am (ampere minutes), kAm, MAm, GAm, ..., YAm
+ampere seconds | nAh, uAh, mAh, Ah (ampere hours), kAh, MAh, GAh, ..., YAh
+watt hours | nWs, uWs, mWs, Ws (watt seconds), kWs, MWs, GWs, ..., YWs
+watt hours | nWm, uWm, mWm, Wm (watt minutes), kWm, MWm, GWm, ..., YWm
+watt hours (Wh) | nWh, uWh, mWh, Wh, kWh, MWh, GWh, ..., YWh
+lumens | lm
+decibel-milliwatts | dBm
+grams (g) | ng, ug, mg, g, kg, t
+degrees Celsius | C
+degrees Fahrenheit | F
+degrees Kelvin | K
+liters (l) | ml, l, hl
+
+The UoM "c" represents a continuous counter (e.g. interface traffic counters).
+
+Unknown UoMs are discarded (as if none was given).
+A value without any UoM may be an integer or floating point number
+for any type (processes, users, etc.).
+
+##### Thresholds and Min/Max <a id="service-monitoring-plugin-api-performance-data-metrics-thresholds-min-max"></a>
+
+Next to the performance data value, warn, crit, min, max can optionally be provided. They must be separated
+with the semi-colon `;` character. They share the same UOM with the performance data value.
+
+```
+$ check_ping -4 -H icinga.com -c '200,15%' -w '100,5%'
+
+PING OK - Packet loss = 0%, RTA = 12.44 ms|rta=12.445000ms;100.000000;200.000000;0.000000 pl=0%;5;15;0
+```
+
+##### Multiple Performance Data Values <a id="service-monitoring-plugin-api-performance-data-metrics-multiple"></a>
+
+Multiple performance data values must be joined with a space character. The below example
+is from the [check_load](10-icinga-template-library.md#plugin-check-command-load) plugin.
+
+```
+load1=4.680;1.000;2.000;0; load5=0.000;5.000;10.000;0; load15=0.000;10.000;20.000;0;
+```
+
+#### Timeout <a id="service-monitoring-plugin-api-timeout"></a>
+
+Icinga has a safety mechanism where it kills processes running for too
+long. The timeout can be specified in [CheckCommand objects](09-object-types.md#objecttype-checkcommand)
+or on the host/service object.
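+
+For example (a sketch with hypothetical object names):
+
+```
+object CheckCommand "my_command" {
+  command = [ PluginDir + "/check_my_command" ]
+
+  timeout = 30s
+}
+
+object Service "my-service" {
+  host_name = NodeName
+  check_command = "my_command"
+
+  check_timeout = 30s
+}
+```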
+
+Best practice is to control the timeout in the plugin itself
+and provide a clear message followed by the Unknown state.
+
+Example in Python taken from [check_tinkerforge](https://github.com/NETWAYS/check_tinkerforge/blob/master/check_tinkerforge.py):
+
+```python
+import argparse
+import signal
+import sys
+from functools import partial
+
+def handle_sigalrm(signum, frame, timeout=None):
+    # output() is defined elsewhere in the plugin; it prints the message and exits with the given state.
+    output('Plugin timed out after %d seconds' % timeout, 3)
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ # ... add more arguments
+ parser.add_argument("-t", "--timeout", help="Timeout in seconds (default 10s)", type=int, default=10)
+ args = parser.parse_args()
+
+ signal.signal(signal.SIGALRM, partial(handle_sigalrm, timeout=args.timeout))
+ signal.alarm(args.timeout)
+
+ # ... perform the check and generate output/status
+```
+
+#### Versions <a id="service-monitoring-plugin-api-versions"></a>
+
+Plugins should provide a version via the `-V` or `--version` parameter
+which is bumped on releases. This allows identifying problems with
+too old or too new versions on the community support channels.
+
+Example in Python taken from [check_tinkerforge](https://github.com/NETWAYS/check_tinkerforge/blob/master/check_tinkerforge.py):
+
+```python
+import argparse
+import signal
+import sys
+
+__version__ = '0.9.1'
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument('-V', '--version', action='version', version='%(prog)s v' + sys.modules[__name__].__version__)
+```
+
+#### Verbose <a id="service-monitoring-plugin-api-verbose"></a>
+
+Plugins should provide a verbose mode with `-v` or `--verbose` in order
+to show more detailed log messages. This helps to debug and analyse the
+flow and execution steps inside the plugin.
+
+Ensure to add the parameter prior to implementing the check logic into
+the plugin.
+
+Example in Python taken from [check_tinkerforge](https://github.com/NETWAYS/check_tinkerforge/blob/master/check_tinkerforge.py):
+
+```python
+import argparse
+import signal
+import sys
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument('-v', '--verbose', action='store_true')
+
+    args = parser.parse_args()
+
+    if args.verbose:
+        print("Verbose debug output")
+```
+
+
+### Create a new Plugin <a id="service-monitoring-plugin-new"></a>
+
+Sometimes an existing plugin does not satisfy your requirements. You
+can either kindly contact the original author about plans to add changes
+and/or create a patch.
+
+If you just want to format the output and state of an existing plugin
+it might also be helpful to write a wrapper script. This script
+could pass all configured parameters, call the plugin script, parse
+its output/exit code and return your specified output/exit code.
+
+On the other hand plugins for specific services and hardware might not yet
+exist.
+
+> **Tip**
+>
+> Watch this presentation from Icinga Camp Berlin to learn more
+> about [How to write checks that don't suck](https://www.youtube.com/watch?v=Ey_APqSCoFQ).
+
+Common best practices:
+
+* Choose the programming language wisely
+ * Scripting languages (Bash, Python, Perl, Ruby, PHP, etc.) are easier to write and setup but their check execution might take longer (invoking the script interpreter as overhead, etc.).
+ * Plugins written in C/C++, Go, etc. improve check execution time but may generate an overhead with installation and packaging.
+* Use a modern VCS such as Git for developing the plugin, e.g. share your plugin on GitHub and let it sync to [Icinga Exchange](https://exchange.icinga.com).
+* **Look into existing plugins endorsed by community members.**
+
+Implementation hints:
+
+* Add parameters with key-value pairs to your plugin. They should allow long names (e.g. `--host localhost`) and also short parameters (e.g. `-H localhost`)
+ * `-h|--help` should print the version and all details about parameters and runtime invocation. Note: Python's ArgParse class provides this OOTB.
+ * `--version` should print the plugin [version](05-service-monitoring.md#service-monitoring-plugin-api-versions).
+* Add a [verbose/debug output](05-service-monitoring.md#service-monitoring-plugin-api-verbose) functionality for detailed on-demand logging.
+* Respect the exit codes required by the [Plugin API](05-service-monitoring.md#service-monitoring-plugin-api).
+* Always add [performance data](05-service-monitoring.md#service-monitoring-plugin-api-performance-data-metrics) to your plugin output.
+* Allow to specify [warning/critical thresholds](05-service-monitoring.md#service-monitoring-plugin-api-thresholds) as parameters.
+
+Example skeleton:
+
+```
+# 1. include optional libraries
+# 2. global variables
+# 3. helper functions and/or classes
+# 4. define timeout condition
+
+if (<timeout_reached>) then
+  print "UNKNOWN - Timeout (...) reached | 'time'=30.0"
+  exit(3)
+endif
+
+# 5. main method
+
+<execute and fetch data>
+
+if (<threshold_critical_condition>) then
+  print "CRITICAL - ... | 'time'=0.1 'myperfdatavalue'=5.0"
+  exit(2)
+else if (<threshold_warning_condition>) then
+  print "WARNING - ... | 'time'=0.1 'myperfdatavalue'=3.0"
+  exit(1)
+else
+  print "OK - ... | 'time'=0.2 'myperfdatavalue'=1.0"
+  exit(0)
+endif
+```
+
+There are various plugin libraries available which will help
+with plugin execution and output formatting too, for example
+[nagiosplugin from Python](https://pypi.python.org/pypi/nagiosplugin/).
+
+> **Note**
+>
+> Ensure to test your plugin properly with special cases before putting it
+> into production!
+
+Once you've finished your plugin please upload/sync it to [Icinga Exchange](https://exchange.icinga.com/new).
+Thanks in advance!
+
+
+## Service Monitoring Overview <a id="service-monitoring-overview"></a>
+
+The following examples should help you to start implementing your own ideas.
+There is a variety of plugins available. This collection is not complete --
+if you have any updates, please send a documentation patch upstream.
+
+Please visit our [community forum](https://community.icinga.com) which
+may provide an answer to your use case already. If not, do not hesitate
+to create a new topic.
+
+### General Monitoring <a id="service-monitoring-general"></a>
+
+If the remote service is available (via a network protocol and port),
+and if a check plugin is also available, you don't necessarily need a local client.
+Instead, choose a plugin and configure its parameters and thresholds. The following examples are included in the [Icinga 2 Template Library](10-icinga-template-library.md#icinga-template-library):
+
+* [ping4](10-icinga-template-library.md#plugin-check-command-ping4), [ping6](10-icinga-template-library.md#plugin-check-command-ping6),
+[fping4](10-icinga-template-library.md#plugin-check-command-fping4), [fping6](10-icinga-template-library.md#plugin-check-command-fping6), [hostalive](10-icinga-template-library.md#plugin-check-command-hostalive)
+* [tcp](10-icinga-template-library.md#plugin-check-command-tcp), [udp](10-icinga-template-library.md#plugin-check-command-udp), [ssl](10-icinga-template-library.md#plugin-check-command-ssl)
+* [ntp_time](10-icinga-template-library.md#plugin-check-command-ntp-time)
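+
+As a sketch of how such a plugin is configured without a local agent, the following service apply rule runs the [tcp](10-icinga-template-library.md#plugin-check-command-tcp) check against the host's address. The port and thresholds are example values only:
+
+```
+apply Service "ssh-port" {
+  check_command = "tcp"
+
+  vars.tcp_port = 22   // check the SSH port remotely
+  vars.tcp_wtime = 5   // warning if the connection takes longer than 5 seconds
+  vars.tcp_ctime = 10  // critical after 10 seconds
+
+  assign where host.address
+}
+```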
+
+### Linux Monitoring <a id="service-monitoring-linux"></a>
+
+* [disk](10-icinga-template-library.md#plugin-check-command-disk)
+* [mem](10-icinga-template-library.md#plugin-contrib-command-mem), [swap](10-icinga-template-library.md#plugin-check-command-swap)
+* [procs](10-icinga-template-library.md#plugin-check-command-processes)
+* [users](10-icinga-template-library.md#plugin-check-command-users)
+* [running_kernel](10-icinga-template-library.md#plugin-contrib-command-running_kernel)
+* package management: [apt](10-icinga-template-library.md#plugin-check-command-apt), [yum](10-icinga-template-library.md#plugin-contrib-command-yum), etc.
+* [ssh](10-icinga-template-library.md#plugin-check-command-ssh)
+* performance: [iostat](10-icinga-template-library.md#plugin-contrib-command-iostat), [check_sar_perf](https://github.com/NETWAYS/check-sar-perf)
+
+### Windows Monitoring <a id="service-monitoring-windows"></a>
+
+!!! important
+
+ [Icinga for Windows](https://icinga.com/docs/icinga-for-windows/latest/doc/000-Introduction/)
+ is the recommended way to monitor Windows via Icinga 2.
+ Even if the plugins it ships out-of-the-box don't already cover your needs, you can
+ [create your own](https://icinga.com/docs/icinga-for-windows/latest/doc/900-Developer-Guide/11-Custom-Plugins/).
+
+Other (legacy) solutions include:
+
+* [check_wmi_plus](https://edcint.co.nz/checkwmiplus/)
+* [NSClient++](https://www.nsclient.org) (in combination with the Icinga 2 client and either [check_nscp_api](10-icinga-template-library.md#nscp-check-api) or [nscp-local](10-icinga-template-library.md#nscp-plugin-check-commands) check commands)
+* [Icinga 2 Windows Plugins](10-icinga-template-library.md#windows-plugins) (disk, load, memory, network, performance counters, ping, procs, service, swap, updates, uptime, users)
+* vbs and Powershell scripts
+
+### Database Monitoring <a id="service-monitoring-database"></a>
+
+* MySQL/MariaDB: [mysql_health](10-icinga-template-library.md#plugin-contrib-command-mysql_health), [mysql](10-icinga-template-library.md#plugin-check-command-mysql), [mysql_query](10-icinga-template-library.md#plugin-check-command-mysql-query)
+* PostgreSQL: [postgres](10-icinga-template-library.md#plugin-contrib-command-postgres)
+* Oracle: [oracle_health](10-icinga-template-library.md#plugin-contrib-command-oracle_health)
+* MSSQL: [mssql_health](10-icinga-template-library.md#plugin-contrib-command-mssql_health)
+* DB2: [db2_health](10-icinga-template-library.md#plugin-contrib-command-db2_health)
+* MongoDB: [mongodb](10-icinga-template-library.md#plugin-contrib-command-mongodb)
+* Elasticsearch: [elasticsearch](10-icinga-template-library.md#plugin-contrib-command-elasticsearch)
+* Redis: [redis](10-icinga-template-library.md#plugin-contrib-command-redis)
+
+### SNMP Monitoring <a id="service-monitoring-snmp"></a>
+
+* [Manubulon plugins](10-icinga-template-library.md#snmp-manubulon-plugin-check-commands) (interface, storage, load, memory, process)
+* [snmp](10-icinga-template-library.md#plugin-check-command-snmp), [snmpv3](10-icinga-template-library.md#plugin-check-command-snmpv3)
+
+### Network Monitoring <a id="service-monitoring-network"></a>
+
+* [nwc_health](10-icinga-template-library.md#plugin-contrib-command-nwc_health)
+* [interfaces](10-icinga-template-library.md#plugin-contrib-command-interfaces)
+* [interfacetable](10-icinga-template-library.md#plugin-contrib-command-interfacetable)
+* [iftraffic](10-icinga-template-library.md#plugin-contrib-command-iftraffic), [iftraffic64](10-icinga-template-library.md#plugin-contrib-command-iftraffic64)
+
+### Web Monitoring <a id="service-monitoring-web"></a>
+
+* [http](10-icinga-template-library.md#plugin-check-command-http)
+* [ftp](10-icinga-template-library.md#plugin-check-command-ftp)
+* [webinject](10-icinga-template-library.md#plugin-contrib-command-webinject)
+* [squid](10-icinga-template-library.md#plugin-contrib-command-squid)
+* [apache-status](10-icinga-template-library.md#plugin-contrib-command-apache-status)
+* [nginx_status](10-icinga-template-library.md#plugin-contrib-command-nginx_status)
+* [kdc](10-icinga-template-library.md#plugin-contrib-command-kdc)
+* [rbl](10-icinga-template-library.md#plugin-contrib-command-rbl)
+
+* [Icinga Certificate Monitoring](https://icinga.com/products/icinga-certificate-monitoring/)
+
+### Java Monitoring <a id="service-monitoring-java"></a>
+
+* [jmx4perl](10-icinga-template-library.md#plugin-contrib-command-jmx4perl)
+
+### DNS Monitoring <a id="service-monitoring-dns"></a>
+
+* [dns](10-icinga-template-library.md#plugin-check-command-dns)
+* [dig](10-icinga-template-library.md#plugin-check-command-dig)
+* [dhcp](10-icinga-template-library.md#plugin-check-command-dhcp)
+
+### Backup Monitoring <a id="service-monitoring-backup"></a>
+
+* [check_bareos](https://github.com/widhalmt/check_bareos)
+
+### Log Monitoring <a id="service-monitoring-log"></a>
+
+* [check_logfiles](https://labs.consol.de/nagios/check_logfiles/)
+* [check_logstash](https://github.com/NETWAYS/check_logstash)
+* [check_graylog2_stream](https://github.com/Graylog2/check-graylog2-stream)
+
+### Virtualization Monitoring <a id="service-monitoring-virtualization"></a>
+
+#### VMware Monitoring <a id="service-monitoring-virtualization-vmware"></a>
+
+* [Icinga Module for vSphere](https://icinga.com/products/icinga-module-for-vsphere/)
+* [esxi_hardware](10-icinga-template-library.md#plugin-contrib-command-esxi-hardware)
+* [VMware](10-icinga-template-library.md#plugin-contrib-vmware)
+
+**Tip**: If you are encountering timeouts using the VMware Perl SDK,
+check [this blog entry](https://www.claudiokuenzler.com/blog/650/slow-vmware-perl-sdk-soap-request-error-libwww-version).
+Ubuntu 16.04 LTS can have trouble with random entropy in Perl, as discussed [here](https://monitoring-portal.org/t/check-vmware-api-slow-when-run-multiple-times/2868).
+In that case, [haveged](https://issihosts.com/haveged/) may help.
+
+### SAP Monitoring <a id="service-monitoring-sap"></a>
+
+* [check_sap_health](https://labs.consol.de/nagios/check_sap_health/index.html)
+* [SAP CCMS](https://sourceforge.net/projects/nagios-sap-ccms/)
+
+### Mail Monitoring <a id="service-monitoring-mail"></a>
+
+* [smtp](10-icinga-template-library.md#plugin-check-command-smtp), [ssmtp](10-icinga-template-library.md#plugin-check-command-ssmtp)
+* [imap](10-icinga-template-library.md#plugin-check-command-imap), [simap](10-icinga-template-library.md#plugin-check-command-simap)
+* [pop](10-icinga-template-library.md#plugin-check-command-pop), [spop](10-icinga-template-library.md#plugin-check-command-spop)
+* [mailq](10-icinga-template-library.md#plugin-check-command-mailq)
+
+### Hardware Monitoring <a id="service-monitoring-hardware"></a>
+
+* [hpasm](10-icinga-template-library.md#plugin-contrib-command-hpasm)
+* [ipmi-sensor](10-icinga-template-library.md#plugin-contrib-command-ipmi-sensor)
+
+### Metrics Monitoring <a id="service-monitoring-metrics"></a>
+
+* [graphite](10-icinga-template-library.md#plugin-contrib-command-graphite)
diff --git a/doc/06-distributed-monitoring.md b/doc/06-distributed-monitoring.md
new file mode 100644
index 0000000..5d4db15
--- /dev/null
+++ b/doc/06-distributed-monitoring.md
@@ -0,0 +1,3516 @@
+# Distributed Monitoring with Master, Satellites and Agents <a id="distributed-monitoring"></a>
+
+This chapter will guide you through the setup of a distributed monitoring
+environment, including high-availability clustering and setup details
+for Icinga masters, satellites and agents.
+
+## Roles: Master, Satellites and Agents <a id="distributed-monitoring-roles"></a>
+
+Icinga 2 nodes can be given names for easier understanding:
+
+* A `master` node which is on top of the hierarchy.
+* A `satellite` node which is a child of a `satellite` or `master` node.
+* An `agent` node which is connected to `master` and/or `satellite` nodes.
+
+![Icinga 2 Distributed Roles](images/distributed-monitoring/icinga2_distributed_monitoring_roles.png)
+
+Rephrasing this picture into more details:
+
+* A `master` node has no parent node.
+ * A `master` node is where you usually install Icinga Web 2.
+ * A `master` node can combine executed checks from child nodes into backends and notifications.
+* A `satellite` node has a parent and a child node.
+ * A `satellite` node may execute checks on its own or delegate check execution to child nodes.
+ * A `satellite` node can receive configuration for hosts/services, etc. from the parent node.
+ * A `satellite` node continues to run even if the master node is temporarily unavailable.
+* An `agent` node only has a parent node.
+ * An `agent` node will either run its own configured checks or receive command execution events from the parent node.
+
+A client can be a secondary master, a satellite or an agent. It
+typically requests something from the primary master or parent node.
+
+The following sections will refer to these roles and explain the
+differences and the possibilities this kind of setup offers.
+
+> **Note**
+>
+> Previous versions of this documentation used the term `Icinga client`.
+> This has been refined into `Icinga agent` and is visible in the docs,
+> backends and web interfaces.
+
+**Tip**: If you just want to install a single master node that monitors several hosts
+(i.e. Icinga agents), continue reading -- we'll start with
+simple examples.
+In case you are planning a huge cluster setup with multiple levels and
+lots of satellites and agents, read on -- we'll deal with these cases later on.
+
+The installation on each system is the same: Follow the [installation instructions](02-installation.md)
+for the Icinga 2 package and the required check plugins.
+
+The required configuration steps are mostly happening
+on the command line. You can also [automate the setup](06-distributed-monitoring.md#distributed-monitoring-automation).
+
+The first thing you need to learn about a distributed setup is the hierarchy of its individual components.
+
+## Zones <a id="distributed-monitoring-zones"></a>
+
+The Icinga 2 hierarchy consists of so-called [zone](09-object-types.md#objecttype-zone) objects.
+Zones depend on a parent-child relationship in order to trust each other.
+
+![Icinga 2 Distributed Zones](images/distributed-monitoring/icinga2_distributed_monitoring_zones.png)
+
+Have a look at this example for the `satellite` zones which have the `master` zone as a parent zone:
+
+```
+object Zone "master" {
+ //...
+}
+
+object Zone "satellite region 1" {
+ parent = "master"
+ //...
+}
+
+object Zone "satellite region 2" {
+ parent = "master"
+ //...
+}
+```
+
+There are certain limitations for child zones, e.g. their members are not allowed
+to send configuration commands to the parent zone members. Vice versa, the
+trust hierarchy allows for example the `master` zone to send
+configuration files to the `satellite` zone. Read more about this
+in the [security section](06-distributed-monitoring.md#distributed-monitoring-security).
+
+`agent` nodes also have their own unique zone. By convention you
+must use the FQDN for the zone name.
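+
+A zone for such an agent could look like the following sketch; the FQDN is a placeholder and the parent may also be a satellite zone:
+
+```
+object Zone "icinga2-agent1.localdomain" {
+  endpoints = [ "icinga2-agent1.localdomain" ]
+  parent = "master"
+}
+```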
+
+## Endpoints <a id="distributed-monitoring-endpoints"></a>
+
+Nodes which are a member of a zone are so-called [Endpoint](09-object-types.md#objecttype-endpoint) objects.
+
+![Icinga 2 Distributed Endpoints](images/distributed-monitoring/icinga2_distributed_monitoring_endpoints.png)
+
+Here is an example configuration for two endpoints in different zones:
+
+```
+object Endpoint "icinga2-master1.localdomain" {
+ host = "192.168.56.101"
+}
+
+object Endpoint "icinga2-satellite1.localdomain" {
+ host = "192.168.56.105"
+}
+
+object Zone "master" {
+ endpoints = [ "icinga2-master1.localdomain" ]
+}
+
+object Zone "satellite" {
+ endpoints = [ "icinga2-satellite1.localdomain" ]
+ parent = "master"
+}
+```
+
+All endpoints in the same zone work as a high-availability setup. For
+example, if you have two nodes in the `master` zone, they will load-balance the check execution.
+
+Endpoint objects are important for specifying the connection
+information, e.g. if the master should actively try to connect to an agent.
+
+The zone membership is defined inside the `Zone` object definition using
+the `endpoints` attribute with an array of `Endpoint` names.
+
+> **Note**
+>
+> There is a known [problem](https://github.com/Icinga/icinga2/issues/3533)
+> with >2 endpoints in a zone and a message routing loop.
+> The config validation will log a warning to let you know about this too.
+
+If you want to check the availability (e.g. ping checks) of the node
+you still need a [Host](09-object-types.md#objecttype-host) object.
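+
+For example, a minimal Host object for the satellite endpoint shown above could look like this (reusing the example address; `hostalive` is provided by the ITL):
+
+```
+object Host "icinga2-satellite1.localdomain" {
+  check_command = "hostalive"
+  address = "192.168.56.105"
+}
+```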
+
+## ApiListener <a id="distributed-monitoring-apilistener"></a>
+
+In case you are using the CLI commands later, you don't have to write
+this configuration from scratch in a text editor.
+The [ApiListener](09-object-types.md#objecttype-apilistener) object is
+used to load the TLS certificates and specify restrictions, e.g.
+for accepting configuration commands.
+
+It is also used for the [Icinga 2 REST API](12-icinga2-api.md#icinga2-api) which shares
+the same host and port with the Icinga 2 Cluster protocol.
+
+The object configuration is stored in the `/etc/icinga2/features-enabled/api.conf`
+file. Depending on the configuration mode the attributes `accept_commands`
+and `accept_config` can be configured here.
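+
+A minimal sketch of such an `api.conf` is shown below. Whether `accept_commands` and `accept_config` are set to `true` depends on the configuration mode you choose later:
+
+```
+object ApiListener "api" {
+  accept_commands = true
+  accept_config = true
+}
+```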
+
+In order to use the `api` feature you need to enable it and restart Icinga 2.
+
+```bash
+icinga2 feature enable api
+```
+
+## Conventions <a id="distributed-monitoring-conventions"></a>
+
+By convention all nodes should be configured using their FQDN.
+
+Furthermore, you must ensure that the following names
+are exactly the same in all configuration files:
+
+* Host certificate common name (CN).
+* Endpoint configuration object for the host.
+* NodeName constant for the local host.
+
+Setting this up on the command line will help you to minimize the effort.
+Just keep in mind that you need to use the FQDN for endpoints and for
+common names when asked.
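+
+For example, for a hypothetical node `icinga2-agent1.localdomain` the following three places must carry exactly the same name:
+
+```
+// Certificate: CN = icinga2-agent1.localdomain
+
+// zones.conf
+object Endpoint "icinga2-agent1.localdomain" {
+}
+
+// constants.conf
+const NodeName = "icinga2-agent1.localdomain"
+```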
+
+## Security <a id="distributed-monitoring-security"></a>
+
+While there are certain mechanisms to ensure a secure communication between all
+nodes (firewalls, policies, software hardening, etc.), Icinga 2 also provides
+additional security:
+
+* TLS v1.2+ is required.
+* TLS cipher lists are hardened [by default](09-object-types.md#objecttype-apilistener).
+* TLS certificates are mandatory for communication between nodes. The CLI command wizards
+help you create these certificates.
+* Child zones only receive updates (check results, commands, etc.) for their configured objects.
+* Child zones are not allowed to push configuration updates to parent zones.
+* Zones cannot interfere with other zones and influence each other. Each checkable host or service object is assigned to **one zone** only.
+* All nodes in a zone trust each other.
+* [Config sync](06-distributed-monitoring.md#distributed-monitoring-top-down-config-sync) and [remote command endpoint execution](06-distributed-monitoring.md#distributed-monitoring-top-down-command-endpoint) is disabled by default.
+
+The underlying protocol uses JSON-RPC event notifications exchanged by nodes.
+The connection is secured by TLS. The message protocol uses an internal API,
+and as such message types and names may change internally and are not documented.
+
+Zones build the trust relationship in a distributed environment. If you do not define
+a zone for an agent/satellite and set its parent zone, the parent zone's members (e.g. the master instance)
+won't trust the agent/satellite.
+
+Building this trust is key in your distributed environment. That way the parent node
+knows that it is able to send messages to the child zone, e.g. configuration objects,
+configuration in global zones, commands to be executed in this zone/for this endpoint.
+It also receives check results from the child zone for checkable objects (host/service).
+
+Vice versa, the agent/satellite trusts the master and accepts configuration and commands if enabled
+in the api feature. If an agent/satellite were to send configuration to the parent zone, the parent nodes
+would deny it. The parent zone is the configuration entity and does not trust agents/satellites in this matter.
+An agent/satellite could, for example, attempt to modify a different agent/satellite or inject a check command
+with malicious code.
+
+While it may sound complicated for agent/satellite setups, it removes the problem with different roles
+and configurations for a master and child nodes. Both of them work the same way, are configured
+in the same way (Zone, Endpoint, ApiListener), and you can troubleshoot and debug them in just one go.
+
+## Versions and Upgrade <a id="distributed-monitoring-versions-upgrade"></a>
+
+It is generally advised to use the newest releases with the same version on all instances.
+Prior to upgrading, make sure to plan a maintenance window.
+
+The Icinga project aims to allow the following compatibility:
+
+```
+master (2.11) >= satellite (2.10) >= agent (2.9)
+```
+
+Older agent versions may work, but there's no guarantee. Always keep in mind that
+older versions are out of support and can contain bugs.
+
+In terms of an upgrade, ensure that the master is upgraded first, then
+involved satellites, and last the Icinga agents. If you are on v2.10
+currently, first upgrade the master instance(s) to 2.11, and then proceed
+with the satellites. Things are getting easier with any sort of automation
+tool (Puppet, Ansible, etc.).
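+
+On package-based installations the upgrade itself boils down to the usual package commands, run on the master(s) first, then on the satellites and finally on the agents. A sketch for a Debian/Ubuntu node, assuming the official package repositories are configured:
+
+```bash
+apt update
+apt install --only-upgrade icinga2
+systemctl restart icinga2
+```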
+
+Releases and new features may require you to upgrade master/satellite instances at once;
+this is highlighted in the [upgrading docs](16-upgrading-icinga-2.md#upgrading-icinga-2) where needed.
+One example is the CA Proxy and on-demand signing feature
+available since v2.8 where all involved instances need this version
+to function properly.
+
+## Master Setup <a id="distributed-monitoring-setup-master"></a>
+
+This section explains how to install a central single master node using
+the `node wizard` command. If you prefer to do an automated installation, please
+refer to the [automated setup](06-distributed-monitoring.md#distributed-monitoring-automation) section.
+
+Follow the [installation instructions](02-installation.md) for the Icinga 2 package and the required
+check plugins if you haven't done so already.
+
+**Note**: Windows is not supported for a master node setup.
+
+The next step is to run the `node wizard` CLI command. Prior to that,
+make sure to collect the required information:
+
+ Parameter | Description
+ --------------------|--------------------
+ Common name (CN) | **Required.** By convention this should be the host's FQDN. Defaults to the FQDN.
+ Master zone name | **Optional.** Allows to specify the master zone name. Defaults to `master`.
+ Global zones | **Optional.** Allows to specify more global zones in addition to `global-templates` and `director-global`. Defaults to `n`.
+ API bind host | **Optional.** Allows to specify the address the ApiListener is bound to. For advanced usage only.
+ API bind port | **Optional.** Allows to specify the port the ApiListener is bound to. For advanced usage only (requires changing the default port 5665 everywhere).
+ Disable conf.d | **Optional.** Allows to disable the `include_recursive "conf.d"` directive except for the `api-users.conf` file in the `icinga2.conf` file. Defaults to `y`. Configuration on the master is discussed below.
+
+The setup wizard will ensure that the following steps are taken:
+
+* Enable the `api` feature.
+* Generate a new certificate authority (CA) in `/var/lib/icinga2/ca` if it doesn't exist.
+* Create a certificate for this node signed by the CA key.
+* Update the [zones.conf](04-configuration.md#zones-conf) file with the new zone hierarchy.
+* Update the [ApiListener](06-distributed-monitoring.md#distributed-monitoring-apilistener) and [constants](04-configuration.md#constants-conf) configuration.
+* Update the [icinga2.conf](04-configuration.md#icinga2-conf) to disable the `conf.d` inclusion, and add the `api-users.conf` file inclusion.
+
+Here is an example of a master setup for the `icinga2-master1.localdomain` node on CentOS 7:
+
+```
+[root@icinga2-master1.localdomain /]# icinga2 node wizard
+
+Welcome to the Icinga 2 Setup Wizard!
+
+We will guide you through all required configuration details.
+
+Please specify if this is a satellite/agent setup ('n' installs a master setup) [Y/n]: n
+
+Starting the Master setup routine...
+
+Please specify the common name (CN) [icinga2-master1.localdomain]: icinga2-master1.localdomain
+Reconfiguring Icinga...
+Checking for existing certificates for common name 'icinga2-master1.localdomain'...
+Certificates not yet generated. Running 'api setup' now.
+Generating master configuration for Icinga 2.
+Enabling feature api. Make sure to restart Icinga 2 for these changes to take effect.
+
+Master zone name [master]:
+
+Default global zones: global-templates director-global
+Do you want to specify additional global zones? [y/N]: N
+
+Please specify the API bind host/port (optional):
+Bind Host []:
+Bind Port []:
+
+Do you want to disable the inclusion of the conf.d directory [Y/n]:
+Disabling the inclusion of the conf.d directory...
+Checking if the api-users.conf file exists...
+
+Done.
+
+Now restart your Icinga 2 daemon to finish the installation!
+```
+
+You can verify that the CA public and private keys are stored in the `/var/lib/icinga2/ca` directory.
+Keep this path secure and include it in your backups.
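+
+A quick check on the master could look like this; the file names are those created by the setup wizard:
+
+```bash
+ls -l /var/lib/icinga2/ca
+# ca.crt  ca.key
+```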
+
+In case you lose the CA private key, you have to generate a new CA for signing new agent/satellite
+certificate requests. You then also have to re-create signed certificates for all
+existing nodes.
+
+Once the master setup is complete, you can also use this node as primary [CSR auto-signing](06-distributed-monitoring.md#distributed-monitoring-setup-csr-auto-signing)
+master. The following section will explain how agents and satellites use the CLI commands to fetch their
+signed certificates from this master node.
+
+## Signing Certificates on the Master <a id="distributed-monitoring-setup-sign-certificates-master"></a>
+
+All certificates must be signed by the same certificate authority (CA). This ensures
+that all nodes trust each other in a distributed monitoring environment.
+
+This CA is generated during the [master setup](06-distributed-monitoring.md#distributed-monitoring-setup-master)
+and should be the same on all master instances.
+
+You can avoid signing and deploying certificates [manually](06-distributed-monitoring.md#distributed-monitoring-advanced-hints-certificates-manual)
+by using built-in methods for auto-signing certificate signing requests (CSR):
+
+* [CSR Auto-Signing](06-distributed-monitoring.md#distributed-monitoring-setup-csr-auto-signing) which uses a client (an agent or a satellite) ticket generated on the master as trust identifier.
+* [On-Demand CSR Signing](06-distributed-monitoring.md#distributed-monitoring-setup-on-demand-csr-signing) which allows to sign pending certificate requests on the master.
+
+Both methods are described in detail below.
+
+> **Note**
+>
+> [On-Demand CSR Signing](06-distributed-monitoring.md#distributed-monitoring-setup-on-demand-csr-signing) is available in Icinga 2 v2.8+.
+
+### CSR Auto-Signing <a id="distributed-monitoring-setup-csr-auto-signing"></a>
+
+A client can be a secondary master, a satellite or an agent. It sends a certificate signing request (CSR)
+and must authenticate itself in a trusted way. The master generates a client ticket which is included in this request.
+That way the master can verify that the request matches the previously trusted ticket
+and sign the request.
+
+> **Note**
+>
+> Icinga 2 v2.8 added the possibility to forward signing requests on a satellite
+> to the master node. This is called `CA Proxy` in blog posts and design drafts.
+> This functionality helps with the setup of [three level clusters](06-distributed-monitoring.md#distributed-monitoring-scenarios-master-satellite-agents)
+> and more.
+
+Advantages:
+
+* Nodes (secondary master, satellites, agents) can be installed by different users who have received the client ticket.
+* No manual interaction necessary on the master node.
+* Automation tools like Puppet, Ansible, etc. can retrieve the pre-generated ticket in their client catalog
+and run the node setup directly.
+
+Disadvantages:
+
+* Tickets need to be generated on the master and copied to client setup wizards.
+* No central signing management.
+
+#### CSR Auto-Signing: Preparation <a id="distributed-monitoring-setup-csr-auto-signing-preparation"></a>
+
+Prior to using this mode, ensure that the following steps are taken on
+the signing master:
+
+* The [master setup](06-distributed-monitoring.md#distributed-monitoring-setup-master) was run successfully. This includes:
+ * Generated a CA key pair
+ * Generated a private ticket salt stored in the `TicketSalt` constant, set as `ticket_salt` attribute inside the [api](09-object-types.md#objecttype-apilistener) feature.
+* Restart of the master instance.
+
+#### CSR Auto-Signing: On the master <a id="distributed-monitoring-setup-csr-auto-signing-master"></a>
+
+Setup wizards for agent/satellite nodes will ask you for this specific client ticket.
+
+There are two possible ways to retrieve the ticket:
+
+* [CLI command](11-cli-commands.md#cli-command-pki) executed on the master node.
+* [REST API](12-icinga2-api.md#icinga2-api) request against the master node.
+
+
+Required information:
+
+ Parameter | Description
+ --------------------|--------------------
+ Common name (CN) | **Required.** The common name for the agent/satellite. By convention this should be the FQDN.
+
+The following example shows how to generate a ticket on the master node `icinga2-master1.localdomain` for the agent `icinga2-agent1.localdomain`:
+
+```
+[root@icinga2-master1.localdomain /]# icinga2 pki ticket --cn icinga2-agent1.localdomain
+```
+
+Querying the [Icinga 2 API](12-icinga2-api.md#icinga2-api) on the master requires an [ApiUser](12-icinga2-api.md#icinga2-api-authentication)
+object with at least the `actions/generate-ticket` permission.
+
+```
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/conf.d/api-users.conf
+
+object ApiUser "client-pki-ticket" {
+ password = "bea11beb7b810ea9ce6ea" //change this
+ permissions = [ "actions/generate-ticket" ]
+}
+
+[root@icinga2-master1.localdomain /]# systemctl restart icinga2
+```
+
+Retrieve the ticket on the master node `icinga2-master1.localdomain` with `curl`, for example:
+
+```
+[root@icinga2-master1.localdomain /]# curl -k -s -u client-pki-ticket:bea11beb7b810ea9ce6ea -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/generate-ticket' -d '{ "cn": "icinga2-agent1.localdomain" }'
+```
+
+Store that ticket number for the [agent/satellite setup](06-distributed-monitoring.md#distributed-monitoring-setup-agent-satellite) below.
+
+> **Note**
+>
+> Never expose the ticket salt and/or ApiUser credentials to your client nodes.
+> Example: Retrieve the ticket on the Puppet master node and send the compiled catalog
+> to the authorized Puppet agent node which will invoke the
+> [automated setup steps](06-distributed-monitoring.md#distributed-monitoring-automation-cli-node-setup).
+
+
+### On-Demand CSR Signing <a id="distributed-monitoring-setup-on-demand-csr-signing"></a>
+
+The client can be a secondary master, satellite or agent.
+It sends a certificate signing request to the specified parent node without any
+ticket. The admin on the primary master is responsible for reviewing and signing the requests
+with the private CA key.
+
+This could either be directly the master, or a satellite which forwards the request
+to the signing master.
+
+Advantages:
+
+* Central certificate request signing management.
+* No pre-generated ticket is required for client setups.
+
+Disadvantages:
+
+* Asynchronous step for automated deployments.
+* Needs client verification on the master.
+
+#### On-Demand CSR Signing: Preparation <a id="distributed-monitoring-setup-on-demand-csr-signing-preparation"></a>
+
+Prior to using this mode, ensure that the following steps are taken on
+the signing master:
+
+* The [master setup](06-distributed-monitoring.md#distributed-monitoring-setup-master) was run successfully. This includes:
+ * Generated a CA key pair
+* Restart of the master instance.
+
+#### On-Demand CSR Signing: On the master <a id="distributed-monitoring-setup-on-demand-csr-signing-master"></a>
+
+You can list pending certificate signing requests with the `ca list` CLI command.
+
+```
+[root@icinga2-master1.localdomain /]# icinga2 ca list
+Fingerprint | Timestamp | Signed | Subject
+-----------------------------------------------------------------|---------------------|--------|--------
+71700c28445109416dd7102038962ac3fd421fbb349a6e7303b6033ec1772850 | 2017/09/06 17:20:02 | | CN = icinga2-agent2.localdomain
+```
+
+In order to show all requests, use the `--all` parameter.
+
+```
+[root@icinga2-master1.localdomain /]# icinga2 ca list --all
+Fingerprint | Timestamp | Signed | Subject
+-----------------------------------------------------------------|---------------------|--------|--------
+403da5b228df384f07f980f45ba50202529cded7c8182abf96740660caa09727 | 2017/09/06 17:02:40 | * | CN = icinga2-agent1.localdomain
+71700c28445109416dd7102038962ac3fd421fbb349a6e7303b6033ec1772850 | 2017/09/06 17:20:02 | | CN = icinga2-agent2.localdomain
+```
+
+**Tip**: Add `--json` to the CLI command to retrieve the details in JSON format.
+
+If you want to sign a specific request, you need to use the `ca sign` CLI command
+and pass its fingerprint as argument.
+
+```
+[root@icinga2-master1.localdomain /]# icinga2 ca sign 71700c28445109416dd7102038962ac3fd421fbb349a6e7303b6033ec1772850
+information/cli: Signed certificate for 'CN = icinga2-agent2.localdomain'.
+```
+
+> **Note**
+>
+> `ca list` cannot be used as a historical inventory. Certificate
+> signing requests older than 1 week are automatically deleted.
+
+You can also remove an undesired CSR with the `ca remove` command, using the
+same syntax as the `ca sign` command.
+
+```
+[root@pym ~]# icinga2 ca remove 5c31ca0e2269c10363a97e40e3f2b2cd56493f9194d5b1852541b835970da46e
+information/cli: Certificate 5c31ca0e2269c10363a97e40e3f2b2cd56493f9194d5b1852541b835970da46e removed.
+```
+
+If you want to restore a certificate you have removed, you can use `ca restore`.
+
+<!-- Keep this for compatibility -->
+<a id="distributed-monitoring-setup-satellite-client"></a>
+
+## Agent/Satellite Setup <a id="distributed-monitoring-setup-agent-satellite"></a>
+
+This section describes the setup of an agent or satellite connected to an
+existing master node setup. If you haven't done so already, please [run the master setup](06-distributed-monitoring.md#distributed-monitoring-setup-master).
+
+Icinga 2 on the master node must be running and accepting connections on port `5665`.
+
+<!-- Keep this for compatibility -->
+<a id="distributed-monitoring-setup-client-linux"></a>
+
+### Agent/Satellite Setup on Linux <a id="distributed-monitoring-setup-agent-satellite-linux"></a>
+
+Please ensure that you've run all the steps mentioned in the [agent/satellite section](06-distributed-monitoring.md#distributed-monitoring-setup-agent-satellite).
+
+Follow the [installation instructions](02-installation.md) for the Icinga 2 package and the required
+check plugins if you haven't done so already.
+
+The next step is to run the `node wizard` CLI command.
+
+In this example we're generating a ticket on the master node `icinga2-master1.localdomain` for the agent `icinga2-agent1.localdomain`:
+
+```
+[root@icinga2-master1.localdomain /]# icinga2 pki ticket --cn icinga2-agent1.localdomain
+4f75d2ecd253575fe9180938ebff7cbca262f96e
+```
+
+Note: You don't need this step if you have chosen to use [On-Demand CSR Signing](06-distributed-monitoring.md#distributed-monitoring-setup-on-demand-csr-signing).
+
+Start the wizard on the agent `icinga2-agent1.localdomain`:
+
+```
+[root@icinga2-agent1.localdomain /]# icinga2 node wizard
+
+Welcome to the Icinga 2 Setup Wizard!
+
+We will guide you through all required configuration details.
+```
+
+Press `Enter` or type `y` to start a satellite or agent setup.
+
+```
+Please specify if this is an agent/satellite setup ('n' installs a master setup) [Y/n]:
+```
+
+Press `Enter` to use the proposed name in brackets, or enter a specific common name (CN). By convention
+this should be the FQDN.
+
+```
+Starting the Agent/Satellite setup routine...
+
+Please specify the common name (CN) [icinga2-agent1.localdomain]: icinga2-agent1.localdomain
+```
+
+Specify the direct parent for this node. This could be your primary master `icinga2-master1.localdomain`
+or a satellite node in a multi level cluster scenario.
+
+```
+Please specify the parent endpoint(s) (master or satellite) where this node should connect to:
+Master/Satellite Common Name (CN from your master/satellite node): icinga2-master1.localdomain
+```
+
+Press `Enter` or choose `y` to establish a connection to the parent node.
+
+```
+Do you want to establish a connection to the parent node from this node? [Y/n]:
+```
+
+> **Note:**
+>
+> If this node cannot connect to the parent node, choose `n`. The setup
+> wizard will provide instructions for this scenario -- signing questions are disabled then.
+
+Add the connection details for `icinga2-master1.localdomain`.
+
+```
+Please specify the master/satellite connection information:
+Master/Satellite endpoint host (IP address or FQDN): 192.168.56.101
+Master/Satellite endpoint port [5665]: 5665
+```
+
+You can add more parent nodes if necessary. Press `Enter` or choose `n`
+if you don't want to add any. This comes in handy if you have more than one
+parent node, e.g. two masters or two satellites.
+
+```
+Add more master/satellite endpoints? [y/N]:
+```
+
+Verify the parent node's certificate:
+
+```
+Parent certificate information:
+
+ Subject: CN = icinga2-master1.localdomain
+ Issuer: CN = Icinga CA
+ Valid From: Sep 7 13:41:24 2017 GMT
+ Valid Until: Sep 3 13:41:24 2032 GMT
+ Fingerprint: AC 99 8B 2B 3D B0 01 00 E5 21 FA 05 2E EC D5 A9 EF 9E AA E3
+
+Is this information correct? [y/N]: y
+```
+
+The setup wizard fetches the parent node's certificate and asks
+you to verify this information. This is to prevent MITM attacks or
+any kind of untrusted parent relationship.
+
+You can verify the fingerprint by running the following command on the node you want to connect to:
+
+```bash
+openssl x509 -noout -fingerprint -sha256 -in \
+ "/var/lib/icinga2/certs/$(hostname --fqdn).crt"
+```
+
+Note: The certificate is not fetched if you have chosen not to connect
+to the parent node.
+
+Proceed with adding the optional client ticket for [CSR auto-signing](06-distributed-monitoring.md#distributed-monitoring-setup-csr-auto-signing):
+
+```
+Please specify the request ticket generated on your Icinga 2 master (optional).
+ (Hint: # icinga2 pki ticket --cn 'icinga2-agent1.localdomain'):
+4f75d2ecd253575fe9180938ebff7cbca262f96e
+```
+
+In case you've chosen to use [On-Demand CSR Signing](06-distributed-monitoring.md#distributed-monitoring-setup-on-demand-csr-signing)
+you can leave the ticket question blank.
+
+Instead, Icinga 2 tells you to approve the request later on the master node.
+
+```
+No ticket was specified. Please approve the certificate signing request manually
+on the master (see 'icinga2 ca list' and 'icinga2 ca sign --help' for details).
+```
+
+You can optionally specify a different bind host and/or port.
+
+```
+Please specify the API bind host/port (optional):
+Bind Host []:
+Bind Port []:
+```
+
+The next step asks you to accept configuration (required for [config sync mode](06-distributed-monitoring.md#distributed-monitoring-top-down-config-sync))
+and commands (required for [command endpoint mode](06-distributed-monitoring.md#distributed-monitoring-top-down-command-endpoint)).
+
+```
+Accept config from parent node? [y/N]: y
+Accept commands from parent node? [y/N]: y
+```
+
+Next you can optionally specify the local and parent zone names. This will be reflected
+in the generated zone configuration file.
+
+Set the local zone name to something else, if you are installing a satellite or secondary master instance.
+
+```
+Local zone name [icinga2-agent1.localdomain]:
+```
+
+Set the parent zone name to something other than `master` if this agent connects to a satellite instance instead of the master.
+
+```
+Parent zone name [master]:
+```
+
+You can add more global zones in addition to `global-templates` and `director-global` if necessary.
+Press `Enter` or choose `n` if you don't want to add any additional global zones.
+
+```
+Reconfiguring Icinga...
+
+Default global zones: global-templates director-global
+Do you want to specify additional global zones? [y/N]: N
+```
+
+Last but not least, the wizard asks whether you want to disable the inclusion of the local `conf.d`
+configuration directory. This defaults to disabled, as agents are either checked via command endpoint or
+receive configuration synced from the parent zone.
+
+```
+Do you want to disable the inclusion of the conf.d directory [Y/n]: Y
+Disabling the inclusion of the conf.d directory...
+```
+
+
+The wizard proceeds and you are good to go.
+
+```
+Done.
+
+Now restart your Icinga 2 daemon to finish the installation!
+```
+
+> **Note**
+>
+> If you have chosen not to connect to the parent node, you cannot start
+> Icinga 2 yet. The wizard asked you to manually copy the master's public
+> CA certificate file into `/var/lib/icinga2/certs/ca.crt`.
+>
+> You need to [manually sign the CSR on the master node](06-distributed-monitoring.md#distributed-monitoring-setup-on-demand-csr-signing-master).
+
+Restart Icinga 2 as requested.
+
+```
+[root@icinga2-agent1.localdomain /]# systemctl restart icinga2
+```
+
+Here is an overview of all parameters in detail:
+
+ Parameter | Description
+ --------------------|--------------------
+ Common name (CN) | **Required.** By convention this should be the host's FQDN. Defaults to the FQDN.
+ Master common name | **Required.** Use the common name you've specified for your master node before.
+ Establish connection to the parent node | **Optional.** Whether the node should attempt to connect to the parent node or not. Defaults to `y`.
+ Master/Satellite endpoint host | **Required if the agent needs to connect to the master/satellite.** The parent endpoint's IP address or FQDN. This information is included in the `Endpoint` object configuration in the `zones.conf` file.
+ Master/Satellite endpoint port | **Optional if the agent needs to connect to the master/satellite.** The parent endpoint's listening port. This information is included in the `Endpoint` object configuration.
+ Add more master/satellite endpoints | **Optional.** If you have multiple master/satellite nodes configured, add them here.
+ Parent Certificate information | **Required.** Verify that the connecting host really is the requested master node.
+ Request ticket | **Optional.** Add the [ticket](06-distributed-monitoring.md#distributed-monitoring-setup-csr-auto-signing) generated on the master.
+ API bind host | **Optional.** Allows to specify the address the ApiListener is bound to. For advanced usage only.
+ API bind port | **Optional.** Allows to specify the port the ApiListener is bound to. For advanced usage only (requires changing the default port 5665 everywhere).
+ Accept config | **Optional.** Whether this node accepts configuration sync from the master node (required for [config sync mode](06-distributed-monitoring.md#distributed-monitoring-top-down-config-sync)). For [security reasons](06-distributed-monitoring.md#distributed-monitoring-security) this defaults to `n`.
+ Accept commands | **Optional.** Whether this node accepts command execution messages from the master node (required for [command endpoint mode](06-distributed-monitoring.md#distributed-monitoring-top-down-command-endpoint)). For [security reasons](06-distributed-monitoring.md#distributed-monitoring-security) this defaults to `n`.
+ Local zone name | **Optional.** Allows to specify the name for the local zone. This comes in handy when this instance is a satellite, not an agent. Defaults to the FQDN.
+ Parent zone name | **Optional.** Allows to specify the name for the parent zone. This is important if the agent has a satellite instance as parent, not the master. Defaults to `master`.
+ Global zones | **Optional.** Allows to specify more global zones in addition to `global-templates` and `director-global`. Defaults to `n`.
+ Disable conf.d | **Optional.** Allows to disable the inclusion of the `conf.d` directory which holds local example configuration. Clients should retrieve their configuration from the parent node, or act as command endpoint execution bridge. Defaults to `y`.
+
+The setup wizard will ensure that the following steps are taken:
+
+* Enable the `api` feature.
+* Create a certificate signing request (CSR) for the local node.
+* Request a signed certificate (optional with the provided ticket number) on the master node.
+* Allow to verify the parent node's certificate.
+* Store the signed agent/satellite certificate and ca.crt in `/var/lib/icinga2/certs`.
+* Update the `zones.conf` file with the new zone hierarchy.
+* Update `/etc/icinga2/features-enabled/api.conf` (`accept_config`, `accept_commands`) and `constants.conf`.
+* Update `/etc/icinga2/icinga2.conf` and comment out `include_recursive "conf.d"`.
+
+You can verify that the certificate files are stored in the `/var/lib/icinga2/certs` directory.
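+
+For the example agent name used above, the directory should contain something like this:
+
+```bash
+ls -1 /var/lib/icinga2/certs
+# ca.crt
+# icinga2-agent1.localdomain.crt
+# icinga2-agent1.localdomain.key
+```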
+
+> **Note**
+>
+> If the agent is not directly connected to the certificate signing master,
+> signing requests and responses might need some minutes to fully update the agent certificates.
+>
+> If you have chosen to use [On-Demand CSR Signing](06-distributed-monitoring.md#distributed-monitoring-setup-on-demand-csr-signing)
+> certificates need to be signed on the master first. Ticket-less setups require at least Icinga 2 v2.8+ on all involved instances.
+
+Now that you've successfully installed a Linux/Unix agent/satellite instance, please proceed to
+the [configuration modes](06-distributed-monitoring.md#distributed-monitoring-configuration-modes).
+
+
+<!-- Keep this for compatibility -->
+<a id="distributed-monitoring-setup-client-windows"></a>
+
+### Agent Setup on Windows <a id="distributed-monitoring-setup-agent-windows"></a>
+
+!!! important
+
+ [Icinga for Windows](https://icinga.com/docs/icinga-for-windows/latest/doc/000-Introduction/)
+ is the recommended way to install, setup and update Icinga 2 on Windows.
+ This section describes the classic installation and configuration procedure.
+
+The supported Windows agent versions are listed [here](https://icinga.com/subscription/support-details/).
+
+Requirements:
+
+* [Microsoft .NET Framework 4.6](https://www.microsoft.com/en-US/download/details.aspx?id=53344) or higher. This is the default on Windows Server 2016 or later.
+* [Universal C Runtime for Windows](https://support.microsoft.com/en-us/help/2999226/update-for-universal-c-runtime-in-windows) for Windows Server 2012 and older.
+
+#### Agent Setup on Windows: Installer <a id="distributed-monitoring-setup-agent-windows-installer"></a>
+
+Download the MSI-Installer package from [https://packages.icinga.com/windows/](https://packages.icinga.com/windows/).
+The preferred flavor is `x86_64` for modern Windows systems.
+
+The Windows package provides native [monitoring plugin binaries](06-distributed-monitoring.md#distributed-monitoring-windows-plugins)
+to get you started more easily.
+
+> **Note**
+>
+> Please note that Icinga 2 was designed to run as a light-weight agent on Windows.
+> There is no support for satellite instances.
+
+Run the MSI-Installer package and follow the instructions shown in the screenshots.
+
+![Icinga 2 Windows Setup](images/distributed-monitoring/icinga2_windows_setup_installer_01.png)
+![Icinga 2 Windows Setup](images/distributed-monitoring/icinga2_windows_setup_installer_02.png)
+![Icinga 2 Windows Setup](images/distributed-monitoring/icinga2_windows_setup_installer_03.png)
+![Icinga 2 Windows Setup](images/distributed-monitoring/icinga2_windows_setup_installer_04.png)
+![Icinga 2 Windows Setup](images/distributed-monitoring/icinga2_windows_setup_installer_05.png)
+
+The graphical installer offers to run the [Icinga Agent setup wizard](06-distributed-monitoring.md#distributed-monitoring-setup-agent-windows-configuration-wizard)
+after the installation. Select the check box to proceed.
+
+> **Tip**
+>
+> You can also run the Icinga agent setup wizard from the Start menu later.
+
+#### Agent Setup on Windows: Configuration Wizard <a id="distributed-monitoring-setup-agent-windows-configuration-wizard"></a>
+
+On a fresh installation the setup wizard guides you through the initial configuration.
+It also provides a mechanism to send a certificate request to the [CSR signing master](06-distributed-monitoring.md#distributed-monitoring-setup-sign-certificates-master).
+
+The following configuration details are required:
+
+ Parameter | Description
+ --------------------|--------------------
+ Instance name | **Required.** By convention this should be the host's FQDN. Defaults to the FQDN.
+ Setup ticket | **Optional.** Paste the previously generated [ticket number](06-distributed-monitoring.md#distributed-monitoring-setup-csr-auto-signing). If left blank, the certificate request must be [signed on the master node](06-distributed-monitoring.md#distributed-monitoring-setup-on-demand-csr-signing).
+
+Fill in the required information and click `Add` to add a new master connection.
+
+![Icinga 2 Windows Setup](images/distributed-monitoring/icinga2_windows_setup_wizard_01.png)
+
+Add the following details:
+
+ Parameter | Description
+ -------------------------------|-------------------------------
+ Instance name | **Required.** The name of the master/satellite endpoint which this agent is a direct child of.
+ Master/Satellite endpoint host | **Required.** The master or satellite's IP address or FQDN. This information is included in the `Endpoint` object configuration in the `zones.conf` file.
+ Master/Satellite endpoint port | **Optional.** The master or satellite's listening port. This information is included in the `Endpoint` object configuration.
+
+![Icinga 2 Windows Setup](images/distributed-monitoring/icinga2_windows_setup_wizard_02.png)
+
+When needed you can add an additional global zone (the zones `global-templates` and `director-global` are added by default):
+
+![Icinga 2 Windows Setup](images/distributed-monitoring/icinga2_windows_setup_wizard_02_global_zone.png)
+
+Optionally enable the following settings:
+
+ Parameter | Description
+ --------------------------------------------------------|----------------------------------
+ Accept commands from master/satellite instance(s) | **Optional.** Whether this node accepts command execution messages from the master node (required for [command endpoint mode](06-distributed-monitoring.md#distributed-monitoring-top-down-command-endpoint)). For [security reasons](06-distributed-monitoring.md#distributed-monitoring-security) this is disabled by default.
+ Accept config updates from master/satellite instance(s) | **Optional.** Whether this node accepts configuration sync from the master node (required for [config sync mode](06-distributed-monitoring.md#distributed-monitoring-top-down-config-sync)). For [security reasons](06-distributed-monitoring.md#distributed-monitoring-security) this is disabled by default.
+ Run Icinga 2 service as this user | **Optional.** Specify a different Windows user. This defaults to `NT AUTHORITY\Network Service` and is required for more privileged service checks.
+ Disable including local 'conf.d' directory | **Optional.** Allows to disable the `include_recursive "conf.d"` directive except for the `api-users.conf` file in the `icinga2.conf` file. Defaults to `true`.
+
+![Icinga 2 Windows Setup](images/distributed-monitoring/icinga2_windows_setup_wizard_03.png)
+
+Verify the certificate from the master/satellite instance this node should connect to.
+
+![Icinga 2 Windows Setup](images/distributed-monitoring/icinga2_windows_setup_wizard_04.png)
+
+
+#### Finish Windows Agent Setup <a id="distributed-monitoring-setup-agent-windows-finish"></a>
+
+Finish the Windows setup wizard.
+
+![Icinga 2 Windows Setup](images/distributed-monitoring/icinga2_windows_setup_wizard_06_finish_with_ticket.png)
+
+If you did not provide a setup ticket, you need to sign the certificate request on the master.
+The setup wizard tells you to do so. The Icinga 2 service is already running at this point
+and will automatically receive and update a signed client certificate.
+
+![Icinga 2 Windows Setup](images/distributed-monitoring/icinga2_windows_setup_wizard_06_finish_no_ticket.png)
+
+Icinga 2 is automatically started as a Windows service.
+
+![Icinga 2 Windows Setup](images/distributed-monitoring/icinga2_windows_running_service.png)
+
+The Icinga 2 configuration is stored inside the `C:\ProgramData\icinga2` directory.
+Click `Examine Config` in the setup wizard to open a new Explorer window.
+
+![Icinga 2 Windows Setup](images/distributed-monitoring/icinga2_windows_setup_wizard_examine_config.png)
+
+The configuration files can be modified with your favorite editor, e.g. Notepad++ or vim in PowerShell (via Chocolatey).
+
+In order to use the [top down](06-distributed-monitoring.md#distributed-monitoring-top-down) agent
+configuration, prepare the following steps.
+
+You don't need any local configuration on the agent except for
+CheckCommand definitions which can be synced using the global zone
+above. Therefore disable the inclusion of the `conf.d` directory
+in the `icinga2.conf` file.
+
+Navigate to `C:\ProgramData\icinga2\etc\icinga2` and open
+the `icinga2.conf` file in your preferred editor. Remove or comment (`//`)
+the following line:
+
+```
+// Commented out, not required on an agent with top down mode
+//include_recursive "conf.d"
+```
+
+> **Note**
+>
+> Packages >= 2.9 provide an option in the setup wizard to disable this.
+> Defaults to disabled.
+
+To validate the configuration on Windows, open an administrative PowerShell
+and run the following command:
+
+```
+C:\> cd C:\Program Files\ICINGA2\sbin
+
+C:\Program Files\ICINGA2\sbin> .\icinga2.exe daemon -C
+```
+
+**Note**: You have to run this command in a shell with `administrator` privileges.
+
+Now you need to restart the Icinga 2 service. Run `services.msc` from the start menu and restart the `icinga2` service.
+Alternatively open an administrative Powershell and run the following commands:
+
+```
+C:\> Restart-Service icinga2
+
+C:\> Get-Service icinga2
+```
+
+Now that you've successfully installed a Windows agent, please proceed to
+the [detailed configuration modes](06-distributed-monitoring.md#distributed-monitoring-configuration-modes).
+
+
+## Configuration Modes <a id="distributed-monitoring-configuration-modes"></a>
+
+There are different ways to ensure that the Icinga 2 cluster nodes execute
+checks, send notifications, etc.
+
+The preferred method is to configure monitoring objects on the master
+and distribute the configuration to satellites and agents.
+
+The following chapters explain this in detail with hands-on manual configuration
+examples. You should test and implement this once to fully understand how it works.
+
+Once you are familiar with Icinga 2 and distributed monitoring, you
+can start with additional integrations to manage and deploy your
+configuration:
+
+* [Icinga Director](https://icinga.com/docs/director/latest/) provides a web interface to manage configuration and also allows to sync imported resources (CMDB, PuppetDB, etc.)
+* [Ansible Roles](https://icinga.com/products/integrations/)
+* [Puppet Module](https://icinga.com/products/integrations/puppet/)
+* [Chef Cookbook](https://icinga.com/products/integrations/chef/)
+
+More details can be found [here](13-addons.md#configuration-tools).
+
+### Top Down <a id="distributed-monitoring-top-down"></a>
+
+There are two different behaviors with check execution:
+
+* Send a command execution event remotely: The scheduler still runs on the parent node.
+* Sync the host/service objects directly to the child node: Checks are executed locally.
+
+Again, technically it does not matter whether this is an `agent` or a `satellite`
+which is receiving configuration or command execution events.
+
+### Top Down Command Endpoint <a id="distributed-monitoring-top-down-command-endpoint"></a>
+
+This mode forces the Icinga 2 node to execute commands remotely on a specified endpoint.
+The host/service object configuration is located on the master/satellite and the agent only
+needs the CheckCommand object definitions available.
+
+Every endpoint has its own remote check queue. The amount of checks executed simultaneously
+can be limited on the endpoint with the `MaxConcurrentChecks` constant defined in [constants.conf](04-configuration.md#constants-conf). Icinga 2 may discard check requests,
+if the remote check queue is full.
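+
+If you need to adjust this limit on an agent, override the constant in its [constants.conf](04-configuration.md#constants-conf); the value below is only an example:
+
+```
+const MaxConcurrentChecks = 128
+```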
+
+![Icinga 2 Distributed Top Down Command Endpoint](images/distributed-monitoring/icinga2_distributed_monitoring_agent_checks_command_endpoint.png)
+
+Advantages:
+
+* No local checks need to be defined on the child node (agent).
+* Light-weight remote check execution (asynchronous events).
+* No [replay log](06-distributed-monitoring.md#distributed-monitoring-advanced-hints-command-endpoint-log-duration) is necessary for the child node.
+* Pin checks to specific endpoints (if the child zone consists of 2 endpoints).
+
+Disadvantages:
+
+* If the child node is not connected, no more checks are executed.
+* Requires additional configuration attribute specified in host/service objects.
+* Requires local `CheckCommand` object configuration. Best practice is to use a [global config zone](06-distributed-monitoring.md#distributed-monitoring-global-zone-config-sync).
+
+To make sure that all nodes involved will accept configuration and/or
+commands, you need to configure the `Zone` and `Endpoint` hierarchy
+on all nodes.
+
+* `icinga2-master1.localdomain` is the configuration master in this scenario.
+* `icinga2-agent1.localdomain` acts as agent which receives command execution messages via command endpoint from the master. In addition, it receives the global check command configuration from the master.
+
+Include the endpoint and zone configuration on **both** nodes in the file `/etc/icinga2/zones.conf`.
+
+The endpoint configuration could look like this, for example:
+
+```
+[root@icinga2-agent1.localdomain /]# vim /etc/icinga2/zones.conf
+
+object Endpoint "icinga2-master1.localdomain" {
+ host = "192.168.56.101"
+}
+
+object Endpoint "icinga2-agent1.localdomain" {
+ host = "192.168.56.111"
+ log_duration = 0 // Disable the replay log for command endpoint agents
+}
+```
+
+Next, you need to define two zones. There is no strict naming convention; best practice is to either use `master`, `satellite`/`agent-fqdn` or to choose region names, for example `Europe`, `USA` and `Asia`.
+
+**Note**: Each agent requires its own zone and endpoint configuration. Best practice
+is to use the agent's FQDN for all object names.
+
+The `master` zone is a parent of the `icinga2-agent1.localdomain` zone:
+
+```
+[root@icinga2-agent1.localdomain /]# vim /etc/icinga2/zones.conf
+
+object Zone "master" {
+ endpoints = [ "icinga2-master1.localdomain" ] //array with endpoint names
+}
+
+object Zone "icinga2-agent1.localdomain" {
+ endpoints = [ "icinga2-agent1.localdomain" ]
+
+ parent = "master" //establish zone hierarchy
+}
+```
+
+You don't need any local configuration on the agent except for
+CheckCommand definitions which can be synced using the global zone
+above. Therefore disable the inclusion of the `conf.d` directory
+in `/etc/icinga2/icinga2.conf`.
+
+```
+[root@icinga2-agent1.localdomain /]# vim /etc/icinga2/icinga2.conf
+
+// Commented out, not required on an agent as command endpoint
+//include_recursive "conf.d"
+```
+
+> **Note**
+>
+> Packages >= 2.9 provide an option in the setup wizard to disable this.
+> Defaults to disabled.
+
+Now it is time to validate the configuration and to restart the Icinga 2 daemon
+on both nodes.
+
+Example on CentOS 7:
+
+```
+[root@icinga2-agent1.localdomain /]# icinga2 daemon -C
+[root@icinga2-agent1.localdomain /]# systemctl restart icinga2
+
+[root@icinga2-master1.localdomain /]# icinga2 daemon -C
+[root@icinga2-master1.localdomain /]# systemctl restart icinga2
+```
+
+Once the agents have successfully connected, you are ready for the next step: **execute
+a remote check on the agent using the command endpoint**.
+
+Include the host and service object configuration in the `master` zone
+-- this makes it easier to add a secondary master for high availability later.
+
+```
+[root@icinga2-master1.localdomain /]# mkdir -p /etc/icinga2/zones.d/master
+```
+
+Add the host and service objects you want to monitor. There is
+no limitation for files and directories -- best practice is to
+sort things by type.
+
+By convention a master/satellite/agent host object should use the same name as the endpoint object.
+You can also add multiple hosts which execute checks against remote services/agents.
+
+The following example adds the `agent_endpoint` custom variable to the
+host and stores its name (FQDN). _Versions older than 2.11
+used the `client_endpoint` custom variable._
+
+This custom variable serves two purposes: 1) Service apply rules can match against it.
+2) Apply rules can retrieve its value and assign it to the `command_endpoint` attribute.
+
+```
+[root@icinga2-master1.localdomain /]# cd /etc/icinga2/zones.d/master
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/master]# vim hosts.conf
+
+object Host "icinga2-agent1.localdomain" {
+ check_command = "hostalive" //check is executed on the master
+ address = "192.168.56.111"
+
+ vars.agent_endpoint = name //follows the convention that host name == endpoint name
+}
+```
+
+Given that you are monitoring a Linux agent, add a remote [disk](10-icinga-template-library.md#plugin-check-command-disk)
+check.
+
+```
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/master]# vim services.conf
+
+apply Service "disk" {
+ check_command = "disk"
+
+ // Specify the remote agent as command execution endpoint, fetch the host custom variable
+ command_endpoint = host.vars.agent_endpoint
+
+ // Only assign where a host is marked as agent endpoint
+ assign where host.vars.agent_endpoint
+}
+```
+
+If you have your own custom `CheckCommand` definition, add it to the global zone:
+
+```
+[root@icinga2-master1.localdomain /]# mkdir -p /etc/icinga2/zones.d/global-templates
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.d/global-templates/commands.conf
+
+object CheckCommand "my-cmd" {
+ //...
+}
+```
+
+Save the changes and validate the configuration on the master node:
+
+```
+[root@icinga2-master1.localdomain /]# icinga2 daemon -C
+```
+
+Restart the Icinga 2 daemon (example for CentOS 7):
+
+```
+[root@icinga2-master1.localdomain /]# systemctl restart icinga2
+```
+
+The following steps will happen:
+
+* Icinga 2 validates the configuration on `icinga2-master1.localdomain` and restarts.
+* The `icinga2-master1.localdomain` node schedules and executes the checks.
+* The `icinga2-agent1.localdomain` node receives the execute command event with additional command parameters.
+* The `icinga2-agent1.localdomain` node maps the command parameters to the local check command, executes the check locally, and sends back the check result message.
+
+As you can see, no interaction from your side is required on the agent itself, and it's not necessary to reload the Icinga 2 service on the agent.
+
+You have learned the basics about command endpoint checks. Proceed with
+the [scenarios](06-distributed-monitoring.md#distributed-monitoring-scenarios)
+section where you can find detailed information on extending the setup.
+
+
+### Top Down Config Sync <a id="distributed-monitoring-top-down-config-sync"></a>
+
+This mode syncs the object configuration files within specified zones.
+It comes in handy if you want to configure everything on the master node
+and sync the satellite checks (disk, memory, etc.). The satellites run their
+own local scheduler and will send the check result messages back to the master.
+
+![Icinga 2 Distributed Top Down Config Sync](images/distributed-monitoring/icinga2_distributed_monitoring_satellite_config_sync.png)
+
+Advantages:
+
+* Sync the configuration files from the parent zone to the child zones.
+* No manual restart is required on the child nodes, as syncing, validation, and restarts happen automatically.
+* Execute checks directly on the child node's scheduler.
+* Replay log if the connection drops (important for keeping the check history in sync, e.g. for SLA reports).
+* Use a global zone for syncing templates, groups, etc.
+
+Disadvantages:
+
+* Requires a config directory on the master node with the zone name underneath `/etc/icinga2/zones.d`.
+* Additional zone and endpoint configuration needed.
+* Replay log is replicated on reconnect after connection loss. This might increase the data transfer and create an overload on the connection.
+
+> **Note**
+>
+> This mode only supports **configuration text files** for Icinga. Do not abuse
+> this for syncing binaries; this is not supported and may harm your production
+> environment. The config sync uses checksums to detect changes; binaries may
+> trigger reload loops.
+>
+> This is a fair warning. If you want to deploy plugin binaries, create
+> packages for dependency management and use infrastructure lifecycle tools
+> such as Foreman, Puppet, Ansible, etc.
+
+To make sure that all involved nodes accept configuration and/or
+commands, you need to configure the `Zone` and `Endpoint` hierarchy
+on all nodes.
+
+* `icinga2-master1.localdomain` is the configuration master in this scenario.
+* `icinga2-satellite1.localdomain` acts as satellite which receives configuration from the master. Checks are scheduled locally.
+
+Include the endpoint and zone configuration on **both** nodes in the file `/etc/icinga2/zones.conf`.
+
+The endpoint configuration could look like this:
+
+```
+[root@icinga2-satellite1.localdomain /]# vim /etc/icinga2/zones.conf
+
+object Endpoint "icinga2-master1.localdomain" {
+ host = "192.168.56.101"
+}
+
+object Endpoint "icinga2-satellite1.localdomain" {
+ host = "192.168.56.105"
+}
+```
+
+Next, you need to define two zones. There is no strict naming convention; best practice is to either use `master`, `satellite`/`agent-fqdn`, or to choose region names, for example `Europe`, `USA` and `Asia`.
+
+The `master` zone is a parent of the `satellite` zone:
+
+```
+[root@icinga2-satellite1.localdomain /]# vim /etc/icinga2/zones.conf
+
+object Zone "master" {
+ endpoints = [ "icinga2-master1.localdomain" ] //array with endpoint names
+}
+
+object Zone "satellite" {
+ endpoints = [ "icinga2-satellite1.localdomain" ]
+
+ parent = "master" //establish zone hierarchy
+}
+```
+
+Edit the `api` feature on the satellite `icinga2-satellite1.localdomain` in
+the `/etc/icinga2/features-enabled/api.conf` file and set
+`accept_config` to `true`.
+
+```
+[root@icinga2-satellite1.localdomain /]# vim /etc/icinga2/features-enabled/api.conf
+
+object ApiListener "api" {
+ //...
+ accept_config = true
+}
+```
+
+Now it is time to validate the configuration and to restart the Icinga 2 daemon
+on both nodes.
+
+Example on CentOS 7:
+
+```
+[root@icinga2-satellite1.localdomain /]# icinga2 daemon -C
+[root@icinga2-satellite1.localdomain /]# systemctl restart icinga2
+
+[root@icinga2-master1.localdomain /]# icinga2 daemon -C
+[root@icinga2-master1.localdomain /]# systemctl restart icinga2
+```
+
+**Tip**: Best practice is to use a [global zone](06-distributed-monitoring.md#distributed-monitoring-global-zone-config-sync)
+for common configuration items (check commands, templates, groups, etc.).
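+
+For example, a generic service template synced via the default `global-templates` zone
+could look like the following sketch (file name and values are illustrative):
+
+```
+[root@icinga2-master1.localdomain /]# mkdir -p /etc/icinga2/zones.d/global-templates
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.d/global-templates/templates.conf
+
+template Service "generic-service" {
+ max_check_attempts = 3
+ check_interval = 1m
+ retry_interval = 30s
+}
+```
+
+Objects synced to the `satellite` zone can then simply `import "generic-service"`.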
+
+Once the satellite(s) have connected successfully, it's time for the next step: **execute
+a local check on the satellite using the configuration sync**.
+
+Navigate to `/etc/icinga2/zones.d` on your master node
+`icinga2-master1.localdomain` and create a new directory with the same
+name as your satellite/agent zone name:
+
+```
+[root@icinga2-master1.localdomain /]# mkdir -p /etc/icinga2/zones.d/satellite
+```
+
+Add the host and service objects you want to monitor. There is
+no limitation for files and directories -- best practice is to
+sort things by type.
+
+By convention a master/satellite/agent host object should use the same name as the endpoint object.
+You can also add multiple hosts which execute checks against remote services/agents via [command endpoint](06-distributed-monitoring.md#distributed-monitoring-top-down-command-endpoint)
+checks.
+
+```
+[root@icinga2-master1.localdomain /]# cd /etc/icinga2/zones.d/satellite
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/satellite]# vim hosts.conf
+
+object Host "icinga2-satellite1.localdomain" {
+ check_command = "hostalive"
+ address = "192.168.56.112"
+ zone = "master" //optional trick: sync the required host object to the satellite, but enforce the "master" zone to execute the check
+}
+```
+
+Given that you are monitoring a Linux satellite, add a local [disk](10-icinga-template-library.md#plugin-check-command-disk)
+check.
+
+```
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/satellite]# vim services.conf
+
+object Service "disk" {
+ host_name = "icinga2-satellite1.localdomain"
+
+ check_command = "disk"
+}
+```
+
+Save the changes and validate the configuration on the master node:
+
+```
+[root@icinga2-master1.localdomain /]# icinga2 daemon -C
+```
+
+Restart the Icinga 2 daemon (example for CentOS 7):
+
+```
+[root@icinga2-master1.localdomain /]# systemctl restart icinga2
+```
+
+The following steps will happen:
+
+* Icinga 2 validates the configuration on `icinga2-master1.localdomain`.
+* Icinga 2 copies the configuration into its zone config store in `/var/lib/icinga2/api/zones`.
+* The `icinga2-master1.localdomain` node sends a config update event to all endpoints in the same or direct child zones.
+* The `icinga2-satellite1.localdomain` node accepts config and populates the local zone config store with the received config files.
+* The `icinga2-satellite1.localdomain` node validates the configuration and automatically restarts.
+
+Again, there is no interaction required on the satellite itself.
+
+You can also use the config sync inside a high-availability zone to
+ensure that all config objects are synced among zone members.
+
+**Note**: You can only have one so-called "config master" in a zone which stores
+the configuration in the `zones.d` directory.
+Multiple nodes with configuration files in the `zones.d` directory are
+**not supported**.
+
+Now that you've learned the basics about the configuration sync, proceed with
+the [scenarios](06-distributed-monitoring.md#distributed-monitoring-scenarios)
+section where you can find detailed information on extending the setup.
+
+
+
+If you are eager to start fresh instead, you might take a look at the
+[Icinga Director](https://icinga.com/docs/director/latest/).
+
+## Scenarios <a id="distributed-monitoring-scenarios"></a>
+
+The following examples should give you an idea on how to build your own
+distributed monitoring environment. We've seen them all in production
+environments and received feedback from our [community](https://community.icinga.com/)
+and [partner support](https://icinga.com/support/) channels:
+
+* [Single master with agents](06-distributed-monitoring.md#distributed-monitoring-master-agents).
+* [HA master with agents as command endpoint](06-distributed-monitoring.md#distributed-monitoring-scenarios-ha-master-agents)
+* [Three level cluster](06-distributed-monitoring.md#distributed-monitoring-scenarios-master-satellite-agents) with config HA masters, satellites receiving config sync, and agents checked using command endpoint.
+
+You can also extend the cluster tree depth to four levels, e.g. with two satellite levels.
+Just keep in mind that multiple levels become harder to debug in case of errors.
+
+You can also start with a single master setup, and later add a secondary
+master endpoint. This requires an extra step with the [initial sync](06-distributed-monitoring.md#distributed-monitoring-advanced-hints-initial-sync)
+for cloning the runtime state. This is described in detail [here](06-distributed-monitoring.md#distributed-monitoring-scenarios-ha-master-agents).
+
+<!-- Keep this for compatibility -->
+<a id="distributed-monitoring-master-clients"></a>
+
+### Master with Agents <a id="distributed-monitoring-master-agents"></a>
+
+In this scenario, a single master node runs the check scheduler, notifications
+and IDO database backend and uses the [command endpoint mode](06-distributed-monitoring.md#distributed-monitoring-top-down-command-endpoint)
+to execute checks on the remote agents.
+
+![Icinga 2 Distributed Master with Agents](images/distributed-monitoring/icinga2_distributed_monitoring_scenarios_master_with_agents.png)
+
+* `icinga2-master1.localdomain` is the primary master node.
+* `icinga2-agent1.localdomain` and `icinga2-agent2.localdomain` are two child nodes as agents.
+
+Setup requirements:
+
+* Set up `icinga2-master1.localdomain` as [master](06-distributed-monitoring.md#distributed-monitoring-setup-master).
+* Set up `icinga2-agent1.localdomain` and `icinga2-agent2.localdomain` as [agent](06-distributed-monitoring.md#distributed-monitoring-setup-agent-satellite).
+
+Edit the `zones.conf` configuration file on the master:
+
+```
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.conf
+
+object Endpoint "icinga2-master1.localdomain" {
+ // That's us
+}
+
+object Endpoint "icinga2-agent1.localdomain" {
+ host = "192.168.56.111" // The master actively tries to connect to the agent
+ log_duration = 0 // Disable the replay log for command endpoint agents
+}
+
+object Endpoint "icinga2-agent2.localdomain" {
+ host = "192.168.56.112" // The master actively tries to connect to the agent
+ log_duration = 0 // Disable the replay log for command endpoint agents
+}
+
+object Zone "master" {
+ endpoints = [ "icinga2-master1.localdomain" ]
+}
+
+object Zone "icinga2-agent1.localdomain" {
+ endpoints = [ "icinga2-agent1.localdomain" ]
+
+ parent = "master"
+}
+
+object Zone "icinga2-agent2.localdomain" {
+ endpoints = [ "icinga2-agent2.localdomain" ]
+
+ parent = "master"
+}
+
+/* sync global commands */
+object Zone "global-templates" {
+ global = true
+}
+object Zone "director-global" {
+ global = true
+}
+```
+
+The two agent nodes do not need to know about each other. The only important thing
+is that they know about the parent zone and their endpoint members (and optionally the global zone).
+
+If you specify the `host` attribute in the `icinga2-master1.localdomain` endpoint object,
+the agent will actively try to connect to the master node. Since you've already specified the
+`host` attribute for the agent endpoints on the master node, you don't want the agents to connect to the
+master as well. **Choose one [connection direction](06-distributed-monitoring.md#distributed-monitoring-advanced-hints-connection-direction).**
+
+```
+[root@icinga2-agent1.localdomain /]# vim /etc/icinga2/zones.conf
+
+object Endpoint "icinga2-master1.localdomain" {
+ // Do not actively connect to the master by leaving out the 'host' attribute
+}
+
+object Endpoint "icinga2-agent1.localdomain" {
+ // That's us
+}
+
+object Zone "master" {
+ endpoints = [ "icinga2-master1.localdomain" ]
+}
+
+object Zone "icinga2-agent1.localdomain" {
+ endpoints = [ "icinga2-agent1.localdomain" ]
+
+ parent = "master"
+}
+
+/* sync global commands */
+object Zone "global-templates" {
+ global = true
+}
+object Zone "director-global" {
+ global = true
+}
+```
+```
+[root@icinga2-agent2.localdomain /]# vim /etc/icinga2/zones.conf
+
+object Endpoint "icinga2-master1.localdomain" {
+ // Do not actively connect to the master by leaving out the 'host' attribute
+}
+
+object Endpoint "icinga2-agent2.localdomain" {
+ // That's us
+}
+
+object Zone "master" {
+ endpoints = [ "icinga2-master1.localdomain" ]
+}
+
+object Zone "icinga2-agent2.localdomain" {
+ endpoints = [ "icinga2-agent2.localdomain" ]
+
+ parent = "master"
+}
+
+/* sync global commands */
+object Zone "global-templates" {
+ global = true
+}
+object Zone "director-global" {
+ global = true
+}
+```
+
+Now it is time to define the two agent hosts and apply service checks using
+the command endpoint execution method on them. Note: You can also use the
+config sync mode here.
+
+Create a new configuration directory on the master node:
+
+```
+[root@icinga2-master1.localdomain /]# mkdir -p /etc/icinga2/zones.d/master
+```
+
+Add the two agent nodes as host objects:
+
+```
+[root@icinga2-master1.localdomain /]# cd /etc/icinga2/zones.d/master
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/master]# vim hosts.conf
+
+object Host "icinga2-agent1.localdomain" {
+ check_command = "hostalive"
+ address = "192.168.56.111"
+
+ vars.agent_endpoint = name //follows the convention that host name == endpoint name
+}
+
+object Host "icinga2-agent2.localdomain" {
+ check_command = "hostalive"
+ address = "192.168.56.112"
+
+ vars.agent_endpoint = name //follows the convention that host name == endpoint name
+}
+```
+
+Add services using command endpoint checks:
+
+```
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/master]# vim services.conf
+
+apply Service "ping4" {
+ check_command = "ping4"
+
+ //check is executed on the master node
+ assign where host.address
+}
+
+apply Service "disk" {
+ check_command = "disk"
+
+ // Execute the check on the remote command endpoint
+ command_endpoint = host.vars.agent_endpoint
+
+ // Assign the service onto an agent
+ assign where host.vars.agent_endpoint
+}
+```
+
+Validate the configuration and restart Icinga 2 on the master node `icinga2-master1.localdomain`.
+
+```
+[root@icinga2-master1.localdomain /]# icinga2 daemon -C
+[root@icinga2-master1.localdomain /]# systemctl restart icinga2
+```
+
+Open Icinga Web 2 and check the two newly created agent hosts with two new services
+-- one executed locally (`ping4`) and one using command endpoint (`disk`).
+
+> **Note**
+>
+> You don't necessarily need to add the agent endpoint/zone configuration objects
+> into the master's zones.conf file. Instead, you can put them into `/etc/icinga2/zones.d/master`
+> either in `hosts.conf` shown above, or in a new file called `agents.conf`.
+
+> **Tip**:
+>
+> It's a good idea to add [health checks](06-distributed-monitoring.md#distributed-monitoring-health-checks)
+> to make sure that your cluster notifies you in case of failure.
+
+In terms of health checks, consider adding the following for this scenario:
+
+- Master node(s) check the connection to the agents
+- Optional: Add dependencies for the agent host to prevent unwanted notifications when agents are unreachable
+
+Proceed in [this chapter](06-distributed-monitoring.md#distributed-monitoring-health-checks-master-agents).
+
+<!-- Keep this for compatibility -->
+<a id="distributed-monitoring-scenarios-ha-master-clients"></a>
+
+### High-Availability Master with Agents <a id="distributed-monitoring-scenarios-ha-master-agents"></a>
+
+This scenario is similar to the one in the [previous section](06-distributed-monitoring.md#distributed-monitoring-master-agents). The only difference is that we will now set up two master nodes in a high-availability setup.
+These nodes must be configured as zone and endpoints objects.
+
+![Icinga 2 Distributed High Availability Master with Agents](images/distributed-monitoring/icinga2_distributed_monitoring_scenario_ha_masters_with_agents.png)
+
+The setup uses the capabilities of the Icinga 2 cluster. All zone members
+replicate cluster events between each other. In addition to that, several Icinga 2
+features can enable [HA functionality](06-distributed-monitoring.md#distributed-monitoring-high-availability-features).
+
+Best practice is to run the database backend on a dedicated server/cluster and
+only expose a virtual IP address to Icinga and the IDO feature. By default, only one
+endpoint will then actively write to the backend. Typical setups for MySQL clusters
+involve Master-Master-Replication (Master-Slave-Replication in both directions) or Galera;
+more tips can be found on our [community forums](https://community.icinga.com/).
+The IDO object must have the same `instance_name` on all master nodes.
+
+**Note**: All nodes in the same zone require that you enable the same features for high-availability (HA).
+
+Overview:
+
+* `icinga2-master1.localdomain` is the config master (primary master node).
+* `icinga2-master2.localdomain` is the secondary master node without config in `zones.d`.
+* `icinga2-agent1.localdomain` and `icinga2-agent2.localdomain` are two child nodes as agents.
+
+Setup requirements:
+
+* Set up `icinga2-master1.localdomain` as [master](06-distributed-monitoring.md#distributed-monitoring-setup-master).
+* Set up `icinga2-master2.localdomain` as [satellite](06-distributed-monitoring.md#distributed-monitoring-setup-agent-satellite) (**we will modify the generated configuration**).
+* Set up `icinga2-agent1.localdomain` and `icinga2-agent2.localdomain` as [agents](06-distributed-monitoring.md#distributed-monitoring-setup-agent-satellite) (when asked for adding multiple masters, set to `y` and add the secondary master `icinga2-master2.localdomain`).
+
+In case you don't want to use the CLI commands, you can also manually create and sync the
+required TLS certificates. We will modify and discuss all the details of the automatically generated configuration here.
+
+Since there are now two nodes in the same zone, we must consider the
+[high-availability features](06-distributed-monitoring.md#distributed-monitoring-high-availability-features).
+
+* Checks and notifications are balanced between the two master nodes. That's fine, but it requires check plugins and notification scripts to exist on both nodes.
+* The IDO feature will only be active on one node by default. Since all events are replicated between both nodes, it is easier to just have one central database.
+
+One possibility is to use a dedicated MySQL cluster VIP (external application cluster)
+and leave the IDO feature with enabled HA capabilities. Alternatively,
+you can disable the HA feature and write to a local database on each node.
+Both methods require that you configure Icinga Web 2 accordingly (monitoring
+backend, IDO database, used transports, etc.).
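+
+A minimal sketch of the latter approach, assuming the MySQL IDO feature is enabled on both
+master nodes and each should write to its own local database:
+
+```
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/features-enabled/ido-mysql.conf
+
+object IdoMysqlConnection "ido-mysql" {
+ //...
+ enable_ha = false // Disable HA failover, write to the local database on this node
+}
+```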
+
+> **Note**
+>
+> You can also start with a single master as shown [here](06-distributed-monitoring.md#distributed-monitoring-master-agents) and later add
+> the second master. This requires an extra step with the [initial sync](06-distributed-monitoring.md#distributed-monitoring-advanced-hints-initial-sync)
+> for cloning the runtime state. Once done, proceed here.
+
+In this scenario, we are not adding the agent configuration immediately
+to the `zones.conf` file but will establish the hierarchy later.
+
+The first master looks like this:
+
+```
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.conf
+
+object Endpoint "icinga2-master1.localdomain" {
+ // That's us
+}
+
+object Endpoint "icinga2-master2.localdomain" {
+ host = "192.168.56.102" // Actively connect to the secondary master
+}
+
+object Zone "master" {
+ endpoints = [ "icinga2-master1.localdomain", "icinga2-master2.localdomain" ]
+}
+
+/* sync global commands */
+object Zone "global-templates" {
+ global = true
+}
+object Zone "director-global" {
+ global = true
+}
+```
+
+The secondary master waits for connection attempts from the first master,
+and therefore does not try to connect to it again.
+
+```
+[root@icinga2-master2.localdomain /]# vim /etc/icinga2/zones.conf
+
+object Endpoint "icinga2-master1.localdomain" {
+ // The first master already connects to us
+}
+
+object Endpoint "icinga2-master2.localdomain" {
+ // That's us
+}
+
+object Zone "master" {
+ endpoints = [ "icinga2-master1.localdomain", "icinga2-master2.localdomain" ]
+}
+
+/* sync global commands */
+object Zone "global-templates" {
+ global = true
+}
+object Zone "director-global" {
+ global = true
+}
+```
+
+Restart both masters and ensure the initial connection and TLS handshake works.
+
+The two agent nodes do not need to know about each other. The only important thing
+is that they know about the parent zone and their endpoint members (and optionally about the global zone).
+
+If you specify the `host` attribute in the `icinga2-master1.localdomain` and `icinga2-master2.localdomain`
+endpoint objects, the agent will actively try to connect to the master nodes. Since we've already specified the
+`host` attribute for the agent endpoints on the master nodes, we don't want the agents to connect to the
+master nodes as well. **Choose one [connection direction](06-distributed-monitoring.md#distributed-monitoring-advanced-hints-connection-direction).**
+
+```
+[root@icinga2-agent1.localdomain /]# vim /etc/icinga2/zones.conf
+
+object Endpoint "icinga2-master1.localdomain" {
+ // Do not actively connect to the master by leaving out the 'host' attribute
+}
+
+object Endpoint "icinga2-master2.localdomain" {
+ // Do not actively connect to the master by leaving out the 'host' attribute
+}
+
+object Endpoint "icinga2-agent1.localdomain" {
+ // That's us
+}
+
+object Zone "master" {
+ endpoints = [ "icinga2-master1.localdomain", "icinga2-master2.localdomain" ]
+}
+
+object Zone "icinga2-agent1.localdomain" {
+ endpoints = [ "icinga2-agent1.localdomain" ]
+
+ parent = "master"
+}
+
+/* sync global commands */
+object Zone "global-templates" {
+ global = true
+}
+object Zone "director-global" {
+ global = true
+}
+
+```
+
+```
+[root@icinga2-agent2.localdomain /]# vim /etc/icinga2/zones.conf
+
+object Endpoint "icinga2-master1.localdomain" {
+ // Do not actively connect to the master by leaving out the 'host' attribute
+}
+
+object Endpoint "icinga2-master2.localdomain" {
+ // Do not actively connect to the master by leaving out the 'host' attribute
+}
+
+object Endpoint "icinga2-agent2.localdomain" {
+ //That's us
+}
+
+object Zone "master" {
+ endpoints = [ "icinga2-master1.localdomain", "icinga2-master2.localdomain" ]
+}
+
+object Zone "icinga2-agent2.localdomain" {
+ endpoints = [ "icinga2-agent2.localdomain" ]
+
+ parent = "master"
+}
+
+/* sync global commands */
+object Zone "global-templates" {
+ global = true
+}
+object Zone "director-global" {
+ global = true
+}
+```
+
+Now it is time to define the two agent hosts and apply service checks using
+the command endpoint execution method.
+
+Create a new configuration directory on the master node `icinga2-master1.localdomain`.
+**Note**: The secondary master node `icinga2-master2.localdomain` receives the
+configuration using the [config sync mode](06-distributed-monitoring.md#distributed-monitoring-top-down-config-sync).
+
+```
+[root@icinga2-master1.localdomain /]# mkdir -p /etc/icinga2/zones.d/master
+```
+
+Add the two agent nodes with their zone/endpoint and host object configuration.
+
+> **Note**
+>
+> In order to keep things in sync between the two HA masters,
+> keep the `zones.conf` file as small as possible.
+>
+> You can create the agent zone and endpoint objects inside the
+> master zone and have them synced to the secondary master.
+> The cluster config sync enforces a reload allowing the secondary
+> master to connect to the agents as well.
+
+Edit the `zones.conf` file and ensure that the agent zone/endpoint objects
+are **not** specified in there.
+
+Then navigate into `/etc/icinga2/zones.d/master` and create a new file `agents.conf`.
+
+```
+[root@icinga2-master1.localdomain /]# cd /etc/icinga2/zones.d/master
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/master]# vim agents.conf
+
+//-----------------------------------------------
+// Endpoints
+
+object Endpoint "icinga2-agent1.localdomain" {
+ host = "192.168.56.111" // The master actively tries to connect to the agent
+ log_duration = 0 // Disable the replay log for command endpoint agents
+}
+
+object Endpoint "icinga2-agent2.localdomain" {
+ host = "192.168.56.112" // The master actively tries to connect to the agent
+ log_duration = 0 // Disable the replay log for command endpoint agents
+}
+
+//-----------------------------------------------
+// Zones
+
+object Zone "icinga2-agent1.localdomain" {
+ endpoints = [ "icinga2-agent1.localdomain" ]
+
+ parent = "master"
+}
+
+object Zone "icinga2-agent2.localdomain" {
+ endpoints = [ "icinga2-agent2.localdomain" ]
+
+ parent = "master"
+}
+```
+
+Whenever you add a new agent, edit the mentioned files.
+
+Next, create the corresponding host objects for the agents. Use the same names
+for host and endpoint objects.
+
+```
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/master]# vim hosts.conf
+
+object Host "icinga2-agent1.localdomain" {
+ check_command = "hostalive"
+ address = "192.168.56.111"
+ vars.agent_endpoint = name //follows the convention that host name == endpoint name
+}
+
+object Host "icinga2-agent2.localdomain" {
+ check_command = "hostalive"
+ address = "192.168.56.112"
+ vars.agent_endpoint = name //follows the convention that host name == endpoint name
+}
+```
+
+Add services using command endpoint checks:
+
+```
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/master]# vim services.conf
+
+apply Service "ping4" {
+ check_command = "ping4"
+
+ // Check is executed on the master node
+ assign where host.address
+}
+
+apply Service "disk" {
+ check_command = "disk"
+
+ // Check is executed on the remote command endpoint
+ command_endpoint = host.vars.agent_endpoint
+
+ assign where host.vars.agent_endpoint
+}
+```
+
+Validate the configuration and restart Icinga 2 on the master node `icinga2-master1.localdomain`.
+
+```
+[root@icinga2-master1.localdomain /]# icinga2 daemon -C
+[root@icinga2-master1.localdomain /]# systemctl restart icinga2
+```
+
+Open Icinga Web 2 and check the two newly created agent hosts with two new services
+-- one executed locally (`ping4`) and one using command endpoint (`disk`).
+
+> **Tip**:
+>
+> It's a good idea to add [health checks](06-distributed-monitoring.md#distributed-monitoring-health-checks)
+> to make sure that your cluster notifies you in case of failure.
+
+In terms of health checks, consider adding the following for this scenario:
+
+- Master node(s) check the connection to the agents
+- Optional: Add dependencies for the agent host to prevent unwanted notifications when agents are unreachable
+
+Proceed in [this chapter](06-distributed-monitoring.md#distributed-monitoring-health-checks-master-agents).
+
+<!-- Keep this for compatibility -->
+<a id="distributed-monitoring-scenarios-master-satellite-client"></a>
+
+### Three Levels with Masters, Satellites and Agents <a id="distributed-monitoring-scenarios-master-satellite-agents"></a>
+
+This scenario combines everything you've learned so far: High-availability masters,
+satellites receiving their configuration from the master zone, and agents checked via command
+endpoint from the satellite zones.
+
+![Icinga 2 Distributed Master and Satellites with Agents](images/distributed-monitoring/icinga2_distributed_monitoring_scenarios_master_satellites_agents.png)
+
+> **Tip**:
+>
+> It can get complicated, so grab a pen and paper and bring your thoughts to life.
+> Play around with a test setup before using it in a production environment!
+
+There are various reasons why you might want to have satellites in your environment. The following list explains the more common ones.
+
+* Monitor remote locations. Besides reducing connections and traffic between different locations this setup also helps when the network connection to the remote network is lost. Satellites will keep checking and collecting data on their own and will send their check results when the connection is restored.
+* Reduce connections between security zones. Satellites in a different zone (e.g. DMZ) than your masters will help reduce connections through firewalls.
+* Offload resource hungry checks to other hosts. In very big setups running lots of plugins on your masters or satellites might have a significant impact on the performance during times of high load. You can introduce another level of satellites just to run these plugins and send their results to the upstream hosts.
+
+Best practice is to run the database backend on a dedicated server/cluster and
+only expose a virtual IP address to Icinga and the IDO feature. By default, only one
+endpoint will actively write to the backend then. Typical setups for MySQL clusters
+involve Master-Master-Replication (Master-Slave-Replication in both directions) or Galera,
+more tips can be found on our [community forums](https://community.icinga.com/).
+
+Overview:
+
+* `icinga2-master1.localdomain` is the configuration master (primary master node).
+* `icinga2-master2.localdomain` is the secondary master node without configuration in `zones.d`.
+* `icinga2-satellite1.localdomain` and `icinga2-satellite2.localdomain` are satellite nodes in a `master` child zone. They forward CSR signing requests to the master zone.
+* `icinga2-agent1.localdomain` and `icinga2-agent2.localdomain` are two child nodes as agents.
+
+Setup requirements:
+
+* Set up `icinga2-master1.localdomain` as [master](06-distributed-monitoring.md#distributed-monitoring-setup-master).
+* Set up `icinga2-master2.localdomain`, `icinga2-satellite1.localdomain` and `icinga2-satellite2.localdomain` as [agents](06-distributed-monitoring.md#distributed-monitoring-setup-agent-satellite) (we will modify the generated configuration).
+* Set up `icinga2-agent1.localdomain` and `icinga2-agent2.localdomain` as [agents](06-distributed-monitoring.md#distributed-monitoring-setup-agent-satellite).
+
+When asked for the parent endpoint providing CSR auto-signing capabilities,
+please add one of the satellite nodes. **Note**: This requires Icinga 2 v2.8+
+and the `CA Proxy` on all master, satellite and agent nodes.
+
+Example for `icinga2-agent1.localdomain`:
+
+```
+Please specify the parent endpoint(s) (master or satellite) where this node should connect to:
+```
+
+Parent endpoint is the first satellite `icinga2-satellite1.localdomain`:
+
+```
+Master/Satellite Common Name (CN from your master/satellite node): icinga2-satellite1.localdomain
+Do you want to establish a connection to the parent node from this node? [Y/n]: y
+
+Please specify the master/satellite connection information:
+Master/Satellite endpoint host (IP address or FQDN): 192.168.56.105
+Master/Satellite endpoint port [5665]: 5665
+```
+
+Add the second satellite `icinga2-satellite2.localdomain` as parent:
+
+```
+Add more master/satellite endpoints? [y/N]: y
+
+Master/Satellite Common Name (CN from your master/satellite node): icinga2-satellite2.localdomain
+Do you want to establish a connection to the parent node from this node? [Y/n]: y
+
+Please specify the master/satellite connection information:
+Master/Satellite endpoint host (IP address or FQDN): 192.168.56.106
+Master/Satellite endpoint port [5665]: 5665
+
+Add more master/satellite endpoints? [y/N]: n
+```
+
+The specified parent nodes will forward the CSR signing request to the master instances.
+
+Proceed with adding the optional client ticket for [CSR auto-signing](06-distributed-monitoring.md#distributed-monitoring-setup-csr-auto-signing):
+
+```
+Please specify the request ticket generated on your Icinga 2 master (optional).
+ (Hint: # icinga2 pki ticket --cn 'icinga2-agent1.localdomain'):
+4f75d2ecd253575fe9180938ebff7cbca262f96e
+```
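+
+The ticket itself is generated beforehand on one of the master nodes, for example (a sketch;
+the CN must match the agent's FQDN and the printed ticket is what you paste into the wizard):
+
+```
+[root@icinga2-master1.localdomain /]# icinga2 pki ticket --cn 'icinga2-agent1.localdomain'
+```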
+
+In case you've chosen to use [On-Demand CSR Signing](06-distributed-monitoring.md#distributed-monitoring-setup-on-demand-csr-signing)
+you can leave the ticket question blank.
+
+Instead, Icinga 2 tells you to approve the request later on the master node.
+
+```
+No ticket was specified. Please approve the certificate signing request manually
+on the master (see 'icinga2 ca list' and 'icinga2 ca sign --help' for details).
+```
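+
+Approving the request later on one of the master nodes could then look like this
+(a sketch; replace the placeholder with the fingerprint shown by `icinga2 ca list`):
+
+```
+[root@icinga2-master1.localdomain /]# icinga2 ca list
+[root@icinga2-master1.localdomain /]# icinga2 ca sign <fingerprint>
+```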
+
+You can optionally specify a different bind host and/or port.
+
+```
+Please specify the API bind host/port (optional):
+Bind Host []:
+Bind Port []:
+```
+
+The next step asks you to accept configuration (required for [config sync mode](06-distributed-monitoring.md#distributed-monitoring-top-down-config-sync))
+and commands (required for [command endpoint mode](06-distributed-monitoring.md#distributed-monitoring-top-down-command-endpoint)).
+
+```
+Accept config from parent node? [y/N]: y
+Accept commands from parent node? [y/N]: y
+```
+
+Next you can optionally specify the local and parent zone names. This will be reflected
+in the generated zone configuration file.
+
+```
+Local zone name [icinga2-agent1.localdomain]: icinga2-agent1.localdomain
+```
+
+Set the parent zone name to `satellite` for this agent.
+
+```
+Parent zone name [master]: satellite
+```
+
+You can add more global zones in addition to `global-templates` and `director-global` if necessary.
+Press `Enter` or choose `n` if you don't want to add any additional global zones.
+
+```
+Reconfiguring Icinga...
+
+Default global zones: global-templates director-global
+Do you want to specify additional global zones? [y/N]: N
+```
+
+Last but not least, the wizard asks you whether you want to disable the inclusion of the local `conf.d`
+configuration directory. This defaults to disabled, since agents are checked via command endpoint and the example
+configuration would collide with this mode.
+
+```
+Do you want to disable the inclusion of the conf.d directory [Y/n]: Y
+Disabling the inclusion of the conf.d directory...
+```
+
+
+**We'll discuss the details of the required configuration below. Most of this
+configuration can be rendered by the setup wizards.**
+
+The zone hierarchy can look like this. We'll define only the directly connected zones here.
+
+The master instances should actively connect to the satellite instances, therefore
+the configuration on `icinga2-master1.localdomain` and `icinga2-master2.localdomain`
+must include the `host` attribute for the satellite endpoints:
+
+```
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.conf
+
+object Endpoint "icinga2-master1.localdomain" {
+ // That's us
+}
+
+object Endpoint "icinga2-master2.localdomain" {
+ host = "192.168.56.102" // Actively connect to the second master.
+}
+
+object Endpoint "icinga2-satellite1.localdomain" {
+ host = "192.168.56.105" // Actively connect to the satellites.
+}
+
+object Endpoint "icinga2-satellite2.localdomain" {
+ host = "192.168.56.106" // Actively connect to the satellites.
+}
+
+object Zone "master" {
+ endpoints = [ "icinga2-master1.localdomain", "icinga2-master2.localdomain" ]
+}
+```
+
+The endpoint configuration on the secondary master looks similar,
+but changes the connection attributes -- the first master already
+tries to connect, so there is no need for a second attempt.
+
+```
+[root@icinga2-master2.localdomain /]# vim /etc/icinga2/zones.conf
+
+object Endpoint "icinga2-master1.localdomain" {
+ // First master already connects to us
+}
+
+object Endpoint "icinga2-master2.localdomain" {
+ // That's us
+}
+
+object Endpoint "icinga2-satellite1.localdomain" {
+ host = "192.168.56.105" // Actively connect to the satellites.
+}
+
+object Endpoint "icinga2-satellite2.localdomain" {
+ host = "192.168.56.106" // Actively connect to the satellites.
+}
+```
+
+The zone configuration on both masters looks the same. Add this
+to the corresponding `zones.conf` entries for the endpoints.
+
+```
+object Zone "satellite" {
+ endpoints = [ "icinga2-satellite1.localdomain", "icinga2-satellite2.localdomain" ]
+
+ parent = "master"
+}
+
+/* sync global commands */
+object Zone "global-templates" {
+ global = true
+}
+
+object Zone "director-global" {
+ global = true
+}
+
+```
+
+In contrast to that, the satellite instances `icinga2-satellite1.localdomain`
+and `icinga2-satellite2.localdomain` should not actively connect to the master
+instances.
+
+```
+[root@icinga2-satellite1.localdomain /]# vim /etc/icinga2/zones.conf
+
+object Endpoint "icinga2-master1.localdomain" {
+ // This endpoint will connect to us
+}
+
+object Endpoint "icinga2-master2.localdomain" {
+ // This endpoint will connect to us
+}
+
+object Endpoint "icinga2-satellite1.localdomain" {
+ // That's us
+}
+
+object Endpoint "icinga2-satellite2.localdomain" {
+ host = "192.168.56.106" // Actively connect to the secondary satellite
+}
+```
+
+Again, only one side is required to establish the connection inside the HA zone.
+Since satellite1 already connects to satellite2, leave out the `host` attribute
+for `icinga2-satellite1.localdomain` on satellite2.
+
+```
+[root@icinga2-satellite2.localdomain /]# vim /etc/icinga2/zones.conf
+
+object Endpoint "icinga2-master1.localdomain" {
+ // This endpoint will connect to us
+}
+
+object Endpoint "icinga2-master2.localdomain" {
+ // This endpoint will connect to us
+}
+
+object Endpoint "icinga2-satellite1.localdomain" {
+ // First satellite already connects to us
+}
+
+object Endpoint "icinga2-satellite2.localdomain" {
+ // That's us
+}
+```
+
+The zone configuration on both satellites looks the same. Add this
+to the corresponding `zones.conf` entries for the endpoints.
+
+```
+object Zone "master" {
+ endpoints = [ "icinga2-master1.localdomain", "icinga2-master2.localdomain" ]
+}
+
+object Zone "satellite" {
+ endpoints = [ "icinga2-satellite1.localdomain", "icinga2-satellite2.localdomain" ]
+
+ parent = "master"
+}
+
+/* sync global commands */
+object Zone "global-templates" {
+ global = true
+}
+
+object Zone "director-global" {
+ global = true
+}
+```
+
+Keep in mind to control the endpoint [connection direction](06-distributed-monitoring.md#distributed-monitoring-advanced-hints-connection-direction)
+using the `host` attribute, also for other endpoints in the same zone.
+
+Since we want to use [top down command endpoint](06-distributed-monitoring.md#distributed-monitoring-top-down-command-endpoint) checks,
+we must configure the agent endpoint and zone objects.
+
+In order to minimize the effort, we'll sync the agent zone and endpoint configuration to the
+satellites where the connection information is needed as well. Note: This only works between satellites
+and agents, since there already is a trust relationship between the master and the satellite zone.
+The cluster config sync to the satellite invokes an automated reload, which triggers the connection attempts to the agents.
+
+`icinga2-master1.localdomain` is the configuration master where everything is stored:
+
+```
+[root@icinga2-master1.localdomain /]# mkdir -p /etc/icinga2/zones.d/{master,satellite,global-templates}
+[root@icinga2-master1.localdomain /]# cd /etc/icinga2/zones.d/satellite
+
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/satellite]# vim icinga2-agent1.localdomain.conf
+
+object Endpoint "icinga2-agent1.localdomain" {
+ host = "192.168.56.111" // The satellite actively tries to connect to the agent
+ log_duration = 0 // Disable the replay log for command endpoint agents
+}
+
+object Zone "icinga2-agent1.localdomain" {
+ endpoints = [ "icinga2-agent1.localdomain" ]
+
+ parent = "satellite"
+}
+
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/satellite]# vim icinga2-agent2.localdomain.conf
+
+object Endpoint "icinga2-agent2.localdomain" {
+ host = "192.168.56.112" // The satellite actively tries to connect to the agent
+ log_duration = 0 // Disable the replay log for command endpoint agents
+}
+
+object Zone "icinga2-agent2.localdomain" {
+ endpoints = [ "icinga2-agent2.localdomain" ]
+
+ parent = "satellite"
+}
+```
+
+The two agent nodes do not need to know about each other. The only important thing
+is that they know about the parent zone (the satellite) and their endpoint members (and optionally the global zone).
+
+> **Tip**
+>
+> In the example above we've specified the `host` attribute in the agent endpoint configuration. In this mode,
+> the satellites actively connect to the agents. This costs some resources on the satellite -- if you prefer to
+> offload the connection attempts to the agent, or your DMZ requires this, you can also change the **[connection direction](06-distributed-monitoring.md#distributed-monitoring-advanced-hints-connection-direction).**
+>
+> 1) Don't set the `host` attribute for the agent endpoints put into `zones.d/satellite`.
+> 2) Modify each agent's zones.conf file and add the `host` attribute to all parent satellite endpoints, as sketched below. You can automate this using the `node wizard/setup` CLI commands.
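+
+A minimal sketch of this reversed direction on the agent, reusing the satellite addresses from above:
+
+```
+[root@icinga2-agent1.localdomain /]# vim /etc/icinga2/zones.conf
+
+object Endpoint "icinga2-satellite1.localdomain" {
+ host = "192.168.56.105" // The agent actively connects to the satellite
+}
+
+object Endpoint "icinga2-satellite2.localdomain" {
+ host = "192.168.56.106" // The agent actively connects to the satellite
+}
+```
+
+This scenario, however, sticks with the default direction.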
+
+The agents are waiting for the satellites to connect, therefore they don't specify
+the `host` attribute in the endpoint objects locally.
+
+Example for `icinga2-agent1.localdomain`:
+
+```
+[root@icinga2-agent1.localdomain /]# vim /etc/icinga2/zones.conf
+
+object Endpoint "icinga2-satellite1.localdomain" {
+ // Do not actively connect to the satellite by leaving out the 'host' attribute
+}
+
+object Endpoint "icinga2-satellite2.localdomain" {
+ // Do not actively connect to the satellite by leaving out the 'host' attribute
+}
+
+object Endpoint "icinga2-agent1.localdomain" {
+ // That's us
+}
+
+object Zone "satellite" {
+ endpoints = [ "icinga2-satellite1.localdomain", "icinga2-satellite2.localdomain" ]
+}
+
+object Zone "icinga2-agent1.localdomain" {
+ endpoints = [ "icinga2-agent1.localdomain" ]
+
+ parent = "satellite"
+}
+
+/* sync global commands */
+object Zone "global-templates" {
+ global = true
+}
+
+object Zone "director-global" {
+ global = true
+}
+```
+
+Example for `icinga2-agent2.localdomain`:
+
+```
+[root@icinga2-agent2.localdomain /]# vim /etc/icinga2/zones.conf
+
+object Endpoint "icinga2-satellite1.localdomain" {
+ // Do not actively connect to the satellite by leaving out the 'host' attribute
+}
+
+object Endpoint "icinga2-satellite2.localdomain" {
+ // Do not actively connect to the satellite by leaving out the 'host' attribute
+}
+
+object Endpoint "icinga2-agent2.localdomain" {
+ // That's us
+}
+
+object Zone "satellite" {
+ endpoints = [ "icinga2-satellite1.localdomain", "icinga2-satellite2.localdomain" ]
+}
+
+object Zone "icinga2-agent2.localdomain" {
+ endpoints = [ "icinga2-agent2.localdomain" ]
+
+ parent = "satellite"
+}
+
+/* sync global commands */
+object Zone "global-templates" {
+ global = true
+}
+
+object Zone "director-global" {
+ global = true
+}
+```
+
+Now it is time to define the two agent hosts on the master, sync them to the satellites,
+and apply service checks to them using the command endpoint execution method.
+Add the two agent nodes as host objects to the `satellite` zone.
+
+We've already created the directories in `/etc/icinga2/zones.d` including the files for the
+zone and endpoint configuration for the agents.
+
+```
+[root@icinga2-master1.localdomain /]# cd /etc/icinga2/zones.d/satellite
+```
+
+Add the host object configuration for the `icinga2-agent1.localdomain` agent. You should
+have created the configuration file in the previous steps and it should contain the endpoint
+and zone object configuration already.
+
+```
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/satellite]# vim icinga2-agent1.localdomain.conf
+
+object Host "icinga2-agent1.localdomain" {
+ check_command = "hostalive"
+ address = "192.168.56.111"
+
+ vars.agent_endpoint = name // Follows the convention that host name == endpoint name
+}
+```
+
+Add the host object configuration for the `icinga2-agent2.localdomain` agent configuration file:
+
+```
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/satellite]# vim icinga2-agent2.localdomain.conf
+
+object Host "icinga2-agent2.localdomain" {
+ check_command = "hostalive"
+ address = "192.168.56.112"
+
+ vars.agent_endpoint = name // Follows the convention that host name == endpoint name
+}
+```
+
+Add a service object which is executed on the satellite nodes (e.g. `ping4`). Pin the apply rule to the `satellite` zone only.
+
+```
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/satellite]# vim services.conf
+
+apply Service "ping4" {
+ check_command = "ping4"
+
+ // Check is executed on the satellite node
+ assign where host.zone == "satellite" && host.address
+}
+```
+
+Add services using command endpoint checks. Pin the apply rules to the `satellite` zone only.
+
+```
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/satellite]# vim services.conf
+
+apply Service "disk" {
+ check_command = "disk"
+
+ // Execute the check on the remote command endpoint
+ command_endpoint = host.vars.agent_endpoint
+
+ assign where host.zone == "satellite" && host.vars.agent_endpoint
+}
+```
+
+Validate the configuration and restart Icinga 2 on the master node `icinga2-master1.localdomain`.
+
+```
+[root@icinga2-master1.localdomain /]# icinga2 daemon -C
+[root@icinga2-master1.localdomain /]# systemctl restart icinga2
+```
+
+Open Icinga Web 2 and check the two newly created agent hosts with two new services
+-- one executed locally (`ping4`) and one using command endpoint (`disk`).
+
+> **Tip**:
+>
+> It's a good idea to add [health checks](06-distributed-monitoring.md#distributed-monitoring-health-checks)
+> to make sure that your cluster notifies you in case of failure.
+
+In terms of health checks, consider adding the following for this scenario:
+
+- Master nodes check whether the satellite zone is connected
+- Satellite nodes check the connection to the agents
+- Optional: Add dependencies for the agent host to prevent unwanted notifications when agents are unreachable
+
+Proceed in [this chapter](06-distributed-monitoring.md#distributed-monitoring-health-checks-master-satellite-agent).
+
+
+## Best Practice <a id="distributed-monitoring-best-practice"></a>
+
+We've put together a collection of configuration examples from community feedback.
+If you like to share your tips and tricks with us, please join the [community channels](https://icinga.com/community/)!
+
+### Global Zone for Config Sync <a id="distributed-monitoring-global-zone-config-sync"></a>
+
+Global zones can be used to sync generic configuration objects
+to all nodes depending on them. Common examples are:
+
+* Templates which are imported into zone specific objects.
+* Command objects referenced by Host, Service, Notification objects.
+* Apply rules for services, notifications and dependencies.
+* User objects referenced in notifications.
+* Group objects.
+* TimePeriod objects.
+
+Plugin scripts and binaries must not be synced; this is for Icinga 2
+configuration files only. Use your preferred package repository
+and/or configuration management tool (Puppet, Ansible, Chef, etc.)
+to keep packages and scripts up to date.
+
+**Note**: Checkable objects (hosts and services) cannot be put into a global
+zone. The configuration validation will terminate with an error. Apply rules
+work as they are evaluated locally on each endpoint.
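+
+For example, the following apply rule could live in a global zone (a sketch; the file name
+is illustrative). Each endpoint evaluates it locally and only creates services for hosts it
+knows about:
+
+```
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.d/global-templates/services.conf
+
+apply Service "ping4" {
+ check_command = "ping4"
+
+ assign where host.address
+}
+```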
+
+The zone object configuration must be deployed on all nodes which should receive
+the global configuration files:
+
+```
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.conf
+
+object Zone "global-commands" {
+ global = true
+}
+```
+
+The default global zones generated by the setup wizards are called `global-templates` and `director-global`.
+
+While you can and should use `global-templates` for your global configuration, `director-global` is reserved for use
+by [Icinga Director](https://icinga.com/docs/director/latest/). Please don't
+place any configuration in it manually.
+
+Similar to the zone configuration sync you'll need to create a new directory in
+`/etc/icinga2/zones.d`:
+
+```
+[root@icinga2-master1.localdomain /]# mkdir -p /etc/icinga2/zones.d/global-commands
+```
+
+Next, add a new check command, for example:
+
+```
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.d/global-commands/web.conf
+
+object CheckCommand "webinject" {
+ //...
+}
+```
+
+Restart the endpoint(s) which should receive the global zone before
+restarting the parent master/satellite nodes.
+
+Then validate the configuration on the master node and restart Icinga 2.
+
+**Tip**: You can copy the example configuration files located in `/etc/icinga2/conf.d`
+into the default global zone `global-templates`.
+
+Example:
+
+```
+[root@icinga2-master1.localdomain /]# cd /etc/icinga2/conf.d
+[root@icinga2-master1.localdomain /etc/icinga2/conf.d]# cp {commands,groups,notifications,services,templates,timeperiods,users}.conf /etc/icinga2/zones.d/global-templates
+```
+
+### Health Checks <a id="distributed-monitoring-health-checks"></a>
+
+In case of network failures or other problems, your monitoring might
+either have late check results or just send out mass alarms for unknown
+checks.
+
+In order to minimize the problems caused by this, you should configure
+additional health checks.
+
+#### cluster-zone with Masters and Agents <a id="distributed-monitoring-health-checks-master-agents"></a>
+
+The `cluster-zone` check will test whether the configured target zone is currently
+connected or not. This example adds a health check for the [ha master with agents scenario](06-distributed-monitoring.md#distributed-monitoring-scenarios-ha-master-agents).
+
+```
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.d/master/services.conf
+
+apply Service "agent-health" {
+ check_command = "cluster-zone"
+
+ display_name = "cluster-health-" + host.name
+
+ /* This follows the convention that the agent zone name is the FQDN which is the same as the host object name. */
+ vars.cluster_zone = host.name
+
+ assign where host.vars.agent_endpoint
+}
+```
+
+In order to prevent unwanted notifications, add a service dependency which gets applied to
+all services using the command endpoint mode.
+
+```
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.d/master/dependencies.conf
+
+apply Dependency "agent-health-check" to Service {
+ parent_service_name = "agent-health"
+
+ states = [ OK ] // Fail if the parent service state switches to NOT-OK
+ disable_notifications = true
+
+ assign where host.vars.agent_endpoint // Automatically assigns all agent endpoint checks as child services on the matched host
+ ignore where service.name == "agent-health" // Avoid a self reference from child to parent
+}
+```
+
+#### cluster-zone with Masters, Satellites and Agents <a id="distributed-monitoring-health-checks-master-satellite-agent"></a>
+
+This example adds health checks for the [master, satellites and agents scenario](06-distributed-monitoring.md#distributed-monitoring-scenarios-master-satellite-agents).
+
+Whenever the connection between the master and satellite zone breaks,
+you may encounter late check results in Icinga Web. In order to view
+this failure and also send notifications, add the following configuration:
+
+First, add the two masters as host objects to the master zone, if not already
+existing.
+
+```
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.d/master/hosts.conf
+
+object Host "icinga2-master1.localdomain" {
+ check_command = "hostalive"
+
+ address = "192.168.56.101"
+}
+
+object Host "icinga2-master2.localdomain" {
+ check_command = "hostalive"
+
+ address = "192.168.56.102"
+}
+```
+
+Add service health checks against the satellite zone.
+
+```
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.d/master/health.conf
+
+apply Service "satellite-zone-health" {
+ check_command = "cluster-zone"
+ check_interval = 30s
+ retry_interval = 10s
+
+ vars.cluster_zone = "satellite"
+
+ assign where match("icinga2-master*.localdomain", host.name)
+}
+```
+
+**Don't forget to create notification apply rules for these services.**
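+
+A minimal sketch of such a rule, assuming the `mail-service-notification` command and the
+`icingaadmin` user from the example configuration are available (e.g. synced via a global zone):
+
+```
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.d/master/health.conf
+
+apply Notification "mail-satellite-zone-health" to Service {
+ command = "mail-service-notification"
+ users = [ "icingaadmin" ]
+
+ assign where service.name == "satellite-zone-health"
+}
+```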
+
+Next are health checks for agents connected to the satellite zone.
+Navigate into the satellite directory in `zones.d`:
+
+```
+[root@icinga2-master1.localdomain /]# cd /etc/icinga2/zones.d/satellite
+```
+
+You should already have configured agent host objects following [the master, satellite, agents scenario](06-distributed-monitoring.md#distributed-monitoring-scenarios-master-satellite-agents).
+Add a new configuration file where all the health checks are defined.
+
+```
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/satellite]# vim health.conf
+
+apply Service "agent-health" {
+ check_command = "cluster-zone"
+
+ display_name = "agent-health-" + host.name
+
+ // This follows the convention that the agent zone name is the FQDN which is the same as the host object name.
+ vars.cluster_zone = host.name
+
+ // Create this health check for agent hosts in the satellite zone
+ assign where host.zone == "satellite" && host.vars.agent_endpoint
+}
+```
+
+In order to prevent unwanted notifications, add a service dependency which gets applied to
+all services using the command endpoint mode.
+
+```
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/satellite]# vim health.conf
+
+apply Dependency "agent-health-check" to Service {
+ parent_service_name = "agent-health"
+
+ states = [ OK ] // Fail if the parent service state switches to NOT-OK
+ disable_notifications = true
+
+ assign where host.zone == "satellite" && host.vars.agent_endpoint // Automatically assigns all agent endpoint checks as child services on the matched host
+ ignore where service.name == "agent-health" // Avoid a self reference from child to parent
+}
+```
+
+This is all done on the configuration master, and requires the scenario to be fully up and running.
+
+#### Cluster Check
+
+The `cluster` check will check if all endpoints in the current zone and the directly
+connected zones are working properly. The disadvantage of using this check is that
+you cannot monitor 3 or more cluster levels with it.
+
+```
+[root@icinga2-master1.localdomain /]# mkdir -p /etc/icinga2/zones.d/master
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.d/master/icinga2-master1.localdomain.conf
+
+object Host "icinga2-master1.localdomain" {
+ check_command = "hostalive"
+ address = "192.168.56.101"
+}
+
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.d/master/cluster.conf
+
+object Service "cluster" {
+ check_command = "cluster"
+ check_interval = 5s
+ retry_interval = 1s
+
+ host_name = "icinga2-master1.localdomain"
+}
+```
+
+### Pin Checks in a Zone <a id="distributed-monitoring-pin-checks-zone"></a>
+
+In case you want to pin specific checks to their endpoints in a given zone, you'll need to use
+the `command_endpoint` attribute. This is reasonable if you want to
+execute a local disk check in the `master` zone on a specific endpoint.
+
+```
+[root@icinga2-master1.localdomain /]# mkdir -p /etc/icinga2/zones.d/master
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.d/master/icinga2-master1.localdomain.conf
+
+object Host "icinga2-master1.localdomain" {
+ check_command = "hostalive"
+ address = "192.168.56.101"
+}
+
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.d/master/services.conf
+
+apply Service "disk" {
+ check_command = "disk"
+
+ command_endpoint = host.name //requires a host object matching the endpoint object name e.g. icinga2-master1.localdomain
+
+ assign where host.zone == "master" && match("icinga2-master*", host.name)
+}
+```
+
+The `host.zone` attribute check inside the expression ensures that
+the service object is only created for host objects inside the `master`
+zone. In addition to that, the [match](18-library-reference.md#global-functions-match)
+function ensures that services are only created for the master nodes.
+
+### Windows Firewall <a id="distributed-monitoring-windows-firewall"></a>
+
+#### ICMP Requests <a id="distributed-monitoring-windows-firewall-icmp"></a>
+
+By default ICMP requests are disabled in the Windows firewall. You can
+change that by [adding a new rule](https://support.microsoft.com/en-us/kb/947709).
+
+```
+C:\> netsh advfirewall firewall add rule name="ICMP Allow incoming V4 echo request" protocol=icmpv4:8,any dir=in action=allow
+```
+
+#### Icinga 2 <a id="distributed-monitoring-windows-firewall-icinga2"></a>
+
+If your master/satellite nodes should actively connect to the Windows agent
+you'll also need to ensure that port `5665` is enabled.
+
+```
+C:\> netsh advfirewall firewall add rule name="Open port 5665 (Icinga 2)" dir=in action=allow protocol=TCP localport=5665
+```
+
+#### NSClient++ API <a id="distributed-monitoring-windows-firewall-nsclient-api"></a>
+
+If the [check_nscp_api](06-distributed-monitoring.md#distributed-monitoring-windows-nscp-check-api)
+plugin is used to query NSClient++, you need to ensure that its port is enabled.
+
+```
+C:\> netsh advfirewall firewall add rule name="Open port 8443 (NSClient++ API)" dir=in action=allow protocol=TCP localport=8443
+```
+
+For security reasons, it is advised to enable the NSClient++ HTTP API for local
+connections from the Icinga agent only. Remote connections to the legacy HTTP API
+are not recommended.
+
+### Windows Agent and Plugins <a id="distributed-monitoring-windows-plugins"></a>
+
+The Icinga 2 package on Windows already provides several plugins.
+Detailed [documentation](10-icinga-template-library.md#windows-plugins) is available for all check command definitions.
+
+Based on the [master with agents](06-distributed-monitoring.md#distributed-monitoring-master-agents)
+scenario we'll now add a local disk check.
+
+First, add the agent node as host object:
+
+```
+[root@icinga2-master1.localdomain /]# cd /etc/icinga2/zones.d/master
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/master]# vim hosts.conf
+
+object Host "icinga2-agent2.localdomain" {
+ check_command = "hostalive"
+ address = "192.168.56.112"
+ vars.agent_endpoint = name //follows the convention that host name == endpoint name
+ vars.os_type = "windows"
+}
+```
+
+Next, add the disk check using command endpoint checks (details in the
+[disk-windows](10-icinga-template-library.md#windows-plugins-disk-windows) documentation):
+
+```
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/master]# vim services.conf
+
+apply Service "disk C:" {
+ check_command = "disk-windows"
+
+ vars.disk_win_path = "C:"
+
+ //specify where the check is executed
+ command_endpoint = host.vars.agent_endpoint
+
+ assign where host.vars.os_type == "windows" && host.vars.agent_endpoint
+}
+```
+
+Validate the configuration and restart Icinga 2.
+
+```
+[root@icinga2-master1.localdomain /]# icinga2 daemon -C
+[root@icinga2-master1.localdomain /]# systemctl restart icinga2
+```
+
+Open Icinga Web 2 and check your newly added Windows disk check :)
+
+![Icinga Windows Agent](images/distributed-monitoring/icinga2_distributed_windows_client_disk_icingaweb2.png)
+
+If you want to add your own plugins please check [this chapter](05-service-monitoring.md#service-monitoring-requirements)
+for the requirements.
+
+### Windows Agent and NSClient++ <a id="distributed-monitoring-windows-nscp"></a>
+
+There are two methods available for querying NSClient++:
+
+* Query the [HTTP API](06-distributed-monitoring.md#distributed-monitoring-windows-nscp-check-api) locally from an Icinga agent (requires a running NSClient++ service)
+* Run a [local CLI check](06-distributed-monitoring.md#distributed-monitoring-windows-nscp-check-local) (does not require NSClient++ as a service)
+
+Both methods have their advantages and disadvantages. One thing to
+note: If you rely on performance counter delta calculations such as
+CPU utilization, please use the HTTP API instead of the CLI sample call.
+
+#### NSClient++ with check_nscp_api <a id="distributed-monitoring-windows-nscp-check-api"></a>
+
+In addition to the Windows plugins you can use the
+[nscp_api command](10-icinga-template-library.md#nscp-check-api) provided by the Icinga Template Library (ITL).
+
+The initial setup for the NSClient++ API and the required arguments
+is described in the ITL chapter for the [nscp_api](10-icinga-template-library.md#nscp-check-api) CheckCommand.
+
+Based on the [master with agents](06-distributed-monitoring.md#distributed-monitoring-master-agents)
+scenario we'll now add a local nscp check which queries the NSClient++ API to check the free disk space.
+
+Define a host object called `icinga2-agent2.localdomain` on the master. Add the `nscp_api_password`
+custom variable and specify the drives to check.
+
+```
+[root@icinga2-master1.localdomain /]# cd /etc/icinga2/zones.d/master
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/master]# vim hosts.conf
+
+object Host "icinga2-agent1.localdomain" {
+ check_command = "hostalive"
+ address = "192.168.56.111"
+
+ vars.agent_endpoint = name //follows the convention that host name == endpoint name
+ vars.os_type = "Windows"
+ vars.nscp_api_password = "icinga"
+ vars.drives = [ "C:", "D:" ]
+}
+```
+
+The service checks are generated using an [apply for](03-monitoring-basics.md#using-apply-for)
+rule based on `host.vars.drives`:
+
+```
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/master]# vim services.conf
+
+apply Service "nscp-api-" for (drive in host.vars.drives) {
+ import "generic-service"
+
+ check_command = "nscp_api"
+ command_endpoint = host.vars.agent_endpoint
+
+ //display_name = "nscp-drive-" + drive
+
+ vars.nscp_api_host = "localhost"
+ vars.nscp_api_query = "check_drivesize"
+ vars.nscp_api_password = host.vars.nscp_api_password
+ vars.nscp_api_arguments = [ "drive=" + drive ]
+
+ ignore where host.vars.os_type != "Windows"
+}
+```
+
+Validate the configuration and restart Icinga 2.
+
+```
+[root@icinga2-master1.localdomain /]# icinga2 daemon -C
+[root@icinga2-master1.localdomain /]# systemctl restart icinga2
+```
+
+Two new services ("nscp-drive-D:" and "nscp-drive-C:") will be visible in Icinga Web 2.
+
+![Icinga 2 Distributed Monitoring Windows Agent with NSClient++ nscp-api](images/distributed-monitoring/icinga2_distributed_windows_nscp_api_drivesize_icingaweb2.png)
+
+Note: You can also omit the `command_endpoint` configuration to execute
+the command on the master. This also requires a different value for `nscp_api_host`
+which defaults to `host.address`.
+
+```
+ //command_endpoint = host.vars.agent_endpoint
+
+ //vars.nscp_api_host = "localhost"
+```
+
+You can verify the check execution by looking at the `Check Source` attribute
+in Icinga Web 2 or the REST API.
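+
+For example, a hedged REST API query with `curl` (the `root` ApiUser, its password and the generated
+service object name are assumptions based on the configuration above; the check source is part of
+`last_check_result`):
+
+```bash
+curl -k -s -u root:icinga \
+ 'https://localhost:5665/v1/objects/services/icinga2-agent1.localdomain!nscp-api-C:?attrs=last_check_result'
+```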
+
+If you want to monitor specific Windows services, you could use the following example:
+
+```
+[root@icinga2-master1.localdomain /]# cd /etc/icinga2/zones.d/master
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/master]# vim hosts.conf
+
+object Host "icinga2-agent1.localdomain" {
+ check_command = "hostalive"
+ address = "192.168.56.111"
+
+ vars.agent_endpoint = name //follows the convention that host name == endpoint name
+ vars.os_type = "Windows"
+ vars.nscp_api_password = "icinga"
+ vars.services = [ "Windows Update", "wscsvc" ]
+}
+
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/master]# vim services.conf
+
+apply Service "nscp-api-" for (svc in host.vars.services) {
+ import "generic-service"
+
+ check_command = "nscp_api"
+ command_endpoint = host.vars.agent_endpoint
+
+ //display_name = "nscp-service-" + svc
+
+ vars.nscp_api_host = "localhost"
+ vars.nscp_api_query = "check_service"
+ vars.nscp_api_password = host.vars.nscp_api_password
+ vars.nscp_api_arguments = [ "service=" + svc ]
+
+ ignore where host.vars.os_type != "Windows"
+}
+```
+
+#### NSClient++ with nscp-local <a id="distributed-monitoring-windows-nscp-check-local"></a>
+
+In addition to the Windows plugins you can use the
+[nscp-local commands](10-icinga-template-library.md#nscp-plugin-check-commands)
+provided by the Icinga Template Library (ITL).
+
+Add the following `include` statement on all your nodes (master, satellite, agent):
+
+```
+vim /etc/icinga2/icinga2.conf
+
+include <nscp>
+```
+
+The CheckCommand definitions will automatically determine the installed path
+to the `nscp.exe` binary.
+
+Based on the [master with agents](06-distributed-monitoring.md#distributed-monitoring-master-agents)
+scenario we'll now add a local nscp check querying a given performance counter.
+
+First, add the agent node as host object:
+
+```
+[root@icinga2-master1.localdomain /]# cd /etc/icinga2/zones.d/master
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/master]# vim hosts.conf
+
+object Host "icinga2-agent1.localdomain" {
+ check_command = "hostalive"
+ address = "192.168.56.111"
+
+ vars.agent_endpoint = name //follows the convention that host name == endpoint name
+ vars.os_type = "windows"
+}
+```
+
+Next, add a performance counter check using command endpoint checks (details in the
+[nscp-local-counter](10-icinga-template-library.md#nscp-check-local-counter) documentation):
+
+```
+[root@icinga2-master1.localdomain /etc/icinga2/zones.d/master]# vim services.conf
+
+apply Service "nscp-local-counter-cpu" {
+ check_command = "nscp-local-counter"
+ command_endpoint = host.vars.agent_endpoint
+
+ vars.nscp_counter_name = "\\Processor(_total)\\% Processor Time"
+ vars.nscp_counter_perfsyntax = "Total Processor Time"
+ vars.nscp_counter_warning = 1
+ vars.nscp_counter_critical = 5
+
+ vars.nscp_counter_showall = true
+
+ assign where host.vars.os_type == "windows" && host.vars.agent_endpoint
+}
+```
+
+Validate the configuration and restart Icinga 2.
+
+```
+[root@icinga2-master1.localdomain /]# icinga2 daemon -C
+[root@icinga2-master1.localdomain /]# systemctl restart icinga2
+```
+
+Open Icinga Web 2 and check your newly added Windows NSClient++ check :)
+
+![Icinga 2 Distributed Monitoring Windows Agent with NSClient++ nscp-local](images/distributed-monitoring/icinga2_distributed_windows_nscp_counter_icingaweb2.png)
+
+> **Tip**
+>
+> In order to measure CPU load, you'll need a running NSClient++ service.
+> Therefore it is advised to use a local [nscp-api](06-distributed-monitoring.md#distributed-monitoring-windows-nscp-check-api)
+> check against its REST API.
+
+## Advanced Hints <a id="distributed-monitoring-advanced-hints"></a>
+
+You can find additional hints in this section if you prefer to go your own route
+with automating setups (setup, certificates, configuration).
+
+### Certificate Auto-Renewal <a id="distributed-monitoring-certificate-auto-renewal"></a>
+
+Since v2.8, Icinga 2 nodes can request certificate updates
+on their own. If the certificate is close to its expiration date, they automatically
+renew their already signed certificate by sending a signing request to the
+parent node. You'll also see a message in the logs if certificate renewal
+isn't necessary.
+
+### High-Availability for Icinga 2 Features <a id="distributed-monitoring-high-availability-features"></a>
+
+All nodes in the same zone require that you enable the same features for high-availability (HA).
+
+By default, the following features provide advanced HA functionality:
+
+* [Checks](06-distributed-monitoring.md#distributed-monitoring-high-availability-checks) (load balanced, automated failover).
+* [Notifications](06-distributed-monitoring.md#distributed-monitoring-high-availability-notifications) (load balanced, automated failover).
+* [DB IDO](06-distributed-monitoring.md#distributed-monitoring-high-availability-db-ido) (Run-Once, automated failover).
+* [Elasticsearch](09-object-types.md#objecttype-elasticsearchwriter)
+* [Gelf](09-object-types.md#objecttype-gelfwriter)
+* [Graphite](09-object-types.md#objecttype-graphitewriter)
+* [InfluxDB](09-object-types.md#objecttype-influxdb2writer) (v1 and v2)
+* [OpenTsdb](09-object-types.md#objecttype-opentsdbwriter)
+* [Perfdata](09-object-types.md#objecttype-perfdatawriter) (for PNP)
+
+#### High-Availability with Checks <a id="distributed-monitoring-high-availability-checks"></a>
+
+All instances within the same zone (e.g. the `master` zone as HA cluster) must
+have the `checker` feature enabled.
+
+Example:
+
+```bash
+icinga2 feature enable checker
+```
+
+All nodes in the same zone load-balance the check execution. If one instance shuts down,
+the other nodes will automatically take over the remaining checks.
+
+#### High-Availability with Notifications <a id="distributed-monitoring-high-availability-notifications"></a>
+
+All instances within the same zone (e.g. the `master` zone as HA cluster) must
+have the `notification` feature enabled.
+
+Example:
+
+```bash
+icinga2 feature enable notification
+```
+
+Notifications are load-balanced amongst all nodes in a zone. By default this functionality
+is enabled.
+If your nodes should send out notifications independently from any other nodes (this will cause
+duplicated notifications if not properly handled!), you can set `enable_ha = false`
+in the [NotificationComponent](09-object-types.md#objecttype-notificationcomponent) feature.
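+
+A minimal sketch of such an override, assuming the feature's default object name and
+configuration file location (both may differ in your setup):
+
+```
+// /etc/icinga2/features-enabled/notification.conf
+object NotificationComponent "notification" {
+  enable_ha = false
+}
+```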
+
+#### High-Availability with DB IDO <a id="distributed-monitoring-high-availability-db-ido"></a>
+
+All instances within the same zone (e.g. the `master` zone as HA cluster) must
+have the DB IDO feature enabled.
+
+Example DB IDO MySQL:
+
+```bash
+icinga2 feature enable ido-mysql
+```
+
+By default the DB IDO feature only runs on one node. All other nodes in the same zone disable
+the active IDO database connection at runtime. The node with the active DB IDO connection is
+not necessarily the zone master.
+
+**Note**: The DB IDO HA feature can be disabled by setting the `enable_ha` attribute to `false`
+for the [IdoMysqlConnection](09-object-types.md#objecttype-idomysqlconnection) or
+[IdoPgsqlConnection](09-object-types.md#objecttype-idopgsqlconnection) object on **all** nodes in the
+**same** zone.
+
+All endpoints will enable the DB IDO feature and connect to the configured
+database and dump configuration, status and historical data on their own.
+
+If the instance with the active DB IDO connection dies, the HA functionality will
+automatically elect a new DB IDO master.
+
+The DB IDO feature will try to determine which cluster endpoint is currently writing
+to the database and bail out if another endpoint is active. You can manually verify that
+by running the following query command:
+
+```
+icinga=> SELECT status_update_time, endpoint_name FROM icinga_programstatus;
+ status_update_time | endpoint_name
+------------------------+---------------
+ 2016-08-15 15:52:26+02 | icinga2-master1.localdomain
+(1 row)
+```
+
+This is useful when the cluster connection between endpoints breaks, and prevents
+data duplication in split-brain scenarios. The failover timeout can be set via the
+`failover_timeout` attribute, but not lower than 60 seconds.
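+
+As a hedged sketch, both attributes are set on the IDO connection object; the object name
+`ido-mysql` and the connection details below are assumptions:
+
+```
+// /etc/icinga2/features-enabled/ido-mysql.conf
+object IdoMysqlConnection "ido-mysql" {
+  host = "localhost"
+  database = "icinga"
+  user = "icinga"
+  password = "icinga"
+
+  //enable_ha = false      // Disable DB IDO HA on *all* nodes in the same zone
+  failover_timeout = 120s  // Must not be lower than 60 seconds
+}
+```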
+
+### Endpoint Connection Direction <a id="distributed-monitoring-advanced-hints-connection-direction"></a>
+
+An endpoint attempts to connect to another endpoint when the local [Endpoint](09-object-types.md#objecttype-endpoint) object
+configured for that other endpoint specifies a valid `host` attribute (FQDN or IP address).
+
+Example for the master node `icinga2-master1.localdomain` actively connecting
+to the agent node `icinga2-agent1.localdomain`:
+
+```
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.conf
+
+//...
+
+object Endpoint "icinga2-agent1.localdomain" {
+ host = "192.168.56.111" // The master actively tries to connect to the agent
+ log_duration = 0 // Disable the replay log for command endpoint agents
+}
+```
+
+Example for the agent node `icinga2-agent1.localdomain` not actively
+connecting to the master node `icinga2-master1.localdomain`:
+
+```
+[root@icinga2-agent1.localdomain /]# vim /etc/icinga2/zones.conf
+
+//...
+
+object Endpoint "icinga2-master1.localdomain" {
+ // Do not actively connect to the master by leaving out the 'host' attribute
+ log_duration = 0 // Disable the replay log for command endpoint agents
+}
+```
+
+It is not necessary for both the master and the agent node to establish
+connections to each other. Icinga 2 will only use one connection
+and close the second one if it is established. Attempting both directions only generates useless
+CPU cycles and can block resources when the connection times out.
+
+**Tip**: Choose either to let master/satellite nodes connect to agent nodes
+or vice versa.
+
+
+### Disable Log Duration for Command Endpoints <a id="distributed-monitoring-advanced-hints-command-endpoint-log-duration"></a>
+
+The replay log is a built-in mechanism to ensure that nodes in a distributed setup
+keep the same history (check results, notifications, etc.) when nodes are temporarily
+disconnected and then reconnect.
+
+This functionality is not needed when a master/satellite node is sending check
+execution events to an agent which is configured as [command endpoint](06-distributed-monitoring.md#distributed-monitoring-top-down-command-endpoint)
+for check execution.
+
+The [Endpoint](09-object-types.md#objecttype-endpoint) object attribute `log_duration` can
+be lowered or set to 0 to fully disable any log replay updates when the
+agent is not connected.
+
+Configuration on the master node `icinga2-master1.localdomain`:
+
+```
+[root@icinga2-master1.localdomain /]# vim /etc/icinga2/zones.conf
+
+//...
+
+object Endpoint "icinga2-agent1.localdomain" {
+ host = "192.168.56.111" // The master actively tries to connect to the agent
+ log_duration = 0
+}
+
+object Endpoint "icinga2-agent2.localdomain" {
+ host = "192.168.56.112" // The master actively tries to connect to the agent
+ log_duration = 0
+}
+```
+
+Configuration on the agent `icinga2-agent1.localdomain`:
+
+```
+[root@icinga2-agent1.localdomain /]# vim /etc/icinga2/zones.conf
+
+//...
+
+object Endpoint "icinga2-master1.localdomain" {
+ // Do not actively connect to the master by leaving out the 'host' attribute
+ log_duration = 0
+}
+
+object Endpoint "icinga2-master2.localdomain" {
+ // Do not actively connect to the master by leaving out the 'host' attribute
+ log_duration = 0
+}
+```
+
+### Initial Sync for new Endpoints in a Zone <a id="distributed-monitoring-advanced-hints-initial-sync"></a>
+
+> **Note**
+>
+> This is required if you decide to change an already running single endpoint production
+> environment into a HA-enabled cluster zone with two endpoints.
+> The [initial setup](06-distributed-monitoring.md#distributed-monitoring-scenarios-ha-master-clients)
+> with 2 HA masters doesn't require this step.
+
+In order to make sure that all of your zone endpoints have the same state you need
+to pick the authoritative running one and copy the following content:
+
+* State file from `/var/lib/icinga2/icinga2.state`
+* Internal config package for runtime created objects (downtimes, comments, hosts, etc.) at `/var/lib/icinga2/api/packages/_api`
+
+If you need already deployed config packages from the Director, or synced cluster zones,
+you can also sync the entire `/var/lib/icinga2/api/packages` directory. This directory should also be
+included in your backup strategy.
+
+Do **not** sync `/var/lib/icinga2/api/zones*` manually - this is an internal directory
+and handled by the Icinga cluster config sync itself.
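+
+A hedged sketch of such a copy, run on the new endpoint while Icinga 2 is stopped on both nodes
+(the source hostname, SSH access and the use of `rsync` are assumptions; see the note below):
+
+```bash
+# Copy the state file and the internal _api config package from the authoritative endpoint
+rsync -av icinga2-master1.localdomain:/var/lib/icinga2/icinga2.state /var/lib/icinga2/
+rsync -av icinga2-master1.localdomain:/var/lib/icinga2/api/packages/_api/ /var/lib/icinga2/api/packages/_api/
+chown -R icinga:icinga /var/lib/icinga2
+```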
+
+> **Note**
+>
+> Ensure that all endpoints are shut down during this procedure. Once you have
+> synced the cached files, proceed with configuring the remaining endpoints
+> to let them know about the new master/satellite node (zones.conf).
+
+### Manual Certificate Creation <a id="distributed-monitoring-advanced-hints-certificates-manual"></a>
+
+#### Create CA on the Master <a id="distributed-monitoring-advanced-hints-certificates-manual-ca"></a>
+
+Choose the host which should store the certificate authority (one of the master nodes).
+
+The first step is the creation of the certificate authority (CA) by running the following command
+as root user:
+
+```
+[root@icinga2-master1.localdomain /root]# icinga2 pki new-ca
+```
+
+#### Create CSR and Certificate <a id="distributed-monitoring-advanced-hints-certificates-manual-create"></a>
+
+Create a certificate signing request (CSR) for the local instance:
+
+```
+[root@icinga2-master1.localdomain /root]# icinga2 pki new-cert --cn icinga2-master1.localdomain \
+ --key icinga2-master1.localdomain.key \
+ --csr icinga2-master1.localdomain.csr
+```
+
+Sign the CSR with the previously created CA:
+
+```
+[root@icinga2-master1.localdomain /root]# icinga2 pki sign-csr --csr icinga2-master1.localdomain.csr --cert icinga2-master1.localdomain
+```
+
+Repeat the steps for all instances in your setup.
+
+#### Copy Certificates <a id="distributed-monitoring-advanced-hints-certificates-manual-copy"></a>
+
+Copy the host's certificate files and the public CA certificate to `/var/lib/icinga2/certs`:
+
+```
+[root@icinga2-master1.localdomain /root]# mkdir -p /var/lib/icinga2/certs
+[root@icinga2-master1.localdomain /root]# cp icinga2-master1.localdomain.{crt,key} /var/lib/icinga2/certs
+[root@icinga2-master1.localdomain /root]# cp /var/lib/icinga2/ca/ca.crt /var/lib/icinga2/certs
+```
+
+Ensure that proper permissions are set (replace `icinga` with the Icinga 2 daemon user):
+
+```
+[root@icinga2-master1.localdomain /root]# chown -R icinga:icinga /var/lib/icinga2/certs
+[root@icinga2-master1.localdomain /root]# chmod 600 /var/lib/icinga2/certs/*.key
+[root@icinga2-master1.localdomain /root]# chmod 644 /var/lib/icinga2/certs/*.crt
+```
+
+The CA public and private key are stored in the `/var/lib/icinga2/ca` directory. Keep this path secure and include
+it in your backups.
+
+#### Create Multiple Certificates <a id="distributed-monitoring-advanced-hints-certificates-manual-multiple"></a>
+
+Use your preferred method to automate the certificate generation process.
+
+```
+[root@icinga2-master1.localdomain /var/lib/icinga2/certs]# for node in icinga2-master1.localdomain icinga2-master2.localdomain icinga2-satellite1.localdomain; do icinga2 pki new-cert --cn $node --csr $node.csr --key $node.key; done
+information/base: Writing private key to 'icinga2-master1.localdomain.key'.
+information/base: Writing certificate signing request to 'icinga2-master1.localdomain.csr'.
+information/base: Writing private key to 'icinga2-master2.localdomain.key'.
+information/base: Writing certificate signing request to 'icinga2-master2.localdomain.csr'.
+information/base: Writing private key to 'icinga2-satellite1.localdomain.key'.
+information/base: Writing certificate signing request to 'icinga2-satellite1.localdomain.csr'.
+
+[root@icinga2-master1.localdomain /var/lib/icinga2/certs]# for node in icinga2-master1.localdomain icinga2-master2.localdomain icinga2-satellite1.localdomain; do sudo icinga2 pki sign-csr --csr $node.csr --cert $node.crt; done
+information/pki: Writing certificate to file 'icinga2-master1.localdomain.crt'.
+information/pki: Writing certificate to file 'icinga2-master2.localdomain.crt'.
+information/pki: Writing certificate to file 'icinga2-satellite1.localdomain.crt'.
+```
+
+Copy and move these certificates to the respective instances e.g. with SSH/SCP.
+
+## Automation <a id="distributed-monitoring-automation"></a>
+
+These hints should get you started with your own automation tools (Puppet, Ansible, Chef, Salt, etc.)
+or custom scripts for automated setup.
+
+These are collected best practices from various community channels.
+
+* [Silent Windows setup](06-distributed-monitoring.md#distributed-monitoring-automation-windows-silent)
+* [Node Setup CLI command](06-distributed-monitoring.md#distributed-monitoring-automation-cli-node-setup) with parameters
+
+If you prefer an alternate method, we still recommend leaving all the Icinga 2 features intact (e.g. `icinga2 feature enable api`).
+You should also use well known and documented default configuration file locations (e.g. `zones.conf`).
+This will tremendously help anyone trying to assist you in the [community channels](https://icinga.com/community/).
+
+
+### Silent Windows Setup <a id="distributed-monitoring-automation-windows-silent"></a>
+
+If you want to install the agent silently/unattended, use the `/qn` modifier. The
+installation should not trigger a restart, but if you want to be completely sure, you can use the `/norestart` modifier.
+
+```
+C:\> msiexec /i C:\Icinga2-v2.5.0-x86.msi /qn /norestart
+```
+
+Once the setup is completed, you can use the `node setup` CLI command, too.
+
+### Node Setup using CLI Parameters <a id="distributed-monitoring-automation-cli-node-setup"></a>
+
+Instead of using the `node wizard` CLI command, there is an alternative `node setup`
+command available which has some prerequisites.
+
+**Note**: The CLI command can be used on Linux/Unix and Windows operating systems.
+The graphical Windows setup wizard actively uses these CLI commands.
+
+#### Node Setup on the Master Node <a id="distributed-monitoring-automation-cli-node-setup-master"></a>
+
+In case you want to set up a master node, you must add the `--master` parameter
+to the `node setup` CLI command. In addition to that, the `--cn` parameter can optionally
+be passed (it defaults to the FQDN).
+
+ Parameter | Description
+ --------------------|--------------------
+ `--cn` | **Optional.** Common name (CN). By convention this should be the host's FQDN. Defaults to the FQDN.
+ `--zone` | **Optional.** Zone name. Defaults to `master`.
+ `--listen` | **Optional.** Address to listen on. Syntax is `host,port`.
+ `--disable-confd` | **Optional.** If provided, this disables the `include_recursive "conf.d"` directive and adds the `api-users.conf` file inclusion to `icinga2.conf`. Available since v2.9+. Not set by default for compatibility reasons with Puppet, Ansible, Chef, etc.
+
+Example:
+
+```
+[root@icinga2-master1.localdomain /]# icinga2 node setup --master
+```
+
+In case you want to bind the `ApiListener` object to a specific
+host/port you can specify it like this:
+
+```
+--listen 192.168.56.101,5665
+```
+
+In case you don't need anything in `conf.d`, use the following command line:
+
+```
+[root@icinga2-master1.localdomain /]# icinga2 node setup --master --disable-confd
+```
+
+<!-- Keep this for compatibility -->
+<a id="distributed-monitoring-automation-cli-node-setup-satellite-client"></a>
+
+#### Node Setup with Agents/Satellites <a id="distributed-monitoring-automation-cli-node-setup-agent-satellite"></a>
+
+##### Preparations
+
+Make sure that the `/var/lib/icinga2/certs` directory exists and is owned by the `icinga`
+user (or the user Icinga 2 is running as).
+
+```
+[root@icinga2-agent1.localdomain /]# mkdir -p /var/lib/icinga2/certs
+[root@icinga2-agent1.localdomain /]# chown -R icinga:icinga /var/lib/icinga2/certs
+```
+
+First you'll need to generate a new local self-signed certificate.
+Pass the following details to the `pki new-cert` CLI command:
+
+ Parameter | Description
+ --------------------|--------------------
+ `--cn` | **Required.** Common name (CN). By convention this should be the host's FQDN.
+ `--key`, `--cert` | **Required.** Client certificate files. These generated files will be put into the specified location. By convention this should be using `/var/lib/icinga2/certs` as directory.
+
+Example:
+
+```
+[root@icinga2-agent1.localdomain /]# icinga2 pki new-cert --cn icinga2-agent1.localdomain \
+--key /var/lib/icinga2/certs/icinga2-agent1.localdomain.key \
+--cert /var/lib/icinga2/certs/icinga2-agent1.localdomain.crt
+```
+
+##### Verify Parent Connection
+
+In order to verify the parent connection and avoid man-in-the-middle attacks,
+fetch the parent instance's certificate and verify that it matches the connection.
+The `trusted-parent.crt` file is a temporary file passed to `node setup` in the
+next step and does not need to be stored for later usage.
+
+Pass the following details to the `pki save-cert` CLI command:
+
+ Parameter | Description
+ --------------------|--------------------
+ `--trustedcert` | **Required.** Store the parent's certificate file. Manually verify that you're trusting it.
+ `--host` | **Required.** FQDN or IP address of the parent host.
+
+Request the master certificate from the master host (`icinga2-master1.localdomain`)
+and store it as `trusted-parent.crt`. Review it and continue.
+
+```
+[root@icinga2-agent1.localdomain /]# icinga2 pki save-cert \
+--trustedcert /var/lib/icinga2/certs/trusted-parent.crt \
+--host icinga2-master1.localdomain
+
+information/cli: Retrieving TLS certificate for 'icinga2-master1.localdomain:5665'.
+
+ Subject: CN = icinga2-master1.localdomain
+ Issuer: CN = icinga2-master1.localdomain
+ Valid From: Feb 4 08:59:05 2020 GMT
+ Valid Until: Jan 31 08:59:05 2035 GMT
+ Fingerprint: B4 90 DE 46 81 DD 2E BF EE 9D D5 47 61 43 EF C6 6D 86 A6 CC
+
+***
+*** You have to ensure that this certificate actually matches the parent
+*** instance's certificate in order to avoid man-in-the-middle attacks.
+***
+
+information/pki: Writing certificate to file '/var/lib/icinga2/certs/trusted-parent.crt'.
+```
+
+##### Node Setup
+
+Continue with the additional `node setup` step. Specify a local endpoint and zone name (`icinga2-agent1.localdomain`)
+and set the master host (`icinga2-master1.localdomain`) as parent zone configuration. Specify the path to
+the previously stored trusted parent certificate (`trusted-parent.crt`).
+
+Pass the following details to the `node setup` CLI command:
+
+ Parameter | Description
+ --------------------|--------------------
+ `--cn` | **Optional.** Common name (CN). By convention this should be the host's FQDN.
+ `--ticket` | **Required.** Request ticket. Add the previously generated [ticket number](06-distributed-monitoring.md#distributed-monitoring-setup-csr-auto-signing).
+ `--trustedcert` | **Required.** Trusted parent certificate file as connection verification (received via 'pki save-cert').
+ `--parent_host` | **Optional.** FQDN or IP address of the parent host. This is where the command connects for CSR signing. If not specified, you need to manually copy the parent's public CA certificate file into `/var/lib/icinga2/certs/ca.crt` in order to start Icinga 2.
+ `--endpoint` | **Required.** Specifies the parent's endpoint name.
+ `--zone` | **Required.** Specifies the agent/satellite zone name.
+ `--parent_zone` | **Optional.** Specifies the parent's zone name.
+ `--accept-config` | **Optional.** Whether this node accepts configuration sync from the master node (required for [config sync mode](06-distributed-monitoring.md#distributed-monitoring-top-down-config-sync)).
+ `--accept-commands` | **Optional.** Whether this node accepts command execution messages from the master node (required for [command endpoint mode](06-distributed-monitoring.md#distributed-monitoring-top-down-command-endpoint)).
+ `--global_zones` | **Optional.** Allows to specify more global zones in addition to `global-templates` and `director-global`.
+ `--disable-confd` | **Optional.** If provided, this disables the `include_recursive "conf.d"` directive in `icinga2.conf`. Available since v2.9+. Not set by default for compatibility reasons with Puppet, Ansible, Chef, etc.
+
+> **Note**
+>
+> The `master_host` parameter is deprecated and will be removed. Please use `--parent_host` instead.
+
+Example:
+
+```
+[root@icinga2-agent1.localdomain /]# icinga2 node setup --ticket ead2d570e18c78abf285d6b85524970a0f69c22d \
+--cn icinga2-agent1.localdomain \
+--endpoint icinga2-master1.localdomain \
+--zone icinga2-agent1.localdomain \
+--parent_zone master \
+--parent_host icinga2-master1.localdomain \
+--trustedcert /var/lib/icinga2/certs/trusted-parent.crt \
+--accept-commands --accept-config \
+--disable-confd
+```
+
+In case the agent/satellite should connect to the master node, you'll
+need to modify the `--endpoint` parameter using the format `cn,host,port`:
+
+```
+--endpoint icinga2-master1.localdomain,192.168.56.101,5665
+```
+
+Specify the parent zone using the `--parent_zone` parameter. This is useful
+if the agent connects to a satellite, not the master instance.
+
+```
+--parent_zone satellite
+```
+
+In case the agent should know the additional global zone `linux-templates`, you'll
+need to set the `--global_zones` parameter.
+
+```
+--global_zones linux-templates
+```
+
+The `--parent_host` parameter is optional since v2.9 and allows you to perform a connection-less setup.
+In that case you cannot restart Icinga 2 yet: the CLI command asks you to manually copy the parent's public CA
+certificate file into `/var/lib/icinga2/certs/ca.crt`. Once Icinga 2 is started, it sends
+a ticket signing request to the parent node. If you have provided a ticket, the master node
+signs the request and sends it back to the agent/satellite which performs a certificate update in-memory.
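+
+One way to copy the CA certificate could look like this (SSH access, the source path and the daemon user are assumptions):
+
+```bash
+# Hypothetical example: fetch the public CA certificate from the parent node via SCP
+scp root@icinga2-master1.localdomain:/var/lib/icinga2/certs/ca.crt /var/lib/icinga2/certs/ca.crt
+chown icinga:icinga /var/lib/icinga2/certs/ca.crt
+```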
+
+In case you did not provide a ticket, you need to [manually sign the CSR on the master node](06-distributed-monitoring.md#distributed-monitoring-setup-on-demand-csr-signing-master)
+which holds the CA's key pair.
+
+
+**You can find additional best practices below.**
+
+If this agent node is configured for [remote command endpoint execution](06-distributed-monitoring.md#distributed-monitoring-top-down-command-endpoint),
+you can safely disable the `checker` feature. The `node setup` CLI command has already disabled the `notification` feature.
+
+```
+[root@icinga2-agent1.localdomain /]# icinga2 feature disable checker
+```
+
+**Optional**: Add an ApiUser object configuration for remote troubleshooting.
+
+```
+[root@icinga2-agent1.localdomain /]# cat <<EOF >/etc/icinga2/conf.d/api-users.conf
+object ApiUser "root" {
+ password = "agentsupersecretpassword"
+ permissions = ["*"]
+}
+EOF
+```
+
+Finally restart Icinga 2.
+
+```
+[root@icinga2-agent1.localdomain /]# systemctl restart icinga2
+```
+
+In the meantime, your automation tool must configure the master node:
+
+```
+# cat <<EOF >>/etc/icinga2/zones.conf
+object Endpoint "icinga2-agent1.localdomain" {
+ // Agent connects itself
+}
+
+object Zone "icinga2-agent1.localdomain" {
+ endpoints = [ "icinga2-agent1.localdomain" ]
+ parent = "master"
+}
+
+EOF
+```
+
+## Using Multiple Environments <a id="distributed-monitoring-environments"></a>
+
+> **Note**
+>
+> This documentation only covers the basics. Full functionality requires a not yet released addon.
+
+In some cases it may be desirable to run multiple Icinga instances on the same host.
+Two potential scenarios include:
+
+* Different versions of the same monitoring configuration (e.g. production and testing)
+* Disparate sets of checks for entirely unrelated monitoring environments (e.g. infrastructure and applications)
+
+The configuration is done with the global constants `ApiBindHost` and `ApiBindPort`
+or the `bind_host` and `bind_port` attributes of the
+[ApiListener](09-object-types.md#objecttype-apilistener) object.
+
+The environment must be set with the global constant `Environment` or as object attribute
+of the [IcingaApplication](09-object-types.md#objecttype-icingaapplication) object.
+
+In any case, the constant provides the default value for the attribute, and the direct configuration in
+the objects takes precedence. The constants have been added to allow setting the values from the CLI on startup.
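+
+A minimal sketch, assuming the values are set in `constants.conf` (the concrete values are examples only):
+
+```
+// constants.conf of one of the instances
+const Environment = "production"
+const ApiBindHost = "127.0.0.1"
+const ApiBindPort = 5665
+```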
+
+When Icinga establishes a TLS connection to another cluster instance it automatically uses the [SNI extension](https://en.wikipedia.org/wiki/Server_Name_Indication)
+to signal which endpoint it is attempting to connect to. On its own this can already be used to position multiple
+Icinga instances behind a load balancer.
+
+SNI example: `icinga2-agent1.localdomain`
+
+However, if the environment is configured to `production`, Icinga appends the environment name to the SNI hostname like this:
+
+SNI example with environment: `icinga2-agent1.localdomain:production`
+
+Middleware like load balancers or TLS proxies can read the SNI header and route the connection to the appropriate target.
+That is, such middleware uses a single externally visible TCP port (usually 5665) and forwards connections to one or more Icinga
+instances which are bound to a local TCP port. It does so by inspecting the environment name that is sent as part of the
+SNI extension.
diff --git a/doc/07-agent-based-monitoring.md b/doc/07-agent-based-monitoring.md
new file mode 100644
index 0000000..51e41ac
--- /dev/null
+++ b/doc/07-agent-based-monitoring.md
@@ -0,0 +1,484 @@
+# Agent-based Checks <a id="agent-based-checks-addon"></a>
+
+If the remote services are not directly accessible through the network, a
+local agent installation exposing the results to check queries can
+come in handy.
+
+Prior to installing and configuring an agent service, evaluate the possible
+options based on these requirements:
+
+* Security (authentication, TLS certificates, secure connection handling, etc.)
+* Connection direction
+ * Master/satellite can execute commands directly or
+ * Agent sends back passive/external check results
+* Availability on specific OS types and versions
+ * Packages available
+* Configuration and initial setup
+* Updates and maintenance, compatibility
+
+Available agent types:
+
+* [Icinga Agent](07-agent-based-monitoring.md#agent-based-checks-icinga) on Linux/Unix and Windows
+* [SSH](07-agent-based-monitoring.md#agent-based-checks-ssh) on Linux/Unix
+* [SNMP](07-agent-based-monitoring.md#agent-based-checks-snmp) on Linux/Unix and hardware
+* [SNMP Traps](07-agent-based-monitoring.md#agent-based-checks-snmp-traps) as passive check results
+* [REST API](07-agent-based-monitoring.md#agent-based-checks-rest-api) for passive external check results
+* [NSClient++](07-agent-based-monitoring.md#agent-based-checks-nsclient) and [WMI](07-agent-based-monitoring.md#agent-based-checks-wmi) on Windows
+
+
+## Icinga Agent <a id="agent-based-checks-icinga"></a>
+
+For the most common setups on Linux/Unix and Windows, we recommend
+setting up the Icinga agent in a [distributed environment](06-distributed-monitoring.md#distributed-monitoring).
+
+![Icinga 2 Distributed Master with Agents](images/distributed-monitoring/icinga2_distributed_monitoring_scenarios_master_with_agents.png)
+
+Key benefits:
+
+* Directly integrated into the distributed monitoring stack of Icinga
+* Works on Linux/Unix and Windows
+* Secure communication with TLS
+* Connection can be established from both sides. Once connected, command execution and check results are exchanged.
+ * Master/satellite connects to agent
+ * Agent connects to parent satellite/master
+* Same configuration language and binaries
+* Troubleshooting docs and community best practices
+
+Follow the setup and configuration instructions [here](06-distributed-monitoring.md#distributed-monitoring-setup-agent-satellite).
+
+On Windows hosts, the Icinga agent can query a local NSClient++ service
+for additional checks in case there are no plugins available.
+
+![Icinga 2 Windows Setup](images/distributed-monitoring/icinga2_windows_setup_wizard_01.png)
+
+## SSH <a id="agent-based-checks-ssh"></a>
+
+> **Tip**
+>
+> This is the recommended way for systems where the Icinga agent is not available,
+> be it specific hardware architectures, old systems, or environments where installing additional software is forbidden.
+
+This method uses the SSH service on the remote host to execute
+an arbitrary plugin command line. The output and exit code are
+returned and used by the core.
+
+The `check_by_ssh` plugin takes care of this. It is available in the
+[Monitoring Plugins](https://www.monitoring-plugins.org/) package.
+For your convenience, the Icinga template library provides the [by_ssh](10-icinga-template-library.md#plugin-check-command-by-ssh)
+CheckCommand already.
+
+### SSH: Preparations <a id="agent-based-checks-ssh-preparations"></a>
+
+Create an SSH key pair for the Icinga daemon user. In case the user has no login shell, temporarily enable it.
+When asked for a passphrase, **do not set one** and just press enter.
+
+```bash
+sudo su - icinga
+
+ssh-keygen -b 4096 -t rsa -C "icinga@$(hostname) user for check_by_ssh" -f $HOME/.ssh/id_rsa
+```
+
+On the remote agent, create the icinga user and generate a temporary password.
+
+```bash
+useradd -m icinga
+passwd icinga
+```
+
+Copy the public key from the Icinga server to the remote agent, e.g. with `ssh-copy-id`
+or manually into `/home/icinga/.ssh/authorized_keys`.
+This will ask for the password once.
+
+```bash
+sudo su - icinga
+
+ssh-copy-id -i $HOME/.ssh/id_rsa icinga@ssh-agent1.localdomain
+```
+
+After the SSH key is copied, test the connection **at least once** and
+accept the host key verification. If you forget this step, checks will
+become UNKNOWN later.
+
+```bash
+ssh -i $HOME/.ssh/id_rsa icinga@ssh-agent1.localdomain
+```
+
+After the SSH key login works, disable the previously enabled logins:
+
+* Lock the remote agent user's password with `passwd -l icinga`
+* Disable the local icinga user's login shell again
+
+Also, ensure that the permissions are correct for the `.ssh` directory
+as otherwise logins will fail.
+
+* `.ssh` directory: 700
+* `.ssh/id_rsa.pub` public key file: 644
+* `.ssh/id_rsa` private key file: 600
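+
+A hedged sketch of fixing these permissions, run as the icinga user on the Icinga server:
+
+```bash
+chmod 700 $HOME/.ssh
+chmod 600 $HOME/.ssh/id_rsa
+chmod 644 $HOME/.ssh/id_rsa.pub
+```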
+
+
+### SSH: Configuration <a id="agent-based-checks-ssh-config"></a>
+
+First, create a host object which has SSH configured and enabled.
+Mark it, e.g. with the custom variable `agent_type`, to later
+use it for service apply rule matches. Best practice is to
+store that in a specific template, either in the static configuration
+or inside the Director.
+
+```
+template Host "ssh-agent" {
+ check_command = "hostalive"
+
+ vars.agent_type = "ssh"
+ vars.os_type = "linux"
+}
+
+object Host "ssh-agent1.localdomain" {
+ import "ssh-agent"
+
+ address = "192.168.56.115"
+}
+```
+
+Example for monitoring the remote users:
+
+```
+apply Service "users" {
+ check_command = "by_ssh"
+
+ vars.by_ssh_command = [ "/usr/lib/nagios/plugins/check_users" ]
+
+ // Follows the same principle as with command arguments, e.g. for ordering
+ vars.by_ssh_arguments = {
+ "-w" = {
+ value = "$users_wgreater$" // Can reference an existing custom variable defined on the host or service, evaluated at runtime
+ }
+ "-c" = {
+ value = "$users_cgreater$"
+ }
+ }
+
+ vars.users_wgreater = 3
+ vars.users_cgreater = 5
+
+ assign where host.vars.os_type == "linux" && host.vars.agent_type == "ssh"
+}
+```
+
+A more advanced example with better arguments is shown in [this blogpost](https://www.netways.de/blog/2016/03/21/check_by_ssh-mit-icinga-2/).
+
+
+## SNMP <a id="agent-based-checks-snmp"></a>
+
+The SNMP daemon runs on the remote system and answers SNMP queries issued by plugin scripts.
+The [Monitoring Plugins](https://www.monitoring-plugins.org/) package provides
+the `check_snmp` plugin binary, but there are plenty of [existing plugins](05-service-monitoring.md#service-monitoring-plugins)
+for specific use cases already around, for example monitoring Cisco routers.
+
+The following example uses the [SNMP ITL](10-icinga-template-library.md#plugin-check-command-snmp)
+CheckCommand and sets the `snmp_oid` custom variable. A service is created for all hosts which
+have the `snmp-community` custom variable.
+
+```
+template Host "snmp-agent" {
+ check_command = "hostalive"
+
+ vars.agent_type = "snmp"
+
+ vars.snmp_community = "public-icinga"
+}
+
+object Host "snmp-agent1.localdomain" {
+ import "snmp-agent"
+}
+```
+
+```
+apply Service "uptime" {
+ import "generic-service"
+
+ check_command = "snmp"
+ vars.snmp_oid = "1.3.6.1.2.1.1.3.0"
+ vars.snmp_miblist = "DISMAN-EVENT-MIB"
+
+ assign where host.vars.agent_type == "snmp" && host.vars.snmp_community != ""
+}
+```
+
+If no `snmp_miblist` is specified, the plugin will default to `ALL`. As the number of available MIB files
+on the system increases, so will the load generated by this plugin.
+It is therefore recommended to always specify at least one `MIB`.
+
+Additional SNMP plugins are available using the [Manubulon SNMP Plugins](10-icinga-template-library.md#snmp-manubulon-plugin-check-commands).
+
+For network monitoring, community members advise using [nwc_health](05-service-monitoring.md#service-monitoring-network),
+for example.
+
+
+## SNMP Traps and Passive Check Results <a id="agent-based-checks-snmp-traps"></a>
+
+SNMP Traps can be received and filtered by using [SNMPTT](http://snmptt.sourceforge.net/)
+and specific trap handlers passing the check results to Icinga 2.
+
+Following the SNMPTT [Format](http://snmptt.sourceforge.net/docs/snmptt.shtml#SNMPTT.CONF-FORMAT)
+documentation and the Icinga external command syntax found [here](24-appendix.md#external-commands-list-detail)
+we can create generic services that can accommodate any number of hosts for a given scenario.
+
+### Simple SNMP Traps <a id="simple-traps"></a>
+
+A simple example might be monitoring host reboots indicated by an SNMP agent reset.
+It is important to build the event so that it auto-resets after dispatching a notification.
+Set up the manual check parameters to reset the event from an initial unhandled
+state or from a missed reset event.
+
+Add a directive in `snmptt.conf`
+
+```
+EVENT coldStart .1.3.6.1.6.3.1.1.5.1 "Status Events" Normal
+FORMAT Device reinitialized (coldStart)
+EXEC echo "[$@] PROCESS_SERVICE_CHECK_RESULT;$A;Coldstart;2;The snmp agent has reinitialized." >> /var/run/icinga2/cmd/icinga2.cmd
+SDESC
+A coldStart trap signifies that the SNMPv2 entity, acting
+in an agent role, is reinitializing itself and that its
+configuration may have been altered.
+EDESC
+```
+
+1. Define the `EVENT` as per your need.
+2. Construct the `EXEC` statement with the service name matching your template
+applied to your _n_ hosts. The host address inferred by SNMPTT will be the
+correlating factor. You can have SNMPTT provide host names or IP addresses to
+match your Icinga convention.
+
+> **Note**
+>
+> Replace the deprecated command pipe EXEC statement with a curl call
+> to the REST API action [process-check-result](12-icinga2-api.md#icinga2-api-actions-process-check-result).
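+
+A hedged sketch of such a call (the `root` ApiUser, its password, the API endpoint and the host name are placeholders):
+
+```bash
+curl -k -s -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/process-check-result' \
+ -d '{ "type": "Service", "filter": "host.name==\"icinga2-agent1.localdomain\" && service.name==\"Coldstart\"", "exit_status": 2, "plugin_output": "The snmp agent has reinitialized.", "pretty": true }'
+```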
+
+Add an `EventCommand` configuration object for the passive service auto reset event.
+
+```
+object EventCommand "coldstart-reset-event" {
+ command = [ ConfigDir + "/conf.d/custom/scripts/coldstart_reset_event.sh" ]
+
+ arguments = {
+ "-i" = "$service.state_id$"
+ "-n" = "$host.name$"
+ "-s" = "$service.name$"
+ }
+}
+```
+
+Create the `coldstart_reset_event.sh` shell script to pass the expanded variable
+data in. The `$service.state_id$` is important in order to prevent an endless loop
+of event firing after the service has been reset.
+
+```bash
+#!/bin/bash
+
+SERVICE_STATE_ID=""
+HOST_NAME=""
+SERVICE_NAME=""
+
+show_help()
+{
+cat <<-EOF
+ Usage: ${0##*/} [-h] -i SERVICE_STATE_ID -n HOST_NAME -s SERVICE_NAME
+ Writes a coldstart reset event to the Icinga command pipe.
+
+ -h Display this help and exit.
+ -i SERVICE_STATE_ID The associated service state id.
+ -n HOST_NAME The associated host name.
+ -s SERVICE_NAME The associated service name.
+EOF
+}
+
+while getopts "hi:n:s:" opt; do
+ case "$opt" in
+ h)
+ show_help
+ exit 0
+ ;;
+ i)
+ SERVICE_STATE_ID=$OPTARG
+ ;;
+ n)
+ HOST_NAME=$OPTARG
+ ;;
+ s)
+ SERVICE_NAME=$OPTARG
+ ;;
+ '?')
+ show_help
+ exit 0
+ ;;
+ esac
+done
+
+if [ -z "$SERVICE_STATE_ID" ]; then
+ show_help
+ printf "\n Error: -i required.\n"
+ exit 1
+fi
+
+if [ -z "$HOST_NAME" ]; then
+ show_help
+ printf "\n Error: -n required.\n"
+ exit 1
+fi
+
+if [ -z "$SERVICE_NAME" ]; then
+ show_help
+ printf "\n Error: -s required.\n"
+ exit 1
+fi
+
+if [ "$SERVICE_STATE_ID" -gt 0 ]; then
+ echo "[`date +%s`] PROCESS_SERVICE_CHECK_RESULT;$HOST_NAME;$SERVICE_NAME;0;Auto-reset (`date +"%m-%d-%Y %T"`)." >> /var/run/icinga2/cmd/icinga2.cmd
+fi
+```
+
+> **Note**
+>
+> Replace the deprecated command pipe EXEC statement with a curl call
+> to the REST API action [process-check-result](12-icinga2-api.md#icinga2-api-actions-process-check-result).
+
+Finally create the `Service` and assign it:
+
+```
+apply Service "Coldstart" {
+ import "generic-service-custom"
+
+ check_command = "dummy"
+ event_command = "coldstart-reset-event"
+
+ enable_notifications = 1
+ enable_active_checks = 0
+ enable_passive_checks = 1
+ enable_flapping = 0
+ volatile = 1
+ enable_perfdata = 0
+
+ vars.dummy_state = 0
+ vars.dummy_text = "Manual reset."
+
+ vars.sla = "24x7"
+
+ assign where (host.vars.os == "Linux" || host.vars.os == "Windows")
+}
+```
+
+### Complex SNMP Traps <a id="complex-traps"></a>
+
+A more complex example might be passing dynamic data from a traps varbind list
+for a backup scenario where the backup software dispatches status updates. By
+utilizing active and passive checks, the older freshness concept can be leveraged.
+
+By defining the active check as a hard failed state, a missed backup can be reported.
+As long as the most recent passive update has occurred, the active check is bypassed.
+
+Add a directive in `snmptt.conf`
+
+```
+EVENT enterpriseSpecific <YOUR OID> "Status Events" Normal
+FORMAT Enterprise specific trap
+EXEC echo "[$@] PROCESS_SERVICE_CHECK_RESULT;$A;$1;$2;$3" >> /var/run/icinga2/cmd/icinga2.cmd
+SDESC
+An enterprise specific trap.
+The varbinds in order denote the Icinga service name, state and text.
+EDESC
+```
+
+1. Define the `EVENT` as per your need using your actual OID.
+2. The service name, state and text are extracted from the first three varbinds.
+This has the advantage of accommodating an unlimited set of use cases.
+
+> **Note**
+>
+> Replace the deprecated command pipe EXEC statement with a curl call
+> to the REST API action [process-check-result](12-icinga2-api.md#icinga2-api-actions-process-check-result).
+
+Create a `Service` for the specific use case associated with the host. If the host
+matches and the first varbind value is `Backup`, SNMPTT will submit the corresponding
+passive update with the state and text from the second and third varbind:
+
+```
+object Service "Backup" {
+ import "generic-service-custom"
+
+ host_name = "host.domain.com"
+ check_command = "dummy"
+
+ enable_notifications = 1
+ enable_active_checks = 1
+ enable_passive_checks = 1
+ enable_flapping = 0
+ volatile = 1
+ max_check_attempts = 1
+ check_interval = 87000
+ enable_perfdata = 0
+
+ vars.sla = "24x7"
+ vars.dummy_state = 2
+ vars.dummy_text = "No passive check result received."
+}
+```
+
+
+## Agents sending Check Results via REST API <a id="agent-based-checks-rest-api"></a>
+
+Whenever the remote host cannot run the Icinga agent, or a backup script
+should just send its current state after finishing, you can use the [REST API](12-icinga2-api.md#icinga2-api)
+as a secure transport and send [passive external check results](08-advanced-topics.md#external-check-results).
+
+Use the [process-check-result](12-icinga2-api.md#icinga2-api-actions-process-check-result) API action to send the external passive check result.
+You can either use `curl` or implement the HTTP requests in your preferred programming
+language. Examples for API clients are available in [this chapter](12-icinga2-api.md#icinga2-api-clients).
+
+Feeding check results from remote hosts requires the host/service
+objects configured on the master/satellite instance.
+
+## NSClient++ on Windows <a id="agent-based-checks-nsclient"></a>
+
+[NSClient++](https://nsclient.org/) works on both Windows and Linux platforms and is well
+known for its magnificent Windows support. There are alternatives like the WMI interface,
+but using `NSClient++` will allow you to run local scripts similar to check plugins fetching
+the required output and performance counters.
+
+> **Tip**
+>
+> Best practice is to use the Icinga agent as secure execution
+> bridge (`check_nt` and `check_nrpe` are considered insecure)
+> and query the NSClient++ service [locally](06-distributed-monitoring.md#distributed-monitoring-windows-nscp).
+
+You can use the `check_nt` plugin from the Monitoring Plugins project to query NSClient++.
+Icinga 2 provides the [nscp check command](10-icinga-template-library.md#plugin-check-command-nscp) for this:
+
+Example:
+
+```
+object Service "disk" {
+ import "generic-service"
+
+ host_name = "remote-windows-host"
+
+ check_command = "nscp"
+
+ vars.nscp_variable = "USEDDISKSPACE"
+ vars.nscp_params = "c"
+ vars.nscp_warn = 70
+ vars.nscp_crit = 80
+}
+```
+
+For details on the `NSClient++` configuration please refer to the [official documentation](https://docs.nsclient.org/).
+
+## WMI on Windows <a id="agent-based-checks-wmi"></a>
+
+The most popular plugin is [check_wmi_plus](https://edcint.co.nz/checkwmiplus/).
+
+> Check WMI Plus uses the Windows Management Interface (WMI) to check for common services (cpu, disk, services, eventlog…) on Windows machines. It requires the open source wmi client for Linux.
+
+Community examples:
+
+* [Icinga 2 check_wmi_plus example by 18pct](https://18pct.com/icinga2-check_wmi_plus-example/)
+* [Agent-less monitoring with WMI](https://www.devlink.de/linux/icinga2-nagios-agentless-monitoring-von-windows/)
diff --git a/doc/08-advanced-topics.md b/doc/08-advanced-topics.md
new file mode 100644
index 0000000..34330ed
--- /dev/null
+++ b/doc/08-advanced-topics.md
@@ -0,0 +1,1208 @@
+# Advanced Topics <a id="advanced-topics"></a>
+
+This chapter covers a number of advanced topics. If you're new to Icinga, you
+can safely skip over things you're not interested in.
+
+## Downtimes <a id="downtimes"></a>
+
+Downtimes can be scheduled for planned server maintenance or
+any other targeted service outage you are aware of in advance.
+
+Downtimes suppress notifications and can trigger other
+downtimes too. If the downtime was set by accident, or the duration
+exceeds the maintenance windows, you can manually cancel the downtime.
+
+### Scheduling a downtime <a id="scheduling-downtime"></a>
+
+The most convenient way to schedule planned downtimes is to create
+them in Icinga Web 2 inside the host/service detail view. Select
+multiple hosts/services from the listing with the shift key to
+schedule multiple downtimes.
+
+![Downtime in Icinga Web 2](images/advanced-topics/icingaweb2_downtime_handled.png)
+
+In addition to that you can schedule a downtime by using the Icinga 2 API action
+[schedule-downtime](12-icinga2-api.md#icinga2-api-actions-schedule-downtime).
+This is especially useful to schedule a downtime on-demand inside a (remote) backup
+script, or create maintenance downtimes from a cron job for specific dates and intervals.
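+
+A hedged sketch of such an API call, e.g. from a backup script (the ApiUser credentials and the host name
+are placeholders; `start_time` and `end_time` are UNIX timestamps):
+
+```bash
+curl -k -s -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/schedule-downtime' \
+ -d '{ "type": "Host", "filter": "host.name==\"icinga2-agent1.localdomain\"", "author": "icingaadmin", "comment": "Scheduled downtime for backup", "start_time": 1735689600, "end_time": 1735693200, "fixed": true, "pretty": true }'
+```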
+
+Multiple downtimes for a single object may overlap. This is useful
+when you want to extend your maintenance window taking longer than expected.
+If there are multiple downtimes triggered for one object, the overall downtime depth
+will be greater than `1`.
+
+If the downtime was scheduled after the problem changed to a critical hard
+state triggering a problem notification, and the service recovers during
+the downtime window, the recovery notification won't be suppressed.
+
+Planned downtimes are also taken into account for SLA reporting
+tools calculating the SLAs based on the state and downtime history.
+
+### Fixed and Flexible Downtimes <a id="fixed-flexible-downtimes"></a>
+
+A `fixed` downtime is activated at the defined start time and
+removed at the end time. If the service changes to a `NOT-OK` state
+during this time window, the downtime is actually triggered:
+notifications are suppressed and the downtime depth is incremented.
+
+Common scenarios are a planned distribution upgrade on your Linux
+servers, or database updates in your warehouse. The customer knows
+about a fixed maintenance window between 23:00 and 24:00. After 24:00
+all problems should be alerted again. The solution is simple:
+schedule a `fixed` downtime starting at 23:00 and ending at 24:00.
+
+Unlike a `fixed` downtime, a `flexible` downtime will be triggered
+by the state change in the time span defined by start and end time,
+and then last for the specified duration in minutes.
+
+Imagine the following scenario: Your service is frequently polled
+by users trying to grab free deleted domains for immediate registration.
+Between 07:30 and 08:00 the impact will hit for 15 minutes and generate
+a network outage visible to the monitoring. The service is still alive,
+but answers too slowly to Icinga 2 service checks.
+For that reason, you may want to schedule a downtime between 07:30 and
+08:00 with a duration of 15 minutes. The downtime will then last from
+its trigger time until the duration is over. After that, the downtime
+is removed (may happen before or after the actual end time!).
+
+#### Fixed Downtime <a id="fixed-downtime"></a>
+
+If the host/service changes into a NOT-OK state between the start and
+end time window, the downtime will be marked as `in effect` and
+increases the downtime depth counter.
+
+```
+ | | |
+start | end
+ trigger time
+```
+
+#### Flexible Downtime <a id="flexible-downtime"></a>
+
+A flexible downtime defines a time window where the downtime may be
+triggered from a host/service NOT-OK state change. It will then last
+until the specified time duration is reached. That way it can happen
+that the downtime end time is already gone, but the downtime ends
+at `trigger time + duration`.
+
+
+```
+ | | |
+start | end actual end time
+ |--------------duration--------|
+ trigger time
+```
+
+
+### Triggered Downtimes <a id="triggered-downtimes"></a>
+
+This is optional when scheduling a downtime. If there is already a downtime
+scheduled for a future maintenance, the current downtime can be triggered by
+that downtime. This is useful if you have scheduled a host downtime and
+are now scheduling a child host's downtime which should be triggered by the parent
+downtime on a `NOT-OK` state change.
+
+### Recurring Downtimes <a id="recurring-downtimes"></a>
+
+[ScheduledDowntime objects](09-object-types.md#objecttype-scheduleddowntime) can be used to set up
+recurring downtimes for services.
+
+Example:
+
+```
+apply ScheduledDowntime "backup-downtime" to Service {
+ author = "icingaadmin"
+ comment = "Scheduled downtime for backup"
+
+ ranges = {
+ monday = "02:00-03:00"
+ tuesday = "02:00-03:00"
+ wednesday = "02:00-03:00"
+ thursday = "02:00-03:00"
+ friday = "02:00-03:00"
+ saturday = "02:00-03:00"
+ sunday = "02:00-03:00"
+ }
+
+ assign where "backup" in service.groups
+}
+```
+
+Icinga 2 attempts to find the next possible segment from a ScheduledDowntime object's
+`ranges` attribute, and won't create multiple downtimes in the future. In case you need
+all these downtimes planned and visible for the next days, weeks or months, schedule them
+manually via the [REST API](12-icinga2-api.md#icinga2-api-actions-schedule-downtime) using
+a script or cron job.
+
+> **Note**
+>
+> If ScheduledDowntime objects are synced in a distributed high-availability setup,
+> both will create the next possible downtime on their own. These runtime generated
+> downtimes are synced among both zone instances, and you may see sort-of duplicate downtimes
+> in Icinga Web 2.
+
+
+## Comments <a id="comments-intro"></a>
+
+Comments can be added at runtime and are persistent over restarts. You can
+add useful information for others on repeating incidents (for example
+"last time syslog at 100% cpu on 17.10.2013 due to stale nfs mount") which
+is primarily accessible using web interfaces.
+
+You can add a comment either by using the Icinga 2 API action
+[add-comment](12-icinga2-api.md#icinga2-api-actions-add-comment) or
+by sending an [external command](14-features.md#external-commands).
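+
+As a sketch, adding a comment via the REST API could look like this; the credentials
+and the host name are example values:
+
+```
+# example credentials and host name -- adjust to your environment
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/add-comment' \
+ -d '{ "type": "Host", "filter": "host.name==\"webserver01\"", "author": "icingaadmin", "comment": "Known issue, stale NFS mount", "pretty": true }'
+```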
+
+## Acknowledgements <a id="acknowledgements"></a>
+
+If a problem persists and notifications have been sent, you can
+acknowledge the problem. That way other users will get
+a notification that you're aware of the issue and probably are
+already working on a fix.
+
+Note: Acknowledgements also add a new [comment](08-advanced-topics.md#comments-intro)
+which contains the author and text fields.
+
+You can send an acknowledgement either by using the Icinga 2 API action
+[acknowledge-problem](12-icinga2-api.md#icinga2-api-actions-acknowledge-problem) or
+by sending an [external command](14-features.md#external-commands).
+
+
+### Sticky Acknowledgements <a id="sticky-acknowledgements"></a>
+
+The acknowledgement is removed if a state change occurs or if the host/service
+recovers (OK/Up state).
+
+If you acknowledge a problem once you've received a `Critical` notification,
+the acknowledgement will be removed if there is a state transition to `Warning`.
+```
+OK -> WARNING -> CRITICAL -> WARNING -> OK
+```
+
+If you prefer to keep the acknowledgement until the problem is resolved (`OK`
+recovery) you need to enable the `sticky` parameter.
+
+
+### Expiring Acknowledgements <a id="expiring-acknowledgements"></a>
+
+Once a problem is acknowledged it may disappear from your `handled problems`
+dashboard and no one ever looks at it again, since it suppresses
+notifications too.
+
+This `fire-and-forget` action is quite common. If you're sure that a
+current problem should be resolved in the future at a defined time,
+you can define an expiration time when acknowledging the problem.
+
+Icinga 2 will clear the acknowledgement when expired and start to
+re-notify, if the problem persists.
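+
+Both behaviours map to parameters of the [acknowledge-problem](12-icinga2-api.md#icinga2-api-actions-acknowledge-problem)
+action. A rough sketch combining `sticky` with an expiry timestamp; the credentials,
+object names and the timestamp are example values:
+
+```
+# example credentials, object names and expiry timestamp -- adjust to your environment
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/acknowledge-problem' \
+ -d '{ "type": "Service", "filter": "host.name==\"webserver01\" && service.name==\"http\"", "author": "icingaadmin", "comment": "Fix scheduled for the next maintenance window", "sticky": true, "expiry": 1714518000, "notify": true, "pretty": true }'
+```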
+
+
+## Time Periods <a id="timeperiods"></a>
+
+[Time Periods](09-object-types.md#objecttype-timeperiod) define
+time ranges in Icinga within which event actions are triggered, for
+example whether a service check is executed or not (controlled by
+the `check_period` attribute), or whether a notification should be sent to
+users or not (filtered by the `period` and `notification_period`
+configuration attributes of `Notification` and `User` objects).
+
+The `TimePeriod` attribute `ranges` may contain multiple directives,
+including weekdays, days of the month, and calendar dates.
+These types may overlap/override other types in your ranges dictionary.
+
+The descending order of precedence is as follows:
+
+* Calendar date (2008-01-01)
+* Specific month date (January 1st)
+* Generic month date (Day 15)
+* Offset weekday of specific month (2nd Tuesday in December)
+* Offset weekday (3rd Monday)
+* Normal weekday (Tuesday)
+
+If you don't set any `check_period` or `notification_period` attribute
+on your configuration objects, Icinga 2 assumes `24x7` as time period
+as shown below.
+
+```
+object TimePeriod "24x7" {
+ display_name = "Icinga 2 24x7 TimePeriod"
+ ranges = {
+ "monday" = "00:00-24:00"
+ "tuesday" = "00:00-24:00"
+ "wednesday" = "00:00-24:00"
+ "thursday" = "00:00-24:00"
+ "friday" = "00:00-24:00"
+ "saturday" = "00:00-24:00"
+ "sunday" = "00:00-24:00"
+ }
+}
+```
+
+If your operations staff should only be notified during work hours,
+create a new time period named `workhours` defining a work day from
+09:00 to 17:00.
+
+```
+object TimePeriod "workhours" {
+ display_name = "Icinga 2 8x5 TimePeriod"
+ ranges = {
+ "monday" = "09:00-17:00"
+ "tuesday" = "09:00-17:00"
+ "wednesday" = "09:00-17:00"
+ "thursday" = "09:00-17:00"
+ "friday" = "09:00-17:00"
+ }
+}
+```
+
+### Across midnight <a id="timeperiods-across-midnight"></a>
+
+If you want to specify a notification period across midnight,
+you can define it the following way:
+
+```
+object TimePeriod "across-midnight" {
+ display_name = "Nightly Notification"
+ ranges = {
+ "saturday" = "22:00-24:00"
+ "sunday" = "00:00-03:00"
+ }
+}
+```
+
+Starting with v2.11 this can be shortened to using
+the first day as start with an overlapping range into
+the next day:
+
+```
+object TimePeriod "do-not-disturb" {
+ display_name = "Weekend DND"
+ ranges = {
+ "saturday" = "22:00-06:00"
+ }
+}
+```
+
+### Across several days, weeks or months <a id="timeperiods-across-days-weeks-months"></a>
+
+Below you can see another example for configuring timeperiods across several
+days, weeks or months. This can be useful when taking components offline
+for a distinct period of time.
+
+```
+object TimePeriod "standby" {
+ display_name = "Standby"
+ ranges = {
+ "2016-09-30 - 2016-10-30" = "00:00-24:00"
+ }
+}
+```
+
+Please note that the spaces before and after the dash are mandatory.
+
+Once your time period is configured, you can use the `period` attribute
+to assign time periods to `Notification` and `Dependency` objects:
+
+```
+apply Notification "mail-icingaadmin" to Service {
+ import "mail-service-notification"
+ user_groups = host.vars.notification.mail.groups
+ users = host.vars.notification.mail.users
+
+ period = "workhours"
+
+ assign where host.vars.notification.mail
+}
+```
+
+### Time Periods Inclusion and Exclusion <a id="timeperiods-includes-excludes"></a>
+
+Sometimes it is necessary to exclude certain time ranges from
+your default time period definitions, for example, if you don't
+want to send out any notification during the holiday season,
+or if you only want to allow small time windows for executed checks.
+
+The [TimePeriod object](09-object-types.md#objecttype-timeperiod)
+provides the `includes` and `excludes` attributes to solve this issue.
+`prefer_includes` defines whether included or excluded time periods are
+preferred.
+
+The following example defines a time period called `holidays` where
+notifications should be suppressed:
+
+```
+object TimePeriod "holidays" {
+ ranges = {
+ "january 1" = "00:00-24:00" //new year's day
+ "july 4" = "00:00-24:00" //independence day
+ "december 25" = "00:00-24:00" //christmas
+ "december 31" = "18:00-24:00" //new year's eve (6pm+)
+ "2017-04-16" = "00:00-24:00" //easter 2017
+ "monday -1 may" = "00:00-24:00" //memorial day (last monday in may)
+ "monday 1 september" = "00:00-24:00" //labor day (1st monday in september)
+ "thursday 4 november" = "00:00-24:00" //thanksgiving (4th thursday in november)
+ }
+}
+```
+
+In addition, the time period `weekends-excluded` defines
+time windows which should be excluded from notifications:
+
+```
+object TimePeriod "weekends-excluded" {
+ ranges = {
+ "saturday" = "00:00-09:00,18:00-24:00"
+ "sunday" = "00:00-09:00,18:00-24:00"
+ }
+}
+```
+
+The time period `prod-notification` defines the default time ranges
+and adds the excluded time period names as an array.
+
+```
+object TimePeriod "prod-notification" {
+ excludes = [ "holidays", "weekends-excluded" ]
+
+ ranges = {
+ "monday" = "00:00-24:00"
+ "tuesday" = "00:00-24:00"
+ "wednesday" = "00:00-24:00"
+ "thursday" = "00:00-24:00"
+ "friday" = "00:00-24:00"
+ "saturday" = "00:00-24:00"
+ "sunday" = "00:00-24:00"
+ }
+}
+```
+
+### Time zone handling <a id="timeperiods-timezones"></a>
+
+Icinga 2 takes the OS' time zone including DST changes into account.
+
+Times inside DST changes are interpreted as before the DST changes.
+I.e. for the time zone Europe/Berlin:
+
+* On 2020-10-25 03:00 CEST the time jumps back to 02:00 CET.
+ For Icinga 02:30 means 02:30 CEST.
+* On 2021-03-28 02:00 CET the time jumps forward to 03:00 CEST.
+ For Icinga, the (actually non-existent) 02:30 refers to CET
+ and effectively means 03:30 CEST.
+
+## External Passive Check Results <a id="external-check-results"></a>
+
+Checks whose state and output are not determined by actively executing a check plugin
+are called "passive checks" or "external check results".
+In this scenario, an external client or script sends in the check results.
+
+You can feed check results into Icinga 2 with the following transport methods:
+
+* [process-check-result action](12-icinga2-api.md#icinga2-api-actions-process-check-result) available with the [REST API](12-icinga2-api.md#icinga2-api) (remote and local)
+* External command sent via command pipe (local only)
+
+Each time a new check result is received, the next expected check time
+is updated. This means that if no check results are received from
+the external source, Icinga 2 will execute [freshness checks](08-advanced-topics.md#check-result-freshness).
+
+> **Note**
+>
+> The REST API action allows you to specify the `check_source` attribute,
+> which helps identify the external sender. This is also visible
+> in Icinga Web 2 and the REST API queries.
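+
+A minimal sketch of submitting a passive check result via the REST API; the credentials,
+object names and the `ttl` value are assumptions and need to match your environment:
+
+```
+# example credentials and object names -- adjust to your environment
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/process-check-result' \
+ -d '{ "type": "Service", "filter": "host.name==\"remote-host\" && service.name==\"external-check\"", "exit_status": 0, "plugin_output": "OK - backup finished", "check_source": "backup-script", "ttl": 3600, "pretty": true }'
+```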
+
+## Check Result Freshness <a id="check-result-freshness"></a>
+
+In Icinga 2, active check freshness is enabled by default. A host or service is considered
+stale when no check result has been received within the period of time determined by the `check_interval` attribute.
+
+The threshold is calculated based on the last check execution time for actively executed checks:
+
+```
+(last check execution time + check interval) > current time
+```
+
+If this host/service receives check results from an [external source](08-advanced-topics.md#external-check-results),
+the threshold is based on the last time a check result was received:
+
+```
+(last check result time + check interval) > current time
+```
+
+> **Tip**
+>
+> The [process-check-result](12-icinga2-api.md#icinga2-api-actions-process-check-result) REST API
+> action allows you to override the pre-defined check interval with a specified TTL (Icinga 2 v2.9+).
+
+If the freshness checks fail, Icinga 2 will execute the defined check command unless active checks are disabled.
+
+Best practice is to define a [dummy](10-icinga-template-library.md#itl-dummy) `check_command` which gets
+executed when freshness checks fail.
+
+```
+apply Service "external-check" {
+ check_command = "dummy"
+ check_interval = 1m
+
+ /* Set the state to UNKNOWN (3) if freshness checks fail. */
+ vars.dummy_state = 3
+
+ /* Use a runtime function to retrieve the last check time and more details. */
+ vars.dummy_text = {{
+ var service = get_service(macro("$host.name$"), macro("$service.name$"))
+ var lastCheck = DateTime(service.last_check).to_string()
+
+ return "No check results received. Last result time: " + lastCheck
+ }}
+
+ assign where "external" in host.vars.services
+}
+```
+
+References: [get_service](18-library-reference.md#objref-get_service), [macro](18-library-reference.md#scoped-functions-macro), [DateTime](18-library-reference.md#datetime-type).
+
+Example output in Icinga Web 2:
+
+![Icinga 2 Freshness Checks](images/advanced-topics/icinga2_external_checks_freshness_icingaweb2.png)
+
+
+## Check Flapping <a id="check-flapping"></a>
+
+Icinga 2 supports optional detection of hosts and services that are "flapping".
+
+Flapping occurs when a service or host changes state too frequently, which would result in a storm of problem and
+recovery notifications. With flapping detection enabled, a flapping notification is sent while other notifications are
+suppressed until the object calms down, i.e. until checks return the same state a few times in a row. Flapping detection can help detect
+configuration problems (wrong thresholds), troublesome services or network problems.
+
+Flapping detection can be enabled or disabled using the `enable_flapping` attribute.
+The `flapping_threshold_high` and `flapping_threshold_low` attributes allow you to specify the thresholds that control
+when a [host](09-object-types.md#objecttype-host) or [service](09-object-types.md#objecttype-service) is considered to be flapping.
+
+The default thresholds are 30% for high and 25% for low. If the computed flapping value exceeds the high threshold a
+host or service is considered flapping until it drops below the low flapping threshold.
+
+The attribute `flapping_ignore_states` allows you to ignore state changes to the specified states during the flapping calculation.
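+
+These attributes can be set on host and service objects or templates. A short sketch
+with assumed values; the service name and the assign rule are examples only:
+
+```
+apply Service "http" {
+  import "generic-service"
+  check_command = "http"
+
+  enable_flapping = true
+  flapping_threshold_high = 30 // default
+  flapping_threshold_low = 25  // default
+  flapping_ignore_states = [ "Warning" ] // ignore changes to the Warning state
+
+  assign where host.vars.http_vhost
+}
+```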
+
+`FlappingStart` and `FlappingEnd` notifications will be sent out accordingly, if configured. See the chapter on
+[notifications](03-monitoring-basics.md#alert-notifications) for details.
+
+> Note: There is no distinction between hard and soft states with flapping. All state changes count and notifications
+> will be sent out regardless of the object's state.
+
+### How it works <a id="check-flapping-how-it-works"></a>
+
+Icinga 2 saves the last 20 state changes for every host and service. See the graphic below:
+
+![Icinga 2 Flapping State Timeline](images/advanced-topics/flapping-state-graph.png)
+
+All the states are weighted, with the most recent one being worth the most (1.15) and the 20th the least (0.8). The
+states in between are distributed evenly. The final flapping value is the sum of the weighted state changes divided by the total
+count of 20.
+
+In the example above, the added states would have a total value of 7.82 (`0.84 + 0.86 + 0.88 + 0.9 + 0.98 + 1.06 + 1.12 + 1.18`).
+This yields a flapping percentage of 39.1% (`7.82 / 20 * 100`). As the default upper flapping threshold is 30%, it would be
+considered flapping.
+
+If the next seven check results were then not state changes, the flapping percentage would fall below the lower threshold
+of 25% and therefore the host or service would recover from flapping.
+
+## Volatile Services and Hosts <a id="volatile-services-hosts"></a>
+
+The `volatile` option, if enabled for a host or service, makes it treat every [state change](03-monitoring-basics.md#hard-soft-states)
+as a `HARD` state change. It is comparable to `max_check_attempts = 1`. With this any `NOT-OK` result will
+ignore `max_check_attempts` and trigger notifications etc. It will further cause any additional `NOT-OK`
+result to re-send notifications.
+
+It may be reasonable to have a volatile service which stays in a `HARD` state if the service stays in a `NOT-OK`
+state. That way each service recheck will automatically trigger a notification unless the service is acknowledged or
+in a scheduled downtime.
+
+A common example are security checks where each `NOT-OK` check result should immediately trigger a notification.
+
+The default for this option is `false`; it should only be enabled when required.
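+
+A sketch of a volatile service definition; the check command and the assign rule are
+placeholders for your actual security check:
+
+```
+apply Service "security-check" {
+  import "generic-service"
+
+  // placeholder -- use the check plugin performing your security check here
+  check_command = "dummy"
+
+  volatile = true
+
+  assign where host.vars.os == "Linux"
+}
+```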
+
+
+## Monitoring Icinga 2 <a id="monitoring-icinga"></a>
+
+Why should you do that? Icinga and its components run like any other
+service application on your server. There are predictable issues
+such as "disk space is running low" and your monitoring suffers from just
+that.
+
+You would also like to ensure that features and backends are running
+and storing required data. Be it the database backend where Icinga Web 2
+presents fancy dashboards, forwarded metrics to Graphite or InfluxDB or
+the entire distributed setup.
+
+This list isn't complete but should help with your own setup.
+Windows client specific checks are highlighted.
+
+Type | Description | Plugins and CheckCommands
+----------------|-------------------------------|-----------------------------------------------------
+System | Filesystem | [disk](10-icinga-template-library.md#plugin-check-command-disk), [disk-windows](10-icinga-template-library.md#windows-plugins) (Windows Client)
+System | Memory, Swap | [mem](10-icinga-template-library.md#plugin-contrib-command-mem), [swap](10-icinga-template-library.md#plugin-check-command-swap), [memory](10-icinga-template-library.md#windows-plugins) (Windows Client)
+System | Hardware | [hpasm](10-icinga-template-library.md#plugin-contrib-command-hpasm), [ipmi-sensor](10-icinga-template-library.md#plugin-contrib-command-ipmi-sensor)
+System | Virtualization | [VMware](10-icinga-template-library.md#plugin-contrib-vmware), [esxi_hardware](10-icinga-template-library.md#plugin-contrib-command-esxi-hardware)
+System | Processes | [procs](10-icinga-template-library.md#plugin-check-command-processes), [service-windows](10-icinga-template-library.md#windows-plugins) (Windows Client)
+System | System Activity Reports | [sar-perf](10-icinga-template-library.md#plugin-contrib-command-sar-perf)
+System | I/O | [iostat](10-icinga-template-library.md#plugin-contrib-command-iostat)
+System | Network interfaces | [nwc_health](10-icinga-template-library.md#plugin-contrib-command-nwc_health), [interfaces](10-icinga-template-library.md#plugin-contrib-command-interfaces)
+System | Users | [users](10-icinga-template-library.md#plugin-check-command-users), [users-windows](10-icinga-template-library.md#windows-plugins) (Windows Client)
+System | Logs | Forward them to [Elastic Stack](14-features.md#elastic-stack-integration) or [Graylog](14-features.md#graylog-integration) and add your own alerts.
+System | NTP | [ntp_time](10-icinga-template-library.md#plugin-check-command-ntp-time)
+System | Updates | [apt](10-icinga-template-library.md#plugin-check-command-apt), [yum](10-icinga-template-library.md#plugin-contrib-command-yum)
+Icinga | Status & Stats | [icinga](10-icinga-template-library.md#itl-icinga) (more below)
+Icinga | Cluster & Clients | [health checks](06-distributed-monitoring.md#distributed-monitoring-health-checks)
+Database | MySQL | [mysql_health](10-icinga-template-library.md#plugin-contrib-command-mysql_health)
+Database | PostgreSQL | [postgres](10-icinga-template-library.md#plugin-contrib-command-postgres)
+Database | Housekeeping | Check the database size and growth and analyse metrics to examine trends.
+Database | DB IDO | [ido](10-icinga-template-library.md#itl-icinga-ido) (more below)
+Webserver | Apache2, Nginx, etc. | [http](10-icinga-template-library.md#plugin-check-command-http), [apache-status](10-icinga-template-library.md#plugin-contrib-command-apache-status), [nginx_status](10-icinga-template-library.md#plugin-contrib-command-nginx_status)
+Webserver | Certificates | [http](10-icinga-template-library.md#plugin-check-command-http), [Icinga certificate monitoring](https://icinga.com/products/icinga-certificate-monitoring/)
+Webserver | Authorization | [http](10-icinga-template-library.md#plugin-check-command-http)
+Notifications | Mail (queue) | [smtp](10-icinga-template-library.md#plugin-check-command-smtp), [mailq](10-icinga-template-library.md#plugin-check-command-mailq)
+Notifications | SMS (GSM modem) | [check_sms3_status](https://exchange.icinga.com/netways/check_sms3status)
+Notifications | Messengers, Cloud services | XMPP, Twitter, IRC, Telegram, PagerDuty, VictorOps, etc.
+Metrics | PNP, RRDTool | [check_pnp_rrds](https://github.com/lingej/pnp4nagios/tree/master/scripts) checks for stale RRD files.
+Metrics | Graphite | [graphite](10-icinga-template-library.md#plugin-contrib-command-graphite)
+Metrics | InfluxDB | [check_influxdb](https://exchange.icinga.com/Mikanoshi/InfluxDB+data+monitoring+plugin)
+Metrics | Elastic Stack | [elasticsearch](10-icinga-template-library.md#plugin-contrib-command-elasticsearch), [Elastic Stack integration](14-features.md#elastic-stack-integration)
+Metrics | Graylog | [Graylog integration](14-features.md#graylog-integration)
+
+
+The [icinga](10-icinga-template-library.md#itl-icinga) CheckCommand provides metrics for the runtime stats of
+Icinga 2. You can forward them to your preferred graphing solution.
+If you require more metrics you can also query the [REST API](12-icinga2-api.md#icinga2-api) and write
+your own custom check plugin. Or you keep using the built-in [object accessor functions](08-advanced-topics.md#access-object-attributes-at-runtime)
+to calculate stats in-memory.
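+
+A minimal sketch for adding the built-in check to the local Icinga node:
+
+```
+object Service "icinga" {
+  check_command = "icinga"
+  host_name = NodeName // assumes a Host object named after the local node exists
+}
+```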
+
+There is a built-in [ido](10-icinga-template-library.md#itl-icinga-ido) check available for DB IDO MySQL/PostgreSQL
+which provides additional metrics for the IDO database.
+
+```
+apply Service "ido-mysql" {
+ check_command = "ido"
+
+ vars.ido_type = "IdoMysqlConnection"
+ vars.ido_name = "ido-mysql" //the name defined in /etc/icinga2/features-enabled/ido-mysql.conf
+
+ assign where match("master*.localdomain", host.name)
+}
+```
+
+More specific database queries can be found in the [DB IDO](14-features.md#db-ido) chapter.
+
+Distributed setups should include specific [health checks](06-distributed-monitoring.md#distributed-monitoring-health-checks).
+
+You might also want to add additional checks for TLS certificate expiration.
+This can be done using the [Icinga certificate monitoring](https://icinga.com/products/icinga-certificate-monitoring/) module.
+
+
+
+## Advanced Configuration Hints <a id="advanced-configuration-hints"></a>
+
+### Advanced Use of Apply Rules <a id="advanced-use-of-apply-rules"></a>
+
+[Apply rules](03-monitoring-basics.md#using-apply) can be used to create a rule set which is
+entirely based on host objects and their attributes.
+In addition to that [apply for and custom variable override](03-monitoring-basics.md#using-apply-for)
+extend the possibilities.
+
+The following example defines a dictionary on the host object which contains
+configuration attributes for multiple web servers. This is then used to add three checks:
+
+* A `ping4` check using the local IP `address` of the web server.
+* A `tcp` check querying the TCP port where the HTTP service is running on.
+* If the `url` key is defined, the third apply for rule will create service objects using the `http` CheckCommand.
+  In addition to that you can optionally define the `ssl` attribute which enables HTTPS checks.
+
+Host definition:
+
+```
+object Host "webserver01" {
+ import "generic-host"
+ address = "192.168.56.200"
+ vars.os = "Linux"
+
+ vars.webserver = {
+ instance["status"] = {
+ address = "192.168.56.201"
+ port = "80"
+ url = "/status"
+ }
+ instance["tomcat"] = {
+ address = "192.168.56.202"
+ port = "8080"
+ }
+ instance["icingaweb2"] = {
+ address = "192.168.56.210"
+ port = "443"
+ url = "/icingaweb2"
+ ssl = true
+ }
+ }
+}
+```
+
+Service apply for definitions:
+
+```
+apply Service "webserver_ping" for (instance => config in host.vars.webserver.instance) {
+ display_name = "webserver_" + instance
+ check_command = "ping4"
+
+ vars.ping_address = config.address
+
+ assign where host.vars.webserver.instance
+}
+
+apply Service "webserver_port" for (instance => config in host.vars.webserver.instance) {
+ display_name = "webserver_" + instance + "_" + config.port
+ check_command = "tcp"
+
+ vars.tcp_address = config.address
+ vars.tcp_port = config.port
+
+ assign where host.vars.webserver.instance
+}
+
+apply Service "webserver_url" for (instance => config in host.vars.webserver.instance) {
+ display_name = "webserver_" + instance + "_" + config.url
+ check_command = "http"
+
+ vars.http_address = config.address
+ vars.http_port = config.port
+ vars.http_uri = config.url
+
+ if (config.ssl) {
+ vars.http_ssl = config.ssl
+ }
+
+ assign where config.url != ""
+}
+```
+
+The variables defined in the host dictionary are not using the typical custom variable
+prefix recommended for CheckCommand parameters. Instead they are re-used for multiple
+service checks in this example.
+In addition to defining check parameters this way, you can also enrich the `display_name`
+attribute with more details. This will be shown in Icinga Web 2, for example.
+
+### Use Functions in Object Configuration <a id="use-functions-object-config"></a>
+
+There is a limited scope where functions can be used as object attributes such as:
+
+* As value for [Custom Variables](03-monitoring-basics.md#custom-variables-functions)
+* Returning boolean expressions for [set_if](08-advanced-topics.md#use-functions-command-arguments-setif) inside command arguments
+* Returning a [command](08-advanced-topics.md#use-functions-command-attribute) array inside command objects
+
+The other way around you can create objects dynamically using your own global functions.
+
+> **Note**
+>
+> Functions called inside command objects share the same global scope as runtime macros.
+> Therefore you can access host custom variables like `host.vars.os`, or any other
+> object attribute from inside the function definition used for [set_if](08-advanced-topics.md#use-functions-command-arguments-setif) or [command](08-advanced-topics.md#use-functions-command-attribute).
+
+Tips when implementing functions:
+
+* Use [log()](18-library-reference.md#global-functions-log) to dump variables. You can see the output
+inside the `icinga2.log` file, depending on your log severity.
+* Use the `icinga2 console` to test basic functionality (e.g. iterating over a dictionary)
+* Build them step-by-step. You can always refactor your code later on.
+
+#### Register and Use Global Functions <a id="use-functions-global-register"></a>
+
+[Functions](17-language-reference.md#functions) can be registered into the global scope. This makes custom functions available
+in objects and other functions. Keep in mind that these functions are not marked
+as side-effect-free and as such are not available via the REST API.
+
+Add a new configuration file `functions.conf` and include it into the [icinga2.conf](04-configuration.md#icinga2-conf)
+configuration file at the very beginning, e.g. after `constants.conf`. You can also manage global
+functions inside `constants.conf` if you prefer.
+
+The following function converts a given state parameter into a returned string value. The important
+bits for registering it into the global scope are:
+
+* `globals.<unique_function_name>` adds a new globals entry.
+* `function()` specifies that a call to `state_to_string()` executes a function.
+* Function parameters are defined inside the `function()` definition.
+
+```
+globals.state_to_string = function(state) {
+ if (state == 2) {
+ return "Critical"
+ } else if (state == 1) {
+ return "Warning"
+ } else if (state == 0) {
+ return "OK"
+ } else if (state == 3) {
+ return "Unknown"
+ } else {
+ log(LogWarning, "state_to_string", "Unknown state " + state + " provided.")
+ }
+}
+```
+
+The else-condition allows for better error handling. This warning will be shown in the Icinga 2
+log file once the function is called.
+
+> **Note**
+>
+> If these functions are used in a distributed environment, you must ensure that they are
+> deployed everywhere they are needed.
+
+In order to test-drive the newly created function, restart Icinga 2 and use the [debug console](11-cli-commands.md#cli-command-console)
+to connect to the REST API.
+
+```
+$ ICINGA2_API_PASSWORD=icinga icinga2 console --connect 'https://root@localhost:5665/'
+Icinga 2 (version: v2.11.0)
+<1> => globals.state_to_string(1)
+"Warning"
+<2> => state_to_string(2)
+"Critical"
+```
+
+You can see that this function is now registered into the [global scope](17-language-reference.md#variable-scopes). The function call
+`state_to_string()` can be used in any object at static config compile time or inside runtime
+lambda functions.
+
+The following service object example uses the service state and converts it to string output.
+The function definition is not optimized; it is written out for better readability and includes a log message.
+
+```
+object Service "state-test" {
+ check_command = "dummy"
+ host_name = NodeName
+
+ vars.dummy_state = 2
+
+ vars.dummy_text = {{
+ var h = macro("$host.name$")
+ var s = macro("$service.name$")
+
+ var state = get_service(h, s).state
+
+ log(LogInformation, "dummy_state", "Host: " + h + " Service: " + s + " State: " + state)
+
+ return state_to_string(state)
+ }}
+}
+```
+
+
+#### Use Custom Functions as Attribute <a id="custom-functions-as-attribute"></a>
+
+To use custom functions as attributes, the function must be defined in a
+slightly unexpected way. The following example shows how to assign values
+depending on group membership. All hosts in the `slow-lan` host group use 300
+as value for `ping_wrta`, all other hosts use 100.
+
+```
+globals.group_specific_value = function(group, group_value, non_group_value) {
+ return function() use (group, group_value, non_group_value) {
+ if (group in host.groups) {
+ return group_value
+ } else {
+ return non_group_value
+ }
+ }
+}
+
+apply Service "ping4" {
+ import "generic-service"
+ check_command = "ping4"
+
+ vars.ping_wrta = group_specific_value("slow-lan", 300, 100)
+ vars.ping_crta = group_specific_value("slow-lan", 500, 200)
+
+ assign where true
+}
+```
+
+#### Use Functions in Assign Where Expressions <a id="use-functions-assign-where"></a>
+
+If a simple expression for matching a name or checking if an item
+exists in an array or dictionary does not fit, you should consider
+writing your own global [functions](17-language-reference.md#functions).
+You can call them inside `assign where` and `ignore where` expressions
+for [apply rules](03-monitoring-basics.md#using-apply-expressions) or
+[group assignments](03-monitoring-basics.md#group-assign-intro) just like
+any other global functions for example [match](18-library-reference.md#global-functions-match).
+
+The following example adds the host `myprinter` to the host group
+`printers-lexmark`, but only if the host uses a template matching
+the name `lexmark*`.
+
+```
+template Host "lexmark-printer-host" {
+ vars.printer_type = "Lexmark"
+}
+
+object Host "myprinter" {
+ import "generic-host"
+ import "lexmark-printer-host"
+
+ address = "192.168.1.1"
+}
+
+/* register a global function for the assign where call */
+globals.check_host_templates = function(host, search) {
+ /* iterate over all host templates and check if the search matches */
+ for (tmpl in host.templates) {
+ if (match(search, tmpl)) {
+ return true
+ }
+ }
+
+ /* nothing matched */
+ return false
+}
+
+object HostGroup "printers-lexmark" {
+ display_name = "Lexmark Printers"
+ /* call the global function and pass the arguments */
+ assign where check_host_templates(host, "lexmark*")
+}
+```
+
+Take a different, more complex example: All hosts with the
+custom variable `vars_app` as a nested dictionary should be
+added to the host group `ABAP-app-server`, but only if the
+`app_type` for all entries is set to `ABAP`.
+
+It could read as wildcard match for nested dictionaries:
+
+```
+ where host.vars.vars_app["*"].app_type == "ABAP"
+```
+
+The solution for this problem is to register a global
+function which checks the `app_type` for all hosts
+with the `vars_app` dictionary.
+
+```
+object Host "appserver01" {
+ check_command = "dummy"
+ vars.vars_app["ABC"] = { app_type = "ABAP" }
+}
+object Host "appserver02" {
+ check_command = "dummy"
+ vars.vars_app["DEF"] = { app_type = "ABAP" }
+}
+
+globals.check_app_type = function(host, type) {
+ /* ensure that other hosts without the custom variable do not match */
+ if (typeof(host.vars.vars_app) != Dictionary) {
+ return false
+ }
+
+ /* iterate over the vars_app dictionary */
+ for (key => val in host.vars.vars_app) {
+ /* if the value is a dictionary and its app_type matches the requested type */
+ if (typeof(val) == Dictionary && val.app_type == type) {
+ return true
+ }
+ }
+
+ /* nothing matched */
+ return false
+}
+
+object HostGroup "ABAP-app-server" {
+ assign where check_app_type(host, "ABAP")
+}
+```
+
+#### Use Functions in Command Arguments set_if <a id="use-functions-command-arguments-setif"></a>
+
+The `set_if` attribute inside the command arguments definition in the
+[CheckCommand object definition](09-object-types.md#objecttype-checkcommand) is primarily used to
+evaluate whether the command parameter should be set or not.
+
+By default you can evaluate runtime macros for their existence. If the result is not an empty
+string, the command parameter is passed. This becomes fairly complicated when you want to evaluate
+multiple conditions and attributes.
+
+The following example was found on the community support channels. The user had defined a host
+dictionary named `compellent` with the key `disks`. This was then used inside service apply for rules.
+
+```
+object Host "dict-host" {
+ check_command = "check_compellent"
+ vars.compellent["disks"] = {
+ file = "/var/lib/check_compellent/san_disks.0.json",
+ checks = ["disks"]
+ }
+}
+```
+
+The more significant problem was to only add the command parameter `--disks` to the plugin call
+when the dictionary `compellent` contains the key `disks`, and omit it if not found.
+
+By defining `set_if` as [abbreviated lambda function](17-language-reference.md#nullary-lambdas)
+and evaluating the host custom variable `compellent` containing the `disks` this problem was
+solved like this:
+
+```
+object CheckCommand "check_compellent" {
+ command = [ "/usr/bin/check_compellent" ]
+ arguments = {
+ "--disks" = {
+ set_if = {{
+ var host_vars = host.vars
+ log(host_vars)
+ var compel = host_vars.compellent
+ log(compel)
+ compel.contains("disks")
+ }}
+ }
+ }
+}
+```
+
+This implementation uses the dictionary type method [contains](18-library-reference.md#dictionary-contains)
+and will fail if `host.vars.compellent` is not of the type `Dictionary`.
+Therefore you can extend the checks using the [typeof](17-language-reference.md#types) function.
+
+You can test the types using the `icinga2 console`:
+
+```
+# icinga2 console
+Icinga (version: v2.3.0-193-g3eb55ad)
+<1> => srv_vars.compellent["check_a"] = { file="outfile_a.json", checks = [ "disks", "fans" ] }
+null
+<2> => srv_vars.compellent["check_b"] = { file="outfile_b.json", checks = [ "power", "voltages" ] }
+null
+<3> => typeof(srv_vars.compellent)
+type 'Dictionary'
+<4> =>
+```
+
+The more programmatic approach for `set_if` could look like this:
+
+```
+ "--disks" = {
+ set_if = {{
+ var srv_vars = service.vars
+ if(len(srv_vars) > 0) {
+ if (typeof(srv_vars.compellent) == Dictionary) {
+ return srv_vars.compellent.contains("disks")
+ } else {
+ log(LogInformation, "checkcommand set_if", "custom variable compellent_checks is not a dictionary, ignoring it.")
+ return false
+ }
+ } else {
+ log(LogWarning, "checkcommand set_if", "empty custom variables")
+ return false
+ }
+ }}
+ }
+```
+
+#### Use Functions as Command Attribute <a id="use-functions-command-attribute"></a>
+
+This comes in handy for [NotificationCommands](09-object-types.md#objecttype-notificationcommand)
+or [EventCommands](09-object-types.md#objecttype-eventcommand) which do not require
+a returned check result including state/output.
+
+The following example was taken from the community support channels. The requirement was to
+specify a custom variable inside the notification apply rule and decide which notification
+script to call based on that.
+
+```
+object User "short-dummy" {
+}
+
+object UserGroup "short-dummy-group" {
+ assign where user.name == "short-dummy"
+}
+
+apply Notification "mail-admins-short" to Host {
+ import "mail-host-notification"
+ command = "mail-host-notification-test"
+ user_groups = [ "short-dummy-group" ]
+ vars.short = true
+ assign where host.vars.notification.mail
+}
+```
+
+The solution is fairly simple: The `command` attribute is implemented as a function returning
+an array, as required by the caller, Icinga 2.
+The local variable `mailscript` sets the default value for the notification script location.
+If the notification custom variable `short` is set, it will override the local variable `mailscript`
+with a new value.
+The `mailscript` variable is then used to compute the final notification command array being
+returned.
+
+You can omit the `log()` calls; they only help with debugging.
+
+```
+object NotificationCommand "mail-host-notification-test" {
+ command = {{
+ log("command as function")
+ var mailscript = "mail-host-notification-long.sh"
+ if (notification.vars.short) {
+ mailscript = "mail-host-notification-short.sh"
+ }
+ log("Running command")
+ log(mailscript)
+
+ var cmd = [ ConfigDir + "/scripts/" + mailscript ]
+ log(LogCritical, "me", cmd)
+ return cmd
+ }}
+
+ env = {
+ }
+}
+```
+
+### Access Object Attributes at Runtime <a id="access-object-attributes-at-runtime"></a>
+
+The [Object Accessor Functions](18-library-reference.md#object-accessor-functions)
+can be used to retrieve references to other objects by name.
+
+This allows you to access configuration and runtime object attributes. A detailed
+list can be found [here](09-object-types.md#object-types).
+
+#### Access Object Attributes at Runtime: Cluster Check <a id="access-object-attributes-at-runtime-cluster-check"></a>
+
+This is a simple cluster example for accessing two host object states and calculating a virtual
+cluster state and output:
+
+```
+object Host "cluster-host-01" {
+ check_command = "dummy"
+ vars.dummy_state = 2
+ vars.dummy_text = "This host is down."
+}
+
+object Host "cluster-host-02" {
+ check_command = "dummy"
+ vars.dummy_state = 0
+ vars.dummy_text = "This host is up."
+}
+
+object Host "cluster" {
+ check_command = "dummy"
+ vars.cluster_nodes = [ "cluster-host-01", "cluster-host-02" ]
+
+ vars.dummy_state = {{
+ var up_count = 0
+ var down_count = 0
+ var cluster_nodes = macro("$cluster_nodes$")
+
+ for (node in cluster_nodes) {
+ if (get_host(node).state > 0) {
+ down_count += 1
+ } else {
+ up_count += 1
+ }
+ }
+
+ if (up_count >= down_count) {
+ return 0 //same up as down -> UP
+ } else {
+ return 2 //something is broken
+ }
+ }}
+
+ vars.dummy_text = {{
+ var output = "Cluster hosts:\n"
+ var cluster_nodes = macro("$cluster_nodes$")
+
+ for (node in cluster_nodes) {
+ output += node + ": " + get_host(node).last_check_result.output + "\n"
+ }
+
+ return output
+ }}
+}
+```
+
+#### Time Dependent Thresholds <a id="access-object-attributes-at-runtime-time-dependent-thresholds"></a>
+
+The following example sets time dependent thresholds for the load check based on the current
+time of the day compared to the defined time period.
+
+```
+object TimePeriod "backup" {
+ ranges = {
+ monday = "02:00-03:00"
+ tuesday = "02:00-03:00"
+ wednesday = "02:00-03:00"
+ thursday = "02:00-03:00"
+ friday = "02:00-03:00"
+ saturday = "02:00-03:00"
+ sunday = "02:00-03:00"
+ }
+}
+
+object Host "webserver-with-backup" {
+ check_command = "hostalive"
+ address = "127.0.0.1"
+}
+
+object Service "webserver-backup-load" {
+ check_command = "load"
+ host_name = "webserver-with-backup"
+
+ vars.load_wload1 = {{
+ if (get_time_period("backup").is_inside) {
+ return 20
+ } else {
+ return 5
+ }
+ }}
+ vars.load_cload1 = {{
+ if (get_time_period("backup").is_inside) {
+ return 40
+ } else {
+ return 10
+ }
+ }}
+}
+```
+
+
+## Advanced Value Types <a id="advanced-value-types"></a>
+
+In addition to the default value types Icinga 2 also uses a few other types
+to represent its internal state. The following types are exposed via the [API](12-icinga2-api.md#icinga2-api).
+
+### CheckResult <a id="advanced-value-types-checkresult"></a>
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ exit\_status | Number | The exit status returned by the check execution.
+ output | String | The check output.
+ performance\_data | Array | Array of [performance data values](08-advanced-topics.md#advanced-value-types-perfdatavalue).
+ check\_source | String | Name of the node executing the check.
+ scheduling\_source | String | Name of the node scheduling the check.
+ state | Number | The current state (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN).
+ command | Value | Array of command with shell-escaped arguments or command line string.
+ execution\_start | Timestamp | Check execution start time (as a UNIX timestamp).
+ execution\_end | Timestamp | Check execution end time (as a UNIX timestamp).
+ schedule\_start | Timestamp | Scheduled check execution start time (as a UNIX timestamp).
+ schedule\_end | Timestamp | Scheduled check execution end time (as a UNIX timestamp).
+ active | Boolean | Whether the result is from an active or passive check.
+ vars\_before | Dictionary | Internal attribute used for calculations.
+ vars\_after | Dictionary | Internal attribute used for calculations.
+ ttl | Number | Time-to-live duration in seconds for this check result. The next expected check result is `now + ttl` where freshness checks are executed.
+
+### PerfdataValue <a id="advanced-value-types-perfdatavalue"></a>
+
+Icinga 2 parses performance data strings returned by check plugins and makes the information available to external interfaces (e.g. [GraphiteWriter](09-object-types.md#objecttype-graphitewriter) or the [Icinga 2 API](12-icinga2-api.md#icinga2-api)).
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ label | String | Performance data label.
+ value | Number | Normalized performance data value without unit.
+ counter | Boolean | Enabled if the original value contains `c` as unit. Defaults to `false`.
+ unit | String | Unit of measurement (`seconds`, `bytes`, `percent`) according to the [plugin API](05-service-monitoring.md#service-monitoring-plugin-api).
+ crit | Value | Critical threshold value.
+ warn | Value | Warning threshold value.
+ min | Value | Minimum value returned by the check.
+ max | Value | Maximum value returned by the check.
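+
+For illustration, a performance data fragment following the
+[plugin API](05-service-monitoring.md#service-monitoring-plugin-api) format such as
+
+```
+'load1'=4.7;5;10;0
+```
+
+would be parsed into a PerfdataValue with `label` = `load1`, `value` = 4.7, `warn` = 5,
+`crit` = 10, `min` = 0 and an empty `unit`.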
diff --git a/doc/09-object-types.md b/doc/09-object-types.md
new file mode 100644
index 0000000..93974ef
--- /dev/null
+++ b/doc/09-object-types.md
@@ -0,0 +1,1958 @@
+# Object Types <a id="object-types"></a>
+
+This chapter provides an overview of all available config object types which can be
+instantiated using the `object` keyword.
+
+Additional details on configuration and runtime attributes and their
+description are explained here too.
+
+The attributes need to have a specific type value. Many of them are
+explained in [this chapter](03-monitoring-basics.md#attribute-value-types) already.
+Note that the `Timestamp` type is a `Number`.
+In addition, `Object name` is an object reference, specified as a
+`String` containing the name of an existing object.
+
+## Overview <a id="object-types-overview"></a>
+
+* [Monitoring Objects](09-object-types.md#object-types-monitoring) such as host, service, etc.
+* [Runtime Objects](09-object-types.md#object-types-runtime) generated by Icinga itself.
+* [Features](09-object-types.md#object-types-features) available via `icinga2 feature` CLI command.
+
+## Common Runtime Attributes <a id="object-types-runtime-attributes"></a>
+
+Configuration objects share these runtime attributes which cannot be
+modified by the user. You can access these attributes using
+the [Icinga 2 API](12-icinga2-api.md#icinga2-api-config-objects).
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ version | Number | Timestamp when the object was created or modified. Synced throughout cluster nodes.
+ type | String | Object type.
+ original\_attributes | Dictionary | Original values of object attributes modified at runtime.
+ active | Boolean | Object is active (e.g. a service being checked).
+ paused | Boolean | Object has been paused at runtime (e.g. [IdoMysqlConnection](09-object-types.md#objecttype-idomysqlconnection)). Defaults to `false`.
+ templates | Array | Templates imported on object compilation.
+ package | String | [Configuration package name](12-icinga2-api.md#icinga2-api-config-management) this object belongs to. Local configuration is set to `_etc`, runtime created objects use `_api`.
+ source\_location | Dictionary | Location information where the configuration files are stored.
+ name | String | Object name. Might be used in [apply rules](03-monitoring-basics.md#using-apply).
+
+## Monitoring Objects <a id="object-types-monitoring"></a>
+
+### ApiUser <a id="objecttype-apiuser"></a>
+
+ApiUser objects are used for authentication against the [Icinga 2 API](12-icinga2-api.md#icinga2-api-authentication).
+
+Example:
+
+```
+object ApiUser "root" {
+ password = "mysecretapipassword"
+ permissions = [ "*" ]
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ password | String | **Optional.** Password string. Note: This attribute is hidden in API responses.
+ client\_cn | String | **Optional.** Client Common Name (CN).
+ permissions | Array | **Required.** Array of permissions. Either as string or dictionary with the keys `permission` and `filter`. The latter must be specified as function.
+
+Available permissions are explained in the [API permissions](12-icinga2-api.md#icinga2-api-permissions)
+chapter.
+
+### CheckCommand <a id="objecttype-checkcommand"></a>
+
+A check command definition. Additional default command custom variables can be
+defined here.
+
+Example:
+
+```
+object CheckCommand "http" {
+ command = [ PluginDir + "/check_http" ]
+
+ arguments = {
+ "-H" = "$http_vhost$"
+ "-I" = "$http_address$"
+ "-u" = "$http_uri$"
+ "-p" = "$http_port$"
+ "-S" = {
+ set_if = "$http_ssl$"
+ }
+ "--sni" = {
+ set_if = "$http_sni$"
+ }
+ "-a" = {
+ value = "$http_auth_pair$"
+ description = "Username:password on sites with basic authentication"
+ }
+ "--no-body" = {
+ set_if = "$http_ignore_body$"
+ }
+ "-r" = "$http_expect_body_regex$"
+ "-w" = "$http_warn_time$"
+ "-c" = "$http_critical_time$"
+ "-e" = "$http_expect$"
+ }
+
+ vars.http_address = "$address$"
+ vars.http_ssl = false
+ vars.http_sni = false
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ command | Array | **Required.** The command. This can either be an array of individual command arguments. Alternatively a string can be specified in which case the shell interpreter (usually /bin/sh) takes care of parsing the command. When using the "arguments" attribute this must be an array. Can be specified as function for advanced implementations.
+ env | Dictionary | **Optional.** A dictionary of macros which should be exported as environment variables prior to executing the command.
+ vars | Dictionary | **Optional.** A dictionary containing custom variables that are specific to this command.
+ timeout | Duration | **Optional.** The command timeout in seconds. Defaults to `1m`.
+ arguments | Dictionary | **Optional.** A dictionary of command arguments.
+
+
+#### CheckCommand Arguments <a id="objecttype-checkcommand-arguments"></a>
+
+Command arguments can be defined as key-value-pairs in the `arguments`
+dictionary. Best practice is to assign a dictionary as value which
+provides additional details such as the `description` next to the `value`.
+
+```
+ arguments = {
+ "--parameter" = {
+ description = "..."
+ value = "..."
+ }
+ }
+```
+
+All available argument value entries are shown below:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ value | String/Function | Optional argument value set by a [runtime macro string](03-monitoring-basics.md#runtime-macros) or a [function call](17-language-reference.md#functions). [More details](03-monitoring-basics.md#command-arguments-value).
+ description | String | Optional argument description. [More details](03-monitoring-basics.md#command-arguments-description).
+ required | Boolean | Required argument. Execution error if not set. Defaults to false (optional). [More details](03-monitoring-basics.md#command-arguments-required).
+ skip\_key | Boolean | Use the value as argument and skip the key. [More details](03-monitoring-basics.md#command-arguments-skip-key).
+ set\_if | String/Function | Argument is added if the [runtime macro string](03-monitoring-basics.md#runtime-macros) resolves to a defined numeric or boolean value. String values are not supported. [Function calls](17-language-reference.md#functions) returning a value are supported too. [More details](03-monitoring-basics.md#command-arguments-set-if).
+ order | Number | Set if multiple arguments require a defined argument order. The syntax is `..., -3, -2, -1, <un-ordered keys>, 1, 2, 3, ...`. [More details](03-monitoring-basics.md#command-arguments-order).
+ repeat\_key | Boolean | If the argument value is an array, repeat the argument key, or not. Defaults to true (repeat). [More details](03-monitoring-basics.md#command-arguments-repeat-key).
+ key | String | Optional argument key overriding the key identifier. [More details](03-monitoring-basics.md#command-arguments-key).
+ separator | String | Key-value separator. If given, e.g. `=`, appears between key and value like `--key=value` instead of the regular `--key` `value`.
+
+`value` and `description` are commonly used, the other entries allow
+to build more advanced CheckCommand objects and arguments.
+
+Please continue reading [here](03-monitoring-basics.md#command-arguments) for advanced usage and examples
+for command arguments.
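+
+The following sketch combines several of these entries; the plugin and its parameters
+are made up for illustration only:
+
+```
+object CheckCommand "my-plugin" {
+  command = [ PluginDir + "/check_myplugin" ] // hypothetical plugin
+
+  arguments = {
+    "--host" = {
+      value = "$myplugin_address$"
+      description = "Address of the target system"
+      required = true
+      order = -1 // placed before all un-ordered arguments
+    }
+    "--mode" = {
+      value = "$myplugin_mode$"
+      separator = "=" // rendered as --mode=<value>
+    }
+    "--verbose" = {
+      set_if = "$myplugin_verbose$" // only added if the macro resolves to a true value
+    }
+  }
+
+  vars.myplugin_address = "$address$"
+  vars.myplugin_verbose = false
+}
+```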
+
+
+### Dependency <a id="objecttype-dependency"></a>
+
+Dependency objects are used to specify dependencies between hosts and services. Dependencies
+can be defined as Host-to-Host, Service-to-Service, Service-to-Host, or Host-to-Service
+relations.
+
+> **Best Practice**
+>
+> Rather than creating a `Dependency` object for a specific host or service it is usually easier
+> to just create a `Dependency` template and use the `apply` keyword to assign the
+> dependency to a number of hosts or services. Use the `to` keyword to set the specific target
+> type for `Host` or `Service`.
+> Check the [dependencies](03-monitoring-basics.md#dependencies) chapter for detailed examples.
+
+Service-to-Service Example:
+
+```
+object Dependency "webserver-internet" {
+ parent_host_name = "internet"
+ parent_service_name = "ping4"
+
+ child_host_name = "webserver"
+ child_service_name = "ping4"
+
+ states = [ OK, Warning ]
+
+ disable_checks = true
+}
+```
+
+Host-to-Host Example:
+
+```
+object Dependency "webserver-internet" {
+ parent_host_name = "internet"
+
+ child_host_name = "webserver"
+
+ states = [ Up ]
+
+ disable_checks = true
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ parent\_host\_name | Object name | **Required.** The parent host.
+ parent\_service\_name | Object name | **Optional.** The parent service. If omitted, this dependency object is treated as host dependency.
+ child\_host\_name | Object name | **Required.** The child host.
+ child\_service\_name | Object name | **Optional.** The child service. If omitted, this dependency object is treated as host dependency.
+ redundancy\_group | String | **Optional.** Puts the dependency into a group of [mutually redundant ones](03-monitoring-basics.md#dependencies-redundancy-groups).
+ disable\_checks | Boolean | **Optional.** Whether to disable checks (i.e., don't schedule active checks and drop passive results) when this dependency fails. Defaults to false.
+ disable\_notifications | Boolean | **Optional.** Whether to disable notifications when this dependency fails. Defaults to true.
+ ignore\_soft\_states | Boolean | **Optional.** Whether to ignore soft states for the reachability calculation. Defaults to true.
+ period | Object name | **Optional.** Time period object during which this dependency is enabled.
+ states | Array | **Optional.** A list of state filters when this dependency should be OK. Defaults to [ OK, Warning ] for services and [ Up ] for hosts.
+
+Available state filters:
+
+```
+OK
+Warning
+Critical
+Unknown
+Up
+Down
+```
+
+When using [apply rules](03-monitoring-basics.md#using-apply) for dependencies, you can leave out certain attributes which will be
+automatically determined by Icinga 2.
+
+Service-to-Host Dependency Example:
+
+```
+apply Dependency "internet" to Service {
+ parent_host_name = "dsl-router"
+ disable_checks = true
+
+ assign where host.name != "dsl-router"
+}
+```
+
+This example sets all service objects matching the assign condition into a dependency relation to
+the parent host object `dsl-router` as implicit child services.
+
+Service-to-Service-on-the-same-Host Dependency Example:
+
+```
+apply Dependency "disable-agent-checks" to Service {
+ parent_service_name = "agent-health"
+
+ assign where service.check_command == "ssh"
+ ignore where service.name == "agent-health"
+}
+```
+
+This example omits the `parent_host_name` attribute and Icinga 2 automatically sets its value to the name of the
+host object matched by the apply rule condition. All services matched by the apply rule become implicit child services
+in this dependency relation.
+
+
+Dependency objects have composite names, i.e. their names are based on the `child_host_name` and `child_service_name` attributes and the
+name you specified. This means you can define more than one object with the same (short) name as long as one of the `child_host_name` and
+`child_service_name` attributes has a different value.
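+
+For illustration, here is a minimal sketch using hypothetical host names: both objects share the short name
+`dns-reachable`, but their composite names differ because their `child_host_name` values differ.
+
+```
+object Dependency "dns-reachable" {
+  parent_host_name = "dns-server"
+  child_host_name = "webserver"
+}
+
+object Dependency "dns-reachable" {
+  parent_host_name = "dns-server"
+  child_host_name = "mailserver"
+}
+```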
+
+### Endpoint <a id="objecttype-endpoint"></a>
+
+Endpoint objects are used to specify connection information for remote
+Icinga 2 instances. More details can be found in the [distributed monitoring chapter](06-distributed-monitoring.md#distributed-monitoring).
+
+Example:
+
+```
+object Endpoint "icinga2-agent1.localdomain" {
+ host = "192.168.56.111"
+ port = 5665
+ log_duration = 1d
+}
+```
+
+Example (disable replay log):
+
+```
+object Endpoint "icinga2-agent1.localdomain" {
+ host = "192.168.5.111"
+ port = 5665
+ log_duration = 0
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ host | String | **Optional.** The hostname/IP address of the remote Icinga 2 instance.
+ port | Number | **Optional.** The service name/port of the remote Icinga 2 instance. Defaults to `5665`.
+ log\_duration | Duration | **Optional.** Duration for keeping replay logs on connection loss. Defaults to `1d` (86400 seconds). Attribute is specified in seconds. If log_duration is set to 0, replaying logs is disabled. You could also specify the value in human readable format like `10m` for 10 minutes or `1h` for one hour.
+
+Endpoint objects cannot currently be created with the API.
+
+### EventCommand <a id="objecttype-eventcommand"></a>
+
+An event command definition.
+
+Example:
+
+```
+object EventCommand "restart-httpd-event" {
+ command = "/opt/bin/restart-httpd.sh"
+}
+```
+
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ command | Array | **Required.** The command. This can either be an array of individual command arguments, or a string, in which case the shell interpreter (usually /bin/sh) takes care of parsing the command. When using the "arguments" attribute this must be an array. Can be specified as a function for advanced implementations.
+ env | Dictionary | **Optional.** A dictionary of macros which should be exported as environment variables prior to executing the command.
+ vars | Dictionary | **Optional.** A dictionary containing custom variables that are specific to this command.
+ timeout | Duration | **Optional.** The command timeout in seconds. Defaults to `1m`.
+ arguments | Dictionary | **Optional.** A dictionary of command arguments.
+
+Command arguments can be used the same way as for [CheckCommand objects](09-object-types.md#objecttype-checkcommand-arguments).
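+
+For example, a minimal sketch of an `EventCommand` using the `arguments` dictionary might look like this
+(the script path and the `event_service` custom variable are hypothetical):
+
+```
+object EventCommand "restart-service-event" {
+  command = [ "/opt/bin/restart-service.sh" ]
+
+  arguments = {
+    "-s" = {
+      value = "$event_service$"
+      description = "Name of the service to restart"
+    }
+  }
+
+  vars.event_service = "httpd"
+}
+```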
+
+More advanced examples for event command usage can be found [here](03-monitoring-basics.md#event-commands).
+
+
+### Host <a id="objecttype-host"></a>
+
+A host.
+
+Example:
+
+```
+object Host "icinga2-agent1.localdomain" {
+ display_name = "Linux Client 1"
+ address = "192.168.56.111"
+ address6 = "2a00:1450:4001:815::2003"
+
+ groups = [ "linux-servers" ]
+
+ check_command = "hostalive"
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ display\_name | String | **Optional.** A short description of the host (e.g. displayed by external interfaces instead of the name if set).
+ address | String | **Optional.** The host's IPv4 address. Available as command runtime macro `$address$` if set.
+ address6 | String | **Optional.** The host's IPv6 address. Available as command runtime macro `$address6$` if set.
+ groups | Array of object names | **Optional.** A list of host groups this host belongs to.
+ vars | Dictionary | **Optional.** A dictionary containing custom variables that are specific to this host.
+ check\_command | Object name | **Required.** The name of the check command.
+ max\_check\_attempts | Number | **Optional.** The number of times a host is re-checked before changing into a hard state. Defaults to 3.
+ check\_period | Object name | **Optional.** The name of a time period which determines when this host should be checked. Not set by default (effectively 24x7).
+ check\_timeout | Duration | **Optional.** Check command timeout in seconds. Overrides the CheckCommand's `timeout` attribute.
+ check\_interval | Duration | **Optional.** The check interval (in seconds). This interval is used for checks when the host is in a `HARD` state. Defaults to `5m`.
+ retry\_interval | Duration | **Optional.** The retry interval (in seconds). This interval is used for checks when the host is in a `SOFT` state. Defaults to `1m`. Note: This does not affect the scheduling [after a passive check result](08-advanced-topics.md#check-result-freshness).
+ enable\_notifications | Boolean | **Optional.** Whether notifications are enabled. Defaults to true.
+ enable\_active\_checks | Boolean | **Optional.** Whether active checks are enabled. Defaults to true.
+ enable\_passive\_checks | Boolean | **Optional.** Whether passive checks are enabled. Defaults to true.
+ enable\_event\_handler | Boolean | **Optional.** Enables event handlers for this host. Defaults to true.
+ enable\_flapping | Boolean | **Optional.** Whether flap detection is enabled. Defaults to false.
+ enable\_perfdata | Boolean | **Optional.** Whether performance data processing is enabled. Defaults to true.
+ event\_command | Object name | **Optional.** The name of an event command that should be executed every time the host's state changes or the host is in a `SOFT` state.
+ flapping\_threshold\_high | Number | **Optional.** Flapping upper bound in percent for a host to be considered flapping. Default `30.0`
+ flapping\_threshold\_low | Number | **Optional.** Flapping lower bound in percent for a host to be considered not flapping. Default `25.0`
+ flapping\_ignore\_states | Array | **Optional.** A list of states that should be ignored during flapping calculation. By default no state is ignored.
+ volatile | Boolean | **Optional.** Treat all state changes as HARD changes. See [here](08-advanced-topics.md#volatile-services-hosts) for details. Defaults to `false`.
+ zone | Object name | **Optional.** The zone this object is a member of. Please read the [distributed monitoring](06-distributed-monitoring.md#distributed-monitoring) chapter for details.
+ command\_endpoint | Object name | **Optional.** The endpoint where commands are executed on.
+ notes | String | **Optional.** Notes for the host.
+ notes\_url | String | **Optional.** URL for notes for the host (for example, in notification commands).
+ action\_url | String | **Optional.** URL for actions for the host (for example, an external graphing tool).
+ icon\_image | String | **Optional.** Icon image for the host. Used by external interfaces only.
+ icon\_image\_alt | String | **Optional.** Icon image description for the host. Used by external interfaces only.
+
+The actual check interval might deviate slightly from the configured values due to the fact that Icinga tries
+to evenly distribute all checks over a certain period of time, i.e. to avoid load spikes.
+
+> **Best Practice**
+>
+> The `address` and `address6` attributes are required for running commands using
+> the `$address$` and `$address6$` runtime macros.
+
+Runtime Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ next\_check | Timestamp | When the next check occurs (as a UNIX timestamp).
+ last\_check | Timestamp | When the last check occurred (as a UNIX timestamp).
+ check\_attempt | Number | The current check attempt number.
+ state\_type | Number | The current state type (0 = SOFT, 1 = HARD).
+ last\_state\_type | Number | The previous state type (0 = SOFT, 1 = HARD).
+ last\_reachable | Boolean | Whether the host was reachable when the last check occurred.
+ last\_check\_result | CheckResult | The current [check result](08-advanced-topics.md#advanced-value-types-checkresult).
+ last\_state\_change | Timestamp | When the last state change occurred (as a UNIX timestamp).
+ last\_hard\_state\_change | Timestamp | When the last hard state change occurred (as a UNIX timestamp).
+ last\_in\_downtime | Boolean | Whether the host was in a downtime when the last check occurred.
+ acknowledgement | Number | The acknowledgement type (0 = NONE, 1 = NORMAL, 2 = STICKY).
+ acknowledgement\_expiry | Timestamp | When the acknowledgement expires (as a UNIX timestamp; 0 = no expiry).
+ downtime\_depth | Number | The number of active downtimes (`0` if the host is not in a downtime).
+ flapping\_last\_change | Timestamp | When the last flapping change occurred (as a UNIX timestamp).
+ flapping | Boolean | Whether the host is flapping between states.
+ flapping\_current | Number | Current flapping value in percent (see the `flapping_threshold_*` attributes).
+ state | Number | The current state (0 = UP, 1 = DOWN).
+ last\_state | Number | The previous state (0 = UP, 1 = DOWN).
+ last\_hard\_state | Number | The last hard state (0 = UP, 1 = DOWN).
+ last\_state\_up | Timestamp | When the last UP state occurred (as a UNIX timestamp).
+ last\_state\_down | Timestamp | When the last DOWN state occurred (as a UNIX timestamp).
+ last\_state\_unreachable | Timestamp | When the host was unreachable the last time (as a UNIX timestamp).
+ previous\_state\_change | Timestamp | Previous timestamp of `last_state_change` before processing a new check result.
+ severity | Number | [Severity](19-technical-concepts.md#technical-concepts-checks-severity) calculated value.
+ problem | Boolean | Whether the host is considered in a problem state type (NOT-UP).
+ handled | Boolean | Whether the host problem is handled (downtime or acknowledgement).
+ next\_update | Timestamp | When the next check update is to be expected.
+
+
+
+### HostGroup <a id="objecttype-hostgroup"></a>
+
+A group of hosts.
+
+> **Best Practice**
+>
+> Assign host group members using the [group assign](17-language-reference.md#group-assign) rules.
+
+Example:
+
+```
+object HostGroup "linux-servers" {
+ display_name = "Linux Servers"
+
+ assign where host.vars.os == "Linux"
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ display\_name | String | **Optional.** A short description of the host group.
+ groups | Array of object names | **Optional.** An array of nested group names.
+
+
+
+### Notification <a id="objecttype-notification"></a>
+
+Notification objects are used to specify how users should be notified in case
+of host and service state changes and other events.
+
+> **Best Practice**
+>
+> Rather than creating a `Notification` object for a specific host or service it is
+> usually easier to just create a `Notification` template and use the `apply` keyword
+> to assign the notification to a number of hosts or services. Use the `to` keyword
+> to set the specific target type for `Host` or `Service`.
+> Check the [notifications](03-monitoring-basics.md#alert-notifications) chapter for detailed examples.
+
+Example:
+
+```
+object Notification "localhost-ping-notification" {
+ host_name = "localhost"
+ service_name = "ping4"
+
+ command = "mail-notification"
+
+ users = [ "user1", "user2" ] // reference to User objects
+
+ types = [ Problem, Recovery ]
+ states = [ Critical, Warning, OK ]
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ host\_name | Object name | **Required.** The name of the host this notification belongs to.
+ service\_name | Object name | **Optional.** The short name of the service this notification belongs to. If omitted, this notification object is treated as host notification.
+ vars | Dictionary | **Optional.** A dictionary containing custom variables that are specific to this notification object.
+ users | Array of object names | **Required** unless the `user_groups` attribute is set. A list of user names who should be notified.
+ user\_groups | Array of object names | **Required** unless the `users` attribute is set. A list of user group names who should be notified.
+ times | Dictionary | **Optional.** A dictionary containing `begin` and `end` attributes for the notification. If `end` is set to 0, `Notifications` are disabled permanently. Please read the [notification delay](03-monitoring-basics.md#notification-delay) chapter for details.
+ command | Object name | **Required.** The name of the notification command which should be executed when the notification is triggered.
+ interval | Duration | **Optional.** The notification interval (in seconds). This interval is used for active notifications. Defaults to 30 minutes. If set to 0, [re-notifications](03-monitoring-basics.md#disable-renotification) are disabled.
+ period | Object name | **Optional.** The name of a time period which determines when this notification should be triggered. Not set by default (effectively 24x7).
+ zone | Object name | **Optional.** The zone this object is a member of. Please read the [distributed monitoring](06-distributed-monitoring.md#distributed-monitoring) chapter for details.
+ types | Array | **Optional.** A list of type filters when this notification should be triggered. By default everything is matched.
+ states | Array | **Optional.** A list of state filters when this notification should be triggered. By default everything is matched. Note that the states filter is ignored for notifications of type Acknowledgement!
+
+Available notification state filters for Service:
+
+```
+OK
+Warning
+Critical
+Unknown
+```
+
+Available notification state filters for Host:
+
+```
+Up
+Down
+```
+
+Available notification type filters:
+
+```
+DowntimeStart
+DowntimeEnd
+DowntimeRemoved
+Custom
+Acknowledgement
+Problem
+Recovery
+FlappingStart
+FlappingEnd
+```
+
+Runtime Attributes:
+
+ Name | Type | Description
+ ----------------------------|-----------------------|-----------------
+ last\_notification | Timestamp | When the last notification was sent for this Notification object (as a UNIX timestamp).
+ next\_notification | Timestamp | When the next notification is going to be sent for this Notification object, assuming the associated host/service is still in a non-OK state (as a UNIX timestamp).
+ notification\_number | Number | The notification number.
+ last\_problem\_notification | Timestamp | When the last notification was sent for a problem (as a UNIX timestamp).
+
+
+### NotificationCommand <a id="objecttype-notificationcommand"></a>
+
+A notification command definition.
+
+Example:
+
+```
+object NotificationCommand "mail-service-notification" {
+ command = [ ConfigDir + "/scripts/mail-service-notification.sh" ]
+
+ arguments += {
+ "-4" = {
+ required = true
+ value = "$notification_address$"
+ }
+ "-6" = "$notification_address6$"
+ "-b" = "$notification_author$"
+ "-c" = "$notification_comment$"
+ "-d" = {
+ required = true
+ value = "$notification_date$"
+ }
+ "-e" = {
+ required = true
+ value = "$notification_servicename$"
+ }
+ "-f" = {
+ value = "$notification_from$"
+ description = "Set from address. Requires GNU mailutils (Debian/Ubuntu) or mailx (RHEL/SUSE)"
+ }
+ "-i" = "$notification_icingaweb2url$"
+ "-l" = {
+ required = true
+ value = "$notification_hostname$"
+ }
+ "-n" = {
+ required = true
+ value = "$notification_hostdisplayname$"
+ }
+ "-o" = {
+ required = true
+ value = "$notification_serviceoutput$"
+ }
+ "-r" = {
+ required = true
+ value = "$notification_useremail$"
+ }
+ "-s" = {
+ required = true
+ value = "$notification_servicestate$"
+ }
+ "-t" = {
+ required = true
+ value = "$notification_type$"
+ }
+ "-u" = {
+ required = true
+ value = "$notification_servicedisplayname$"
+ }
+ "-v" = "$notification_logtosyslog$"
+ }
+
+ vars += {
+ notification_address = "$address$"
+ notification_address6 = "$address6$"
+ notification_author = "$notification.author$"
+ notification_comment = "$notification.comment$"
+ notification_type = "$notification.type$"
+ notification_date = "$icinga.long_date_time$"
+ notification_hostname = "$host.name$"
+ notification_hostdisplayname = "$host.display_name$"
+ notification_servicename = "$service.name$"
+ notification_serviceoutput = "$service.output$"
+ notification_servicestate = "$service.state$"
+ notification_useremail = "$user.email$"
+ notification_servicedisplayname = "$service.display_name$"
+ }
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ command | Array | **Required.** The command. This can either be an array of individual command arguments, or a string, in which case the shell interpreter (usually /bin/sh) takes care of parsing the command. When using the "arguments" attribute this must be an array. Can be specified as a function for advanced implementations.
+ env | Dictionary | **Optional.** A dictionary of macros which should be exported as environment variables prior to executing the command.
+ vars | Dictionary | **Optional.** A dictionary containing custom variables that are specific to this command.
+ timeout | Duration | **Optional.** The command timeout in seconds. Defaults to `1m`.
+ arguments | Dictionary | **Optional.** A dictionary of command arguments.
+
+Command arguments can be used the same way as for [CheckCommand objects](09-object-types.md#objecttype-checkcommand-arguments).
+
+More details on specific attributes can be found in [this chapter](03-monitoring-basics.md#notification-commands).
+
+### ScheduledDowntime <a id="objecttype-scheduleddowntime"></a>
+
+ScheduledDowntime objects can be used to set up recurring downtimes for hosts/services.
+
+> **Best Practice**
+>
+> Rather than creating a `ScheduledDowntime` object for a specific host or service it is usually easier
+> to just create a `ScheduledDowntime` template and use the `apply` keyword to assign the
+> scheduled downtime to a number of hosts or services. Use the `to` keyword to set the specific target
+> type for `Host` or `Service`.
+> Check the [recurring downtimes](08-advanced-topics.md#recurring-downtimes) example for details.
+
+Example:
+
+```
+object ScheduledDowntime "some-downtime" {
+ host_name = "localhost"
+ service_name = "ping4"
+
+ author = "icingaadmin"
+ comment = "Some comment"
+
+ fixed = false
+ duration = 30m
+
+ ranges = {
+ "sunday" = "02:00-03:00"
+ }
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ host\_name | Object name | **Required.** The name of the host this scheduled downtime belongs to.
+ service\_name | Object name | **Optional.** The short name of the service this scheduled downtime belongs to. If omitted, this downtime object is treated as host downtime.
+ author | String | **Required.** The author of the downtime.
+ comment | String | **Required.** A comment for the downtime.
+ fixed | Boolean | **Optional.** Whether this is a fixed downtime. Defaults to `true`.
+ duration | Duration | **Optional.** How long the downtime lasts. Only has an effect for flexible (non-fixed) downtimes.
+ ranges | Dictionary | **Required.** A dictionary containing information about which days and durations apply to this time period.
+ child\_options | String | **Optional.** Schedule child downtimes. `DowntimeNoChildren` does not do anything, `DowntimeTriggeredChildren` schedules child downtimes triggered by this downtime, `DowntimeNonTriggeredChildren` schedules non-triggered downtimes. Defaults to `DowntimeNoChildren`.
+
+ScheduledDowntime objects have composite names, i.e. their names are based
+on the `host_name` and `service_name` attributes and the
+name you specified. This means you can define more than one object
+with the same (short) name as long as one of the `host_name` and
+`service_name` attributes has a different value.
+
+See also [time zone handling](08-advanced-topics.md#timeperiods-timezones).
+
+
+### Service <a id="objecttype-service"></a>
+
+Service objects describe network services and how they should be checked
+by Icinga 2.
+
+> **Best Practice**
+>
+> Rather than creating a `Service` object for a specific host it is usually easier
+> to just create a `Service` template and use the `apply` keyword to assign the
+> service to a number of hosts.
+> Check the [apply](03-monitoring-basics.md#using-apply) chapter for details.
+
+Example:
+
+```
+object Service "uptime" {
+ host_name = "localhost"
+
+ display_name = "localhost Uptime"
+
+ check_command = "snmp"
+
+ vars.snmp_community = "public"
+ vars.snmp_oid = "DISMAN-EVENT-MIB::sysUpTimeInstance"
+
+ check_interval = 60s
+ retry_interval = 15s
+
+ groups = [ "all-services", "snmp" ]
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ display\_name | String | **Optional.** A short description of the service.
+ host\_name | Object name | **Required.** The host this service belongs to. There must be a `Host` object with that name.
+ groups | Array of object names | **Optional.** The service groups this service belongs to.
+ vars | Dictionary | **Optional.** A dictionary containing custom variables that are specific to this service.
+ check\_command | Object name | **Required.** The name of the check command.
+ max\_check\_attempts | Number | **Optional.** The number of times a service is re-checked before changing into a hard state. Defaults to 3.
+ check\_period | Object name | **Optional.** The name of a time period which determines when this service should be checked. Not set by default (effectively 24x7).
+ check\_timeout | Duration | **Optional.** Check command timeout in seconds. Overrides the CheckCommand's `timeout` attribute.
+ check\_interval | Duration | **Optional.** The check interval (in seconds). This interval is used for checks when the service is in a `HARD` state. Defaults to `5m`.
+ retry\_interval | Duration | **Optional.** The retry interval (in seconds). This interval is used for checks when the service is in a `SOFT` state. Defaults to `1m`. Note: This does not affect the scheduling [after a passive check result](08-advanced-topics.md#check-result-freshness).
+ enable\_notifications | Boolean | **Optional.** Whether notifications are enabled. Defaults to `true`.
+ enable\_active\_checks | Boolean | **Optional.** Whether active checks are enabled. Defaults to `true`.
+ enable\_passive\_checks | Boolean | **Optional.** Whether passive checks are enabled. Defaults to `true`.
+ enable\_event\_handler | Boolean | **Optional.** Enables event handlers for this service. Defaults to `true`.
+ enable\_flapping | Boolean | **Optional.** Whether flap detection is enabled. Defaults to `false`.
+ flapping\_threshold\_high | Number | **Optional.** Flapping upper bound in percent for a service to be considered flapping. Default `30.0`
+ flapping\_threshold\_low | Number | **Optional.** Flapping lower bound in percent for a service to be considered not flapping. Default `25.0`
+ flapping\_ignore\_states | Array | **Optional.** A list of states that should be ignored during flapping calculation. By default no state is ignored.
+ enable\_perfdata | Boolean | **Optional.** Whether performance data processing is enabled. Defaults to `true`.
+ event\_command | Object name | **Optional.** The name of an event command that should be executed every time the service's state changes or the service is in a `SOFT` state.
+ volatile | Boolean | **Optional.** Treat all state changes as HARD changes. See [here](08-advanced-topics.md#volatile-services-hosts) for details. Defaults to `false`.
+ zone | Object name | **Optional.** The zone this object is a member of. Please read the [distributed monitoring](06-distributed-monitoring.md#distributed-monitoring) chapter for details.
+ command\_endpoint | Object name | **Optional.** The endpoint where commands are executed on.
+ notes | String | **Optional.** Notes for the service.
+ notes\_url | String | **Optional.** URL for notes for the service (for example, in notification commands).
+ action\_url | String | **Optional.** URL for actions for the service (for example, an external graphing tool).
+ icon\_image | String | **Optional.** Icon image for the service. Used by external interfaces only.
+ icon\_image\_alt | String | **Optional.** Icon image description for the service. Used by external interfaces only.
+
+Service objects have composite names, i.e. their names are based on the host\_name attribute and the name you specified. This means
+you can define more than one object with the same (short) name as long as the `host_name` attribute has a different value.
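+
+As a minimal sketch with hypothetical host names, the following two definitions are both valid because their
+composite names `db1!backup` and `db2!backup` differ:
+
+```
+object Service "backup" {
+  host_name = "db1"
+  check_command = "dummy"
+}
+
+object Service "backup" {
+  host_name = "db2"
+  check_command = "dummy"
+}
+```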
+
+The actual check interval might deviate slightly from the configured values due to the fact that Icinga tries
+to evenly distribute all checks over a certain period of time, i.e. to avoid load spikes.
+
+Runtime Attributes:
+
+ Name | Type | Description
+ ------------------------------|-------------------|----------------------------------
+ next\_check | Timestamp | When the next check occurs (as a UNIX timestamp).
+ last\_check | Timestamp | When the last check occurred (as a UNIX timestamp).
+ check\_attempt | Number | The current check attempt number.
+ state\_type | Number | The current state type (0 = SOFT, 1 = HARD).
+ last\_state\_type | Number | The previous state type (0 = SOFT, 1 = HARD).
+ last\_reachable | Boolean | Whether the service was reachable when the last check occurred.
+ last\_check\_result | CheckResult | The current [check result](08-advanced-topics.md#advanced-value-types-checkresult).
+ last\_state\_change | Timestamp | When the last state change occurred (as a UNIX timestamp).
+ last\_hard\_state\_change | Timestamp | When the last hard state change occurred (as a UNIX timestamp).
+ last\_in\_downtime | Boolean | Whether the service was in a downtime when the last check occurred.
+ acknowledgement | Number | The acknowledgement type (0 = NONE, 1 = NORMAL, 2 = STICKY).
+ acknowledgement\_expiry | Timestamp | When the acknowledgement expires (as a UNIX timestamp; 0 = no expiry).
+ acknowledgement\_last\_change | Timestamp | When the acknowledgement was last set or cleared (as a UNIX timestamp).
+ downtime\_depth | Number | The number of active downtimes (`0` if the service is not in a downtime).
+ flapping\_last\_change | Timestamp | When the last flapping change occurred (as a UNIX timestamp).
+ flapping\_current | Number | Current flapping value in percent (see the `flapping_threshold_*` attributes).
+ flapping | Boolean | Whether the service is flapping between states.
+ state | Number | The current state (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN).
+ last\_state | Number | The previous state (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN).
+ last\_hard\_state | Number | The last hard state (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN).
+ last\_state\_ok | Timestamp | When the last OK state occurred (as a UNIX timestamp).
+ last\_state\_warning | Timestamp | When the last WARNING state occurred (as a UNIX timestamp).
+ last\_state\_critical | Timestamp | When the last CRITICAL state occurred (as a UNIX timestamp).
+ last\_state\_unknown | Timestamp | When the last UNKNOWN state occurred (as a UNIX timestamp).
+ last\_state\_unreachable | Timestamp | When the service was unreachable the last time (as a UNIX timestamp).
+ previous\_state\_change | Timestamp | Previous timestamp of `last_state_change` before processing a new check result.
+ severity | Number | [Severity](19-technical-concepts.md#technical-concepts-checks-severity) calculated value.
+ problem | Boolean | Whether the service is considered in a problem state type (NOT-OK).
+ handled | Boolean | Whether the service problem is handled (downtime or acknowledgement).
+ next\_update | Timestamp | When the next check update is to be expected.
+
+
+### ServiceGroup <a id="objecttype-servicegroup"></a>
+
+A group of services.
+
+> **Best Practice**
+>
+> Assign service group members using the [group assign](17-language-reference.md#group-assign) rules.
+
+Example:
+
+```
+object ServiceGroup "snmp" {
+ display_name = "SNMP services"
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ display\_name | String | **Optional.** A short description of the service group.
+ groups | Array of object names | **Optional.** An array of nested group names.
+
+
+
+### TimePeriod <a id="objecttype-timeperiod"></a>
+
+Time periods can be used to specify when hosts/services should be checked or to limit
+when notifications should be sent out.
+
+Examples:
+
+```
+object TimePeriod "nonworkhours" {
+ display_name = "Icinga 2 TimePeriod for non working hours"
+
+ ranges = {
+ monday = "00:00-8:00,17:00-24:00"
+ tuesday = "00:00-8:00,17:00-24:00"
+ wednesday = "00:00-8:00,17:00-24:00"
+ thursday = "00:00-8:00,17:00-24:00"
+ friday = "00:00-8:00,16:00-24:00"
+ saturday = "00:00-24:00"
+ sunday = "00:00-24:00"
+ }
+}
+
+object TimePeriod "exampledays" {
+ display_name = "Icinga 2 TimePeriod for random example days"
+
+ ranges = {
+ //We still believe in Santa, no peeking!
+ //Applies every 25th of December every year
+ "december 25" = "00:00-24:00"
+
+ //Any point in time can be specified,
+ //but you still have to use a range
+ "2038-01-19" = "03:13-03:15"
+
+    //Every 3rd day from the second Monday of February
+ //to 8th of November
+ "monday 2 february - november 8 / 3" = "00:00-24:00"
+ }
+}
+```
+
+Additional examples can be found [here](08-advanced-topics.md#timeperiods).
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ display\_name | String | **Optional.** A short description of the time period.
+ ranges | Dictionary | **Required.** A dictionary containing information about which days and durations apply to this time period.
+ prefer\_includes | Boolean | **Optional.** Whether to prefer the time periods listed in `includes` over those in `excludes`. Defaults to `true`.
+ excludes | Array of object names | **Optional.** An array of time periods which should be excluded from this time range.
+ includes | Array of object names | **Optional.** An array of time periods which should be included in this time range.
+
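+For example, a minimal sketch (the time period names and ranges are illustrative) that excludes a maintenance
+window from a weekend time range:
+
+```
+object TimePeriod "maintenance-window" {
+  ranges = {
+    sunday = "02:00-04:00"
+  }
+}
+
+object TimePeriod "weekend-without-maintenance" {
+  ranges = {
+    saturday = "00:00-24:00"
+    sunday = "00:00-24:00"
+  }
+
+  excludes = [ "maintenance-window" ]
+}
+```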
+
+Runtime Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ is\_inside | Boolean | Whether we're currently inside this timeperiod.
+
+See also [time zone handling](08-advanced-topics.md#timeperiods-timezones).
+
+
+### User <a id="objecttype-user"></a>
+
+A user.
+
+Example:
+
+```
+object User "icingaadmin" {
+ display_name = "Icinga 2 Admin"
+ groups = [ "icingaadmins" ]
+ email = "icinga@localhost"
+ pager = "icingaadmin@localhost.localdomain"
+
+ period = "24x7"
+
+ states = [ OK, Warning, Critical, Unknown ]
+ types = [ Problem, Recovery ]
+
+ vars.additional_notes = "This is the Icinga 2 Admin account."
+}
+```
+
+Available notification state filters:
+
+```
+OK
+Warning
+Critical
+Unknown
+Up
+Down
+```
+
+Available notification type filters:
+
+```
+DowntimeStart
+DowntimeEnd
+DowntimeRemoved
+Custom
+Acknowledgement
+Problem
+Recovery
+FlappingStart
+FlappingEnd
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ display\_name | String | **Optional.** A short description of the user.
+ email | String | **Optional.** An email string for this user. Useful for notification commands.
+ pager | String | **Optional.** A pager string for this user. Useful for notification commands.
+ vars | Dictionary | **Optional.** A dictionary containing custom variables that are specific to this user.
+ groups | Array of object names | **Optional.** An array of group names.
+ enable\_notifications | Boolean | **Optional.** Whether notifications are enabled for this user. Defaults to true.
+ period | Object name | **Optional.** The name of a time period which determines when a notification for this user should be triggered. Not set by default (effectively 24x7).
+ types | Array | **Optional.** A set of type filters when a notification for this user should be triggered. By default everything is matched.
+ states | Array | **Optional.** A set of state filters when a notification for this user should be triggered. By default everything is matched.
+
+Runtime Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ last\_notification | Timestamp | When the last notification was sent for this user (as a UNIX timestamp).
+
+### UserGroup <a id="objecttype-usergroup"></a>
+
+A user group.
+
+> **Best Practice**
+>
+> Assign user group members using the [group assign](17-language-reference.md#group-assign) rules.
+
+Example:
+
+```
+object UserGroup "icingaadmins" {
+ display_name = "Icinga 2 Admin Group"
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ display\_name | String | **Optional.** A short description of the user group.
+ groups | Array of object names | **Optional.** An array of nested group names.
+
+
+### Zone <a id="objecttype-zone"></a>
+
+Zone objects are used to specify which Icinga 2 instances are located in a zone.
+Please read the [distributed monitoring chapter](06-distributed-monitoring.md#distributed-monitoring) for additional details.
+
+Example:
+
+```
+object Zone "master" {
+ endpoints = [ "icinga2-master1.localdomain", "icinga2-master2.localdomain" ]
+}
+
+object Zone "satellite" {
+ endpoints = [ "icinga2-satellite1.localdomain" ]
+ parent = "master"
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ endpoints | Array of object names | **Optional.** Array of endpoint names located in this zone.
+ parent | Object name | **Optional.** The name of the parent zone. (Do not specify a global zone)
+ global | Boolean | **Optional.** Whether configuration files for this zone should be [synced](06-distributed-monitoring.md#distributed-monitoring-global-zone-config-sync) to all endpoints. Defaults to `false`.
+
+Zone objects cannot currently be created with the API.
+
+
+## Runtime Objects <a id="object-types-runtime"></a>
+
+These objects are generated at runtime by the daemon
+from API actions. Downtime objects are also created
+by ScheduledDowntime objects.
+
+### Comment <a id="objecttype-comment"></a>
+
+Comments created at runtime are represented as objects.
+Note: This is for reference only. You can create comments
+with the [add-comment](12-icinga2-api.md#icinga2-api-actions-add-comment) API action.
+
+Example:
+
+```
+object Comment "my-comment" {
+ host_name = "localhost"
+ author = "icingaadmin"
+ text = "This is a comment."
+ entry_time = 1234567890
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ host\_name | Object name | **Required.** The name of the host this comment belongs to.
+ service\_name | Object name | **Optional.** The short name of the service this comment belongs to. If omitted, this comment object is treated as host comment.
+ author | String | **Required.** The author's name.
+ text | String | **Required.** The comment text.
+ entry\_time | Timestamp | **Optional.** The UNIX timestamp when this comment was added. If omitted, the entry time is volatile!
+ entry\_type | Number | **Optional.** The comment type (`User` = 1, `Downtime` = 2, `Flapping` = 3, `Acknowledgement` = 4).
+ expire\_time | Timestamp | **Optional.** The comment's expire time as UNIX timestamp.
+ persistent | Boolean | **Optional.** Only evaluated for `entry_type` Acknowledgement. `true` does not remove the comment when the acknowledgement is removed.
+
+### Downtime <a id="objecttype-downtime"></a>
+
+Downtimes created at runtime are represented as objects.
+You can create downtimes with the [schedule-downtime](12-icinga2-api.md#icinga2-api-actions-schedule-downtime) API action.
+
+Example:
+
+```
+object Downtime "my-downtime" {
+ host_name = "localhost"
+ author = "icingaadmin"
+ comment = "This is a downtime."
+ start_time = 1505312869
+ end_time = 1505312924
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ host\_name | Object name | **Required.** The name of the host this downtime belongs to.
+ service\_name | Object name | **Optional.** The short name of the service this downtime belongs to. If omitted, this downtime object is treated as host downtime.
+ author | String | **Required.** The author's name.
+ comment | String | **Required.** The comment text.
+ start\_time | Timestamp | **Required.** The start time as UNIX timestamp.
+ end\_time | Timestamp | **Required.** The end time as UNIX timestamp.
+ duration | Number | **Optional.** The duration in seconds. Only has an effect for flexible (non-fixed) downtimes.
+ entry\_time | Timestamp | **Optional.** The UNIX timestamp when this downtime was added.
+ fixed | Boolean | **Optional.** Whether the downtime is fixed (true) or flexible (false). Defaults to flexible. Details in the [advanced topics chapter](08-advanced-topics.md#fixed-flexible-downtimes).
+ triggers | Array of object names | **Optional.** List of downtimes which should be triggered by this downtime.
+
+Runtime Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ trigger\_time | Timestamp | The UNIX timestamp when this downtime was triggered.
+ triggered\_by | Object name | The name of the downtime this downtime was triggered by.
+
+
+
+## Features <a id="object-types-features"></a>
+
+### ApiListener <a id="objecttype-apilistener"></a>
+
+ApiListener objects are used for distributed monitoring setups
+and API usage. They specify the certificate files used for SSL
+authorization as well as additional restrictions.
+This configuration object is available as [api feature](11-cli-commands.md#cli-command-feature).
+
+The `TicketSalt` constant must be defined in [constants.conf](04-configuration.md#constants-conf).
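+
+For instance, in `constants.conf` (the value below is just a placeholder, use your own random string):
+
+```
+const TicketSalt = "insert-your-own-random-string-here"
+```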
+
+Example:
+
+```
+object ApiListener "api" {
+ accept_commands = true
+ accept_config = true
+
+ ticket_salt = TicketSalt
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------------------|-----------------------|----------------------------------
+ cert\_path | String | **Deprecated.** Path to the public key.
+ key\_path | String | **Deprecated.** Path to the private key.
+ ca\_path | String | **Deprecated.** Path to the CA certificate file.
+ ticket\_salt | String | **Optional.** Private key for [CSR auto-signing](06-distributed-monitoring.md#distributed-monitoring-setup-csr-auto-signing). **Required** for a signing master instance.
+ crl\_path | String | **Optional.** Path to the CRL file.
+ bind\_host | String | **Optional.** The IP address the api listener should be bound to. If not specified, the ApiListener is bound to `::` and listens for both IPv4 and IPv6 connections or to `0.0.0.0` if IPv6 is not supported by the operating system.
+ bind\_port | Number | **Optional.** The port the api listener should be bound to. Defaults to `5665`.
+ accept\_config | Boolean | **Optional.** Accept zone configuration. Defaults to `false`.
+ accept\_commands | Boolean | **Optional.** Accept remote commands. Defaults to `false`.
+ max\_anonymous\_clients | Number | **Optional.** Limit the number of anonymous client connections (not configured endpoints and signing requests).
+ cipher\_list | String | **Optional.** Cipher list that is allowed. For a list of available ciphers run `openssl ciphers`. Defaults to `ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256`.
+ tls\_protocolmin | String | **Optional.** Minimum TLS protocol version. Since v2.11, only `TLSv1.2` is supported. Defaults to `TLSv1.2`.
+ tls\_handshake\_timeout | Number | **Deprecated.** TLS Handshake timeout. Defaults to `10s`.
+ connect\_timeout | Number | **Optional.** Timeout for establishing new connections. Affects both incoming and outgoing connections. Within this time, the TCP and TLS handshakes must complete and either a HTTP request or an Icinga cluster connection must be initiated. Defaults to `15s`.
+ access\_control\_allow\_origin | Array | **Optional.** Specifies an array of origin URLs that may access the API. [(MDN docs)](https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS#Access-Control-Allow-Origin)
+ access\_control\_allow\_credentials | Boolean | **Deprecated.** Indicates whether or not the actual request can be made using credentials. Defaults to `true`. [(MDN docs)](https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS#Access-Control-Allow-Credentials)
+ access\_control\_allow\_headers | String | **Deprecated.** Used in response to a preflight request to indicate which HTTP headers can be used when making the actual request. Defaults to `Authorization`. [(MDN docs)](https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS#Access-Control-Allow-Headers)
+ access\_control\_allow\_methods | String | **Deprecated.** Used in response to a preflight request to indicate which HTTP methods can be used when making the actual request. Defaults to `GET, POST, PUT, DELETE`. [(MDN docs)](https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS#Access-Control-Allow-Methods)
+ environment | String | **Optional.** Used as suffix in TLS SNI extension name; default from constant `ApiEnvironment`, which is empty.
+
+The attributes `access_control_allow_credentials`, `access_control_allow_headers` and `access_control_allow_methods`
+are controlled by Icinga 2 and can no longer be changed via configuration.
+
+
+The ApiListener type expects its certificate files to be in the following locations:
+
+ Type | Location
+ ---------------------|-------------------------------------
+ Private key | `DataDir + "/certs/" + NodeName + ".key"`
+ Certificate file | `DataDir + "/certs/" + NodeName + ".crt"`
+ CA certificate file | `DataDir + "/certs/ca.crt"`
+
+If the deprecated attributes `cert_path`, `key_path` and/or `ca_path` are specified Icinga 2
+copies those files to the new location in `DataDir + "/certs"` unless the
+file(s) there are newer.
+
+Please check the [upgrading chapter](16-upgrading-icinga-2.md#upgrading-to-2-8-certificate-paths) for more details.
+
+While Icinga 2 and the underlying OpenSSL library use sane and secure defaults, the attributes
+`cipher_list` and `tls_protocolmin` can be used to increase communication security. A good source
+for a more secure configuration is provided by the [Mozilla Wiki](https://wiki.mozilla.org/Security/Server_Side_TLS).
+Use the same configuration for both attributes on **all** endpoints to avoid communication problems; this
+requires a `cipher_list` that is compatible with the endpoint using the oldest version of the OpenSSL library. If
+other tools connect to the API, also ensure compatibility with them, as these settings affect not only inter-cluster
+communication but also the REST API.
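+
+A minimal sketch of such a hardening (the cipher string below is only an illustration, derive yours from the
+Mozilla recommendations):
+
+```
+object ApiListener "api" {
+  accept_commands = true
+  accept_config = true
+
+  tls_protocolmin = "TLSv1.2"
+  cipher_list = "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384"
+}
+```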
+
+### CheckerComponent <a id="objecttype-checkercomponent"></a>
+
+The checker component is responsible for scheduling active checks.
+This configuration object is available as [checker feature](11-cli-commands.md#cli-command-feature).
+
+Example:
+
+```
+object CheckerComponent "checker" { }
+```
+
+In order to limit the concurrent checks on a master/satellite endpoint,
+use the [MaxConcurrentChecks](17-language-reference.md#icinga-constants-global-config) constant.
+This also applies to agents used as command endpoints, even when their
+checker feature is disabled.
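+
+For example, in [constants.conf](04-configuration.md#constants-conf) (the value below is only an illustration):
+
+```
+const MaxConcurrentChecks = 256
+```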
+
+### CompatLogger <a id="objecttype-compatlogger"></a>
+
+Writes log files in a format that's compatible with Icinga 1.x.
+This configuration object is available as [compatlog feature](14-features.md#compat-logging).
+
+> **Note**
+>
+> This feature is DEPRECATED and may be removed in future releases.
+> Check the [roadmap](https://github.com/Icinga/icinga2/milestones).
+
+Example:
+
+```
+object CompatLogger "compatlog" {
+ log_dir = "/var/log/icinga2/compat"
+ rotation_method = "DAILY"
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ log\_dir | String | **Optional.** Path to the compat log directory. Defaults to LogDir + "/compat".
+ rotation\_method | String | **Optional.** Specifies when to rotate log files. Can be one of "HOURLY", "DAILY", "WEEKLY" or "MONTHLY". Defaults to "HOURLY".
+
+
+### ElasticsearchWriter <a id="objecttype-elasticsearchwriter"></a>
+
+Writes check result metrics and performance data to an Elasticsearch instance.
+This configuration object is available as [elasticsearch feature](14-features.md#elasticsearch-writer).
+
+Example:
+
+```
+object ElasticsearchWriter "elasticsearch" {
+ host = "127.0.0.1"
+ port = 9200
+ index = "icinga2"
+
+ enable_send_perfdata = true
+
+ flush_threshold = 1024
+ flush_interval = 10
+}
+```
+
+The index is rotated daily, as recommended by Elastic, i.e. data is written to an index named `$index-$d.$M.$y`.
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ host | String | **Required.** Elasticsearch host address. Defaults to `127.0.0.1`.
+ port | Number | **Required.** Elasticsearch port. Defaults to `9200`.
+ index | String | **Required.** Elasticsearch index name. Defaults to `icinga2`.
+ enable\_send\_perfdata | Boolean | **Optional.** Send parsed performance data metrics for check results. Defaults to `false`.
+ flush\_interval | Duration | **Optional.** How long to buffer data points before transferring to Elasticsearch. Defaults to `10s`.
+ flush\_threshold | Number | **Optional.** How many data points to buffer before forcing a transfer to Elasticsearch. Defaults to `1024`.
+ username | String | **Optional.** Basic auth username if Elasticsearch is hidden behind an HTTP proxy.
+ password | String | **Optional.** Basic auth password if Elasticsearch is hidden behind an HTTP proxy.
+ enable\_tls | Boolean | **Optional.** Whether to use a TLS stream. Defaults to `false`. Requires an HTTP proxy.
+ insecure\_noverify | Boolean | **Optional.** Disable TLS peer verification.
+ ca\_path | String | **Optional.** Path to CA certificate to validate the remote host. Requires `enable_tls` set to `true`.
+ cert\_path | String | **Optional.** Path to host certificate to present to the remote host for mutual verification. Requires `enable_tls` set to `true`.
+ key\_path | String | **Optional.** Path to host key to accompany the cert\_path. Requires `enable_tls` set to `true`.
+ enable\_ha | Boolean | **Optional.** Enable the high availability functionality. Only valid in a [cluster setup](06-distributed-monitoring.md#distributed-monitoring-high-availability-features). Defaults to `false`.
+
+Note: If `flush_threshold` is set too low, the feature will flush data to Elasticsearch too often.
+Experiment with the setting if you are processing more than 1024 metrics per second or similar.
+
+Basic auth is supported with the `username` and `password` attributes. This requires an
+HTTP proxy (Nginx, etc.) in front of the Elasticsearch instance. Check [this blogpost](https://blog.netways.de/2017/09/14/secure-elasticsearch-and-kibana-with-an-nginx-http-proxy/)
+for an example.
+
+TLS for the HTTP proxy can be enabled with `enable_tls`. In addition to that
+you can specify the certificates with the `ca_path`, `cert_path` and `key_path` attributes.
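+
+A minimal sketch combining basic auth and TLS towards such a proxy (the hostname, credentials and CA path are
+hypothetical):
+
+```
+object ElasticsearchWriter "elasticsearch" {
+  host = "elasticsearch-proxy.example.com"
+  port = 443
+  index = "icinga2"
+
+  username = "icinga"
+  password = "supersecret"
+
+  enable_tls = true
+  ca_path = "/etc/icinga2/pki/proxy-ca.crt"
+}
+```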
+
+### ExternalCommandListener <a id="objecttype-externalcommandlistener"></a>
+
+Implements the Icinga 1.x command pipe which can be used to send commands to Icinga.
+This configuration object is available as [command feature](14-features.md#external-commands).
+
+> **Note**
+>
+> This feature is DEPRECATED and may be removed in future releases.
+> Check the [roadmap](https://github.com/Icinga/icinga2/milestones).
+
+Example:
+
+```
+object ExternalCommandListener "command" {
+ command_path = "/var/run/icinga2/cmd/icinga2.cmd"
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ command\_path | String | **Optional.** Path to the command pipe. Defaults to RunDir + "/icinga2/cmd/icinga2.cmd".
+
+
+
+### FileLogger <a id="objecttype-filelogger"></a>
+
+Specifies Icinga 2 logging to a file.
+This configuration object is available as `mainlog` and `debuglog` [logging feature](14-features.md#logging).
+
+Example:
+
+```
+object FileLogger "debug-file" {
+ severity = "debug"
+ path = "/var/log/icinga2/debug.log"
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ path | String | **Required.** The log path.
+ severity | String | **Optional.** The minimum severity for this log. Can be "debug", "notice", "information", "warning" or "critical". Defaults to "information".
+
+
+### GelfWriter <a id="objecttype-gelfwriter"></a>
+
+Writes event log entries to a defined GELF receiver host (Graylog, Logstash).
+This configuration object is available as [gelf feature](14-features.md#gelfwriter).
+
+Example:
+
+```
+object GelfWriter "gelf" {
+ host = "127.0.0.1"
+ port = 12201
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ host | String | **Optional.** GELF receiver host address. Defaults to `127.0.0.1`.
+ port | Number | **Optional.** GELF receiver port. Defaults to `12201`.
+ source | String | **Optional.** Source name for this instance. Defaults to `icinga2`.
+ enable\_send\_perfdata | Boolean | **Optional.** Enable performance data for 'CHECK RESULT' events.
+ enable\_ha | Boolean | **Optional.** Enable the high availability functionality. Only valid in a [cluster setup](06-distributed-monitoring.md#distributed-monitoring-high-availability-features). Defaults to `false`.
+ enable\_tls | Boolean | **Optional.** Whether to use a TLS stream. Defaults to `false`.
+ insecure\_noverify | Boolean | **Optional.** Disable TLS peer verification.
+ ca\_path | String | **Optional.** Path to CA certificate to validate the remote host. Requires `enable_tls` set to `true`.
+ cert\_path | String | **Optional.** Path to host certificate to present to the remote host for mutual verification. Requires `enable_tls` set to `true`.
+ key\_path | String | **Optional.** Path to host key to accompany the cert\_path. Requires `enable_tls` set to `true`.
+
+### GraphiteWriter <a id="objecttype-graphitewriter"></a>
+
+Writes check result metrics and performance data to a defined
+Graphite Carbon host.
+This configuration object is available as [graphite feature](14-features.md#graphite-carbon-cache-writer).
+
+Example:
+
+```
+object GraphiteWriter "graphite" {
+ host = "127.0.0.1"
+ port = 2003
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ host | String | **Optional.** Graphite Carbon host address. Defaults to `127.0.0.1`.
+ port | Number | **Optional.** Graphite Carbon port. Defaults to `2003`.
+ host\_name\_template | String | **Optional.** Metric prefix for host name. Defaults to `icinga2.$host.name$.host.$host.check_command$`.
+ service\_name\_template | String | **Optional.** Metric prefix for service name. Defaults to `icinga2.$host.name$.services.$service.name$.$service.check_command$`.
+ enable\_send\_thresholds | Boolean | **Optional.** Send additional threshold metrics. Defaults to `false`.
+ enable\_send\_metadata | Boolean | **Optional.** Send additional metadata metrics. Defaults to `false`.
+ enable\_ha | Boolean | **Optional.** Enable the high availability functionality. Only valid in a [cluster setup](06-distributed-monitoring.md#distributed-monitoring-high-availability-features). Defaults to `false`.
+
+Additional usage examples can be found [here](14-features.md#graphite-carbon-cache-writer).
+
+
+### IcingaApplication <a id="objecttype-icingaapplication"></a>
+
+The IcingaApplication object is required to start Icinga 2.
+The object name must be `app`. If the object configuration
+is missing, Icinga 2 will automatically create an IcingaApplication
+object.
+
+Example:
+
+```
+object IcingaApplication "app" {
+ enable_perfdata = false
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ enable\_notifications | Boolean | **Optional.** Whether notifications are globally enabled. Defaults to true.
+ enable\_event\_handlers | Boolean | **Optional.** Whether event handlers are globally enabled. Defaults to true.
+ enable\_flapping | Boolean | **Optional.** Whether flap detection is globally enabled. Defaults to true.
+ enable\_host\_checks | Boolean | **Optional.** Whether active host checks are globally enabled. Defaults to true.
+ enable\_service\_checks | Boolean | **Optional.** Whether active service checks are globally enabled. Defaults to true.
+ enable\_perfdata | Boolean | **Optional.** Whether performance data processing is globally enabled. Defaults to true.
+ vars | Dictionary | **Optional.** A dictionary containing custom variables that are available globally.
+ environment | String | **Optional.** Specify the Icinga environment. This overrides the `Environment` constant specified in the configuration or on the CLI with `--define`. Defaults to empty.
+
+
+### IcingaDB <a id="objecttype-icingadb"></a>
+
+The `IcingaDB` object implements the [Icinga DB feature](14-features.md#icinga-db).
+
+Example:
+
+```
+object IcingaDB "icingadb" {
+ //host = "127.0.0.1"
+ //port = 6380
+ //password = "xxx"
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ host | String | **Optional.** Redis host. Defaults to `127.0.0.1`.
+ port | Number | **Optional.** Redis port. Defaults to `6380` since the Redis server provided by the `icingadb-redis` package listens on that port.
+ path | String | **Optional.** Redis unix socket path. Can be used instead of `host` and `port` attributes.
+ password | String | **Optional.** Redis auth password.
+ enable\_tls | Boolean | **Optional.** Whether to use TLS.
+ cert\_path | String | **Optional.** Path to the certificate.
+ key\_path | String | **Optional.** Path to the private key.
+ ca\_path | String | **Optional.** Path to the CA certificate to use instead of the system's root CAs.
+ crl\_path | String | **Optional.** Path to the CRL file.
+ cipher\_list | String | **Optional.** Cipher list that is allowed. For a list of available ciphers run `openssl ciphers`. Defaults to `ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256`.
+ tls\_protocolmin | String | **Optional.** Minimum TLS protocol version. Defaults to `TLSv1.2`.
+ insecure\_noverify | Boolean | **Optional.** Whether not to verify the peer.
+ connect\_timeout | Number | **Optional.** Timeout for establishing new connections. Within this time, the TCP, TLS (if enabled) and Redis handshakes must complete. Defaults to `15s`.
+
+### IdoMySqlConnection <a id="objecttype-idomysqlconnection"></a>
+
+> **Note**
+>
+> This feature is DEPRECATED and may be removed in future releases.
+> Check the [roadmap](https://github.com/Icinga/icinga2/milestones).
+
+IDO database adapter for MySQL.
+This configuration object is available as [ido-mysql feature](14-features.md#db-ido).
+
+Example:
+
+```
+object IdoMysqlConnection "mysql-ido" {
+ host = "127.0.0.1"
+ port = 3306
+ user = "icinga"
+ password = "icinga"
+ database = "icinga"
+
+ cleanup = {
+ downtimehistory_age = 48h
+ contactnotifications_age = 31d
+ }
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ host | String | **Optional.** MySQL database host address. Defaults to `localhost`.
+ port | Number | **Optional.** MySQL database port. Defaults to `3306`.
+ socket\_path | String | **Optional.** MySQL socket path.
+ user | String | **Optional.** MySQL database user with read/write permission to the icinga database. Defaults to `icinga`.
+ password | String | **Optional.** MySQL database user's password. Defaults to `icinga`.
+ database | String | **Optional.** MySQL database name. Defaults to `icinga`.
+ enable\_ssl | Boolean | **Optional.** Use SSL. Defaults to false. Change to `true` in case you want to use any of the SSL options.
+ ssl\_key | String | **Optional.** MySQL SSL client key file path.
+ ssl\_cert | String | **Optional.** MySQL SSL certificate file path.
+ ssl\_ca | String | **Optional.** MySQL SSL certificate authority certificate file path.
+ ssl\_capath | String | **Optional.** MySQL SSL trusted SSL CA certificates in PEM format directory path.
+ ssl\_cipher | String | **Optional.** MySQL SSL list of allowed ciphers.
+ table\_prefix | String | **Optional.** MySQL database table prefix. Defaults to `icinga_`.
+ instance\_name | String | **Optional.** Unique identifier for the local Icinga 2 instance, used for multiple Icinga 2 clusters writing to the same database. Defaults to `default`.
+ instance\_description | String | **Optional.** Description for the Icinga 2 instance.
+ enable\_ha | Boolean | **Optional.** Enable the high availability functionality. Only valid in a [cluster setup](06-distributed-monitoring.md#distributed-monitoring-high-availability-db-ido). Defaults to `true`.
+ failover\_timeout | Duration | **Optional.** Set the failover timeout in a [HA cluster](06-distributed-monitoring.md#distributed-monitoring-high-availability-db-ido). Must not be lower than 30s. Defaults to `30s`.
+ cleanup | Dictionary | **Optional.** Dictionary with items for historical table cleanup.
+ categories | Array | **Optional.** Array of information types that should be written to the database.
+
+Cleanup Items:
+
+ Name | Type | Description
+ --------------------------------|-----------------------|----------------------------------
+ acknowledgements\_age | Duration | **Optional.** Max age for acknowledgements table rows (entry\_time). Defaults to 0 (never).
+ commenthistory\_age | Duration | **Optional.** Max age for commenthistory table rows (entry\_time). Defaults to 0 (never).
+ contactnotifications\_age | Duration | **Optional.** Max age for contactnotifications table rows (start\_time). Defaults to 0 (never).
+ contactnotificationmethods\_age | Duration | **Optional.** Max age for contactnotificationmethods table rows (start\_time). Defaults to 0 (never).
+ downtimehistory\_age | Duration | **Optional.** Max age for downtimehistory table rows (entry\_time). Defaults to 0 (never).
+ eventhandlers\_age | Duration | **Optional.** Max age for eventhandlers table rows (start\_time). Defaults to 0 (never).
+ externalcommands\_age | Duration | **Optional.** Max age for externalcommands table rows (entry\_time). Defaults to 0 (never).
+ flappinghistory\_age | Duration | **Optional.** Max age for flappinghistory table rows (event\_time). Defaults to 0 (never).
+ hostchecks\_age | Duration | **Optional.** Max age for hostchecks table rows (start\_time). Defaults to 0 (never).
+ logentries\_age | Duration | **Optional.** Max age for logentries table rows (logentry\_time). Defaults to 0 (never).
+ notifications\_age | Duration | **Optional.** Max age for notifications table rows (start\_time). Defaults to 0 (never).
+ processevents\_age | Duration | **Optional.** Max age for processevents table rows (event\_time). Defaults to 0 (never).
+ statehistory\_age | Duration | **Optional.** Max age for statehistory table rows (state\_time). Defaults to 0 (never).
+ servicechecks\_age | Duration | **Optional.** Max age for servicechecks table rows (start\_time). Defaults to 0 (never).
+ systemcommands\_age | Duration | **Optional.** Max age for systemcommands table rows (start\_time). Defaults to 0 (never).
+
+> **Supported units**
+>
+> Supported suffixes include ms (milliseconds), s (seconds), m (minutes), h (hours) and d (days).
+> Check the [language reference](17-language-reference.md#duration-literals).
+
+Data Categories:
+
+ Name | Description | Required by
+ ---------------------|------------------------|--------------------
+ DbCatConfig | Configuration data | Icinga Web 2
+ DbCatState | Current state data | Icinga Web 2
+ DbCatAcknowledgement | Acknowledgements | Icinga Web 2
+ DbCatComment | Comments | Icinga Web 2
+ DbCatDowntime | Downtimes | Icinga Web 2
+ DbCatEventHandler | Event handler data | Icinga Web 2
+ DbCatExternalCommand | External commands | --
+ DbCatFlapping | Flap detection data | Icinga Web 2
+ DbCatCheck | Check results | --
+ DbCatLog | Log messages | --
+ DbCatNotification | Notifications | Icinga Web 2
+ DbCatProgramStatus | Program status data | Icinga Web 2
+ DbCatRetention | Retention data | Icinga Web 2
+ DbCatStateHistory | Historical state data | Icinga Web 2
+
+The default value for `categories` includes everything required
+by Icinga Web 2 in the table above.
+
+In addition to the category flags listed above, the `DbCatEverything`
+flag may be used as a shortcut for listing all flags.
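+
+For instance, a sketch like the following (credentials and category selection assumed for illustration) restricts the written data to configuration, state and check result information; `categories = [ "DbCatEverything" ]` would select all categories instead:
+
+```
+object IdoMysqlConnection "mysql-ido" {
+  user     = "icinga"
+  password = "icinga"
+  database = "icinga"
+
+  // Assumed selection for illustration.
+  categories = [ "DbCatConfig", "DbCatState", "DbCatCheck" ]
+}
+```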
+
+Runtime Attributes:
+
+ Name | Type | Description
+ ----------------------------|-----------------------|-----------------
+ last\_failover             | Timestamp             | When the last failover happened for this connection (only available with `enable_ha = true`).
+
+### IdoPgsqlConnection <a id="objecttype-idopgsqlconnection"></a>
+
+> **Note**
+>
+> This feature is DEPRECATED and may be removed in future releases.
+> Check the [roadmap](https://github.com/Icinga/icinga2/milestones).
+
+IDO database adapter for PostgreSQL.
+This configuration object is available as [ido-pgsql feature](14-features.md#db-ido).
+
+Example:
+
+```
+object IdoPgsqlConnection "pgsql-ido" {
+ host = "127.0.0.1"
+ port = 5432
+ user = "icinga"
+ password = "icinga"
+ database = "icinga"
+
+ cleanup = {
+ downtimehistory_age = 48h
+ contactnotifications_age = 31d
+ }
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ host | String | **Optional.** PostgreSQL database host address. Defaults to `localhost`.
+ port | Number | **Optional.** PostgreSQL database port. Defaults to `5432`.
+ user | String | **Optional.** PostgreSQL database user with read/write permission to the icinga database. Defaults to `icinga`.
+ password | String | **Optional.** PostgreSQL database user's password. Defaults to `icinga`.
+ database | String | **Optional.** PostgreSQL database name. Defaults to `icinga`.
+ ssl\_mode | String | **Optional.** Enable SSL connection mode. Value must be set according to the [sslmode setting](https://www.postgresql.org/docs/9.3/static/libpq-connect.html#LIBPQ-CONNSTRING): `prefer`, `require`, `verify-ca`, `verify-full`, `allow`, `disable`.
+ ssl\_key | String | **Optional.** PostgreSQL SSL client key file path.
+ ssl\_cert | String | **Optional.** PostgreSQL SSL certificate file path.
+ ssl\_ca | String | **Optional.** PostgreSQL SSL certificate authority certificate file path.
+ table\_prefix | String | **Optional.** PostgreSQL database table prefix. Defaults to `icinga_`.
+ instance\_name | String | **Optional.** Unique identifier for the local Icinga 2 instance, used for multiple Icinga 2 clusters writing to the same database. Defaults to `default`.
+ instance\_description | String | **Optional.** Description for the Icinga 2 instance.
+ enable\_ha | Boolean | **Optional.** Enable the high availability functionality. Only valid in a [cluster setup](06-distributed-monitoring.md#distributed-monitoring-high-availability-db-ido). Defaults to `true`.
+ failover\_timeout | Duration | **Optional.** Set the failover timeout in a [HA cluster](06-distributed-monitoring.md#distributed-monitoring-high-availability-db-ido). Must not be lower than 30s. Defaults to `30s`.
+ cleanup | Dictionary | **Optional.** Dictionary with items for historical table cleanup.
+ categories | Array | **Optional.** Array of information types that should be written to the database.
+
+Cleanup Items:
+
+ Name | Type | Description
+ --------------------------------|-----------------------|----------------------------------
+ acknowledgements\_age | Duration | **Optional.** Max age for acknowledgements table rows (entry\_time). Defaults to 0 (never).
+ commenthistory\_age | Duration | **Optional.** Max age for commenthistory table rows (entry\_time). Defaults to 0 (never).
+ contactnotifications\_age | Duration | **Optional.** Max age for contactnotifications table rows (start\_time). Defaults to 0 (never).
+ contactnotificationmethods\_age | Duration | **Optional.** Max age for contactnotificationmethods table rows (start\_time). Defaults to 0 (never).
+ downtimehistory\_age | Duration | **Optional.** Max age for downtimehistory table rows (entry\_time). Defaults to 0 (never).
+ eventhandlers\_age | Duration | **Optional.** Max age for eventhandlers table rows (start\_time). Defaults to 0 (never).
+ externalcommands\_age | Duration | **Optional.** Max age for externalcommands table rows (entry\_time). Defaults to 0 (never).
+ flappinghistory\_age | Duration | **Optional.** Max age for flappinghistory table rows (event\_time). Defaults to 0 (never).
+ hostchecks\_age | Duration | **Optional.** Max age for hostchecks table rows (start\_time). Defaults to 0 (never).
+ logentries\_age | Duration | **Optional.** Max age for logentries table rows (logentry\_time). Defaults to 0 (never).
+ notifications\_age | Duration | **Optional.** Max age for notifications table rows (start\_time). Defaults to 0 (never).
+ processevents\_age | Duration | **Optional.** Max age for processevents table rows (event\_time). Defaults to 0 (never).
+ statehistory\_age | Duration | **Optional.** Max age for statehistory table rows (state\_time). Defaults to 0 (never).
+ servicechecks\_age | Duration | **Optional.** Max age for servicechecks table rows (start\_time). Defaults to 0 (never).
+ systemcommands\_age | Duration | **Optional.** Max age for systemcommands table rows (start\_time). Defaults to 0 (never).
+
+> **Supported units**
+>
+> Supported suffixes include ms (milliseconds), s (seconds), m (minutes), h (hours) and d (days).
+> Check the [language reference](17-language-reference.md#duration-literals).
+
+Data Categories:
+
+ Name | Description | Required by
+ ---------------------|------------------------|--------------------
+ DbCatConfig | Configuration data | Icinga Web 2
+ DbCatState | Current state data | Icinga Web 2
+ DbCatAcknowledgement | Acknowledgements | Icinga Web 2
+ DbCatComment | Comments | Icinga Web 2
+ DbCatDowntime | Downtimes | Icinga Web 2
+ DbCatEventHandler | Event handler data | Icinga Web 2
+ DbCatExternalCommand | External commands | --
+ DbCatFlapping | Flap detection data | Icinga Web 2
+ DbCatCheck | Check results | --
+ DbCatLog | Log messages | --
+ DbCatNotification | Notifications | Icinga Web 2
+ DbCatProgramStatus | Program status data | Icinga Web 2
+ DbCatRetention | Retention data | Icinga Web 2
+ DbCatStateHistory | Historical state data | Icinga Web 2
+
+The default value for `categories` includes everything required
+by Icinga Web 2 in the table above.
+
+In addition to the category flags listed above, the `DbCatEverything`
+flag may be used as a shortcut for listing all flags.
+
+Runtime Attributes:
+
+ Name | Type | Description
+ ----------------------------|-----------------------|-----------------
+ last\_failover             | Timestamp             | When the last failover happened for this connection (only available with `enable_ha = true`).
+
+### InfluxdbWriter <a id="objecttype-influxdbwriter"></a>
+
+Writes check result metrics and performance data to a defined InfluxDB v1 host.
+This configuration object is available as [influxdb feature](14-features.md#influxdb-writer).
+For InfluxDB v2 support see the [Influxdb2Writer](#objecttype-influxdb2writer) below.
+
+Example:
+
+```
+object InfluxdbWriter "influxdb" {
+ host = "127.0.0.1"
+ port = 8086
+ database = "icinga2"
+ username = "icinga2"
+ password = "icinga2"
+
+ basic_auth = {
+ username = "icinga"
+ password = "icinga"
+ }
+
+ flush_threshold = 1024
+ flush_interval = 10s
+
+ host_template = {
+ measurement = "$host.check_command$"
+ tags = {
+ hostname = "$host.name$"
+ }
+ }
+ service_template = {
+ measurement = "$service.check_command$"
+ tags = {
+ hostname = "$host.name$"
+ service = "$service.name$"
+ }
+ }
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ host | String | **Required.** InfluxDB host address. Defaults to `127.0.0.1`.
+ port | Number | **Required.** InfluxDB HTTP port. Defaults to `8086`.
+ database | String | **Required.** InfluxDB database name. Defaults to `icinga2`.
+ username | String | **Optional.** InfluxDB user name. Defaults to `none`.
+ password | String | **Optional.** InfluxDB user password. Defaults to `none`.
+ basic\_auth | Dictionary | **Optional.** Username and password for HTTP basic authentication.
+ ssl\_enable | Boolean | **Optional.** Whether to use a TLS stream. Defaults to `false`.
+ ssl\_insecure\_noverify | Boolean | **Optional.** Disable TLS peer verification.
+ ssl\_ca\_cert | String | **Optional.** Path to CA certificate to validate the remote host.
+ ssl\_cert | String | **Optional.** Path to host certificate to present to the remote host for mutual verification.
+ ssl\_key | String | **Optional.** Path to host key to accompany the ssl\_cert.
+ host\_template | Dictionary | **Required.** Host template to define the InfluxDB line protocol.
+ service\_template         | Dictionary            | **Required.** Service template to define the InfluxDB line protocol.
+ enable\_send\_thresholds | Boolean | **Optional.** Whether to send warn, crit, min & max tagged data.
+ enable\_send\_metadata | Boolean | **Optional.** Whether to send check metadata e.g. states, execution time, latency etc.
+ flush\_interval | Duration | **Optional.** How long to buffer data points before transferring to InfluxDB. Defaults to `10s`.
+ flush\_threshold | Number | **Optional.** How many data points to buffer before forcing a transfer to InfluxDB. Defaults to `1024`.
+ enable\_ha | Boolean | **Optional.** Enable the high availability functionality. Only valid in a [cluster setup](06-distributed-monitoring.md#distributed-monitoring-high-availability-features). Defaults to `false`.
+
+Note: If `flush_threshold` is set too low, the feature is forced to flush all buffered data
+to InfluxDB almost constantly. Experiment with this setting if you are processing more than
+roughly 1024 metrics per second.
+
+
+
+### Influxdb2Writer <a id="objecttype-influxdb2writer"></a>
+
+Writes check result metrics and performance data to a defined InfluxDB v2 host.
+This configuration object is available as [influxdb feature](14-features.md#influxdb-writer).
+For InfluxDB v1 support see the [InfluxdbWriter](#objecttype-influxdbwriter) above.
+
+Example:
+
+```
+object Influxdb2Writer "influxdb2" {
+ host = "127.0.0.1"
+ port = 8086
+ organization = "monitoring"
+ bucket = "icinga2"
+ auth_token = "ABCDEvwxyz0189-_"
+
+ flush_threshold = 1024
+ flush_interval = 10s
+
+ host_template = {
+ measurement = "$host.check_command$"
+ tags = {
+ hostname = "$host.name$"
+ }
+ }
+ service_template = {
+ measurement = "$service.check_command$"
+ tags = {
+ hostname = "$host.name$"
+ service = "$service.name$"
+ }
+ }
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ host | String | **Required.** InfluxDB host address. Defaults to `127.0.0.1`.
+ port | Number | **Required.** InfluxDB HTTP port. Defaults to `8086`.
+ organization | String | **Required.** InfluxDB organization name.
+ bucket | String | **Required.** InfluxDB bucket name.
+ auth\_token | String | **Required.** InfluxDB authentication token.
+ ssl\_enable | Boolean | **Optional.** Whether to use a TLS stream. Defaults to `false`.
+ ssl\_insecure\_noverify | Boolean | **Optional.** Disable TLS peer verification.
+ ssl\_ca\_cert | String | **Optional.** Path to CA certificate to validate the remote host.
+ ssl\_cert | String | **Optional.** Path to host certificate to present to the remote host for mutual verification.
+ ssl\_key | String | **Optional.** Path to host key to accompany the ssl\_cert.
+ host\_template | Dictionary | **Required.** Host template to define the InfluxDB line protocol.
+ service\_template         | Dictionary            | **Required.** Service template to define the InfluxDB line protocol.
+ enable\_send\_thresholds | Boolean | **Optional.** Whether to send warn, crit, min & max tagged data.
+ enable\_send\_metadata | Boolean | **Optional.** Whether to send check metadata e.g. states, execution time, latency etc.
+ flush\_interval | Duration | **Optional.** How long to buffer data points before transferring to InfluxDB. Defaults to `10s`.
+ flush\_threshold | Number | **Optional.** How many data points to buffer before forcing a transfer to InfluxDB. Defaults to `1024`.
+ enable\_ha | Boolean | **Optional.** Enable the high availability functionality. Only valid in a [cluster setup](06-distributed-monitoring.md#distributed-monitoring-high-availability-features). Defaults to `false`.
+
+Note: If `flush_threshold` is set too low, the feature is forced to flush all buffered data
+to InfluxDB almost constantly. Experiment with this setting if you are processing more than
+roughly 1024 metrics per second.
+
+
+### JournaldLogger <a id="objecttype-journaldlogger"></a>
+
+Specifies Icinga 2 logging to the systemd journal using its native interface.
+This configuration object is available as `journald` [logging feature](14-features.md#logging).
+
+Resulting journal records have fields as described in
+[journal fields](https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html),
+and an additional custom field `ICINGA2_FACILITY` with the detailed message origin (e.g. "ApiListener").
+
+Example:
+
+```
+object JournaldLogger "journald" {
+ severity = "warning"
+}
+```
+
+Configuration Attributes:
+
+Name | Type | Description
+--------------------------|-----------------------|----------------------------------
+severity | String | **Optional.** The minimum syslog compatible severity for this log. Can be "debug", "notice", "information", "warning" or "critical". Defaults to "information".
+facility | String | **Optional.** Defines the syslog compatible facility to use for journal entries. This can be a facility constant like `FacilityDaemon`. Defaults to `FacilityUser`.
+identifier | String | **Optional.** Defines the syslog compatible identifier (also known as "tag") to use for journal entries. If not given, systemd's default behavior is used and usually results in "icinga2".
+
+Facility Constants are the same as for [SyslogLogger](09-object-types.md#objecttype-sysloglogger).
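+
+A slightly extended sketch, assuming you want the daemon facility and a custom identifier instead of systemd's default:
+
+```
+object JournaldLogger "journald" {
+  severity = "warning"
+
+  // Assumptions for illustration: log with the daemon facility
+  // and a custom syslog identifier.
+  facility = FacilityDaemon
+  identifier = "icinga2-monitoring"
+}
+```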
+
+
+### LivestatusListener <a id="objecttype-livestatuslistener"></a>
+
+Livestatus API interface available as TCP or UNIX socket. Historical table queries
+require the [CompatLogger](09-object-types.md#objecttype-compatlogger) feature enabled
+pointing to the log files using the `compat_log_path` configuration attribute.
+This configuration object is available as [livestatus feature](14-features.md#setting-up-livestatus).
+
+> **Note**
+>
+> This feature is DEPRECATED and may be removed in future releases.
+> Check the [roadmap](https://github.com/Icinga/icinga2/milestones).
+
+Examples:
+
+```
+object LivestatusListener "livestatus-tcp" {
+ socket_type = "tcp"
+ bind_host = "127.0.0.1"
+ bind_port = "6558"
+}
+
+object LivestatusListener "livestatus-unix" {
+ socket_type = "unix"
+ socket_path = "/var/run/icinga2/cmd/livestatus"
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ socket\_type | String | **Optional.** Specifies the socket type. Can be either `tcp` or `unix`. Defaults to `unix`.
+ bind\_host | String | **Optional.** Only valid when `socket_type` is set to `tcp`. Host address to listen on for connections. Defaults to `127.0.0.1`.
+ bind\_port | Number | **Optional.** Only valid when `socket_type` is set to `tcp`. Port to listen on for connections. Defaults to `6558`.
+ socket\_path | String | **Optional.** Only valid when `socket_type` is set to `unix`. Specifies the path to the UNIX socket file. Defaults to RunDir + "/icinga2/cmd/livestatus".
+ compat\_log\_path         | String                | **Optional.** Path to Icinga 1.x log files. Required for historical table queries. Requires the `CompatLogger` feature to be enabled. Defaults to LogDir + "/compat".
+
+> **Note**
+>
+> UNIX sockets are not supported on Windows.
+
+### NotificationComponent <a id="objecttype-notificationcomponent"></a>
+
+The notification component is responsible for sending notifications.
+This configuration object is available as [notification feature](11-cli-commands.md#cli-command-feature).
+
+Example:
+
+```
+object NotificationComponent "notification" { }
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ enable\_ha | Boolean | **Optional.** Enable the high availability functionality. Only valid in a [cluster setup](06-distributed-monitoring.md#distributed-monitoring-high-availability-notifications). Disabling this currently only affects reminder notifications. Defaults to "true".
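+
+A minimal sketch, assuming you want to disable the HA behaviour on this node:
+
+```
+object NotificationComponent "notification" {
+  // Assumption for illustration: disable the HA functionality so that
+  // reminder notifications are handled by this node independently.
+  enable_ha = false
+}
+```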
+
+### OpenTsdbWriter <a id="objecttype-opentsdbwriter"></a>
+
+Writes check result metrics and performance data to [OpenTSDB](http://opentsdb.net).
+This configuration object is available as [opentsdb feature](14-features.md#opentsdb-writer).
+
+Example:
+
+```
+object OpenTsdbWriter "opentsdb" {
+ host = "127.0.0.1"
+ port = 4242
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ host | String | **Optional.** OpenTSDB host address. Defaults to `127.0.0.1`.
+ port | Number | **Optional.** OpenTSDB port. Defaults to `4242`.
+ enable\_ha | Boolean | **Optional.** Enable the high availability functionality. Only valid in a [cluster setup](06-distributed-monitoring.md#distributed-monitoring-high-availability-features). Defaults to `false`.
+ enable\_generic\_metrics  | Boolean               | **Optional.** Re-use metric names to store different perfdata values for a particular check. Use tags to distinguish perfdata instead of metric name. Defaults to `false`.
+ host\_template            | Dictionary            | **Optional.** Specify additional tags to be included with host metrics. This requires a sub-dictionary named `tags`. Also specify a naming prefix by setting `metric`. More information can be found in [OpenTSDB custom tags](14-features.md#opentsdb-custom-tags) and [OpenTSDB Metric Prefix](14-features.md#opentsdb-metric-prefix). Defaults to an empty dictionary.
+ service\_template         | Dictionary            | **Optional.** Specify additional tags to be included with service metrics. This requires a sub-dictionary named `tags`. Also specify a naming prefix by setting `metric`. More information can be found in [OpenTSDB custom tags](14-features.md#opentsdb-custom-tags) and [OpenTSDB Metric Prefix](14-features.md#opentsdb-metric-prefix). Defaults to an empty dictionary.
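+
+A sketch combining the template attributes described above (metric prefixes and tag values assumed for illustration):
+
+```
+object OpenTsdbWriter "opentsdb" {
+  host = "127.0.0.1"
+  port = 4242
+  enable_generic_metrics = true
+
+  // Assumed metric prefixes and tags for illustration.
+  host_template = {
+    metric = "icinga.host"
+    tags = {
+      zone = "$host.zone$"
+    }
+  }
+  service_template = {
+    metric = "icinga.service.$service.check_command$"
+    tags = {
+      zone = "$service.zone$"
+    }
+  }
+}
+```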
+
+
+### PerfdataWriter <a id="objecttype-perfdatawriter"></a>
+
+Writes check result performance data to a defined path using a macro
+pattern consisting of custom variables and runtime macros.
+This configuration object is available as [perfdata feature](14-features.md#writing-performance-data-files).
+
+Example:
+
+```
+object PerfdataWriter "perfdata" {
+ host_perfdata_path = "/var/spool/icinga2/perfdata/host-perfdata"
+
+ service_perfdata_path = "/var/spool/icinga2/perfdata/service-perfdata"
+
+ host_format_template = "DATATYPE::HOSTPERFDATA\tTIMET::$icinga.timet$\tHOSTNAME::$host.name$\tHOSTPERFDATA::$host.perfdata$\tHOSTCHECKCOMMAND::$host.check_command$\tHOSTSTATE::$host.state$\tHOSTSTATETYPE::$host.state_type$"
+ service_format_template = "DATATYPE::SERVICEPERFDATA\tTIMET::$icinga.timet$\tHOSTNAME::$host.name$\tSERVICEDESC::$service.name$\tSERVICEPERFDATA::$service.perfdata$\tSERVICECHECKCOMMAND::$service.check_command$\tHOSTSTATE::$host.state$\tHOSTSTATETYPE::$host.state_type$\tSERVICESTATE::$service.state$\tSERVICESTATETYPE::$service.state_type$"
+
+ rotation_interval = 15s
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ host\_perfdata\_path | String | **Optional.** Path to the host performance data file. Defaults to SpoolDir + "/perfdata/host-perfdata".
+ service\_perfdata\_path | String | **Optional.** Path to the service performance data file. Defaults to SpoolDir + "/perfdata/service-perfdata".
+ host\_temp\_path | String | **Optional.** Path to the temporary host file. Defaults to SpoolDir + "/tmp/host-perfdata".
+ service\_temp\_path | String | **Optional.** Path to the temporary service file. Defaults to SpoolDir + "/tmp/service-perfdata".
+ host\_format\_template | String | **Optional.** Host Format template for the performance data file. Defaults to a template that's suitable for use with PNP4Nagios.
+ service\_format\_template | String | **Optional.** Service Format template for the performance data file. Defaults to a template that's suitable for use with PNP4Nagios.
+ rotation\_interval | Duration | **Optional.** Rotation interval for the files specified in `{host,service}_perfdata_path`. Defaults to `30s`.
+ enable\_ha | Boolean | **Optional.** Enable the high availability functionality. Only valid in a [cluster setup](06-distributed-monitoring.md#distributed-monitoring-high-availability-features). Defaults to `false`.
+
+When rotating the performance data file the current UNIX timestamp is appended to the path specified
+in `host_perfdata_path` and `service_perfdata_path` to generate a unique filename.
+
+
+### SyslogLogger <a id="objecttype-sysloglogger"></a>
+
+Specifies Icinga 2 logging to syslog.
+This configuration object is available as `syslog` [logging feature](14-features.md#logging).
+
+Example:
+
+```
+object SyslogLogger "syslog" {
+ severity = "warning"
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ severity | String | **Optional.** The minimum severity for this log. Can be "debug", "notice", "information", "warning" or "critical". Defaults to "information".
+ facility | String | **Optional.** Defines the facility to use for syslog entries. This can be a facility constant like `FacilityDaemon`. Defaults to `FacilityUser`.
+
+Facility Constants:
+
+ Name | Facility | Description
+ ---------------------|---------------|----------------
+ FacilityAuth | LOG\_AUTH | The authorization system.
+ FacilityAuthPriv | LOG\_AUTHPRIV | The same as `FacilityAuth`, but logged to a file readable only by selected individuals.
+ FacilityCron | LOG\_CRON | The cron daemon.
+ FacilityDaemon | LOG\_DAEMON | System daemons that are not provided for explicitly by other facilities.
+ FacilityFtp | LOG\_FTP | The file transfer protocol daemons.
+ FacilityKern | LOG\_KERN | Messages generated by the kernel. These cannot be generated by any user processes.
+ FacilityLocal0 | LOG\_LOCAL0 | Reserved for local use.
+ FacilityLocal1 | LOG\_LOCAL1 | Reserved for local use.
+ FacilityLocal2 | LOG\_LOCAL2 | Reserved for local use.
+ FacilityLocal3 | LOG\_LOCAL3 | Reserved for local use.
+ FacilityLocal4 | LOG\_LOCAL4 | Reserved for local use.
+ FacilityLocal5 | LOG\_LOCAL5 | Reserved for local use.
+ FacilityLocal6 | LOG\_LOCAL6 | Reserved for local use.
+ FacilityLocal7 | LOG\_LOCAL7 | Reserved for local use.
+ FacilityLpr | LOG\_LPR | The line printer spooling system.
+ FacilityMail | LOG\_MAIL | The mail system.
+ FacilityNews | LOG\_NEWS | The network news system.
+ FacilitySyslog | LOG\_SYSLOG | Messages generated internally by syslogd.
+ FacilityUser | LOG\_USER | Messages generated by user processes. This is the default facility identifier if none is specified.
+ FacilityUucp | LOG\_UUCP | The UUCP system.
+
+
+### WindowsEventLogLogger <a id="objecttype-windowseventloglogger"></a>
+
+Specifies Icinga 2 logging to the Windows Event Log.
+This configuration object is available as `windowseventlog` [logging feature](14-features.md#logging).
+
+Example:
+
+```
+object WindowsEventLogLogger "windowseventlog" {
+ severity = "information"
+}
+```
+
+Configuration Attributes:
+
+ Name | Type | Description
+ --------------------------|-----------------------|----------------------------------
+ severity | String | **Optional.** The minimum severity for this log. Can be "debug", "notice", "information", "warning" or "critical". Defaults to "information".
diff --git a/doc/10-icinga-template-library.md b/doc/10-icinga-template-library.md
new file mode 100644
index 0000000..a8d9c7b
--- /dev/null
+++ b/doc/10-icinga-template-library.md
@@ -0,0 +1,6114 @@
+# Icinga Template Library <a id="icinga-template-library"></a>
+
+The Icinga Template Library (ITL) implements standard templates
+and object definitions.
+
+The following subsets of templates and object definitions are available:
+
+* [Generic ITL templates](10-icinga-template-library.md#itl-generic-templates)
+* [CheckCommand definitions for Icinga 2](10-icinga-template-library.md#itl-check-commands) (this includes [icinga](10-icinga-template-library.md#itl-icinga),
+[cluster](10-icinga-template-library.md#itl-icinga-cluster), [cluster-zone](10-icinga-template-library.md#itl-icinga-cluster-zone), [ido](10-icinga-template-library.md#itl-icinga-ido), etc.)
+* [CheckCommand definitions for Monitoring Plugins](10-icinga-template-library.md#plugin-check-commands-monitoring-plugins)
+* [CheckCommand definitions for Icinga 2 Windows Plugins](10-icinga-template-library.md#windows-plugins)
+* [CheckCommand definitions for NSClient++](10-icinga-template-library.md#nscp-plugin-check-commands)
+* [CheckCommand definitions for Manubulon SNMP](10-icinga-template-library.md#snmp-manubulon-plugin-check-commands)
+* [Contributed CheckCommand definitions](10-icinga-template-library.md#plugin-contrib)
+
+The ITL content is updated with new releases. Please do not modify
+these templates and objects, as your changes will be overwritten without
+further notice.
+
+You are advised to create your own CheckCommand definitions in
+`/etc/icinga2`.
+
+## Generic Templates <a id="itl-generic-templates"></a>
+
+By default the generic templates are included in the [icinga2.conf](04-configuration.md#icinga2-conf) configuration file:
+
+```
+include <itl>
+```
+
+These templates are imported by the provided example configuration.
+
+> **Note**:
+>
+> These templates are built into the binaries. By convention
+> all command and timeperiod objects should import these templates.
+
+### plugin-check-command <a id="itl-plugin-check-command"></a>
+
+Command template for check plugins executed by Icinga 2.
+
+The `plugin-check-command` command does not support any vars.
+
+By default this template is automatically imported into all [CheckCommand](09-object-types.md#objecttype-checkcommand) definitions.
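+
+Since the template is imported automatically, a user-defined check command does not need to import it explicitly. A minimal sketch with a hypothetical plugin name:
+
+```
+object CheckCommand "my_custom_check" {
+  // plugin-check-command is imported implicitly.
+  // "check_my_custom" is an assumed plugin name for illustration.
+  command = [ PluginDir + "/check_my_custom" ]
+}
+```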
+
+### plugin-notification-command <a id="itl-plugin-notification-command"></a>
+
+Command template for notification scripts executed by Icinga 2.
+
+The `plugin-notification-command` command does not support any vars.
+
+By default this template is automatically imported into all [NotificationCommand](09-object-types.md#objecttype-notificationcommand) definitions.
+
+### plugin-event-command <a id="itl-plugin-event-command"></a>
+
+Command template for event handler scripts executed by Icinga 2.
+
+The `plugin-event-command` command does not support any vars.
+
+By default this template is automatically imported into all [EventCommand](09-object-types.md#objecttype-eventcommand) definitions.
+
+### legacy-timeperiod <a id="itl-legacy-timeperiod"></a>
+
+Timeperiod template for [TimePeriod objects](09-object-types.md#objecttype-timeperiod).
+
+The `legacy-timeperiod` timeperiod does not support any vars.
+
+By default this template is automatically imported into all [TimePeriod](09-object-types.md#objecttype-timeperiod) definitions.
+
+## Check Commands <a id="itl-check-commands"></a>
+
+These check commands are embedded into Icinga 2 and do not require any external
+plugin scripts.
+
+### icinga <a id="itl-icinga"></a>
+
+Check command for the built-in `icinga` check. This check returns performance
+data for the current Icinga instance, reports a warning if the last reload failed and optionally allows for minimum version checks.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-----------------------|---------------
+icinga\_min\_version | **Optional.** Required minimum Icinga 2 version, e.g. `2.8.0`. If not satisfied, the state changes to `Critical`. Release packages only.
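+
+A sketch applying the built-in check to the local node (the version value is assumed for illustration):
+
+```
+apply Service "icinga" {
+  check_command = "icinga"
+
+  // Assumed minimum version for illustration; only evaluated for release packages.
+  vars.icinga_min_version = "2.14.0"
+
+  assign where host.name == NodeName
+}
+```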
+
+### cluster <a id="itl-icinga-cluster"></a>
+
+Check command for the built-in `cluster` check. This check returns performance
+data for the current Icinga instance and connected endpoints.
+
+The `cluster` check command does not support any vars.
+
+### cluster-zone <a id="itl-icinga-cluster-zone"></a>
+
+Check command for the built-in `cluster-zone` check.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-----------------------|---------------
+cluster\_zone | **Required.** The zone name. Defaults to `$host.name$`.
+cluster\_lag\_warning | **Optional.** Warning threshold for log lag in seconds. Applies if the log lag is greater than the threshold.
+cluster\_lag\_critical | **Optional.** Critical threshold for log lag in seconds. Applies if the log lag is greater than the threshold.
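+
+A sketch checking the connection to a satellite zone (zone name and log lag thresholds assumed for illustration):
+
+```
+apply Service "cluster-zone-satellite" {
+  check_command = "cluster-zone"
+
+  // Assumed zone name and log lag thresholds in seconds.
+  vars.cluster_zone = "satellite"
+  vars.cluster_lag_warning = 30
+  vars.cluster_lag_critical = 60
+
+  assign where host.name == NodeName
+}
+```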
+
+### icingadb <a id="itl-icinga-icingadb"></a>
+
+Check command for the built-in `icingadb` check.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-----------------------------------------|-----------------------------
+icingadb\_name | **Required.** The name of the Icinga DB connection object. Defaults to `icingadb`.
+icingadb\_full\_dump\_duration\_warning | **Optional.** Warning threshold for ongoing Redis dump duration. Applies if the value is higher than the threshold. Defaults to 5 minutes.
+icingadb\_full\_dump\_duration\_critical | **Optional.** Critical threshold for ongoing Redis dump duration. Applies if the value is higher than the threshold. Defaults to 10 minutes.
+icingadb\_full\_sync\_duration\_warning | **Optional.** Warning threshold for ongoing database sync duration. Applies if the value is higher than the threshold. Defaults to 5 minutes.
+icingadb\_full\_sync\_duration\_critical | **Optional.** Critical threshold for ongoing database sync duration. Applies if the value is higher than the threshold. Defaults to 10 minutes.
+icingadb\_redis\_backlog\_warning | **Optional.** Warning threshold for Redis write backlog. Applies if the value is higher than the threshold. Defaults to 5 minutes.
+icingadb\_redis\_backlog\_critical | **Optional.** Critical threshold for Redis write backlog. Applies if the value is higher than the threshold. Defaults to 15 minutes.
+icingadb\_database\_backlog\_warning | **Optional.** Warning threshold for database sync backlog. Applies if the value is higher than the threshold. Defaults to 5 minutes.
+icingadb\_database\_backlog\_critical | **Optional.** Critical threshold for database sync backlog. Applies if the value is higher than the threshold. Defaults to 15 minutes.
+
+### ido <a id="itl-icinga-ido"></a>
+
+Check command for the built-in `ido` check.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------------|-----------------------------
+ido\_type | **Required.** The type of the IDO connection object. Can be either "IdoMysqlConnection" or "IdoPgsqlConnection".
+ido\_name | **Required.** The name of the IDO connection object.
+ido\_queries\_warning | **Optional.** Warning threshold for queries/s. Applies if the rate is lower than the threshold.
+ido\_queries\_critical | **Optional.** Critical threshold for queries/s. Applies if the rate is lower than the threshold.
+ido\_pending\_queries\_warning | **Optional.** Warning threshold for pending queries. Applies if pending queries are higher than the threshold. Supersedes the `ido_queries` thresholds above.
+ido\_pending\_queries\_critical | **Optional.** Critical threshold for pending queries. Applies if pending queries are higher than the threshold. Supersedes the `ido_queries` thresholds above.
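+
+A sketch monitoring a MySQL IDO connection (the connection object name is assumed for illustration):
+
+```
+apply Service "ido-mysql" {
+  check_command = "ido"
+
+  vars.ido_type = "IdoMysqlConnection"
+  vars.ido_name = "mysql-ido" // assumed IdoMysqlConnection object name
+
+  assign where host.name == NodeName
+}
+```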
+
+
+### dummy <a id="itl-dummy"></a>
+
+Check command for the built-in `dummy` check. This allows you to set
+a check result state and output and can be used in [freshness checks](08-advanced-topics.md#check-result-freshness)
+or [runtime object checks](08-advanced-topics.md#access-object-attributes-at-runtime).
+In contrast to the [check_dummy](https://www.monitoring-plugins.org/doc/man/check_dummy.html)
+plugin, Icinga 2 implements a lightweight in-memory check with 2.9+.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+dummy\_state | **Optional.** The state. Can be one of 0 (ok), 1 (warning), 2 (critical) and 3 (unknown). Defaults to 0.
+dummy\_text | **Optional.** Plugin output. Defaults to "Check was successful.".
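+
+A sketch using `dummy` as a freshness check that turns critical when no passive result arrives in time (host filter, interval, state and text assumed for illustration):
+
+```
+apply Service "passive-backup-job" {
+  check_command = "dummy"
+
+  // The dummy check only executes when no passive result was
+  // received within the check interval.
+  enable_active_checks = true
+  check_interval = 2h
+
+  // Assumed state and text reported when the passive result is overdue.
+  vars.dummy_state = 2
+  vars.dummy_text = "No backup result received in the last two hours."
+
+  assign where host.vars.backup_job == true
+}
+```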
+
+### passive <a id="itl-check-command-passive"></a>
+
+Specialised check command object for passive checks, which uses the functionality of the `dummy` check command with appropriate default values.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+dummy_state | **Optional.** The state. Can be one of 0 (ok), 1 (warning), 2 (critical) and 3 (unknown). Defaults to 3.
+dummy_text | **Optional.** Plugin output. Defaults to "No Passive Check Result Received.".
+
+### random <a id="itl-random"></a>
+
+Check command for the built-in `random` check. This check returns random states
+and adds the check source to the check output.
+
+For test and demo purposes only. The `random` check command does not support
+any vars.
+
+### exception <a id="itl-exception"></a>
+
+Check command for the built-in `exception` check. This check throws an exception.
+
+For test and demo purposes only. The `exception` check command does not support
+any vars.
+
+### sleep <a id="itl-sleep"></a>
+
+Check command for the built-in `sleep` check. This allows using sleep
+for testing and debugging purposes only.
+
+Name | Description
+----------------|--------------
+sleep\_time | **Optional.** The duration of the sleep in seconds. Defaults to 1s.
+
+### ifw-api <a id="itl-ifw-api"></a>
+
+Built-in check command for executing arbitrary PowerShell check commands via the
+[Icinga for Windows REST API](https://icinga.com/docs/icinga-for-windows/latest/doc/110-Installation/30-API-Check-Forwarder/).
+Consult that documentation for why and how to optimally use the `ifw-api`
+command as an addon for existing Icinga clusters with Icinga for Windows.
+
+In short, that feature lets the PowerShell processes spawned by Icinga just
+talk to the pre-loaded IfW API instead of loading all PowerShell check commands
+themselves on every check. In contrast, the `ifw-api` command doesn't spawn
+any process at all, but communicates directly with the IfW API.
+
+It may also be used similarly to e.g. [check_by_ssh](#plugin-check-command-by-ssh).
+Its custom variables provide high flexibility,
+from using a custom CA to controlling the IfW API directly from a Linux satellite.
+
+Optional custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+| Name | Default | Description |
+|-------------------------|-------------------|-------------------------------------------------------------------------------------------------------------|
+| ifw\_api\_command | `$command.name$` | Command to run. |
+| ifw\_api\_arguments | {} (none) | Arguments for the command, similar to [CheckCommand](09-object-types.md#objecttype-checkcommand)#arguments. |
+| ifw\_api\_host | null (localhost) | IfW API host. |
+| ifw\_api\_port | 5668 | IfW API port. |
+| ifw\_api\_expected\_san | `$ifw_api_host$` | Peer TLS certificate SAN (and SNI). null means agent NodeName. |
+| ifw\_api\_cert | null (Icinga PKI) | TLS client certificate path. |
+| ifw\_api\_key | null (Icinga PKI) | TLS client private key path. |
+| ifw\_api\_ca | null (Icinga PKI) | Peer TLS CA certificate path. |
+| ifw\_api\_crl | null (none) | Path to TLS CRL to check peer against. |
+| ifw\_api\_username | null (none) | Basic auth username. |
+| ifw\_api\_password | null (none) | Basic auth password. |
+
+!!! info
+
+ Due to how Icinga 2 resolves macros and serializes the resolved values for
+ sending to a command endpoint (if any), ifw\_api\_arguments may not directly
+ contain functions for the case `ifw-api` is used with command endpoints. Only
+ macro strings referring to custom variables which are set to functions work.
+
+#### Remarks
+
+* `$command.name$` is resolved at runtime to the name of the specific
+  check command being run and not to any of the templates it imports, i.e. it
+  becomes e.g. "Invoke-IcingaCheckCPU" if "ifw-api" is imported there (see the sketch below)
+* `ifw-api` connects to localhost (if ifw\_api\_host is null), but expects
+ the peer to identify itself via TLS with the NodeName of the endpoint
+ actually running the command (if ifw\_api\_expected\_san is null)
+* The actual values of ifw\_api\_cert, ifw\_api\_key, ifw\_api\_ca and ifw\_api\_crl
+ are also resolved to the Icinga PKI on the command endpoint if null
+
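+A minimal sketch illustrating the first remark: a check command named after the IfW command it should run, importing `ifw-api`:
+
+```
+object CheckCommand "Invoke-IcingaCheckCPU" {
+  import "ifw-api"
+
+  // ifw_api_command defaults to $command.name$ and therefore
+  // resolves to "Invoke-IcingaCheckCPU" here.
+}
+```
+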
+<!-- keep this anchor for URL link history only -->
+<a id="plugin-check-commands"></a>
+
+## Plugin Check Commands for Monitoring Plugins <a id="plugin-check-commands-monitoring-plugins"></a>
+
+The Plugin Check Commands provide example configuration for plugin check commands
+provided by the [Monitoring Plugins](https://www.monitoring-plugins.org) project.
+
+By default the Plugin Check Commands are included in the [icinga2.conf](04-configuration.md#icinga2-conf) configuration
+file:
+
+ include <plugins>
+
+The plugin check commands assume that there's a global constant named `PluginDir`
+which contains the path of the plugins from the Monitoring Plugins project.
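+
+`PluginDir` is usually defined in `constants.conf`; a sketch with an assumed plugin path:
+
+```
+/* constants.conf -- path assumed for illustration, adjust to your distribution. */
+const PluginDir = "/usr/lib/nagios/plugins"
+```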
+
+> **Note**:
+>
+> Please be aware that the CheckCommand definitions are based on the [Monitoring Plugins](https://www.monitoring-plugins.org); other plugin collections might not support
+> all parameters. If command parameters are missing from the provided CheckCommand definitions, please kindly send a patch upstream.
+> This should include an update for the ITL CheckCommand itself and this documentation section.
+
+### apt <a id="plugin-check-command-apt"></a>
+
+The plugin [apt](https://www.monitoring-plugins.org/doc/man/check_apt.html) checks for software updates on systems that use
+package management systems based on the apt-get(8) command found in Debian based systems.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+apt_extra_opts | **Optional.** Read options from an ini file.
+apt_upgrade | **Optional.** [Default] Perform an upgrade. If an optional OPTS argument is provided, apt-get will be run with these command line options instead of the default.
+apt_dist_upgrade        | **Optional.** Perform a dist-upgrade instead of a normal upgrade. As with `-U`, OPTS can be provided to override the default options.
+apt_include             | **Optional.** Include only packages matching REGEXP. Can be specified multiple times; the values will be combined.
+apt_exclude | **Optional.** Exclude packages matching REGEXP from the list of packages that would otherwise be included. Can be specified multiple times.
+apt_critical | **Optional.** If the full package information of any of the upgradable packages match this REGEXP, the plugin will return CRITICAL status. Can be specified multiple times.
+apt_timeout | **Optional.** Seconds before plugin times out (default: 10).
+apt_only_critical | **Optional.** Only warn about critical upgrades.
+apt_list | **Optional.** List packages available for upgrade.
+
+
+### breeze <a id="plugin-check-command-breeze"></a>
+
+The [check_breeze](https://www.monitoring-plugins.org/doc/man/check_breeze.html) plugin reports the signal
+strength of Breezecom wireless equipment.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-----------------|---------------------------------
+breeze_hostname | **Required.** Name or IP address of host to check. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+breeze_community | **Optional.** SNMPv1 community. Defaults to "public".
+breeze_warning | **Required.** Percentage strength below which a WARNING status will result. Defaults to 50.
+breeze_critical  | **Required.** Percentage strength below which a CRITICAL status will result. Defaults to 20.
+
+
+### by_ssh <a id="plugin-check-command-by-ssh"></a>
+
+The [check_by_ssh](https://www.monitoring-plugins.org/doc/man/check_by_ssh.html) plugin uses SSH to execute
+commands on a remote host.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+---------------- | --------------
+by_ssh_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+by_ssh_port | **Optional.** The SSH port. Defaults to 22.
+by_ssh_command | **Required.** The command that should be executed. Can be an array if multiple arguments should be passed to `check_by_ssh`.
+by_ssh_arguments | **Optional.** A dictionary with arguments for the command. This works exactly like the 'arguments' dictionary for ordinary CheckCommands.
+by_ssh_logname | **Optional.** The SSH username.
+by_ssh_identity | **Optional.** The SSH identity.
+by_ssh_quiet | **Optional.** Whether to suppress SSH warnings. Defaults to false.
+by_ssh_warn | **Optional.** The warning threshold.
+by_ssh_crit | **Optional.** The critical threshold.
+by_ssh_timeout | **Optional.** The timeout in seconds.
+by_ssh_options | **Optional.** Call ssh with '-o OPTION' (multiple options may be specified as an array).
+by_ssh_ipv4 | **Optional.** Use IPv4 connection. Defaults to false.
+by_ssh_ipv6 | **Optional.** Use IPv6 connection. Defaults to false.
+by_ssh_skip_stderr | **Optional.** Ignore all or (if specified) first n lines on STDERR.
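+
+A sketch running a plugin on a remote Linux host over SSH (remote plugin path, thresholds, user and key path assumed for illustration):
+
+```
+apply Service "ssh-load" {
+  check_command = "by_ssh"
+
+  // Assumed remote plugin path and thresholds for illustration.
+  vars.by_ssh_command = [ "/usr/lib/nagios/plugins/check_load", "-w", "5,4,3", "-c", "10,8,6" ]
+  vars.by_ssh_logname = "icinga"
+  vars.by_ssh_identity = "/var/lib/icinga2/.ssh/id_ed25519"
+
+  assign where host.vars.os == "Linux"
+}
+```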
+
+
+### clamd <a id="plugin-check-command-clamd"></a>
+
+The [check_clamd](https://www.monitoring-plugins.org/doc/man/check_clamd.html) plugin tests CLAMD
+connections with the specified host (or unix socket).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-------------------|--------------
+clamd_address | **Required.** The host's address or unix socket (must be an absolute path).
+clamd_port | **Optional.** Port number (default: none).
+clamd_expect | **Optional.** String to expect in server response. Multiple strings must be defined as array.
+clamd_all | **Optional.** All expect strings need to occur in server response. Defaults to false.
+clamd_escape_send | **Optional.** Enable usage of \\n, \\r, \\t or \\\\ in send string.
+clamd_send | **Optional.** String to send to the server.
+clamd_escape_quit | **Optional.** Enable usage of \\n, \\r, \\t or \\\\ in quit string.
+clamd_quit | **Optional.** String to send server to initiate a clean close of the connection.
+clamd_refuse | **Optional.** Accept TCP refusals with states ok, warn, crit. Defaults to crit.
+clamd_mismatch | **Optional.** Accept expected string mismatches with states ok, warn, crit. Defaults to warn.
+clamd_jail | **Optional.** Hide output from TCP socket.
+clamd_maxbytes | **Optional.** Close connection once more than this number of bytes are received.
+clamd_delay | **Optional.** Seconds to wait between sending string and polling for response.
+clamd_certificate | **Optional.** Minimum number of days a certificate has to be valid. 1st value is number of days for warning, 2nd is critical (if not specified: 0) -- separated by comma.
+clamd_ssl | **Optional.** Use SSL for the connection. Defaults to false.
+clamd_wtime | **Optional.** Response time to result in warning status (seconds).
+clamd_ctime | **Optional.** Response time to result in critical status (seconds).
+clamd_timeout | **Optional.** Seconds before connection times out. Defaults to 10.
+clamd_ipv4 | **Optional.** Use IPv4 connection. Defaults to false.
+clamd_ipv6 | **Optional.** Use IPv6 connection. Defaults to false.
+
+
+### dhcp <a id="plugin-check-command-dhcp"></a>
+
+The [check_dhcp](https://www.monitoring-plugins.org/doc/man/check_dhcp.html) plugin
+tests the availability of DHCP servers on a network.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+dhcp_serverip | **Optional.** The IP address of the DHCP server which we should get a response from.
+dhcp_requestedip| **Optional.** The IP address which we should be offered by a DHCP server.
+dhcp_timeout | **Optional.** The timeout in seconds.
+dhcp_interface | **Optional.** The interface to use.
+dhcp_mac | **Optional.** The MAC address to use in the DHCP request.
+dhcp_unicast | **Optional.** Whether to use unicast requests. Defaults to false.
+
+
+### dig <a id="plugin-check-command-dig"></a>
+
+The [check_dig](https://www.monitoring-plugins.org/doc/man/check_dig.html) plugin
+tests the DNS service on the specified host using dig.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+---------------------|--------------
+dig_server | **Optional.** The DNS server to query. Defaults to "127.0.0.1".
+dig_port | **Optional.** Port number (default: 53).
+dig_lookup | **Required.** The address that should be looked up.
+dig_record_type | **Optional.** Record type to lookup (default: A).
+dig_expected_address | **Optional.** An address expected to be in the answer section. If not set, uses whatever was in -l.
+dig_arguments | **Optional.** Pass STRING as argument(s) to dig.
+dig_retries | **Optional.** Number of retries passed to dig, timeout is divided by this value (Default: 3).
+dig_warning | **Optional.** Response time to result in warning status (seconds).
+dig_critical | **Optional.** Response time to result in critical status (seconds).
+dig_timeout | **Optional.** Seconds before connection times out (default: 10).
+dig_ipv4 | **Optional.** Force dig to only use IPv4 query transport. Defaults to false.
+dig_ipv6 | **Optional.** Force dig to only use IPv6 query transport. Defaults to false.
+
+
+### disk <a id="plugin-check-command-disk"></a>
+
+The [check_disk](https://www.monitoring-plugins.org/doc/man/check_disk.html) plugin
+checks the amount of used disk space on a mounted file system and generates an alert
+if free space is less than one of the threshold values.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------|------------------------
+disk\_wfree | **Optional.** The free space warning threshold. Defaults to "20%". If the percent sign is omitted, units from `disk_units` are used.
+disk\_cfree | **Optional.** The free space critical threshold. Defaults to "10%". If the percent sign is omitted, units from `disk_units` are used.
+disk\_inode\_wfree | **Optional.** The free inode warning threshold.
+disk\_inode\_cfree | **Optional.** The free inode critical threshold.
+disk\_partition | **Optional.** The partition. **Deprecated in 2.3.**
+disk\_partition\_excluded | **Optional.** The excluded partition. **Deprecated in 2.3.**
+disk\_partitions | **Optional.** The partition(s). Multiple partitions must be defined as array.
+disk\_partitions\_excluded | **Optional.** The excluded partition(s). Multiple partitions must be defined as array.
+disk\_clear | **Optional.** Clear thresholds. May be true or false.
+disk\_exact\_match | **Optional.** For paths or partitions specified with -p, only check for exact paths. May be true or false.
+disk\_errors\_only | **Optional.** Display only devices/mountpoints with errors. May be true or false.
+disk\_ignore\_reserved | **Optional.** If set, root-reserved blocks are not accounted for in the free space perfdata. May be true or false.
+disk\_group | **Optional.** Group paths. Thresholds apply to (free-)space of all partitions together.
+disk\_kilobytes | **Optional.** Same as --units kB. May be true or false.
+disk\_local | **Optional.** Only check local filesystems. May be true or false.
+disk\_stat\_remote\_fs | **Optional.** Only check local filesystems against thresholds, but still call stat on remote filesystems to test whether they are accessible (e.g. to detect stale NFS handles). May be true or false.
+disk\_mountpoint | **Optional.** Display the mountpoint instead of the partition. May be true or false.
+disk\_megabytes | **Optional.** Same as --units MB. May be true or false.
+disk\_all | **Optional.** Explicitly select all paths. This is equivalent to -R '.\*'. May be true or false.
+disk\_eregi\_path | **Optional.** Case insensitive regular expression for path/partition. Multiple regular expression strings must be defined as array.
+disk\_ereg\_path | **Optional.** Regular expression for path or partition. Multiple regular expression strings must be defined as array.
+disk\_ignore\_eregi\_path | **Optional.** Regular expression to ignore selected path/partition (case insensitive). Multiple regular expression strings must be defined as array.
+disk\_ignore\_ereg\_path | **Optional.** Regular expression to ignore selected path or partition. Multiple regular expression strings must be defined as array.
+disk\_timeout | **Optional.** Seconds before connection times out (default: 10).
+disk\_units | **Optional.** Choose bytes, kB, MB, GB, TB.
+disk\_exclude\_type | **Optional.** Ignore all filesystems of indicated type. Multiple regular expression strings must be defined as array. Defaults to "none", "tmpfs", "sysfs", "proc", "configfs", "devtmpfs", "devfs", "mtmfs", "tracefs", "cgroup", "fuse.gvfsd-fuse", "fuse.gvfs-fuse-daemon", "fdescfs", "overlay", "nsfs", "squashfs".
+disk\_include\_type | **Optional.** Check only filesystems of indicated type. Multiple regular expression strings must be defined as array.
+disk\_inode\_perfdata | **Optional.** Display inode usage in perfdata.
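+
+A sketch checking the root filesystem with explicit thresholds (partition and threshold values assumed for illustration):
+
+```
+apply Service "disk /" {
+  check_command = "disk"
+
+  // Assumed thresholds and partition for illustration.
+  vars.disk_wfree = "10%"
+  vars.disk_cfree = "5%"
+  vars.disk_partitions = [ "/" ]
+
+  assign where host.vars.os == "Linux"
+}
+```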
+
+### disk_smb <a id="plugin-check-command-disk-smb"></a>
+
+The [check_disk_smb](https://www.monitoring-plugins.org/doc/man/check_disk_smb.html) plugin
+uses the `smbclient` binary to check SMB shares.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|------------------------
+disk_smb_hostname | **Required.** NetBIOS name of the server.
+disk_smb_share | **Required.** Share name being queried.
+disk_smb_workgroup | **Optional.** Workgroup or Domain used (defaults to 'WORKGROUP' if omitted).
+disk_smb_address | **Optional.** IP address of the host (only necessary if host belongs to another network).
+disk_smb_username | **Optional.** Username for server log-in (defaults to 'guest' if omitted).
+disk_smb_password | **Optional.** Password for server log-in (defaults to an empty password if omitted).
+disk_smb_wused | **Optional.** The used space warning threshold. Defaults to "85%". If the percent sign is omitted, use optional disk units.
+disk_smb_cused | **Optional.** The used space critical threshold. Defaults to "95%". If the percent sign is omitted, use optional disk units.
+disk_smb_port | **Optional.** Connection port, e.g. `139` or `445`. Defaults to `smbclient` default if omitted.
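+
+An illustrative service definition (NetBIOS name, share and thresholds are placeholders):
+
+```
+object Service "smb-share" {
+  import "generic-service"              // template from the sample configuration
+  host_name = "example-host"            // hypothetical host
+  check_command = "disk_smb"
+  vars.disk_smb_hostname = "fileserver" // placeholder NetBIOS name
+  vars.disk_smb_share = "backup"        // placeholder share name
+  vars.disk_smb_wused = "80%"
+  vars.disk_smb_cused = "90%"
+}
+```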
+
+### dns <a id="plugin-check-command-dns"></a>
+
+The [check_dns](https://www.monitoring-plugins.org/doc/man/check_dns.html) plugin
+uses the nslookup program to obtain the IP address for the given host/domain query.
+An optional DNS server to use may be specified. If no DNS server is specified, the
+default server(s) specified in `/etc/resolv.conf` will be used.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+---------------------|--------------
+dns_lookup | **Optional.** The hostname or IP to query the DNS for. Defaults to "$host_name$".
+dns_server | **Optional.** The DNS server to query. Defaults to the server configured in the OS.
+dns_query_type | **Optional.** The DNS record query type (A, AAAA, SRV, TXT, MX or ANY). The default query type is 'A' (IPv4 host entry). **Only supported by the Nagios plugins version of check\_dns, not by the monitoring plugins one.**
+dns_expected_answers | **Optional.** The answer(s) to look for. A hostname must end with a dot. Format depends on the monitoring-plugins version: In version 2.2 and before, a single string with the values alphabetically ordered and joined by commas. In version 2.3 and later, multiple answers must be defined as array.
+dns_all_expected | **Optional.** Denotes whether to require all values passed in `dns_expected_answers` to pass, or at least one. Only supported in newer versions of monitoring-plugins (2.3 and later), and is needed in such versions to replicate behaviour of previous versions of the plugins.
+dns_authoritative | **Optional.** Expect the server to send an authoritative answer.
+dns_accept_cname | **Optional.** Accept cname responses as a valid result to a query.
+dns_wtime | **Optional.** Return warning if elapsed time exceeds value.
+dns_ctime | **Optional.** Return critical if elapsed time exceeds value.
+dns_timeout | **Optional.** Seconds before connection times out. Defaults to 10.
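+
+A hedged example, assuming a hypothetical host and illustrative record data:
+
+```
+object Service "dns" {
+  import "generic-service"                  // template from the sample configuration
+  host_name = "example-host"                // hypothetical host
+  check_command = "dns"
+  vars.dns_lookup = "www.example.com"       // illustrative record to resolve
+  vars.dns_server = "192.0.2.53"            // illustrative DNS server
+  // with monitoring-plugins 2.3 and later, multiple answers are passed as an array
+  vars.dns_expected_answers = [ "192.0.2.80", "192.0.2.81" ]
+}
+```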
+
+
+
+### file_age <a id="plugin-check-command-file-age"></a>
+
+The [check_file_age](https://www.monitoring-plugins.org/doc/man/check_file_age.html) plugin
+checks a file's size and modification time to make sure it's not empty and that it's sufficiently recent.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-----------------------|--------------------------------------------------------------------------------------------------------
+file_age_file | **Required.** File to monitor.
+file_age_warning_time | **Optional.** File must be no more than this many seconds old as warning threshold. Defaults to "240s".
+file_age_critical_time | **Optional.** File must be no more than this many seconds old as critical threshold. Defaults to "600s".
+file_age_warning_size | **Optional.** File must be at least this many bytes long as warning threshold. No default given.
+file_age_critical_size | **Optional.** File must be at least this many bytes long as critical threshold. Defaults to "0B".
+file_age_ignoremissing | **Optional.** Return OK if the file does not exist. Defaults to false.
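+
+For example, to watch a nightly dump file (path and thresholds are illustrative):
+
+```
+object Service "backup-age" {
+  import "generic-service"                     // template from the sample configuration
+  host_name = "example-host"                   // hypothetical host
+  check_command = "file_age"
+  vars.file_age_file = "/var/backups/dump.sql" // illustrative file path
+  vars.file_age_warning_time = 86400           // WARNING if older than one day
+  vars.file_age_critical_time = 172800         // CRITICAL if older than two days
+}
+```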
+
+
+### flexlm <a id="plugin-check-command-flexlm"></a>
+
+The [check_flexlm](https://www.monitoring-plugins.org/doc/man/check_flexlm.html) plugin
+checks available flexlm license managers. Requires the `lmstat` command.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-------------------|----------------------------------------------------------
+flexlm_licensefile | **Required.** Name of license file (usually license.dat).
+flexlm_timeout | **Optional.** Plugin time out in seconds. Defaults to 15.
+
+
+### fping4 <a id="plugin-check-command-fping4"></a>
+
+The [check_fping](https://www.monitoring-plugins.org/doc/man/check_fping.html) plugin
+uses the `fping` command to ping the specified host for a fast check. Note that it is
+necessary to set the `suid` flag on `fping`.
+
+This CheckCommand expects an IPv4 address.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+fping_address | **Optional.** The host's IPv4 address. Defaults to "$address$".
+fping_wrta | **Optional.** The RTA warning threshold in milliseconds. Defaults to 100.
+fping_wpl | **Optional.** The packet loss warning threshold in %. Defaults to 5.
+fping_crta | **Optional.** The RTA critical threshold in milliseconds. Defaults to 200.
+fping_cpl | **Optional.** The packet loss critical threshold in %. Defaults to 15.
+fping_number | **Optional.** The number of packets to send. Defaults to 5.
+fping_interval | **Optional.** The interval between packets in milliseconds. Defaults to 500.
+fping_bytes | **Optional.** The size of the ICMP packet.
+fping_target_timeout | **Optional.** The target timeout in milliseconds.
+fping_source_ip | **Optional.** The name or IP address of the source IP.
+fping_source_interface | **Optional.** The source interface name.
+
+
+### fping6 <a id="plugin-check-command-fping6"></a>
+
+The [check_fping](https://www.monitoring-plugins.org/doc/man/check_fping.html) plugin
+will use the `fping` command to ping the specified host for a fast check. Note that it is
+necessary to set the `suid` flag on `fping`.
+
+This CheckCommand expects an IPv6 address.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+fping_address | **Optional.** The host's IPv6 address. Defaults to "$address6$".
+fping_wrta | **Optional.** The RTA warning threshold in milliseconds. Defaults to 100.
+fping_wpl | **Optional.** The packet loss warning threshold in %. Defaults to 5.
+fping_crta | **Optional.** The RTA critical threshold in milliseconds. Defaults to 200.
+fping_cpl | **Optional.** The packet loss critical threshold in %. Defaults to 15.
+fping_number | **Optional.** The number of packets to send. Defaults to 5.
+fping_interval | **Optional.** The interval between packets in milliseconds. Defaults to 500.
+fping_bytes | **Optional.** The size of the ICMP packet.
+fping_target_timeout | **Optional.** The target timeout in milliseconds.
+fping_source_ip | **Optional.** The name or IP address of the source IP.
+fping_source_interface | **Optional.** The source interface name.
+
+
+### ftp <a id="plugin-check-command-ftp"></a>
+
+The [check_ftp](https://www.monitoring-plugins.org/doc/man/check_ftp.html) plugin
+tests FTP connections with the specified host (or unix socket).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-------------------|--------------
+ftp_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+ftp_port | **Optional.** The FTP port number.
+ftp_expect | **Optional.** String to expect in server response. Multiple strings must be defined as array.
+ftp_all | **Optional.** All expect strings need to occur in server response. Defaults to false.
+ftp_escape_send | **Optional.** Enable usage of \\n, \\r, \\t or \\\\ in send string.
+ftp_send | **Optional.** String to send to the server.
+ftp_escape_quit | **Optional.** Enable usage of \\n, \\r, \\t or \\\\ in quit string.
+ftp_quit | **Optional.** String to send server to initiate a clean close of the connection.
+ftp_refuse | **Optional.** Accept TCP refusals with states ok, warn, crit. Defaults to crit.
+ftp_mismatch | **Optional.** Accept expected string mismatches with states ok, warn, crit. Defaults to warn.
+ftp_jail | **Optional.** Hide output from TCP socket.
+ftp_maxbytes | **Optional.** Close connection once more than this number of bytes are received.
+ftp_delay | **Optional.** Seconds to wait between sending string and polling for response.
+ftp_certificate | **Optional.** Minimum number of days a certificate has to be valid. 1st value is number of days for warning, 2nd is critical (if not specified: 0) -- separated by comma.
+ftp_ssl | **Optional.** Use SSL for the connection. Defaults to false.
+ftp_wtime | **Optional.** Response time to result in warning status (seconds).
+ftp_ctime | **Optional.** Response time to result in critical status (seconds).
+ftp_timeout | **Optional.** Seconds before connection times out. Defaults to 10.
+ftp_ipv4 | **Optional.** Use IPv4 connection. Defaults to false.
+ftp_ipv6 | **Optional.** Use IPv6 connection. Defaults to false.
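+
+A short sketch checking the FTP greeting and response time (values are illustrative):
+
+```
+object Service "ftp" {
+  import "generic-service"      // template from the sample configuration
+  host_name = "example-host"    // hypothetical host
+  check_command = "ftp"
+  vars.ftp_expect = [ "220" ]   // expect the FTP greeting code in the response
+  vars.ftp_wtime = 2            // WARNING above 2 seconds response time
+  vars.ftp_ctime = 5            // CRITICAL above 5 seconds
+}
+```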
+
+
+### game <a id="plugin-check-command-game"></a>
+
+The [check_game](https://www.monitoring-plugins.org/doc/man/check_game.html) plugin
+tests game server connections with the specified host.
+This plugin uses the 'qstat' command, the popular game server status query tool.
+If you don't have it installed, you will need to [download](http://www.activesw.com/people/steve/qstat.html) it
+or install the `quakestat` package before you can use this plugin.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-------------------|-------------------
+game_game | **Required.** Name of the game.
+game_ipaddress | **Required.** IP address of the game server to query.
+game_timeout | **Optional.** Seconds before connection times out. Defaults to 10.
+game_port | **Optional.** Port to connect to.
+game_gamefield | **Optional.** Field number in raw qstat output that contains game name.
+game_mapfield | **Optional.** Field number in raw qstat output that contains map name.
+game_pingfield | **Optional.** Field number in raw qstat output that contains ping time.
+game_gametime | **Optional.** Field number in raw qstat output that contains game time.
+game_hostname | **Optional.** Name of the host running the game.
+
+
+### hostalive <a id="plugin-check-command-hostalive"></a>
+
+Check command object for the [check_ping](https://www.monitoring-plugins.org/doc/man/check_ping.html)
+plugin with host check default values. This variant uses the host's `address` attribute
+if available and falls back to using the `address6` attribute if the `address` attribute is not set.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+ping_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+ping_wrta | **Optional.** The RTA warning threshold in milliseconds. Defaults to 3000.
+ping_wpl | **Optional.** The packet loss warning threshold in %. Defaults to 80.
+ping_crta | **Optional.** The RTA critical threshold in milliseconds. Defaults to 5000.
+ping_cpl | **Optional.** The packet loss critical threshold in %. Defaults to 100.
+ping_packets | **Optional.** The number of packets to send. Defaults to 5.
+ping_timeout | **Optional.** The plugin timeout in seconds. Defaults to 0 (no timeout).
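+
+A sketch of a host object using this check command (address and thresholds are illustrative):
+
+```
+object Host "example-host" {
+  import "generic-host"        // template from the sample configuration
+  check_command = "hostalive"
+  address = "192.0.2.10"       // illustrative IPv4 address
+  vars.ping_wrta = 150         // tighten the RTA warning threshold (ms)
+  vars.ping_wpl = 20           // tighten the packet loss warning threshold (%)
+}
+```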
+
+
+### hostalive4 <a id="plugin-check-command-hostalive4"></a>
+
+Check command object for the [check_ping](https://www.monitoring-plugins.org/doc/man/check_ping.html)
+plugin with host check default values. This variant uses the host's `address` attribute.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+ping_address | **Optional.** The host's IPv4 address. Defaults to "$address$".
+ping_wrta | **Optional.** The RTA warning threshold in milliseconds. Defaults to 3000.
+ping_wpl | **Optional.** The packet loss warning threshold in %. Defaults to 80.
+ping_crta | **Optional.** The RTA critical threshold in milliseconds. Defaults to 5000.
+ping_cpl | **Optional.** The packet loss critical threshold in %. Defaults to 100.
+ping_packets | **Optional.** The number of packets to send. Defaults to 5.
+ping_timeout | **Optional.** The plugin timeout in seconds. Defaults to 0 (no timeout).
+
+
+### hostalive6 <a id="plugin-check-command-hostalive6"></a>
+
+Check command object for the [check_ping](https://www.monitoring-plugins.org/doc/man/check_ping.html)
+plugin with host check default values. This variant uses the host's `address6` attribute.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+ping_address | **Optional.** The host's IPv6 address. Defaults to "$address6$".
+ping_wrta | **Optional.** The RTA warning threshold in milliseconds. Defaults to 3000.
+ping_wpl | **Optional.** The packet loss warning threshold in %. Defaults to 80.
+ping_crta | **Optional.** The RTA critical threshold in milliseconds. Defaults to 5000.
+ping_cpl | **Optional.** The packet loss critical threshold in %. Defaults to 100.
+ping_packets | **Optional.** The number of packets to send. Defaults to 5.
+ping_timeout | **Optional.** The plugin timeout in seconds. Defaults to 0 (no timeout).
+
+
+### hpjd <a id="plugin-check-command-hpjd"></a>
+
+The [check_hpjd](https://www.monitoring-plugins.org/doc/man/check_hpjd.html) plugin
+tests the state of an HP printer with a JetDirect card. Net-snmp must be installed
+on the computer running the plugin.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+hpjd_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+hpjd_port | **Optional.** The host's SNMP port. Defaults to 161.
+hpjd_community | **Optional.** The SNMP community. Defaults to "public".
+
+
+### http <a id="plugin-check-command-http"></a>
+
+The [check_http](https://www.monitoring-plugins.org/doc/man/check_http.html) plugin
+tests the HTTP service on the specified host. It can test normal (http) and secure
+(https) servers, follow redirects, search for strings and regular expressions,
+check connection times, and report on certificate expiration times.
+
+The plugin can either test the HTTP response of a server, or if `http_certificate` is set to a non-empty value, the TLS certificate age for an HTTPS host.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+---------------------------------|---------------------------------
+http_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+http_vhost | **Optional.** The virtual host that should be sent in the "Host" header.
+http_uri | **Optional.** The request URI for GET or POST. Defaults to `/`.
+http_port | **Optional.** The TCP port. Defaults to 80 when not using SSL, 443 otherwise.
+http_ssl | **Optional.** Whether to use SSL. Defaults to false.
+http_ssl_force_tlsv1 | **Optional.** Whether to force TLSv1.
+http_ssl_force_tlsv1_1 | **Optional.** Whether to force TLSv1.1.
+http_ssl_force_tlsv1_2 | **Optional.** Whether to force TLSv1.2.
+http_ssl_force_sslv2 | **Optional.** Whether to force SSLv2.
+http_ssl_force_sslv3 | **Optional.** Whether to force SSLv3.
+http_ssl_force_tlsv1_or_higher | **Optional.** Whether to force TLSv1 or higher.
+http_ssl_force_tlsv1_1_or_higher | **Optional.** Whether to force TLSv1.1 or higher.
+http_ssl_force_tlsv1_2_or_higher | **Optional.** Whether to force TLSv1.2 or higher.
+http_ssl_force_sslv2_or_higher | **Optional.** Whether to force SSLv2 or higher.
+http_ssl_force_sslv3_or_higher | **Optional.** Whether to force SSLv3 or higher.
+http_sni | **Optional.** Whether to use SNI. Defaults to false.
+http_auth_pair | **Optional.** Add 'username:password' authorization pair.
+http_proxy_auth_pair | **Optional.** Add 'username:password' authorization pair for proxy.
+http_ignore_body | **Optional.** Don't download the body, just the headers.
+http_linespan | **Optional.** Allow regex to span newline.
+http_expect_body_regex | **Optional.** A regular expression which the body must match against. Incompatible with http_ignore_body.
+http_expect_body_eregi | **Optional.** A case-insensitive expression which the body must match against. Incompatible with http_ignore_body.
+http_invertregex | **Optional.** Changes behavior of http_expect_body_regex and http_expect_body_eregi to return CRITICAL if found, OK if not.
+http_warn_time | **Optional.** The warning threshold.
+http_critical_time | **Optional.** The critical threshold.
+http_expect | **Optional.** Comma-delimited list of strings, at least one of them is expected in the first (status) line of the server response. Default: HTTP/1.
+http_certificate | **Optional.** Minimum number of days a certificate has to be valid. Port defaults to 443. When this option is used the URL is not checked. The first parameter defines the warning threshold (in days), the second parameter the critical threshold (in days). (Example `http_certificate = "30,20"`).
+http_clientcert | **Optional.** Name of the file that contains the client certificate (PEM format).
+http_privatekey | **Optional.** Name of the file that contains the private key (PEM format).
+http_headerstring | **Optional.** String to expect in the response headers.
+http_string | **Optional.** String to expect in the content.
+http_post | **Optional.** URL encoded http POST data.
+http_method | **Optional.** Set http method (for example: HEAD, OPTIONS, TRACE, PUT, DELETE).
+http_maxage | **Optional.** Warn if the document is more than the given number of seconds old.
+http_contenttype | **Optional.** Specify Content-Type header when POSTing.
+http_useragent | **Optional.** String to be sent in http header as User Agent.
+http_header | **Optional.** Any other tags to be sent in http header. Can be an array if multiple headers should be passed to `check_http`.
+http_extendedperfdata | **Optional.** Print additional perfdata. Defaults to false.
+http_onredirect | **Optional.** How to handle redirect pages. Possible values: "ok" (default), "warning", "critical", "follow", "sticky" (like follow but stick to address), "stickyport" (like sticky but also to port)
+http_pagesize | **Optional.** Minimum page size required, optionally followed by a colon and the maximum page size (format: `minimum:maximum`, in bytes).
+http_timeout | **Optional.** Seconds before connection times out.
+http_ipv4 | **Optional.** Use IPv4 connection. Defaults to false.
+http_ipv6 | **Optional.** Use IPv6 connection. Defaults to false.
+http_link | **Optional.** Wrap output in HTML link. Defaults to false.
+http_verbose | **Optional.** Show details for command-line debugging. Defaults to false.
+http_verify_host | **Optional.** Verify SSL certificate is for the -H hostname (with --sni and -S). Defaults to false. **Only supported by the Nagios plugins version of check\_http, not by the monitoring plugins one.**
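+
+Two illustrative sketches, one content check and one certificate-only check (host names, URIs and thresholds are placeholders):
+
+```
+object Service "http-content" {
+  import "generic-service"             // template from the sample configuration
+  host_name = "example-web"            // hypothetical host
+  check_command = "http"
+  vars.http_vhost = "www.example.com"  // illustrative virtual host
+  vars.http_uri = "/health"            // illustrative URI
+  vars.http_string = "OK"              // string expected in the response body
+  vars.http_ssl = true
+}
+
+object Service "https-certificate" {
+  import "generic-service"
+  host_name = "example-web"
+  check_command = "http"
+  vars.http_vhost = "www.example.com"
+  vars.http_certificate = "30,14"      // WARNING below 30 days, CRITICAL below 14 days
+}
+```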
+
+
+### icmp <a id="plugin-check-command-icmp"></a>
+
+The [check_icmp](https://www.monitoring-plugins.org/doc/man/check_icmp.html) plugin
+allows checking multiple hosts at once, unlike `check_ping`.
+The main difference is that `check_ping` executes the system's ping(1) command and
+parses its output, while `check_icmp` talks ICMP itself. `check_icmp` must be installed with
+`setuid` root.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+icmp_address | **Optional.** The host's address. This can either be a single address or an array of addresses. Defaults to "$address$".
+icmp_wrta | **Optional.** The RTA warning threshold in milliseconds. Defaults to 100.
+icmp_wpl | **Optional.** The packet loss warning threshold in %. Defaults to 10.
+icmp_crta | **Optional.** The RTA critical threshold in milliseconds. Defaults to 200.
+icmp_cpl | **Optional.** The packet loss critical threshold in %. Defaults to 30.
+icmp_source | **Optional.** The source IP address to send packets from.
+icmp_packets | **Optional.** The number of packets to send. Defaults to 5.
+icmp_packet_interval | **Optional.** The maximum packet interval. Defaults to 80 (milliseconds).
+icmp_target_interval | **Optional.** The maximum target interval.
+icmp_hosts_alive | **Optional.** The number of hosts which have to be alive for the check to succeed.
+icmp_data_bytes | **Optional.** Payload size for each ICMP request. Defaults to 8.
+icmp_timeout | **Optional.** The plugin timeout in seconds. Defaults to 10 (seconds).
+icmp_ttl | **Optional.** The TTL on outgoing packets.
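+
+An illustrative sketch that pings several targets in a single check (addresses are placeholders):
+
+```
+object Service "gateway-reachability" {
+  import "generic-service"        // template from the sample configuration
+  host_name = "example-host"      // hypothetical host the service is attached to
+  check_command = "icmp"
+  // an array checks multiple targets at once
+  vars.icmp_address = [ "192.0.2.1", "192.0.2.2" ]
+  vars.icmp_hosts_alive = 1       // OK as long as at least one target answers
+}
+```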
+
+
+### imap <a id="plugin-check-command-imap"></a>
+
+The [check_imap](https://www.monitoring-plugins.org/doc/man/check_imap.html) plugin
+tests IMAP connections with the specified host (or unix socket).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------------|--------------
+imap_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+imap_port | **Optional.** The port that should be checked. Defaults to 143.
+imap_escape | **Optional.** Can use \\n, \\r, \\t or \\ in send or quit string. Must come before send or quit option. Default: nothing added to send, \\r\\n added to end of quit.
+imap_send | **Optional.** String to send to the server.
+imap_expect | **Optional.** String to expect in server response. Multiple strings must be defined as array.
+imap_all | **Optional.** All expect strings need to occur in server response. Default is any.
+imap_quit | **Optional.** String to send server to initiate a clean close of the connection.
+imap_refuse | **Optional.** Accept TCP refusals with states ok, warn, crit (default: crit).
+imap_mismatch | **Optional.** Accept expected string mismatches with states ok, warn, crit (default: warn).
+imap_jail | **Optional.** Hide output from TCP socket.
+imap_maxbytes | **Optional.** Close connection once more than this number of bytes are received.
+imap_delay | **Optional.** Seconds to wait between sending string and polling for response.
+imap_certificate_age | **Optional.** Minimum number of days a certificate has to be valid.
+imap_ssl | **Optional.** Use SSL for the connection.
+imap_warning | **Optional.** Response time to result in warning status (seconds).
+imap_critical | **Optional.** Response time to result in critical status (seconds).
+imap_timeout | **Optional.** Seconds before connection times out (default: 10).
+imap_ipv4 | **Optional.** Use IPv4 connection. Defaults to false.
+imap_ipv6 | **Optional.** Use IPv6 connection. Defaults to false.
+
+
+### ldap <a id="plugin-check-command-ldap"></a>
+
+The [check_ldap](https://www.monitoring-plugins.org/doc/man/check_ldap.html) plugin
+can be used to check LDAP servers.
+
+The plugin can also be used to monitor LDAPS connections instead of using the deprecated `check_ldaps`,
+by enabling `ldap_starttls` or `ldap_ssl`.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+ldap_address | **Optional.** Host name, IP Address, or unix socket (must be an absolute path). Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+ldap_port | **Optional.** Port number. Defaults to 389.
+ldap_attr | **Optional.** LDAP attribute to search for (default: "(objectclass=*)").
+ldap_base | **Required.** LDAP base (eg. ou=myunit,o=myorg,c=at).
+ldap_bind | **Optional.** LDAP bind DN (if required).
+ldap_pass | **Optional.** LDAP password (if required).
+ldap_starttls | **Optional.** Use the STARTTLS mechanism introduced in protocol version 3.
+ldap_ssl | **Optional.** Use LDAPS (LDAP v2 SSL method). This also sets the default port to 636.
+ldap_v2 | **Optional.** Use LDAP protocol version 2 (enabled by default).
+ldap_v3 | **Optional.** Use LDAP protocol version 3 (disabled by default).
+ldap_warning | **Optional.** Response time to result in warning status (seconds).
+ldap_critical | **Optional.** Response time to result in critical status (seconds).
+ldap_warning_entries | **Optional.** Number of found entries to result in warning status.
+ldap_critical_entries | **Optional.** Number of found entries to result in critical status.
+ldap_timeout | **Optional.** Seconds before connection times out (default: 10).
+ldap_verbose | **Optional.** Show details for command-line debugging (disabled by default).
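+
+A minimal LDAPS sketch (host name and base DN are placeholders):
+
+```
+object Service "ldaps" {
+  import "generic-service"                         // template from the sample configuration
+  host_name = "example-host"                       // hypothetical host
+  check_command = "ldap"
+  vars.ldap_base = "ou=people,dc=example,dc=org"   // illustrative base DN
+  vars.ldap_ssl = true                             // use LDAPS, default port becomes 636
+}
+```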
+
+### load <a id="plugin-check-command-load"></a>
+
+The [check_load](https://www.monitoring-plugins.org/doc/man/check_load.html) plugin
+tests the current system load average.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+load_wload1 | **Optional.** The 1-minute warning threshold. Defaults to 5.
+load_wload5 | **Optional.** The 5-minute warning threshold. Defaults to 4.
+load_wload15 | **Optional.** The 15-minute warning threshold. Defaults to 3.
+load_cload1 | **Optional.** The 1-minute critical threshold. Defaults to 10.
+load_cload5 | **Optional.** The 5-minute critical threshold. Defaults to 6.
+load_cload15 | **Optional.** The 15-minute critical threshold. Defaults to 4.
+load_percpu | **Optional.** Divide the load averages by the number of CPUs (when possible). Defaults to false.
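+
+An illustrative apply rule with per-CPU thresholds (the `os` custom variable and the threshold values are assumptions):
+
+```
+apply Service "load" {
+  import "generic-service"      // template from the sample configuration
+  check_command = "load"
+  vars.load_percpu = true       // divide the load averages by the number of CPUs
+  vars.load_wload1 = 4          // illustrative per-CPU 1-minute warning threshold
+  vars.load_cload1 = 8          // illustrative per-CPU 1-minute critical threshold
+
+  assign where host.vars.os == "Linux"   // assumes hosts carry an "os" custom variable
+}
+```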
+
+### mailq <a id="plugin-check-command-mailq"></a>
+
+The [check_mailq](https://www.monitoring-plugins.org/doc/man/check_mailq.html) plugin
+checks the number of messages in the mail queue (supports multiple sendmail queues, qmail).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+mailq_warning | **Required.** Min. number of messages in queue to generate warning.
+mailq_critical | **Required.** Min. number of messages in queue to generate critical alert ( w < c ).
+mailq_domain_warning | **Optional.** Min. number of messages for same domain in queue to generate warning.
+mailq_domain_critical | **Optional.** Min. number of messages for same domain in queue to generate critical alert ( W < C ).
+mailq_timeout | **Optional.** Plugin timeout in seconds (default = 15).
+mailq_servertype | **Optional.** [ sendmail \| qmail \| postfix \| exim \| nullmailer ] (default = autodetect).
+mailq_sudo | **Optional.** Use sudo to execute the mailq command.
+
+### mysql <a id="plugin-check-command-mysql"></a>
+
+The [check_mysql](https://www.monitoring-plugins.org/doc/man/check_mysql.html) plugin
+tests connections to a MySQL server.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|---------------------------------------------------------------
+mysql_extra_opts | **Optional.** Read options from an ini file.
+mysql_hostname | **Optional.** Host name, IP Address, or unix socket (must be an absolute path).
+mysql_port | **Optional.** Port number (default: 3306).
+mysql_socket | **Optional.** Use the specified socket (has no effect if `mysql_hostname` is used).
+mysql_ignore_auth | **Optional.** Ignore authentication failure and check for mysql connectivity only.
+mysql_database | **Optional.** Check database with indicated name.
+mysql_file | **Optional.** Read from the specified client options file.
+mysql_group | **Optional.** Use a client options group.
+mysql_username | **Optional.** Connect using the indicated username.
+mysql_password | **Optional.** Use the indicated password to authenticate the connection.
+mysql_check_slave | **Optional.** Check if the slave thread is running properly.
+mysql_warning | **Optional.** Exit with WARNING status if slave server is more than INTEGER seconds behind master.
+mysql_critical | **Optional.** Exit with CRITICAL status if slave server is more than INTEGER seconds behind master.
+mysql_ssl | **Optional.** Use ssl encryption.
+mysql_cacert | **Optional.** Path to CA signing the cert.
+mysql_cert | **Optional.** Path to SSL certificate.
+mysql_key | **Optional.** Path to private SSL key.
+mysql_cadir | **Optional.** Path to CA directory.
+mysql_ciphers | **Optional.** List of valid SSL ciphers.
+
+
+### mysql_query <a id="plugin-check-command-mysql-query"></a>
+
+The [check_mysql_query](https://www.monitoring-plugins.org/doc/man/check_mysql_query.html) plugin
+checks a query result against threshold levels.
+The result from the query should be numeric. For extra security, create a user with minimal access.
+
+**Note**: You must specify `mysql_query_password` with an empty string to force an empty password,
+overriding any my.cnf settings.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|---------------------------------------------------------------
+mysql_query_hostname | **Optional.** Host name, IP Address, or unix socket (must be an absolute path).
+mysql_query_port | **Optional.** Port number (default: 3306).
+mysql_query_database | **Optional.** Check database with indicated name.
+mysql_query_file | **Optional.** Read from the specified client options file.
+mysql_query_group | **Optional.** Use a client options group.
+mysql_query_username | **Optional.** Connect using the indicated username.
+mysql_query_password | **Optional.** Use the indicated password to authenticate the connection.
+mysql_query_execute | **Required.** SQL Query to run on the MySQL Server.
+mysql_query_warning | **Optional.** Exit with WARNING status if query is outside of the range (format: start:end).
+mysql_query_critical | **Optional.** Exit with CRITICAL status if query is outside of the range.
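+
+A hedged sketch (credentials, query and range are placeholders; the query must return a numeric value):
+
+```
+object Service "mysql-row-count" {
+  import "generic-service"                  // template from the sample configuration
+  host_name = "example-db"                  // hypothetical host
+  check_command = "mysql_query"
+  vars.mysql_query_username = "monitoring"  // illustrative credentials
+  vars.mysql_query_password = "secret"
+  vars.mysql_query_execute = "SELECT COUNT(*) FROM orders"  // hypothetical query
+  vars.mysql_query_warning = "1:10000"      // WARNING if the result is outside this range
+}
+```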
+
+
+### negate <a id="plugin-check-command-negate"></a>
+
+The [negate](https://www.monitoring-plugins.org/doc/man/negate.html) plugin
+negates the status of a plugin (returns OK for CRITICAL and vice-versa).
+Additional switches can be used to control which state becomes what.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------------|---------------------------------------------------------------
+negate_timeout | **Optional.** Seconds before plugin times out (default: 11).
+negate_timeout_result | **Optional.** Custom result on Negate timeouts, default to UNKNOWN.
+negate_ok | **Optional.** The state to map OK to. OK, WARNING, CRITICAL, UNKNOWN or numeric values are accepted.
+negate_warning | **Optional.** The state to map WARNING to.
+negate_critical | **Optional.** The state to map CRITICAL to.
+negate_unknown | **Optional.** The state to map UNKNOWN to. If none of these are specified, the plugin permutes OK and CRITICAL.
+negate_substitute | **Optional.** Substitute output text as well. Will only substitute text in CAPITALS.
+negate_command | **Required.** Command to be negated.
+negate_arguments | **Optional.** Arguments for the negated command.
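+
+An illustrative sketch that wraps `check_dummy` and maps its CRITICAL result to OK (the plugin path is an assumption based on the usual `PluginDir` constant):
+
+```
+object Service "negated-dummy" {
+  import "generic-service"                    // template from the sample configuration
+  host_name = "example-host"                  // hypothetical host
+  check_command = "negate"
+  vars.negate_command = PluginDir + "/check_dummy"        // command to wrap (assumed location)
+  vars.negate_arguments = [ "2", "illustrative output" ]  // make the wrapped command return CRITICAL
+  vars.negate_critical = "OK"                 // map CRITICAL to OK
+}
+```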
+
+### nrpe <a id="plugin-check-command-nrpe"></a>
+
+The `check_nrpe` plugin can be used to query an [NRPE](https://icinga.com/docs/icinga1/latest/en/nrpe.html)
+server or [NSClient++](https://www.nsclient.org). **Note**: This plugin
+is considered insecure/deprecated.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+nrpe_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+nrpe_port | **Optional.** The NRPE port. Defaults to 5666.
+nrpe_command | **Optional.** The command that should be executed.
+nrpe_no_ssl | **Optional.** Whether to disable SSL or not. Defaults to `false`.
+nrpe_timeout_unknown | **Optional.** Whether to set timeouts to unknown instead of critical state. Defaults to `false`.
+nrpe_timeout | **Optional.** The timeout in seconds.
+nrpe_arguments | **Optional.** Arguments that should be passed to the command. Multiple arguments must be defined as array.
+nrpe_ipv4 | **Optional.** Use IPv4 connection. Defaults to false.
+nrpe_ipv6 | **Optional.** Use IPv6 connection. Defaults to false.
+nrpe_version_2 | **Optional.** Use this if you want to connect using NRPE v2 protocol (needed for NSClient++). Defaults to false.
+nrpe_version_3 | **Optional.** Use this if you want to connect using NRPE v3 protocol. Defaults to false.
+nrpe_payload_size | **Optional.** Specify non-default payload size for NSClient++. Default is 1024.
+nrpe_ca | **Optional.** The CA file to use for PKI. Defaults to none.
+nrpe_cert | **Optional.** The client cert file to use for PKI. Defaults to none.
+nrpe_key | **Optional.** The client key file to use for PKI. Defaults to none.
+nrpe_ssl_version | **Optional.** The SSL/TLS version to use. Defaults to TLSv1+.
+nrpe_cipher_list | **Optional.** The list of SSL ciphers to use. Default depends on check_nrpe version.
+nrpe_dh_opt | **Optional.** Anonymous Diffie Hellman use: 0 = deny, 1 = allow, 2 = force. Default depends on check_nrpe version.
+nrpe_no_logging | **Optional.** Disable logging of check_nrpe to syslog facilities (requires check_nrpe >= 4.0).
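+
+A minimal sketch querying a remote NRPE daemon (the remote command name is whatever the agent's `nrpe.cfg` defines; `check_load` below is only an example):
+
+```
+object Service "nrpe-load" {
+  import "generic-service"            // template from the sample configuration
+  host_name = "example-host"          // hypothetical host running an NRPE daemon
+  check_command = "nrpe"
+  vars.nrpe_command = "check_load"    // command name as defined in the remote nrpe.cfg
+}
+```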
+
+
+### nscp <a id="plugin-check-command-nscp"></a>
+
+The [check_nt](https://www.monitoring-plugins.org/doc/man/check_nt.html) plugin
+collects data from the [NSClient++](https://www.nsclient.org) service.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+nscp_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+nscp_port | **Optional.** The NSClient++ port. Defaults to 12489.
+nscp_password | **Optional.** The NSClient++ password.
+nscp_variable | **Required.** The variable that should be checked.
+nscp_params | **Optional.** Parameters for the query. Multiple parameters must be defined as array.
+nscp_warn | **Optional.** The warning threshold.
+nscp_crit | **Optional.** The critical threshold.
+nscp_timeout | **Optional.** The query timeout in seconds.
+nscp_showall | **Optional.** Use with SERVICESTATE to see working services or PROCSTATE for running processes. Defaults to false.
+
+
+### ntp_time <a id="plugin-check-command-ntp-time"></a>
+
+The [check_ntp_time](https://www.monitoring-plugins.org/doc/man/check_ntp_time.html) plugin
+checks the clock offset between the local host and a remote NTP server.
+
+**Note**: If you want to monitor an NTP server, please use `ntp_peer`.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+ntp_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+ntp_port | **Optional.** Port number (default: 123).
+ntp_quiet | **Optional.** Returns UNKNOWN instead of CRITICAL if offset cannot be found.
+ntp_warning | **Optional.** Offset to result in warning status (seconds).
+ntp_critical | **Optional.** Offset to result in critical status (seconds).
+ntp_timeoffset | **Optional.** Expected offset of the ntp server relative to local server (seconds).
+ntp_timeout | **Optional.** Seconds before connection times out (default: 10).
+ntp_ipv4 | **Optional.** Use IPv4 connection. Defaults to false.
+ntp_ipv6 | **Optional.** Use IPv6 connection. Defaults to false.
+
+
+### ntp_peer <a id="plugin-check-command-ntp-peer"></a>
+
+The [check_ntp_peer](https://www.monitoring-plugins.org/doc/man/check_ntp_peer.html) plugin
+checks the health of an NTP server. It supports checking the offset with the sync peer, the
+jitter and stratum. This plugin will not check the clock offset between the local host and NTP
+server; please use `ntp_time` for that purpose.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+ntp_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+ntp_port | **Optional.** The port to use. Defaults to 123.
+ntp_quiet | **Optional.** Returns UNKNOWN instead of CRITICAL or WARNING if server isn't synchronized.
+ntp_warning | **Optional.** Offset to result in warning status (seconds).
+ntp_critical | **Optional.** Offset to result in critical status (seconds).
+ntp_wstratum | **Optional.** Warning threshold for stratum of server's synchronization peer.
+ntp_cstratum | **Optional.** Critical threshold for stratum of server's synchronization peer.
+ntp_wjitter | **Optional.** Warning threshold for jitter.
+ntp_cjitter | **Optional.** Critical threshold for jitter.
+ntp_wsource | **Optional.** Warning threshold for number of usable time sources.
+ntp_csource | **Optional.** Critical threshold for number of usable time sources.
+ntp_timeout | **Optional.** Seconds before connection times out (default: 10).
+ntp_ipv4 | **Optional.** Use IPv4 connection. Defaults to false.
+ntp_ipv6 | **Optional.** Use IPv6 connection. Defaults to false.
+
+
+### pgsql <a id="plugin-check-command-pgsql"></a>
+
+The [check_pgsql](https://www.monitoring-plugins.org/doc/man/check_pgsql.html) plugin
+tests a PostgreSQL DBMS to determine whether it is active and accepting queries.
+If a query is specified using the `pgsql_query` attribute, it will be executed after
+connecting to the server. The result from the query has to be numeric in order
+to compare it against the query thresholds if set.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|---------------------------------------------------------------
+pgsql_extra_opts | **Optional.** Read options from an ini file.
+pgsql_hostname | **Optional.** Host name, IP Address, or unix socket (must be an absolute path).
+pgsql_port | **Optional.** Port number (default: 5432).
+pgsql_database | **Optional.** Database to check (default: template1).
+pgsql_username | **Optional.** Login name of user.
+pgsql_password | **Optional.** Password (BIG SECURITY ISSUE).
+pgsql_options | **Optional.** Connection parameters (keyword = value), see below.
+pgsql_warning | **Optional.** Response time to result in warning status (seconds).
+pgsql_critical | **Optional.** Response time to result in critical status (seconds).
+pgsql_timeout | **Optional.** Seconds before connection times out (default: 10).
+pgsql_query | **Optional.** SQL query to run. Only first column in first row will be read.
+pgsql_query_warning | **Optional.** SQL query value to result in warning status (double).
+pgsql_query_critical | **Optional.** SQL query value to result in critical status (double).
+
+### ping <a id="plugin-check-command-ping"></a>
+
+The [check_ping](https://www.monitoring-plugins.org/doc/man/check_ping.html) plugin
+uses the ping command to probe the specified host for packet loss (percentage) and
+round trip average (milliseconds).
+
+This command uses the host's `address` attribute if available and falls back to using
+the `address6` attribute if the `address` attribute is not set.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+ping_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+ping_wrta | **Optional.** The RTA warning threshold in milliseconds. Defaults to 100.
+ping_wpl | **Optional.** The packet loss warning threshold in %. Defaults to 5.
+ping_crta | **Optional.** The RTA critical threshold in milliseconds. Defaults to 200.
+ping_cpl | **Optional.** The packet loss critical threshold in %. Defaults to 15.
+ping_packets | **Optional.** The number of packets to send. Defaults to 5.
+ping_timeout | **Optional.** The plugin timeout in seconds. Defaults to 0 (no timeout).
+
+
+### ping4 <a id="plugin-check-command-ping4"></a>
+
+The [check_ping](https://www.monitoring-plugins.org/doc/man/check_ping.html) plugin
+uses the ping command to probe the specified host for packet loss (percentage) and
+round trip average (milliseconds).
+
+This command uses the host's `address` attribute if not explicitly specified using
+the `ping_address` attribute.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+ping_address | **Optional.** The host's IPv4 address. Defaults to "$address$".
+ping_wrta | **Optional.** The RTA warning threshold in milliseconds. Defaults to 100.
+ping_wpl | **Optional.** The packet loss warning threshold in %. Defaults to 5.
+ping_crta | **Optional.** The RTA critical threshold in milliseconds. Defaults to 200.
+ping_cpl | **Optional.** The packet loss critical threshold in %. Defaults to 15.
+ping_packets | **Optional.** The number of packets to send. Defaults to 5.
+ping_timeout | **Optional.** The plugin timeout in seconds. Defaults to 0 (no timeout).
+
+### ping6 <a id="plugin-check-command-ping6"></a>
+
+The [check_ping](https://www.monitoring-plugins.org/doc/man/check_ping.html) plugin
+uses the ping command to probe the specified host for packet loss (percentage) and
+round trip average (milliseconds).
+
+This command uses the host's `address6` attribute if not explicitly specified using
+the `ping_address` attribute.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+ping_address | **Optional.** The host's IPv6 address. Defaults to "$address6$".
+ping_wrta | **Optional.** The RTA warning threshold in milliseconds. Defaults to 100.
+ping_wpl | **Optional.** The packet loss warning threshold in %. Defaults to 5.
+ping_crta | **Optional.** The RTA critical threshold in milliseconds. Defaults to 200.
+ping_cpl | **Optional.** The packet loss critical threshold in %. Defaults to 15.
+ping_packets | **Optional.** The number of packets to send. Defaults to 5.
+ping_timeout | **Optional.** The plugin timeout in seconds. Defaults to 0 (no timeout).
+
+
+### pop <a id="plugin-check-command-pop"></a>
+
+The [check_pop](https://www.monitoring-plugins.org/doc/man/check_pop.html) plugin
+tests POP connections with the specified host (or unix socket).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+---------------------|--------------
+pop_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+pop_port | **Optional.** The port that should be checked. Defaults to 110.
+pop_escape | **Optional.** Can use \\n, \\r, \\t or \\ in send or quit string. Must come before send or quit option. Default: nothing added to send, \\r\\n added to end of quit.
+pop_send | **Optional.** String to send to the server.
+pop_expect | **Optional.** String to expect in server response. Multiple strings must be defined as array.
+pop_all | **Optional.** All expect strings need to occur in server response. Default is any.
+pop_quit | **Optional.** String to send server to initiate a clean close of the connection.
+pop_refuse | **Optional.** Accept TCP refusals with states ok, warn, crit (default: crit).
+pop_mismatch | **Optional.** Accept expected string mismatches with states ok, warn, crit (default: warn).
+pop_jail | **Optional.** Hide output from TCP socket.
+pop_maxbytes | **Optional.** Close connection once more than this number of bytes are received.
+pop_delay | **Optional.** Seconds to wait between sending string and polling for response.
+pop_certificate_age | **Optional.** Minimum number of days a certificate has to be valid.
+pop_ssl | **Optional.** Use SSL for the connection.
+pop_warning | **Optional.** Response time to result in warning status (seconds).
+pop_critical | **Optional.** Response time to result in critical status (seconds).
+pop_timeout | **Optional.** Seconds before connection times out (default: 10).
+pop_ipv4 | **Optional.** Use IPv4 connection. Defaults to false.
+pop_ipv6 | **Optional.** Use IPv6 connection. Defaults to false.
+
+
+### procs <a id="plugin-check-command-processes"></a>
+
+The [check_procs](https://www.monitoring-plugins.org/doc/man/check_procs.html) plugin
+checks all processes and generates WARNING or CRITICAL states if the specified
+metric is outside the required threshold ranges. The metric defaults to number
+of processes. Search filters can be applied to limit the processes to check.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+---------------------|--------------
+procs_warning | **Optional.** The process count warning threshold. Defaults to 250.
+procs_critical | **Optional.** The process count critical threshold. Defaults to 400.
+procs_metric | **Optional.** Check thresholds against metric.
+procs_timeout | **Optional.** Seconds before plugin times out.
+procs_traditional | **Optional.** Filter own process the traditional way by PID instead of /proc/pid/exe. Defaults to false.
+procs_state | **Optional.** Only scan for processes that have one or more of the status flags you specify.
+procs_ppid | **Optional.** Only scan for children of the parent process ID indicated.
+procs_vsz | **Optional.** Only scan for processes with VSZ higher than indicated.
+procs_rss | **Optional.** Only scan for processes with RSS higher than indicated.
+procs_pcpu | **Optional.** Only scan for processes with PCPU higher than indicated.
+procs_user | **Optional.** Only scan for processes with user name or ID indicated.
+procs_argument | **Optional.** Only scan for processes with args that contain STRING.
+procs_argument_regex | **Optional.** Only scan for processes with args that contain the regex STRING.
+procs_command | **Optional.** Only scan for exact matches of COMMAND (without path).
+procs_nokthreads | **Optional.** Only scan for non kernel threads. Defaults to false.
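+
+For example, to alert when a process is not running at all (the process name is illustrative):
+
+```
+object Service "procs-sshd" {
+  import "generic-service"        // template from the sample configuration
+  host_name = "example-host"      // hypothetical host
+  check_command = "procs"
+  vars.procs_command = "sshd"     // count only processes whose command is exactly "sshd"
+  vars.procs_critical = "1:"      // CRITICAL if fewer than one matching process is found
+}
+```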
+
+
+### radius <a id="plugin-check-command-radius"></a>
+
+The [check_radius](https://www.monitoring-plugins.org/doc/man/check_radius.html) plugin
+checks a RADIUS server to see if it is accepting connections. The server to test
+must be specified in the invocation, as well as a user name and password. A configuration
+file may also be present. The format of the configuration file is described in the
+radiusclient library sources. The password option presents a substantial security
+issue because the password can possibly be determined by careful watching of the
+command line in a process listing. This risk is exacerbated because the plugin will
+typically be executed at regular predictable intervals. Please be sure that the
+password used does not allow access to sensitive system resources.
+
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-------------------|--------------
+radius_address | **Optional.** The radius server's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+radius_config_file | **Required.** The radius configuration file.
+radius_username | **Required.** The radius username to test.
+radius_password | **Required.** The radius password to test.
+radius_port | **Optional.** The radius port number (default 1645).
+radius_nas_id | **Optional.** The NAS identifier.
+radius_nas_address | **Optional.** The NAS IP address.
+radius_expect | **Optional.** The response string to expect from the server.
+radius_retries | **Optional.** The number of times to retry a failed connection.
+radius_timeout | **Optional.** The number of seconds before connection times out (default: 10).
+
+### rpc <a id="plugin-check-command-rpc"></a>
+
+The [check_rpc](https://www.monitoring-plugins.org/doc/man/check_rpc.html)
+plugin tests if a service is registered and running using `rpcinfo -H host -C rpc_command`.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--- | ---
+rpc_address | **Optional.** The rpc host address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+rpc_command | **Required.** The program name (or number).
+rpc_port | **Optional.** The port that should be checked.
+rpc_version | **Optional.** The version you want to check for (one or more).
+rpc_udp | **Optional.** Use UDP test. Defaults to false.
+rpc_tcp | **Optional.** Use TCP test. Defaults to false.
+rpc_verbose | **Optional.** Show verbose output. Defaults to false.
+
+### simap <a id="plugin-check-command-simap"></a>
+
+The [check_simap](https://www.monitoring-plugins.org/doc/man/check_simap.html) plugin
+tests SIMAP connections with the specified host (or unix socket).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-----------------------|--------------
+simap_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+simap_port | **Optional.** The port that should be checked. Defaults to 993.
+simap_escape | **Optional.** Can use \\n, \\r, \\t or \\ in send or quit string. Must come before send or quit option. Default: nothing added to send, \\r\\n added to end of quit.
+simap_send | **Optional.** String to send to the server.
+simap_expect | **Optional.** String to expect in server response. Multiple strings must be defined as array.
+simap_all | **Optional.** All expect strings need to occur in server response. Default is any.
+simap_quit | **Optional.** String to send server to initiate a clean close of the connection.
+simap_refuse | **Optional.** Accept TCP refusals with states ok, warn, crit (default: crit).
+simap_mismatch | **Optional.** Accept expected string mismatches with states ok, warn, crit (default: warn).
+simap_jail | **Optional.** Hide output from TCP socket.
+simap_maxbytes | **Optional.** Close connection once more than this number of bytes are received.
+simap_delay | **Optional.** Seconds to wait between sending string and polling for response.
+simap_certificate_age | **Optional.** Minimum number of days a certificate has to be valid.
+simap_ssl | **Optional.** Use SSL for the connection.
+simap_warning | **Optional.** Response time to result in warning status (seconds).
+simap_critical | **Optional.** Response time to result in critical status (seconds).
+simap_timeout | **Optional.** Seconds before connection times out (default: 10).
+simap_ipv4 | **Optional.** Use IPv4 connection. Defaults to false.
+simap_ipv6 | **Optional.** Use IPv6 connection. Defaults to false.
+
+### smart <a id="plugin-check-command-smart"></a>
+
+The [check_ide_smart](https://www.monitoring-plugins.org/doc/man/check_ide_smart.html) plugin
+checks a local hard drive with the (Linux specific) SMART interface. Requires installation of `smartctl`.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+smart_device | **Required.** The name of a local hard drive to monitor.
+
+
+### smtp <a id="plugin-check-command-smtp"></a>
+
+The [check_smtp](https://www.monitoring-plugins.org/doc/man/check_smtp.html) plugin
+will attempt to open an SMTP connection with the host.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------------|--------------
+smtp_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+smtp_port | **Optional.** The port that should be checked. Defaults to 25.
+smtp_mail_from | **Optional.** Test a MAIL FROM command with the given email address.
+smtp_expect | **Optional.** String to expect in first line of server response (default: '220').
+smtp_command | **Optional.** SMTP command (may be used repeatedly).
+smtp_response | **Optional.** Expected response to command (may be used repeatedly).
+smtp_helo_fqdn | **Optional.** FQDN used for HELO.
+smtp_certificate_age | **Optional.** Minimum number of days a certificate has to be valid.
+smtp_starttls | **Optional.** Use STARTTLS for the connection.
+smtp_authtype | **Optional.** SMTP AUTH type to check (default none, only LOGIN supported).
+smtp_authuser | **Optional.** SMTP AUTH username.
+smtp_authpass | **Optional.** SMTP AUTH password.
+smtp_ignore_quit | **Optional.** Ignore failure when sending QUIT command to server.
+smtp_warning | **Optional.** Response time to result in warning status (seconds).
+smtp_critical | **Optional.** Response time to result in critical status (seconds).
+smtp_timeout | **Optional.** Seconds before connection times out (default: 10).
+smtp_ipv4 | **Optional.** Use IPv4 connection. Defaults to false.
+smtp_ipv6 | **Optional.** Use IPv6 connection. Defaults to false.
+
+
+### snmp <a id="plugin-check-command-snmp"></a>
+
+The [check_snmp](https://www.monitoring-plugins.org/doc/man/check_snmp.html) plugin
+checks the status of remote machines and obtains system information via SNMP.
+
+**Note**: This plugin uses the `snmpget` command included with the NET-SNMP package.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------|--------------
+snmp_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+snmp_oid | **Required.** The SNMP OID.
+snmp_community | **Optional.** The SNMP community. Defaults to "public".
+snmp_port | **Optional.** The SNMP port. Defaults to "161".
+snmp_retries | **Optional.** Number of retries to be used in the SNMP requests.
+snmp_warn | **Optional.** The warning threshold.
+snmp_crit | **Optional.** The critical threshold.
+snmp_string | **Optional.** Return OK state if the string matches exactly with the output value.
+snmp_ereg | **Optional.** Return OK state if extended regular expression REGEX matches with the output value.
+snmp_eregi | **Optional.** Return OK state if case-insensitive extended REGEX matches with the output value.
+snmp_label | **Optional.** Prefix label for output value.
+snmp_invert_search | **Optional.** Invert search result and return CRITICAL state if found.
+snmp_units | **Optional.** Units label(s) for output value (e.g., 'sec.').
+snmp_version | **Optional.** Version to use. E.g. 1, 2, 2c or 3.
+snmp_miblist | **Optional.** MIB's to use, comma separated. Defaults to "ALL".
+snmp_rate_multiplier | **Optional.** Converts rate per second. For example, set to 60 to convert to per minute.
+snmp_rate | **Optional.** Boolean. Enable rate calculation.
+snmp_getnext | **Optional.** Boolean. Use SNMP GETNEXT. Defaults to false.
+snmp_timeout | **Optional.** The command timeout in seconds. Defaults to 10 seconds.
+snmp_offset | **Optional.** Add/subtract the specified OFFSET to numeric sensor data.
+snmp_output_delimiter | **Optional.** Separates output on multiple OID requests.
+snmp_perf_oids | **Optional.** Label performance data with OIDs instead of --label's.
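+
+A small sketch querying the sysName OID (the host and community are placeholders):
+
+```
+object Service "snmp-sysname" {
+  import "generic-service"              // template from the sample configuration
+  host_name = "example-router"          // hypothetical host
+  check_command = "snmp"
+  vars.snmp_oid = "1.3.6.1.2.1.1.5.0"   // sysName.0
+  vars.snmp_community = "public"        // placeholder community string
+}
+```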
+
+### snmpv3 <a id="plugin-check-command-snmpv3"></a>
+
+Check command object for the [check_snmp](https://www.monitoring-plugins.org/doc/man/check_snmp.html)
+plugin, using SNMPv3 authentication and encryption options.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+---------------------|--------------
+snmpv3_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+snmpv3_getnext | **Optional.** Use SNMP GETNEXT instead of SNMP GET.
+snmpv3_seclevel | **Optional.** The security level. Defaults to authPriv.
+snmpv3_auth_alg | **Optional.** The authentication algorithm. Defaults to SHA.
+snmpv3_user | **Required.** The username to log in with.
+snmpv3_context | **Optional.** The SNMPv3 context.
+snmpv3_auth_key | **Required.** The authentication key. Required if `snmpv3_seclevel` is set to `authPriv` (the default), otherwise optional.
+snmpv3_priv_key | **Required.** The encryption key.
+snmpv3_oid | **Required.** The SNMP OID.
+snmpv3_priv_alg | **Optional.** The encryption algorithm. Defaults to AES.
+snmpv3_warn | **Optional.** The warning threshold.
+snmpv3_crit | **Optional.** The critical threshold.
+snmpv3_string | **Optional.** Return OK state (for that OID) if STRING is an exact match.
+snmpv3_ereg | **Optional.** Return OK state (for that OID) if extended regular expression REGEX matches.
+snmpv3_eregi | **Optional.** Return OK state (for that OID) if case-insensitive extended REGEX matches.
+snmpv3_invert_search | **Optional.** Invert search result and return CRITICAL if found
+snmpv3_label | **Optional.** Prefix label for output value.
+snmpv3_units | **Optional.** Units label(s) for output value (e.g., 'sec.').
+snmpv3_rate_multiplier | **Optional.** Converts rate per second. For example, set to 60 to convert to per minute.
+snmpv3_rate | **Optional.** Boolean. Enable rate calculation.
+snmpv3_timeout | **Optional.** The command timeout in seconds. Defaults to 10 seconds.
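+
+A sketch of a corresponding service definition (the host name, user, keys and OID are hypothetical placeholders, not defaults shipped with the ITL):
+
+```
+object Service "snmpv3-sysuptime" {
+  host_name = "snmp-device01"               // hypothetical Host object
+  check_command = "snmpv3"
+
+  vars.snmpv3_user = "monitoring"           // hypothetical SNMPv3 user
+  vars.snmpv3_auth_key = "auth-secret"      // hypothetical authentication key
+  vars.snmpv3_priv_key = "priv-secret"      // hypothetical encryption key
+  vars.snmpv3_oid = "1.3.6.1.2.1.1.3.0"
+}
+```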
+
+### snmp-uptime <a id="plugin-check-command-snmp-uptime"></a>
+
+Check command object for the [check_snmp](https://www.monitoring-plugins.org/doc/man/check_snmp.html)
+plugin, using the uptime OID by default.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+snmp_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+snmp_oid | **Optional.** The SNMP OID. Defaults to "1.3.6.1.2.1.1.3.0".
+snmp_community | **Optional.** The SNMP community. Defaults to "public".
+
+
+### spop <a id="plugin-check-command-spop"></a>
+
+The [check_spop](https://www.monitoring-plugins.org/doc/man/check_spop.html) plugin
+tests SPOP connections with the specified host (or unix socket).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------------|--------------
+spop_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+spop_port | **Optional.** The port that should be checked. Defaults to 995.
+spop_escape | **Optional.** Can use \\n, \\r, \\t or \\ in send or quit string. Must come before send or quit option. Default: nothing added to send, \\r\\n added to end of quit.
+spop_send | **Optional.** String to send to the server.
+spop_expect | **Optional.** String to expect in server response. Multiple strings must be defined as array.
+spop_all | **Optional.** All expect strings need to occur in server response. Default is any.
+spop_quit | **Optional.** String to send server to initiate a clean close of the connection.
+spop_refuse | **Optional.** Accept TCP refusals with states ok, warn, crit (default: crit).
+spop_mismatch | **Optional.** Accept expected string mismatches with states ok, warn, crit (default: warn).
+spop_jail | **Optional.** Hide output from TCP socket.
+spop_maxbytes | **Optional.** Close connection once more than this number of bytes are received.
+spop_delay | **Optional.** Seconds to wait between sending string and polling for response.
+spop_certificate_age | **Optional.** Minimum number of days a certificate has to be valid.
+spop_ssl | **Optional.** Use SSL for the connection.
+spop_warning | **Optional.** Response time to result in warning status (seconds).
+spop_critical | **Optional.** Response time to result in critical status (seconds).
+spop_timeout | **Optional.** Seconds before connection times out (default: 10).
+spop_ipv4 | **Optional.** Use IPv4 connection. Defaults to false.
+spop_ipv6 | **Optional.** Use IPv6 connection. Defaults to false.
+
+
+### ssh <a id="plugin-check-command-ssh"></a>
+
+The [check_ssh](https://www.monitoring-plugins.org/doc/man/check_ssh.html) plugin
+connects to an SSH server at a specified host and port.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+ssh_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+ssh_port | **Optional.** The port that should be checked. Defaults to 22.
+ssh_timeout | **Optional.** Seconds before connection times out. Defaults to 10.
+ssh_ipv4 | **Optional.** Use IPv4 connection. Defaults to false.
+ssh_ipv6 | **Optional.** Use IPv6 connection. Defaults to false.
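+
+A minimal sketch of an apply rule for this check command (the assign condition and the commented-out alternative port are illustrative):
+
+```
+apply Service "ssh" {
+  check_command = "ssh"
+
+  //vars.ssh_port = 2222    // only needed if sshd listens on a non-standard port
+
+  assign where host.address && host.vars.os == "Linux"
+}
+```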
+
+
+### ssl <a id="plugin-check-command-ssl"></a>
+
+Check command object for the [check_tcp](https://www.monitoring-plugins.org/doc/man/check_tcp.html) plugin,
+using ssl-related options.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------------|--------------
+ssl_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+ssl_port | **Optional.** The port that should be checked. Defaults to 443.
+ssl_timeout | **Optional.** Timeout in seconds for the connect and handshake. The plugin default is 10 seconds.
+ssl_cert_valid_days_warn | **Optional.** Warning threshold for days before the certificate will expire. When used, the default for ssl_cert_valid_days_critical is 0.
+ssl_cert_valid_days_critical | **Optional.** Critical threshold for days before the certificate will expire. When used, ssl_cert_valid_days_warn must also be set.
+ssl_sni | **Optional.** The `server_name` that is sent to select the SSL certificate to check. Important if SNI is used.
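+
+The following sketch checks certificate expiry via SNI (the host name, SNI value and thresholds are illustrative placeholders):
+
+```
+object Service "ssl-cert" {
+  host_name = "webserver01"                  // hypothetical Host object
+  check_command = "ssl"
+
+  vars.ssl_port = 443
+  vars.ssl_sni = "www.example.com"           // hypothetical virtual host name
+  vars.ssl_cert_valid_days_warn = 30
+  vars.ssl_cert_valid_days_critical = 14
+}
+```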
+
+
+### ssmtp <a id="plugin-check-command-ssmtp"></a>
+
+The [check_ssmtp](https://www.monitoring-plugins.org/doc/man/check_ssmtp.html) plugin
+tests SSMTP connections with the specified host (or unix socket).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-----------------------|--------------
+ssmtp_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+ssmtp_port | **Optional.** The port that should be checked. Defaults to 465.
+ssmtp_escape | **Optional.** Can use \\n, \\r, \\t or \\ in send or quit string. Must come before send or quit option. Default: nothing added to send, \\r\\n added to end of quit.
+ssmtp_send | **Optional.** String to send to the server.
+ssmtp_expect | **Optional.** String to expect in server response. Multiple strings must be defined as array.
+ssmtp_all | **Optional.** All expect strings need to occur in server response. Default is any.
+ssmtp_quit | **Optional.** String to send server to initiate a clean close of the connection.
+ssmtp_refuse | **Optional.** Accept TCP refusals with states ok, warn, crit (default: crit).
+ssmtp_mismatch | **Optional.** Accept expected string mismatches with states ok, warn, crit (default: warn).
+ssmtp_jail | **Optional.** Hide output from TCP socket.
+ssmtp_maxbytes | **Optional.** Close connection once more than this number of bytes are received.
+ssmtp_delay | **Optional.** Seconds to wait between sending string and polling for response.
+ssmtp_certificate_age | **Optional.** Minimum number of days a certificate has to be valid.
+ssmtp_ssl | **Optional.** Use SSL for the connection.
+ssmtp_warning | **Optional.** Response time to result in warning status (seconds).
+ssmtp_critical | **Optional.** Response time to result in critical status (seconds).
+ssmtp_timeout | **Optional.** Seconds before connection times out (default: 10).
+ssmtp_ipv4 | **Optional.** Use IPv4 connection. Defaults to false.
+ssmtp_ipv6 | **Optional.** Use IPv6 connection. Defaults to false.
+
+
+### swap <a id="plugin-check-command-swap"></a>
+
+The [check_swap](https://www.monitoring-plugins.org/doc/man/check_swap.html) plugin
+checks the swap space on a local machine.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+swap_wfree | **Optional.** The free swap space warning threshold in % (enable `swap_integer` for number values). Defaults to `50%`.
+swap_cfree | **Optional.** The free swap space critical threshold in % (enable `swap_integer` for number values). Defaults to `25%`.
+swap_integer | **Optional.** Specifies whether the thresholds are passed as number or percent value. Defaults to false (percent values).
+swap_allswaps | **Optional.** Conduct comparisons for all swap partitions, one by one. Defaults to false.
+swap_noswap | **Optional.** Resulting state when there is no swap regardless of thresholds. Possible values are "ok", "warning", "critical", "unknown". Defaults to "critical".
+
+
+### tcp <a id="plugin-check-command-tcp"></a>
+
+The [check_tcp](https://www.monitoring-plugins.org/doc/man/check_tcp.html) plugin
+tests TCP connections with the specified host (or unix socket).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+tcp_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+tcp_port | **Required.** The port that should be checked.
+tcp_expect | **Optional.** String to expect in server response. Multiple strings must be defined as array.
+tcp_all | **Optional.** All expect strings need to occur in server response. Defaults to false.
+tcp_escape_send | **Optional.** Enable usage of \\n, \\r, \\t or \\\\ in send string.
+tcp_send | **Optional.** String to send to the server.
+tcp_escape_quit | **Optional.** Enable usage of \\n, \\r, \\t or \\\\ in quit string.
+tcp_quit | **Optional.** String to send server to initiate a clean close of the connection.
+tcp_refuse | **Optional.** Accept TCP refusals with states ok, warn, crit. Defaults to crit.
+tcp_mismatch | **Optional.** Accept expected string mismatches with states ok, warn, crit. Defaults to warn.
+tcp_jail | **Optional.** Hide output from TCP socket.
+tcp_maxbytes | **Optional.** Close connection once more than this number of bytes are received.
+tcp_delay | **Optional.** Seconds to wait between sending string and polling for response.
+tcp_certificate | **Optional.** Minimum number of days a certificate has to be valid. 1st value is number of days for warning, 2nd is critical (if not specified: 0) -- separated by comma.
+tcp_ssl | **Optional.** Use SSL for the connection. Defaults to false.
+tcp_sni | **Optional.** Hostname to send in the `server_name` (SNI) SSL/TLS extension.
+tcp_wtime | **Optional.** Response time to result in warning status (seconds).
+tcp_ctime | **Optional.** Response time to result in critical status (seconds).
+tcp_timeout | **Optional.** Seconds before connection times out. Defaults to 10.
+tcp_ipv4 | **Optional.** Use IPv4 connection. Defaults to false.
+tcp_ipv6 | **Optional.** Use IPv6 connection. Defaults to false.
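+
+A sketch of a service definition for this check command (the host name, port, expect strings and response time thresholds are illustrative):
+
+```
+object Service "tcp-8080" {
+  host_name = "app-server01"                 // hypothetical Host object
+  check_command = "tcp"
+
+  vars.tcp_port = 8080
+  vars.tcp_expect = [ "OK", "READY" ]        // match any of these strings in the response
+  vars.tcp_wtime = 2
+  vars.tcp_ctime = 5
+}
+```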
+
+
+### udp <a id="plugin-check-command-udp"></a>
+
+The [check_udp](https://www.monitoring-plugins.org/doc/man/check_udp.html) plugin
+tests UDP connections with the specified host (or unix socket).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+udp_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+udp_port | **Required.** The port that should be checked.
+udp_send | **Required.** The payload to send in the UDP datagram.
+udp_expect | **Required.** The payload to expect in the response datagram.
+udp_quit | **Optional.** The payload to send to 'close' the session.
+udp_ipv4 | **Optional.** Use IPv4 connection. Defaults to false.
+udp_ipv6 | **Optional.** Use IPv6 connection. Defaults to false.
+
+
+### ups <a id="plugin-check-command-ups"></a>
+
+The [check_ups](https://www.monitoring-plugins.org/doc/man/check_ups.html) plugin
+tests the UPS service on the specified host. [Network UPS Tools](http://www.networkupstools.org)
+must be running for this plugin to work.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+ups_address | **Required.** The address of the host running upsd. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+ups_name | **Required.** The UPS name. Defaults to `ups`.
+ups_port | **Optional.** The port to which to connect. Defaults to 3493.
+ups_variable | **Optional.** The variable to monitor. Must be one of LINE, TEMP, BATTPCT or LOADPCT. If this is not set, the check only relies on the value of `ups.status`.
+ups_warning | **Optional.** The warning threshold for the selected variable.
+ups_critical | **Optional.** The critical threshold for the selected variable.
+ups_celsius | **Optional.** Display the temperature in degrees Celsius instead of Fahrenheit. Defaults to `false`.
+ups_timeout | **Optional.** The number of seconds before the connection times out. Defaults to 10.
+
+
+### users <a id="plugin-check-command-users"></a>
+
+The [check_users](https://www.monitoring-plugins.org/doc/man/check_users.html) plugin
+checks the number of users currently logged in on the local system and generates an
+error if the number exceeds the thresholds specified.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+users_wgreater | **Optional.** The user count warning threshold. Defaults to 20.
+users_cgreater | **Optional.** The user count critical threshold. Defaults to 50.
+
+
+### uptime <a id="plugin-check-command-uptime"></a>
+
+The [check_uptime](https://www.monitoring-plugins.org/doc/man/check_uptime.html) plugin
+checks the uptime of the system using /proc/uptime.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+uptime_warning | **Required.** Minimum uptime below which a WARNING state is raised (`-w 30m`). Defaults to 30m.
+uptime_critical | **Required.** Minimum uptime below which a CRITICAL state is raised (`-c 15m`). Defaults to 15m.
+uptime_for | **Optional.** Show uptime in a pretty format (Running for x weeks, x days, ...). Defaults to false.
+uptime_since | **Optional.** Show last boot in yyyy-mm-dd HH:MM:SS format (output from 'uptime -s'). Defaults to false.
+
+
+
+## Windows Plugins for Icinga 2 <a id="windows-plugins"></a>
+
+> **Note**
+>
+> These plugins are DEPRECATED in favor of our
+> [PowerShell Plugins](https://github.com/Icinga/icinga-powershell-plugins)
+> and may be removed in a future release.
+> Check the [roadmap](https://github.com/Icinga/icinga2/milestones).
+
+To allow a basic monitoring of Windows clients, Icinga 2 comes with a set of Windows-only plugins. While they try to mirror the functionality of their Linux cousins from the monitoring-plugins package, the differences between Windows and Linux are too big to use the same CheckCommands for both systems.
+
+A check-commands-windows.conf comes with Icinga 2; it assumes that the Windows plugins are installed in the PluginDir set in your constants.conf. To enable them, add the following include directive to your icinga2.conf:
+
+ include <windows-plugins>
+
+One of the differences between the Windows plugins and their Linux counterparts is that they consistently do not require thresholds to run; without thresholds they simply report the collected values.
+
+
+### Threshold syntax <a id="windows-plugins-thresholds"></a>
+
+Unless specified differently, the thresholds for the plugins all follow the same pattern:
+
+Threshold | Meaning
+:------------|:----------
+"29" | The threshold is 29.
+"!29" | The threshold is 29, but the negative of the result is returned.
+"[10-40]" | The threshold is a range from (including) 10 to 40, a value inside means the threshold has been exceeded.
+"![10-40]" | Same as above, but the result is inverted.
+
+
+### disk-windows <a id="windows-plugins-disk-windows"></a>
+
+Check command object for the `check_disk.exe` plugin.
+Aggregates the disk space of all volumes and mount points it can find, or the ones defined in `disk_win_path`. Ignores removable storage like flash drives and discs (CD, DVD etc.).
+The data collection is instant and free disk space (default, see `disk_win_show_used`) is used for threshold computation.
+
+> **Note**
+>
+> Percentage based thresholds can be used by adding a '%' to the threshold
+> value.
+
+Custom variables:
+
+Name | Description
+:---------------------|:------------
+disk\_win\_warn | **Optional**. The warning threshold. Defaults to "20%".
+disk\_win\_crit | **Optional**. The critical threshold. Defaults to "10%".
+disk\_win\_path | **Optional**. Check only these paths, default checks all.
+disk\_win\_unit | **Optional**. Use this unit to display disk space, thresholds are interpreted in this unit. Defaults to "mb", possible values are: b, kb, mb, gb and tb.
+disk\_win\_exclude | **Optional**. Exclude these drives from check.
+disk\_win\_show\_used | **Optional**. Use used instead of free space.
+
+### load-windows <a id="windows-plugins-load-windows"></a>
+
+Check command object for the `check_load.exe` plugin.
+This plugin collects the inverse of the performance counter `\Processor(_Total)\% Idle Time` two times, with a wait time of one second between the collections. To change this wait time, use [`perfmon-windows`](10-icinga-template-library.md#windows-plugins-perfmon-windows).
+
+Custom variables:
+
+Name | Description
+:---------------|:------------
+load\_win\_warn | **Optional**. The warning threshold.
+load\_win\_crit | **Optional**. The critical threshold.
+
+
+### memory-windows <a id="windows-plugins-memory-windows"></a>
+
+Check command object for the `check_memory.exe` plugin.
+The memory collection is instant and free memory is used for threshold computation.
+
+> **Note**
+>
+> Percentage based thresholds can be used by adding a '%' to the threshold
+> value. Keep in mind that memory\_win\_unit is applied before the
+> value is calculated.
+
+Custom variables:
+
+Name | Description
+:-----------------|:------------
+memory\_win\_warn | **Optional**. The warning threshold. Defaults to "10%".
+memory\_win\_crit | **Optional**. The critical threshold. Defaults to "5%".
+memory\_win\_unit | **Optional**. The unit to display the received value in, thresholds are interpreted in this unit. Defaults to "mb" (megabyte), possible values are: b, kb, mb, gb and tb.
+memory\_win\_show\_used | **Optional**. Show used memory instead of the free memory.
+
+
+### network-windows <a id="windows-plugins-network-windows"></a>
+
+Check command object for the `check_network.exe` plugin.
+Collects the total Bytes inbound and outbound for all interfaces in one second. To itemise interfaces or to use a different collection interval, use [`perfmon-windows`](10-icinga-template-library.md#windows-plugins-perfmon-windows).
+
+Custom variables:
+
+Name | Description
+:-------------------|:------------
+network\_win\_warn | **Optional**. The warning threshold.
+network\_win\_crit | **Optional**. The critical threshold.
+network\_no\_isatap | **Optional**. Do not print ISATAP interfaces.
+
+
+### perfmon-windows <a id="windows-plugins-perfmon-windows"></a>
+
+Check command object for the `check_perfmon.exe` plugin.
+This plugin allows collecting data from a Performance Counter. After the first data collection, a second one is done after `perfmon_win_wait` milliseconds. If you know that `perfmon_win_counter` only requires one set of data to provide valid results, you can set `perfmon_win_wait` to `0`.
+
+To receive a list of possible Performance Counter Objects run `check_perfmon.exe --print-objects`; to view an object's instances and counters run `check_perfmon.exe --print-object-info -P "name of object"`.
+
+Custom variables:
+
+Name | Description
+:---------------------|:------------
+perfmon\_win\_warn | **Optional**. The warning threshold.
+perfmon\_win\_crit | **Optional**. The critical threshold.
+perfmon\_win\_counter | **Required**. The Performance Counter to use. Ex. `\Processor(_Total)\% Idle Time`.
+perfmon\_win\_wait | **Optional**. Time in milliseconds to wait between data collection (default: 1000).
+perfmon\_win\_type | **Optional**. Format in which to expect performance values. Possible values are: long, int64 and double (default).
+perfmon\_win\_syntax | **Optional**. Use this in the performance output instead of `perfmon_win_counter`. Exists for graphics compatibility reasons.
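+
+A sketch of a service using a counter that only needs a single collection (the counter name and thresholds are illustrative):
+
+```
+object Service "perfmon-cpu-queue" {
+  host_name = "windows-host01"              // hypothetical Windows host
+  check_command = "perfmon-windows"
+
+  vars.perfmon_win_counter = "\\System\\Processor Queue Length"
+  vars.perfmon_win_wait = 0                 // this counter delivers valid data after a single collection
+  vars.perfmon_win_warn = 5
+  vars.perfmon_win_crit = 10
+}
+```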
+
+
+### ping-windows <a id="windows-plugins-ping-windows"></a>
+
+Check command object for the `check_ping.exe` plugin.
+ping-windows should automatically detect whether `ping_win_address` is an IPv4 or IPv6 address. If it does not, use ping4-windows or ping6-windows. Also note that check\_ping.exe waits at least `ping_win_timeout` milliseconds between pings.
+
+Custom variables:
+
+Name | Description
+:------------------|:------------
+ping\_win\_warn | **Optional**. The warning threshold. RTA and packet loss separated by comma.
+ping\_win\_crit | **Optional**. The critical threshold. RTA and packet loss separated by comma.
+ping\_win\_address | **Required**. An IPv4 or IPv6 address.
+ping\_win\_packets | **Optional**. Number of packets to send. Default: 5.
+ping\_win\_timeout | **Optional**. The timeout in milliseconds. Default: 1000.
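+
+A sketch of a service definition following the threshold format described above (host name and threshold values are illustrative):
+
+```
+object Service "ping-win" {
+  host_name = "windows-host01"        // hypothetical Windows host
+  check_command = "ping-windows"
+
+  vars.ping_win_address = "$address$" // resolve the host's address at runtime
+  vars.ping_win_warn = "100,20"       // RTA and packet loss separated by comma (illustrative values)
+  vars.ping_win_crit = "200,50"
+}
+```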
+
+
+### procs-windows <a id="windows-plugins-procs-windows"></a>
+
+Check command object for `check_procs.exe` plugin.
+When using `procs_win_user`, this plugin needs administrative privileges to access the processes of other users; to just enumerate them, no additional privileges are required.
+
+Custom variables:
+
+Name | Description
+:----------------|:------------
+procs\_win\_warn | **Optional**. The warning threshold.
+procs\_win\_crit | **Optional**. The critical threshold.
+procs\_win\_user | **Optional**. Count this users processes.
+
+
+### service-windows <a id="windows-plugins-service-windows"></a>
+
+Check command object for `check_service.exe` plugin.
+This check's thresholds work differently from the others since the binary decision whether a service is running or not does not allow for three states. By default `check_service.exe` returns CRITICAL when `service_win_service` is not running; the `service_win_warn` flag changes this to WARNING.
+
+Custom variables:
+
+Name | Description
+:-------------------------|:------------
+service\_win\_warn | **Optional**. Warn when service is not running.
+service\_win\_description | **Optional**. If this is set, `service_win_service` looks at the service description.
+service\_win\_service | **Required**. Name of the service to check.
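+
+A sketch of a service checking a single Windows service (the host and service names are examples):
+
+```
+object Service "win-service-spooler" {
+  host_name = "windows-host01"          // hypothetical Windows host
+  check_command = "service-windows"
+
+  vars.service_win_service = "Spooler"  // the Windows service to check
+  //vars.service_win_warn = true        // uncomment to report WARNING instead of CRITICAL
+}
+```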
+
+
+### swap-windows <a id="windows-plugins-swap-windows"></a>
+
+Check command object for `check_swap.exe` plugin.
+The data collection is instant.
+
+Custom variables:
+
+Name | Description
+:--------------- | :------------
+swap\_win\_warn | **Optional**. The warning threshold. Defaults to "10%".
+swap\_win\_crit | **Optional**. The critical threshold. Defaults to "5%".
+swap\_win\_unit | **Optional**. The unit to display the received value in, thresholds are interpreted in this unit. Defaults to "mb" (megabyte).
+swap\_win\_show\_used | **Optional**. Show used swap instead of the free swap.
+
+### update-windows <a id="windows-plugins-update-windows"></a>
+
+Check command object for `check_update.exe` plugin.
+Querying Microsoft for Windows updates can take multiple seconds to minutes. An update is treated as important when it has the WSUS flag for SecurityUpdates or CriticalUpdates.
+
+> **Note**
+>
+> The Network Services Account which runs Icinga 2 by default does not have the required
+> permissions to run this check.
+
+Custom variables:
+
+Name | Description
+:-------------------|:------------
+update\_win\_warn | **Optional**. The warning threshold.
+update\_win\_crit | **Optional**. The critical threshold.
+update\_win\_reboot | **Optional**. Set to treat 'may need update' as 'definitely needs update'. Please note that this is true for almost every update and is therefore not recommended.
+ignore\_reboot | **Optional**. Set to disable the behavior of returning critical if any updates require a reboot.
+
+
+If a warning threshold is set but not a critical threshold, the critical threshold will be set to one greater than the set warning threshold.
+Unless the `ignore_reboot` flag is set, if any updates require a reboot the plugin will return critical.
+
+> **Note**
+>
+> If they are enabled, performance data will be shown in the web interface.
+> If run without the optional parameters, the plugin will output critical if any important updates are available.
+
+
+### uptime-windows <a id="windows-plugins-uptime-windows"></a>
+
+Check command object for `check_uptime.exe` plugin.
+Uses GetTickCount64 to get the uptime, so boot time is not included.
+
+Custom variables:
+
+Name | Description
+:-----------------|:------------
+uptime\_win\_warn | **Optional**. The warning threshold.
+uptime\_win\_crit | **Optional**. The critical threshold.
+uptime\_win\_unit | **Optional**. The unit to display the received value in, thresholds are interpreted in this unit. Defaults to "s" (seconds), possible values are ms (milliseconds), s, m (minutes), h (hours).
+
+
+### users-windows <a id="windows-plugins-users-windows"></a>
+
+Check command object for `check_users.exe` plugin.
+
+Custom variables:
+
+Name | Description
+:----------------|:------------
+users\_win\_warn | **Optional**. The warning threshold.
+users\_win\_crit | **Optional**. The critical threshold.
+
+### file-age-windows <a id="windows-plugins-file-age-windows"></a>
+
+Check command object for `check_file_age.cmd` command file and `check_file_age.cmd.ps1` plugin.
+
+Custom variables:
+
+Name | Description
+:---------------------|:------------
+file_age_win_file | **Required**. File name and location.
+file_age_win_warning | **Required**. The warning threshold of file age in seconds.
+file_age_win_critical | **Required**. The critical threshold of file age in seconds.
+
+All variables are required and all variables are positional. The variable order is: file warning critical.
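+
+A sketch of a service passing the three positional variables in the documented order (the host name, file path and thresholds are illustrative):
+
+```
+object Service "file-age-backup-log" {
+  host_name = "windows-host01"                        // hypothetical Windows host
+  check_command = "file-age-windows"
+
+  vars.file_age_win_file = "C:\\backup\\backup.log"   // hypothetical file path
+  vars.file_age_win_warning = 86400                   // one day in seconds
+  vars.file_age_win_critical = 172800                 // two days in seconds
+}
+```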
+
+The check_file_age.cmd and the check_file_age.cmd.ps1 files are available for [download](https://github.com/KAMI911/icinga2-basic/tree/master/plugins).
+
+## Plugin Check Commands for NSClient++ <a id="nscp-plugin-check-commands"></a>
+
+There are two methods available for querying NSClient++:
+
+* Query the [HTTP API](06-distributed-monitoring.md#distributed-monitoring-windows-nscp-check-api) locally from an Icinga 2 client (requires a running NSClient++ service)
+* Run a [local CLI check](10-icinga-template-library.md#nscp-check-local) (does not require NSClient++ as a service)
+
+Both methods have their advantages and disadvantages. One thing to
+note: If you rely on performance counter delta calculations such as
+CPU utilization, please use the HTTP API instead of the CLI sample call.
+
+For security reasons, it is advised to enable the NSClient++ HTTP API for local
+connections from the Icinga 2 client only. Remote connections to the legacy HTTP API
+are not recommended.
+
+### nscp_api <a id="nscp-check-api"></a>
+
+`check_nscp_api` is part of the Icinga 2 plugins. This plugin is available for
+both Windows and Linux/Unix.
+
+Verify that the ITL CheckCommand is included in the [icinga2.conf](04-configuration.md#icinga2-conf) configuration file:
+
+ vim /etc/icinga2/icinga2.conf
+
+ include <plugins>
+
+`check_nscp_api` runs queries against the NSClient++ API. Therefore NSClient++ needs to have
+the `webserver` module enabled, configured and loaded.
+
+You can install the webserver using the following CLI commands:
+
+ ./nscp.exe web install
+    ./nscp.exe web password --set icinga
+
+Now you can define specific [queries](https://docs.nsclient.org/reference/check/CheckHelpers.html#queries)
+and integrate them into Icinga 2.
+
+The check plugin `check_nscp_api` can be integrated with the `nscp_api` CheckCommand object:
+
+Custom variables:
+
+Name | Description
+:----------------------|:----------------------
+nscp\_api\_host | **Required**. NSCP API host address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+nscp\_api\_port | **Optional**. NSCP API port. Defaults to `8443`.
+nscp\_api\_password | **Required**. NSCP API password. Please check the NSCP documentation for setup details.
+nscp\_api\_query | **Required**. NSCP API query endpoint. Refer to the NSCP documentation for possible values.
+nscp\_api\_arguments | **Optional**. NSCP API arguments dictionary either as single strings or key-value pairs using `=`. Refer to the NSCP documentation.
+
+`nscp_api_arguments` can be used to pass required thresholds to the executed check. The example below
+checks the CPU utilization and specifies warning and critical thresholds.
+
+```
+check_nscp_api --host 10.0.10.148 --password icinga --query check_cpu --arguments show-all warning='load>40' critical='load>30'
+check_cpu CRITICAL: critical(5m: 48%, 1m: 36%), 5s: 0% | 'total 5m'=48%;40;30 'total 1m'=36%;40;30 'total 5s'=0%;40;30
+```
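+
+Expressed as an Icinga 2 service, the same check could look roughly like this sketch (the host name is a placeholder; password and thresholds mirror the CLI example above):
+
+```
+object Service "nscp-api-cpu" {
+  host_name = "windows-host01"        // hypothetical Windows host running NSClient++
+  check_command = "nscp_api"
+
+  vars.nscp_api_password = "icinga"   // the password set via 'nscp web password'
+  vars.nscp_api_query = "check_cpu"
+  vars.nscp_api_arguments = [ "show-all", "warning=load>40", "critical=load>30" ]
+}
+```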
+
+
+### nscp-local <a id="nscp-check-local"></a>
+
+Icinga 2 can use the `nscp client` command to run arbitrary NSClient++ checks locally on the client.
+
+You can enable these check commands by adding the following include directive in your
+[icinga2.conf](04-configuration.md#icinga2-conf) configuration file:
+
+ include <nscp>
+
+You can also optionally specify an alternative installation directory for NSClient++ by adding
+the NscpPath constant in your [constants.conf](04-configuration.md#constants-conf) configuration
+file:
+
+ const NscpPath = "C:\\Program Files (x86)\\NSClient++"
+
+By default Icinga 2 uses the Microsoft Installer API to determine where NSClient++ is installed. It should
+not be necessary to manually set this constant.
+
+Note that it is not necessary to run NSClient++ as a Windows service for these commands to work.
+
+The check command object for NSClient++ is available as `nscp-local`.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------
+nscp_log_level | **Optional.** The log level. Defaults to "critical".
+nscp_load_all | **Optional.** Whether to load all modules. Defaults to false.
+nscp_modules | **Optional.** An array of NSClient++ modules to load. Defaults to `[ "CheckSystem" ]`.
+nscp_boot | **Optional.** Whether to use the --boot option. Defaults to true.
+nscp_query | **Required.** The NSClient++ query. Try `nscp client -q x` for a list.
+nscp_arguments | **Optional.** An array of query arguments.
+nscp_showall | **Optional.** Shows more details in plugin output, default to false.
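+
+A sketch of a locally executed NSClient++ query (the host name, query and argument strings are illustrative; consult the NSClient++ documentation for the exact argument syntax of each query):
+
+```
+object Service "nscp-local-drivesize" {
+  host_name = "windows-host01"        // hypothetical Windows host with NSClient++ installed
+  check_command = "nscp-local"
+
+  vars.nscp_query = "check_drivesize"
+  vars.nscp_arguments = [ "drive=C:", "warning=used > 80%", "critical=used > 90%" ]
+  vars.nscp_showall = true
+}
+```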
+
+> **Tip**
+>
+> In order to measure CPU load, you'll need a running NSClient++ service.
+> Therefore it is advised to use a local [nscp-api](06-distributed-monitoring.md#distributed-monitoring-windows-nscp-check-api)
+> check against its REST API.
+
+### nscp-local-cpu <a id="nscp-check-local-cpu"></a>
+
+Check command object for the `check_cpu` NSClient++ plugin.
+
+Name | Description
+--------------------|------------------
+nscp_cpu_time | **Optional.** Calculate average usage for the given time intervals. Value has to be an array, default to [ "1m", "5m", "15m" ].
+nscp_cpu_warning | **Optional.** Threshold for WARNING state in percent, default to 80.
+nscp_cpu_critical | **Optional.** Threshold for CRITICAL state in percent, default to 90.
+nscp_cpu_arguments | **Optional.** Additional arguments.
+nscp_cpu_showall | **Optional.** Shows more details in plugin output, default to false.
+
+### nscp-local-memory <a id="nscp-check-local-memory"></a>
+
+Check command object for the `check_memory` NSClient++ plugin.
+
+Name | Description
+----------------------|------------------
+nscp_memory_committed | **Optional.** Check for committed memory, default to false.
+nscp_memory_physical | **Optional.** Check for physical memory, default to true.
+nscp_memory_free | **Optional.** Switch between checking free (true) or used memory (false), default to false.
+nscp_memory_warning | **Optional.** Threshold for WARNING state in percent or absolute (use MB, GB, ...), default to 80 (free=false) or 20 (free=true).
+nscp_memory_critical | **Optional.** Threshold for CRITICAL state in percent or absolute (use MB, GB, ...), default to 90 (free=false) or 10 (free=true).
+nscp_memory_arguments | **Optional.** Additional arguments.
+nscp_memory_showall | **Optional.** Shows more details in plugin output, default to false.
+
+### nscp-local-os-version <a id="nscp-check-local-os-version"></a>
+
+Check command object for the `check_os_version` NSClient++ plugin.
+
+This command has the same custom variables as the `nscp-local` check command.
+
+### nscp-local-pagefile <a id="nscp-check-local-pagefile"></a>
+
+Check command object for the `check_pagefile` NSClient++ plugin.
+
+This command has the same custom variables as the `nscp-local` check command.
+
+### nscp-local-process <a id="nscp-check-local-process"></a>
+
+Check command object for the `check_process` NSClient++ plugin.
+
+This command has the same custom variables as the `nscp-local` check command.
+
+### nscp-local-service <a id="nscp-check-local-service"></a>
+
+Check command object for the `check_service` NSClient++ plugin.
+
+Name | Description
+-----------------------|------------------
+nscp_service_name | **Required.** Name of service to check.
+nscp_service_type | **Optional.** Type to check, default to state.
+nscp_service_ok | **Optional.** State for which OK is returned, e.g. for type=state: running, stopped, ...
+nscp_service_otype | **Optional.** Dedicated type for nscp_service_ok, default to nscp_service_state.
+nscp_service_warning | **Optional.** State for which WARNING is returned.
+nscp_service_wtype | **Optional.** Dedicated type for nscp_service_warning, default to nscp_service_state.
+nscp_service_critical | **Optional.** State for which CRITICAL is returned.
+nscp_service_ctype | **Optional.** Dedicated type for nscp_service_critical, default to nscp_service_state.
+nscp_service_arguments | **Optional.** Additional arguments.
+nscp_service_showall | **Optional.** Shows more details in plugin output, default to true.
+
+### nscp-local-uptime <a id="nscp-check-local-uptime"></a>
+
+Check command object for the `check_uptime` NSClient++ plugin.
+
+This command has the same custom variables as the `nscp-local` check command.
+
+### nscp-local-version <a id="nscp-check-local-version"></a>
+
+Check command object for the `check_version` NSClient++ plugin.
+
+This command has the same custom variables as the `nscp-local` check command.
+In addition, the default value for `nscp_modules` is set to `[ "CheckHelpers" ]`.
+
+### nscp-local-disk <a id="nscp-check-local-disk"></a>
+
+Check command object for the `check_drivesize` NSClient++ plugin.
+
+Name | Description
+-----------------------|------------------
+nscp_disk_drive | **Optional.** Drive character, default to all drives. Can be an array if multiple drives should be monitored.
+nscp_disk_exclude | **Optional.** Drive character, default to none. Can be an array of drive characters if multiple drives should be excluded.
+nscp_disk_free | **Optional.** Switch between checking free space (free=true) or used space (free=false), default to false.
+nscp_disk_warning | **Optional.** Threshold for WARNING in percent or absolute (use MB, GB, ...), default to 80 (used) or 20 percent (free).
+nscp_disk_critical | **Optional.** Threshold for CRITICAL in percent or absolute (use MB, GB, ...), default to 90 (used) or 10 percent (free).
+nscp_disk_arguments | **Optional.** Additional arguments.
+nscp_disk_showall | **Optional.** Shows more details in plugin output, default to true.
+nscp_modules | **Optional.** An array of NSClient++ modules to load. Defaults to `[ "CheckDisk" ]`.
+
+### nscp-local-counter <a id="nscp-check-local-counter"></a>
+
+Check command object for the `check_pdh` NSClient++ plugin.
+
+Name | Description
+-----------------------|------------------
+nscp_counter_name | **Required.** Performance counter name.
+nscp_counter_warning | **Optional.** WARNING Threshold.
+nscp_counter_critical | **Optional.** CRITICAL Threshold.
+nscp_counter_arguments | **Optional.** Additional arguments.
+nscp_counter_showall | **Optional.** Shows more details in plugin output, default to false.
+nscp_counter_perfsyntax | **Optional.** Apply performance data label, e.g. `Total Processor Time` to avoid special character problems. Defaults to `nscp_counter_name`.
+
+### nscp-local-tasksched <a id="nscp-check-local-tasksched"></a>
+
+Check Command object for the `check_tasksched` NSClient++ plugin.
+You can check for a single task or for a complete folder (and sub folders) of tasks.
+
+Name | Description
+-----------------------|------------------
+nscp_tasksched_name | **Optional.** Name of the task to check.
+nscp_tasksched_folder | **Optional.** The folder in which the tasks to check reside.
+nscp_tasksched_recursive | **Optional.** Recurse sub folder, defaults to true.
+nscp_tasksched_hidden | **Optional.** Look for hidden tasks, defaults to false.
+nscp_tasksched_warning | **Optional.** Filter which marks items that generate a warning state, defaults to `exit_code != 0`.
+nscp_tasksched_critical | **Optional.** Filter which marks items that generate a critical state, defaults to `exit_code < 0`.
+nscp_tasksched_emptystate | **Optional.** Return status to use when nothing matched the filter, defaults to warning.
+nscp_tasksched_perfsyntax | **Optional.** Performance alias syntax, defaults to `%(title)`.
+nscp_tasksched_detailsyntax | **Optional.** Detail level syntax, defaults to `%(folder)/%(title): %(exit_code) != 0`.
+nscp_tasksched_arguments | **Optional.** Additional arguments.
+nscp_tasksched_showall | **Optional.** Shows more details in plugin output, default to false.
+nscp_modules | **Optional.** An array of NSClient++ modules to load. Defaults to `[ "CheckTaskSched" ]`.
+
+
+## Plugin Check Commands for Manubulon SNMP <a id="snmp-manubulon-plugin-check-commands"></a>
+
+The `SNMP Manubulon Plugin Check Commands` provide configuration for plugin check
+commands provided by the [SNMP Manubulon project](http://nagios.manubulon.com/index_snmp.html).
+
+**Note:** Some plugin parameters are only available in Debian packages or in a
+[forked repository](https://github.com/dnsmichi/manubulon-snmp) with patches applied.
+
+The SNMP Manubulon plugin check commands assume that the global constant named `ManubulonPluginDir`
+is set to the path where the Manubulon SNMP plugins are installed.
+
+You can enable these plugin check commands by adding the following include directive in your
+[icinga2.conf](04-configuration.md#icinga2-conf) configuration file:
+
+ include <manubulon>
+
+### Checks by Host Type
+
+**N/A** : Not available for this type.
+
+**SNMP** : Available for simple SNMP query.
+
+**??** : Untested.
+
+**Specific** : Script name for platform specific checks.
+
+
+ Host type | Interface | storage | load/cpu | mem | process | env | specific
+ ------------------------|------------|----------|-----------|-----|----------|-----|-------------------------
+ Linux | Yes | Yes | Yes | Yes | Yes | No |
+ Windows | Yes | Yes | Yes | Yes | Yes | No | check_snmp_win.pl
+ Cisco router/switch | Yes | N/A | Yes | Yes | N/A | Yes |
+ HP router/switch | Yes | N/A | Yes | Yes | N/A | No |
+ Bluecoat proxy | Yes | SNMP | Yes | SNMP| No | Yes |
+ CheckPoint on SPLAT | Yes | Yes | Yes | Yes | Yes | No | check_snmp_cpfw.pl
+ CheckPoint on Nokia IP | Yes | Yes | Yes | No | ?? | No | check_snmp_vrrp.pl
+ Boostedge | Yes | Yes | Yes | Yes | ?? | No | check_snmp_boostedge.pl
+ AS400 | Yes | Yes | Yes | Yes | No | No |
+ NetsecureOne Netbox | Yes | Yes | Yes | ?? | Yes | No |
+ Radware Linkproof | Yes | N/A | SNMP | SNMP| No | No | check_snmp_linkproof_nhr <br> check_snmp_vrrp.pl
+ IronPort | Yes | SNMP | SNMP | SNMP| No | Yes |
+ Cisco CSS | Yes | ?? | Yes | Yes | No | ?? | check_snmp_css.pl
+
+
+### snmp-env <a id="plugin-check-command-snmp-env"></a>
+
+Check command object for the [check_snmp_env.pl](http://nagios.manubulon.com/snmp_env.html) plugin.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+
+Name | Description
+------------------------|--------------
+snmp_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+snmp_nocrypt | **Optional.** Define SNMP encryption. If set to `false`, `snmp_v3` needs to be enabled. Defaults to `true` (no encryption).
+snmp_community | **Optional.** The SNMP community. Defaults to "public".
+snmp_port | **Optional.** The SNMP port connection.
+snmp_v2 | **Optional.** SNMP version to 2c. Defaults to false.
+snmp_v3 | **Optional.** SNMP version to 3. Defaults to false.
+snmp_login | **Optional.** SNMP version 3 username. Defaults to "snmpuser".
+snmp_password | **Required.** SNMP version 3 password. No value defined as default.
+snmp_v3_use_privpass | **Optional.** Define to use SNMP version 3 priv password. Defaults to false.
+snmp_v3_use_authprotocol| **Optional.** Define to use SNMP version 3 authentication protocol. Defaults to false.
+snmp_authprotocol | **Optional.** SNMP version 3 authentication protocol. Defaults to "md5,des".
+snmp_privpass | **Required.** SNMP version 3 priv password. No value defined as default.
+snmp_env_type | **Optional.** Environment Type [cisco|nokia|bc|iron|foundry|linux]. Defaults to "cisco".
+snmp_env_fan | **Optional.** Minimum fan rpm value (only needed for 'iron' & 'linux')
+snmp_env_celsius | **Optional.** Maximum temp in degrees celsius (only needed for 'iron' & 'linux')
+snmp_perf | **Optional.** Enable perfdata values. Defaults to true.
+snmp_timeout | **Optional.** The command timeout in seconds. Defaults to 5 seconds.
+
+### snmp-load <a id="plugin-check-command-snmp-load"></a>
+
+Check command object for the [check_snmp_load.pl](http://nagios.manubulon.com/snmp_load.html) plugin.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+
+Name | Description
+------------------------|--------------
+snmp_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+snmp_nocrypt | **Optional.** Define SNMP encryption. If set to `false`, `snmp_v3` needs to be enabled. Defaults to `true` (no encryption).
+snmp_community | **Optional.** The SNMP community. Defaults to "public".
+snmp_port | **Optional.** The SNMP port connection.
+snmp_v2 | **Optional.** SNMP version to 2c. Defaults to false.
+snmp_v3 | **Optional.** SNMP version to 3. Defaults to false.
+snmp_login | **Optional.** SNMP version 3 username. Defaults to "snmpuser".
+snmp_password | **Required.** SNMP version 3 password. No value defined as default.
+snmp_v3_use_privpass | **Optional.** Define to use SNMP version 3 priv password. Defaults to false.
+snmp_v3_use_authprotocol| **Optional.** Define to use SNMP version 3 authentication protocol. Defaults to false.
+snmp_authprotocol | **Optional.** SNMP version 3 authentication protocol. Defaults to "md5,des".
+snmp_privpass | **Required.** SNMP version 3 priv password. No value defined as default.
+snmp_warn | **Optional.** The warning threshold. Change the `snmp_load_type` var to "netsl" for using 3 values.
+snmp_crit | **Optional.** The critical threshold. Change the `snmp_load_type` var to "netsl" for using 3 values.
+snmp_load_type | **Optional.** Load type. Defaults to "stand". Check all available types in the [snmp load](http://nagios.manubulon.com/snmp_load.html) documentation.
+snmp_perf | **Optional.** Enable perfdata values. Defaults to true.
+snmp_timeout | **Optional.** The command timeout in seconds. Defaults to 5 seconds.
+
+### snmp-memory <a id="plugin-check-command-snmp-memory"></a>
+
+Check command object for the [check_snmp_mem.pl](http://nagios.manubulon.com/snmp_mem.html) plugin.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+snmp_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+snmp_nocrypt | **Optional.** Define SNMP encryption. If set to `false`, `snmp_v3` needs to be enabled. Defaults to `true` (no encryption).
+snmp_community | **Optional.** The SNMP community. Defaults to "public".
+snmp_port | **Optional.** The SNMP port connection.
+snmp_v2 | **Optional.** SNMP version to 2c. Defaults to false.
+snmp_v3 | **Optional.** SNMP version to 3. Defaults to false.
+snmp_login | **Optional.** SNMP version 3 username. Defaults to "snmpuser".
+snmp_password | **Required.** SNMP version 3 password. No value defined as default.
+snmp_v3_use_privpass | **Optional.** Define to use SNMP version 3 priv password. Defaults to false.
+snmp_v3_use_authprotocol| **Optional.** Define to use SNMP version 3 authentication protocol. Defaults to false.
+snmp_authprotocol | **Optional.** SNMP version 3 authentication protocol. Defaults to "md5,des".
+snmp_privpass | **Required.** SNMP version 3 priv password. No value defined as default.
+snmp_warn | **Optional.** The warning threshold.
+snmp_crit | **Optional.** The critical threshold.
+snmp_is_cisco | **Optional.** Change OIDs for Cisco switches. Defaults to false.
+snmp_is_hp | **Optional.** Change OIDs for HP/Procurve switches. Defaults to false.
+snmp_perf | **Optional.** Enable perfdata values. Defaults to true.
+snmp_memcached | **Optional.** Include cached memory in used memory, Defaults to false.
+snmp_membuffer | **Optional.** Exclude buffered memory in used memory, Defaults to false.
+snmp_timeout | **Optional.** The command timeout in seconds. Defaults to 5 seconds.
+
+### snmp-storage <a id="plugin-check-command-snmp-storage"></a>
+
+Check command object for the [check_snmp_storage.pl](http://nagios.manubulon.com/snmp_storage.html) plugin.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+snmp_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+snmp_nocrypt | **Optional.** Define SNMP encryption. If set to `false`, `snmp_v3` needs to be enabled. Defaults to `true` (no encryption).
+snmp_community | **Optional.** The SNMP community. Defaults to "public".
+snmp_port | **Optional.** The SNMP port connection.
+snmp_v2 | **Optional.** SNMP version to 2c. Defaults to false.
+snmp_v3 | **Optional.** SNMP version to 3. Defaults to false.
+snmp_login | **Optional.** SNMP version 3 username. Defaults to "snmpuser".
+snmp_password | **Required.** SNMP version 3 password. No value defined as default.
+snmp_v3_use_privpass | **Optional.** Define to use SNMP version 3 priv password. Defaults to false.
+snmp_v3_use_authprotocol| **Optional.** Define to use SNMP version 3 authentication protocol. Defaults to false.
+snmp_authprotocol | **Optional.** SNMP version 3 authentication protocol. Defaults to "md5,des".
+snmp_privpass | **Required.** SNMP version 3 priv password. No value defined as default.
+snmp_warn | **Optional.** The warning threshold.
+snmp_crit | **Optional.** The critical threshold.
+snmp_storage_name | **Optional.** Storage name. Default to regex "^/$$". More options available in the [snmp storage](http://nagios.manubulon.com/snmp_storage.html) documentation.
+snmp_storage_type | **Optional.** Filter by storage type. Valid options are Other, Ram, VirtualMemory, FixedDisk, RemovableDisk, FloppyDisk, CompactDisk, RamDisk, FlashMemory, or NetworkDisk. No value defined as default.
+snmp_perf | **Optional.** Enable perfdata values. Defaults to true.
+snmp_exclude | **Optional.** Select all storages except the one(s) selected by -m. No action on storage type selection.
+snmp_timeout | **Optional.** The command timeout in seconds. Defaults to 5 seconds.
+snmp_storage_olength | **Optional.** Max size of the SNMP message, useful in case of too long responses.
+
+### snmp-interface <a id="plugin-check-command-snmp-interface"></a>
+
+Check command object for the [check_snmp_int.pl](http://nagios.manubulon.com/snmp_int.html) plugin.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------------------|--------------
+snmp_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+snmp_nocrypt | **Optional.** Define SNMP encryption. If set to `false`, `snmp_v3` needs to be enabled. Defaults to `true` (no encryption).
+snmp_community | **Optional.** The SNMP community. Defaults to "public".
+snmp_port | **Optional.** The SNMP port connection.
+snmp_v2 | **Optional.** SNMP version to 2c. Defaults to false.
+snmp_v3 | **Optional.** SNMP version to 3. Defaults to false.
+snmp_login | **Optional.** SNMP version 3 username. Defaults to "snmpuser".
+snmp_password | **Required.** SNMP version 3 password. No value defined as default.
+snmp_v3_use_privpass | **Optional.** Define to use SNMP version 3 priv password. Defaults to false.
+snmp_v3_use_authprotocol | **Optional.** Define to use SNMP version 3 authentication protocol. Defaults to false.
+snmp_authprotocol | **Optional.** SNMP version 3 authentication protocol. Defaults to "md5,des".
+snmp_privpass | **Required.** SNMP version 3 priv password. No value defined as default.
+snmp_warn | **Optional.** The warning threshold.
+snmp_crit | **Optional.** The critical threshold.
+snmp_interface | **Optional.** Network interface name. Default to regex "eth0".
+snmp_interface_inverse | **Optional.** Inverse interface check: down is OK. Defaults to false if not set.
+snmp_interface_perf | **Optional.** Check the input/output bandwidth of the interface. Defaults to true.
+snmp_interface_label | **Optional.** Add label before speed in output: in=, out=, errors-out=, etc.
+snmp_interface_bits_bytes | **Optional.** Output performance data in bits/s or Bytes/s. **Depends** on snmp_interface_kbits set to true. Defaults to true.
+snmp_interface_percent | **Optional.** Output performance data in % of max speed. Defaults to false.
+snmp_interface_kbits | **Optional.** Make the warning and critical levels in KBits/s. Defaults to true.
+snmp_interface_megabytes | **Optional.** Make the warning and critical levels in Mbps or MBps. **Depends** on snmp_interface_kbits set to true. Defaults to true.
+snmp_interface_64bit | **Optional.** Use 64 bits counters instead of the standard counters when checking bandwidth & performance data for interface >= 1Gbps. Defaults to false.
+snmp_interface_errors | **Optional.** Add error & discard to Perfparse output. Defaults to true.
+snmp_interface_extended_checks | **Optional.** Also check the error and discard input/output. When enabled format of `snmp_warn` and `snmp_crit` changes to <In bytes>,<Out bytes>,<In error>,<Out error>,<In disc>,<Out disc>. More options available in the [snmp interface](http://nagios.manubulon.com/snmp_int.html) documentation. Defaults to false.
+snmp_interface_noregexp | **Optional.** Do not use regexp to match interface name in description OID. Defaults to false.
+snmp_interface_delta | **Optional.** Delta time of perfcheck. Defaults to "300" (5 min).
+snmp_interface_warncrit_percent | **Optional.** Make the warning and critical levels in % of reported interface speed. If set, **snmp_interface_megabytes** needs to be set to false. Defaults to false.
+snmp_interface_ifname | **Optional.** Switch from IF-MIB::ifDescr to IF-MIB::ifName when looking up the interface's name.
+snmp_interface_ifalias | **Optional.** Switch from IF-MIB::ifDescr to IF-MIB::ifAlias when looking up the interface's name.
+snmp_interface_weathermap | **Optional.** Output data for ["weathermap" lines](http://docs.nagvis.org/1.9/en_US/lines_weathermap_style.html) in NagVis. **Depends** on `snmp_interface_perf` set to true. Defaults to `false`. **Note**: Available in `check_snmp_int.pl v2.1.0`.
+snmp_perf | **Optional.** Enable perfdata values. Defaults to true.
+snmp_timeout | **Optional.** The command timeout in seconds. Defaults to 5 seconds.
+snmp_interface_admin | **Optional.** Use administrative status instead of operational. Defaults to false.
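+
+A sketch of a service checking a single interface's status (the host name, community and interface name are placeholders):
+
+```
+object Service "snmp-int-eth0" {
+  host_name = "snmp-device01"          // hypothetical SNMP-enabled host
+  check_command = "snmp-interface"
+
+  vars.snmp_community = "public"
+  vars.snmp_interface = "eth0"         // regular expression matched against the interface name
+}
+```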
+
+### snmp-process <a id="plugin-check-command-snmp-process"></a>
+
+Check command object for the [check_snmp_process.pl](http://nagios.manubulon.com/snmp_process.html) plugin.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+---------------------------|--------------
+snmp_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+snmp_nocrypt | **Optional.** Define SNMP encryption. If set to `false`, `snmp_v3` needs to be enabled. Defaults to `true` (no encryption).
+snmp_community | **Optional.** The SNMP community. Defaults to "public".
+snmp_port | **Optional.** The SNMP port connection.
+snmp_v2 | **Optional.** SNMP version to 2c. Defaults to false.
+snmp_v3 | **Optional.** SNMP version to 3. Defaults to false.
+snmp_login | **Optional.** SNMP version 3 username. Defaults to "snmpuser".
+snmp_password | **Required.** SNMP version 3 password. No value defined as default.
+snmp_v3_use_privpass | **Optional.** Define to use SNMP version 3 priv password. Defaults to false.
+snmp_v3_use_authprotocol | **Optional.** Define to use SNMP version 3 authentication protocol. Defaults to false.
+snmp_authprotocol | **Optional.** SNMP version 3 authentication protocol. Defaults to "md5,des".
+snmp_privpass | **Required.** SNMP version 3 priv password. No value defined as default.
+snmp_warn | **Optional.** The warning threshold.
+snmp_crit | **Optional.** The critical threshold.
+snmp_process_name | **Optional.** Name of the process (regexp). No trailing slash! Defaults to ".*".
+snmp_perf | **Optional.** Enable perfdata values. Defaults to true.
+snmp_timeout | **Optional.** The command timeout in seconds. Defaults to 5 seconds.
+snmp_process_use_params | **Optional.** Add process parameters to process name for regexp matching. Example: "named.*-t /var/named/chroot" will only select named process with this parameter. Defaults to false.
+snmp_process_use_fullpath | **Optional.** Use full path name instead of process name to select processes. Example: "/opt/app1/app1bin" will only select named process with this full path. Defaults to false.
+snmp_process_mem_usage | **Optional.** Define to check memory usage for the process. Defaults to false.
+snmp_process_mem_threshold | **Optional.** Defines the warning and critical thresholds in Mb when snmp_process_mem_usage set to true. Example "512,1024". Defaults to "0,0".
+snmp_process_cpu_usage | **Optional.** Define to check CPU usage for the process. Defaults to false.
+snmp_process_cpu_threshold | **Optional.** Defines the warning and critical thresholds in % when snmp_process_cpu_usage set to true. If more than one CPU, value can be > 100% : 100%=1 CPU. Example "15,50". Defaults to "0,0".
+
+### snmp-service <a id="plugin-check-command-snmp-service"></a>
+
+Check command object for the [check_snmp_win.pl](http://nagios.manubulon.com/snmp_windows.html) plugin.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+---------------------------|--------------
+snmp_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+snmp_nocrypt | **Optional.** Define SNMP encryption. If set to `false`, `snmp_v3` needs to be enabled. Defaults to `true` (no encryption).
+snmp_community | **Optional.** The SNMP community. Defaults to "public".
+snmp_port | **Optional.** The SNMP port connection.
+snmp_v2 | **Optional.** SNMP version to 2c. Defaults to false.
+snmp_v3 | **Optional.** SNMP version to 3. Defaults to false.
+snmp_login | **Optional.** SNMP version 3 username. Defaults to "snmpuser".
+snmp_password | **Required.** SNMP version 3 password. No value defined as default.
+snmp_v3_use_privpass | **Optional.** Define to use SNMP version 3 priv password. Defaults to false.
+snmp_v3_use_authprotocol | **Optional.** Define to use SNMP version 3 authentication protocol. Defaults to false.
+snmp_authprotocol | **Optional.** SNMP version 3 authentication protocol. Defaults to "md5,des".
+snmp_privpass | **Required.** SNMP version 3 priv password. No value defined as default.
+snmp_timeout | **Optional.** The command timeout in seconds. Defaults to 5 seconds.
+snmp_service_name          | **Optional.** Comma-separated list of service names (a Perl regular expression can be used for each one). Not case sensitive by default, e.g. ^dns$. Defaults to ".*".
+snmp_service_count | **Optional.** Compare matching services with a specified number instead of the number of names provided.
+snmp_service_showall | **Optional.** Show all services in the output, instead of only the non-active ones. Defaults to false.
+snmp_service_noregexp | **Optional.** Do not use regexp to match NAME in service description. Defaults to false.
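+
+An example service definition for this check command might look like this
+(host name, community and service name pattern are placeholders to adapt):
+
+```
+object Service "windows-dns-service" {
+  host_name = "windows-host"
+  check_command = "snmp-service"
+
+  vars.snmp_community = "public"
+  vars.snmp_service_name = "^DNS Server$"
+}
+```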
+
+
+## Contributed Plugin Check Commands <a id="plugin-contrib"></a>
+
+The contributed Plugin Check Commands provide various additional command definitions
+contributed by community members.
+
+These check commands assume that the global constant named `PluginContribDir`
+is set to the path where the user installs custom plugins. They can be enabled by
+uncommenting the corresponding line in [icinga2.conf](04-configuration.md#icinga2-conf):
+
+```
+vim /etc/icinga2/icinga2.conf
+
+include <plugin-contrib>
+```
+
+This is enabled by default since Icinga 2 2.5.0.
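+
+The constant itself is usually defined in `constants.conf`. A minimal sketch, assuming the
+contributed plugins are installed in the distribution's default plugin directory (adjust the
+path to your environment):
+
+```
+const PluginContribDir = "/usr/lib/nagios/plugins"
+```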
+
+### Big Data <a id="plugin-contrib-big-data"></a>
+
+This category contains plugins for various Big Data systems.
+
+#### cloudera_service_status <a id="plugin-contrib-command-cloudera_service_status"></a>
+
+The [cloudera_service_status](https://github.com/miso231/icinga2-cloudera-plugin) plugin
+uses the Cloudera Manager API to monitor cluster services.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------------|-----------------------------------------------------------------
+cloudera_host         | **Required.** Hostname of the Cloudera Manager server.
+cloudera_port         | **Optional.** Port where Cloudera Manager is listening. Defaults to 443.
+cloudera_user         | **Required.** The username for the API connection.
+cloudera_pass         | **Required.** The password for the API connection.
+cloudera_api_version  | **Required.** API version of Cloudera Manager.
+cloudera_cluster      | **Required.** The cluster name in Cloudera Manager.
+cloudera_service | **Required.** Name of cluster service to be checked.
+cloudera_verify_ssl | **Optional.** Verify SSL. Defaults to true.
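+
+An example service definition might look like this (all values are placeholders
+for your Cloudera Manager instance):
+
+```
+object Service "cloudera-hdfs" {
+  host_name = "cloudera-manager.example.com"
+  check_command = "cloudera_service_status"
+
+  vars.cloudera_host = "cloudera-manager.example.com"
+  vars.cloudera_user = "monitoring"
+  vars.cloudera_pass = "secret"
+  vars.cloudera_api_version = "v19"
+  vars.cloudera_cluster = "Cluster 1"
+  vars.cloudera_service = "HDFS"
+}
+```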
+
+#### cloudera_hdfs_space <a id="plugin-contrib-command-cloudera_hdfs_space"></a>
+
+The [cloudera_hdfs_space](https://github.com/miso231/icinga2-cloudera-plugin) plugin
+connects to the Hadoop NameNode and gets the used capacity of the selected disk.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------|-----------------------------------------------------------------
+cloudera_hdfs_space_host | **Required.** Namenode host to connect to.
+cloudera_hdfs_space_port | **Optional.** Namenode port (default 50070).
+cloudera_hdfs_space_disk | **Required.** HDFS disk to check.
+cloudera_hdfs_space_warn | **Required.** Warning threshold in percent.
+cloudera_hdfs_space_crit | **Required.** Critical threshold in percent.
+
+#### cloudera_hdfs_files <a id="plugin-contrib-command-cloudera_hdfs_files"></a>
+
+The [cloudera_hdfs_files](https://github.com/miso231/icinga2-cloudera-plugin) plugin
+connects to the Hadoop NameNode and gets the total number of files on HDFS.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------|-----------------------------------------------------------------
+cloudera_hdfs_files_host | **Required.** Namenode host to connect to.
+cloudera_hdfs_files_port | **Optional.** Namenode port (default 50070).
+cloudera_hdfs_files_warn | **Required.** Warning threshold.
+cloudera_hdfs_files_crit | **Required.** Critical threshold.
+cloudera_hdfs_files_max   | **Required.** Maximum file count that causes problems (default 140,000,000).
+
+### Databases <a id="plugin-contrib-databases"></a>
+
+This category contains plugins for various database servers.
+
+#### db2_health <a id="plugin-contrib-command-db2_health"></a>
+
+The [check_db2_health](https://labs.consol.de/nagios/check_db2_health/) plugin
+uses the `DBD::DB2` Perl library to monitor a [DB2](https://www.ibm.com/support/knowledgecenter/SSEPGG_11.1.0/)
+database.
+
+The Git repository is located on [GitHub](https://github.com/lausser/check_db2_health).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+---------------------------------|------------------------------------------------------------------------------------------------------------------------------
+db2_health_database              | **Required.** The name of the database. (If it was catalogued locally, this parameter and `db2_health_not_catalogued = false` are the only ones you need. Otherwise you must specify database, hostname and port.)
+db2_health_username | **Optional.** The username for the database connection.
+db2_health_password | **Optional.** The password for the database connection.
+db2_health_port | **Optional.** The port where DB2 is listening.
+db2_health_warning | **Optional.** The warning threshold depending on the mode.
+db2_health_critical | **Optional.** The critical threshold depending on the mode.
+db2_health_mode | **Required.** The mode uses predefined keywords for the different checks. For example "connection-time", "database-usage" or "sql".
+db2_health_method                | **Optional.** This tells the plugin how to connect to the database. The only method implemented so far is "dbi", which is the default (it means the plugin uses the Perl module DBD::DB2).
+db2_health_name | **Optional.** The tablespace, datafile, wait event, latch, enqueue depending on the mode or SQL statement to be executed with "db2_health_mode" sql.
+db2_health_name2 | **Optional.** If "db2_health_name" is a sql statement, "db2_health_name2" can be used to appear in the output and the performance data.
+db2_health_regexp | **Optional.** If set to true, "db2_health_name" will be interpreted as a regular expression. Defaults to false.
+db2_health_units | **Optional.** This is used for a better output of mode=sql and for specifying thresholds for mode=tablespace-free. Possible values are "%", "KB", "MB" and "GB".
+db2_health_maxinactivity | **Optional.** Used for the maximum amount of time a certain event has not happened.
+db2_health_mitigation | **Optional.** Classifies the severity of an offline tablespace.
+db2_health_lookback              | **Optional.** How many days in the past the db2_health check should look back to calculate the exit code.
+db2_health_report | **Optional.** Report can be used to output only the bad news. Possible values are "short", "long", "html". Defaults to `short`.
+db2_health_not_catalogued        | **Optional.** Set this variable to false if you want to use a locally catalogued database. Defaults to `true`.
+db2_health_env_db2_home | **Required.** Specifies the location of the db2 client libraries as environment variable `DB2_HOME`. Defaults to "/opt/ibm/db2/V10.5".
+db2_health_env_db2_version | **Optional.** Specifies the DB2 version as environment variable `DB2_VERSION`.
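+
+An example service definition might look like this (database name, credentials,
+mode and thresholds are placeholders to adapt):
+
+```
+object Service "db2-connection-time" {
+  host_name = "db2-host"
+  check_command = "db2_health"
+
+  vars.db2_health_database = "TOOLSDB"
+  vars.db2_health_username = "monitor"
+  vars.db2_health_password = "secret"
+  vars.db2_health_mode = "connection-time"
+  vars.db2_health_warning = 1
+  vars.db2_health_critical = 5
+}
+```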
+
+#### mssql_health <a id="plugin-contrib-command-mssql_health"></a>
+
+The [check_mssql_health](https://labs.consol.de/nagios/check_mssql_health/index.html) plugin
+uses the `DBD::Sybase` Perl library based on [FreeTDS](https://www.freetds.org/) to monitor a
+[MS SQL](https://www.microsoft.com/en-us/sql-server/) server.
+
+The Git repository is located on [GitHub](https://github.com/lausser/check_mssql_health).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+---------------------------------|------------------------------------------------------------------------------------------------------------------------------
+mssql_health_hostname | **Optional.** Specifies the database hostname or address. No default because you typically use "mssql_health_server".
+mssql_health_username | **Optional.** The username for the database connection.
+mssql_health_password | **Optional.** The password for the database connection.
+mssql_health_port | **Optional.** Specifies the database port. No default because you typically use "mssql_health_server".
+mssql_health_server | **Optional.** The name of a predefined connection (in freetds.conf).
+mssql_health_currentdb | **Optional.** The name of a database which is used as the current database for the connection.
+mssql_health_offlineok | **Optional.** Set this to true if offline databases are perfectly ok for you. Defaults to false.
+mssql_health_nooffline | **Optional.** Set this to true to ignore offline databases. Defaults to false.
+mssql_health_dbthresholds | **Optional.** With this parameter thresholds are read from the database table check_mssql_health_thresholds.
+mssql_health_notemp | **Optional.** Set this to true to ignore temporary databases/tablespaces. Defaults to false.
+mssql_health_commit | **Optional.** Set this to true to turn on autocommit for the dbd::sybase module. Defaults to false.
+mssql_health_method | **Optional.** How the plugin should connect to the database (dbi for the perl module `DBD::Sybase` (default) and `sqlrelay` for the SQLRelay proxy).
+mssql_health_mode | **Required.** The mode uses predefined keywords for the different checks. For example "connection-time", "database-free" or "sql".
+mssql_health_regexp | **Optional.** If set to true, "mssql_health_name" will be interpreted as a regular expression. Defaults to false.
+mssql_health_warning | **Optional.** The warning threshold depending on the mode.
+mssql_health_critical | **Optional.** The critical threshold depending on the mode.
+mssql_health_warningx | **Optional.** A possible override for the warning threshold.
+mssql_health_criticalx | **Optional.** A possible override for the critical threshold.
+mssql_health_units | **Optional.** This is used for a better output of mode=sql and for specifying thresholds for mode=tablespace-free. Possible values are "%", "KB", "MB" and "GB".
+mssql_health_name | **Optional.** Depending on the mode this could be the database name or a SQL statement.
+mssql_health_name2 | **Optional.** If "mssql_health_name" is a sql statement, "mssql_health_name2" can be used to appear in the output and the performance data.
+mssql_health_name3 | **Optional.** Additional argument used for 'database-file-free' mode for example.
+mssql_health_extraopts | **Optional.** Read command line arguments from an external file.
+mssql_health_blacklist           | **Optional.** Blacklist some (missing/failed) components.
+mssql_health_mitigation | **Optional.** The parameter allows you to change a critical error to a warning.
+mssql_health_lookback | **Optional.** The amount of time you want to look back when calculating average rates.
+mssql_health_environment | **Optional.** Add a variable to the plugin's environment.
+mssql_health_negate | **Optional.** Emulate the negate plugin. --negate warning=critical --negate unknown=critical.
+mssql_health_morphmessage | **Optional.** Modify the final output message.
+mssql_health_morphperfdata | **Optional.** The parameter allows you to change performance data labels.
+mssql_health_selectedperfdata | **Optional.** The parameter allows you to limit the list of performance data.
+mssql_health_report | **Optional.** Report can be used to output only the bad news. Possible values are "short", "long", "html". Defaults to `short`.
+mssql_health_multiline | **Optional.** Multiline output.
+mssql_health_withmymodulesdyndir | **Optional.** Add-on modules for the my-modes will be searched in this directory.
+mssql_health_statefilesdir | **Optional.** An alternate directory where the plugin can save files.
+mssql_health_isvalidtime | **Optional.** Signals the plugin to return OK if now is not a valid check time.
+mssql_health_timeout | **Optional.** Plugin timeout. Defaults to 15s.
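+
+An example service definition might look like this (the server name refers to a
+connection predefined in freetds.conf; credentials and mode are placeholders):
+
+```
+object Service "mssql-connection-time" {
+  host_name = "mssql-host"
+  check_command = "mssql_health"
+
+  vars.mssql_health_server = "mssqlserver"
+  vars.mssql_health_username = "monitor"
+  vars.mssql_health_password = "secret"
+  vars.mssql_health_mode = "connection-time"
+}
+```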
+
+#### mysql_health <a id="plugin-contrib-command-mysql_health"></a>
+
+The [check_mysql_health](https://labs.consol.de/nagios/check_mysql_health/index.html) plugin
+uses the `DBD::MySQL` Perl library to monitor a
+[MySQL](https://dev.mysql.com/downloads/mysql/) or [MariaDB](https://mariadb.org/about/) database.
+
+The Git repository is located on [GitHub](https://github.com/lausser/check_mysql_health).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+---------------------------------|------------------------------------------------------------------------------------------------------------------------------
+mysql_health_hostname | **Required.** Specifies the database hostname or address. Defaults to "$address$" or "$address6$" if the `address` attribute is not set.
+mysql_health_port | **Optional.** Specifies the database port. Defaults to 3306 (or 1186 for "mysql_health_mode" cluster).
+mysql_health_socket | **Optional.** Specifies the database unix socket. No default.
+mysql_health_username | **Optional.** The username for the database connection.
+mysql_health_password | **Optional.** The password for the database connection.
+mysql_health_database | **Optional.** The database to connect to. Defaults to information_schema.
+mysql_health_warning | **Optional.** The warning threshold depending on the mode.
+mysql_health_critical | **Optional.** The critical threshold depending on the mode.
+mysql_health_warningx | **Optional.** The extended warning thresholds depending on the mode.
+mysql_health_criticalx | **Optional.** The extended critical thresholds depending on the mode.
+mysql_health_mode | **Required.** The mode uses predefined keywords for the different checks. For example "connection-time", "slave-lag" or "sql".
+mysql_health_method | **Optional.** How the plugin should connect to the database (`dbi` for using DBD::Mysql (default), `mysql` for using the mysql-Tool).
+mysql_health_commit | **Optional.** Turns on autocommit for the dbd::\* module.
+mysql_health_notemp | **Optional.** Ignore temporary databases/tablespaces.
+mysql_health_nooffline | **Optional.** Skip the offline databases.
+mysql_health_regexp | **Optional.** Parameter name/name2/name3 will be interpreted as (perl) regular expression.
+mysql_health_name | **Optional.** The name of a specific component to check.
+mysql_health_name2 | **Optional.** The secondary name of a component.
+mysql_health_name3 | **Optional.** The tertiary name of a component.
+mysql_health_units | **Optional.** This is used for a better output of mode=sql and for specifying thresholds for mode=tablespace-free. Possible values are "%", "KB", "MB" and "GB".
+mysql_health_labelformat         | **Optional.** One of the formats pnp4nagios or groundwork. Defaults to pnp4nagios.
+mysql_health_extraopts | **Optional.** Read command line arguments from an external file.
+mysql_health_blacklist           | **Optional.** Blacklist some (missing/failed) components.
+mysql_health_mitigation | **Optional.** The parameter allows you to change a critical error to a warning.
+mysql_health_lookback | **Optional.** The amount of time you want to look back when calculating average rates.
+mysql_health_environment | **Optional.** Add a variable to the plugin's environment.
+mysql_health_morphmessage | **Optional.** Modify the final output message.
+mysql_health_morphperfdata | **Optional.** The parameter allows you to change performance data labels.
+mysql_health_selectedperfdata | **Optional.** The parameter allows you to limit the list of performance data.
+mysql_health_report | **Optional.** Can be used to shorten the output.
+mysql_health_multiline | **Optional.** Multiline output.
+mysql_health_negate | **Optional.** Emulate the negate plugin. --negate warning=critical --negate unknown=critical.
+mysql_health_withmymodulesdyndir | **Optional.** Add-on modules for the my-modes will be searched in this directory.
+mysql_health_statefilesdir | **Optional.** An alternate directory where the plugin can save files.
+mysql_health_isvalidtime | **Optional.** Signals the plugin to return OK if now is not a valid check time.
+mysql_health_timeout | **Optional.** Plugin timeout. Defaults to 60s.
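+
+An example service definition might look like this (credentials, mode and
+thresholds are placeholders to adapt):
+
+```
+object Service "mysql-slave-lag" {
+  host_name = "mysql-host"
+  check_command = "mysql_health"
+
+  vars.mysql_health_username = "monitor"
+  vars.mysql_health_password = "secret"
+  vars.mysql_health_mode = "slave-lag"
+  vars.mysql_health_warning = 60
+  vars.mysql_health_critical = 300
+}
+```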
+
+#### oracle_health <a id="plugin-contrib-command-oracle_health"></a>
+
+The [check_oracle_health](https://labs.consol.de/nagios/check_oracle_health/index.html) plugin
+uses the `DBD::Oracle` Perl library to monitor an [Oracle](https://www.oracle.com/database/) database.
+
+The Git repository is located on [GitHub](https://github.com/lausser/check_oracle_health).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+---------------------------------|------------------------------------------------------------------------------------------------------------------------------
+oracle_health_connect | **Required.** Specifies the database connection string (from tnsnames.ora).
+oracle_health_username | **Optional.** The username for the database connection.
+oracle_health_password | **Optional.** The password for the database connection.
+oracle_health_warning | **Optional.** The warning threshold depending on the mode.
+oracle_health_critical | **Optional.** The critical threshold depending on the mode.
+oracle_health_mode | **Required.** The mode uses predefined keywords for the different checks. For example "connection-time", "flash-recovery-area-usage" or "sql".
+oracle_health_method | **Optional.** How the plugin should connect to the database (`dbi` for using DBD::Oracle (default), `sqlplus` for using the sqlplus-Tool).
+oracle_health_name | **Optional.** The tablespace, datafile, wait event, latch, enqueue depending on the mode or SQL statement to be executed with "oracle_health_mode" sql.
+oracle_health_name2 | **Optional.** If "oracle_health_name" is a sql statement, "oracle_health_name2" can be used to appear in the output and the performance data.
+oracle_health_regexp | **Optional.** If set to true, "oracle_health_name" will be interpreted as a regular expression. Defaults to false.
+oracle_health_units | **Optional.** This is used for a better output of mode=sql and for specifying thresholds for mode=tablespace-free. Possible values are "%", "KB", "MB" and "GB".
+oracle_health_ident | **Optional.** If set to true, outputs instance and database names. Defaults to false.
+oracle_health_commit | **Optional.** Set this to true to turn on autocommit for the dbd::oracle module. Defaults to false.
+oracle_health_noperfdata | **Optional.** Set this to true if you want to disable perfdata. Defaults to false.
+oracle_health_timeout | **Optional.** Plugin timeout. Defaults to 60s.
+oracle_health_report | **Optional.** Select the plugin output format. Can be short or long. Defaults to long.
+oracle_health_notemp | **Optional.** Set this to true to hide temporary and system tablespaces. Defaults to false.
+
+Environment Macros:
+
+Name | Description
+--------------------|------------------------------------------------------------------------------------------------------------------------------------------
+ORACLE\_HOME | **Required.** Specifies the location of the oracle instant client libraries. Defaults to "/usr/lib/oracle/11.2/client64/lib". Can be overridden by setting the custom variable `oracle_home`.
+LD\_LIBRARY\_PATH | **Required.** Specifies the location of the oracle instant client libraries for the run-time shared library loader. Defaults to "/usr/lib/oracle/11.2/client64/lib". Can be overridden by setting the custom variable `oracle_ld_library_path`.
+TNS\_ADMIN | **Required.** Specifies the location of the tnsnames.ora including the database connection strings. Defaults to "/etc/icinga2/plugin-configs". Can be overridden by setting the custom variable `oracle_tns_admin`.
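+
+An example service definition might look like this (connection string, credentials,
+mode and the client library path are placeholders to adapt):
+
+```
+object Service "oracle-tablespace-usage" {
+  host_name = "oracle-host"
+  check_command = "oracle_health"
+
+  vars.oracle_health_connect = "ORCL"
+  vars.oracle_health_username = "monitor"
+  vars.oracle_health_password = "secret"
+  vars.oracle_health_mode = "tablespace-usage"
+
+  // Override the ORACLE_HOME environment macro default if your client
+  // libraries live somewhere else.
+  vars.oracle_home = "/usr/lib/oracle/11.2/client64/lib"
+}
+```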
+
+#### postgres <a id="plugin-contrib-command-postgres"></a>
+
+The [check_postgres](https://bucardo.org/wiki/Check_postgres) plugin
+uses the `psql` binary to monitor a [PostgreSQL](https://www.postgresql.org/about/) database.
+
+The Git repository is located on [GitHub](https://github.com/bucardo/check_postgres).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+---------------------------------|------------------------------------------------------------------------------------------------------------------------------
+postgres_host | **Optional.** Specifies the database hostname or address. Defaults to "$address$" or "$address6$" if the `address` attribute is not set. If "postgres_unixsocket" is set to true, falls back to unix socket.
+postgres_port | **Optional.** Specifies the database port. Defaults to 5432.
+postgres_dbname | **Optional.** Specifies the database name to connect to. Defaults to "postgres" or "template1".
+postgres_dbuser | **Optional.** The username for the database connection. Defaults to "postgres".
+postgres_dbpass | **Optional.** The password for the database connection. You can use a .pgpass file instead.
+postgres_dbservice | **Optional.** Specifies the service name to use inside of pg_service.conf.
+postgres_warning | **Optional.** Specifies the warning threshold, range depends on the action.
+postgres_critical | **Optional.** Specifies the critical threshold, range depends on the action.
+postgres_include                 | **Optional.** Specifies the name(s) of items to specifically include (e.g. tables), depending on the action.
+postgres_exclude                 | **Optional.** Specifies the name(s) of items to specifically exclude (e.g. tables), depending on the action.
+postgres_includeuser | **Optional.** Include objects owned by certain users.
+postgres_excludeuser | **Optional.** Exclude objects owned by certain users.
+postgres_standby | **Optional.** Assume that the server is in continuous WAL recovery mode if set to true. Defaults to false.
+postgres_production | **Optional.** Assume that the server is in production mode if set to true. Defaults to false.
+postgres_action | **Required.** Determines the test executed.
+postgres_unixsocket | **Optional.** If "postgres_unixsocket" is set to true, the unix socket is used instead of an address. Defaults to false.
+postgres_query | **Optional.** Query for "custom_query" action.
+postgres_valtype | **Optional.** Value type of query result for "custom_query".
+postgres_reverse | **Optional.** If "postgres_reverse" is set, warning and critical values are reversed for "custom_query" action.
+postgres_tempdir | **Optional.** Specify directory for temporary files. The default directory is dependent on the OS. More details [here](https://perldoc.perl.org/File/Spec.html).
+postgres_datadir | **Optional.** Specifies the database directory (PGDATA). This information is required for some actions, such as "bloat", "locks" and "prepared_txns".
+postgres_language | **Optional.** Specifies the language for messages issued by the plugin. The default language depends on the system configuration.
+postgres_perflimit | **Optional.** Specifies the maximum number of performance data values returned by the plugin. The default is to return all performance data.
+postgres_pgcontroldata | **Optional.** Full path to the pg_controldata command line utility, e.g. "/usr/pgsql-12/bin/pg_controldata".
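+
+An example service definition might look like this (database user, action and
+thresholds are placeholders to adapt):
+
+```
+object Service "postgres-backends" {
+  host_name = "postgres-host"
+  check_command = "postgres"
+
+  vars.postgres_dbuser = "monitoring"
+  vars.postgres_action = "backends"
+  vars.postgres_warning = "80%"
+  vars.postgres_critical = "95%"
+}
+```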
+
+#### mongodb <a id="plugin-contrib-command-mongodb"></a>
+
+The [check_mongodb.py](https://github.com/mzupan/nagios-plugin-mongodb) plugin
+uses the `pymongo` Python library to monitor a [MongoDB](https://docs.mongodb.com/manual/) instance.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+---------------------------------|------------------------------------------------------------------------------------------------------------------------------
+mongodb_host | **Required.** Specifies the hostname or address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+mongodb_port | **Required.** The port mongodb is running on.
+mongodb_user | **Optional.** The username you want to login as.
+mongodb_passwd | **Optional.** The password you want to use for that user.
+mongodb_authdb | **Optional.** The database you want to authenticate against.
+mongodb_warning | **Optional.** The warning threshold we want to set.
+mongodb_critical | **Optional.** The critical threshold we want to set.
+mongodb_action | **Required.** The action you want to take.
+mongodb_maxlag | **Optional.** Get max replication lag (for replication_lag action only).
+mongodb_mappedmemory | **Optional.** Get mapped memory instead of resident (if resident memory can not be read).
+mongodb_perfdata | **Optional.** Enable output of Nagios performance data.
+mongodb_database | **Optional.** Specify the database to check.
+mongodb_alldatabases | **Optional.** Check all databases (action database_size).
+mongodb_ssl | **Optional.** Connect using SSL.
+mongodb_ssl_ca_cert_file | **Optional.** Path to certificate authority file for SSL.
+mongodb_replicaset | **Optional.** Connect to replicaset.
+mongodb_replcheck | **Optional.** If set to true, will enable the mongodb_replicaset value needed for "replica_primary" check.
+mongodb_querytype | **Optional.** The query type to check [query\|insert\|update\|delete\|getmore\|command] from queries_per_second.
+mongodb_collection | **Optional.** Specify the collection to check.
+mongodb_sampletime | **Optional.** Time used to sample number of pages faults.
+mongodb_disableretrywrites | **Optional.** If set to true, will disable Retry Writes, to allow counting the QPS.
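+
+An example service definition might look like this (port, action and thresholds
+are placeholders to adapt):
+
+```
+object Service "mongodb-connect" {
+  host_name = "mongodb-host"
+  check_command = "mongodb"
+
+  vars.mongodb_port = 27017
+  vars.mongodb_action = "connect"
+  vars.mongodb_warning = 2
+  vars.mongodb_critical = 5
+}
+```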
+
+#### elasticsearch <a id="plugin-contrib-command-elasticsearch"></a>
+
+The [check_elasticsearch](https://github.com/anchor/nagios-plugin-elasticsearch) plugin
+uses the HTTP API to monitor an [Elasticsearch](https://www.elastic.co/products/elasticsearch) node.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-----------------------------|-------------------------------------------------------------------------------------------------------
+elasticsearch_host | **Optional.** Hostname or network address to probe. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+elasticsearch_failuredomain | **Optional.** A comma-separated list of ElasticSearch attributes that make up your cluster's failure domain.
+elasticsearch_masternodes | **Optional.** Issue a warning if the number of master-eligible nodes in the cluster drops below this number. By default, do not monitor the number of nodes in the cluster.
+elasticsearch_port | **Optional.** TCP port to probe. The ElasticSearch API should be listening here. Defaults to 9200.
+elasticsearch_prefix | **Optional.** Optional prefix (e.g. 'es') for the ElasticSearch API. Defaults to ''.
+elasticsearch_yellowcritical | **Optional.** Instead of issuing a 'warning' for a yellow cluster state, issue a 'critical' alert. Defaults to false.
+
+#### redis <a id="plugin-contrib-command-redis"></a>
+
+The [check_redis.pl](https://github.com/willixix/naglio-plugins/blob/master/check_redis.pl) plugin
+uses the `Redis` Perl library to monitor a [Redis](https://redis.io/) instance. The plugin can
+measure response time, hitrate, memory utilization, check replication synchronization, etc. It is
+also possible to test data in a specified key and calculate averages or summaries on ranges.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-------------------------|--------------------------------------------------------------------------------------------------------------
+redis_hostname | **Required.** Hostname or IP Address to check. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+redis_port               | **Optional.** Port number to query. Defaults to "6379".
+redis_database | **Optional.** Database name (usually a number) to query, needed for **redis_query**.
+redis_password           | **Optional.** Password for Redis authentication. A safer alternative is to put it in a file and use **redis_credentials**.
+redis_credentials | **Optional.** Credentials file to read for Redis authentication.
+redis_timeout            | **Optional.** Allows setting a timeout for the execution of this plugin.
+redis_variables          | **Optional.** List of variables from info data to do threshold checks on.
+redis_warn               | **Optional.** This option can only be used if **redis_variables** is used and the number of values listed here must exactly match the number of variables specified.
+redis_crit               | **Optional.** This option can only be used if **redis_variables** is used and the number of values listed here must exactly match the number of variables specified.
+redis_perfparse          | **Optional.** This should only be used with variables and causes variable data not only to be printed as part of the main status line but also as perfparse-compatible output. Defaults to false.
+redis_perfvars           | **Optional.** This allows listing variables whose values will go only into the perfparse output (and not be used for threshold checking).
+redis_prev_perfdata | **Optional.** If set to true, previous performance data are used to calculate rate of change for counter statistics variables and for proper calculation of hitrate. Defaults to false.
+redis_rate_label | **Optional.** Prefix or Suffix label used to create a new variable which has rate of change of another base variable. You can specify PREFIX or SUFFIX or both as one string separated by ",". Default if not specified is suffix "_rate".
+redis_query | **Optional.** Option specifies key to query and optional variable name to assign the results to after.
+redis_option | **Optional.** Specifiers are separated by "," and must include NAME or PATTERN.
+redis_response_time | **Optional.** If this is used, plugin will measure and output connection response time in seconds. With **redis_perfparse** this would also be provided on perf variables.
+redis_hitrate            | **Optional.** Calculates the hitrate; the specified values are interpreted as WARNING and CRITICAL thresholds.
+redis_memory_utilization | **Optional.** This calculates the percentage of total system memory used by Redis. The total memory of the server must be specified with **redis_total_memory**. If specified by itself, the plugin will just output this info. Parameter values are interpreted as WARNING and CRITICAL thresholds.
+redis_total_memory       | **Optional.** Amount of memory on the system for the memory utilization calculation. Use the system memory or the max_memory setting of Redis.
+redis_replication_delay  | **Optional.** Allows setting a threshold on the replication delay info.
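+
+An example service definition might look like this (port and threshold values
+are placeholders to adapt):
+
+```
+object Service "redis" {
+  host_name = "redis-host"
+  check_command = "redis"
+
+  vars.redis_port = 6379
+  vars.redis_hitrate = "90,80"
+}
+```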
+
+#### proxysql <a id="plugin-contrib-command-proxysql"></a>
+
+The [check_proxysql](https://github.com/sysown/proxysql-nagios) plugin
+uses the `proxysql` binary to monitor [ProxySQL](https://proxysql.com/).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-----------------------------|----------------------------------------------------------------------------------
+proxysql_user | **Optional.** ProxySQL admin username (default=admin)
+proxysql_password | **Optional.** ProxySQL admin password (default=admin)
+proxysql_host | **Optional.** ProxySQL hostname / IP (default=127.0.0.1)
+proxysql_port | **Optional.** ProxySQL admin port (default=6032)
+proxysql_defaultfile | **Optional.** ProxySQL defaults file
+proxysql_type | **Required.** ProxySQL check type (one of conns,hg,rules,status,var)
+proxysql_name | **Optional.** ProxySQL variable name to check
+proxysql_lower               | **Optional.** Alert if the ProxySQL value is LOWER than the defined WARN / CRIT thresholds (only applies to 'var' check type)
+proxysql_runtime | **Optional.** Force ProxySQL Nagios check to query the runtime_mysql_XXX tables rather than the mysql_XXX tables
+proxysql_warning | **Optional.** Warning threshold
+proxysql_critical | **Optional.** Critical threshold
+proxysql\_include\_hostgroup | **Optional.** ProxySQL hostgroup(s) to include (only applies to '--type hg' checks, accepts comma-separated list)
+proxysql\_ignore\_hostgroup | **Optional.** ProxySQL hostgroup(s) to ignore (only applies to '--type hg' checks, accepts comma-separated list)
+
+#### memcached <a id="plugin-contrib-command-memcached"></a>
+
+The [check_memcached](https://exchange.icinga.com/exchange/check_memcached) plugin
+checks the health of a running [memcached](https://memcached.org/) service.
+
+On Debian/Ubuntu, it is provided with the `nagios-plugin-contrib` package.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------------|----------------------------------------------------------------------------------
+memcached_hostname              | **Required.** Hostname or IP address; an optional ':port' suffix overrides the port option
+memcached_port                  | **Optional.** Port number (default: 11211)
+memcached_verbose               | **Optional.** Verbose messages
+memcached_keep                  | **Optional.** Keep up to this many items in the history object in memcached (default: 30)
+memcached_minimum_stat_interval | **Optional.** Minimum time interval (in minutes) to use to analyse stats (default: 30)
+memcached_warning_hits_misses   | **Optional.** Generate a warning if the quotient of hits/misses falls below this value (default: 2.0)
+memcached_warning_evictions     | **Optional.** Generate a warning if the number of evictions exceeds this threshold. 0=disable (default: 10)
+memcached_timeout               | **Optional.** Timeout in seconds (default: 1.0)
+memcached_key                   | **Optional.** Key name for the history object (default: check_memcached)
+memcached_expiry                | **Optional.** Expiry time in seconds for the history object (default: 7200)
+memcached_performance_output    | **Optional.** Output performance statistics as rate-per-minute figures (better suited to pnp4nagios)
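+
+An example service definition might look like this (host, port and timeout are
+placeholders to adapt):
+
+```
+object Service "memcached" {
+  host_name = "cache-host"
+  check_command = "memcached"
+
+  vars.memcached_hostname = "127.0.0.1"
+  vars.memcached_port = 11211
+  vars.memcached_timeout = 2
+}
+```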
+
+### Hardware <a id="plugin-contrib-hardware"></a>
+
+This category includes all plugin check commands for various hardware checks.
+
+#### hpasm <a id="plugin-contrib-command-hpasm"></a>
+
+The [check_hpasm](https://labs.consol.de/de/nagios/check_hpasm/index.html) plugin
+monitors the hardware health of HP Proliant Servers, provided that the `hpasm`
+(HP Advanced Server Management) software is installed. It is also able to monitor
+the system health of HP Bladesystems and storage systems.
+
+The plugin can run in two different ways:
+
+1. Local execution using the `hpasmcli` command line tool.
+2. Remote SNMP query which invokes the HP Insight Tools on the remote node.
+
+You can either set or omit the `hpasm_hostname` custom variable and thereby select the corresponding node.
+
+The `hpasm_remote` attribute enables the plugin to execute remote SNMP queries if set to `true`.
+For compatibility reasons this attribute defaults to `true`, which ensures that
+specifying `hpasm_hostname` always enables remote checks.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------------|-----------------------------------------------------------------------
+hpasm_hostname | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+hpasm_community | **Optional.** SNMP community of the server (SNMP v1/2 only).
+hpasm_protocol | **Optional.** The SNMP protocol to use (default: 2c, other possibilities: 1,3).
+hpasm_port | **Optional.** The SNMP port to use (default: 161).
+hpasm_blacklist | **Optional.** Blacklist some (missing/failed) components.
+hpasm_ignore-dimms | **Optional.** Ignore "N/A"-DIMM status on misc. servers (e.g. older DL320).
+hpasm_ignore-fan-redundancy | **Optional.** Ignore missing redundancy partners.
+hpasm_customthresholds | **Optional.** Use custom thresholds for certain temperatures.
+hpasm_eventrange | **Optional.** Period of time before critical IML events respectively become warnings or vanish. A range is described as a number and a unit (s, m, h, d), e.g. --eventrange 1h/20m.
+hpasm_perfdata | **Optional.** Output performance data. If your performance data string becomes too long and is truncated by Nagios, then you can use --perfdata=short instead. This will output temperature tags without location information.
+hpasm_username | **Optional.** The securityName for the USM security model (SNMPv3 only).
+hpasm_authpassword | **Optional.** The authentication password for SNMPv3.
+hpasm_authprotocol | **Optional.** The authentication protocol for SNMPv3 (md5\|sha).
+hpasm_privpassword | **Optional.** The password for authPriv security level.
+hpasm_privprotocol | **Optional.** The private protocol for SNMPv3 (des\|aes\|aes128\|3des\|3desde).
+hpasm_servertype | **Optional.** The type of the server: proliant (default) or bladesystem.
+hpasm_eval-nics                 | **Optional.** Check network interfaces (and groups). This is experimental in the plugin: if you get an error that you think is not justified for your configuration, report it to the plugin author and include the output of "snmpwalk -On .... 1.3.6.1.4.1.232" together with a description of how your NICs are set up and why the plugin's error message is wrong.
+hpasm_remote | **Optional.** Run remote SNMP checks if enabled. Otherwise checks are executed locally using the `hpasmcli` binary. Defaults to `true`.
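+
+A sketch of a local check executed via the Icinga agent on the monitored node
+(the endpoint name is a placeholder; a remote SNMP check would instead keep the
+default `hpasm_remote = true` and set `hpasm_community`):
+
+```
+object Service "hpasm-local" {
+  host_name = "proliant-host"
+  check_command = "hpasm"
+
+  // Run the plugin on the agent itself so it can call hpasmcli locally.
+  command_endpoint = "proliant-host"
+
+  vars.hpasm_remote = false
+}
+```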
+
+#### openmanage <a id="plugin-contrib-command-openmanage"></a>
+
+The [check_openmanage](http://folk.uio.no/trondham/software/check_openmanage.html) plugin
+checks the hardware health of Dell PowerEdge (and some PowerVault) servers.
+It uses the Dell OpenManage Server Administrator (OMSA) software, which must be running on
+the monitored system. check_openmanage can be used remotely with SNMP or locally with the Icinga 2 agent,
+check_by_ssh or similar, whichever suits your needs and particular taste.
+
+The plugin checks the health of the storage subsystem, power supplies, memory modules,
+temperature probes etc., and gives an alert if any of the components are faulty or operate outside normal parameters.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------------|-----------------------------------------------------------------------
+openmanage_all | **Optional.** Check everything, even log content
+openmanage_blacklist | **Optional.** Blacklist missing and/or failed components
+openmanage_check | **Optional.** Fine-tune which components are checked
+openmanage_community | **Optional.** SNMP community string [default=public]
+openmanage_config | **Optional.** Specify configuration file
+openmanage_critical | **Optional.** Custom temperature critical limits
+openmanage_extinfo | **Optional.** Append system info to alerts
+openmanage_fahrenheit | **Optional.** Use Fahrenheit as temperature unit
+openmanage_hostname | **Optional.** Hostname or IP (required for SNMP)
+openmanage_htmlinfo | **Optional.** HTML output with clickable links
+openmanage_info | **Optional.** Prefix any alerts with the service tag
+openmanage_ipv6 | **Optional.** Use IPv6 instead of IPv4 [default=no]
+openmanage_legacy_perfdata | **Optional.** Legacy performance data output
+openmanage_no_storage | **Optional.** Don't check storage
+openmanage_only | **Optional.** Only check a certain component or alert type
+openmanage_perfdata | **Optional.** Output performance data [default=no]
+openmanage_port | **Optional.** SNMP port number [default=161]
+openmanage_protocol | **Optional.** SNMP protocol version [default=2c]
+openmanage_short_state | **Optional.** Prefix alerts with alert state abbreviated
+openmanage_show_blacklist | **Optional.** Show blacklistings in OK output
+openmanage_state | **Optional.** Prefix alerts with alert state
+openmanage_tcp | **Optional.** Use TCP instead of UDP [default=no]
+openmanage_timeout | **Optional.** Plugin timeout in seconds [default=30]
+openmanage_vdisk_critical | **Optional.** Make any alerts on virtual disks critical
+openmanage_warning | **Optional.** Custom temperature warning limits
+
+#### lmsensors <a id="plugin-contrib-command-lmsensors"></a>
+
+The [check_lmsensors](https://github.com/jackbenny/check_temp) plugin
+uses the `lm-sensors` binary to monitor temperature sensors.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|----------------------------------------------------------------------------------
+lmsensors_warning | **Required.** Exit with WARNING status if above INTEGER degrees
+lmsensors_critical | **Required.** Exit with CRITICAL status if above INTEGER degrees
+lmsensors_sensor | **Optional.** Set what to monitor, for example CPU or MB (or M/B). Check sensors for the correct word. Default is CPU.
+
+#### hddtemp <a id="plugin-contrib-command-hddtemp"></a>
+
+The [check_hddtemp](https://github.com/vint21h/nagios-check-hddtemp) plugin
+uses the `hddtemp` binary to monitor hard drive temperature.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|----------------------------------------------------------------------------------
+hddtemp_server          | **Required.** Server name or address
+hddtemp_port            | **Optional.** Port number
+hddtemp_devices         | **Optional.** Comma-separated list of devices, or empty for all devices in the hddtemp response
+hddtemp_separator       | **Optional.** hddtemp separator
+hddtemp_warning         | **Required.** Warning temperature
+hddtemp_critical        | **Required.** Critical temperature
+hddtemp_timeout         | **Optional.** Network timeout for receiving data from hddtemp
+hddtemp_performance     | **Optional.** If set, return performance data
+hddtemp_quiet           | **Optional.** If set, be quiet
+
+The following sane default values are specified:
+```
+vars.hddtemp_server = "127.0.0.1"
+vars.hddtemp_warning = 55
+vars.hddtemp_critical = 60
+vars.hddtemp_performance = true
+vars.hddtemp_timeout = 5
+```
+
+#### adaptec-raid <a id="plugin-contrib-command-adaptec-raid"></a>
+
+The [check_adaptec_raid](https://github.com/thomas-krenn/check_adaptec_raid) plugin
+uses the `arcconf` binary to monitor Adaptec RAID controllers.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------------|-----------------------------------------------------------------------
+adaptec_controller_number | **Required.** Controller number to monitor.
+arcconf_path | **Required.** Path to the `arcconf` binary, e.g. "/sbin/arcconf".
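+
+An example service definition might look like this (controller number and binary
+path are placeholders to adapt):
+
+```
+object Service "adaptec-raid" {
+  host_name = "raid-host"
+  check_command = "adaptec-raid"
+
+  vars.adaptec_controller_number = 1
+  vars.arcconf_path = "/sbin/arcconf"
+}
+```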
+
+#### lsi-raid <a id="plugin-contrib-command-lsi-raid"></a>
+
+The [check_lsi_raid](https://github.com/thomas-krenn/check_lsi_raid) plugin
+uses the `storcli` binary to monitor MegaRAID RAID controllers.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------------|-----------------------------------------------------------------------
+lsi_controller_number | **Optional.** Controller number to monitor.
+storcli_path | **Optional.** Path to the `storcli` binary, e.g. "/usr/sbin/storcli".
+lsi_enclosure_id | **Optional.** Enclosure numbers to be checked, comma-separated.
+lsi_ld_id | **Optional.** Logical devices to be checked, comma-separated.
+lsi_pd_id | **Optional.** Physical devices to be checked, comma-separated.
+lsi_temp_warning | **Optional.** RAID controller warning temperature.
+lsi_temp_critical | **Optional.** RAID controller critical temperature.
+lsi_pd_temp_warning | **Optional.** Disk warning temperature.
+lsi_pd_temp_critical | **Optional.** Disk critical temperature.
+lsi_bbu_temp_warning | **Optional.** Battery warning temperature.
+lsi_bbu_temp_critical | **Optional.** Battery critical temperature.
+lsi_cv_temp_warning | **Optional.** CacheVault warning temperature.
+lsi_cv_temp_critical | **Optional.** CacheVault critical temperature.
+lsi_ignored_media_errors | **Optional.** Warning threshold for media errors.
+lsi_ignored_other_errors | **Optional.** Warning threshold for other errors.
+lsi_ignored_predictive_fails | **Optional.** Warning threshold for predictive failures.
+lsi_ignored_shield_counters | **Optional.** Warning threshold for shield counter.
+lsi_ignored_bbm_counters | **Optional.** Warning threshold for BBM counter.
+lsi_bbu                         | **Optional.** Define if a BBU is present and its state should be checked.
+lsi_noenclosures | **Optional.** If set to true, does not check enclosures.
+lsi_nosudo | **Optional.** If set to true, does not use sudo when running storcli.
+lsi_nocleanlogs | **Optional.** If set to true, does not clean up the log files after executing storcli checks.
+
+
+#### smart-attributes <a id="plugin-contrib-command-smart-attributes"></a>
+
+The [check_smart_attributes](https://github.com/thomas-krenn/check_smart_attributes) plugin
+uses the `smartctl` binary to monitor SMART values of SSDs and HDDs.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------------|-----------------------------------------------------------------------
+smart_attributes_config_path | **Required.** Path to the smart attributes config file (e.g. check_smartdb.json).
+smart_attributes_device | **Required.** Device name (e.g. /dev/sda) to monitor.
+
+
+### IcingaCLI <a id="plugin-contrib-icingacli"></a>
+
+This category includes all plugins using the icingacli provided by Icinga Web 2.
+
+The user running Icinga 2 needs sufficient permissions to read the Icinga Web 2 configuration directory, e.g. `usermod -a -G icingaweb2 icinga`. You need to restart (not reload) Icinga 2 for the new group membership to take effect.
+
+#### Business Process <a id="plugin-contrib-icingacli-businessprocess"></a>
+
+This subcommand is provided by the [business process module](https://exchange.icinga.com/icinga/Business+Process)
+and executed as `icingacli businessprocess` CLI command.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------------------------|-----------------------------------------------------------------------------------------
+icingacli_businessprocess_process | **Required.** Business process to monitor.
+icingacli_businessprocess_config | **Optional.** Configuration file containing your business process without file extension.
+icingacli_businessprocess_details | **Optional.** Get details for root cause analysis. Defaults to false.
+icingacli_businessprocess_statetype | **Optional.** Define which state type to look at, `soft` or `hard`. Overrides the default value inside the businessprocess module, if configured.
+icingacli_businessprocess_ackisok | **Optional.** Treat acknowledged hosts/services always as UP/OK.
+icingacli_businessprocess_blame | **Optional.** Show problem details as a tree reduced to the nodes which have the same state as the business process.
+icingacli_businessprocess_colors | **Optional.** Show colored output.
+icingacli_businessprocess_downtimeisok | **Optional.** Treat hosts/services in downtime always as UP/OK.
+icingacli_businessprocess_rootcause | **Optional.** Used in combination with *icingacli_businessprocess_blame*. Only shows the paths of the nodes which are responsible for the state of the business process.
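+
+An example service definition might look like this (the business process and
+configuration file names are placeholders to adapt):
+
+```
+object Service "bp-web-shop" {
+  host_name = "icinga-master"
+  check_command = "icingacli-businessprocess"
+
+  vars.icingacli_businessprocess_process = "web-shop"
+  vars.icingacli_businessprocess_config = "shop-processes"
+}
+```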
+
+#### Director <a id="plugin-contrib-icingacli-director"></a>
+
+This subcommand is provided by the [director module](https://github.com/Icinga/icingaweb2-module-director) > 1.4.2 and executed as `icingacli director health check`. Please refer to the [documentation](https://github.com/Icinga/icingaweb2-module-director/blob/master/doc/60-CLI.md#health-check-plugin) for all available sub-checks.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------------------------|-----------------------------------------------------------------------------------------
+icingacli_director_check | **Optional.** Run only a specific test suite.
+icingacli_director_db | **Optional.** Use a specific Icinga Web DB resource.
+
+#### Elasticsearch <a id="plugin-contrib-icingacli-elasticsearch"></a>
+
+This subcommand is provided by the [elasticsearch_module](https://github.com/Icinga/icingaweb2-module-elasticsearch) and executed as `icingacli elasticsearch check`.
+
+* The value of `icingacli_elasticsearch_instance` is the same as in the configuration of the module.
+* The value of `icingacli_elasticsearch_filter` is a filter for events in Icinga Web 2 syntax, e.g. `"beat.hostname=www.example.com" AND severity=critical`.
+* The thresholds are just numerical values. They get checked against how many events match the filter within the given timeframe.
+* The value of `icingacli_elasticsearch_index` is an index pattern. e.g. `logstash*`
+
+Name | Description
+------------------------------------------|-----------------------------------------------------------------------------------------
+icingacli_elasticsearch_instance | **Required.** The Elasticsearch to connect to
+icingacli_elasticsearch_index | **Required.** Index pattern to use when searching
+icingacli_elasticsearch_critical | **Required.** Critical threshold
+icingacli_elasticsearch_warning | **Required.** Warning threshold
+icingacli_elasticsearch_filter | **Required.** Filter for events
+icingacli_elasticsearch_from | **Optional.** Negative value of time to search from now (Default: -5m)
+
+#### x509 <a id="plugin-contrib-icingacli-x509"></a>
+
+This subcommand is provided by the [x509 module](https://github.com/Icinga/icingaweb2-module-x509) and executed as `icingacli x509 check host`. Please refer to the [documentation](https://github.com/Icinga/icingaweb2-module-x509/blob/master/doc/10-Monitoring.md#host-check-command) for more information.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------------------------|-----------------------------------------------------------------------------------------
+icingacli_x509_ip                          | **Required.** A host's IP address [or]
+icingacli_x509_host                        | **Required.** A host's name
+icingacli_x509_port | **Optional.** The port to check in particular
+icingacli_x509_warning | **Optional.** Less remaining time results in state WARNING (Default: 25%)
+icingacli_x509_critical | **Optional.** Less remaining time results in state CRITICAL (Default: 10%)
+icingacli_x509_allow_self_signed | **Optional.** Ignore if a certificate or its issuer has been self-signed (Default: false)
+
+### IPMI Devices <a id="plugin-contrib-ipmi"></a>
+
+This category includes all plugins for IPMI devices.
+
+#### ipmi-sensor <a id="plugin-contrib-command-ipmi-sensor"></a>
+
+The [check_ipmi_sensor](https://github.com/thomas-krenn/check_ipmi_sensor_v3) plugin
+uses the `ipmimonitoring` binary to monitor sensor data for IPMI devices. Please
+read the [documentation](https://www.thomas-krenn.com/en/wiki/IPMI_Sensor_Monitoring_Plugin)
+for installation and configuration details.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+---------------------------------|-----------------------------------------------------------------------------------------------------
+ipmi_address | **Required.** Specifies the remote host (IPMI device) to check. Defaults to "$address$".
+ipmi_config_file | **Optional.** Path to the FreeIPMI configuration file. It should contain IPMI username, IPMI password, and IPMI privilege-level.
+ipmi_username | **Optional.** The IPMI username.
+ipmi_password | **Optional.** The IPMI password.
+ipmi_privilege_level | **Optional.** The IPMI privilege level of the IPMI user.
+ipmi_backward_compatibility_mode | **Optional.** Enable backward compatibility mode, useful for FreeIPMI 0.5.\* (this omits FreeIPMI options "--quiet-cache" and "--sdr-cache-recreate").
+ipmi_sensor_type | **Optional.** Limit sensors to query based on IPMI sensor type. Examples for IPMI sensor types are 'Fan', 'Temperature' and 'Voltage'.
+ipmi_sel_type                    | **Optional.** Limit SEL entries to specific types, run 'ipmi-sel -L' for a list of types. All sensors are populated to the SEL and by default all sensor types are monitored.
+ipmi_exclude_sensor_id | **Optional.** Exclude sensor matching ipmi_sensor_id.
+ipmi_exclude_sensor | **Optional.** Exclude sensor based on IPMI sensor type. (Comma-separated)
+ipmi_exclude_sel | **Optional.** Exclude SEL entries of specific sensor types. (comma-separated list).
+ipmi_sensor_id | **Optional.** Include sensor matching ipmi_sensor_id.
+ipmi_protocol_lan_version | **Optional.** Change the protocol LAN version. Defaults to "LAN_2_0".
+ipmi_number_of_active_fans | **Optional.** Number of fans that should be active. Otherwise a WARNING state is returned.
+ipmi_show_fru | **Optional.** Print the product serial number if it is available in the IPMI FRU data.
+ipmi_show_assettag | **Optional.** Print the assettag if it is available in the IPMI FRU data. (--fru is mandatory)
+ipmi_show_board                  | **Optional.** Print additional motherboard information if it is available in the IPMI FRU data. (--fru is mandatory)
+ipmi_no_sel_checking | **Optional.** Turn off system event log checking via ipmi-sel.
+ipmi_no_thresholds | **Optional.** Turn off performance data thresholds from output-sensor-thresholds.
+ipmi_verbose                     | **Optional.** Be verbose: multi line output, also with additional details for warnings.
+ipmi_debug                       | **Optional.** Be verbose: debugging output, followed by normal multi line output.
+ipmi_unify_file | **Optional.** Path to the unify file to unify sensor names.
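+
+An example service definition might look like this (IPMI address, FreeIPMI
+configuration file path and sensor type are placeholders to adapt):
+
+```
+object Service "ipmi-sensors" {
+  host_name = "server-host"
+  check_command = "ipmi-sensor"
+
+  vars.ipmi_address = "10.0.0.42"
+  vars.ipmi_config_file = "/etc/icinga2/plugin-configs/freeipmi.conf"
+  vars.ipmi_sensor_type = "Temperature"
+}
+```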
+
+#### ipmi-alive <a id="plugin-contrib-command-ipmi-alive"></a>
+
+The `ipmi-alive` check command allows you to create a ping check for the IPMI interface.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+---------------------------------|-----------------------------------------------------------------------------------------------------
+ping_address | **Optional.** The address of the IPMI interface. Defaults to "$address$" if the IPMI interface's `address` attribute is set, "$address6$" otherwise.
+ping_wrta | **Optional.** The RTA warning threshold in milliseconds. Defaults to 5000.
+ping_wpl | **Optional.** The packet loss warning threshold in %. Defaults to 100.
+ping_crta | **Optional.** The RTA critical threshold in milliseconds. Defaults to 5000.
+ping_cpl | **Optional.** The packet loss critical threshold in %. Defaults to 100.
+ping_packets | **Optional.** The number of packets to send. Defaults to 1.
+ping_timeout | **Optional.** The plugin timeout in seconds. Defaults to 0 (no timeout).
+
+
+### Log Management <a id="plugins-contrib-log-management"></a>
+
+This category includes all plugins for log management, for example [Logstash](https://www.elastic.co/products/logstash).
+
+#### logstash <a id="plugins-contrib-command-logstash"></a>
+
+The [logstash](https://github.com/NETWAYS/check_logstash) plugin connects to
+the Node API of Logstash. This plugin requires at least Logstash version 5.0.x.
+
+The Node API is not activated by default. You have to configure your Logstash
+installation in order to allow plugin connections.
+
+Name | Description
+---------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+logstash_hostname          | **Optional.** Hostname where Logstash is running. Defaults to `check_address`.
+logstash_port              | **Optional.** Port where Logstash is listening for API requests. Defaults to 9600.
+logstash_filedesc_warn | **Optional.** Warning threshold of file descriptor usage in percent. Defaults to 85 (percent).
+logstash_filedesc_crit | **Optional.** Critical threshold of file descriptor usage in percent. Defaults to 95 (percent).
+logstash_heap_warn | **Optional.** Warning threshold of heap usage in percent. Defaults to 70 (percent).
+logstash_heap_crit         | **Optional.** Critical threshold of heap usage in percent. Defaults to 80 (percent).
+logstash_inflight_warn | **Optional.** Warning threshold of inflight events.
+logstash_inflight_crit | **Optional.** Critical threshold of inflight events.
+logstash_cpu_warn | **Optional.** Warning threshold for cpu usage in percent.
+logstash_cpu_crit | **Optional.** Critical threshold for cpu usage in percent.
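+
+A minimal sketch of a service using this command (the `logstash` host custom variable used for assignment and the thresholds are illustrative):
+
+```
+apply Service "logstash" {
+  check_command = "logstash"
+
+  vars.logstash_port = 9600
+  vars.logstash_heap_warn = 70
+  vars.logstash_heap_crit = 80
+
+  assign where host.vars.logstash
+}
+```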
+
+#### logfiles <a id="plugins-contrib-command-logfiles"></a>
+
+The [logfiles](https://labs.consol.de/nagios/check_logfiles/) plugin finds
+specified patterns in log files.
+
+Name | Description
+----------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+logfiles_tag | **Optional.** A short unique descriptor for this search. It will appear in the output of the plugin and is used to separate the different services.
+logfiles_logfile | **Optional.** This is the name of the log file you want to scan.
+logfiles_rotation | **Optional.** This is the method how log files are rotated. One of the predefined methods or a regular expression, which helps identify the rotated archives. If this key is missing, check_logfiles assumes that the log file will be simply overwritten instead of rotated.
+logfiles_critical_pattern | **Optional.** A regular expression which will trigger a critical error.
+logfiles_warning_pattern | **Optional.** A regular expression which will trigger a warning.
+logfiles_critical_exception | **Optional.** A regular expression for exceptions which are not counted as critical errors.
+logfiles_warning_exception | **Optional.** A regular expression for exceptions which are not counted as warning errors.
+logfiles_ok_pattern | **Optional.** A regular expression which resets the error counters.
+logfiles_no_protocol | **Optional.** Normally all the matched lines are written into a protocol file with this file’s name appearing in the plugin’s output. This option switches this off.
+logfiles_syslog_server | **Optional.** With this option you limit the pattern matching to lines originating from the host check_logfiles is running on.
+logfiles_syslog_client | **Optional.** With this option you limit the pattern matching to lines originating from the host named in this option.
+logfiles_sticky | **Optional.** Errors are propagated through successive runs.
+logfiles_unstick | **Optional.** Resets sticky errors.
+logfiles_config | **Optional.** The name of a configuration file.
+logfiles_configdir | **Optional.** The name of a configuration directory. Configuration files ending in .cfg or .conf are (recursively) imported.
+logfiles_searches | **Optional.** A list of tags of those searches which are to be run. Using this parameter, not all searches listed in the config file are run, but only those selected.
+logfiles_selectedsearches | **Optional.** A list of tags of those searches which are to be run. Using this parameter, not all searches listed in the config file are run, but only those selected.
+logfiles_report | **Optional.** This option turns on multiline output (Default: off). The setting html generates a table which displays the last hits in the service details view. Possible values are: short, long, html or off.
+logfiles_max_length | **Optional.** With this parameter long lines are truncated (Default: off). Some programs (e.g. TrueScan) generate entries in the eventlog of such a length, that the output of the plugin becomes longer than 1024 characters. NSClient++ discards these.
+logfiles_winwarncrit | **Optional.** With this parameter messages in the eventlog are classified by the type WARNING/ERROR (Default: off). Replaces or complements warning/criticalpattern.
+logfiles_run_unique | **Optional.** This parameter prevents check_logfiles from starting when there’s already another instance using the same config file (exits with UNKNOWN).
+logfiles_timeout | **Optional.** This parameter causes an abort of a running search after a defined number of seconds. It is aborted in a controlled manner, so that the lines which have been read so far are used for the computation of the final result.
+logfiles_warning | **Optional.** Complex handler-scripts can be provided with a warning-parameter this way. Inside the scripts the value is accessible as the macro CL_WARNING.
+logfiles_critical | **Optional.** Complex handler-scripts can be provided with a critical-parameter this way. Inside the scripts the value is accessible as the macro CL_CRITICAL.
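+
+A sketch of a service scanning a single log file (tag, file path and patterns are illustrative examples, not defaults of the plugin):
+
+```
+apply Service "logfiles-syslog" {
+  check_command = "logfiles"
+
+  vars.logfiles_tag = "syslog"
+  vars.logfiles_logfile = "/var/log/messages"
+  vars.logfiles_critical_pattern = "Kernel panic"
+  vars.logfiles_warning_pattern = "ERROR"
+
+  assign where host.vars.os == "Linux"
+}
+```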
+
+
+### Metrics <a id="plugin-contrib-metrics"></a>
+
+This category includes all plugins for metric-based checks.
+
+#### graphite <a id="plugin-contrib-command-graphite"></a>
+
+The [check_graphite](https://github.com/obfuscurity/nagios-scripts) plugin
+uses the `rest-client` Ruby library to monitor a [Graphite](https://graphiteapp.org) instance.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------------------|-----------------------------------------------------------------------------------------------------
+graphite_url | **Required.** Target url.
+graphite_metric | **Required.** Metric path string.
+graphite_shortname | **Optional.** Metric short name (used for performance data).
+graphite_duration | **Optional.** Length, in minutes, of data to parse (default: 5).
+graphite_function | **Optional.** Function applied to metrics for thresholds (default: average).
+graphite_warning | **Required.** Warning threshold.
+graphite_critical | **Required.** Critical threshold.
+graphite_units | **Optional.** Adds a text tag to the metric count in the plugin output. Useful to identify the metric units. Doesn't affect data queries.
+graphite_message | **Optional.** Text message to output (default: "metric count:").
+graphite_zero_on_error | **Optional.** Return 0 on a graphite 500 error.
+graphite_link_graph | **Optional.** Add a link in the plugin output, showing a 24h graph for this metric in graphite.
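+
+A sketch of a service using this command (URL, metric path and thresholds are illustrative):
+
+```
+apply Service "graphite-load" {
+  check_command = "graphite"
+
+  vars.graphite_url = "https://graphite.example.com"          // illustrative Graphite URL
+  vars.graphite_metric = "collectd.web01.load.load.shortterm" // illustrative metric path
+  vars.graphite_warning = 5
+  vars.graphite_critical = 10
+
+  assign where host.vars.graphite
+}
+```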
+
+### Network Components <a id="plugin-contrib-network-components"></a>
+
+This category includes all plugins for various network components like routers, switches and firewalls.
+
+#### interfacetable <a id="plugin-contrib-command-interfacetable"></a>
+
+The [check_interfacetable_v3t](http://www.tontonitch.com/tiki/tiki-index.php?page=Nagios+plugins+-+interfacetable_v3t) plugin
+generates an HTML page containing information about the monitored node and all of its interfaces.
+
+The Git repository is located on [GitHub](https://github.com/Tontonitch/interfacetable_v3t).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------------------|-----------------------------------------------------------------------------------------------------
+interfacetable_hostquery | **Required.** Specifies the remote host to poll. Defaults to "$address$".
+interfacetable_hostdisplay | **Optional.** Specifies the hostname to display in the HTML link. Defaults to "$host.display_name$".
+interfacetable_regex | **Optional.** Interface names and property names for some other options will be interpreted as regular expressions. Defaults to false.
+interfacetable_outputshort | **Optional.** Reduce the verbosity of the plugin output. Defaults to false.
+interfacetable_exclude | **Optional.** Comma separated list of interfaces globally excluded from the monitoring.
+interfacetable_include | **Optional.** Comma separated list of interfaces globally included in the monitoring.
+interfacetable_aliasmatching | **Optional.** Allows you to specify aliases in addition to interface names. Defaults to false.
+interfacetable_excludetraffic | **Optional.** Comma separated list of interfaces excluded from traffic checks.
+interfacetable_includetraffic | **Optional.** Comma separated list of interfaces included for traffic checks.
+interfacetable_warningtraffic | **Optional.** Interface traffic load percentage leading to a warning alert.
+interfacetable_criticaltraffic | **Optional.** Interface traffic load percentage leading to a critical alert.
+interfacetable_pkt | **Optional.** Add unicast/non-unicast pkt stats for each interface.
+interfacetable_trafficwithpkt | **Optional.** Enable traffic calculation using pkt counters instead of octet counters. Useful when using 32-bit counters to track the load on > 1GbE interfaces. Defaults to false.
+interfacetable_trackproperty | **Optional.** List of tracked properties.
+interfacetable_excludeproperty | **Optional.** Comma separated list of interfaces excluded from the property tracking.
+interfacetable_includeproperty | **Optional.** Comma separated list of interfaces included in the property tracking.
+interfacetable_community | **Optional.** Specifies the snmp v1/v2c community string. Defaults to "public" if using snmp v1/v2c, ignored using v3.
+interfacetable_snmpv2 | **Optional.** Use snmp v2c. Defaults to false.
+interfacetable_login | **Optional.** Login for snmpv3 authentication.
+interfacetable_passwd | **Optional.** Auth password for snmpv3 authentication.
+interfacetable_privpass | **Optional.** Priv password for snmpv3 authentication.
+interfacetable_protocols | **Optional.** Authentication protocol and privacy protocol for snmpv3 authentication.
+interfacetable_domain | **Optional.** SNMP transport domain.
+interfacetable_contextname | **Optional.** Context name for the snmp requests.
+interfacetable_port | **Optional.** SNMP port. Defaults to standard port.
+interfacetable_64bits | **Optional.** Use SNMP 64-bits counters. Defaults to false.
+interfacetable_maxrepetitions | **Optional.** Increasing this value may enhance snmp query performance by gathering more results at one time.
+interfacetable_snmptimeout | **Optional.** Define the Transport Layer timeout for the snmp queries.
+interfacetable_snmpretries | **Optional.** Define the number of times to retry sending a SNMP message.
+interfacetable_snmpmaxmsgsize | **Optional.** Size of the SNMP message in octets, useful in case of too long responses. Be careful with network filters. Range 484 - 65535. Apply only to netsnmp perl bindings. The default is 1472 octets for UDP/IPv4, 1452 octets for UDP/IPv6, 1460 octets for TCP/IPv4, and 1440 octets for TCP/IPv6.
+interfacetable_unixsnmp | **Optional.** Use unix snmp utilities for snmp requests. Defaults to false, which means use the perl bindings.
+interfacetable_enableperfdata | **Optional.** Enable port performance data. Defaults to false.
+interfacetable_perfdataformat | **Optional.** Define which performance data will be generated. Possible values are "full" (default), "loadonly", "globalonly".
+interfacetable_perfdatathreshold | **Optional.** Define which thresholds are printed in the generated performance data. Possible values are "full" (default), "loadonly", "globalonly".
+interfacetable_perfdatadir | **Optional.** When specified, the performance data are also written directly to a file, in the specified location.
+interfacetable_perfdataservicedesc | **Optional.** Specify additional parameters for output performance data to PNP. Defaults to "$service.name$", only affects **interfacetable_perfdatadir**.
+interfacetable_grapher | **Optional.** Specify the used graphing solution. Possible values are "pnp4nagios" (default), "nagiosgrapher", "netwaysgrapherv2" and "ingraph".
+interfacetable_grapherurl | **Optional.** Graphing system url. Default depends on **interfacetable_grapher**.
+interfacetable_portperfunit | **Optional.** Traffic could be reported in bits (counters) or in bps (calculated value).
+interfacetable_nodetype | **Optional.** Specify the node type, for specific information to be printed / specific oids to be used. Possible values: "standard" (default), "cisco", "hp", "netscreen", "netapp", "bigip", "bluecoat", "brocade", "brocade-nos", "nortel", "hpux".
+interfacetable_duplex | **Optional.** Add the duplex mode property for each interface in the interface table. Defaults to false.
+interfacetable_stp | **Optional.** Add the stp state property for each interface in the interface table. Defaults to false.
+interfacetable_vlan | **Optional.** Add the vlan attribution property for each interface in the interface table. Defaults to false. This option is available only for the following nodetypes: "cisco", "hp", "nortel"
+interfacetable_noipinfo | **Optional.** Remove the ip information for each interface from the interface table. Defaults to false.
+interfacetable_alias | **Optional.** Add the alias information for each interface in the interface table. Defaults to false.
+interfacetable_accessmethod | **Optional.** Access method for a shortcut to the host in the HTML page. Format is: <method>[:<target>], where method can be: ssh, telnet, http or https.
+interfacetable_htmltablelinktarget | **Optional.** Specifies the window or the frame where the [details] link will load the generated html page. Possible values are: "_blank", "_self" (default), "_parent", "_top", or a frame name.
+interfacetable_delta | **Optional.** Set the delta used for interface throughput calculation in seconds.
+interfacetable_ifs | **Optional.** Input field separator. Defaults to ",".
+interfacetable_cache | **Optional.** Define the retention time of the cached data in seconds.
+interfacetable_noifloadgradient | **Optional.** Disable color gradient from green over yellow to red for the load percentage. Defaults to false.
+interfacetable_nohuman | **Optional.** Do not translate bandwidth usage in human readable format. Defaults to false.
+interfacetable_snapshot | **Optional.** Force the plugin to run as if it were the first launch. Defaults to false.
+interfacetable_timeout | **Optional.** Define the global timeout limit of the plugin in seconds. Defaults to "15s".
+interfacetable_css | **Optional.** Define the css stylesheet used by the generated html files. Possible values are "classic", "icinga" or "icinga-alternate1".
+interfacetable_config | **Optional.** Specify a config file to load.
+interfacetable_noconfigtable | **Optional.** Disable configuration table on the generated HTML page. Defaults to false.
+interfacetable_notips | **Optional.** Disable the tips in the generated html tables. Defaults to false.
+interfacetable_defaulttablesorting | **Optional.** Default table sorting can be "index" (default) or "name".
+interfacetable_tablesplit | **Optional.** Generate multiple interface tables, one per interface type. Defaults to false.
+interfacetable_notype | **Optional.** Remove the interface type for each interface. Defaults to false.
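+
+A sketch of how this command could be applied (the `snmp_community` host custom variable and the traffic thresholds are illustrative):
+
+```
+apply Service "interfacetable" {
+  check_command = "interfacetable"
+
+  vars.interfacetable_hostquery = host.address
+  vars.interfacetable_community = host.vars.snmp_community
+  vars.interfacetable_warningtraffic = 70
+  vars.interfacetable_criticaltraffic = 90
+
+  assign where host.vars.snmp_community
+}
+```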
+
+#### iftraffic <a id="plugin-contrib-command-iftraffic"></a>
+
+The [check_iftraffic](https://exchange.icinga.com/exchange/iftraffic) plugin
+checks the utilization of a given interface name using the SNMP protocol.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|---------------------------------------------------------
+iftraffic_address | **Required.** Specifies the remote host. Defaults to "$address$".
+iftraffic_community | **Optional.** SNMP community. Defaults to "public" if omitted.
+iftraffic_version | **Optional.** SNMP version to use. Defaults to "1" if omitted. Requires v1.0.2+.
+iftraffic_interface | **Required.** Queried interface name.
+iftraffic_bandwidth | **Required.** Interface maximum speed in kilo/mega/giga/bits per second.
+iftraffic_units | **Optional.** Interface units can be one of these values: `g` (gigabits/s), `m` (megabits/s), `k` (kilobits/s), `b` (bits/s).
+iftraffic_warn | **Optional.** Percent of bandwidth usage necessary to result in warning status (defaults to `85`).
+iftraffic_crit | **Optional.** Percent of bandwidth usage necessary to result in critical status (defaults to `98`).
+iftraffic_max_counter | **Optional.** Maximum counter value of net devices in kilo/mega/giga/bytes.
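+
+A sketch of a service definition for this command; `iftraffic64` is used the same way with the `iftraffic64_` prefix. Interface name, bandwidth and community below are illustrative:
+
+```
+apply Service "iftraffic-eth0" {
+  check_command = "iftraffic"
+
+  vars.iftraffic_address = host.address
+  vars.iftraffic_community = host.vars.snmp_community
+  vars.iftraffic_interface = "eth0"
+  vars.iftraffic_bandwidth = 1000   // 1000 megabits/s together with units "m"
+  vars.iftraffic_units = "m"
+
+  assign where host.vars.snmp_community
+}
+```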
+
+#### iftraffic64 <a id="plugin-contrib-command-iftraffic64"></a>
+
+The [check_iftraffic64](https://exchange.icinga.com/exchange/check_iftraffic64) plugin
+checks the utilization of a given interface name using the SNMP protocol.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|---------------------------------------------------------
+iftraffic64_address | **Required.** Specifies the remote host. Defaults to "$address$".
+iftraffic64_community | **Optional.** SNMP community. Defaults to "public" if omitted.
+iftraffic64_interface | **Required.** Queried interface name.
+iftraffic64_bandwidth | **Required.** Interface maximum speed in kilo/mega/giga/bits per second.
+iftraffic64_units | **Optional.** Interface units can be one of these values: `g` (gigabits/s), `m` (megabits/s), `k` (kilobits/s), `b` (bits/s).
+iftraffic64_warn | **Optional.** Percent of bandwidth usage necessary to result in warning status (defaults to `85`).
+iftraffic64_crit | **Optional.** Percent of bandwidth usage necessary to result in critical status (defaults to `98`).
+iftraffic64_max_counter | **Optional.** Maximum counter value of net devices in kilo/mega/giga/bytes.
+
+#### interfaces <a id="plugin-contrib-command-interfaces"></a>
+
+The [check_interfaces](https://git.netways.org/plugins/check_interfaces) plugin
+uses SNMP to monitor network interfaces and their utilization.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------|---------------------------------------------------------
+interfaces_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+interfaces_regex | **Optional.** Interface list regexp.
+interfaces_exclude_regex | **Optional.** Interface list negative regexp.
+interfaces_errors | **Optional.** Number of in errors (CRC errors for cisco) to consider a warning (default 50).
+interface_out_errors | **Optional.** Number of out errors (collisions for cisco) to consider a warning (default same as in errors).
+interfaces_perfdata | **Optional.** Performance data from the last check result.
+interfaces_prefix | **Optional.** Prefix interface names with this label.
+interfaces_lastcheck | **Optional.** Last checktime (unixtime).
+interfaces_bandwidth | **Optional.** Bandwidth warn level in percent.
+interfaces_speed | **Optional.** Override speed detection with this value (bits per sec).
+interfaces_trim | **Optional.** Cut this number of characters from the start of interface descriptions.
+interfaces_mode | **Optional.** Special operating mode (default,cisco,nonbulk,bintec).
+interfaces_auth_proto | **Optional.** SNMPv3 Auth Protocol (SHA\|MD5)
+interfaces_auth_phrase | **Optional.** SNMPv3 Auth Phrase
+interfaces_priv_proto | **Optional.** SNMPv3 Privacy Protocol (AES\|DES)
+interfaces_priv_phrase | **Optional.** SNMPv3 Privacy Phrase
+interfaces_user | **Optional.** SNMPv3 User
+interfaces_down_is_ok | **Optional.** Disables critical alerts for down interfaces.
+interfaces_aliases | **Optional.** Retrieves the interface description.
+interfaces_match_aliases | **Optional.** Also match against aliases (Option --aliases automatically enabled).
+interfaces_timeout | **Optional.** Sets the SNMP timeout (in ms).
+interfaces_sleep | **Optional.** Sleep between every SNMP query (in ms).
+interfaces_names | **Optional.** If set to true, use ifName instead of ifDescr.
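+
+A sketch of a service using this command with SNMPv3 (regexp, user and phrases are illustrative):
+
+```
+apply Service "interfaces" {
+  check_command = "interfaces"
+
+  vars.interfaces_address = host.address
+  vars.interfaces_regex = "^GigabitEthernet"
+  vars.interfaces_user = "monitoring"        // illustrative SNMPv3 user
+  vars.interfaces_auth_proto = "SHA"
+  vars.interfaces_auth_phrase = "changeme"   // illustrative secret
+
+  assign where host.vars.network_device      // illustrative host custom variable
+}
+```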
+
+#### linux\_netdev <a id="plugin-contrib-command-linux_netdev"></a>
+
+The [check\_linux\_netdev](https://github.com/Al2Klimov/check_linux_netdev)
+plugin monitors a Linux system's network device statistics via `/proc/net/dev`.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------|---------------------------------------------------------
+linux\_netdev\_duration | **Optional.** For how long to run. E.g. "10s" or "2m". Default: "1m"
+linux\_netdev\_exclude | **Optional.** Which NICs to exclude. E.g. `eth0` or `eth?*`, may be an array. Default: none
+linux\_netdev\_thresholds | **Optional.** Warning and critical thresholds. E.g. `eth?*:tx:bytes:persec:w=1000000000` (see [plugin documentation](https://github.com/Al2Klimov/check_linux_netdev#usage)), may be an array. Default: none
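+
+A sketch of a service using this command (the excluded NIC and the threshold expression are illustrative; the threshold syntax is described in the plugin documentation linked above):
+
+```
+apply Service "linux_netdev" {
+  check_command = "linux_netdev"
+
+  vars.linux_netdev_duration = "1m"
+  vars.linux_netdev_exclude = [ "lo" ]
+  vars.linux_netdev_thresholds = [ "eth?*:tx:bytes:persec:w=1000000000" ]
+
+  assign where host.vars.os == "Linux"
+}
+```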
+
+#### nwc_health <a id="plugin-contrib-command-nwc_health"></a>
+
+The [check_nwc_health](https://labs.consol.de/de/nagios/check_nwc_health/index.html) plugin
+uses SNMP to monitor network components. The plugin is able to generate interface statistics,
+check hardware (CPU, memory, fan, power, etc.), and monitor firewall policies, HSRP, load-balancer
+pools, and processor and memory usage.
+
+Currently the following network components are supported: Cisco IOS, Cisco Nexus, Cisco ASA,
+Cisco PIX, F5 BIG-IP, CheckPoint Firewall1, Juniper NetScreen, HP Procurve, Nortel, Brocade 4100/4900,
+EMC DS 4700, EMC DS 24, Allied Telesyn, Blue Coat SG600, Cisco Wireless Lan Controller 5500,
+Brocade ICX6610-24-HPOE, Cisco UC telephony devices, FOUNDRY-SN-AGENT-MIB, FRITZ!BOX 7390, FRITZ!DECT 200,
+Juniper IVE, Pulse-Gateway MAG4610, Cisco IronPort AsyncOS, Foundry, etc. A complete list can be
+found in the plugin [documentation](https://labs.consol.de/nagios/check_nwc_health/index.html).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------------|---------------------------------------------------------
+nwc_health_hostname | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+nwc_health_mode | **Optional.** The plugin mode. A list of all available modes can be found in the [plugin documentation](https://labs.consol.de/nagios/check_nwc_health/index.html).
+nwc_health_timeout | **Optional.** Seconds before plugin times out (default: 15)
+nwc_health_blacklist | **Optional.** Blacklist some (missing/failed) components.
+nwc_health_port | **Optional.** The SNMP port to use (default: 161).
+nwc_health_domain | **Optional.** The transport domain to use (default: udp/ipv4, other possible values: udp6, udp/ipv6, tcp, tcp4, tcp/ipv4, tcp6, tcp/ipv6).
+nwc_health_protocol | **Optional.** The SNMP protocol to use (default: 2c, other possibilities: 1,3).
+nwc_health_community | **Optional.** SNMP community of the server (SNMP v1/2 only).
+nwc_health_username | **Optional.** The securityName for the USM security model (SNMPv3 only).
+nwc_health_authpassword | **Optional.** The authentication password for SNMPv3.
+nwc_health_authprotocol | **Optional.** The authentication protocol for SNMPv3 (md5\|sha).
+nwc_health_privpassword | **Optional.** The password for authPriv security level.
+nwc_health_privprotocol | **Optional.** The private protocol for SNMPv3 (des\|aes\|aes128\|3des\|3desde).
+nwc_health_contextengineid | **Optional.** The context engine id for SNMPv3 (10 to 64 hex characters).
+nwc_health_contextname | **Optional.** The context name for SNMPv3 (empty represents the default context).
+nwc_health_community2 | **Optional.** SNMP community which can be used to switch the context during runtime.
+nwc_health_name | **Optional.** The name of an interface (ifDescr).
+nwc_health_drecksptkdb | **Optional.** This parameter must be used instead of --name, because Devel::ptkdb is stealing the latter from the command line.
+nwc_health_alias | **Optional.** The alias name of a 64bit-interface (ifAlias)
+nwc_health_regexp | **Optional.** A flag indicating that --name is a regular expression
+nwc_health_ifspeedin | **Optional.** Override the ifspeed oid of an interface (only inbound)
+nwc_health_ifspeedout | **Optional.** Override the ifspeed oid of an interface (only outbound)
+nwc_health_ifspeed | **Optional.** Override the ifspeed oid of an interface
+nwc_health_units | **Optional.** One of %, B, KB, MB, GB, Bit, KBi, MBi, GBi. (used for e.g. mode interface-usage)
+nwc_health_name2 | **Optional.** The secondary name of a component.
+nwc_health_name3 | **Optional.** The tertiary name of a component.
+nwc_health_role | **Optional.** The role of this device in a hsrp group (active/standby/listen).
+nwc_health_report | **Optional.** Can be used to shorten the output. Possible values are: 'long' (default), 'short' (to shorten if available), or 'html' (to produce some html outputs if available)
+nwc_health_lookback | **Optional.** The amount of time you want to look back when calculating average rates. Use it for mode interface-errors or interface-usage. Without --lookback the time between two runs of check_nwc_health is the base for calculations. If you want your checkresult to be based for example on the past hour, use --lookback 3600.
+nwc_health_warning | **Optional.** The warning threshold
+nwc_health_critical | **Optional.** The critical threshold
+nwc_health_warningx | **Optional.** The extended warning thresholds
+nwc_health_criticalx | **Optional.** The extended critical thresholds
+nwc_health_mitigation | **Optional.** The parameter allows you to change a critical error to a warning (1) or ok (0).
+nwc_health_selectedperfdata | **Optional.** The parameter allows you to limit the list of performance data. It's a perl regexp. Only matching perfdata show up in the output.
+nwc_health_morphperfdata | **Optional.** The parameter allows you to change performance data labels. It's a perl regexp and a substitution. --morphperfdata '(.*)ISATAP(.*)'='$1patasi$2'
+nwc_health_negate | **Optional.** The parameter allows you to map exit levels, such as warning=critical.
+nwc_health_mymodules-dyn-dir | **Optional.** A directory where own extensions can be found.
+nwc_health_servertype | **Optional.** The type of the network device: cisco (default). Use it if auto-detection is not possible.
+nwc_health_statefilesdir | **Optional.** An alternate directory where the plugin can save files.
+nwc_health_oids | **Optional.** A list of oids which are downloaded and written to a cache file. Use it together with --mode oidcache.
+nwc_health_offline | **Optional.** The maximum number of seconds since the last update of cache file before it is considered too old.
+nwc_health_multiline | **Optional.** Multiline output
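+
+A sketch of a service running the `interface-usage` mode mentioned above (community and thresholds are illustrative):
+
+```
+apply Service "nwc_health-interface-usage" {
+  check_command = "nwc_health"
+
+  vars.nwc_health_hostname = host.address
+  vars.nwc_health_mode = "interface-usage"
+  vars.nwc_health_community = host.vars.snmp_community
+  vars.nwc_health_warning = 80
+  vars.nwc_health_critical = 90
+
+  assign where host.vars.snmp_community
+}
+```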
+
+#### printer_health <a id="plugin-contrib-command-printer_health"></a>
+
+The [check_printer_health](https://labs.consol.de/nagios/check_printer_health/index.html) plugin
+uses SNMP to monitor printers. The plugin is able to generate supply statistics and check hardware.
+A complete list can be found in the plugin [documentation](https://labs.consol.de/nagios/check_printer_health/index.html).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------------|---------------------------------------------------------
+printer_health_hostname | **Required.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+printer_health_mode | **Required.** The plugin mode. A list of all available modes can be found in the [plugin documentation](https://labs.consol.de/nagios/check_printer_health/index.html).
+printer_health_timeout | **Optional.** Seconds before plugin times out (default: 15)
+printer_health_blacklist | **Optional.** Blacklist some (missing/failed) components.
+printer_health_port | **Optional.** The SNMP port to use (default: 161).
+printer_health_domain | **Optional.** The transport domain to use (default: udp/ipv4, other possible values: udp6, udp/ipv6, tcp, tcp4, tcp/ipv4, tcp6, tcp/ipv6).
+printer_health_protocol | **Optional.** The SNMP protocol to use (default: 2c, other possibilities: 1,3).
+printer_health_community | **Optional.** SNMP community of the server (SNMP v1/2 only).
+printer_health_username | **Optional.** The securityName for the USM security model (SNMPv3 only).
+printer_health_authpassword | **Optional.** The authentication password for SNMPv3.
+printer_health_authprotocol | **Optional.** The authentication protocol for SNMPv3 (md5\|sha).
+printer_health_privpassword | **Optional.** The password for authPriv security level.
+printer_health_privprotocol | **Optional.** The private protocol for SNMPv3 (des\|aes\|aes128\|3des\|3desde).
+printer_health_contextengineid | **Optional.** The context engine id for SNMPv3 (10 to 64 hex characters).
+printer_health_contextname | **Optional.** The context name for SNMPv3 (empty represents the default context).
+printer_health_community2 | **Optional.** SNMP community which can be used to switch the context during runtime.
+printer_health_name | **Optional.** The name of an interface (ifDescr).
+printer_health_regexp | **Optional.** A flag indicating that --name is a regular expression
+printer_health_ifspeedin | **Optional.** Override the ifspeed oid of an interface (only inbound)
+printer_health_ifspeedout | **Optional.** Override the ifspeed oid of an interface (only outbound)
+printer_health_ifspeed | **Optional.** Override the ifspeed oid of an interface
+printer_health_units | **Optional.** One of %, B, KB, MB, GB, Bit, KBi, MBi, GBi. (used for e.g. mode interface-usage)
+printer_health_name2 | **Optional.** The secondary name of a component.
+printer_health_name3 | **Optional.** The tertiary name of a component.
+printer_health_role | **Optional.** The role of this device in a hsrp group (active/standby/listen).
+printer_health_report | **Optional.** Can be used to shorten the output. Possible values are: 'long' (default), 'short' (to shorten if available), or 'html' (to produce some html outputs if available)
+printer_health_lookback | **Optional.** The amount of time you want to look back when calculating average rates. Use it for mode interface-errors or interface-usage. Without --lookback the time between two runs of `check_printer_health` is the base for calculations. If you want your checkresult to be based for example on the past hour, use --lookback 3600.
+printer_health_warning | **Optional.** The warning threshold
+printer_health_critical | **Optional.** The critical threshold
+printer_health_warningx | **Optional.** The extended warning thresholds
+printer_health_criticalx | **Optional.** The extended critical thresholds
+printer_health_mitigation | **Optional.** The parameter allows you to change a critical error to a warning (1) or ok (0).
+printer_health_selectedperfdata | **Optional.** The parameter allows you to limit the list of performance data. It's a perl regexp. Only matching perfdata show up in the output.
+printer_health_morphperfdata | **Optional.** The parameter allows you to change performance data labels. It's a perl regexp and a substitution. --morphperfdata '(.*)ISATAP(.*)'='$1patasi$2'
+printer_health_negate | **Optional.** The parameter allows you to map exit levels, such as warning=critical.
+printer_health_mymodules-dyn-dir | **Optional.** A directory where own extensions can be found.
+printer_health_servertype | **Optional.** The type of the network device: cisco (default). Use it if auto-detection is not possible.
+printer_health_statefilesdir | **Optional.** An alternate directory where the plugin can save files.
+printer_health_oids | **Optional.** A list of oids which are downloaded and written to a cache file. Use it together with --mode oidcache.
+printer_health_offline | **Optional.** The maximum number of seconds since the last update of cache file before it is considered too old.
+printer_health_multiline | **Optional.** Multiline output
+
+#### Thola <a id="plugin-contrib-command-thola"></a>
+
+The [Thola](https://thola.io) plugin
+is a tool for monitoring network devices that mainly uses SNMP.
+
+To run these commands you need a server that is running the Thola API.
+If you don't know how to do this, you can have a look at the plugin's
+[documentation](https://docs.thola.io). Also, you have to
+put the Thola-client binary into the `PluginContribDir`.
+
+##### thola-cpu-load <a id="plugin-contrib-command-thola-cpu-load"></a>
+
+Checks the CPU load of a network device.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-------------------------------|--------------------------------------------------------
+thola_api_address | **Required.** Address of the Thola API to connect to
+thola_device_address | **Required.** The host's address. Defaults to "$address$" if the host's address attribute is set, "$address6$" otherwise.
+thola_device_snmp_community | **Optional.** SNMP community of the device
+thola_device_snmp_protocol | **Optional.** SNMP version to use
+thola_cpu_load_critical | **Optional.** Critical threshold for the CPU load in %
+thola_cpu_load_warning | **Optional.** Warning threshold for the CPU load in %
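+
+A sketch of a service using this command (the API endpoint and thresholds are illustrative; the other `thola-*` commands are configured the same way):
+
+```
+apply Service "thola-cpu-load" {
+  check_command = "thola-cpu-load"
+
+  vars.thola_api_address = "http://localhost:8237"   // illustrative Thola API endpoint
+  vars.thola_device_address = host.address
+  vars.thola_device_snmp_community = host.vars.snmp_community
+  vars.thola_cpu_load_warning = 80
+  vars.thola_cpu_load_critical = 90
+
+  assign where host.vars.snmp_community
+}
+```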
+
+##### thola-interface-metrics <a id="plugin-contrib-command-thola-interface-metrics"></a>
+
+Checks the interface metrics of a network device.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------------|-----------------------------------------------------
+thola_api_address | **Required.** Address of the Thola API to connect to
+thola_device_address | **Required.** The host's address. Defaults to "$address$" if the host's address attribute is set, "$address6$" otherwise.
+thola_device_snmp_community | **Optional.** SNMP community of the device
+
+##### thola-hardware-health <a id="plugin-contrib-command-thola-hardware-health"></a>
+
+Checks the hardware health of a network device.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------------|-----------------------------------------------------
+thola_api_address | **Required.** Address of the Thola API to connect to
+thola_device_address | **Required.** The host's address. Defaults to "$address$" if the host's address attribute is set, "$address6$" otherwise.
+thola_device_snmp_community | **Optional.** SNMP community of the device
+
+##### thola-identify <a id="plugin-contrib-command-thola-identify"></a>
+
+Checks if a device can be identified by the given properties.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-----------------------------------|--------------------------------------------------------------------------------
+thola_api_address | **Required.** Address of the Thola API to connect to
+thola_device_address | **Required.** The host's address. Defaults to "$address$" if the host's address attribute is set, "$address6$" otherwise.
+thola_device_snmp_community | **Optional.** SNMP community of the device
+thola_identify_model | **Optional.** Model that is compared to the actual model of the device
+thola_identify_os_version | **Optional.** OS-version that is compared to the actual OS-version of the device
+thola_identify_vendor | **Optional.** Vendor that is compared to the actual vendor of the device
+thola_identify_serial_number | **Optional.** Serial number that is compared to the actual serial number of the device
+thola_identify_discover_retries | **Optional.** The number of discover retries before aborting
+thola_identify_discover_timeouts | **Optional.** The number of discover timeouts before aborting
+
+> **Note**:
+>
+> One of the variables `thola_identify_model`, `thola_identify_os_version`,
+> `thola_identify_vendor` or `thola_identify_serial_number` must be set
+
+##### thola-memory-usage <a id="plugin-contrib-command-thola-memory-usage"></a>
+
+Checks the memory usage of a device.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-------------------------------|-----------------------------------------------------
+thola_api_address | **Required.** Address of the Thola API to connect to
+thola_device_address | **Required.** The host's address. Defaults to "$address$" if the host's address attribute is set, "$address6$" otherwise.
+thola_device_snmp_community | **Optional.** SNMP community of the device
+thola_memory_usage_critical | **Optional.** Critical threshold for the memory usage in %
+thola_memory_usage_warning | **Optional.** Warning threshold for the memory usage in %
+
+##### thola-sbc <a id="plugin-contrib-command-thola-sbc"></a>
+
+Checks special metrics from SBC (session border controller) network devices.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-----------------------------------------|-----------------------------------------------------------
+thola_api_address | **Required.** Address of the Thola API to connect to
+thola_device_address | **Required.** The host's address. Defaults to "$address$" if the host's address attribute is set, "$address6$" otherwise.
+thola_device_snmp_community | **Optional.** SNMP community of the device
+thola_sbc_system_health_score_critical | **Optional.** Critical threshold for the health score in %
+thola_sbc_system_health_score_warning | **Optional.** Warning threshold for the health score in %
+
+##### thola-thola-server <a id="plugin-contrib-command-thola-thola-server"></a>
+
+Checks if a Thola API is running on a given server.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------|-----------------------------------------------------
+thola_api_address | **Required.** Address of the Thola API to connect to
+
+##### thola-ups <a id="plugin-contrib-command-thola-ups"></a>
+
+Checks whether a UPS device has its main voltage applied.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------------------------|-----------------------------------------------------------------
+thola_api_address | **Required.** Address of the Thola API to connect to
+thola_device_address | **Required.** The host's address. Defaults to "$address$" if the host's address attribute is set, "$address6$" otherwise.
+thola_device_snmp_community | **Optional.** SNMP community of the device
+thola_ups_batt_current_critical_max | **Optional.** High critical threshold for the battery current in Volt
+thola_ups_batt_current_critical_min | **Optional.** Low critical threshold for the battery current in Volt
+thola_ups_batt_current_warning_max | **Optional.** High warning threshold for the battery current in Volt
+thola_ups_batt_current_warning_min | **Optional.** Low warning threshold for the battery current in Volt
+thola_ups_batt_temperature_critical_max | **Optional.** High critical threshold for the battery temperature in degrees Celsius
+thola_ups_batt_temperature_critical_min | **Optional.** Low critical threshold for the battery temperature in degrees Celsius
+thola_ups_batt_temperature_warning_max | **Optional.** High warning threshold for the battery temperature in degrees Celsius
+thola_ups_batt_temperature_warning_min | **Optional.** Low warning threshold for the battery temperature in degrees Celsius
+thola_ups_current_load_critical_max | **Optional.** High critical threshold for the current load in percent
+thola_ups_current_load_critical_min | **Optional.** Low critical threshold for the current load in percent
+thola_ups_current_load_warning_max | **Optional.** High warning threshold for the current load in percent
+thola_ups_current_load_warning_min | **Optional.** Low warning threshold for the current load in percent
+thola_ups_rectifier_current_critical_max | **Optional.** High critical threshold for the current rectifier in Volt
+thola_ups_rectifier_current_critical_min | **Optional.** Low critical threshold for the current rectifier in Volt
+thola_ups_rectifier_current_warning_max | **Optional.** High warning threshold for the current rectifier in Volt
+thola_ups_rectifier_current_warning_min | **Optional.** Low warning threshold for the current rectifier in Volt
+thola_ups_system_voltage_critical_max | **Optional.** High critical threshold for the system voltage in Volt
+thola_ups_system_voltage_critical_min | **Optional.** Low critical threshold for the system voltage in Volt
+thola_ups_system_voltage_warning_max | **Optional.** High warning threshold for the system voltage in Volt
+thola_ups_system_voltage_warning_min | **Optional.** Low warning threshold for the system voltage in Volt
+
+### Network Services <a id="plugin-contrib-network-services"></a>
+
+This category contains plugins which receive details about network services.
+
+#### lsyncd <a id="plugin-contrib-command-lsyncd"></a>
+
+The [check_lsyncd](https://github.com/ohitz/check_lsyncd) plugin
+uses the `lsyncd` status file to monitor [lsyncd](https://axkibe.github.io/lsyncd/).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|---------------------------------------------------------------------------
+lsyncd_statfile | **Optional.** Set status file path (default: /var/run/lsyncd.status).
+lsyncd_warning | **Optional.** Warning if more than N delays (default: 10).
+lsyncd_critical | **Optional.** Critical if more than N delays (default: 100).
+
+#### fail2ban <a id="plugin-contrib-command-fail2ban"></a>
+
+The [check_fail2ban](https://github.com/fail2ban/fail2ban/tree/master/files/nagios) plugin
+uses the `fail2ban-client` binary to monitor [fail2ban](https://www.fail2ban.org) jails.
+
+The plugin requires `sudo` permissions.
+You can add a sudoers file to allow your monitoring user to use the plugin, e.g. edit `/etc/sudoers.d/icinga` and add:
+```
+icinga ALL=(root) NOPASSWD:/usr/lib/nagios/plugins/check_fail2ban
+```
+
+and set the correct permissions:
+```bash
+chown -c root: /etc/sudoers.d/icinga
+chmod -c 0440 /etc/sudoers.d/icinga
+```
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|---------------------------------------------------------------------------
+fail2ban_display | **Optional.** To modify the output display, default is 'CHECK FAIL2BAN ACTIVITY'
+fail2ban_path | **Optional.** Specify the path to the fail2ban-client binary, default value is /usr/bin/fail2ban-client
+fail2ban_warning | **Optional.** Specify a warning threshold, default is 1
+fail2ban_critical | **Optional.** Specify a critical threshold, default is 2
+fail2ban_socket | **Optional.** Specify a socket path, default is unset
+fail2ban_perfdata | **Optional.** If set to true, activate the perfdata output, default value for the plugin is set to true
+fail2ban_jail | **Optional.** Specify the name of the specific jail to monitor; omitted by default, i.e. all jails are being monitored.
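+
+A minimal sketch of a service using this command (the jail name and thresholds are illustrative):
+
+```
+apply Service "fail2ban" {
+  check_command = "fail2ban"
+
+  vars.fail2ban_warning = 1
+  vars.fail2ban_critical = 2
+  vars.fail2ban_jail = "sshd"   // illustrative jail name; omit to monitor all jails
+
+  assign where host.vars.os == "Linux"
+}
+```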
+
+### Operating System <a id="plugin-contrib-operating-system"></a>
+
+This category contains plugins which receive details about your operating system
+or the guest system.
+
+#### mem <a id="plugin-contrib-command-mem"></a>
+
+The [check_mem.pl](https://github.com/justintime/nagios-plugins) plugin checks the
+memory usage on Linux and Unix hosts. It is able to count cache memory as free when
+compared to thresholds. More details can be found on [this blog entry](http://sysadminsjourney.com/content/2009/06/04/new-and-improved-checkmempl-nagios-plugin).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------|-----------------------------------------------------------------------------------------------------------------------
+mem_used | **Optional.** Tell the plugin to check for used memory to the exclusion of **mem_free** and **mem_available**. Must specify one of these as true.
+mem_free | **Optional.** Tell the plugin to check for free memory to the exclusion of **mem_used** and **mem_available**. Must specify one of these as true.
+mem_available | **Optional.** Tell the plugin to check available memory to the exclusion of **mem_free** and **mem_used**. Must specify one of these as true.
+mem_cache | **Optional.** If set to true, plugin will count cache as free memory. Defaults to false.
+mem_warning | **Required.** Specify the warning threshold as number interpreted as percent.
+mem_critical | **Required.** Specify the critical threshold as number interpreted as percent.
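+
+A sketch of a service checking used memory while counting cache as free (thresholds are illustrative):
+
+```
+apply Service "mem" {
+  check_command = "mem"
+
+  vars.mem_used = true
+  vars.mem_cache = true
+  vars.mem_warning = 80
+  vars.mem_critical = 90
+
+  assign where host.vars.os == "Linux"
+}
+```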
+
+#### sar-perf <a id="plugin-contrib-command-sar-perf"></a>
+
+The [check_sar_perf.py](https://github.com/NETWAYS/check-sar-perf)
+plugin collects performance metrics from Linux hosts using the `sar` binary available in the `sysstat` package.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-----------------|-----------------------------------------------------------------------------------------------------------------------
+sar_perf_profile | **Required.** Define the run profile: `pagestat`, `cpu`, `memory_util`, `memory_stat`, `io_transfer`, `queueln_load`, `swap_util`, `swap_stat`, `task`, `kernel`, `disk <disk>`. Can be a string or an array of multiple profiles.
+sar_perf_disk | **Optional.** Disk name for the 'disk' profile.
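+
+A sketch of a service running two of the profiles listed above (profile selection is illustrative):
+
+```
+apply Service "sar-perf" {
+  check_command = "sar-perf"
+
+  // Multiple profiles can be passed as an array
+  vars.sar_perf_profile = [ "cpu", "memory_util" ]
+
+  assign where host.vars.os == "Linux"
+}
+```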
+
+
+#### running_kernel <a id="plugin-contrib-command-running_kernel"></a>
+
+The [check_running_kernel](https://packages.debian.org/stretch/nagios-plugins-contrib) plugin
+is provided by the `nagios-plugins-contrib` package on Debian/Ubuntu.
+
+Custom variables:
+
+Name | Description
+---------------------------|-------------
+running\_kernel\_use\_sudo | Whether to run the plugin with `sudo`. Defaults to false except on Ubuntu where it defaults to true.
+
+#### iostats <a id="plugin-contrib-command-iostats"></a>
+
+The [check_iostats](https://github.com/dnsmichi/icinga-plugins/blob/master/scripts/check_iostats) plugin
+uses the `iostat` binary to monitor I/O on a Linux host. The default thresholds are rather high
+so you can use a grapher for baselining before setting your own.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+---------------|-----------------------------------------------------------------------------------------------------------------------
+iostats\_disk | **Required.** The device to monitor, without path, e.g. sda or vda (default: sda).
+iostats\_warning\_tps | **Required.** Warning threshold for tps (default: 3000).
+iostats\_warning\_read | **Required.** Warning threshold for KB/s reads (default: 50000).
+iostats\_warning\_write | **Required.** Warning threshold for KB/s writes (default: 10000).
+iostats\_warning\_wait | **Required.** Warning threshold for % iowait (default: 50).
+iostats\_critical\_tps | **Required.** Critical threshold for tps (default: 5000).
+iostats\_critical\_read | **Required.** Critical threshold for KB/s reads (default: 80000).
+iostats\_critical\_write | **Required.** Critical threshold for KB/s writes (default: 25000).
+iostats\_critical\_wait | **Required.** Critical threshold for % iowait (default: 80).
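+
+A sketch of a service using this command; the `iostat` command below is configured the same way with its own variable names. Disk and thresholds are illustrative:
+
+```
+apply Service "iostats-sda" {
+  check_command = "iostats"
+
+  vars.iostats_disk = "sda"
+  vars.iostats_warning_tps = 3000
+  vars.iostats_critical_tps = 5000
+  vars.iostats_warning_wait = 50
+  vars.iostats_critical_wait = 80
+
+  assign where host.vars.os == "Linux"
+}
+```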
+
+#### iostat <a id="plugin-contrib-command-iostat"></a>
+
+The [check_iostat](https://github.com/dnsmichi/icinga-plugins/blob/master/scripts/check_iostat) plugin
+uses the `iostat` binary to monitor disk I/O on a Linux host. The default thresholds are rather high
+so you can use a grapher for baselining before setting your own.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+---------------|-----------------------------------------------------------------------------------------------------------------------
+iostat\_disk | **Required.** The device to monitor, without path, e.g. sda or vda (default: sda).
+iostat\_wtps | **Required.** Warning threshold for tps (default: 100).
+iostat\_wread | **Required.** Warning threshold for KB/s reads (default: 100).
+iostat\_wwrite | **Required.** Warning threshold for KB/s writes (default: 100).
+iostat\_ctps | **Required.** Critical threshold for tps (default: 200).
+iostat\_cread | **Required.** Critical threshold for KB/s reads (default: 200).
+iostat\_cwrite | **Required.** Critical threshold for KB/s writes (default: 200).
+
+#### systemd <a id="plugin-contrib-command-systemd"></a>
+
+The [check_systemd.py](https://github.com/Josef-Friedrich/check_systemd) plugin
+will report a degraded system to your monitoring solution. It requires only the [nagiosplugin](https://nagiosplugin.readthedocs.io/en/stable) library.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------
+systemd\_unit | **Optional.** Name of the systemd unit that is being tested.
+systemd\_exclude\_unit | **Optional.** Exclude a systemd unit from the checks. This option can be applied multiple times. Also supports regular expressions.
+systemd\_no\_startup\_time | **Optional.** Don’t check the startup time. Using this option the options `systemd_warning` and `systemd_critical` have no effect. (Default: `false`)
+systemd\_warning | **Optional.** Startup time in seconds to result in a warning status. (Default: `60s`)
+systemd\_critical | **Optional.** Startup time in seconds to result in a critical status. (Default: `120s`)
+systemd\_dead\_timers | **Optional.** Detect dead / inactive timers. (Default: `false`)
+systemd\_dead\_timers\_warning | **Optional.** Time ago in seconds for dead / inactive timers to trigger a warning state (by default 6 days).
+systemd\_dead\_timers\_critical | **Optional.** Time ago in seconds for dead / inactive timers to trigger a critical state (by default 7 days).
+systemd\_verbose\_level | **Optional.** Increase verbosity level (Accepted values: `1`, `2` or `3`). (Defaults to none)
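+
+A sketch of a service checking a single unit (the unit name is illustrative; omit `systemd_unit` to check the overall system state):
+
+```
+apply Service "systemd-icinga2" {
+  check_command = "systemd"
+
+  vars.systemd_unit = "icinga2.service"
+
+  assign where host.vars.os == "Linux"
+}
+```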
+
+#### yum <a id="plugin-contrib-command-yum"></a>
+
+The [check_yum](https://github.com/calestyo/check_yum) plugin checks the YUM package
+management system for package updates.
+The plugin requires the `yum-plugin-security` package to differentiate between security and normal updates.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+yum_all_updates | **Optional.** Set to true to not distinguish between security and non-security updates, but returns critical for any available update. This may be used if the YUM security plugin is absent or you want to maintain every single package at the latest version. You may want to use **yum_warn_on_any_update** instead of this option. Defaults to false.
+yum_warn_on_any_update | **Optional.** Set to true to warn if there are any (non-security) package updates available. Defaults to false.
+yum_cache_only | **Optional.** If set to true, plugin runs entirely from cache and does not update the cache when running YUM. Useful if you have `yum makecache` cronned. Defaults to false.
+yum_no_warn_on_lock | **Optional.** If set to true, returns OK instead of WARNING when YUM is locked and fails to check for updates due to another instance running. Defaults to false.
+yum_no_warn_on_updates | **Optional.** If set to true, returns OK instead of WARNING even when updates are available. The plugin output still shows the number of available updates. Defaults to false.
+yum_enablerepo | **Optional.** Explicitly enables a repository when calling YUM. Can take a comma separated list of repositories. Note that enabling repositories can lead to unexpected results, for example when protected repositories are enabled.
+yum_disablerepo | **Optional.** Explicitly disables a repository when calling YUM. Can take a comma separated list of repositories. Note that disabling repositories can lead to unexpected results, for example when protected repositories are disabled.
+yum_installroot | **Optional.** Specifies another installation root directory (for example a chroot).
+yum_timeout | **Optional.** Set a timeout in seconds after which the plugin will exit (defaults to 55 seconds).
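+
+A sketch of a service using this command (the assignment rule and values are illustrative):
+
+```
+apply Service "yum" {
+  check_command = "yum"
+
+  vars.yum_cache_only = true   // useful if `yum makecache` runs from cron
+  vars.yum_timeout = 55
+
+  assign where host.vars.distribution == "RHEL"   // illustrative host custom variable
+}
+```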
+
+### Storage <a id="plugins-contrib-storage"></a>
+
+This category includes all plugins for various storage and object storage technologies.
+
+#### glusterfs <a id="plugins-contrib-command-glusterfs"></a>
+
+The [glusterfs](https://www.unixadm.org/software/nagios-stuff/checks/check_glusterfs) plugin
+is used to check the GlusterFS storage health on the server.
+The plugin requires `sudo` permissions.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+---------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+glusterfs_perfdata | **Optional.** Print perfdata of all or the specified volume.
+glusterfs_warnonfailedheal | **Optional.** Warn if the *heal-failed* log contains entries. The log can be cleared by restarting glusterd.
+glusterfs_volume | **Optional.** Only check the specified *VOLUME*. If --volume is not set, all volumes are checked.
+glusterfs_disk_warning | **Optional.** Warn if disk usage is above *DISKWARN*. Defaults to 90 (percent).
+glusterfs_disk_critical | **Optional.** Return a critical error if disk usage is above *DISKCRIT*. Defaults to 95 (percent).
+glusterfs_inode_warning | **Optional.** Warn if inode usage is above *DISKWARN*. Defaults to 90 (percent).
+glusterfs_inode_critical | **Optional.** Return a critical error if inode usage is above *DISKCRIT*. Defaults to 95 (percent).
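+
+A sketch of a service using this command (volume name and thresholds are illustrative):
+
+```
+apply Service "glusterfs" {
+  check_command = "glusterfs"
+
+  vars.glusterfs_volume = "gv0"   // illustrative volume name; omit to check all volumes
+  vars.glusterfs_disk_warning = 90
+  vars.glusterfs_disk_critical = 95
+
+  assign where host.vars.glusterfs
+}
+```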
+
+#### ceph <a id="plugins-contrib-command-ceph"></a>
+
+The [ceph plugin](https://github.com/ceph/ceph-nagios-plugins)
+is used to check the Ceph storage health on the server.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-----------------|---------------------------------------------------------
+ceph_exec_dir | **Optional.** Ceph executable. Default /usr/bin/ceph.
+ceph_conf_file | **Optional.** Alternative ceph conf file.
+ceph_mon_address | **Optional.** Ceph monitor address[:port].
+ceph_client_id | **Optional.** Ceph client id.
+ceph_client_name | **Optional.** Ceph client name.
+ceph_client_key | **Optional.** Ceph client keyring file.
+ceph_whitelist | **Optional.** Whitelist regexp for ceph health warnings.
+ceph_details | **Optional.** Run 'ceph health detail'.
+
+#### btrfs <a id="plugins-contrib-command-btrfs"></a>
+
+The [btrfs plugin](https://github.com/knorrie/python-btrfs/)
+is used to check the btrfs storage health on the server.
+
+The plugin requires `sudo` permissions.
+You can add a sudoers file to allow your monitoring user to use the plugin, e.g. edit `/etc/sudoers.d/icinga` and add:
+```
+icinga ALL=(root) NOPASSWD:/usr/lib/nagios/plugins/check_btrfs
+```
+
+and set the correct permissions:
+```bash
+chown -c root: /etc/sudoers.d/icinga
+chmod -c 0440 /etc/sudoers.d/icinga
+```
+
+The [monitoring-plugins-btrfs](https://packages.debian.org/monitoring-plugins-btrfs) package provides the necessary binary on Debian/Ubuntu.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-----------------|---------------------------------------------------------
+btrfs_awg | **Optional.** Exit with WARNING status if less than the specified amount of disk space (in GiB) is unallocated
+btrfs_acg | **Optional.** Exit with CRITICAL status if less than the specified amount of disk space (in GiB) is unallocated
+btrfs_awp | **Optional.** Exit with WARNING status if more than the specified percent of disk space is allocated
+btrfs_acp | **Optional.** Exit with CRITICAL status if more than the specified percent of disk space is allocated
+btrfs_mountpoint | **Required.** Path to the BTRFS mountpoint
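+
+A sketch of a service using this command (the `btrfs_mountpoint` host custom variable and the thresholds are illustrative):
+
+```
+apply Service "btrfs" {
+  check_command = "btrfs"
+
+  vars.btrfs_mountpoint = host.vars.btrfs_mountpoint
+  vars.btrfs_awp = 90   // warn when more than 90% of disk space is allocated
+  vars.btrfs_acp = 95   // critical when more than 95% is allocated
+
+  assign where host.vars.btrfs_mountpoint
+}
+```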
+
+### Virtualization <a id="plugin-contrib-virtualization"></a>
+
+This category includes all plugins for various virtualization technologies.
+
+#### esxi_hardware <a id="plugin-contrib-command-esxi-hardware"></a>
+
+The [check_esxi_hardware.py](https://www.claudiokuenzler.com/monitoring-plugins/check_esxi_hardware.php) plugin
+uses the [pywbem](https://pywbem.github.io/pywbem/) Python library to monitor the hardware of ESXi servers
+through the [VMware CIM API](https://developer.vmware.com/apis/207/cim).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+esxi_hardware_host | **Required.** Specifies the host to monitor. Defaults to "$address$".
+esxi_hardware_user | **Required.** Specifies the user for polling. Must be a local user of the root group on the system. Can also be provided as a file path file:/path/to/.passwdfile, then first string of file is used.
+esxi_hardware_pass | **Required.** Password of the user. Can also be provided as a file path file:/path/to/.passwdfile, then second string of file is used.
+esxi_hardware_port | **Optional.** Specifies the CIM port to connect to. Defaults to 5989.
+esxi_hardware_sslproto | **Optional.** Specifies the SSL/TLS protocol to use. Defaults to local openssl config.
+esxi_hardware_vendor | **Optional.** Defines the vendor of the server: "auto", "dell", "hp", "ibm", "intel", "unknown" (default).
+esxi_hardware_html | **Optional.** Add web-links to hardware manuals for Dell servers (use your country extension). Only useful with **esxi_hardware_vendor** = dell.
+esxi_hardware_ignore | **Optional.** Comma separated list of CIM elements to ignore.
+esxi_hardware_regex | **Optional.** Allow regular expression lookups of elements in ignore list. Defaults to false.
+esxi_hardware_perfdata  | **Optional.** Add performance data for graphers like PNP4Nagios to the output. Defaults to false.
+esxi_hardware_format | **Optional.** Set output format to string or json. Defaults to string.
+esxi_hardware_pretty | **Optional.** Show plugin output in a human readable format. Only useful with **esxi_hardware_format** = json.
+esxi_hardware_nopower | **Optional.** Do not collect power performance data, when **esxi_hardware_perfdata** is set to true. Defaults to false.
+esxi_hardware_novolts | **Optional.** Do not collect voltage performance data, when **esxi_hardware_perfdata** is set to true. Defaults to false.
+esxi_hardware_nocurrent | **Optional.** Do not collect current performance data, when **esxi_hardware_perfdata** is set to true. Defaults to false.
+esxi_hardware_notemp | **Optional.** Do not collect temperature performance data, when **esxi_hardware_perfdata** is set to true. Defaults to false.
+esxi_hardware_nofan | **Optional.** Do not collect fan performance data, when **esxi_hardware_perfdata** is set to true. Defaults to false.
+esxi_hardware_nolcd | **Optional.** Do not collect lcd/display status data. Defaults to false.
+
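+A possible service definition (host name, password file path and vendor are placeholders; `esxi_hardware_host` is omitted here since it defaults to `$address$`):
+
+```
+object Service "esxi-hardware" {
+  import "generic-service"
+
+  host_name = "esxi01.example.com"
+  check_command = "esxi_hardware"
+
+  // Read user and password from a protected file (first and second string)
+  vars.esxi_hardware_user = "file:/etc/icinga2/secrets/.esxi-passwd"
+  vars.esxi_hardware_pass = "file:/etc/icinga2/secrets/.esxi-passwd"
+  vars.esxi_hardware_vendor = "dell"
+}
+```
+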
+#### VMware <a id="plugin-contrib-vmware"></a>
+
+Check commands for the [check_vmware_esx](https://github.com/BaldMansMojo/check_vmware_esx) plugin.
+
+**vmware-esx-dc-volumes**
+
+Check command object for the `check_vmware_esx` plugin. Shows all datastore volumes info.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Required.** Datacenter/vCenter hostname.
+vmware_cluster | **Optional.** ESX or ESXi clustername.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_subselect | **Optional.** Volume name to be checked the free space.
+vmware_gigabyte | **Optional.** Output in GB instead of MB.
+vmware_usedspace | **Optional.** Output used space instead of free. Defaults to "false".
+vmware_alertonly | **Optional.** List only alerting volumes. Defaults to "false".
+vmware_exclude | **Optional.** Blacklist volumes name. No value defined as default.
+vmware_include | **Optional.** Whitelist volumes name. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
+vmware_dc_volume_used | **Optional.** Output used space instead of free. Defaults to "true".
+vmware_warn | **Optional.** The warning threshold for volumes. Defaults to "80%".
+vmware_crit | **Optional.** The critical threshold for volumes. Defaults to "90%".
+
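+An illustrative service definition (vCenter host name, auth file path, thresholds and the exclude pattern are placeholders):
+
+```
+object Service "vcenter-volumes" {
+  import "generic-service"
+
+  host_name = "vcenter.example.com"
+  check_command = "vmware-esx-dc-volumes"
+
+  vars.vmware_datacenter = "vcenter.example.com"
+  vars.vmware_authfile = "/etc/icinga2/secrets/vmware.auth"
+  vars.vmware_warn = "75%"
+  vars.vmware_crit = "85%"
+  // Skip local datastores via regexp blacklist
+  vars.vmware_exclude = "local-.*"
+  vars.vmware_isregexp = true
+}
+```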
+
+**vmware-esx-dc-runtime-info**
+
+Check command object for the `check_vmware_esx` plugin. Shows all runtime info for the datacenter/vCenter.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Required.** Datacenter/vCenter hostname.
+vmware_cluster | **Optional.** ESX or ESXi clustername.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+
+
+**vmware-esx-dc-runtime-listvms**
+
+Check command object for the `check_vmware_esx` plugin. List of VMware machines and their power state. **Beware:** in larger environments the mass of data can cause trouble displaying the information needed. Use **vmware_alertonly** to avoid this (see the example below).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Required.** Datacenter/vCenter hostname.
+vmware_cluster | **Optional.** ESX or ESXi clustername.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_alertonly | **Optional.** List only alerting VMs. Important here to avoid masses of data.
+vmware_exclude | **Optional.** Blacklist VMs name. No value defined as default.
+vmware_include | **Optional.** Whitelist VMs name. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
+vmware_multiline        | **Optional.** Multiline output in overview. This means technically that a multiline output uses an HTML **\<br\>** for the GUI. No value defined as default.
+
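+An illustrative service definition that uses **vmware_alertonly** to keep the output small (vCenter host name and credentials are placeholders):
+
+```
+object Service "vcenter-vms" {
+  import "generic-service"
+
+  host_name = "vcenter.example.com"
+  check_command = "vmware-esx-dc-runtime-listvms"
+
+  vars.vmware_datacenter = "vcenter.example.com"
+  vars.vmware_username = "vmuser"
+  vars.vmware_password = "p@ssw0rd"
+  // Only report alerting VMs and render them on separate lines
+  vars.vmware_alertonly = true
+  vars.vmware_multiline = true
+}
+```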
+
+**vmware-esx-dc-runtime-listhost**
+
+Check command object for the `check_vmware_esx` plugin. List of VMware ESX hosts and their power state.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Required.** Datacenter/vCenter hostname.
+vmware_cluster | **Optional.** ESX or ESXi clustername.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_alertonly | **Optional.** List only alerting hosts. Important here to avoid masses of data.
+vmware_exclude | **Optional.** Blacklist VMware ESX hosts. No value defined as default.
+vmware_include | **Optional.** Whitelist VMware ESX hosts. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
+vmware_multiline        | **Optional.** Multiline output in overview. This means technically that a multiline output uses an HTML **\<br\>** for the GUI. No value defined as default.
+
+
+**vmware-esx-dc-runtime-listcluster**
+
+Check command object for the `check_vmware_esx` plugin. List of VMware clusters and their states.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Required.** Datacenter/vCenter hostname.
+vmware_cluster | **Optional.** ESX or ESXi clustername.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_alertonly | **Optional.** List only alerting hosts. Important here to avoid masses of data.
+vmware_exclude | **Optional.** Blacklist VMware cluster. No value defined as default.
+vmware_include | **Optional.** Whitelist VMware cluster. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
+vmware_multiline        | **Optional.** Multiline output in overview. This means technically that a multiline output uses an HTML **\<br\>** for the GUI. No value defined as default.
+
+
+**vmware-esx-dc-runtime-issues**
+
+Check command object for the `check_vmware_esx` plugin. All issues for the host.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Required.** Datacenter/vCenter hostname.
+vmware_cluster | **Optional.** ESX or ESXi clustername.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_exclude | **Optional.** Blacklist issues. No value defined as default.
+vmware_include | **Optional.** Whitelist issues. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
+vmware_multiline        | **Optional.** Multiline output in overview. This means technically that a multiline output uses an HTML **\<br\>** for the GUI. No value defined as default.
+
+
+**vmware-esx-dc-runtime-status**
+
+Check command object for the `check_vmware_esx` plugin. Overall object status (gray/green/red/yellow).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Required.** Datacenter/vCenter hostname.
+vmware_cluster | **Optional.** ESX or ESXi clustername.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+
+
+**vmware-esx-dc-runtime-tools**
+
+Check command object for the `check_vmware_esx` plugin. VMware Tools status.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Required.** Datacenter/vCenter hostname.
+vmware_cluster | **Optional.** ESX or ESXi clustername.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_poweredonly | **Optional.** List only VMs which are powered on. No value defined as default.
+vmware_alertonly | **Optional.** List only alerting VMs. Important here to avoid masses of data.
+vmware_exclude | **Optional.** Blacklist VMs. No value defined as default.
+vmware_include | **Optional.** Whitelist VMs. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
+vmware_multiline        | **Optional.** Multiline output in overview. This means technically that a multiline output uses an HTML **\<br\>** for the GUI. No value defined as default.
+vmware_openvmtools      | **Optional.** Prevent CRITICAL state for installed and running Open VM Tools.
+vmware_novmtools        | **Optional.** Prevent CRITICAL state for missing VMware tools.
+
+
+**vmware-esx-soap-host-check**
+
+Check command object for the `check_vmware_esx` plugin. Simple check to verify a successful connection to VMware SOAP API.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+
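+An illustrative service definition (the ESXi host name and the auth file path are placeholders; `$address$` resolves to the host object's address at runtime):
+
+```
+object Service "esx-connection" {
+  import "generic-service"
+
+  host_name = "esx01.example.com"
+  check_command = "vmware-esx-soap-host-check"
+
+  vars.vmware_host = "$address$"
+  vars.vmware_authfile = "/etc/icinga2/secrets/vmware.auth"
+}
+```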
+
+**vmware-esx-soap-host-uptime**
+
+Check command object for the `check_vmware_esx` plugin. Displays uptime of the VMware host.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+
+
+**vmware-esx-soap-host-cpu**
+
+Check command object for the `check_vmware_esx` plugin. CPU usage in percentage.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold in percent. Defaults to "80%".
+vmware_crit | **Optional.** The critical threshold in percent. Defaults to "90%".
+
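+An illustrative apply rule (the thresholds and the `host.vars.os` filter value are placeholders):
+
+```
+apply Service "esx-cpu" {
+  import "generic-service"
+
+  check_command = "vmware-esx-soap-host-cpu"
+
+  vars.vmware_host = "$address$"
+  vars.vmware_warn = "75%"
+  vars.vmware_crit = "90%"
+
+  assign where host.vars.os == "VMware ESXi"
+}
+```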
+
+**vmware-esx-soap-host-cpu-ready**
+
+Check command object for the `check_vmware_esx` plugin. Percentage of time that the virtual machine was ready, but could not get scheduled to run on the physical CPU. CPU ready time is dependent on the number of virtual machines on the host and their CPU loads. High or growing ready time can be a hint at CPU bottlenecks.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+
+
+**vmware-esx-soap-host-cpu-wait**
+
+Check command object for the `check_vmware_esx` plugin. CPU time spent in wait state. The wait total includes time spent in the CPU idle, CPU swap wait, and CPU I/O wait states. High or growing wait time can be a hint at I/O bottlenecks.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+
+
+**vmware-esx-soap-host-cpu-usage**
+
+Check command object for the `check_vmware_esx` plugin. Actively used CPU of the host, as a percentage of the total available CPU. Active CPU is approximately equal to the ratio of the used CPU to the available CPU.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold in percent. Defaults to "80%".
+vmware_crit | **Optional.** The critical threshold in percent. Defaults to "90%".
+
+
+**vmware-esx-soap-host-mem**
+
+Check command object for the `check_vmware_esx` plugin. All memory info (except overall; no thresholds).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+
+
+**vmware-esx-soap-host-mem-usage**
+
+Check command object for the `check_vmware_esx` plugin. Average memory usage in percent.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold in percent. Defaults to "80%".
+vmware_crit | **Optional.** The critical threshold in percent. Defaults to "90%".
+
+
+**vmware-esx-soap-host-mem-consumed**
+
+Check command object for the `check_vmware_esx` plugin. Amount of machine memory used on the host, in MB. Consumed memory includes memory used by the Service Console, the VMkernel and vSphere services, plus the total consumed metrics for all running virtual machines.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold in percent. No value defined as default.
+vmware_crit | **Optional.** The critical threshold in percent. No value defined as default.
+
+
+**vmware-esx-soap-host-mem-swapused**
+
+Check command object for the `check_vmware_esx` plugin. Amount of memory that is used by swap. Sum of the memory swapped by all powered-on VMs and vSphere services on the host, in MB. In case of an error, all VMs with their swap usage will be displayed.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold in percent. No value defined as default.
+vmware_crit | **Optional.** The critical threshold in percent. No value defined as default.
+vmware_multiline        | **Optional.** Multiline output in overview. This means technically that a multiline output uses an HTML **\<br\>** for the GUI. No value defined as default.
+
+
+**vmware-esx-soap-host-mem-overhead**
+
+Check command object for the `check_vmware_esx` plugin. Additional memory used by the VM server, in MB.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br>  username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold in percent. No value defined as default.
+vmware_crit | **Optional.** The critical threshold in percent. No value defined as default.
+
+
+**vmware-esx-soap-host-mem-memctl**
+
+Check command object for the `check_vmware_esx` plugin. The sum of all vmmemctl values in MB for all powered-on virtual machines, plus vSphere services on the host. If the balloon target value is greater than the balloon value, the VMkernel inflates the balloon, causing more virtual machine memory to be reclaimed. If the balloon target value is less than the balloon value, the VMkernel deflates the balloon, which allows the virtual machine to consume additional memory if needed (used by VM memory control driver). In case of an error all VMs with their vmmemctl values will be displayed.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold in percent. No value defined as default.
+vmware_crit | **Optional.** The critical threshold in percent. No value defined as default.
+vmware_multiline        | **Optional.** Multiline output in overview. This means technically that a multiline output uses an HTML **\<br\>** for the GUI. No value defined as default.
+
+
+**vmware-esx-soap-host-net**
+
+Check command object for the `check_vmware_esx` plugin. Shows net info.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_exclude | **Optional.** Blacklist NICs. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist expression as regexp.
+
+
+**vmware-esx-soap-host-net-usage**
+
+Check command object for the `check_vmware_esx` plugin. Overall network usage in KBps (Kilobytes per Second).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold in KBps(Kilobytes per Second). No value defined as default.
+vmware_crit | **Optional.** The critical threshold in KBps(Kilobytes per Second). No value defined as default.
+
+
+**vmware-esx-soap-host-net-receive**
+
+Check command object for the `check_vmware_esx` plugin. Data received in KBps (Kilobytes per Second).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold in KBps(Kilobytes per Second). No value defined as default.
+vmware_crit | **Optional.** The critical threshold in KBps(Kilobytes per Second). No value defined as default.
+
+
+**vmware-esx-soap-host-net-send**
+
+Check command object for the `check_vmware_esx` plugin. Data sent in KBps (Kilobytes per Second).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use auth file instead username/password to session connect. No effect if **vmware_username** and **vmware_password** are defined <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold in KBps(Kilobytes per Second). No value defined as default.
+vmware_crit | **Optional.** The critical threshold in KBps(Kilobytes per Second). No value defined as default.
+
+
+**vmware-esx-soap-host-net-nic**
+
+Check command object for the `check_vmware_esx` plugin. Check all active NICs.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_exclude | **Optional.** Blacklist NICs. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist expression as regexp.
+
+
+**vmware-esx-soap-host-volumes**
+
+Check command object for the `check_vmware_esx` plugin. Shows all datastore volumes info.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_subselect        | **Optional.** Volume name whose free space should be checked.
+vmware_gigabyte | **Optional.** Output in GB instead of MB.
+vmware_usedspace | **Optional.** Output used space instead of free. Defaults to "false".
+vmware_alertonly | **Optional.** List only alerting volumes. Defaults to "false".
+vmware_exclude | **Optional.** Blacklist volumes name. No value defined as default.
+vmware_include | **Optional.** Whitelist volumes name. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
+vmware_warn | **Optional.** The warning threshold for volumes. Defaults to "80%".
+vmware_crit | **Optional.** The critical threshold for volumes. Defaults to "90%".
+vmware_spaceleft | **Optional.** This has to be used in conjunction with thresholds as mentioned above.
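+
+A sketch of how the volume parameters might be combined; the volume name and thresholds below are placeholders:
+
+```
+object Service "esx-datastore1-free" {
+  host_name = "esx01.example.com"                    // assumes a matching Host object exists
+  check_command = "vmware-esx-soap-host-volumes"
+  vars.vmware_host = "esx01.example.com"             // placeholder ESXi host
+  vars.vmware_subselect = "datastore1"               // placeholder volume name
+  vars.vmware_gigabyte = true                        // report in GB instead of MB
+  vars.vmware_warn = "80%"                           // warn at 80% usage
+  vars.vmware_crit = "90%"                           // critical at 90% usage
+}
+```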
+
+
+**vmware-esx-soap-host-io**
+
+Check command object for the `check_vmware_esx` plugin. Shows all disk I/O info. Without a subselect, no thresholds can be given. All I/O values are aggregated from historical intervals over the past 24 hours with a 5 minute sample rate.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+
+
+**vmware-esx-soap-host-io-aborted**
+
+Check command object for the `check_vmware_esx` plugin. Number of aborted SCSI commands.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
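+
+Since the io checks take plain numeric thresholds, a sketch can stay very small (the values below are placeholders):
+
+```
+object Service "esx-io-aborted" {
+  host_name = "esx01.example.com"                    // assumes a matching Host object exists
+  check_command = "vmware-esx-soap-host-io-aborted"
+  vars.vmware_host = "esx01.example.com"             // placeholder ESXi host
+  vars.vmware_warn = 1                               // example: warn on any aborted SCSI command
+  vars.vmware_crit = 5                               // example: critical above 5 aborted commands
+}
+```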
+
+
+**vmware-esx-soap-host-io-resets**
+
+Check command object for the `check_vmware_esx` plugin. Number of SCSI bus resets.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
+
+
+**vmware-esx-soap-host-io-read**
+
+Check command object for the `check_vmware_esx` plugin. Average number of kilobytes read from the disk each second.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
+
+
+**vmware-esx-soap-host-io-read-latency**
+
+Check command object for the `check_vmware_esx` plugin. Average amount of time (ms) to process a SCSI read command issued from the Guest OS to the virtual machine.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
+
+
+**vmware-esx-soap-host-io-write**
+
+Check command object for the `check_vmware_esx` plugin. Average number of kilobytes written to disk each second.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
+
+
+**vmware-esx-soap-host-io-write-latency**
+
+Check command object for the `check_vmware_esx` plugin. Average amount of time (ms) taken to process a SCSI write command issued by the Guest OS to the virtual machine.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
+
+
+**vmware-esx-soap-host-io-usage**
+
+Check command object for the `check_vmware_esx` plugin. Aggregated disk I/O rate. For hosts, this metric includes the rates for all virtual machines running on the host.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
+
+
+**vmware-esx-soap-host-io-kernel-latency**
+
+Check command object for the `check_vmware_esx` plugin. Average amount of time (ms) spent by VMkernel processing each SCSI command.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
+
+
+**vmware-esx-soap-host-io-device-latency**
+
+Check command object for the `check_vmware_esx` plugin. Average amount of time (ms) to complete a SCSI command from the physical device.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
+
+
+**vmware-esx-soap-host-io-queue-latency**
+
+Check command object for the `check_vmware_esx` plugin. Average amount of time (ms) spent in the VMkernel queue.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
+
+
+**vmware-esx-soap-host-io-total-latency**
+
+Check command object for the `check_vmware_esx` plugin. Average amount of time (ms) taken during the collection interval to process a SCSI command issued by the guest OS to the virtual machine. The sum of kernelWriteLatency and deviceWriteLatency.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
+
+
+**vmware-esx-soap-host-media**
+
+Check command object for the `check_vmware_esx` plugin. Lists VMs with attached host-mounted media like CD, DVD or floppy drives. This is important for monitoring because a virtual machine with a mounted CD or DVD drive cannot be moved to another host.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_exclude | **Optional.** Blacklist VMs name. No value defined as default.
+vmware_include | **Optional.** Whitelist VMs name. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
+vmware_multiline        | **Optional.** Multiline output in overview. Technically this means that the multiline output uses an HTML **\<br\>** for the GUI. No value defined as default.
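+
+To keep the check focused on a subset of VMs, the blacklist/whitelist parameters can be combined as in this sketch (names are placeholders):
+
+```
+object Service "esx-media" {
+  host_name = "esx01.example.com"                    // assumes a matching Host object exists
+  check_command = "vmware-esx-soap-host-media"
+  vars.vmware_host = "esx01.example.com"             // placeholder ESXi host
+  vars.vmware_exclude = "^template-.*"               // placeholder: skip template VMs
+  vars.vmware_isregexp = true                        // treat the blacklist as a regexp
+  vars.vmware_multiline = true                       // one VM per line in the GUI
+}
+```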
+
+
+**vmware-esx-soap-host-service**
+
+Check command object for the `check_vmware_esx` plugin. Shows host service info.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_exclude | **Optional.** Blacklist services name. No value defined as default.
+vmware_include | **Optional.** Whitelist services name. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
+vmware_multiline        | **Optional.** Multiline output in overview. Technically this means that the multiline output uses an HTML **\<br\>** for the GUI. No value defined as default.
+
+
+**vmware-esx-soap-host-runtime**
+
+Check command object for the `check_vmware_esx` plugin. Shows runtime info. VMs, overall status, connection state, health, storage health, temperature and sensors are each represented as one value, without thresholds.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_exclude | **Optional.** Blacklist VMs name. No value defined as default.
+vmware_include | **Optional.** Whitelist VMs name. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
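+
+When the check is routed through a vCenter and the credentials are kept in an auth file, a sketch could look like this (host names and the file path are placeholders):
+
+```
+object Service "esx-runtime" {
+  host_name = "esx01.example.com"                    // assumes a matching Host object exists
+  check_command = "vmware-esx-soap-host-runtime"
+  vars.vmware_host = "esx01.example.com"             // placeholder ESXi host
+  vars.vmware_datacenter = "vcenter.example.com"     // placeholder vCenter used as gateway
+  vars.vmware_authfile = "/etc/icinga2/vmware.auth"  // placeholder auth file (username=/password= lines)
+}
+```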
+
+
+**vmware-esx-soap-host-runtime-con**
+
+Check command object for the `check_vmware_esx` plugin. Shows connection state.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+
+
+**vmware-esx-soap-host-runtime-listvms**
+
+Check command object for the `check_vmware_esx` plugin. List of VMware machines and their status.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_exclude | **Optional.** Blacklist VMs name. No value defined as default.
+vmware_include | **Optional.** Whitelist VMs name. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
+vmware_multiline        | **Optional.** Multiline output in overview. Technically this means that the multiline output uses an HTML **\<br\>** for the GUI. No value defined as default.
+
+
+**vmware-esx-soap-host-runtime-status**
+
+Check command object for the `check_vmware_esx` plugin. Overall object status (gray/green/red/yellow).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+
+
+**vmware-esx-soap-host-runtime-health**
+
+Check command object for the `check_vmware_esx` plugin. Checks CPU/storage/memory/sensor status.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_exclude | **Optional.** Blacklist status name. No value defined as default.
+vmware_include | **Optional.** Whitelist status name. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
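+
+If single components report UNKNOWN although the host is healthy, the ignore flag can be set as in this sketch (host names are placeholders):
+
+```
+object Service "esx-health" {
+  host_name = "esx01.example.com"                    // assumes a matching Host object exists
+  check_command = "vmware-esx-soap-host-runtime-health"
+  vars.vmware_host = "esx01.example.com"             // placeholder ESXi host
+  vars.vmware_ignoreunknown = true                   // map component state 3 (unknown) to OK
+}
+```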
+
+
+**vmware-esx-soap-host-runtime-health-listsensors**
+
+Check command object for the `check_vmware_esx` plugin. Lists all available sensors (use for listing purposes only).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_exclude | **Optional.** Blacklist status name. No value defined as default.
+vmware_include | **Optional.** Whitelist status name. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
+
+
+**vmware-esx-soap-host-runtime-health-nostoragestatus**
+
+Check command object for the `check_vmware_esx` plugin. Checks the host's runtime health like **vmware-esx-soap-host-runtime-health**, but without the storage status. This avoids a double alarm if you use both **vmware-esx-soap-host-runtime-health** and **vmware-esx-soap-host-runtime-storagehealth**.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_exclude | **Optional.** Blacklist status name. No value defined as default.
+vmware_include | **Optional.** Whitelist status name. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
+
+
+**vmware-esx-soap-host-runtime-storagehealth**
+
+Check command object for the `check_vmware_esx` plugin. Local storage status check.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_exclude | **Optional.** Blacklist storage name. No value defined as default.
+vmware_include | **Optional.** Whitelist storage name. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
+vmware_multiline        | **Optional.** Multiline output in overview. Technically this means that the multiline output uses an HTML **\<br\>** for the GUI. No value defined as default.
+
+
+**vmware-esx-soap-host-runtime-temp**
+
+Check command object for the `check_vmware_esx` plugin. Lists all temperature sensors.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_exclude | **Optional.** Blacklist sensor name. No value defined as default.
+vmware_include | **Optional.** Whitelist sensor name. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
+vmware_multiline        | **Optional.** Multiline output in overview. Technically this means that the multiline output uses an HTML **\<br\>** for the GUI. No value defined as default.
+
+
+**vmware-esx-soap-host-runtime-issues**
+
+Check command object for the `check_vmware_esx` plugin. Lists all configuration issues for the host.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY!. Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_exclude | **Optional.** Blacklist configuration issues. No value defined as default.
+vmware_include | **Optional.** Whitelist configuration issues. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
+vmware_multiline        | **Optional.** Multiline output in overview. Technically this means that the multiline output uses an HTML **\<br\>** for the GUI. No value defined as default.
+
+
+**vmware-esx-soap-host-storage**
+
+Check command object for the `check_vmware_esx` plugin. Shows host storage info.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_exclude | **Optional.** Blacklist adapters, luns and paths. No value defined as default.
+vmware_include | **Optional.** Whitelist adapters, luns and paths. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
+
+
+**vmware-esx-soap-host-storage-adapter**
+
+Check command object for the `check_vmware_esx` plugin. List host bus adapters.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_exclude | **Optional.** Blacklist adapters. No value defined as default.
+vmware_include | **Optional.** Whitelist adapters. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
+vmware_multiline | **Optional.** Multiline output in the overview. Technically this means that the multiline output uses an HTML **\<br\>** for the GUI. No value defined as default.
+
+
+**vmware-esx-soap-host-storage-lun**
+
+Check command object for the `check_vmware_esx` plugin. Lists SCSI logical units. The listing includes the LUN, the canonical name of the disc, any part of the displayed name that is not part of the canonical name, and the status.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_exclude | **Optional.** Blacklist luns. No value defined as default.
+vmware_include | **Optional.** Whitelist luns. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
+vmware_multiline | **Optional.** Multiline output in the overview. Technically this means that the multiline output uses an HTML **\<br\>** for the GUI. No value defined as default.
+
+
+**vmware-esx-soap-host-storage-path**
+
+Check command object for the `check_vmware_esx` plugin. List multipaths and the associated paths.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_host | **Required.** ESX or ESXi hostname.
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. In case the check is done through a Datacenter/vCenter host.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_alertonly | **Optional.** List only alerting units. Important here to avoid masses of data. Defaults to "false".
+vmware_exclude | **Optional.** Blacklist paths. No value defined as default.
+vmware_include | **Optional.** Whitelist paths. No value defined as default.
+vmware_isregexp | **Optional.** Treat blacklist and whitelist expressions as regexp.
+vmware_multiline | **Optional.** Multiline output in the overview. Technically this means that the multiline output uses an HTML **\<br\>** for the GUI. No value defined as default.
+vmware_standbyok | **Optional.** For storage systems where a standby multipath is ok and not a warning. Defaults to "false".
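+
+A possible service definition for the path check, reporting only alerting paths and treating standby multipaths as ok; the host name and the `generic-service` template are assumptions for illustration:
+
+```
+object Service "esx1-storage-path" {
+  import "generic-service"
+
+  host_name = "esx1.example.com"
+  check_command = "vmware-esx-soap-host-storage-path"
+
+  vars.vmware_host = "esx1.example.com"
+
+  // Keep the output short and do not warn on standby paths.
+  vars.vmware_alertonly = true
+  vars.vmware_standbyok = true
+}
+```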
+
+
+**vmware-esx-soap-vm-cpu**
+
+Check command object for the `check_vmware_esx` plugin. Shows all CPU usage info.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+
+
+
+**vmware-esx-soap-vm-cpu-ready**
+
+Check command object for the `check_vmware_esx` plugin. Percentage of time that the virtual machine was ready, but could not get scheduled to run on the physical CPU.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
+
+
+**vmware-esx-soap-vm-cpu-wait**
+
+Check command object for the `check_vmware_esx` plugin. CPU time spent in wait state. The wait total includes time spent in the CPU idle, CPU swap wait, and CPU I/O wait states. High or growing wait time can be a hint at I/O bottlenecks.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
+
+
+**vmware-esx-soap-vm-cpu-usage**
+
+Check command object for the `check_vmware_esx` plugin. Amount of actively used virtual CPU, as a percentage of total available CPU. This is the host's view of the CPU usage, not the guest operating system view. It is the average CPU utilization over all available virtual CPUs in the virtual machine.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** Warning threshold in percent. Defaults to "80%".
+vmware_crit | **Optional.** Critical threshold in percent. Defaults to "90%".
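+
+A sketch of a virtual machine CPU usage check through a vCenter connection with custom thresholds; all names and threshold values are examples only:
+
+```
+object Service "webserver01-cpu-usage" {
+  import "generic-service"
+
+  host_name = "vcenter.example.com"
+  check_command = "vmware-esx-soap-vm-cpu-usage"
+
+  vars.vmware_datacenter = "vcenter.example.com"
+  vars.vmware_vmname = "webserver01"
+
+  // Override the default 80%/90% thresholds.
+  vars.vmware_warn = "70%"
+  vars.vmware_crit = "85%"
+}
+```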
+
+
+**vmware-esx-soap-vm-mem**
+
+Check command object for the `check_vmware_esx` plugin. Shows all memory info, except overall.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+
+
+**vmware-esx-soap-vm-mem-usage**
+
+Check command object for the `check_vmware_esx` plugin. Average memory usage as a percentage of the configured virtual machine "physical" memory.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** Warning threshold in percent. Defaults to "80%".
+vmware_crit | **Optional.** Critical threshold in percent. Defaults to "90%".
+
+
+**vmware-esx-soap-vm-mem-consumed**
+
+Check command object for the `check_vmware_esx` plugin. Amount of guest physical memory in MB consumed by the virtual machine for guest memory. Consumed memory does not include overhead memory. It includes shared memory and memory that might be reserved, but not actually used. Use this metric for charge-back purposes.<br>
+**vm consumed memory = memory granted - memory saved**
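+
+As a purely illustrative example of the formula above: a virtual machine that was granted 4096 MB and saved 1024 MB (for instance through page sharing and ballooning) would report 3072 MB of consumed memory.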
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
+
+
+**vmware-esx-soap-vm-mem-memctl**
+
+Check command object for the `check_vmware_esx` plugin. Amount of guest physical memory that is currently reclaimed from the virtual machine through ballooning. This is the amount of guest physical memory that has been allocated and pinned by the balloon driver.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
+
+
+
+**vmware-esx-soap-vm-net**
+
+Check command object for the `check_vmware_esx` plugin. Shows net info.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+
+
+**vmware-esx-soap-vm-net-usage**
+
+Check command object for the `check_vmware_esx` plugin. Overall network usage in KBps (Kilobytes per Second).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
+
+
+**vmware-esx-soap-vm-net-receive**
+
+Check command object for the `check_vmware_esx` plugin. Receive rate in KBps (Kilobytes per Second).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
+
+
+**vmware-esx-soap-vm-net-send**
+
+Check command object for the `check_vmware_esx` plugin. Send rate in KBps (Kilobytes per Second).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
+
+
+**vmware-esx-soap-vm-io**
+
+Check command object for the `check_vmware_esx` plugin. Shows all disk I/O info. Without a subselect, no thresholds can be given. All I/O values are aggregated from historical intervals over the past 24 hours with a 5 minute sample rate.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+
+
+**vmware-esx-soap-vm-io-read**
+
+Check command object for the `check_vmware_esx` plugin. Average number of kilobytes read from the disk each second.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
+
+
+**vmware-esx-soap-vm-io-write**
+
+Check command object for the `check_vmware_esx` plugin. Average number of kilobytes written to disk each second.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
+
+
+**vmware-esx-soap-vm-io-usage**
+
+Check command object for the `check_vmware_esx` plugin. Aggregated disk I/O rate.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
+
+
+**vmware-esx-soap-vm-runtime**
+
+Check command object for the `check_vmware_esx` plugin. Shows virtual machine runtime info.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+
+
+**vmware-esx-soap-vm-runtime-con**
+
+Check command object for the `check_vmware_esx` plugin. Shows the connection state.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+
+
+**vmware-esx-soap-vm-runtime-powerstate**
+
+Check command object for the `check_vmware_esx` plugin. Shows virtual machine power state: poweredOn, poweredOff or suspended.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
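+
+A minimal power state check sketch, queried through a vCenter; the object names and the auth file path are placeholders:
+
+```
+object Service "webserver01-powerstate" {
+  import "generic-service"
+
+  host_name = "vcenter.example.com"
+  check_command = "vmware-esx-soap-vm-runtime-powerstate"
+
+  vars.vmware_datacenter = "vcenter.example.com"
+  vars.vmware_vmname = "webserver01"
+
+  // Credentials are read from an auth file instead of username/password.
+  vars.vmware_authfile = "/var/lib/icinga2/vmware.auth"
+}
+```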
+
+
+**vmware-esx-soap-vm-runtime-status**
+
+Check command object for the `check_vmware_esx` plugin. Overall object status (gray/green/red/yellow).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+
+
+**vmware-esx-soap-vm-runtime-consoleconnections**
+
+Check command object for the `check_vmware_esx` plugin. Console connections to virtual machine.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_warn | **Optional.** The warning threshold. No value defined as default.
+vmware_crit | **Optional.** The critical threshold. No value defined as default.
+
+
+**vmware-esx-soap-vm-runtime-gueststate**
+
+Check command object for the `check_vmware_esx` plugin. Guest OS status. Needs VMware Tools installed and running.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace | **Optional.** Set verbosity level of vSphere API request/respond trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile | **Optional.** Use an auth file instead of username/password to connect to the session. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+
+**vmware-esx-soap-vm-runtime-tools**
+
+Check command object for the `check_vmware_esx` plugin. VMware Tools status.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace            | **Optional.** Set the verbosity level of the vSphere API request/response trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession        | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password for the session connection. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_openvmtools      | **Optional.** Prevent CRITICAL state for installed and running Open VM Tools.
+vmware_novmtools        | **Optional.** Prevent CRITICAL state for missing VMware tools.
+
+
+**vmware-esx-soap-vm-runtime-issues**
+
+Check command object for the `check_vmware_esx` plugin. All issues for the virtual machine.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+vmware_datacenter | **Optional.** Datacenter/vCenter hostname. Conflicts with **vmware_host**.
+vmware_host | **Optional.** ESX or ESXi hostname. Conflicts with **vmware_datacenter**.
+vmware_vmname | **Required.** Virtual machine name.
+vmware_sslport | **Optional.** SSL port connection. Defaults to "443".
+vmware_ignoreunknown | **Optional.** Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3). Defaults to "false".
+vmware_ignorewarning | **Optional.** Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view). With this option the plugin will return OK (0) instead of WARNING (1). Defaults to "false".
+vmware_timeout | **Optional.** Seconds before plugin times out. Defaults to "90".
+vmware_trace            | **Optional.** Set the verbosity level of the vSphere API request/response trace.
+vmware_sessionfile | **Optional.** Session file name enhancement.
+vmware_sessionfiledir | **Optional.** Path to store the **vmware_sessionfile** file. Defaults to "/var/spool/icinga2/tmp".
+vmware_nosession        | **Optional.** No auth session -- IT SHOULD BE USED FOR TESTING PURPOSES ONLY! Defaults to "false".
+vmware_username | **Optional.** The username to connect to Host or vCenter server. No value defined as default.
+vmware_password | **Optional.** The username's password. No value defined as default.
+vmware_authfile         | **Optional.** Use an auth file instead of username/password for the session connection. Has no effect if **vmware_username** and **vmware_password** are defined. <br> **Authentication file content:** <br> username=vmuser <br> password=p@ssw0rd
+vmware_multiline        | **Optional.** Multiline output in overview. Technically this means that the multiline output uses an HTML **\<br\>** for the GUI. No value defined as default.
+
+
+### Web <a id="plugin-contrib-web"></a>
+
+This category includes all plugins for web-based checks.
+
+#### apache-status <a id="plugin-contrib-command-apache-status"></a>
+
+The [check_apache_status.pl](https://github.com/lbetz/check_apache_status) plugin
+uses the [/server-status](https://httpd.apache.org/docs/current/mod/mod_status.html)
+HTTP endpoint to monitor status metrics for the Apache webserver.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------------|----------------------------------------------------------------------------------
+apache_status_address | **Optional.** Host address. Defaults to "$address$" if the host's `address` attribute is set, `address6` otherwise.
+apache_status_port | **Optional.** HTTP port.
+apache_status_uri | **Optional.** URL to use, instead of the default (http://`apache_status_address`/server-status).
+apache_status_ssl | **Optional.** Set to use SSL connection.
+apache_status_no_validate | **Optional.** Skip SSL certificate validation.
+apache_status_username | **Optional.** Username for basic auth.
+apache_status_password | **Optional.** Password for basic auth.
+apache_status_timeout | **Optional.** Timeout in seconds.
+apache_status_unreachable | **Optional.** Return CRITICAL if socket timed out or http code >= 500.
+apache_status_warning | **Optional.** Warning threshold (number of open slots, busy workers and idle workers that will cause a WARNING) like ':20,50,:50'.
+apache_status_critical | **Optional.** Critical threshold (number of open slots, busy workers and idle workers that will cause a CRITICAL) like ':10,25,:20'.
+
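+For illustration, a Service object using this check command might look like the
+following sketch (the host name and threshold values are placeholders, not plugin defaults):
+
+```
+object Service "apache-status" {
+  host_name = "webserver"                 // placeholder host object name
+  check_command = "apache-status"
+
+  // thresholds: open slots, busy workers, idle workers
+  vars.apache_status_warning = ":20,50,:50"
+  vars.apache_status_critical = ":10,25,:20"
+}
+```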
+
+#### ssl_cert <a id="plugin-check-command-ssl_cert"></a>
+
+The [check_ssl_cert](https://github.com/matteocorti/check_ssl_cert) plugin
+uses the openssl binary (and optionally curl) to check an X.509 certificate.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------|--------------
+ssl_cert_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+ssl_cert_port | **Optional.** TCP port number (default: 443).
+ssl_cert_proxy | **Optional.** Proxy server to use for connecting to the host. Sets http_proxy and the s_client -proxy option.
+ssl_cert_file | **Optional.** Local file path. Works only if `ssl_cert_address` is set to "localhost".
+ssl_cert_warn | **Optional.** Minimum number of days a certificate has to be valid.
+ssl_cert_critical | **Optional.** Minimum number of days a certificate has to be valid to issue a critical status.
+ssl_cert_cn | **Optional.** Pattern to match the CN of the certificate.
+ssl_cert_altnames         | **Optional.** Matches the pattern specified in -n with alternate names too.
+ssl_cert_issuer | **Optional.** Pattern to match the issuer of the certificate.
+ssl_cert_org | **Optional.** Pattern to match the organization of the certificate.
+ssl_cert_email | **Optional.** Pattern to match the email address contained in the certificate.
+ssl_cert_serial | **Optional.** Pattern to match the serial number.
+ssl_cert_noauth | **Optional.** Ignore authority warnings (expiration only)
+ssl_cert_match_host | **Optional.** Match CN with the host name.
+ssl_cert_selfsigned | **Optional.** Allow self-signed certificate.
+ssl_cert_sni | **Optional.** Sets the TLS SNI (Server Name Indication) extension.
+ssl_cert_timeout | **Optional.** Seconds before connection times out (default: 15)
+ssl_cert_protocol | **Optional.** Use the specific protocol {http,smtp,pop3,imap,ftp,xmpp,irc,ldap} (default: http).
+ssl_cert_clientcert | **Optional.** Use client certificate to authenticate.
+ssl_cert_clientpass | **Optional.** Set passphrase for client certificate.
+ssl_cert_ssllabs | **Optional.** SSL Labs assessment
+ssl_cert_ssllabs_nocache | **Optional.** Forces a new check by SSL Labs
+ssl_cert_rootcert | **Optional.** Root certificate or directory to be used for certificate validation.
+ssl_cert_ignore_signature | **Optional.** Do not check if the certificate was signed with SHA1 or MD5.
+ssl_cert_ssl_version | **Optional.** Force specific SSL version out of {ssl2,ssl3,tls1,tls1_1,tls1_2}.
+ssl_cert_disable_ssl_versions | **Optional.** Disable specific SSL versions out of {ssl2,ssl3,tls1,tls1_1,tls1_2}. Multiple versions can be given as array.
+ssl_cert_cipher | **Optional.** Cipher selection: force {ecdsa,rsa} authentication.
+ssl_cert_ignore_expiration | **Optional.** Ignore expiration date.
+ssl_cert_ignore_host_cn | **Optional.** Do not complain if the CN does not match.
+ssl_cert_ignore_ocsp | **Optional.** Do not check revocation with OCSP.
+ssl_cert_ignore_ocsp_errors | **Optional.** Continue if the OCSP status cannot be checked.
+ssl_cert_ignore_ocsp_timeout | **Optional.** Ignore OCSP result when timeout occurs while checking.
+ssl_cert_ignore_sct | **Optional.** Do not check for signed certificate timestamps.
+ssl_cert_ignore_tls_renegotiation | **Optional.** Do not check for renegotiation.
+
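+A hypothetical Service definition (host name and day thresholds are examples only) could look like this:
+
+```
+object Service "ssl-cert" {
+  host_name = "webserver"                 // placeholder host object name
+  check_command = "ssl_cert"
+
+  vars.ssl_cert_port = 443
+  vars.ssl_cert_warn = 30                 // WARNING below 30 days of validity
+  vars.ssl_cert_critical = 14             // CRITICAL below 14 days of validity
+}
+```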
+
+#### jmx4perl <a id="plugin-contrib-command-jmx4perl"></a>
+
+The [check_jmx4perl](https://metacpan.org/pod/distribution/jmx4perl/scripts/check_jmx4perl) plugin
+uses the HTTP API exposed by the [Jolokia](https://jolokia.org)
+web application and queries Java message beans on an application server. It is
+part of the `JMX::Jmx4Perl` Perl module which includes detailed
+[documentation](https://metacpan.org/pod/distribution/jmx4perl/scripts/check_jmx4perl).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+-----------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------
+jmx4perl_url | **Required.** URL to agent web application. Defaults to "http://$address$:8080/jolokia".
+jmx4perl_product             | **Optional.** Name of app server product (e.g. jboss), by default it uses an auto-detection facility.
+jmx4perl_alias | **Optional.** Alias name for attribute (e.g. MEMORY_HEAP_USED). All available aliases can be viewed by executing `jmx4perl aliases` on the command line.
+jmx4perl_mbean | **Optional.** MBean name (e.g. java.lang:type=Memory).
+jmx4perl_attribute | **Optional.** Attribute name (e.g. HeapMemoryUsage).
+jmx4perl_operation | **Optional.** Operation to execute.
+jmx4perl_value | **Optional.** Shortcut for specifying mbean/attribute/path. Slashes within names must be escaped with backslash.
+jmx4perl_delta | **Optional.** Switches on incremental mode. Optional argument are seconds used for normalizing.
+jmx4perl_path | **Optional.** Inner path for extracting a single value from a complex attribute or return value (e.g. used).
+jmx4perl_target              | **Optional.** JSR-160 Service URL specifying the target server.
+jmx4perl_target_user | **Optional.** Username to use for JSR-160 connection.
+jmx4perl_target_password | **Optional.** Password to use for JSR-160 connection.
+jmx4perl_proxy | **Optional.** Proxy to use.
+jmx4perl_user | **Optional.** User for HTTP authentication.
+jmx4perl_password | **Optional.** Password for HTTP authentication.
+jmx4perl_name | **Optional.** Name to use for output, by default a standard value based on the MBean and attribute will be used.
+jmx4perl_method | **Optional.** HTTP method to use, either get or post. By default a method is determined automatically based on the request type.
+jmx4perl_base | **Optional.** Base name, which when given, interprets critical and warning values as relative in the range 0 .. 100%. Must be given in the form mbean/attribute/path.
+jmx4perl_base_mbean | **Optional.** Base MBean name, interprets critical and warning values as relative in the range 0 .. 100%. Requires "jmx4perl_base_attribute".
+jmx4perl_base_attribute | **Optional.** Base attribute for a relative check. Requires "jmx4perl_base_mbean".
+jmx4perl_base_path | **Optional.** Base path for relative checks, where this path is used on the base attribute's value.
+jmx4perl_unit                | **Optional.** Unit of measurement of the data retrieved. Recognized values are [B\|KB\|MB\|GB\|TB] for memory values and [us\|ms\|s\|m\|h\|d] for time values.
+jmx4perl_null | **Optional.** Value which should be used in case of a null return value of an operation or attribute. Defaults to null.
+jmx4perl_string | **Optional.** Force string comparison for critical and warning checks. Defaults to false.
+jmx4perl_numeric | **Optional.** Force numeric comparison for critical and warning checks. Defaults to false.
+jmx4perl_critical | **Optional.** Critical threshold for value.
+jmx4perl_warning | **Optional.** Warning threshold for value.
+jmx4perl_label | **Optional.** Label to be used for printing out the result of the check. For placeholders which can be used see the documentation.
+jmx4perl_perfdata | **Optional.** Whether performance data should be omitted, which are included by default. Defaults to "on" for numeric values, to "off" for strings.
+jmx4perl_unknown_is_critical | **Optional.** Map UNKNOWN errors to errors with a CRITICAL status. Defaults to false.
+jmx4perl_timeout | **Optional.** Seconds before plugin times out. Defaults to "15".
+jmx4perl_config | **Optional.** Path to configuration file.
+jmx4perl_server | **Optional.** Symbolic name of server url to use, which needs to be configured in the configuration file.
+jmx4perl_check | **Optional.** Name of a check configuration as defined in the configuration file, use array if you need arguments.
+
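+As an illustration, a Service checking JVM heap usage via Jolokia might be defined
+as follows (URL, MBean and threshold values are examples, not defaults):
+
+```
+object Service "jvm-heap" {
+  host_name = "appserver"                 // placeholder host object name
+  check_command = "jmx4perl"
+
+  vars.jmx4perl_url = "http://appserver.example.com:8080/jolokia"
+  vars.jmx4perl_mbean = "java.lang:type=Memory"
+  vars.jmx4perl_attribute = "HeapMemoryUsage"
+  vars.jmx4perl_path = "used"             // extract the 'used' value
+  vars.jmx4perl_warning = "1073741824"    // example: 1 GB used heap
+  vars.jmx4perl_critical = "2147483648"   // example: 2 GB used heap
+}
+```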
+
+#### kdc <a id="plugin-contrib-command-kdc"></a>
+
+The [check_kdc](https://exchange.nagios.org/directory/Plugins/Security/check_kdc/details) plugin
+uses the Kerberos `kinit` binary to monitor Kerberos 5 KDC by acquiring a ticket.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------------------------------------------------------------------
+kdc_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, `address6` otherwise.
+kdc_port        | **Optional.** Port on which KDC runs (default 88).
+kdc_principal   | **Required.** Principal name to authenticate as (including realm).
+kdc_keytab      | **Required.** Keytab file containing principal's key.
+
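+A minimal Service sketch (host name, principal and keytab path are placeholders) could look like this:
+
+```
+object Service "kerberos-kdc" {
+  host_name = "kdc1"                      // placeholder host object name
+  check_command = "kdc"
+
+  vars.kdc_principal = "icinga@EXAMPLE.COM"        // placeholder principal
+  vars.kdc_keytab = "/etc/icinga2/icinga.keytab"   // placeholder keytab file
+}
+```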
+
+#### nginx_status <a id="plugin-contrib-command-nginx_status"></a>
+
+The [check_nginx_status.pl](https://github.com/regilero/check_nginx_status) plugin
+uses the [/nginx_status](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html)
+HTTP endpoint which provides metrics for monitoring Nginx.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------------|----------------------------------------------------------------------------------
+nginx_status_host_address | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, `address6` otherwise.
+nginx_status_port               | **Optional.** The HTTP port.
+nginx_status_url | **Optional.** URL to use, instead of the default (http://`nginx_status_hostname`/nginx_status).
+nginx_status_servername | **Optional.** ServerName to use if you specified an IP to match the good Virtualhost in your target.
+nginx_status_ssl                | **Optional.** Set to use an SSL connection.
+nginx_status_disable_sslverify  | **Optional.** Set to disable SSL hostname verification.
+nginx_status_user | **Optional.** Username for basic auth.
+nginx_status_pass | **Optional.** Password for basic auth.
+nginx_status_realm | **Optional.** Realm for basic auth.
+nginx_status_maxreach | **Optional.** Number of max processes reached (since last check) that should trigger an alert.
+nginx_status_timeout            | **Optional.** Timeout in seconds.
+nginx_status_warn | **Optional.** Warning threshold (number of active connections, ReqPerSec or ConnPerSec that will cause a WARNING) like '10000,100,200'.
+nginx_status_critical | **Optional.** Critical threshold (number of active connections, ReqPerSec or ConnPerSec that will cause a CRITICAL) like '20000,200,300'.
+
+
+#### rbl <a id="plugin-contrib-command-rbl"></a>
+
+The [check_rbl](https://github.com/matteocorti/check_rbl) plugin
+uses the `Net::DNS` Perl library to check whether your SMTP server
+is blacklisted.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------|--------------------------------------------------------------------------
+rbl_hostname | **Optional.** The address or name of the SMTP server to check. Defaults to "$address$" if the host's `address` attribute is set, `address6` otherwise.
+rbl_server      | **Required.** List of RBL servers as an array.
+rbl_warning     | **Optional.** Number of blacklisting servers for a warning.
+rbl_critical    | **Optional.** Number of blacklisting servers for a critical.
+rbl_timeout     | **Optional.** Seconds before plugin times out (default: 15).
+
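+Since `rbl_server` expects an array, a Service sketch (server list and thresholds are examples only) might look like this:
+
+```
+object Service "smtp-rbl" {
+  host_name = "mailserver"                // placeholder host object name
+  check_command = "rbl"
+
+  vars.rbl_server = [ "zen.spamhaus.org", "bl.spamcop.net" ]  // example RBLs
+  vars.rbl_warning = 1
+  vars.rbl_critical = 2
+}
+```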
+
+#### squid <a id="plugin-contrib-command-squid"></a>
+
+The [check_squid](https://exchange.icinga.com/exchange/check_squid) plugin
+uses the `squidclient` binary to monitor a [Squid proxy](http://www.squid-cache.org).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|----------------------------------------------------------------------------------
+squid_hostname | **Optional.** The host's address. Defaults to "$address$" if the host's `address` attribute is set, "$address6$" otherwise.
+squid_data | **Optional.** Data to fetch (default: Connections) available data: Connections Cache Resources Memory FileDescriptors.
+squid_port | **Optional.** Port number (default: 3128).
+squid_user | **Optional.** WWW user.
+squid_password | **Optional.** WWW password.
+squid_warning | **Optional.** Warning threshold. See http://nagiosplug.sourceforge.net/developer-guidelines.html#THRESHOLDFORMAT for the threshold format.
+squid_critical | **Optional.** Critical threshold. See http://nagiosplug.sourceforge.net/developer-guidelines.html#THRESHOLDFORMAT for the threshold format.
+squid_client | **Optional.** Path of squidclient (default: /usr/bin/squidclient).
+squid_timeout | **Optional.** Seconds before plugin times out (default: 15).
+
+
+#### webinject <a id="plugin-contrib-command-webinject"></a>
+
+The [check_webinject](https://labs.consol.de/de/nagios/check_webinject/index.html) plugin
+uses [WebInject](http://www.webinject.org/manual.html) to test web applications
+and web services in an automated fashion.
+It can be used to test individual system components that have HTTP interfaces
+(JSP, ASP, CGI, PHP, AJAX, Servlets, HTML Forms, XML/SOAP Web Services, REST, etc),
+and can be used as a test harness to create a suite of HTTP level automated functional,
+acceptance, and regression tests. A test harness allows you to run many test cases
+and collect/report your results. WebInject offers real-time results
+display and may also be used for monitoring system response times.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|--------------
+webinject_config_file | **Optional.** There is a configuration file named 'config.xml' that is used to store configuration settings for your project. You can use this to specify which test case files to run and to set some constants and settings to be used by WebInject.
+webinject_output        | **Optional.** This option is followed by a directory name or a prefix to be prepended to the output files. This is used to specify the location for writing output files (http.log, results.html, and results.xml). If a directory name is supplied (use either an absolute or relative path and make sure to add the trailing slash), all output files are written to this directory. If the trailing slash is omitted, it is assumed to be a prefix and this will be prepended to the output files. You may also use a combination of a directory and prefix.
+webinject_no_output | **Optional.** Suppresses all output to STDOUT except the results summary.
+webinject_timeout | **Optional.** The value [given in seconds] will be compared to the global time elapsed to run all the tests. If the tests have all been successful, but have taken more time than the 'globaltimeout' value, a warning message is sent back to Icinga.
+webinject_report_type | **Optional.** This setting is used to enable output formatting that is compatible for use with specific external programs. The available values you can set this to are: nagios, mrtg, external and standard.
+webinject_testcase_file | **Optional.** When you launch WebInject in console mode, you can optionally supply an argument for a testcase file to run. It will look for this file in the directory that webinject.pl resides in. If no filename is passed from the command line, it will look in config.xml for testcasefile declarations. If no files are specified, it will look for a default file named 'testcases.xml' in the current [webinject] directory. If none of these are found, the engine will stop and give you an error.
+
+#### varnish <a id="plugin-contrib-command-varnish"></a>
+
+The [check_varnish](https://github.com/varnish/varnish-nagios) plugin,
+also available in the [monitoring-plugins-contrib](https://packages.debian.org/sid/nagios-plugins-contrib) on Debian,
+uses the `varnishstat` binary to monitor [varnish](https://varnish-cache.org/).
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|----------------------------------------------------------------------------------
+varnish_name | **Optional.** Specify the Varnish instance name
+varnish_param | **Optional.** Specify the parameter to check (see below). The default is 'ratio'.
+varnish_critical | **Optional.** Set critical threshold: [@][lo:]hi
+varnish_warning | **Optional.** Set warning threshold: [@][lo:]hi
+
+For *varnish_param*, all items reported by varnishstat(1) are available - use the
+identifier listed in the left column of the `varnishstat -l` output. In
+addition, the following parameters are available:
+
+Name | Description
+------------------------|----------------------------------------------------------------------------------
+uptime | How long the cache has been running (in seconds)
+ratio | The cache hit ratio expressed as a percentage of hits to hits + misses. Default thresholds are 95 and 90.
+usage | Cache file usage as a percentage of the total cache space.
+
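+For example, checking the cache hit ratio (with the plugin's documented default thresholds of 95 and 90) could be sketched like this, where the host name is a placeholder:
+
+```
+object Service "varnish-ratio" {
+  host_name = "cacheserver"               // placeholder host object name
+  check_command = "varnish"
+
+  vars.varnish_param = "ratio"            // cache hit ratio in percent
+}
+```
+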
+#### haproxy <a id="plugin-contrib-command-haproxy"></a>
+
+The [check_haproxy](https://salsa.debian.org/nagios-team/pkg-nagios-plugins-contrib/blob/master/check_haproxy/check_haproxy) plugin,
+also available in the [monitoring-plugins-contrib](https://packages.debian.org/nagios-plugins-contrib) on Debian,
+uses the `haproxy` csv statistics page to monitor [haproxy](https://www.haproxy.org/) response time. The plugin outputs performance data for backend sessions and statistics response time.
+
+This plugin needs access to the CSV statistics page. You can configure it in HAProxy by adding a new frontend:
+```
+frontend stats
+ bind 127.0.0.1:80
+ stats enable
+ stats uri /stats
+```
+
+The statistics page will be available at `http://127.0.0.1/stats;csv;norefresh`.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+------------------------|----------------------------------------------------------------------------------
+haproxy_username | **Optional.** Username for HTTP Auth
+haproxy_password | **Optional.** Password for HTTP Auth
+haproxy_url | **Required.** URL of the HAProxy csv statistics page.
+haproxy_timeout | **Optional.** Seconds before plugin times out (default: 10)
+haproxy_warning | **Optional.** Warning request time threshold (in seconds)
+haproxy_critical | **Optional.** Critical request time threshold (in seconds)
+
+#### haproxy_status <a id="plugin-contrib-command-haproxy_status"></a>
+
+The [check_haproxy_status](https://github.com/jonathanio/monitoring-nagios-haproxy) plugin
+uses the `haproxy` statistics socket to monitor [haproxy](https://www.haproxy.org/) frontends/backends.
+
+This plugin needs read/write access to the statistics socket at operator level. You can configure it in the global section of HAProxy to allow the icinga user to use it:
+```
+stats socket /run/haproxy/admin.sock user haproxy group icinga mode 660 level operator
+```
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+----------------------------|----------------------------------------------------------------------------------
+haproxy\_status\_default | **Optional.** Set/Override the defaults which will be applied to all checks (unless specifically set by --overrides).
+haproxy\_status\_frontends | **Optional.** Enable checks for the frontends in HAProxy (that they're marked as OPEN and the session limits haven't been reached).
+haproxy\_status\_nofrontends| **Optional.** Disable checks for the frontends in HAProxy (that they're marked as OPEN and the session limits haven't been reached).
+haproxy\_status\_backends | **Optional.** Enable checks for the backends in HAProxy (that they have the required quorum of servers, and that the session limits haven't been reached).
+haproxy\_status\_nobackends | **Optional.** Disable checks for the backends in HAProxy (that they have the required quorum of servers, and that the session limits haven't been reached).
+haproxy\_status\_servers | **Optional.** Enable checks for the servers in HAProxy (that they haven't reached the limits for the sessions or for queues).
+haproxy\_status\_noservers | **Optional.** Disable checks for the servers in HAProxy (that they haven't reached the limits for the sessions or for queues).
+haproxy\_status\_overrides | **Optional.** Override the defaults for a particular frontend or backend, in the form {name}:{override}, where {override} is the same format as --defaults above.
+haproxy\_status\_socket | **Required.** Path to the socket check_haproxy should connect to
+
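+A Service sketch using the socket configured above (the host name is a placeholder) might look like this:
+
+```
+object Service "haproxy" {
+  host_name = "loadbalancer"              // placeholder host object name
+  check_command = "haproxy_status"
+
+  vars.haproxy_status_socket = "/run/haproxy/admin.sock"
+  vars.haproxy_status_frontends = true    // check frontends
+  vars.haproxy_status_backends = true     // check backends
+}
+```
+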
+#### phpfpm_status <a id="plugin-contrib-command-phpfpm_status"></a>
+
+The [check_phpfpm_status](https://github.com/regilero/check_phpfpm_status) plugin
+uses the `php-fpm` status page to monitor php-fpm.
+
+Custom variables passed as [command parameters](03-monitoring-basics.md#command-passing-parameters):
+
+Name | Description
+--------------------------|----------------------------------------------------------------------------------
+phpfpm\_status\_hostname  | **Required.** Name or IP address of the host to check.
+phpfpm\_status\_port      | **Optional.** HTTP port, or FastCGI port when using --fastcgi.
+phpfpm\_status\_url       | **Optional.** Specific URL (only the path part of it in fact) to use, instead of the default /fpm-status.
+phpfpm\_status\_servername| **Optional.** ServerName (host header of the HTTP request). Use it if you specified an IP in -H to match the correct virtual host on your target.
+phpfpm\_status\_fastcgi | **Optional.** If set, connect directly to php-fpm via network or local socket, using fastcgi protocol instead of HTTP.
+phpfpm\_status\_user | **Optional.** Username for basic auth
+phpfpm\_status\_pass | **Optional.** Password for basic auth
+phpfpm\_status\_realm | **Optional.** Realm for basic auth
+phpfpm\_status\_debug     | **Optional.** If set, enable debug mode (show the HTTP request/response).
+phpfpm\_status\_timeout   | **Optional.** Timeout in seconds (default: 15).
+phpfpm\_status\_ssl       | **Optional.** Whether to use HTTPS instead of HTTP. Note that you can give some extra parameters to this setting. The default value is 'TLSv1', but you could use things like 'TLSv1_1' or 'TLSV1_2' (or even 'SSLv23:!SSLv2:!SSLv3' for old stuff).
+phpfpm\_status\_verifyssl | **Optional.** If set, verify the certificate and hostname from the SSL cert; default is 0 (no security), set it to 1 to really perform SSL peer name and certificate checks.
+phpfpm\_status\_cacert    | **Optional.** Full path to the cacert.pem certificate authority used to verify SSL certificates (use with --verifyssl). If not given, the cacert from the Mozilla::CA CPAN module will be used.
+phpfpm\_status\_warn | **Optional.** MIN_AVAILABLE_PROCESSES,PROC_MAX_REACHED,QUEUE_MAX_REACHED number of available workers, or max states reached that will cause a warning. -1 for no warning
+phpfpm\_status\_critical | **Optional.** MIN_AVAILABLE_PROCESSES,PROC_MAX_REACHED,QUEUE_MAX_REACHED number of available workers, or max states reached that will cause an error, -1 for no CRITICAL
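+
+A Service sketch for checking php-fpm directly over FastCGI (host name, address and port are placeholders) could look like this:
+
+```
+object Service "php-fpm" {
+  host_name = "webserver"                 // placeholder host object name
+  check_command = "phpfpm_status"
+
+  vars.phpfpm_status_hostname = "127.0.0.1"
+  vars.phpfpm_status_port = 9000          // placeholder FastCGI port
+  vars.phpfpm_status_fastcgi = true       // talk FastCGI instead of HTTP
+}
+```
+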
diff --git a/doc/11-cli-commands.md b/doc/11-cli-commands.md
new file mode 100644
index 0000000..457656c
--- /dev/null
+++ b/doc/11-cli-commands.md
@@ -0,0 +1,734 @@
+# Icinga 2 CLI Commands <a id="cli-commands"></a>
+
+Icinga 2 comes with a number of CLI commands which support bash autocompletion.
+
+These CLI commands will allow you to use certain functionality
+provided by and around Icinga 2.
+
+Each CLI command provides its own help and usage information, so please
+make sure to always run them with the `--help` parameter.
+
+Run `icinga2` without any arguments to get a list of all available global
+options.
+
+```
+# icinga2
+icinga2 - The Icinga 2 network monitoring daemon (version: v2.11.0)
+
+Usage:
+ icinga2 <command> [<arguments>]
+
+Supported commands:
+ * api setup (setup for API)
+ * ca list (lists all certificate signing requests)
+ * ca restore (restores a removed certificate request)
+ * ca remove (removes an outstanding certificate request)
+ * ca sign (signs an outstanding certificate request)
+ * console (Icinga debug console)
+ * daemon (starts Icinga 2)
+ * feature disable (disables specified feature)
+ * feature enable (enables specified feature)
+ * feature list (lists all available features)
+ * node setup (set up node)
+ * node wizard (wizard for node setup)
+ * object list (lists all objects)
+ * pki new-ca (sets up a new CA)
+ * pki new-cert (creates a new CSR)
+ * pki request (requests a certificate)
+ * pki save-cert (saves another Icinga 2 instance's certificate)
+ * pki sign-csr (signs a CSR)
+ * pki ticket (generates a ticket)
+ * pki verify (verify TLS certificates: CN, signed by CA, is CA; Print certificate)
+ * variable get (gets a variable)
+ * variable list (lists all variables)
+
+Global options:
+ -h [ --help ] show this help message
+ -V [ --version ] show version information
+ --color use VT100 color codes even when stdout is not a
+ terminal
+ -D [ --define ] arg define a constant
+ -a [ --app ] arg application library name (default: icinga)
+ -l [ --library ] arg load a library
+ -I [ --include ] arg add include search directory
+ -x [ --log-level ] arg specify the log level for the console log.
+ The valid value is either debug, notice,
+ information (default), warning, or critical
+ -X [ --script-debugger ] whether to enable the script debugger
+
+Report bugs at <https://github.com/Icinga/icinga2>
+Icinga home page: <https://icinga.com/>
+```
+
+
+## Icinga 2 CLI Bash Autocompletion <a id="cli-commands-autocompletion"></a>
+
+Bash Auto-Completion (pressing `<TAB>`) is provided only for the corresponding context.
+
+While `--config` suggests and auto-completes files and directories on disk,
+`feature enable` only suggests disabled features.
+
+RPM and Debian packages install the bash completion files into
+`/etc/bash_completion.d/icinga2`.
+
+You need to install the `bash-completion` package if not already installed.
+
+RHEL/CentOS/Fedora:
+
+```bash
+yum install bash-completion
+```
+
+SUSE:
+
+```bash
+zypper install bash-completion
+```
+
+Debian/Ubuntu:
+
+```bash
+apt-get install bash-completion
+```
+
+Ensure that the `bash_completion.d` directory is added to your shell
+environment. You can manually source the icinga2 bash-completion file
+into your current session and test it:
+
+```bash
+source /etc/bash_completion.d/icinga2
+```
+
+
+## Icinga 2 CLI Global Options <a id="cli-commands-global-options"></a>
+
+### Application Type
+
+By default the `icinga2` binary loads the `icinga` library. A different application type
+can be specified with the `--app` command-line option.
+Note: This is not needed by the average Icinga user, only developers.
+
+### Libraries
+
+Instead of loading libraries using the [`library` config directive](17-language-reference.md#library)
+you can also use the `--library` command-line option.
+Note: This is not needed by the average Icinga user, only developers.
+
+### Constants
+
+[Global constants](17-language-reference.md#constants) can be set using the `--define` command-line option.
+
+### Config Include Path <a id="config-include-path"></a>
+
+When including files you can specify that the include search path should be
+checked. You can do this by putting your configuration file name in angle
+brackets like this:
+
+```
+include <test.conf>
+```
+
+This causes Icinga 2 to search its include path for the configuration file
+`test.conf`. By default the installation path for the [Icinga Template Library](10-icinga-template-library.md#icinga-template-library)
+is the only search directory.
+
+Using the `--include` command-line option additional search directories can be
+added.
+
+## CLI command: Api <a id="cli-command-api"></a>
+
+Provides helper functions to enable and setup the
+[Icinga 2 API](12-icinga2-api.md#icinga2-api-setup).
+
+### CLI command: Api Setup <a id="cli-command-api-setup"></a>
+
+```
+# icinga2 api setup --help
+icinga2 - The Icinga 2 network monitoring daemon (version: v2.11.0)
+
+Usage:
+ icinga2 api setup [<arguments>]
+
+Setup for Icinga 2 API.
+
+Global options:
+ -h [ --help ] show this help message
+ -V [ --version ] show version information
+ --color use VT100 color codes even when stdout is not a
+ terminal
+ -D [ --define ] arg define a constant
+ -I [ --include ] arg add include search directory
+ -x [ --log-level ] arg specify the log level for the console log.
+ The valid value is either debug, notice,
+ information (default), warning, or critical
+ -X [ --script-debugger ] whether to enable the script debugger
+
+Command options:
+ --cn arg The certificate's common name
+
+Report bugs at <https://github.com/Icinga/icinga2>
+Get support: <https://icinga.com/support/>
+Documentation: <https://icinga.com/docs/>
+Icinga home page: <https://icinga.com/>
+```
+
+## CLI command: Ca <a id="cli-command-ca"></a>
+
+List and manage incoming certificate signing requests. More details
+can be found in the [signing methods](06-distributed-monitoring.md#distributed-monitoring-setup-sign-certificates-master)
+chapter. This CLI command is available since v2.8.
+
+```
+# icinga2 ca --help
+icinga2 - The Icinga 2 network monitoring daemon (version: v2.11.0)
+
+Usage:
+ icinga2 <command> [<arguments>]
+
+Supported commands:
+ * ca list (lists all certificate signing requests)
+ * ca sign (signs an outstanding certificate request)
+ * ca restore (restores a removed certificate request)
+ * ca remove (removes an outstanding certificate request)
+
+Global options:
+ -h [ --help ] show this help message
+ -V [ --version ] show version information
+ --color use VT100 color codes even when stdout is not a
+ terminal
+ -D [ --define ] arg define a constant
+ -a [ --app ] arg application library name (default: icinga)
+ -l [ --library ] arg load a library
+ -I [ --include ] arg add include search directory
+ -x [ --log-level ] arg specify the log level for the console log.
+ The valid value is either debug, notice,
+ information (default), warning, or critical
+ -X [ --script-debugger ] whether to enable the script debugger
+
+Report bugs at <https://github.com/Icinga/icinga2>
+Icinga home page: <https://icinga.com/>
+```
+
+
+### CLI command: Ca List <a id="cli-command-ca-list"></a>
+
+```
+icinga2 ca list --help
+icinga2 - The Icinga 2 network monitoring daemon (version: v2.11.0)
+
+Usage:
+ icinga2 ca list [<arguments>]
+
+Lists pending certificate signing requests.
+
+Global options:
+ -h [ --help ] show this help message
+ -V [ --version ] show version information
+ --color use VT100 color codes even when stdout is not a
+ terminal
+ -D [ --define ] arg define a constant
+ -I [ --include ] arg add include search directory
+ -x [ --log-level ] arg specify the log level for the console log.
+ The valid value is either debug, notice,
+ information (default), warning, or critical
+ -X [ --script-debugger ] whether to enable the script debugger
+
+Command options:
+ --all List all certificate signing requests, including
+ signed. Note: Old requests are automatically
+ cleaned by Icinga after 1 week.
+ --removed List all removed CSRs (for use with 'ca restore')
+ --json encode output as JSON
+
+Report bugs at <https://github.com/Icinga/icinga2>
+Get support: <https://icinga.com/support/>
+Documentation: <https://icinga.com/docs/>
+Icinga home page: <https://icinga.com/>
+```
+
+## CLI command: Console <a id="cli-command-console"></a>
+
+The CLI command `console` can be used to debug and evaluate Icinga 2 config expressions,
+e.g. to test [functions](17-language-reference.md#functions) in your local sandbox.
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => function test(name) {
+<1> .. log("Hello " + name)
+<1> .. }
+null
+<2> => test("World")
+information/config: Hello World
+null
+<3> =>
+```
+
+Further usage examples can be found in the [library reference](18-library-reference.md#library-reference) chapter.
+
+```
+# icinga2 console --help
+icinga2 - The Icinga 2 network monitoring daemon (version: v2.11.0)
+
+Usage:
+ icinga2 console [<arguments>]
+
+Interprets Icinga script expressions.
+
+Global options:
+ -h [ --help ] show this help message
+ -V [ --version ] show version information
+ --color use VT100 color codes even when stdout is not a
+ terminal
+ -D [ --define ] arg define a constant
+ -a [ --app ] arg application library name (default: icinga)
+ -l [ --library ] arg load a library
+ -I [ --include ] arg add include search directory
+ -x [ --log-level ] arg specify the log level for the console log.
+ The valid value is either debug, notice,
+ information (default), warning, or critical
+ -X [ --script-debugger ] whether to enable the script debugger
+
+Command options:
+ -c [ --connect ] arg connect to an Icinga 2 instance
+ -e [ --eval ] arg evaluate expression and terminate
+ -r [ --file ] arg evaluate a file and terminate
+ --syntax-only only validate syntax (requires --eval or --file)
+ --sandbox enable sandbox mode
+
+Report bugs at <https://github.com/Icinga/icinga2>
+Icinga home page: <https://icinga.com/>
+```
+
+
+On operating systems without the `libedit` library installed there is no
+support for line-editing or a command history. However, you can
+use the `rlwrap` program if you require those features:
+
+```bash
+rlwrap icinga2 console
+```
+
+The debug console can be used to connect to a running Icinga 2 instance using
+the [REST API](12-icinga2-api.md#icinga2-api). [API permissions](12-icinga2-api.md#icinga2-api-permissions)
+are required for executing config expressions and auto-completion.
+
+> **Note**
+>
+> The debug console does not currently support TLS certificate verification.
+>
+> Runtime modifications are not validated and might cause the Icinga 2
+> daemon to crash or behave in an unexpected way. Use these runtime changes
+> at your own risk and rather *inspect and debug objects read-only*.
+
+You can specify the API URL using the `--connect` parameter.
+
+Although the password can be specified there, process arguments on UNIX platforms are
+usually visible to other users (e.g. through `ps`). In order to securely specify the
+user credentials, the debug console supports two environment variables:
+
+ Environment variable | Description
+ ---------------------|-------------
+ ICINGA2_API_USERNAME | The API username.
+ ICINGA2_API_PASSWORD | The API password.
+
+Here's an example:
+
+```
+$ ICINGA2_API_PASSWORD=icinga icinga2 console --connect 'https://root@localhost:5665/'
+Icinga 2 (version: v2.11.0)
+<1> =>
+```
+
+Once connected you can inspect variables and execute other expressions by entering them at the prompt:
+
+```
+<1> => var h = get_host("icinga2-agent1.localdomain")
+null
+<2> => h.last_check_result
+{
+ active = true
+ check_source = "icinga2-agent1.localdomain"
+ command = [ "/usr/local/sbin/check_ping", "-H", "127.0.0.1", "-c", "5000,100%", "-w", "3000,80%" ]
+ execution_end = 1446653527.174983
+ execution_start = 1446653523.152673
+ exit_status = 0.000000
+ output = "PING OK - Packet loss = 0%, RTA = 0.11 ms"
+ performance_data = [ "rta=0.114000ms;3000.000000;5000.000000;0.000000", "pl=0%;80;100;0" ]
+ schedule_end = 1446653527.175133
+ schedule_start = 1446653583.150000
+ state = 0.000000
+ type = "CheckResult"
+ vars_after = {
+ attempt = 1.000000
+ reachable = true
+ state = 0.000000
+ state_type = 1.000000
+ }
+ vars_before = {
+ attempt = 1.000000
+ reachable = true
+ state = 0.000000
+ state_type = 1.000000
+ }
+}
+<3> =>
+```
+
+You can use the `--eval` parameter to evaluate a single expression in batch mode.
+Using the `--file` option you can specify a file which should be evaluated.
+The output format for batch mode is JSON.
+
+The `--syntax-only` option can be used in combination with `--eval` or `--file`
+to check a script for syntax errors. In this mode the script is parsed to identify
+syntax errors but not evaluated.
+
+Here's an example that retrieves the command that was used by Icinga to check the `icinga2-agent1.localdomain` host:
+
+```
+$ ICINGA2_API_PASSWORD=icinga icinga2 console --connect 'https://root@localhost:5665/' --eval 'get_host("icinga2-agent1.localdomain").last_check_result.command' | python -m json.tool
+[
+ "/usr/local/sbin/check_ping",
+ "-H",
+ "127.0.0.1",
+ "-c",
+ "5000,100%",
+ "-w",
+ "3000,80%"
+]
+```
+
+## CLI command: Daemon <a id="cli-command-daemon"></a>
+
+The CLI command `daemon` provides the functionality to start/stop Icinga 2.
+Furthermore it allows you to run the [configuration validation](11-cli-commands.md#config-validation).
+
+```
+# icinga2 daemon --help
+icinga2 - The Icinga 2 network monitoring daemon (version: v2.11.0)
+
+Usage:
+ icinga2 daemon [<arguments>]
+
+Starts Icinga 2.
+
+Global options:
+ -h [ --help ] show this help message
+ -V [ --version ] show version information
+ --color use VT100 color codes even when stdout is not a
+ terminal
+ -D [ --define ] arg define a constant
+ -a [ --app ] arg application library name (default: icinga)
+ -l [ --library ] arg load a library
+ -I [ --include ] arg add include search directory
+ -x [ --log-level ] arg specify the log level for the console log.
+ The valid value is either debug, notice,
+ information (default), warning, or critical
+ -X [ --script-debugger ] whether to enable the script debugger
+
+Command options:
+ -c [ --config ] arg parse a configuration file
+ -z [ --no-config ] start without a configuration file
+ -C [ --validate ] exit after validating the configuration
+ --dump-objects write icinga2.debug cache file for icinga2 object list
+ -e [ --errorlog ] arg log fatal errors to the specified log file (only
+ works in combination with --daemonize or
+ --close-stdio)
+ -d [ --daemonize ] detach from the controlling terminal
+ --close-stdio do not log to stdout (or stderr) after startup
+
+Report bugs at <https://github.com/Icinga/icinga2>
+Icinga home page: <https://icinga.com/>
+```
+
+### Config Files <a id="cli-command-daemon-config-files"></a>
+
+You can specify one or more configuration files with the `--config` option.
+Configuration files are processed in the order they're specified on the command-line.
+
+When no configuration file is specified and the `--no-config` is not used
+Icinga 2 automatically falls back to using the configuration file
+`ConfigDir + "/icinga2.conf"` (where ConfigDir is usually `/etc/icinga2`).
+
+### Validation <a id="cli-command-daemon-validation"></a>
+
+The `--validate` option can be used to check if configuration files
+contain errors. If any errors are found, the exit status is 1, otherwise 0
+is returned. More details in the [configuration validation](11-cli-commands.md#config-validation) chapter.
+
+## CLI command: Feature <a id="cli-command-feature"></a>
+
+The `feature enable` and `feature disable` commands can be used to enable and disable features:
+
+```
+# icinga2 feature disable <tab>
+--app --define --include --log-level --version checker graphite mainlog
+--color --help --library --script-debugger api command ido-mysql notification
+```
+
+```
+# icinga2 feature enable <tab>
+--app --define --include --log-level --version debuglog ido-pgsql livestatus perfdata syslog
+--color --help --library --script-debugger compatlog gelf influxdb opentsdb statusdata
+```
+
+The `feature list` command shows which features are currently enabled:
+
+```
+# icinga2 feature list
+Disabled features: compatlog debuglog gelf ido-pgsql influxdb livestatus opentsdb perfdata statusdata syslog
+Enabled features: api checker command graphite ido-mysql mainlog notification
+```
+
+## CLI command: Node <a id="cli-command-node"></a>
+
+Provides the functionality to set up master and client
+nodes in a [distributed monitoring](06-distributed-monitoring.md#distributed-monitoring) scenario.
+
+```
+# icinga2 node --help
+icinga2 - The Icinga 2 network monitoring daemon (version: v2.11.0)
+
+Usage:
+ icinga2 <command> [<arguments>]
+
+Supported commands:
+ * node setup (set up node)
+ * node wizard (wizard for node setup)
+
+Global options:
+ -h [ --help ] show this help message
+ -V [ --version ] show version information
+ --color use VT100 color codes even when stdout is not a
+ terminal
+ -D [ --define ] arg define a constant
+ -a [ --app ] arg application library name (default: icinga)
+ -l [ --library ] arg load a library
+ -I [ --include ] arg add include search directory
+ -x [ --log-level ] arg specify the log level for the console log.
+ The valid value is either debug, notice,
+ information (default), warning, or critical
+ -X [ --script-debugger ] whether to enable the script debugger
+
+Report bugs at <https://github.com/Icinga/icinga2>
+Icinga home page: <https://icinga.com/>
+```
+
+## CLI command: Object <a id="cli-command-object"></a>
+
+The `object` CLI command can be used to list all configuration objects and their
+attributes. The command also shows where each of the attributes was modified and as such
+provides debug information for further configuration problem analysis.
+That way you can also identify which objects have been created from your [apply rules](17-language-reference.md#apply).
+
+Configuration modifications are not immediately updated. Furthermore there is a known issue with
+[group assign expressions](17-language-reference.md#group-assign) which are not reflected in the host object output.
+You need to run `icinga2 daemon -C --dump-objects` in order to update the `icinga2.debug` cache file.
+
+More information can be found in the [troubleshooting](15-troubleshooting.md#troubleshooting-list-configuration-objects) section.
+
+```
+# icinga2 object --help
+icinga2 - The Icinga 2 network monitoring daemon (version: v2.11.0)
+
+Usage:
+ icinga2 <command> [<arguments>]
+
+Supported commands:
+ * object list (lists all objects)
+
+Global options:
+ -h [ --help ] show this help message
+ -V [ --version ] show version information
+ --color use VT100 color codes even when stdout is not a
+ terminal
+ -D [ --define ] arg define a constant
+ -a [ --app ] arg application library name (default: icinga)
+ -l [ --library ] arg load a library
+ -I [ --include ] arg add include search directory
+ -x [ --log-level ] arg specify the log level for the console log.
+ The valid value is either debug, notice,
+ information (default), warning, or critical
+ -X [ --script-debugger ] whether to enable the script debugger
+
+Report bugs at <https://github.com/Icinga/icinga2>
+Icinga home page: <https://icinga.com/>
+```
+
+## CLI command: Pki <a id="cli-command-pki"></a>
+
+Provides the CLI commands to
+
+* generate a new certificate authority (CA)
+* generate a new CSR or self-signed certificate
+* sign a CSR and return a certificate
+* save a master certificate manually
+* request a signed certificate from the master
+* generate a new ticket for the client setup
+
+This functionality is used by the [node setup/wizard](11-cli-commands.md#cli-command-node) CLI commands.
+You will need them in the [distributed monitoring chapter](06-distributed-monitoring.md#distributed-monitoring).
+
+```
+# icinga2 pki --help
+icinga2 - The Icinga 2 network monitoring daemon (version: v2.12.0)
+
+Usage:
+ icinga2 <command> [<arguments>]
+
+Supported commands:
+ * pki new-ca (sets up a new CA)
+ * pki new-cert (creates a new CSR)
+ * pki request (requests a certificate)
+ * pki save-cert (saves another Icinga 2 instance's certificate)
+ * pki sign-csr (signs a CSR)
+ * pki ticket (generates a ticket)
+ * pki verify (verify TLS certificates: CN, signed by CA, is CA; Print certificate)
+
+Global options:
+ -h [ --help ] show this help message
+ -V [ --version ] show version information
+ --color use VT100 color codes even when stdout is not a
+ terminal
+ -D [ --define ] arg define a constant
+ -a [ --app ] arg application library name (default: icinga)
+ -l [ --library ] arg load a library
+ -I [ --include ] arg add include search directory
+ -x [ --log-level ] arg specify the log level for the console log.
+ The valid value is either debug, notice,
+ information (default), warning, or critical
+ -X [ --script-debugger ] whether to enable the script debugger
+
+Report bugs at <https://github.com/Icinga/icinga2>
+Icinga home page: <https://icinga.com/>
+```
+
+## CLI command: Variable <a id="cli-command-variable"></a>
+
+Lists all configured variables (constants) in a similar fashion to [object list](11-cli-commands.md#cli-command-object).
+
+```
+# icinga2 variable --help
+icinga2 - The Icinga 2 network monitoring daemon (version: v2.11.0)
+
+Usage:
+ icinga2 <command> [<arguments>]
+
+Supported commands:
+ * variable get (gets a variable)
+ * variable list (lists all variables)
+
+Global options:
+ -h [ --help ] show this help message
+ -V [ --version ] show version information
+ --color use VT100 color codes even when stdout is not a
+ terminal
+ -D [ --define ] arg define a constant
+ -a [ --app ] arg application library name (default: icinga)
+ -l [ --library ] arg load a library
+ -I [ --include ] arg add include search directory
+ -x [ --log-level ] arg specify the log level for the console log.
+ The valid value is either debug, notice,
+ information (default), warning, or critical
+ -X [ --script-debugger ] whether to enable the script debugger
+
+Report bugs at <https://github.com/Icinga/icinga2>
+Icinga home page: <https://icinga.com/>
+```
+
+## Enabling/Disabling Features <a id="enable-features"></a>
+
+Icinga 2 provides configuration files for some commonly used features. These
+are installed in the `/etc/icinga2/features-available` directory and can be
+enabled and disabled using the `icinga2 feature enable` and `icinga2 feature disable`
+[CLI commands](11-cli-commands.md#cli-command-feature), respectively.
+
+The `icinga2 feature enable` CLI command creates symlinks in the
+`/etc/icinga2/features-enabled` directory which is included by default
+in the example configuration file.
+
+You can view a list of enabled and disabled features:
+
+```
+# icinga2 feature list
+Disabled features: api command compatlog debuglog graphite icingastatus ido-mysql ido-pgsql livestatus notification perfdata statusdata syslog
+Enabled features: checker mainlog notification
+```
+
+Using the `icinga2 feature enable` command you can enable features:
+
+```
+# icinga2 feature enable graphite
+Enabling feature graphite. Make sure to restart Icinga 2 for these changes to take effect.
+```
+
+You can disable features using the `icinga2 feature disable` command:
+
+```
+# icinga2 feature disable ido-mysql livestatus
+Disabling feature ido-mysql. Make sure to restart Icinga 2 for these changes to take effect.
+Disabling feature livestatus. Make sure to restart Icinga 2 for these changes to take effect.
+```
+
+The `icinga2 feature enable` and `icinga2 feature disable` commands do not
+restart Icinga 2. You will need to restart Icinga 2 using the init script
+after enabling or disabling features.
+
+
+
+## Configuration Validation <a id="config-validation"></a>
+
+Once you've edited the configuration files make sure to tell Icinga 2 to validate
+the configuration changes. Icinga 2 will log any configuration error including
+a hint on the file, the line number and the affected configuration line itself.
+
+The following example creates an apply rule without any `assign` condition.
+
+```
+apply Service "my-ping4" {
+ import "generic-service"
+ check_command = "ping4"
+ //assign where host.address
+}
+```
+
+Validate the configuration:
+
+```
+# icinga2 daemon -C
+
+[2014-05-22 17:07:25 +0200] critical/ConfigItem: Location:
+/etc/icinga2/conf.d/tests/my.conf(5): }
+/etc/icinga2/conf.d/tests/my.conf(6):
+/etc/icinga2/conf.d/tests/my.conf(7): apply Service "my-ping4" {
+ ^^^^^^^^^^^^^
+/etc/icinga2/conf.d/tests/my.conf(8): import "test-generic-service"
+/etc/icinga2/conf.d/tests/my.conf(9): check_command = "ping4"
+
+Config error: 'apply' is missing 'assign'
+[2014-05-22 17:07:25 +0200] critical/ConfigItem: 1 errors, 0 warnings.
+Icinga 2 detected configuration errors.
+```
+
+If you encounter errors during configuration validation, please make sure
+to read the [troubleshooting](15-troubleshooting.md#troubleshooting) chapter.
+
+You can also use the [CLI command](11-cli-commands.md#cli-command-object) `icinga2 object list`
+after validation passes to analyze object attributes, inheritance or created
+objects by apply rules.
+Find more on troubleshooting with `object list` in [this chapter](15-troubleshooting.md#troubleshooting-list-configuration-objects).
+
+
+## Reload on Configuration Changes <a id="config-change-reload"></a>
+
+Every time you have changed your configuration you should first tell Icinga 2
+to [validate](11-cli-commands.md#config-validation). If there are no validation errors, you can
+safely reload the Icinga 2 daemon.
+
+```bash
+systemctl reload icinga2
+```
+
+The `reload` action will send the `SIGHUP` signal to the Icinga 2 daemon
+which will validate the configuration in a separate process without stopping
+other events such as check execution, notifications, etc.
diff --git a/doc/12-icinga2-api.md b/doc/12-icinga2-api.md
new file mode 100644
index 0000000..81c9426
--- /dev/null
+++ b/doc/12-icinga2-api.md
@@ -0,0 +1,3032 @@
+# REST API <a id="icinga2-api"></a>
+
+* [Setup](12-icinga2-api.md#icinga2-api-setup)
+* [Introduction](12-icinga2-api.md#icinga2-api-introduction)
+* Endpoints
+ * [Config Objects](12-icinga2-api.md#icinga2-api-config-objects)
+ * [Actions](12-icinga2-api.md#icinga2-api-actions)
+ * [Event Streams](12-icinga2-api.md#icinga2-api-event-streams)
+ * [Status and Statistics](12-icinga2-api.md#icinga2-api-status)
+ * [Config Management](12-icinga2-api.md#icinga2-api-config-management)
+ * [Types](12-icinga2-api.md#icinga2-api-types)
+ * [Templates](12-icinga2-api.md#icinga2-api-config-templates)
+ * [Variables](12-icinga2-api.md#icinga2-api-variables)
+ * [Debug Console](12-icinga2-api.md#icinga2-api-console)
+* [API Clients](12-icinga2-api.md#icinga2-api-clients)
+ * [Programmatic Examples](12-icinga2-api.md#icinga2-api-clients-programmatic-examples)
+
+
+## Setting up the API <a id="icinga2-api-setup"></a>
+
+You can run the CLI command `icinga2 api setup` to enable the
+`api` [feature](11-cli-commands.md#enable-features) and set up
+certificates as well as a new API user `root` with an auto-generated password in the
+`/etc/icinga2/conf.d/api-users.conf` configuration file:
+
+```bash
+icinga2 api setup
+```
+
+Make sure to restart Icinga 2 to enable the changes you just made:
+
+```bash
+systemctl restart icinga2
+```
+
+If you prefer to set up the API manually, you will have to perform the following steps:
+
+* Set up X.509 TLS certificates for Icinga 2
+* Enable the `api` feature (`icinga2 feature enable api`)
+* Create an `ApiUser` object for authentication (see the sketch below)
+
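+A minimal `ApiUser` object for the manual setup could look like the following
+sketch (user name, password and permissions are examples only -- pick your own):
+
+```
+object ApiUser "root" {
+  password = "icinga"                     // example password, change it
+  permissions = [ "*" ]                   // full access; restrict this in production
+}
+```
+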
+The next chapter provides a quick overview of how you can use the API.
+
+## Introduction <a id="icinga2-api-introduction"></a>
+
+The Icinga 2 API allows you to manage configuration objects
+and resources in a simple, programmatic way using HTTP requests.
+
+The URL endpoints are logically separated allowing you to easily
+make calls to
+
+* query, create, modify and delete [config objects](12-icinga2-api.md#icinga2-api-config-objects)
+* perform [actions](12-icinga2-api.md#icinga2-api-actions) (reschedule checks, etc.)
+* subscribe to [event streams](12-icinga2-api.md#icinga2-api-event-streams)
+* [manage configuration packages](12-icinga2-api.md#icinga2-api-config-management)
+* evaluate [script expressions](12-icinga2-api.md#icinga2-api-console)
+
+### Requests <a id="icinga2-api-requests"></a>
+
+Any tool capable of making HTTP requests can communicate with
+the API, for example [curl](https://curl.haxx.se/).
+
+Requests are only allowed to use the HTTPS protocol so that
+traffic remains encrypted.
+
+By default the Icinga 2 API listens on port `5665` which is shared with
+the cluster stack. The port can be changed by setting the `bind_port` attribute
+for the [ApiListener](09-object-types.md#objecttype-apilistener)
+object in the `/etc/icinga2/features-available/api.conf`
+configuration file.
+
+Supported request methods:
+
+ Method | Usage
+ -------|--------
+ GET | Retrieve information about configuration objects. Any request using the GET method is read-only and does not affect any objects.
+ POST | Update attributes of a specified configuration object.
+ PUT | Create a new object. The PUT request must include all attributes required to create a new object.
+ DELETE | Remove an object created by the API. The DELETE method is idempotent and does not require any check if the object actually exists.
+
+All requests except `GET` require the following `Accept` header:
+
+```
+Accept: application/json
+```
+
+Each URL is prefixed with the API version (currently "/v1").
+
+HTTP header size is limited to 8KB per request.
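+
+To illustrate the URL scheme, here is a minimal read-only request against the
+versioned base URL (assuming the `root` API user created during setup):
+
+```bash
+curl -k -s -S -i -u root:icinga 'https://localhost:5665/v1'
+```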
+
+### Responses <a id="icinga2-api-responses"></a>
+
+Successful requests will send back a response body containing a `results`
+list. Depending on the number of affected objects in your request, the
+`results` list may contain more than one entry.
+
+The output will be sent back as a JSON object:
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "status": "Object was created."
+ }
+ ]
+}
+```
+
+> **Tip**
+>
+> You can use the [pretty](12-icinga2-api.md#icinga2-api-parameters-global) parameter to beautify the JSON response.
+
+You can also use [jq](https://stedolan.github.io/jq/) or `python -m json.tool`
+in combination with curl on the CLI.
+
+```bash
+curl ... | jq
+curl ... | python -m json.tool
+```
+
+jq also has additional filter capabilities, as shown in [this blogpost](https://www.netways.de/blog/2018/08/24/json-in-bequem/).
+
+```bash
+curl ... |jq '{name: .results[].name}'
+```
+
+For programmatic examples in various languages, check the chapter
+[below](12-icinga2-api.md#icinga2-api-clients).
+
+> **Note**
+>
+> Future versions of Icinga 2 might set additional fields. Your application
+> should gracefully handle fields it is not familiar with, for example by
+> ignoring them.
+
+### HTTP Statuses <a id="icinga2-api-http-statuses"></a>
+
+The API will return standard [HTTP statuses](https://www.ietf.org/rfc/rfc2616.txt)
+including error codes.
+
+When an error occurs, the response body will contain additional information
+about the problem and its source. Set `verbose` to true to retrieve more
+insights into what may be causing the error.
+
+A status code between 200 and 299 generally means that the request was
+successful.
+
+Return codes within the 400 range indicate that there was a problem with the
+request. Either you did not authenticate correctly, you are missing the authorization
+for your requested action, the requested object does not exist or the request
+was malformed.
+
+A status in the range of 500 generally means that there was a server-side problem
+and Icinga 2 is unable to process your request.
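+
+For example, `verbose` can be passed as a URL parameter just like `pretty`
+(a sketch assuming the `root` API user from above); for failing requests the
+response may then contain additional diagnostic information:
+
+```bash
+curl -k -s -S -i -u root:icinga \
+ 'https://localhost:5665/v1/objects/hosts/does-not-exist?verbose=1&pretty=1'
+```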
+
+### Security <a id="icinga2-api-security"></a>
+
+* HTTPS only.
+* TLS v1.2+ is required.
+* TLS cipher lists are hardened [by default](09-object-types.md#objecttype-apilistener).
+* Authentication is [required](12-icinga2-api.md#icinga2-api-authentication).
+
+### Authentication <a id="icinga2-api-authentication"></a>
+
+There are two different ways for authenticating against the Icinga 2 API:
+
+* Username and password using HTTP basic auth
+* X.509 client certificate
+
+In order to configure a new API user you'll need to add a new [ApiUser](09-object-types.md#objecttype-apiuser)
+configuration object. In this example `root` will be the basic auth username
+and the `password` attribute contains the basic auth password.
+
+```
+# vim /etc/icinga2/conf.d/api-users.conf
+
+object ApiUser "root" {
+ password = "icinga"
+}
+```
+
+Alternatively you can use X.509 client certificates by specifying the `client_cn`
+the API should trust. The X.509 certificate has to be signed by the CA certificate
+that is configured in the [ApiListener](09-object-types.md#objecttype-apilistener) object.
+
+```
+# vim /etc/icinga2/conf.d/api-users.conf
+
+object ApiUser "root" {
+ client_cn = "CertificateCommonName"
+}
+```
+
+An `ApiUser` object can have both authentication methods configured.
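+
+For instance, a single `ApiUser` definition may carry both a password and a
+trusted certificate common name (a sketch; adjust the CN to your certificate):
+
+```
+object ApiUser "root" {
+  password = "icinga"
+  client_cn = "CertificateCommonName"
+}
+```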
+
+#### Authentication Test <a id="icinga2-api-authentication-test"></a>
+
+You can test authentication by sending a GET request to the API:
+
+```bash
+curl -k -s -S -i -u root:icinga 'https://localhost:5665/v1'
+```
+
+In case you get an error message make sure to check the API user credentials.
+
+When using client certificates for authentication you'll need to pass your client certificate
+and private key to the curl call:
+
+```bash
+curl -k --cert example.localdomain.crt --key example.localdomain.key 'https://example.localdomain:5665/v1/status'
+```
+
+In case of an error make sure to verify the client certificate and CA.
+
+The curl parameter `-k` disables certificate verification and should therefore
+only be used for testing. In order to securely check each connection you'll need to
+specify the trusted CA certificate using the curl parameter `--cacert`:
+
+```bash
+curl -u root:icinga --cacert ca.crt 'https://icinga2.node1.localdomain:5665/v1'
+```
+
+Read the next chapter on [API permissions](12-icinga2-api.md#icinga2-api-permissions)
+in order to configure authorization settings for your newly created API user.
+
+### Permissions <a id="icinga2-api-permissions"></a>
+
+By default an API user does not have any permissions to perform
+actions on the URL endpoints.
+
+Permissions for API users must be specified in the `permissions` attribute
+as an array. The array items can be a list of permission strings with wildcard
+matches. Please note that the permission system used by the API differs from the permission system used by the Icinga Web 2 frontend or other parts of Icinga 2.
+
+The permission system mainly relies on the URL scheme of the API endpoints (see the listing below).
+
+Example for an API user with all permissions:
+
+```
+permissions = [ "*" ]
+```
+
+Note that you can use wildcards to include all possible hierarchically lower items. Here's another example that only allows the user
+to perform read-only object queries for hosts and services:
+
+```
+permissions = [ "objects/query/Host", "objects/query/Service" ]
+```
+
+You can also further restrict permissions by specifying a filter expression. The
+filter expression has to be a [lambda function](17-language-reference.md#nullary-lambdas)
+which must return a boolean value.
+
+The following example allows the API user to query all hosts and services which have a
+custom variable `os` that matches the regular expression `^Linux`.
+The [regex function](18-library-reference.md#global-functions-regex) is available as global function.
+
+```
+permissions = [
+ {
+ permission = "objects/query/Host"
+ filter = {{ regex("^Linux", host.vars.os) }}
+ },
+ {
+ permission = "objects/query/Service"
+ filter = {{ regex("^Linux", service.vars.os) }}
+ }
+]
+```
+
+More information about filters can be found in the [filters](12-icinga2-api.md#icinga2-api-filters) chapter.
+
+Prior to setting complex permissions, make sure to always [test](12-icinga2-api.md#icinga2-api-authentication-test)
+them step by step.
+
+
+#### Overview <a id="icinga2-api-permissions-overview"></a>
+
+Permissions are tied to a maximum HTTP request size to prevent abuse; responses sent by Icinga are not limited.
+An API user with all permissions ("\*") may send up to 512 MB regardless of the endpoint.
+
+Available permissions for specific URL endpoints:
+
+ Permissions | URL Endpoint | Supports filters | Max body size in MB
+ ------------------------------|---------------|-------------------|---------------------
+ actions/&lt;action&gt; | /v1/actions | Yes | 1
+ config/query | /v1/config | No | 1
+ config/modify | /v1/config | No | 512
+ console | /v1/console | No | 1
+ events/&lt;type&gt; | /v1/events | No | 1
+ objects/query/&lt;type&gt; | /v1/objects | Yes | 1
+ objects/create/&lt;type&gt; | /v1/objects | No | 1
+ objects/modify/&lt;type&gt; | /v1/objects | Yes | 1
+ objects/delete/&lt;type&gt; | /v1/objects | Yes | 1
+ status/query | /v1/status | Yes | 1
+ templates/&lt;type&gt; | /v1/templates | Yes | 1
+ types | /v1/types | Yes | 1
+ variables | /v1/variables | Yes | 1
+
+The required actions or types can be replaced by using a wildcard match ("\*").
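+
+For example, a hypothetical user that may query any object type and trigger
+any action, but nothing else, could be configured like this:
+
+```
+permissions = [ "objects/query/*", "actions/*" ]
+```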
+
+
+### Parameters <a id="icinga2-api-parameters"></a>
+
+Depending on the request method there are two ways of passing parameters to the request:
+
+* JSON object as request body (all request methods other than `GET`)
+* Query string as URL parameter (all request methods)
+
+Characters reserved by the HTTP protocol must be [URL-encoded](https://en.wikipedia.org/wiki/Percent-encoding)
+when passed as a query string, e.g. a space character becomes `%20`.
+
+Example for a URL-encoded query string:
+
+```
+/v1/objects/hosts?filter=match(%22example.localdomain*%22,host.name)&attrs=name&attrs=state
+```
+
+Here are the exact same query parameters as a JSON object:
+
+```json
+{ "filter": "match(\"example.localdomain*\",host.name)", "attrs": [ "host.name", "host.state" ] }
+```
+
+The [match function](18-library-reference.md#global-functions-match) is available as global function
+in Icinga 2.
+
+Whenever filters and other URL parameters don't work due to encoding issues,
+consider passing them in the request body. For GET requests, this method is explained
+[here](12-icinga2-api.md#icinga2-api-requests-method-override).
+
+You can use [jo](https://github.com/jpmens/jo) to format JSON strings on the shell. An example
+for API actions is shown [here](12-icinga2-api.md#icinga2-api-actions-unix-timestamps).
+
+
+### Global Parameters <a id="icinga2-api-parameters-global"></a>
+
+Name | Description
+----------------|--------------------
+pretty | Pretty-print the JSON response.
+verbose | Add verbose debug information inside the `diagnostic_information` key into the response if available. This helps with troubleshooting failing requests.
+
+Example as URL parameter:
+
+```
+/v1/objects/hosts?pretty=1
+```
+
+Example as JSON object:
+
+```json
+{ "pretty": true }
+```
+
+### Request Method Override <a id="icinga2-api-requests-method-override"></a>
+
+`GET` requests do not allow you to send a request body. In case you cannot pass everything as URL
+parameters (e.g. complex filters or JSON-encoded dictionaries) you can use the `X-HTTP-Method-Override`
+header. This also comes in handy when you are using HTTP proxies that disallow `PUT` or `DELETE` requests.
+
+Query an existing object by sending a `POST` request with `X-HTTP-Method-Override: GET` as request header:
+
+```bash
+curl -k -s -S -i -u 'root:icinga' -H 'Accept: application/json' \
+ -H 'X-HTTP-Method-Override: GET' -X POST \
+ 'https://localhost:5665/v1/objects/hosts'
+```
+
+Delete an existing object by sending a `POST` request with `X-HTTP-Method-Override: DELETE` as request header:
+
+```bash
+curl -k -s -S -i -u 'root:icinga' -H 'Accept: application/json' \
+ -H 'X-HTTP-Method-Override: DELETE' -X POST \
+ 'https://localhost:5665/v1/objects/hosts/example.localdomain'
+```
+
+Query objects with complex filters. For a detailed introduction to filters, please
+read the [following chapter](12-icinga2-api.md#icinga2-api-filters).
+
+```bash
+curl -k -s -S -i -u 'root:icinga' -H 'Accept: application/json' \
+ -H 'X-HTTP-Method-Override: GET' -X POST \
+ 'https://localhost:5665/v1/objects/services' \
+ -d '{ "filter": "service.state==2 && match(\"ping*\",service.name)" }'
+```
+
+### Filters <a id="icinga2-api-filters"></a>
+
+#### Simple Filters <a id="icinga2-api-simple-filters"></a>
+
+By default actions and queries operate on all objects unless further restricted by the user. For
+example, the following query returns all `Host` objects:
+
+```
+https://localhost:5665/v1/objects/hosts
+```
+
+If you're only interested in a single object, you can limit the output to that object by specifying its name:
+
+```
+https://localhost:5665/v1/objects/hosts?host=localhost
+```
+
+**The name of the URL parameter is the lower-case version of the type the query applies to.** For
+example, for `Host` objects the URL parameter therefore is `host`, for `Service` objects it is
+`service` and so on.
+
+You can also specify multiple objects:
+
+```
+https://localhost:5665/v1/objects/hosts?hosts=first-host&hosts=second-host
+```
+
+Again -- like in the previous example -- the name of the URL parameter is the lower-case version of the type. However, because we're specifying multiple objects here the **plural form** of the type is used.
+
+When specifying names for objects which have composite names like for example services the
+full name has to be used:
+
+```
+https://localhost:5665/v1/objects/services?service=localhost!ping6
+```
+
+The full name of an object can be obtained by looking at the `__name` attribute.
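+
+For example, querying the composite-named service from above directly
+(a sketch assuming the `localhost!ping6` service exists):
+
+```bash
+curl -k -s -S -i -u root:icinga \
+ 'https://localhost:5665/v1/objects/services?service=localhost!ping6&pretty=1'
+```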
+
+#### Advanced Filters <a id="icinga2-api-advanced-filters"></a>
+
+Most of the information provided in this chapter applies to both permission filters (as used when
+configuring `ApiUser` objects) and filters specified in queries.
+
+Advanced filters allow users to filter objects using lambda expressions.
+The syntax for these filters is the same as for [apply rule expressions](03-monitoring-basics.md#using-apply-expressions).
+
+The `filter` parameter can only be specified once; complex filters must
+be expressed in a single string value.
+
+> **Note**
+>
+> Filters used as URL parameter must be URL-encoded. The following examples
+> are **not URL-encoded** for better readability.
+
+Example matching all services in NOT-OK state:
+
+```
+https://localhost:5665/v1/objects/services?filter=service.state!=ServiceOK
+```
+
+Example [matching](18-library-reference.md#global-functions-match) all hosts by a name string pattern:
+
+```
+https://localhost:5665/v1/objects/hosts?filter=match("example.localdomain*",host.name)
+```
+
+Example for all hosts which are in the host group `linux-servers`:
+
+```
+https://localhost:5665/v1/objects/hosts?filter="linux-servers" in host.groups
+```
+
+> **Tip**
+>
+> Best practice for filters is to use [X-HTTP-Method-Override](12-icinga2-api.md#icinga2-api-requests-method-override)
+> for GET requests and always pass them in the request body.
+
+User-specified filters are run in a sandbox environment which ensures that filters cannot
+modify Icinga's state, for example object attributes or global variables.
+
+When querying objects of a specific type the filter expression is evaluated for each object
+of that type. The object is made available to the filter expression as a variable whose name
+is the lower-case version of the object's type name.
+
+For example when querying objects of type `Host` the variable in the filter expression is named
+`host`. Additionally related objects such as the host's check command are also made available
+(e.g., via the `check_command` variable). The variable names are the exact same as for the `joins`
+query parameter; see [object query joins](12-icinga2-api.md#icinga2-api-config-objects-query-joins)
+for details.
+
+The object is also made available via the `obj` variable. This makes it easier to build
+filters which can be used for more than one object type (e.g., for permissions).
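+
+A hypothetical permission filter using `obj` could therefore cover hosts and
+services alike (a sketch, reusing the `os` custom variable from the earlier
+permissions example):
+
+```
+permissions = [
+  {
+    permission = "objects/query/*"
+    filter = {{ regex("^Linux", obj.vars.os) }}
+  }
+]
+```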
+
+Some queries can be performed for more than just one object type. One example is the 'reschedule-check'
+action which can be used for both hosts and services. When using advanced filters you will also have to specify the
+type using the `type` parameter:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' -X POST \
+ 'https://localhost:5665/v1/actions/reschedule-check' \
+ -d '{ "type": "Service", "filter": "service.name==\"ping6\"", "pretty": true }'
+```
+
+##### Filter Variables <a id="icinga2-api-advanced-filters-variables"></a>
+
+Filter values need to be escaped in the same way as in the Icinga 2 DSL.
+
+The example below is not valid:
+
+```
+-d '{ "type": "Host", "filter": ""linux-servers" in host.groups" }'
+```
+
+The double quotes need to be escaped with a preceding backslash:
+
+```
+-d '{ "type": "Host", "filter": "\"linux-servers\" in host.groups" }'
+```
+
+You can use the `filter_vars` attribute to avoid additional escaping.
+This follows the same principle as with parameter binding known from RDBMS.
+Specify a placeholder variable inside the `filter` string, and actually
+assign its value inside the `filter_vars` dictionary.
+
+That way you can also keep the `filter` string the same for different
+requests with only changing the `filter_vars`.
+
+```bash
+curl -k -s -S -i -u 'root:icinga' -H 'Accept: application/json' \
+ -H 'X-HTTP-Method-Override: GET' -X POST \
+ 'https://localhost:5665/v1/objects/hosts' \
+ -d '{ "filter": "group in host.groups", "filter_vars": { "group": "linux-servers" }, "pretty": true }'
+```
+
+We're using [X-HTTP-Method-Override](12-icinga2-api.md#icinga2-api-requests-method-override) here because
+the HTTP specification does not allow message bodies for GET requests.
+
+The `filter_vars` attribute can only be used inside the request body, but not as
+a URL parameter because there is no way to specify a dictionary in a URL.
+
+The example from [X-HTTP-Method-Override](12-icinga2-api.md#icinga2-api-requests-method-override)
+can be enhanced to avoid additional parameter value escaping.
+
+```bash
+curl -k -s -S -i -u 'root:icinga' -H 'Accept: application/json' \
+ -H 'X-HTTP-Method-Override: GET' -X POST \
+ 'https://localhost:5665/v1/objects/services' \
+ -d '{ "filter": "service.state==state && match(pattern,service.name)", "filter_vars": { "state": 2, "pattern": "ping*" } }'
+```
+
+## Config Objects <a id="icinga2-api-config-objects"></a>
+
+Provides methods to manage configuration objects:
+
+* [creating objects](12-icinga2-api.md#icinga2-api-config-objects-create)
+* [querying objects](12-icinga2-api.md#icinga2-api-config-objects-query)
+* [modifying objects](12-icinga2-api.md#icinga2-api-config-objects-modify)
+* [deleting objects](12-icinga2-api.md#icinga2-api-config-objects-delete)
+
+### API Objects and Cluster Config Sync <a id="icinga2-api-config-objects-cluster-sync"></a>
+
+Newly created or updated objects can be synced throughout your
+Icinga 2 cluster. Set the `zone` attribute to the zone this object
+belongs to and let the API and cluster handle the rest.
+
+Objects without a zone attribute are only synced in the same zone the Icinga instance belongs to.
+
+> **Note**
+>
+> Cluster nodes must accept configuration for creating, modifying
+> and deleting objects. Ensure that `accept_config` is set to `true`
+> in the [ApiListener](09-object-types.md#objecttype-apilistener) object
+> on each node.
+
+If you add a new cluster instance, or reconnect an instance which has been offline
+for a while, Icinga 2 takes care of the initial object sync for all objects
+created by the API.
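+
+A sketch of creating a host in a specific zone via the API (assuming a zone
+named `satellite` exists and its endpoints have `accept_config` enabled):
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X PUT 'https://localhost:5665/v1/objects/hosts/example.localdomain' \
+ -d '{ "attrs": { "check_command": "hostalive", "zone": "satellite" } }'
+```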
+
+### Querying Objects <a id="icinga2-api-config-objects-query"></a>
+
+You can request information about configuration objects by sending
+a `GET` query to the `/v1/objects/<type>` URL endpoint. `<type>` has
+to be replaced with the plural name of the object type you are interested
+in:
+
+```bash
+curl -k -s -S -i -u root:icinga 'https://localhost:5665/v1/objects/hosts'
+```
+
+A list of all available configuration types is available in the
+[object types](09-object-types.md#object-types) chapter.
+
+The following URL parameters are available:
+
+ Parameters | Type | Description
+ -----------|--------------|----------------------------
+ attrs | Array | **Optional.** Limited attribute list in the output.
+ joins | Array | **Optional.** Join related object types and their attributes specified as list (`?joins=host` for the entire set, or selectively by `?joins=host.name`).
+ meta | Array | **Optional.** Enable meta information using `?meta=used_by` (references from other objects) and/or `?meta=location` (location information) specified as list. Defaults to disabled.
+
+In addition to these parameters a [filter](12-icinga2-api.md#icinga2-api-filters) may be provided.
+
+Instead of using a filter you can optionally specify the object name in the
+URL path when querying a single object. For objects with composite names
+(e.g. services) the full name (e.g. `example.localdomain!http`) must be specified:
+
+```bash
+curl -k -s -S -i -u root:icinga 'https://localhost:5665/v1/objects/services/example.localdomain!http'
+```
+
+You can limit the output to specific attributes using the `attrs` URL parameter:
+
+```bash
+curl -k -s -S -i -u root:icinga 'https://localhost:5665/v1/objects/hosts/example.localdomain?attrs=name&attrs=address&pretty=1'
+```
+
+```json
+{
+ "results": [
+ {
+ "attrs": {
+ "name": "example.localdomain",
+ "address": "192.168.1.1"
+ },
+ "joins": {},
+ "meta": {},
+ "name": "example.localdomain",
+ "type": "Host"
+ }
+ ]
+}
+```
+
+#### Object Queries Result <a id="icinga2-api-config-objects-query-result"></a>
+
+Each response entry in the results array contains the following attributes:
+
+ Attribute | Type | Description
+ -----------|------------|--------------
+ name | String | Full object name.
+ type | String | Object type.
+ attrs | Dictionary | Object attributes (can be filtered using the URL parameter `attrs`).
+ joins | Dictionary | [Joined object types](12-icinga2-api.md#icinga2-api-config-objects-query-joins) as key, attributes as nested dictionary. Disabled by default.
+ meta | Dictionary | Contains `used_by` object references. Disabled by default, enable it using `?meta=used_by` as URL parameter.
+
+#### Object Query Joins <a id="icinga2-api-config-objects-query-joins"></a>
+
+Icinga 2 knows about object relations. For example it can optionally return
+information about the host when querying service objects.
+
+The following query retrieves all host attributes:
+
+```
+https://localhost:5665/v1/objects/services?joins=host
+```
+
+Instead of requesting all host attributes you can also limit the output to specific
+attributes:
+
+```
+https://localhost:5665/v1/objects/services?joins=host.name&joins=host.address
+```
+
+You can request that all available joins are returned in the result set by using
+the `all_joins` query parameter.
+
+```
+https://localhost:5665/v1/objects/services?all_joins=1
+```
+
+> **Note**
+>
+> For performance reasons you should only request attributes which your application
+> requires.
+
+Please note that the object type refers to the URL endpoint with `/v1/objects/<object type>`
+where the following joins are available:
+
+ Object Type | Object Relations (`joins` prefix name)
+ -------------|------------------------------------------
+ Service | host, check\_command, check\_period, event\_command, command\_endpoint
+ Host | check\_command, check\_period, event\_command, command\_endpoint
+ Notification | host, service, command, period
+ Dependency | child\_host, child\_service, parent\_host, parent\_service, period
+ User | period
+ Zones | parent
+
+Here's an example that retrieves all service objects for hosts which have had their `os`
+custom variable set to `Linux`. The result set contains the `display_name` and `check_command`
+attributes for the service. The query also returns the host's `name` and `address` attribute
+via a join:
+
+```bash
+curl -k -s -S -i -u root:icinga 'https://localhost:5665/v1/objects/services?attrs=display_name&attrs=check_command&joins=host.name&joins=host.address&filter=host.vars.os==%22Linux%22&pretty=1'
+```
+
+```json
+{
+ "results": [
+ {
+ "attrs": {
+ "check_command": "ping4",
+ "display_name": "ping4"
+ },
+ "joins": {
+ "host": {
+ "address": "192.168.1.1",
+ "name": "example.localdomain"
+ }
+ },
+ "meta": {},
+ "name": "example.localdomain!ping4",
+ "type": "Service"
+ },
+ {
+ "attrs": {
+ "check_command": "ssh",
+ "display_name": "ssh"
+ },
+ "joins": {
+ "host": {
+ "address": "192.168.1.1",
+ "name": "example.localdomain"
+ }
+ },
+ "meta": {},
+ "name": "example.localdomain!ssh",
+ "type": "Service"
+ }
+ ]
+}
+```
+
+> **Tip**
+>
+> Use [X-HTTP-Method-Override](12-icinga2-api.md#icinga2-api-requests-method-override)
+> and pass everything in the request body like this:
+
+```bash
+curl -k -s -S -i -u 'root:icinga' -H 'Accept: application/json' \
+ -H 'X-HTTP-Method-Override: GET' -X POST \
+ 'https://localhost:5665/v1/objects/services' \
+ -d '{ "attrs": [ "display_name", "check_command" ], "joins": [ "host.name", "host.address" ], "filter": "host.vars.os==\"Linux\"", "pretty": true }'
+```
+
+In case you want to fetch all [comments](09-object-types.md#objecttype-comment)
+for hosts and services, you can use the following query URL (similar example
+for downtimes):
+
+```
+https://localhost:5665/v1/objects/comments?joins=host&joins=service
+```
+
+This is another example for listing all service objects which are unhandled problems (state is not OK
+and no downtime or acknowledgement set). We're using [X-HTTP-Method-Override](12-icinga2-api.md#icinga2-api-requests-method-override)
+here because we want to pass all query attributes in the request body.
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -H 'X-HTTP-Method-Override: GET' -X POST \
+ 'https://127.0.0.1:5665/v1/objects/services' \
+-d '{ "joins": [ "host.name", "host.address" ], "attrs": [ "name", "state", "downtime_depth", "acknowledgement" ], "filter": "service.state != ServiceOK && service.downtime_depth == 0.0 && service.acknowledgement == 0.0", "pretty": true }'
+```
+
+```json
+{
+ "results": [
+ {
+ "attrs": {
+ "acknowledgement": 0.0,
+ "downtime_depth": 0.0,
+ "name": "10807-service",
+ "state": 3.0
+ },
+ "joins": {
+ "host": {
+ "address": "",
+ "name": "10807-host"
+ }
+ },
+ "meta": {},
+ "name": "10807-host!10807-service",
+ "type": "Service"
+ }
+ ]
+}
+```
+
+In order to list all acknowledgements without expire time, you query the `/v1/objects/comments`
+URL endpoint with `joins` and `filter` request parameters using the [X-HTTP-Method-Override](12-icinga2-api.md#icinga2-api-requests-method-override)
+method:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -H 'X-HTTP-Method-Override: GET' -X POST \
+ 'https://localhost:5665/v1/objects/comments' \
+ -d '{ "joins": [ "service.name", "service.acknowledgement", "service.acknowledgement_expiry" ], "attrs": [ "author", "text" ], "filter": "service.acknowledgement!=0 && service.acknowledgement_expiry==0", "pretty": true }'
+```
+
+```json
+{
+ "results": [
+ {
+ "attrs": {
+ "author": "icingaadmin",
+ "text": "maintenance work"
+ },
+ "joins": {
+ "service": {
+ "__name": "example.localdomain!disk /",
+ "acknowledgement": 1.0,
+ "acknowledgement_expiry": 0.0
+ }
+ },
+ "meta": {},
+ "name": "example.localdomain!disk /!example.localdomain-1495457222-0",
+ "type": "Comment"
+ }
+ ]
+}
+```
+
+### Creating Config Objects <a id="icinga2-api-config-objects-create"></a>
+
+New objects must be created by sending a PUT request. The following
+parameters need to be passed inside the JSON body:
+
+ Parameters | Type | Description
+ ------------------|--------------|--------------------------
+ templates | Array | **Optional.** Import existing configuration templates for this object type. Note: These templates must either be statically configured or provided in [config packages](12-icinga2-api.md#icinga2-api-config-management).
+ attrs | Dictionary | **Required.** Set specific object attributes for this [object type](09-object-types.md#object-types).
+ ignore\_on\_error | Boolean | **Optional.** Ignore object creation errors and return an HTTP 200 status instead.
+
+The object name must be specified as part of the URL path. For objects with composite names (e.g. services)
+the full name (e.g. `example.localdomain!http`) must be specified.
+
+If attributes are of the Dictionary type, you can also use the indexer format. This might be necessary to only override specific custom variables and keep all other existing custom variables (e.g. from templates):
+
+```
+"attrs": { "vars.os": "Linux" }
+```
+
+Example for creating the new host object `example.localdomain`:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X PUT 'https://localhost:5665/v1/objects/hosts/example.localdomain' \
+ -d '{ "templates": [ "generic-host" ], "attrs": { "address": "192.168.1.1", "check_command": "hostalive", "vars.os" : "Linux" }, "pretty": true }'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "status": "Object was created."
+ }
+ ]
+}
+```
+
+If the configuration validation fails, the new object will not be created and the response body
+contains a detailed error message. The following example is missing the `check_command` attribute
+which is required for host objects:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X PUT 'https://localhost:5665/v1/objects/hosts/example.localdomain' \
+ -d '{ "attrs": { "address": "192.168.1.1", "vars.os" : "Linux" }, "pretty": true }'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 500.0,
+ "errors": [
+ "Error: Validation failed for object 'example.localdomain' of type 'Host'; Attribute 'check_command': Attribute must not be empty."
+ ],
+ "status": "Object could not be created."
+ }
+ ]
+}
+```
+
+Service objects must be created using their full name ("hostname!servicename") referencing an existing host object:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X PUT 'https://localhost:5665/v1/objects/services/example.localdomain!realtime-load' \
+ -d '{ "templates": [ "generic-service" ], "attrs": { "check_command": "load", "check_interval": 1,"retry_interval": 1 } }'
+```
+
+Example for a new CheckCommand object:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X PUT 'https://localhost:5665/v1/objects/checkcommands/mytest' \
+ -d '{ "templates": [ "plugin-check-command" ], "attrs": { "command": [ "/usr/local/sbin/check_http" ], "arguments": { "-I": "$mytest_iparam$" } } }'
+```
+
+### Modifying Objects <a id="icinga2-api-config-objects-modify"></a>
+
+Existing objects must be modified by sending a `POST` request. The following
+parameters need to be passed inside the JSON body:
+
+| Parameters | Type | Description |
+|----------------|------------|----------------------------------------------------------------------------------------------------------------------------|
+| attrs | Dictionary | **Optional.** Set specific object attributes for this [object type](09-object-types.md#object-types). |
+| restore\_attrs | Array | **Optional.** Discard modifications of specific object attributes for this [object type](09-object-types.md#object-types). |
+
+One of the above is required.
+
+!!! info
+
+ If a particular attribute is given in both sets,
+ it's first restored and then set to the desired new value.
+
+In addition to these parameters a [filter](12-icinga2-api.md#icinga2-api-filters)
+parameter should be provided.
+
+> **Note**:
+>
+> Modified attributes do not trigger a re-evaluation of existing
+> static [apply rules](03-monitoring-basics.md#using-apply) and [group assignments](03-monitoring-basics.md#group-assign-intro).
+> Delete and re-create the objects if you require such changes or
+> consider funding [this feature request](https://github.com/Icinga/icinga2/issues/4084).
+>
+> Furthermore you cannot modify templates which have already been resolved
+> during [object creation](12-icinga2-api.md#icinga2-api-config-objects-create).
+> There are attributes which can only be set for [PUT requests](12-icinga2-api.md#icinga2-api-config-objects-create) such as `groups`
+> or `zone`. A complete list of `no_user_modify` attributes can be fetched from the [types](12-icinga2-api.md#icinga2-api-types) URL endpoint.
+
+If attributes are of the [Dictionary](17-language-reference.md#dictionary) type, you can also use the indexer format:
+
+```
+"attrs": { "vars.os": "Linux" }
+```
+
+The following example updates the `address` attribute and the custom variable `os` for the `example.localdomain` host:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/objects/hosts/example.localdomain' \
+ -d '{ "attrs": { "address": "192.168.1.2", "vars.os" : "Windows" }, "pretty": true }'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "name": "example.localdomain",
+ "status": "Attributes updated.",
+ "type": "Host"
+ }
+ ]
+}
+```
+
+To undo such modifications to specific object attributes,
+list the latter in the `restore_attrs` parameter. E.g.:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/objects/hosts/example.localdomain' \
+ -d '{ "restore_attrs": [ "address", "vars.os" ] }, "pretty": true }'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "name": "example.localdomain",
+ "status": "Attributes updated.",
+ "type": "Host"
+ }
+ ]
+}
+```
+
+Giving `attrs` with the original value would have almost the same effect.
+But in this case Icinga would still store that value as a modified attribute,
+overriding DSL/Director config (changes). In contrast, `restore_attrs` tells
+Icinga to actually forget particular modified attributes, so that changes to
+them via Director or plain config are effective again.
+
+### Deleting Objects <a id="icinga2-api-config-objects-delete"></a>
+
+You can delete objects created using the API by sending a `DELETE`
+request.
+
+ Parameters | Type | Description
+ -----------|---------|---------------
+ cascade | Boolean | **Optional.** Delete objects depending on the deleted objects (e.g. services on a host).
+
+In addition to these parameters a [filter](12-icinga2-api.md#icinga2-api-filters) should be provided.
+
+Example for deleting the host object `example.localdomain`:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X DELETE 'https://localhost:5665/v1/objects/hosts/example.localdomain?cascade=1&pretty=1'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "name": "example.localdomain",
+ "status": "Object was deleted.",
+ "type": "Host"
+ }
+ ]
+}
+```
+
+## Actions <a id="icinga2-api-actions"></a>
+
+There are several actions available for Icinga 2 provided by the `/v1/actions`
+URL endpoint. You can run actions by sending a `POST` request.
+
+The following actions are also used by [Icinga Web 2](https://icinga.com/products/icinga-web-2/):
+
+* sending check results to Icinga from scripts, remote agents, etc.
+* scheduling downtimes from external scripts or cronjobs
+* acknowledging problems
+* adding comments
+
+All actions return a 200 `OK` or an appropriate error code for each
+action performed on each object matching the supplied filter.
+
+Actions which affect the Icinga Application itself such as disabling
+notifications on a program-wide basis must be applied by updating the
+[IcingaApplication object](12-icinga2-api.md#icinga2-api-config-objects)
+called `app`.
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/objects/icingaapplications/app' \
+ -d '{ "attrs": { "enable_notifications": false } }'
+```
+
+### Unix Timestamp Handling <a id="icinga2-api-actions-unix-timestamps"></a>
+
+If you don't want to write JSON manually, especially for adding the `start_time`
+and `end_time` parameters, you can use [jo](https://github.com/jpmens/jo) to format this.
+
+```bash
+jo -p pretty=true type=Service filter="service.name==\"ping4\"" author=icingaadmin comment="IPv4 network maintenance" fixed=true start_time=$(date +%s -d "+0 hour") end_time=$(date +%s -d "+1 hour")
+```
+
+```json
+{
+ "pretty": true,
+ "type": "Service",
+ "filter": "service.name==\"ping4\"",
+ "author": "icingaadmin",
+ "comment": "IPv4 network maintenance",
+ "fixed": true,
+ "start_time": 1557414097,
+ "end_time": 1557417697
+}
+```
+
+Now wrap this into the actual curl command:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/schedule-downtime' \
+ -d "$(jo -p pretty=true type=Service filter="service.name==\"ping4\"" author=icingaadmin comment="IPv4 network maintanence" fixed=true start_time=$(date +%s -d "+0 hour") end_time=$(date +%s -d "+1 hour"))"
+```
+
+Note: This requires GNU date. On macOS, install `coreutils` from Homebrew and use `gdate`.
+
+### process-check-result <a id="icinga2-api-actions-process-check-result"></a>
+
+Process a check result for a host or a service.
+
+Send a `POST` request to the URL endpoint `/v1/actions/process-check-result`.
+
+ Parameter | Type | Description
+ ------------------ | -------------- | --------------
+ exit\_status | Number | **Required.** For services: 0=OK, 1=WARNING, 2=CRITICAL, 3=UNKNOWN, for hosts: 0=UP, 1=DOWN.
+ plugin\_output | String | **Required.** One or more lines of the plugin main output. Does **not** contain the performance data.
+ performance\_data | Array<code>&#124;</code>String | **Optional.** The performance data as array of strings. The raw performance data string can be used too.
+ check\_command | Array<code>&#124;</code>String | **Optional.** The first entry should be the check command's path, then one entry for each command line option followed by an entry for each of its arguments. Alternatively, a single string can be used.
+ check\_source | String | **Optional.** Usually the name of the `command_endpoint`.
+ execution\_start | Timestamp | **Optional.** The timestamp when a script/process started its execution.
+ execution\_end | Timestamp | **Optional.** The timestamp when a script/process ended its execution. This timestamp is used in features to determine e.g. the metric timestamp.
+ ttl | Number | **Optional.** Time-to-live duration in seconds for this check result. The next expected check result is `now + ttl` where freshness checks are executed.
+
+In addition to these parameters a [filter](12-icinga2-api.md#icinga2-api-filters) must be provided. The valid types for this action are `Host` and `Service`.
+
+Example for the service `passive-ping`:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/process-check-result' \
+-d '{ "type": "Service", "filter": "host.name==\"icinga2-master1.localdomain\" && service.name==\"passive-ping\"", "exit_status": 2, "plugin_output": "PING CRITICAL - Packet loss = 100%", "performance_data": [ "rta=5000.000000ms;3000.000000;5000.000000;0.000000", "pl=100%;80;100;0" ], "check_source": "example.localdomain", "pretty": true }'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "status": "Successfully processed check result for object 'icinga2-master1.localdomain!passive-ping'."
+ }
+ ]
+}
+```
+
+You can avoid URL encoding of white spaces in object names by using the `filter` attribute in the request body.
+
+Example for using the `Host` type and filter by the host name:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/process-check-result' \
+ -d '{ "filter": "host.name==\"example.localdomain\"", "type": "Host", "exit_status": 1, "plugin_output": "Host is not available." }'
+```
+
+
+> **Note**
+>
+> Multi-line plugin output requires the following format: The first line is treated as `short` plugin output corresponding
+> to the first line of the plugin output. Subsequent lines are treated as `long` plugin output. Please note that the
+> performance data is separated from the plugin output and has to be passed as `performance_data` attribute.
+
+### reschedule-check <a id="icinga2-api-actions-reschedule-check"></a>
+
+Reschedule a check for hosts and services. The check can be forced if required.
+
+Send a `POST` request to the URL endpoint `/v1/actions/reschedule-check`.
+
+ Parameter | Type | Description
+ -------------|-----------|--------------
+ next\_check | Timestamp | **Optional.** The next check will be run at this time. If omitted, the current time is used.
+ force | Boolean | **Optional.** Defaults to `false`. If enabled, the checks are executed regardless of time period restrictions and checks being disabled per object or on a global basis.
+
+In addition to these parameters a [filter](12-icinga2-api.md#icinga2-api-filters) must be provided. The valid types for this action are `Host` and `Service`.
+
+The example reschedules all services with the name "ping6" to immediately perform a check
+(`next_check` default), ignoring any time periods or whether active checks are
+allowed for the service (`force=true`).
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/reschedule-check' \
+ -d '{ "type": "Service", "filter": "service.name==\"ping6\"", "force": true, "pretty": true }'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "status": "Successfully rescheduled check for object 'icinga2-master1.localdomain!ping6'."
+ }
+ ]
+}
+```
+
+### send-custom-notification <a id="icinga2-api-actions-send-custom-notification"></a>
+
+Send a custom notification for hosts and services. This notification
+type can be forced to be sent to all users.
+
+Send a `POST` request to the URL endpoint `/v1/actions/send-custom-notification`.
+
+ Parameter | Type | Description
+ ----------|---------|--------------
+ author | String | **Required.** Name of the author, may be empty.
+ comment | String | **Required.** Comment text, may be empty.
+ force | Boolean | **Optional.** Default: false. If true, the notification is sent regardless of downtimes or whether notifications are enabled or not.
+
+In addition to these parameters a [filter](12-icinga2-api.md#icinga2-api-filters) must be provided. The valid types for this action are `Host` and `Service`.
+
+Example for a custom host notification announcing a global maintenance to
+host owners:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/send-custom-notification' \
+ -d '{ "type": "Host", "author": "icingaadmin", "comment": "System is going down for maintenance", "force": true, "pretty": true }'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "status": "Successfully sent custom notification for object 'host0'."
+ },
+ {
+ "code": 200.0,
+ "status": "Successfully sent custom notification for object 'host1'."
+ }
+ ]
+}
+```
+
+### delay-notification <a id="icinga2-api-actions-delay-notification"></a>
+
+Delay notifications for a host or a service.
+Note that this will only have an effect if the service stays in the same problem
+state that it is currently in. If the service changes to another state, a new
+notification may go out before the time you specify in the `timestamp` argument.
+
+Send a `POST` request to the URL endpoint `/v1/actions/delay-notification`.
+
+ Parameter | Type | Description
+ ----------|-----------|--------------
+ timestamp | Timestamp | **Required.** Delay notifications until this timestamp.
+
+In addition to these parameters a [filter](12-icinga2-api.md#icinga2-api-filters) must be provided. The valid types for this action are `Host` and `Service`.
+
+Example:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/delay-notification' \
+ -d '{ "type": "Service", "timestamp": 1446389894, "pretty": true }'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "status": "Successfully delayed notifications for object 'host0!service0'."
+ },
+ {
+ "code": 200.0,
+ "status": "Successfully delayed notifications for object 'host1!service1'."
+ }
+ ]
+}
+```
+
+### acknowledge-problem <a id="icinga2-api-actions-acknowledge-problem"></a>
+
+Allows you to acknowledge the current problem for hosts or services. By
+acknowledging the current problem, future notifications (for the same state if `sticky` is set to `false`)
+are disabled.
+
+Send a `POST` request to the URL endpoint `/v1/actions/acknowledge-problem`.
+
+ Parameter | Type | Description
+ ---------------------|-----------|--------------
+ author | String | **Required.** Name of the author, may be empty.
+ comment | String | **Required.** Comment text, may be empty.
+ expiry | Timestamp | **Optional.** If set, the acknowledgement will be removed at this timestamp.
+ sticky | Boolean | **Optional.** Whether the acknowledgement will be set until the service or host fully recovers. Defaults to `false`.
+ notify | Boolean | **Optional.** Whether a notification of the `Acknowledgement` type will be sent. Defaults to `false`.
+ persistent | Boolean | **Optional.** When the comment is of type `Acknowledgement` and this is set to `true`, the comment will remain after the acknowledgement recovers or expires. Defaults to `false`.
+
+In addition to these parameters a [filter](12-icinga2-api.md#icinga2-api-filters) must be provided. The valid types for this action are `Host` and `Service`.
+
+The following example acknowledges all services which are in a hard critical state and sends out
+a notification for them:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/acknowledge-problem' \
+ -d '{ "type": "Service", "filter": "service.state==2 && service.state_type==1", "author": "icingaadmin", "comment": "Global outage. Working on it.", "notify": true, "pretty": true }'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "status": "Successfully acknowledged problem for object 'icinga2-satellite1.localdomain!ping4'."
+ },
+ {
+ "code": 200.0,
+ "status": "Successfully acknowledged problem for object 'icinga2-satellite2.localdomain!ping4'."
+ }
+ ]
+}
+```
+
+### remove-acknowledgement <a id="icinga2-api-actions-remove-acknowledgement"></a>
+
+Removes the acknowledgements for services or hosts. Once the acknowledgement has
+been removed the next notification will be sent again.
+
+Send a `POST` request to the URL endpoint `/v1/actions/remove-acknowledgement`.
+
+ Parameter | Type | Description
+ ----------|--------|--------------
+ author | String | **Optional.** Name of the removal requestor.
+
+In addition to these parameters a [filter](12-icinga2-api.md#icinga2-api-filters) must be provided. The valid types for this action are `Host` and `Service`.
+
+The example removes all service acknowledgements:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/remove-acknowledgement' \
+ -d '{ "type": "Service", "pretty": true }'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "status": "Successfully removed acknowledgement for object 'host0!service0'."
+ },
+ {
+ "code": 200.0,
+ "status": "Successfully removed acknowledgement for object 'example2.localdomain!aws-health'."
+ }
+ ]
+}
+```
+
+### add-comment <a id="icinga2-api-actions-add-comment"></a>
+
+Adds a `comment` from an `author` to services or hosts.
+
+Send a `POST` request to the URL endpoint `/v1/actions/add-comment`.
+
+ Parameter | Type | Description
+ ----------|-----------|--------------
+ author | string | **Required.** Name of the author, may be empty.
+ comment | string | **Required.** Comment text, may be empty.
+ expiry | Timestamp | **Optional.** Comment expiry time.
+
+In addition to these parameters a [filter](12-icinga2-api.md#icinga2-api-filters) must be provided. The valid types for this action are `Host` and `Service`.
+
+The following example adds a comment for all `ping4` services:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/add-comment' \
+ -d '{ "type": "Service", "filter": "service.name==\"ping4\"", "author": "icingaadmin", "comment": "Troubleticket #123456789 opened.", "pretty": true }'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "legacy_id": 26.0,
+ "name": "icinga2-satellite1.localdomain!ping4!7e7861c8-8008-4e8d-9910-2a0bb26921bd",
+ "status": "Successfully added comment 'icinga2-satellite1.localdomain!ping4!7e7861c8-8008-4e8d-9910-2a0bb26921bd' for object 'icinga2-satellite1.localdomain!ping4'."
+ },
+ {
+ "code": 200.0,
+ "legacy_id": 27.0,
+ "name": "icinga2-satellite2.localdomain!ping4!9a4c43f5-9407-a536-18bf-4a6cc4b73a9f",
+ "status": "Successfully added comment 'icinga2-satellite2.localdomain!ping4!9a4c43f5-9407-a536-18bf-4a6cc4b73a9f' for object 'icinga2-satellite2.localdomain!ping4'."
+ }
+ ]
+}
+```
+
+### remove-comment <a id="icinga2-api-actions-remove-comment"></a>
+
+Remove the comment using its `name` attribute; returns `OK` even if the
+comment does not exist.
+**Note**: This is **not** the legacy ID but the comment name returned by
+Icinga 2 when [adding a comment](12-icinga2-api.md#icinga2-api-actions-add-comment).
+
+Send a `POST` request to the URL endpoint `/v1/actions/remove-comment`.
+
+ Parameter | Type | Description
+ ----------|--------|--------------
+ author | String | **Optional.** Name of the removal requestor.
+
+In addition to these parameters a [filter](12-icinga2-api.md#icinga2-api-filters) must be provided. The valid types for this action are `Host`, `Service` and `Comment`.
+
+Example for a simple filter using the `comment` URL parameter:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/remove-comment' \
+ -d '{ "comment": "icinga2-satellite2.localdomain!ping4!9a4c43f5-9407-a536-18bf-4a6cc4b73a9f", "pretty": true }'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "status": "Successfully removed comment 'icinga2-satellite2.localdomain!ping4!9a4c43f5-9407-a536-18bf-4a6cc4b73a9f'."
+ }
+ ]
+}
+```
+
+Example for removing all service comments using a service name filter for `ping4`:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/remove-comment' \
+ -d '{ "type": "Service", "filter": "service.name==\"ping4\"", "pretty": true }'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "status": "Successfully removed all comments for object 'icinga2-satellite1.localdomain!ping4'."
+ },
+ {
+ "code": 200.0,
+ "status": "Successfully removed all comments for object 'icinga2-satellite2.localdomain!ping4'."
+ }
+ ]
+}
+```
+
+### schedule-downtime <a id="icinga2-api-actions-schedule-downtime"></a>
+
+Schedule a downtime for hosts and services.
+
+Send a `POST` request to the URL endpoint `/v1/actions/schedule-downtime`.
+
+ Parameter | Type | Description
+ --------------|-----------|--------------
+ author | String | **Required.** Name of the author.
+ comment | String | **Required.** Comment text.
+ start\_time | Timestamp | **Required.** Timestamp marking the beginning of the downtime.
+ end\_time | Timestamp | **Required.** Timestamp marking the end of the downtime.
+ fixed | Boolean | **Optional.** Defaults to `true`. If true, the downtime is `fixed` otherwise `flexible`. See [downtimes](08-advanced-topics.md#downtimes) for more information.
+ duration | Number | **Required for flexible downtimes.** Duration of the downtime in seconds if `fixed` is set to false.
+ all\_services | Boolean | **Optional for host downtimes.** Sets downtime for [all services](12-icinga2-api.md#icinga2-api-actions-schedule-downtime-host-all-services) for the matched host objects. If `child_options` are set, all child hosts and their services will schedule a downtime too. Defaults to `false`.
+ trigger\_name | String | **Optional.** Sets the trigger for a triggered downtime. See [downtimes](08-advanced-topics.md#downtimes) for more information on triggered downtimes.
+ child\_options| String | **Optional.** Schedule child downtimes. `DowntimeNoChildren` does not do anything, `DowntimeTriggeredChildren` schedules child downtimes triggered by this downtime, `DowntimeNonTriggeredChildren` schedules non-triggered downtimes. Defaults to `DowntimeNoChildren`.
+
+In addition to these parameters a [filter](12-icinga2-api.md#icinga2-api-filters) must be provided. The valid types for this action are `Host` and `Service`.
+
+Example for scheduling a downtime for all `ping4` services:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/schedule-downtime' \
+ -d '{ "type": "Service", "filter": "service.name==\"ping4\"", "start_time": 1446388806, "end_time": 1446389806, "author": "icingaadmin", "comment": "IPv4 network maintenance", "pretty": true }'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "legacy_id": 2.0,
+ "name": "icinga2-satellite1.localdomain!ping4!ecc5fa55-a5b8-4189-a013-a5d4bb47af34",
+ "status": "Successfully scheduled downtime 'icinga2-satellite1.localdomain!ping4!ecc5fa55-a5b8-4189-a013-a5d4bb47af34' for object 'icinga2-satellite1.localdomain!ping4'."
+ },
+ {
+ "code": 200.0,
+ "legacy_id": 3.0,
+ "name": "icinga2-satellite2.localdomain!ping4!abc59032-4589-abcd-4567-ecf67856c347",
+ "status": "Successfully scheduled downtime 'icinga2-satellite2.localdomain!ping4!abc59032-4589-abcd-4567-ecf67856c347' for object 'icinga2-satellite2.localdomain!ping4'."
+ }
+ ]
+}
+```
+
+In case you want to target just a single service on a host, modify the filter
+like this:
+
+```
+"filter": "host.name==\"icinga2-satellite1.localdomain\" && service.name==\"ping4\""
+```
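+
+A flexible downtime additionally needs `fixed` set to `false` and a `duration`
+in seconds, for example (a sketch reusing the `ping4` service filter):
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/schedule-downtime' \
+ -d '{ "type": "Service", "filter": "service.name==\"ping4\"", "start_time": 1446388806, "end_time": 1446389806, "fixed": false, "duration": 600, "author": "icingaadmin", "comment": "Flexible maintenance window", "pretty": true }'
+```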
+
+#### Schedule Host Downtime(s) with all Services <a id="icinga2-api-actions-schedule-downtime-host-all-services"></a>
+
+Schedule a downtime for one (or multiple) hosts and all of their services.
+Note the `all_services` attribute.
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/schedule-downtime' \
+ -d "$(jo -p pretty=true type=Host filter="match(\"*satellite*\", host.name)" all_services=true author=icingaadmin comment="Cluster upgrade maintenance" fixed=true start_time=$(date +%s -d "+0 hour") end_time=$(date +%s -d "+1 hour"))"
+```
+
+### remove-downtime <a id="icinga2-api-actions-remove-downtime"></a>
+
+Remove the downtime using its `name` attribute; returns `OK` even if the
+downtime does not exist.
+**Note**: This is **not** the legacy ID but the downtime name returned by
+Icinga 2 when [scheduling a downtime](12-icinga2-api.md#icinga2-api-actions-schedule-downtime).
+
+Send a `POST` request to the URL endpoint `/v1/actions/remove-downtime`.
+
+ Parameter | Type | Description
+ ----------|--------|--------------
+ author | String | **Optional.** Name of the removal requestor.
+
+In addition to these parameters a [filter](12-icinga2-api.md#icinga2-api-filters) must be provided. The valid types for this action are `Host`, `Service` and `Downtime`.
+
+When removing a host downtime, service downtimes on this host are automatically deleted if they were created using
+the `all_services` option. Other downtimes created using the `child_options` option are not affected.
+
+Example for a simple filter using the `downtime` URL parameter:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/remove-downtime' \
+ -d '{ "downtime": "icinga2-satellite2.localdomain!ping4!abc59032-4589-abcd-4567-ecf67856c347", "pretty": true }'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "status": "Successfully removed downtime 'icinga2-satellite2.localdomain!ping4!abc59032-4589-abcd-4567-ecf67856c347'."
+ }
+ ]
+}
+```
+
+Example for removing all host downtimes using a host name filter for `icinga2-satellite2.localdomain`:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/remove-downtime' \
+ -d '{ "type": "Host", "filter": "host.name==\"icinga2-satellite2.localdomain\"", "pretty": true }'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "status": "Successfully removed all downtimes for object 'icinga2-satellite2.localdomain'."
+ }
+ ]
+}
+```
+
+Example for removing a downtime from a host but not the services filtered by the author name. This example uses
+filter variables explained in the [advanced filters](12-icinga2-api.md#icinga2-api-advanced-filters) chapter.
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/remove-downtime' \
+ -d $'{
+ "type": "Downtime",
+ "filter": "host.name == filterHost && !service && downtime.author == filterAuthor",
+ "filter_vars": {
+ "filterHost": "icinga2-satellite1.localdomain",
+ "filterAuthor": "icingaadmin"
+ },
+ "pretty": true
+}'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "status": "Successfully removed downtime 'icinga2-satellite1.localdomain!ecc5fa55-a5b8-ef34-abcd-a5d41234af34'."
+ }
+ ]
+}
+```
+
+### shutdown-process <a id="icinga2-api-actions-shutdown-process"></a>
+
+Shuts down Icinga. May or may not return.
+
+Send a `POST` request to the URL endpoint `/v1/actions/shutdown-process`.
+
+This action does not support a target type or filter.
+
+Example:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/shutdown-process?pretty=1'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "status": "Shutting down Icinga 2."
+ }
+ ]
+}
+```
+
+### restart-process <a id="icinga2-api-actions-restart-process"></a>
+
+Restarts Icinga. May or may not return.
+
+Send a `POST` request to the URL endpoint `/v1/actions/restart-process`.
+
+This action does not support a target type or filter.
+
+Example:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/restart-process?pretty=1'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "status": "Restarting Icinga 2."
+ }
+ ]
+}
+```
+
+### generate-ticket <a id="icinga2-api-actions-generate-ticket"></a>
+
+Generates a PKI ticket for [CSR auto-signing](06-distributed-monitoring.md#distributed-monitoring-setup-csr-auto-signing).
+This can be used in combination with satellite/client setups requesting this ticket number.
+
+> **Note**
+>
+> This must be used on the local host, or e.g. by a Puppet master.
+> Doing so remotely may result in security issues with cluster
+> trust relationships.
+
+Send a `POST` request to the URL endpoint `/v1/actions/generate-ticket`.
+
+ Parameter | Type | Description
+ --------------|-----------|--------------
+ cn | String | **Required.** The host's common name for which the ticket should be generated.
+
+Example:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/generate-ticket' \
+ -d '{ "cn": "icinga2-agent1.localdomain", "pretty": true }'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "status": "Generated PKI ticket '4f75d2ecd253575fe9180938ebff7cbca262f96e' for common name 'icinga2-agent1.localdomain'.",
+ "ticket": "4f75d2ecd253575fe9180938ebff7cbca262f96e"
+ }
+ ]
+}
+```
+
+### execute-command <a id="icinga2-api-actions-execute-command"></a>
+
+Executes a particular check/notification/event-command on a particular
+endpoint in the context of a particular checkable. Example use cases:
+
+* Test a check command without actually triggering notifications
+* Reboot a node via an event command
+* Test a notification command without actually reproducing the notification reason
+
+Send a `POST` request to the URL endpoint `/v1/actions/execute-command`.
+
+ Parameter | Type | Description
+ --------------|------------|--------------
+ ttl | Number | **Required.** The time to live of the execution expressed in seconds.
+ command_type | String | **Optional.** The command type: `CheckCommand` or `EventCommand` or `NotificationCommand`. Default: `EventCommand`
+ command       | String     | **Optional.** The command to execute. Its type must be the same as `command_type`. It can be a macro string. Default: depending on the `command_type` it's either `$check_command$`, `$event_command$` or `$notification_command$`
+ endpoint | String | **Optional.** The endpoint to execute the command on. It can be a macro string. Default: `$command_endpoint$`.
+ macros | Dictionary | **Optional.** Macro overrides. Default: `{}`
+ user | String | **Optional.** The user used for the notification command.
+ notification | String | **Optional.** The notification used for the notification command.
+
+Example:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/actions/execute-command' \
+ -d '{"type": "Service", "service": "agent!custom_service", "ttl": 15, "macros": { "command_endpoint": "master", "ls_dir": "/tmp/foo" }, "command": "custom_command", "command_type": "CheckCommand" }'
+```
+
+```json
+{
+ "results": [
+ {
+ "checkable": "agent!custom_service",
+ "code": 202.0,
+ "execution": "3541d906-9afe-4c0e-ae6d-f549ee9bb3e7",
+ "status": "Accepted"
+ }
+ ]
+}
+```
+
+You may poll the state of the execution by [querying](#icinga2-api-config-objects-query) the checkable's attribute `executions`.
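+
+A minimal sketch for such a poll, assuming the service `agent!custom_service`
+from the example above and an API user permitted to query service objects:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ 'https://localhost:5665/v1/objects/services/agent!custom_service?attrs=executions&pretty=1'
+```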
+
+## Event Streams <a id="icinga2-api-event-streams"></a>
+
+Event streams can be used to receive check results, downtimes, comments,
+acknowledgements, etc. as a "live stream" from Icinga.
+
+You can, for example, forward these events into your own backend. Process the
+metrics and correlate them with notifications and state changes, e.g. in Elasticsearch
+with the help of [Icingabeat](https://icinga.com/docs/icingabeat/latest/). Another use
+case is correlating events and creating/resolving tickets automatically in your ticket system.
+
+You can subscribe to event streams by sending a `POST` request to the URL endpoint `/v1/events`.
+The following parameters need to be specified (either as URL parameters or in a JSON-encoded message body):
+
+ Parameter | Type | Description
+ -----------|--------------|-------------
+ types | Array | **Required.** Event type(s). Multiple types as URL parameters are supported.
+ queue | String | **Required.** Unique queue name. Multiple HTTP clients can use the same queue as long as they use the same event types and filter.
+ filter | String | **Optional.** Filter for specific event attributes using [filter expressions](12-icinga2-api.md#icinga2-api-filters).
+
+### Event Stream Types <a id="icinga2-api-event-streams-types"></a>
+
+The following event stream types are available:
+
+ Type | Description
+ -----------------------|--------------
+ CheckResult | Check results for hosts and services.
+ StateChange | Host/service state changes.
+ Notification | Notification events including notified users for hosts and services.
+ AcknowledgementSet | Acknowledgement set on hosts and services.
+ AcknowledgementCleared | Acknowledgement cleared on hosts and services.
+ CommentAdded | Comment added for hosts and services.
+ CommentRemoved | Comment removed for hosts and services.
+ DowntimeAdded | Downtime added for hosts and services.
+ DowntimeRemoved | Downtime removed for hosts and services.
+ DowntimeStarted | Downtime started for hosts and services.
+ DowntimeTriggered | Downtime triggered for hosts and services.
+ ObjectCreated | Object created for all Icinga 2 objects.
+ ObjectDeleted | Object deleted for all Icinga 2 objects.
+ ObjectModified | Object modified for all Icinga 2 objects.
+
+Note: Each type requires the corresponding [API permissions](12-icinga2-api.md#icinga2-api-permissions)
+to be set.
+
+Example for all downtime events:
+
+```
+&types=DowntimeAdded&types=DowntimeRemoved&types=DowntimeTriggered
+
+-d '{ "types": ["DowntimeAdded", "DowntimeRemoved", "DowntimeTriggered"] }'
+```
+
+Example for all object events:
+
+```
+&types=ObjectCreated&types=ObjectDeleted&types=ObjectModified
+
+-d '{ "types": ["ObjectCreated", "ObjectDeleted", "ObjectModified"] }'
+```
+
+#### <a id="icinga2-api-event-streams-type-checkresult"></a> Event Stream Type: CheckResult
+
+ Name | Type | Description
+ -----------------|---------------|--------------------------
+ type | String | Event type `CheckResult`.
+ timestamp | Timestamp | Unix timestamp when the event happened.
+ host | String | [Host](09-object-types.md#objecttype-host) name.
+ service | String | [Service](09-object-types.md#objecttype-service) name. Optional if this is a host check result.
+ check\_result | CheckResult | Serialized [CheckResult](08-advanced-topics.md#advanced-value-types-checkresult) value type.
+ downtime\_depth | Number | Number of active downtimes on the checkable.
+ acknowledgement | Boolean | Whether the object is acknowledged.
+
+#### <a id="icinga2-api-event-streams-type-statechange"></a> Event Stream Type: StateChange
+
+ Name | Type | Description
+ -----------------|---------------|--------------------------
+ type | String | Event type `StateChange`.
+ timestamp | Timestamp | Unix timestamp when the event happened.
+ host | String | [Host](09-object-types.md#objecttype-host) name.
+ service | String | [Service](09-object-types.md#objecttype-service) name. Optional if this is a host state change.
+ state | Number | [Host](09-object-types.md#objecttype-host) or [service](09-object-types.md#objecttype-service) state.
+ state\_type | Number | [Host](09-object-types.md#objecttype-host) or [service](09-object-types.md#objecttype-service) state type.
+ check\_result | CheckResult | Serialized [CheckResult](08-advanced-topics.md#advanced-value-types-checkresult) value type.
+ downtime\_depth | Number | Number of active downtimes on the checkable.
+ acknowledgement | Boolean | Whether the object is acknowledged.
+
+#### <a id="icinga2-api-event-streams-type-notification"></a> Event Stream Type: Notification
+
+ Name | Type | Description
+ --------------|---------------|--------------------------
+ type | String | Event type `Notification`.
+ timestamp | Timestamp | Unix timestamp when the event happened.
+ host | String | [Host](09-object-types.md#objecttype-host) name.
+ service | String | [Service](09-object-types.md#objecttype-service) name. Optional if this is a host notification.
+ command | String | [NotificationCommand](09-object-types.md#objecttype-notificationcommand) name.
+ users | Array | List of notified [user](09-object-types.md#objecttype-user) names.
+ notification\_type | String | [$notification.type$](03-monitoring-basics.md#notification-runtime-macros) runtime macro value.
+ author | String | [$notification.author$](03-monitoring-basics.md#notification-runtime-macros) runtime macro value.
+ text | String | [$notification.comment$](03-monitoring-basics.md#notification-runtime-macros) runtime macro value.
+ check\_result | CheckResult | Serialized [CheckResult](08-advanced-topics.md#advanced-value-types-checkresult) value type.
+
+#### <a id="icinga2-api-event-streams-type-flapping"></a> Event Stream Type: Flapping
+
+ Name | Type | Description
+ ------------------|---------------|--------------------------
+ type | String | Event type `Flapping`.
+ timestamp | Timestamp | Unix timestamp when the event happened.
+ host | String | [Host](09-object-types.md#objecttype-host) name.
+ service | String | [Service](09-object-types.md#objecttype-service) name. Optional if this is a host flapping event.
+ state | Number | [Host](09-object-types.md#objecttype-host) or [service](09-object-types.md#objecttype-service) state.
+ state\_type | Number | [Host](09-object-types.md#objecttype-host) or [service](09-object-types.md#objecttype-service) state type.
+ is\_flapping | Boolean | Whether this object is flapping.
+ current\_flapping | Number | Current flapping value in percent (added in 2.8).
+ threshold\_low | Number | Low threshold in percent (added in 2.8).
+ threshold\_high | Number | High threshold in percent (added in 2.8).
+
+#### <a id="icinga2-api-event-streams-type-acknowledgementset"></a> Event Stream Type: AcknowledgementSet
+
+ Name | Type | Description
+ --------------|---------------|--------------------------
+ type | String | Event type `AcknowledgementSet`.
+ timestamp | Timestamp | Unix timestamp when the event happened.
+ host | String | [Host](09-object-types.md#objecttype-host) name.
+ service | String | [Service](09-object-types.md#objecttype-service) name. Optional if this is a host acknowledgement.
+ state | Number | [Host](09-object-types.md#objecttype-host) or [service](09-object-types.md#objecttype-service) state.
+ state\_type | Number | [Host](09-object-types.md#objecttype-host) or [service](09-object-types.md#objecttype-service) state type.
+ author | String | Acknowledgement author set via [acknowledge-problem](12-icinga2-api.md#icinga2-api-actions-acknowledge-problem) action.
+ comment | String | Acknowledgement comment set via [acknowledge-problem](12-icinga2-api.md#icinga2-api-actions-acknowledge-problem) action.
+ acknowledgement\_type | Number | 0 = None, 1 = Normal, 2 = Sticky. `sticky` can be set via [acknowledge-problem](12-icinga2-api.md#icinga2-api-actions-acknowledge-problem) action.
+ notify | Boolean | Notifications were enabled via [acknowledge-problem](12-icinga2-api.md#icinga2-api-actions-acknowledge-problem) action.
+ expiry | Timestamp | Acknowledgement expire time set via [acknowledge-problem](12-icinga2-api.md#icinga2-api-actions-acknowledge-problem) action.
+
+#### <a id="icinga2-api-event-streams-type-acknowledgementcleared"></a> Event Stream Type: AcknowledgementCleared
+
+ Name | Type | Description
+ --------------|---------------|--------------------------
+ type | String | Event type `AcknowledgementCleared`.
+ timestamp | Timestamp | Unix timestamp when the event happened.
+ host | String | [Host](09-object-types.md#objecttype-host) name.
+ service | String | [Service](09-object-types.md#objecttype-service) name. Optional if this is a host acknowledgement.
+ state | Number | [Host](09-object-types.md#objecttype-host) or [service](09-object-types.md#objecttype-service) state.
+ state\_type | Number | [Host](09-object-types.md#objecttype-host) or [service](09-object-types.md#objecttype-service) state type.
+
+#### <a id="icinga2-api-event-streams-type-commentadded"></a> Event Stream Type: CommentAdded
+
+ Name | Type | Description
+ --------------|---------------|--------------------------
+ type | String | Event type `CommentAdded`.
+ timestamp | Timestamp | Unix timestamp when the event happened.
+ comment | Dictionary | Serialized [Comment](09-object-types.md#objecttype-comment) object.
+
+#### <a id="icinga2-api-event-streams-type-commentremoved"></a> Event Stream Type: CommentRemoved
+
+ Name | Type | Description
+ --------------|---------------|--------------------------
+ type | String | Event type `CommentRemoved`.
+ timestamp | Timestamp | Unix timestamp when the event happened.
+ comment | Dictionary | Serialized [Comment](09-object-types.md#objecttype-comment) object.
+
+#### <a id="icinga2-api-event-streams-type-downtimeadded"></a> Event Stream Type: DowntimeAdded
+
+ Name | Type | Description
+ --------------|---------------|--------------------------
+ type | String | Event type `DowntimeAdded`.
+ timestamp | Timestamp | Unix timestamp when the event happened.
+ downtime | Dictionary | Serialized [Downtime](09-object-types.md#objecttype-downtime) object.
+
+#### <a id="icinga2-api-event-streams-type-downtimeremoved"></a> Event Stream Type: DowntimeRemoved
+
+ Name | Type | Description
+ --------------|---------------|--------------------------
+ type | String | Event type `DowntimeRemoved`.
+ timestamp | Timestamp | Unix timestamp when the event happened.
+ downtime | Dictionary | Serialized [Downtime](09-object-types.md#objecttype-downtime) object.
+
+
+#### <a id="icinga2-api-event-streams-type-downtimestarted"></a> Event Stream Type: DowntimeStarted
+
+ Name | Type | Description
+ --------------|---------------|--------------------------
+ type | String | Event type `DowntimeStarted`.
+ timestamp | Timestamp | Unix timestamp when the event happened.
+ downtime | Dictionary | Serialized [Downtime](09-object-types.md#objecttype-downtime) object.
+
+
+#### <a id="icinga2-api-event-streams-type-downtimetriggered"></a> Event Stream Type: DowntimeTriggered
+
+ Name | Type | Description
+ --------------|---------------|--------------------------
+ type | String | Event type `DowntimeTriggered`.
+ timestamp | Timestamp | Unix timestamp when the event happened.
+ downtime | Dictionary | Serialized [Downtime](09-object-types.md#objecttype-downtime) object.
+
+
+### Event Stream Filter <a id="icinga2-api-event-streams-filter"></a>
+
+Event streams can be filtered by attributes using the prefix `event.`.
+
+Example for the `CheckResult` type with the `exit_status` set to `2`:
+
+```
+&types=CheckResult&filter=event.check_result.exit_status==2
+
+-d '{ "types": [ "CheckResult" ], "filter": "event.check_result.exit_status==2" }'
+```
+
+Example for the `CheckResult` type with the service [matching](18-library-reference.md#global-functions-match)
+the string pattern "random\*":
+
+```
+&types=CheckResult&filter=match%28%22random*%22,event.service%29
+
+-d '{ "types": [ "CheckResult" ], "filter": "match(\"random*\", event.service)" }'
+```
+
+### Event Stream Response <a id="icinga2-api-event-streams-response"></a>
+
+The event stream response is newline-delimited: each event is sent as a single
+JSON object on its own line. The HTTP client must support long-polling and
+HTTP/1.1. HTTP/1.0 is not supported.
+
+Example:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/events' \
+ -d '{ "queue": "myqueue", "types": [ "CheckResult" ], "filter": "event.check_result.exit_status==2" }'
+```
+
+```
+{"check_result":{ ... },"host":"example.localdomain","service":"ping4","timestamp":1445421319.7226390839,"type":"CheckResult"}
+{"check_result":{ ... },"host":"example.localdomain","service":"ping4","timestamp":1445421324.7226390839,"type":"CheckResult"}
+{"check_result":{ ... },"host":"example.localdomain","service":"ping4","timestamp":1445421329.7226390839,"type":"CheckResult"}
+```
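+
+A minimal consumer sketch in plain shell, assuming `jq` is installed: it subscribes
+to the stream and prints one line per incoming check result. curl's `-N` option
+disables output buffering so events are processed as they arrive:
+
+```bash
+# Subscribe to CheckResult events and print the affected host per event (requires jq)
+curl -k -s -N -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/events' \
+ -d '{ "queue": "myqueue", "types": [ "CheckResult" ] }' \
+ | while read -r event; do
+     echo "$(echo "$event" | jq -r '.host'): new check result"
+   done
+```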
+
+## Status and Statistics <a id="icinga2-api-status"></a>
+
+Send a `GET` request to the URL endpoint `/v1/status` to retrieve status information and statistics for Icinga 2.
+
+Example:
+
+```bash
+curl -k -s -S -i -u root:icinga 'https://localhost:5665/v1/status?pretty=1'
+```
+
+```
+{
+ "results": [
+ {
+ "name": "ApiListener",
+ "perfdata": [ ... ],
+ "status": [ ... ]
+ },
+ ...
+ {
+ "name": "IcingaAplication",
+ "perfdata": [ ... ],
+ "status": [ ... ]
+ },
+ ...
+ ]
+}
+```
+
+You can limit the output by specifying a status type in the URL, e.g. `IcingaApplication`:
+
+```bash
+curl -k -s -S -i -u root:icinga 'https://localhost:5665/v1/status/IcingaApplication?pretty=1'
+```
+
+```json
+{
+ "results": [
+ {
+ "perfdata": [],
+ "status": {
+ "icingaapplication": {
+ "app": {
+ "enable_event_handlers": true,
+ "enable_flapping": true,
+ "enable_host_checks": true,
+ "enable_notifications": true,
+ "enable_perfdata": true,
+ "enable_service_checks": true,
+ "node_name": "example.localdomain",
+ "pid": 59819.0,
+ "program_start": 1443019345.093372,
+ "version": "v2.3.0-573-g380a131"
+ }
+ }
+ }
+ }
+ ]
+}
+```
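+
+If you only need a single value, a small sketch like the following extracts the
+running version from that response, assuming `jq` is available:
+
+```bash
+curl -k -s -u root:icinga 'https://localhost:5665/v1/status/IcingaApplication' \
+ | jq -r '.results[0].status.icingaapplication.app.version'
+```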
+
+## Configuration Management <a id="icinga2-api-config-management"></a>
+
+The main idea behind configuration management is that external applications
+can create configuration packages and stages based on configuration files and
+directory trees. This removes the need for additional SSH connections or similar
+workarounds to copy configuration files to Icinga 2 directly.
+
+In case you are pushing a new configuration stage to a package, Icinga 2 will
+validate the configuration asynchronously and populate a status log which
+can be fetched in a separate request. Once the validation succeeds,
+a reload is triggered by default.
+
+This functionality was primarily developed for the [Icinga Director](https://icinga.com/docs/director/latest/)
+but can be used with your own deployments too. It also solves the problem
+with certain runtime objects (zones, endpoints) and can be used to
+deploy global templates in [global cluster zones](06-distributed-monitoring.md#distributed-monitoring-global-zone-config-sync).
+
+
+### Create a Config Package <a id="icinga2-api-config-management-create-package"></a>
+
+Send a `POST` request to the URL endpoint `/v1/config/packages` with the name of the new
+config package appended to the URL path, `example-cmdb` in this example. This
+creates a new empty configuration package.
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+-X POST 'https://localhost:5665/v1/config/packages/example-cmdb?pretty=1'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "package": "example-cmdb",
+ "status": "Created package."
+ }
+ ]
+}
+```
+
+Package names with the `_` prefix are reserved for internal packages and must not be used.
+You can recognize `_api`, `_etc` and `_cluster` when querying specific objects and packages.
+
+Each configuration object stores the package source in the `package` attribute.
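+
+As a quick sketch, you can verify this by querying the `package` attribute of a deployed
+object, assuming a host named `cmdb-host` exists:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ 'https://localhost:5665/v1/objects/hosts/cmdb-host?attrs=package&pretty=1'
+```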
+
+### Create a Stage: Upload Configuration <a id="icinga2-api-config-management-create-config-stage"></a>
+
+Configuration files in packages are managed in stages. Stages provide a way
+to maintain multiple configuration versions for a package. Once a new stage
+is deployed, the content is validated and set as active stage on success.
+
+On failure, the older stage remains active, and the caller can fetch the `startup.log`
+from this stage deployment attempt to see what exactly failed. You can see that
+in the Director's deployment log.
+
+Send a `POST` request to the URL endpoint `/v1/config/stages` and add the name of an existing
+configuration package to the URL path (e.g. `example-cmdb`).
+The request body must contain the `files` attribute with the value being
+a dictionary of file targets and their content.
+
+Optional attributes include `reload` (defaults to `true`) and `activate` (defaults to `true`).
+The `reload` attribute tells Icinga 2 to reload after the stage config validation.
+The `activate` attribute tells Icinga 2 to activate the stage if it validates.
+If `activate` is set to `false`, `reload` must also be `false`.
+
+Each file path must contain one of these two directories:
+
+ Directory | Description
+ ------------|------------------------------------
+ conf.d | Local configuration directory.
+ zones.d | Configuration directory for cluster zones, each zone must be put into its own zone directory underneath. Supports the [cluster config sync](06-distributed-monitoring.md#distributed-monitoring-top-down-config-sync).
+
+Example for a local configuration in the `conf.d` directory:
+
+```
+"files": { "conf.d/host1.conf": "object Host \"local-host\" { address = \"127.0.0.1\", check_command = \"hostalive\" }" }
+```
+
+Example for a host configuration inside the `satellite` zone in the `zones.d` directory:
+
+```
+"files": { "zones.d/satellite/host2.conf": "object Host \"satellite-host\" { address = \"192.168.1.100\", check_command = \"hostalive\" }" }
+```
+
+
+The example below will create a new file called `test.conf` in the `conf.d`
+directory. Note: This example contains an error (`chec_command`). This is
+intentional.
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' -X POST \
+-d '{ "files": { "conf.d/test.conf": "object Host \"cmdb-host\" { chec_command = \"dummy\" }" }, "pretty": true }' \
+'https://localhost:5665/v1/config/stages/example-cmdb'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "package": "example-cmdb",
+ "stage": "7e7861c8-8008-4e8d-9910-2a0bb26921bd",
+ "status": "Created stage. Reload triggered."
+ }
+ ]
+}
+```
+
+The Icinga 2 API returns the `package` name this stage was created for, and also
+generates a unique name for the `stage` attribute you'll need for later requests.
+
+Icinga 2 automatically restarts the daemon in order to activate the new config stage. This
+can be disabled by setting `reload` to `false` in the request.
+If the validation for the new config stage failed, the old stage
+and its configuration objects will remain active.
+
+Activation may be inhibited even for stages that validate correctly by setting
+`activate` to `false`. This may be useful for validating the contents of a stage
+without making it active, for example in a CI (continuous integration) system.
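+
+A minimal sketch for such a validate-only upload; note that `reload` must also be
+disabled when `activate` is set to `false`:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' -X POST \
+-d '{ "files": { "conf.d/test.conf": "object Host \"cmdb-host\" { check_command = \"dummy\" }" }, "activate": false, "reload": false, "pretty": true }' \
+'https://localhost:5665/v1/config/stages/example-cmdb'
+```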
+
+> **Note**
+>
+> Old stages are not purged automatically. You can [remove stages](12-icinga2-api.md#icinga2-api-config-management-delete-config-stage) that are no longer in use.
+
+Icinga 2 creates the following files in the configuration package
+stage after configuration validation:
+
+ File | Description
+ ------------|--------------
+ status      | Contains the [configuration validation](11-cli-commands.md#config-validation) exit code (anything other than 0 indicates an error).
+ startup.log | Contains the [configuration validation](11-cli-commands.md#config-validation) output.
+
+You can [fetch these files](12-icinga2-api.md#icinga2-api-config-management-fetch-config-package-stage-files)
+in order to verify that the new configuration was deployed successfully. Please follow the chapter below
+to learn more about this.
+
+
+### List Configuration Packages and their Stages <a id="icinga2-api-config-management-list-config-packages"></a>
+
+A list of packages and their stages can be retrieved by sending a `GET` request to the URL endpoint `/v1/config/packages`.
+
+The following example contains one configuration package `example-cmdb`. The package does not currently
+have an active stage.
+
+```bash
+curl -k -s -S -i -u root:icinga 'https://localhost:5665/v1/config/packages?pretty=1'
+```
+
+```json
+{
+ "results": [
+ {
+ "active-stage": "",
+ "name": "example-cmdb",
+ "stages": [
+ "7e7861c8-8008-4e8d-9910-2a0bb26921bd"
+ ]
+ }
+ ]
+}
+```
+
+### List Configuration Package Stage Files <a id="icinga2-api-config-management-list-config-package-stage-files"></a>
+
+In order to retrieve a list of files for a stage you can send a `GET` request to
+the URL endpoint `/v1/config/stages`. You need to include
+the package name (`example-cmdb`) and stage name (`7e7861c8-8008-4e8d-9910-2a0bb26921bd`) in the URL:
+
+```bash
+curl -k -s -S -i -u root:icinga 'https://localhost:5665/v1/config/stages/example-cmdb/7e7861c8-8008-4e8d-9910-2a0bb26921bd?pretty=1'
+```
+
+```
+{
+ "results": [
+...
+ {
+ "name": "startup.log",
+ "type": "file"
+ },
+ {
+ "name": "status",
+ "type": "file"
+ },
+ {
+ "name": "conf.d",
+ "type": "directory"
+ },
+ {
+ "name": "zones.d",
+ "type": "directory"
+ },
+ {
+ "name": "conf.d/test.conf",
+ "type": "file"
+ }
+ ]
+}
+```
+
+### Fetch Configuration Package Stage Files <a id="icinga2-api-config-management-fetch-config-package-stage-files"></a>
+
+Send a `GET` request to the URL endpoint `/v1/config/files` and add
+the package name, the stage name and the relative path to the file to the URL path.
+
+> **Note**
+>
+> The returned files are plain-text instead of JSON-encoded.
+
+The following example fetches the configuration file `conf.d/test.conf`:
+
+```bash
+curl -k -s -S -i -u root:icinga 'https://localhost:5665/v1/config/files/example-cmdb/7e7861c8-8008-4e8d-9910-2a0bb26921bd/conf.d/test.conf'
+```
+
+```
+object Host "cmdb-host" { chec_command = "dummy" }
+```
+
+You can fetch a [list of existing files](12-icinga2-api.md#icinga2-api-config-management-list-config-package-stage-files)
+in a configuration stage and then specifically request their content.
+
+### Configuration Package Stage Errors <a id="icinga2-api-config-management-config-package-stage-errors"></a>
+
+Since there is no active stage for `example-cmdb` yet, as seen [here](12-icinga2-api.md#icinga2-api-config-management-list-config-packages),
+there must have been an error.
+
+In order to check for validation errors you can fetch the `startup.log` file
+by sending a `GET` request to the URL endpoint `/v1/config/files`. You must include
+the package name, stage name and the `startup.log` in the URL path.
+
+```bash
+curl -k -s -S -i -u root:icinga 'https://localhost:5665/v1/config/files/example-cmdb/7e7861c8-8008-4e8d-9910-2a0bb26921bd/startup.log'
+```
+
+```
+[...]
+critical/config: Error: Attribute 'chec_command' does not exist.
+Location:
+/var/lib/icinga2/api/packages/example-cmdb/7e7861c8-8008-4e8d-9910-2a0bb26921bd/conf.d/test.conf(1): object Host "cmdb-host" { chec_command = "dummy" }
+ ^^^^^^^^^^^^^^^^^^^^^^
+
+critical/config: 1 error
+```
+
+The output is exactly the same as known from [configuration validation](11-cli-commands.md#config-validation).
+
+> **Note**
+>
+> The returned output is plain-text instead of JSON-encoded.
+
+
+### Deleting Configuration Package Stage <a id="icinga2-api-config-management-delete-config-stage"></a>
+
+You can send a `DELETE` request to the URL endpoint `/v1/config/stages`
+in order to purge a configuration stage. You must include the package and
+stage name inside the URL path.
+
+The following example removes the failed configuration stage `7e7861c8-8008-4e8d-9910-2a0bb26921bd`
+in the `example-cmdb` configuration package:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X DELETE 'https://localhost:5665/v1/config/stages/example-cmdb/7e7861c8-8008-4e8d-9910-2a0bb26921bd?pretty=1'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "status": "Stage deleted."
+ }
+ ]
+}
+```
+
+### Deleting Configuration Package <a id="icinga2-api-config-management-delete-config-package"></a>
+
+In order to completely purge a configuration package and its stages
+you can send a `DELETE` request to the URL endpoint `/v1/config/packages`
+with the package name in the URL path.
+
+This example entirely deletes the configuration package `example-cmdb`:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' -X DELETE \
+'https://localhost:5665/v1/config/packages/example-cmdb?pretty=1'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "package": "example-cmdb",
+ "status": "Deleted package."
+ }
+ ]
+}
+```
+
+## Types <a id="icinga2-api-types"></a>
+
+You can retrieve the configuration object types by sending a `GET` request to the URL
+endpoint `/v1/types`.
+
+Each response entry in the results array contains the following attributes:
+
+ Attribute | Type | Description
+ ----------------|--------------|---------------------
+ name | String | The type name.
+ plural\_name | String | The plural type name.
+ fields | Dictionary | Available fields including details on e.g. the type and attribute accessibility.
+ abstract | Boolean | Whether objects can be instantiated for this type.
+ base            | String       | The base type (e.g. `Service` inherits fields and prototype methods from `Checkable`).
+ prototype\_keys | Array | Available prototype methods.
+
+In order to view a specific configuration object type specify its name inside the URL path:
+
+```bash
+curl -k -s -S -i -u root:icinga 'https://localhost:5665/v1/types/Object?pretty=1'
+```
+
+```json
+{
+ "results": [
+ {
+ "abstract": false,
+ "fields": {
+ "type": {
+ "array_rank": 0.0,
+ "attributes": {
+ "config": false,
+ "navigation": false,
+ "no_user_modify": false,
+ "no_user_view": false,
+ "required": false,
+ "state": false
+ },
+ "id": 0.0,
+ "type": "String"
+ }
+ },
+ "name": "Object",
+ "plural_name": "Objects",
+ "prototype_keys": [
+ "clone",
+ "notify_attribute",
+ "to_string"
+ ]
+ }
+ ]
+}
+```
+
+## Config Templates <a id="icinga2-api-config-templates"></a>
+
+Provides methods to manage configuration templates:
+
+* [querying templates](12-icinga2-api.md#icinga2-api-config-templates-query)
+
+Creation, modification and deletion of templates at runtime is not supported.
+
+### Querying Templates <a id="icinga2-api-config-templates-query"></a>
+
+You can request information about configuration templates by sending
+a `GET` query to the `/v1/templates/<type>` URL endpoint. `<type>` has
+to be replaced with the plural name of the object type you are interested
+in:
+
+```bash
+curl -k -s -S -i -u root:icinga 'https://localhost:5665/v1/templates/hosts'
+```
+
+A list of all available configuration types is available in the
+[object types](09-object-types.md#object-types) chapter.
+
+A [filter](12-icinga2-api.md#icinga2-api-filters) may be provided for this query type. The
+template object can be accessed in the filter using the `tmpl` variable. In this
+example the [match function](18-library-reference.md#global-functions-match) is used to
+check a wildcard string pattern against `tmpl.name`.
+The `filter` attribute is passed inside the request body, which requires using [X-HTTP-Method-Override](12-icinga2-api.md#icinga2-api-requests-method-override)
+here.
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -H 'X-HTTP-Method-Override: GET' -X POST \
+ 'https://localhost:5665/v1/templates/hosts' \
+ -d '{ "filter": "match(\"g*\", tmpl.name)" }'
+```
+
+Instead of using a filter you can optionally specify the template name in the
+URL path when querying a single object:
+
+```bash
+curl -k -s -S -i -u root:icinga 'https://localhost:5665/v1/templates/hosts/generic-host'
+```
+
+The result set contains the type, name as well as the location of the template.
+
+## Variables <a id="icinga2-api-variables"></a>
+
+Provides methods to manage global variables:
+
+* [querying variables](12-icinga2-api.md#icinga2-api-variables-query)
+
+### Querying Variables <a id="icinga2-api-variables-query"></a>
+
+You can request information about global variables by sending
+a `GET` query to the `/v1/variables/` URL endpoint:
+
+```bash
+curl -k -s -S -i -u root:icinga 'https://localhost:5665/v1/variables'
+```
+
+A [filter](12-icinga2-api.md#icinga2-api-filters) may be provided for this query type. The
+variable information object can be accessed in the filter using the `variable` variable.
+The `filter` attribute is passed inside the request body, which requires using [X-HTTP-Method-Override](12-icinga2-api.md#icinga2-api-requests-method-override)
+here.
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -H 'X-HTTP-Method-Override: GET' -X POST \
+ 'https://localhost:5665/v1/variables' \
+ -d '{ "filter": "variable.type in [ \"String\", \"Number\" ]" }'
+```
+
+Instead of using a filter you can optionally specify the variable name in the
+URL path when querying a single variable:
+
+```bash
+curl -k -s -S -i -u root:icinga 'https://localhost:5665/v1/variables/PrefixDir'
+```
+
+The result set contains the type, name and value of the global variable.
+
+## Debug Console <a id="icinga2-api-console"></a>
+
+You can inspect variables and execute other expressions by sending a `POST` request to the URL endpoint `/v1/console/execute-script`.
+In order to receive auto-completion suggestions, send a `POST` request to the URL endpoint `/v1/console/auto-complete-script`.
+
+> **Note**
+>
+> This functionality is used by the [debug console](11-cli-commands.md#cli-command-console). Do not use this in production, unless
+> you are aware of the fact that expressions and commands may crash the daemon, or lead into
+> unwanted behaviour. Use this URL endpoint **read-only** when needed.
+
+The following parameters need to be specified (either as URL parameters or in a JSON-encoded message body):
+
+ Parameter | Type | Description
+ -----------|--------------|-------------
+ session | String | **Optional.** The session ID. Ideally this should be a GUID or some other unique identifier.
+ command | String | **Required.** Command expression for execution or auto-completion.
+ sandboxed | Number | **Optional.** Whether runtime changes are allowed or forbidden. Defaults to disabled.
+
+The [API permission](12-icinga2-api.md#icinga2-api-permissions) `console` is required for executing
+expressions.
+
+> **Note**
+>
+> Runtime modifications via `execute-script` calls are not validated and might cause the Icinga 2
+> daemon to crash or behave in an unexpected way. Use these runtime changes at your own risk.
+
+If you specify a session identifier, the same script context can be reused for multiple requests. This allows you to, for example, set a local variable in a request and use that local variable in another request. Sessions automatically expire after a set period of inactivity (currently 30 minutes).
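+
+A minimal sketch of reusing a session across two requests: the first request stores the
+local node's name in a variable, the second one reads it back within the same session:
+
+```bash
+# First request: define a variable in the session context
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/console/execute-script?pretty=1' \
+ -d '{ "session": "bb75fd7c-c686-407d-9688-582c04227756", "command": "var myhost = NodeName" }'
+
+# Second request: the variable is still available in the same session
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/console/execute-script?pretty=1' \
+ -d '{ "session": "bb75fd7c-c686-407d-9688-582c04227756", "command": "get_host(myhost).name" }'
+```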
+
+Example for fetching the command line from the local host's last check result:
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/console/execute-script?command=get_host(NodeName).last_check_result.command&sandboxed=0&session=bb75fd7c-c686-407d-9688-582c04227756&pretty=1'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "result": [
+ "/usr/local/sbin/check_ping",
+ "-H",
+ "127.0.0.1",
+ "-c",
+ "5000,100%",
+ "-w",
+ "3000,80%"
+ ],
+ "status": "Executed successfully."
+ }
+ ]
+}
+```
+
+Example for fetching auto-completion suggestions for the `Host.` type. This works in a
+similar fashion when pressing TAB inside the [console CLI command](11-cli-commands.md#cli-command-console):
+
+```bash
+curl -k -s -S -i -u root:icinga -H 'Accept: application/json' \
+ -X POST 'https://localhost:5665/v1/console/auto-complete-script?command=Host.&sandboxed=0&session=bb75fd7c-c686-407d-9688-582c04227756&pretty=1'
+```
+
+```json
+{
+ "results": [
+ {
+ "code": 200.0,
+ "status": "Auto-completed successfully.",
+ "suggestions": [
+ "Host.type",
+ "Host.name",
+ "Host.prototype",
+ "Host.base",
+ "Host.register_attribute_handler",
+ "Host.clone",
+ "Host.notify_attribute",
+ "Host.to_string"
+ ]
+ }
+ ]
+}
+```
+
+## API Clients <a id="icinga2-api-clients"></a>
+
+Since the API's initial release in 2015, community members
+and developers have been working hard to add more REST API
+clients and integrations into DevOps tools.
+
+* [Libraries](12-icinga2-api.md#icinga2-api-clients-libraries)
+* [Status](12-icinga2-api.md#icinga2-api-clients-status)
+* [Management](12-icinga2-api.md#icinga2-api-clients-management)
+* [Event Streams](12-icinga2-api.md#icinga2-api-clients-event-streams)
+* [Actions](12-icinga2-api.md#icinga2-api-clients-actions)
+* [REST API Apps](12-icinga2-api.md#icinga2-api-clients-apps)
+
+Additional [programmatic examples](12-icinga2-api.md#icinga2-api-clients-programmatic-examples)
+will help you get started with the Icinga 2 API in your environment.
+
+### Libraries <a id="icinga2-api-clients-libraries"></a>
+
+Name | Language | Description
+------------------------------------------------------------------------------------------------|---------------|--------------------------------------------------------
+[ruby-icinga2](https://github.com/bodsch/ruby-icinga2) | Ruby | Ruby library
+[python-icinga2_api](https://github.com/KevinHonka/Icinga2_Python_API) | Python | Python library
+[python-icinga2-api](https://github.com/fmnisme/python-icinga2api) | Python | Python bindings for Icinga 2 interaction
+[python-icinga2-api-continued](https://github.com/joni1993/icinga2apic) | Python | Python bindings for Icinga 2 interaction forked and continued from fmnisme's python binding
+[go-icinga2](https://github.com/xert/go-icinga2) | Golang | Golang functions and type definitions
+[go-icinga2-api](https://github.com/lrsmith/go-icinga2-api/) | Golang | Golang implementation used inside the Terraform provider
+[go-icinga2-client](https://github.com/Nexinto/go-icinga2-client) | Golang | Golang implementation for the Rancher integration.
+[Monitoring::Icinga2::Client::REST](https://metacpan.org/release/THESEAL/Monitoring-Icinga2-Client-REST-2.0.0) | Perl | Perl bindings.
+[Icinga 2 API in PHP](https://github.com/uniwue-rz/icinga2-api) | PHP | PHP implementation. For other examples, look into Icinga Web 2 and Director.
+
+### Status <a id="icinga2-api-clients-status"></a>
+
+Name | Language | Description
+------------------------------------------------------------------------------------------------|---------------|--------------------------------------------------------
+[Dashing](https://github.com/dnsmichi/dashing-icinga2) | Ruby, HTML | Dashboard for Dashing querying the REST API for current host/service/global status
+[InfluxDB Telegraf Input](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/icinga2/README.md) | Golang | [Telegraf](https://github.com/influxdata/telegraf) is an agent written in Go for collecting, processing, aggregating, and writing metrics.
+[Icinga Slack Bot](https://github.com/bb-Ricardo/icinga-slack-bot) | Python | It can be used to interact with Icinga2 from your Slack client. It uses the Icinga2 API to get Host/Service status details. Simple status filters can be used to narrow down the returned status list.
+[Icinga 2 Slack Bot](https://github.com/mlabouardy/icinga2-slack-bot) | Golang | Query host/service details from a [Slack](https://slack.com/) channel
+[icinga2bot](https://github.com/reikoNeko/icinga2bot) | Python | [Errbot](https://errbot.io/en/latest/user_guide/setup.html) plugin to fetch status and event stream information and forward to XMPP, IRC, etc.
+[IcingaBusyLightAgent](https://github.com/stdevel/IcingaBusylightAgent) | C# | Notification Agent in Systray
+[BitBar for OSX](https://getbitbar.com/plugins/Dev/Icinga2/icinga2.24m.py) | Python | macOS tray app for highlighting the host/service status
+[Icinga 2 Multistatus](https://chrome.google.com/webstore/detail/icinga-multi-status/khabbhcojgkibdeipanmiphceeoiijal/related) | - | Chrome Extension
+[Naglite4](https://github.com/wftech/icinga2-naglite4) | Python | Naglite3 rewrite using the Icinga 2 REST API.
+[icinga-telegram-bot](https://github.com/joni1993/icinga-telegram-bot) | Python | Telegram Bot using the Icinga 2 REST API
+
+### Manage Objects <a id="icinga2-api-clients-management"></a>
+
+Name | Language | Description
+------------------------------------------------------------------------------------------------|---------------|--------------------------------------------------------
+[Icinga Director](https://icinga.com/docs/director/latest) | PHP, JS | Icinga 2 configuration interface with a nice frontend, and automated imports for nearly any source.
+[Terraform Provider](https://github.com/terraform-providers/terraform-provider-icinga2) | Golang | Register hosts from Terraform in Icinga 2. [Official docs](https://www.terraform.io/docs/providers/icinga2/index.html).
+[Kube Icinga](https://github.com/gyselroth/kube-icinga) | Typescript | Monitor Kubernetes services / resources using icinga2 (including autodiscovery support)
+[Logstash output for Icinga](https://www.icinga.com/products/integrations/elastic/) | Ruby | Forward check results and create objects from log events
+[Foreman Smart Proxy Monitoring](https://github.com/theforeman/smart_proxy_monitoring) | Ruby | Smart Proxy extension for Foreman creating and deleting hosts and services in Icinga 2
+[Rancher integration](https://github.com/Nexinto/rancher-icinga) | Golang | Registers [Rancher](https://rancher.com) resources in Icinga 2 for monitoring.
+[AWS/EC2](https://github.com/Icinga/icinga2-api-examples/tree/master/aws-ec2) | Ruby | Example script for creating and deleting AWS instances in Icinga 2
+[Ansible Host Module](https://docs.ansible.com/ansible/latest/modules/icinga2_host_module.html) | Python | In progress, [Ansible Feature](https://docs.ansible.com/ansible/latest/modules/icinga2_feature_module.html#icinga2-feature-module) is also there.
+[gocinga](https://gitlab.com/sambadevi/gocinga) | Golang | CLI Tool for Icinga, written in go
+
+### Event Streams <a id="icinga2-api-clients-event-streams"></a>
+
+Name | Language | Description
+------------------------------------------------------------------------------------------------|---------------|--------------------------------------------------------
+[Elastic Icingabeat](https://icinga.com/docs/icingabeat/latest/) | Golang | Process events and send to Elasticsearch/Logstash outputs
+[Request Tracker ticket integration](https://github.com/bytemine/icinga2rt) | Golang | Create and update RT tickets
+[Logstash input event stream](https://github.com/bobapple/logstash-input-icinga_eventstream) | Ruby | Forward events as Logstash input
+[Flapjack events](https://github.com/sol1/flapjack-icinga2) | Golang | Dumping events into Redis for Flapjack processing
+[Stackstorm integration](https://github.com/StackStorm-Exchange/stackstorm-icinga2) | Python | Processing events and fetching status information
+[NodeJS consumer](https://community.icinga.com/t/consume-api-event-stream/1010/6) | NodeJS | Example from our community :)
+
+### Actions <a id="icinga2-api-clients-actions"></a>
+
+Name | Language | Description
+------------------------------------------------------------------------------------------------|---------------|--------------------------------------------------------
+[Icinga Web 2](https://icinga.com/docs/icingaweb2/latest/) | PHP | Trigger actions via command transport
+[Logstash output for Icinga](https://www.icinga.com/products/integrations/elastic/) | Ruby | Forward check results and create objects from log events
+[OTRS SystemMonitoring](https://github.com/OTRS/SystemMonitoring) | Perl | Acknowledge problems in Icinga 2 from OTRS tickets
+[mqttwarn](https://github.com/jpmens/mqttwarn#icinga2) | Python | Forward check results from mqttwarn to Icinga 2
+[Lita handler](https://github.com/tuxmea/lita-icinga2) | Ruby | List, recheck and acknowledge through a #chatops bot called [Lita](https://github.com/litaio/lita)
+[Sakuli forwarder](http://sakuli.readthedocs.io/en/latest/forwarder-icinga2api/) | Java | Forward check results from tests from [Sakuli](https://github.com/ConSol/sakuli) to Icinga 2
+[OpsGenie actions](https://www.opsgenie.com/docs/integrations/icinga2-integration) | Golang, Java | Integrate Icinga 2 into OpsGenie
+
+
+### REST API Apps <a id="icinga2-api-clients-apps"></a>
+
+Name | Language | Description
+------------------------------------------------------------------------------------------------|---------------|--------------------------------------------------------
+Browser plugins | - | [Postman for Chrome](https://www.getpostman.com), [RESTED for Firefox](https://addons.mozilla.org/en-US/firefox/addon/rested/)
+[Postman](https://www.getpostman.com/) | - | App instead of browser plugin
+[Cocoa Rest Client](https://mmattozzi.github.io/cocoa-rest-client/) | - | macOS app
+[Paw for MacOS](https://paw.cloud) | (exported) | Paw is a full-featured HTTP client that lets you test and describe the APIs you build or consume. It has a beautiful native macOS interface to compose requests, inspect server responses, generate client code and export API definitions.
+
+
+### Programmatic Examples <a id="icinga2-api-clients-programmatic-examples"></a>
+
+The following languages are covered:
+
+* [Python](12-icinga2-api.md#icinga2-api-clients-programmatic-examples-python)
+* [Ruby](12-icinga2-api.md#icinga2-api-clients-programmatic-examples-ruby)
+* [PHP](12-icinga2-api.md#icinga2-api-clients-programmatic-examples-php)
+* [Perl](12-icinga2-api.md#icinga2-api-clients-programmatic-examples-perl)
+* [Golang](12-icinga2-api.md#icinga2-api-clients-programmatic-examples-golang)
+* [Powershell](12-icinga2-api.md#icinga2-api-clients-programmatic-examples-powershell)
+
+The [request method](12-icinga2-api.md#icinga2-api-requests) is `POST` using [X-HTTP-Method-Override: GET](12-icinga2-api.md#icinga2-api-requests-method-override)
+which allows you to send a JSON request body. The examples request specific service
+attributes joined with host attributes. `attrs` and `joins` are therefore specified
+as arrays.
+The `filter` attribute [matches](18-library-reference.md#global-functions-match)
+on all services with `ping` in their name.
+
+#### Example API Client in Python <a id="icinga2-api-clients-programmatic-examples-python"></a>
+
+The following example uses **Python** and the `requests` and `json` module:
+
+```
+# pip install requests
+# Note: the json module is part of the Python standard library.
+
+$ vim icinga.py
+
+#!/usr/bin/env python
+
+import requests, json
+
+# Replace 'localhost' with your FQDN and certificate CN
+# for TLS verification
+request_url = "https://localhost:5665/v1/objects/services"
+headers = {
+ 'Accept': 'application/json',
+ 'X-HTTP-Method-Override': 'GET'
+ }
+data = {
+ "attrs": [ "name", "state", "last_check_result" ],
+ "joins": [ "host.name", "host.state", "host.last_check_result" ],
+ "filter": "match(\"ping*\", service.name)",
+}
+
+r = requests.post(request_url,
+ headers=headers,
+ auth=('root', 'icinga'),
+ data=json.dumps(data),
+ verify="pki/icinga2-ca.crt")
+
+print "Request URL: " + str(r.url)
+print "Status code: " + str(r.status_code)
+
+if (r.status_code == 200):
+ print "Result: " + json.dumps(r.json())
+else:
+ print r.text
+ r.raise_for_status()
+
+$ python icinga.py
+```
+
+#### Example API Client in Ruby <a id="icinga2-api-clients-programmatic-examples-ruby"></a>
+
+The following example uses **Ruby** and the `rest_client` gem:
+
+```
+# gem install rest_client
+
+$ vim icinga.rb
+
+#!/usr/bin/ruby
+
+require 'rest_client'
+
+# Replace 'localhost' with your FQDN and certificate CN
+# for TLS verification
+request_url = "https://localhost:5665/v1/objects/services"
+headers = {
+ "Accept" => "application/json",
+ "X-HTTP-Method-Override" => "GET"
+}
+data = {
+ "attrs" => [ "name", "state", "last_check_result" ],
+ "joins" => [ "host.name", "host.state", "host.last_check_result" ],
+ "filter" => "match(\"ping*\", service.name)",
+}
+
+r = RestClient::Resource.new(
+ URI.encode(request_url),
+ :headers => headers,
+ :user => "root",
+ :password => "icinga",
+ :ssl_ca_file => "pki/icinga2-ca.crt")
+
+begin
+ response = r.post(data.to_json)
+rescue => e
+ response = e.response
+end
+
+puts "Status: " + response.code.to_s
+if response.code == 200
+ puts "Result: " + (JSON.pretty_generate JSON.parse(response.body))
+else
+ puts "Error: " + response
+end
+
+$ ruby icinga.rb
+```
+
+A more detailed example can be found in the [Dashing demo](https://github.com/Icinga/dashing-icinga2).
+
+#### Example API Client in PHP <a id="icinga2-api-clients-programmatic-examples-php"></a>
+
+The following example uses **PHP** and its `curl` library:
+
+```
+$ vim icinga.php
+
+#!/usr/bin/env php
+<?php
+# Replace 'localhost' with your FQDN and certificate CN
+# for TLS verification
+$request_url = "https://localhost:5665/v1/objects/services";
+$username = "root";
+$password = "icinga";
+$headers = array(
+ 'Accept: application/json',
+ 'X-HTTP-Method-Override: GET'
+);
+$data = array(
+    'attrs' => array('name', 'state', 'last_check_result'),
+    'joins' => array('host.name', 'host.state', 'host.last_check_result'),
+    'filter' => 'match("ping*", service.name)',
+);
+
+$ch = curl_init();
+curl_setopt_array($ch, array(
+ CURLOPT_URL => $request_url,
+ CURLOPT_HTTPHEADER => $headers,
+ CURLOPT_USERPWD => $username . ":" . $password,
+ CURLOPT_RETURNTRANSFER => true,
+ CURLOPT_CAINFO => "pki/icinga2-ca.crt",
+ CURLOPT_POST => count($data),
+ CURLOPT_POSTFIELDS => json_encode($data)
+));
+
+$response = curl_exec($ch);
+if ($response === false) {
+ print "Error: " . curl_error($ch) . "(" . $response . ")\n";
+}
+
+$code = curl_getinfo($ch, CURLINFO_HTTP_CODE);
+curl_close($ch);
+print "Status: " . $code . "\n";
+
+if ($code == 200) {
+ $response = json_decode($response, true);
+ print_r($response);
+}
+?>
+
+$ php icinga.php
+```
+
+#### Example API Client in Perl <a id="icinga2-api-clients-programmatic-examples-perl"></a>
+
+The following example uses **Perl** and the `Rest::Client` module:
+
+```
+# perl -MCPAN -e 'install REST::Client'
+# perl -MCPAN -e 'install JSON'
+# perl -MCPAN -e 'install MIME::Base64'
+# perl -MCPAN -e 'install Data::Dumper'
+
+$ vim icinga.pl
+
+#!/usr/bin/env perl
+
+use strict;
+use warnings;
+use REST::Client;
+use MIME::Base64;
+use JSON;
+use Data::Dumper;
+
+# Replace 'localhost' with your FQDN and certificate CN
+# for TLS verification
+my $request_host = "https://localhost:5665";
+my $userpass = "root:icinga";
+
+my $client = REST::Client->new();
+$client->setHost($request_host);
+$client->setCa("pki/icinga2-ca.crt");
+$client->addHeader("Accept", "application/json");
+$client->addHeader("X-HTTP-Method-Override", "GET");
+$client->addHeader("Authorization", "Basic " . encode_base64($userpass));
+my %json_data = (
+ attrs => ['name', 'state', 'last_check_result'],
+ joins => ['host.name', 'host.state', 'host.last_check_result'],
+ filter => 'match("ping*", service.name)',
+);
+my $data = encode_json(\%json_data);
+$client->POST("/v1/objects/services", $data);
+
+my $status = $client->responseCode();
+print "Status: " . $status . "\n";
+my $response = $client->responseContent();
+if ($status == 200) {
+ print "Result: " . Dumper(decode_json($response)) . "\n";
+} else {
+ print "Error: " . $response . "\n";
+}
+
+$ perl icinga.pl
+```
+
+
+#### Example API Client in Golang <a id="icinga2-api-clients-programmatic-examples-golang"></a>
+
+Requires the Golang build chain.
+
+```
+$ vim icinga.go
+
+package main
+
+import (
+ "bytes"
+ "crypto/tls"
+ "log"
+ "io/ioutil"
+ "net/http"
+)
+
+func main() {
+ var urlBase= "https://localhost:5665"
+ var apiUser= "root"
+ var apiPass= "icinga"
+
+ urlEndpoint := urlBase + "/v1/objects/services"
+
+ tr := &http.Transport{
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ }
+ httpClient := &http.Client{Transport: tr}
+
+ var requestBody = []byte(`{
+ "attrs": [ "name", "state", "last_check_result" ],
+ "joins": [ "host.name", "host.state", "host.last_check_result" ],
+ "filter": "match(\"ping*\", service.name)"
+ }`)
+
+ req, err := http.NewRequest("POST", urlEndpoint, bytes.NewBuffer(requestBody))
+ req.Header.Set("Accept", "application/json")
+ req.Header.Set("X-HTTP-Method-Override", "GET")
+
+ req.SetBasicAuth(apiUser, apiPass)
+
+ resp, err := httpClient.Do(req)
+ if err != nil {
+ log.Fatal("Server error:", err)
+ return
+ }
+ defer resp.Body.Close()
+
+ log.Print("Response status:", resp.Status)
+
+ bodyBytes, _ := ioutil.ReadAll(resp.Body)
+ bodyString := string(bodyBytes)
+
+ if resp.StatusCode == http.StatusOK {
+ log.Print("Result: " + bodyString)
+ } else {
+ log.Fatal(bodyString)
+ }
+}
+```
+
+Build the binary:
+
+```bash
+go build icinga.go
+./icinga
+```
+
+#### Example API Client in Powershell <a id="icinga2-api-clients-programmatic-examples-powershell"></a>
+
+This example pins the given certificate and compares it with the certificate presented
+by Icinga 2 to establish a trusted connection.
+More info: https://stackoverflow.com/a/58494718/9397788
+
+Invoke-RestMethod with PUT is buggy in PowerShell 3.0, so at least PowerShell 4.0 is required.
+https://stackoverflow.com/questions/18278977/powershell-v3-invoke-restmethod-headers
+
+
+```
+$icingaApiHost = "icinga.master.local"
+$IcingaApiPort = 5665
+$icingaApiUser = "root"
+$icingaApiPassword = "icinga"
+
+$requestUrl = "https://{0}:{1}/v1/objects/services" -f $icingaApiHost,$IcingaApiPort
+
+
+# Put the certificate from your master (/etc/icinga2/pki/*.crt) here.
+# You will get it with "openssl s_client -connect <master>:5665" too.
+
+$cert64=@"
+ -----BEGIN CERTIFICATE-----
+ MIIE5TCCAs2gAwIBAgIBAjANBgkqhkiG9w0BAQsFADAUMRIwEAYDVQQDDAlJY2lu
+ Z2EgQ0EwHhcNMTYwNzA3MDYxOTM4WhcNMzEwNzA0MDYxOTM4WjAiMSAwHgYDVQQD
+ DBdpY2luZ2EuZXh0ZXJuMS56bXQuaW5mbzCCAiIwDQYJKoZIhvcNAQEBBQADggIP
+ ADCCAgoCggIBAJ2/ufxCb1m8PbUCxLkZqZNLxZ/vpulOcKmOGYm6VBWbOXQA50on
+ IewnMRUDGF9DHajLk1nyUu1TyKxGzBbja+06/kVd/8Muv0MUNF6iC1U3F3h0W9da
+ kk5rK1L+A534csHCFcG3dZkbdOMrh5hy4kMf9c2FEpviL54Fo4e+b3ZJFA6rv5D9
+ 7LzaxfLcsMwXIZ/WRnxjjfnA+RenHeYLlNM8Uk3vqI6tBc1qpFzFeRWMbclFzSLN
+ 5x+J6cuyFjVi+Vv8c6SU6W3ykw8Vvq1QQUixl9lwxqNJNsWWfGR8ycmFiv1ZYxiu
+ HpmuLslExZ2qxdGe/raMBEOGgVsUTDZNyTm/9TxgOa3m9cv3R0YIFUmfoBQ3d51S
+ wburJG2eC0ETpnt0TptwMdTfL+HYVWB71djg2Pb8R3vldnhFVpy9vyx3FyHoN7ZQ
+ h7+r6HK2jpwWo7/jK9ExpglVoV8vUbNYqXiA/lZGEkT3YLwTyUhqXKei3Xu2CGGR
+ UId6fAj6OWk9TLW+OaR9BcS74mpiTWNDlbEP+/LQnUhte8scX5cSqBzy4vpuG1G+
+ NGDbYcG4xn6Pc6qt/QddKU/pB/GbJv9SlHU8SjSt09oG9GtuXVjPoZX5msi3NmMy
+ DpAcab5Lx4MgOS/GwRLRI3IjZ3ZK+UkLvRgesSH5/JPUIgfTdr/Eg5dVAgMBAAGj
+ NDAyMAwGA1UdEwEB/wQCMAAwIgYDVR0RBBswGYIXaWNpbmdhLmV4dGVybjEuem10
+ LmluZm8wDQYJKoZIhvcNAQELBQADggIBAEpEJt35KZuvDzU/xrVaVC3ct6CHmXOh
+ DDj5PdwkYtO0vw9WE7aVc88Fs6uhW2BxFkLvm7TpJ6g05egtBozHYrhTEir/fPna
+ rVAD9wEQU6KuSayeToXlgWhKDRAAv1lrQwU4xAAdJP8faxQGc7nAwN/h0g14UTmU
+ LSkyJU4a+1SkEUOs2YCq9vChS3MowO+6I35e98dIA1swHLeQ/QJowspODQvi6pGX
+ VH8FaaqfGwhv+gMwDoAW9hB74VZXO8I3mueZUccPiJXlaojx5hpaHRNRvpdBPclA
+ HHLRQniEOkai2Wg2cft/wq6/fYLE/yv5ej15MNyt3Wjj41DEK5B/bvmN/chOrZlv
+ 8rh3ek12ngVtXF+Jcmfsij8+hj/IOM6SeELtW+c0KRaPoVR7oR9o6ce/dyfiw6Hv
+ iQsAV6x//kytpRnUY3VAH4QTJzQ5bgz1Cwr6H+cWE2ca4MHCtPYaZnDiOv4b/Yz7
+ 97Nrc7QPGewMl0hYeykpLP2hBJldw01NXhztuq1j38vYY38lKCN6v1INUujEUZg7
+ NwgfHUvJmGIE/fwLAvP7do8gf+1MGPEimsgvias5YtDtrEOz7K/oF3Qgk3sepwAz
+ XXlNLnJAY4p0d/sgCCFQnstQMM95X0Y6cfITzkz3HIUcNF2sbvVnn8xHi0TSH/8J
+ tPLHO1xOLz7N
+ -----END CERTIFICATE-----
+"@
+
+# register callback for comparing the certificate
+function set-SSLCertificate {
+ param(
+ $Cert
+ )
+
+ if (-not("validateCert" -as [type])) {
+ add-type -TypeDefinition @"
+ using System.Net.Security;
+ using System.Security.Cryptography.X509Certificates;
+
+ public static class ValidateCert {
+ static X509Certificate2 MyCert;
+
+ public static bool Validate(object sender,
+ X509Certificate cert,
+ X509Chain chain,
+ SslPolicyErrors sslPolicyErrors) {
+ if (MyCert.Equals(cert)) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ public static RemoteCertificateValidationCallback GetDelegate(X509Certificate2 Cert) {
+ MyCert = Cert;
+ return new RemoteCertificateValidationCallback(ValidateCert.Validate);
+ }
+ }
+"@
+ }
+ [System.Net.ServicePointManager]::ServerCertificateValidationCallback = [validateCert]::GetDelegate($Cert)
+}
+
+# convert base64 based certificate to X509 certificate
+function get-x509 {
+ param(
+ [string]
+ $Cert64
+ )
+
+ $CertBin=[System.Convert]::FromBase64String(($Cert64.Trim(" ") -replace "-.*-",""))
+
+ Write-Host ($Cert64.Trim(" ") -replace "-.*-","")
+
+ [System.Security.Cryptography.X509Certificates.X509Certificate2]$CertBin
+}
+
+# Allow TLS 1.2. Old powershell (.net) uses TLS 1.0 only. Icinga2 >2.10 needs TLS 1.2
+[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.SecurityProtocolType]'Ssl3,Tls,Tls11,Tls12'
+
+$SecPass = ConvertTo-SecureString $icingaApiPassword -AsPlainText -Force
+$Cred = New-Object System.Management.Automation.PSCredential($icingaApiUser, $SecPass)
+
+$Cert = get-x509 $Cert64
+set-SSLCertificate $Cert
+
+$httpHeaders = @{
+ "X-HTTP-Method-Override" = "GET"
+ "accept" = "application/json"
+}
+
+$attrs = @( "name", "state", "last_check_result" )
+$joins = @( "host.name", "host.state", "host.last_check_result")
+$filter = 'match("ping*", service.name)'
+
+$data = @{
+ "attrs" = $attrs
+ "joins" = $joins
+ "filter" = $filter
+}
+
+$result = Invoke-RestMethod -Uri $requestUrl -Method "POST" -Body (ConvertTo-Json -InputObject $data) -Credential $Cred -ContentType "application/json" -Headers $httpHeaders
+
+foreach ($s in $result.results) {
+    Write-Host "Service" $s.attrs.name "on Host" $s.joins.host.name "State" $s.attrs.state "Output:" $s.attrs.last_check_result.output
+    # Debug output: dump the full attrs and joined host object as JSON
+    Write-Host "Debug: Attributes" ($s.attrs | ConvertTo-Json)
+    Write-Host "Debug: Joins Host" ($s.joins.host | ConvertTo-Json)
+    Write-Host "`n"
+}
+```
+
+Run the PowerShell ISE as administrator and execute the script while you develop and test it.
+
+![Icinga 2 API Windows Powershell ISE Script](images/api/icinga2_api_powershell_ise.png)
+
+
+Alternatively, save the code and run it in PowerShell:
+
+```
+.\icinga.ps1
+```
diff --git a/doc/13-addons.md b/doc/13-addons.md
new file mode 100644
index 0000000..953b7f0
--- /dev/null
+++ b/doc/13-addons.md
@@ -0,0 +1,258 @@
+# Icinga 2 Addons and Integrations <a id="addons"></a>
+
+For an up-to-date overview of all integrations and modules,
+please visit [https://icinga.com/products/](https://icinga.com/products/).
+
+## Syntax Highlighting <a id="configuration-syntax-highlighting"></a>
+
+Icinga 2 provides configuration examples for syntax highlighting using the `vim` and `nano` editors.
+
+### Using Vim <a id="configuration-syntax-highlighting-vim"></a>
+
+Install the package `vim-icinga2` with your distribution's package manager.
+
+Ensure that syntax highlighting is enabled, e.g. by editing the user's `vimrc`
+configuration file:
+
+```
+# vim ~/.vimrc
+syntax on
+```
+
+Test it:
+
+```bash
+vim /etc/icinga2/conf.d/templates.conf
+```
+
+![Vim with syntax highlighting](images/addons/vim-syntax.png "Vim with Icinga 2 syntax highlighting")
+
+
+### Using Nano <a id="configuration-syntax-highlighting-nano"></a>
+
+Install the package `nano-icinga2` with your distribution's package manager.
+
+**Note:** On Debian, Ubuntu and Raspbian, the syntax files are installed with the `icinga2-common` package already.
+
+Copy the `/etc/nanorc` sample file to your home directory.
+
+```bash
+cp /etc/nanorc ~/.nanorc
+```
+
+Include the `icinga2.nanorc` file.
+
+```
+$ vim ~/.nanorc
+
+## Icinga 2
+include "/usr/share/nano/icinga2.nanorc"
+```
+
+Test it:
+
+```bash
+nano /etc/icinga2/conf.d/templates.conf
+```
+
+![Nano with syntax highlighting](images/addons/nano-syntax.png "Nano with Icinga 2 syntax highlighting")
+
+## Icinga Reporting <a id="addons-reporting"></a>
+
+The [Icinga Reporting Module](https://icinga.com/docs/reporting/latest/)
+is the framework and foundation we created to handle data collected
+by Icinga 2 and other data providers. By definition Icinga Reporting does not collect
+or calculate any data. The framework processes usable data from data providers such as
+Icinga’s IDO or Icinga Web 2 modules and makes it available in different formats.
+
+It can display the data directly within the Icinga web interface or export it to PDF,
+JSON or CSV format. With scheduled reports you can receive the prepared data periodically
+via email.
+
+![Icinga Reporting](images/addons/icinga_reporting.png)
+
+Follow along in this [hands-on blog post](https://icinga.com/2019/06/17/icinga-reporting-hands-on/).
+
+
+## Graphs and Metrics <a id="addons-graphs-metrics"></a>
+
+### Graphite <a id="addons-graphing-graphite"></a>
+
+[Graphite](https://graphite.readthedocs.org/en/latest/) is a time-series database
+storing collected metrics and making them available through RESTful APIs
+and web interfaces.
+
+Graphite consists of 3 software components:
+
+* carbon -- a Twisted daemon that listens for time-series data
+* whisper -- a simple database library for storing time-series data (similar in design to RRD)
+* graphite webapp -- a Django webapp that renders graphs on-demand using Cairo
+
+You need to install Graphite first, then proceed with configuring it in Icinga 2.
+
+Use the [GraphiteWriter](14-features.md#graphite-carbon-cache-writer) feature
+for sending real-time metrics from Icinga 2 to Graphite.
+
+```bash
+icinga2 feature enable graphite
+```
+
+A popular alternative frontend for Graphite is [Grafana](https://grafana.org).
+
+Integration in Icinga Web 2 is possible by installing the official [graphite module](https://icinga.com/docs/graphite/latest/).
+
+![Icinga Web 2 Detail View with Graphite](images/addons/icingaweb2_graphite.png)
+
+
+### InfluxDB <a id="addons-graphing-influxdb"></a>
+
+[InfluxDB](https://influxdb.com) is a time series, metrics, and analytics database.
+It’s written in Go and has no external dependencies.
+
+Use the [InfluxdbWriter](14-features.md#influxdb-writer) feature
+for sending real-time metrics from Icinga 2 to InfluxDB v1.
+
+```bash
+icinga2 feature enable influxdb
+```
+
+Use the [Influxdb2Writer](14-features.md#influxdb-writer) feature
+for sending real-time metrics from Icinga 2 to InfluxDB v2.
+
+```bash
+icinga2 feature enable influxdb2
+```
+
+A popular frontend for InfluxDB is [Grafana](https://grafana.org).
+
+Integration in Icinga Web 2 is possible by installing the community [Grafana module](https://github.com/Mikesch-mp/icingaweb2-module-grafana).
+
+![Icinga Web 2 Detail View with Grafana](images/addons/icingaweb2_grafana.png)
+
+
+### PNP <a id="addons-graphing-pnp"></a>
+
+[PNP](https://www.pnp4nagios.org) is a graphing addon which adds a graphical representation of the performance data collected
+by the monitoring plugins. The data is stored as RRD (round-robin database) files.
+
+Use your distribution's package manager to install the `pnp4nagios` package.
+
+If you're planning to use it, configure it to use the
+[bulk mode with npcd and npcdmod](https://docs.pnp4nagios.org/pnp-0.6/modes#bulk_mode_with_npcd_and_npcdmod)
+in combination with Icinga 2's [PerfdataWriter](14-features.md#writing-performance-data-files). NPCD collects the performance
+data files which Icinga 2 generates.
+
+Enable the performance data writer in Icinga 2:
+
+```bash
+icinga2 feature enable perfdata
+```
+
+Configure npcd to use the performance data created by Icinga 2:
+
+```bash
+vim /etc/pnp4nagios/npcd.cfg
+```
+
+Set `perfdata_spool_dir = /var/spool/icinga2/perfdata` and restart the `npcd` daemon.
+
+There's also an Icinga Web 2 module for direct PNP graph integration
+available at [Icinga Exchange](https://exchange.icinga.com/icinga/PNP).
+
+## Visualization <a id="addons-visualization"></a>
+
+### Maps <a id="addons-visualization-maps"></a>
+
+This community module displays host objects as markers on OpenStreetMap in Icinga Web 2.
+It uses the data provided by the monitoring module and as such the [DB IDO](14-features.md#db-ido)
+from Icinga 2.
+
+If you configure multiple hosts with the same coordinates, i.e. servers in a datacenter, a clustered view is rendered.
+
+Check the [Map module docs](https://github.com/nbuchwitz/icingaweb2-module-map) for more details on
+installation, configuration and integration.
+
+![Icinga Web 2 Maps](images/addons/icingaweb2_maps.png)
+
+### Business Process <a id="addons-business-process"></a>
+
+Create top-level views of your applications in a graphical editor.
+Rules express dependencies between existing hosts and services and
+let you alert on application level. Business processes are displayed
+in a tree or list overview and can be added to any dashboard.
+
+![Icinga Web 2 Business Process](images/addons/icingaweb2_businessprocess.png)
+
+Read more [here](https://icinga.com/products/icinga-business-process-modelling/).
+
+### Certificate Monitoring <a id="addons-visualization-certificate-monitoring"></a>
+
+Monitor your certificates in an efficient and comfortable way. Be aware of required
+actions and view all details at a glance.
+
+![Icinga Certificate Monitoring](images/addons/icinga_certificate_monitoring.png)
+
+Read more [here](https://icinga.com/products/icinga-certificate-monitoring/)
+and [here](https://icinga.com/2019/06/03/monitoring-automation-with-icinga-certificate-monitoring/).
+
+### Dashing Dashboard <a id="addons-visualization-dashing-dashboard"></a>
+
+The [Icinga 2 dashboard](https://github.com/dnsmichi/dashing-icinga2) is built
+on top of Dashing and uses the [REST API](12-icinga2-api.md#icinga2-api) to visualize what's going
+on with your monitoring. It combines several popular widgets and provides development
+instructions for your own implementation.
+
+The dashboard also allows you to embed the [Icinga Web 2](https://icinga.com/products/icinga-web-2/)
+host and service problem lists as an iframe.
+
+![Dashing dashboard](images/addons/dashing_icinga2.png)
+
+
+## Log Monitoring <a id="log-monitoring"></a>
+
+Using [Logstash](https://www.elastic.co/guide/en/logstash/current/introduction.html) or
+[Graylog](https://www.graylog.org) in your infrastructure and correlating events with your monitoring
+is even simpler these days.
+
+* Use the `GelfWriter` feature to write Icinga 2's check and notification events to Graylog or Logstash.
+* Configure the Logstash `nagios` output to send passive traps to Icinga 2 using the external command pipe.
+* Execute a plugin to check Graylog alert streams.
+
+More details can be found in [this blog post](https://icinga.com/2014/12/02/team-icinga-at-osmc-2014/).
+
+## Notification Scripts and Interfaces <a id="notification-scripts-interfaces"></a>
+
+There's a variety of resources available, for example different notification scripts such as:
+
+* E-Mail ([examples](03-monitoring-basics.md#alert-notifications) provided)
+* SMS
+* Pager (XMPP, etc.)
+* Twitter
+* IRC
+* Ticket systems
+* etc.
+
+Blog posts and howtos:
+
+* [Environmental Monitoring and Alerting](https://icinga.com/2019/09/02/environmental-monitoring-and-alerting-via-text-message/)
+
+Additionally external services can be [integrated with Icinga 2](https://icinga.com/products/integrations/):
+
+* [Pagerduty](https://icinga.com/products/integrations/pagerduty/)
+* [VictorOps](https://icinga.com/products/integrations/victorops/)
+* [StackStorm](https://icinga.com/products/integrations/stackstorm/)
+
+More information can be found on the [Icinga Website](https://icinga.com/).
+
+## Configuration Management Tools <a id="configuration-tools"></a>
+
+Check out these specific integrations:
+
+* [Ansible Roles](https://icinga.com/products/integrations/)
+* [Puppet Module](https://icinga.com/products/integrations/puppet/)
+* [Chef Cookbook](https://icinga.com/products/integrations/chef/)
+
+If you're looking for different config management integrations -- we're happy
+to add them upstream, so please get in touch with the [Icinga team](https://icinga.com/community/).
diff --git a/doc/14-features.md b/doc/14-features.md
new file mode 100644
index 0000000..19cb54b
--- /dev/null
+++ b/doc/14-features.md
@@ -0,0 +1,1438 @@
+# Icinga 2 Features <a id="icinga2-features"></a>
+
+## Logging <a id="logging"></a>
+
+Icinga 2 supports three different types of logging:
+
+* File logging
+* Syslog (on Linux/UNIX)
+* Console logging (`STDOUT` on tty)
+
+You can enable and disable additional loggers using the `icinga2 feature enable`
+and `icinga2 feature disable` commands:
+
+Feature | Description
+----------------|------------
+debuglog | Debug log (path: `/var/log/icinga2/debug.log`, severity: `debug` or higher)
+journald | Systemd Journal (severity: `warning` or higher)
+mainlog | Main log (path: `/var/log/icinga2/icinga2.log`, severity: `information` or higher)
+syslog | Syslog (severity: `warning` or higher)
+windowseventlog | Windows Event Log (severity: `information` or higher)
+
+By default, the `mainlog` feature is enabled. When running Icinga 2
+on a terminal, log messages with severity `information` or higher are
+written to the console.
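+
+For example, to additionally enable the debug log and restart Icinga 2 afterwards:
+
+```bash
+# enable the debug log feature (writes to /var/log/icinga2/debug.log)
+icinga2 feature enable debuglog
+systemctl restart icinga2
+```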
+
+### Log Rotation <a id="logging-logrotate"></a>
+
+Packages provide a configuration file for [logrotate](https://linux.die.net/man/8/logrotate)
+on Linux/Unix. Typically this is installed into `/etc/logrotate.d/icinga2`
+and modifications won't be overridden on upgrade.
+
+Instead of sending the reload HUP signal, logrotate
+sends the USR1 signal to notify the Icinga daemon
+that it has rotated the log file. Icinga then reopens the
+following log files:
+
+* `/var/log/icinga2/icinga2.log` (requires `mainlog` enabled)
+* `/var/log/icinga2/debug.log` (requires `debuglog` enabled)
+* `/var/log/icinga2/error.log`
+
+By default, log files will be rotated daily.
+
+## Core Backends <a id="core-backends"></a>
+
+### REST API <a id="core-backends-api"></a>
+
+The REST API is documented [here](12-icinga2-api.md#icinga2-api) as a core feature.
+
+### Icinga DB <a id="core-backends-icingadb"></a>
+
+Icinga DB is a set of components for publishing, synchronizing and
+visualizing monitoring data in the Icinga ecosystem, consisting of:
+
+* Icinga 2 with its `icingadb` feature enabled,
+ responsible for publishing monitoring data to a Redis server, i.e. configuration and its runtime updates,
+ check results, state changes, downtimes, acknowledgements, notifications, and other events such as flapping
+* The [Icinga DB daemon](https://icinga.com/docs/icinga-db),
+ which synchronizes the data between the Redis server and a database
+* And Icinga Web with the
+ [Icinga DB Web](https://icinga.com/docs/icinga-db-web) module enabled,
+ which connects to both Redis and the database to display and work with the most up-to-date data
+
+![Icinga DB Architecture](images/icingadb/icingadb-architecture.png)
+
+To set up a Redis server and the Icinga DB feature, please follow the steps from the
+Icinga 2 [Installation](02-installation.md) guide. For the feature configuration options,
+see its [Icinga DB object type](09-object-types.md#icingadb) documentation.
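+
+Once the prerequisites from the installation guide are in place, the feature itself
+is enabled like any other feature and requires a restart:
+
+```bash
+# enable the icingadb feature which publishes monitoring data to Redis
+icinga2 feature enable icingadb
+systemctl restart icinga2
+```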
+
+## Metrics <a id="metrics"></a>
+
+Whenever a host or service check is executed, or received via the REST API,
+best practice is to provide performance data.
+
+This data is parsed by features sending metrics to time series databases (TSDB):
+
+* [Graphite](14-features.md#graphite-carbon-cache-writer)
+* [InfluxDB](14-features.md#influxdb-writer)
+* [OpenTSDB](14-features.md#opentsdb-writer)
+
+Metrics, state changes and notifications can be managed with the following integrations:
+
+* [Elastic Stack](14-features.md#elastic-stack-integration)
+* [Graylog](14-features.md#graylog-integration)
+
+
+### Graphite Writer <a id="graphite-carbon-cache-writer"></a>
+
+[Graphite](13-addons.md#addons-graphing-graphite) is a tool stack for storing
+metrics and needs to be running prior to enabling the `graphite` feature.
+
+Icinga 2 writes parsed metrics directly to Graphite's Carbon Cache
+TCP port, defaulting to `2003`.
+
+You can enable the feature using
+
+```bash
+icinga2 feature enable graphite
+```
+
+By default the [GraphiteWriter](09-object-types.md#objecttype-graphitewriter) feature
+expects the Graphite Carbon Cache to listen at `127.0.0.1` on TCP port `2003`.
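+
+If your Carbon Cache listens on a different host, a minimal sketch of the feature
+configuration (typically `/etc/icinga2/features-available/graphite.conf`; the hostname
+below is a placeholder) could look like this:
+
+```
+object GraphiteWriter "graphite" {
+  host = "graphite.example.com" // placeholder for your Carbon Cache host
+  port = 2003
+}
+```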
+
+#### Graphite Schema <a id="graphite-carbon-cache-writer-schema"></a>
+
+The current naming schema is defined as follows. The [Icinga Web 2 Graphite module](https://icinga.com/products/integrations/graphite/)
+depends on this schema.
+
+The default prefix for hosts and services is configured using
+[runtime macros](03-monitoring-basics.md#runtime-macros) like this:
+
+```
+icinga2.$host.name$.host.$host.check_command$
+icinga2.$host.name$.services.$service.name$.$service.check_command$
+```
+
+You can customize the prefix name by using the `host_name_template` and
+`service_name_template` configuration attributes.
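+
+For example, a sketch that simply spells out the defaults shown above explicitly
+(adjust the templates to your needs):
+
+```
+object GraphiteWriter "graphite" {
+  // these templates reproduce the default prefixes
+  host_name_template = "icinga2.$host.name$.host.$host.check_command$"
+  service_name_template = "icinga2.$host.name$.services.$service.name$.$service.check_command$"
+}
+```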
+
+The additional levels allow fine-grained filters and also template
+capabilities, e.g. by using the check command `disk` for specific
+graph templates in web applications rendering the Graphite data.
+
+The following characters are escaped in prefix labels:
+
+ Character | Escaped character
+ --------------|--------------------------
+ whitespace | _
+ . | _
+ \ | _
+ / | _
+
+Metric values are stored like this:
+
+```
+<prefix>.perfdata.<perfdata-label>.value
+```
+
+The following characters are escaped in performance labels
+parsed from plugin output:
+
+ Character | Escaped character
+ --------------|--------------------------
+ whitespace | _
+ \ | _
+ / | _
+ :: | .
+
+Note that labels may contain dots (`.`), allowing you to
+add more subsequent levels inside the Graphite tree.
+`::` adds support for [multi performance labels](http://my-plugin.de/wiki/projects/check_multi/configuration/performance)
+and is therefore replaced by `.`.
+
+By enabling `enable_send_thresholds` Icinga 2 automatically adds the following threshold metrics:
+
+```
+<prefix>.perfdata.<perfdata-label>.min
+<prefix>.perfdata.<perfdata-label>.max
+<prefix>.perfdata.<perfdata-label>.warn
+<prefix>.perfdata.<perfdata-label>.crit
+```
+
+By enabling `enable_send_metadata` Icinga 2 automatically adds the following metadata metrics:
+
+```
+<prefix>.metadata.current_attempt
+<prefix>.metadata.downtime_depth
+<prefix>.metadata.acknowledgement
+<prefix>.metadata.execution_time
+<prefix>.metadata.latency
+<prefix>.metadata.max_check_attempts
+<prefix>.metadata.reachable
+<prefix>.metadata.state
+<prefix>.metadata.state_type
+```
+
+Metadata metric overview:
+
+ metric | description
+ -------------------|------------------------------------------
+ current_attempt | current check attempt
+ max_check_attempts | maximum check attempts until the hard state is reached
+ reachable | checked object is reachable
+ downtime_depth | number of downtimes this object is in
+ acknowledgement | whether the object is acknowledged or not
+ execution_time | check execution time
+ latency | check latency
+ state | current state of the checked object
+ state_type | 0=SOFT, 1=HARD state
+
+The following example illustrates how to configure the storage schemas for Graphite Carbon
+Cache.
+
+```
+[icinga2_default]
+# intervals like PNP4Nagios uses by default
+pattern = ^icinga2\.
+retentions = 1m:2d,5m:10d,30m:90d,360m:4y
+```
+
+#### Graphite in Cluster HA Zones <a id="graphite-carbon-cache-writer-cluster-ha"></a>
+
+The Graphite feature supports [high availability](06-distributed-monitoring.md#distributed-monitoring-high-availability-features)
+in cluster zones since 2.11.
+
+By default, all endpoints in a zone will activate the feature and start
+writing metrics to a Carbon Cache socket. In HA-enabled scenarios,
+it is possible to set `enable_ha = true` in all feature configuration
+files. This allows each endpoint to calculate the feature authority;
+only one endpoint actively writes metrics while the other endpoints
+pause the feature.
+
+When the cluster connection breaks at some point, the remaining endpoint(s)
+in that zone will automatically resume the feature. This built-in failover
+mechanism ensures that metrics are written even if the cluster fails.
+
+The recommended way of running Graphite in this scenario is a dedicated server
+where Carbon Cache/Relay is running as a receiver.
+
+
+### InfluxDB Writer <a id="influxdb-writer"></a>
+
+Once there are new metrics available, Icinga 2 will directly write them to the
+defined InfluxDB v1/v2 HTTP API.
+
+You can enable the feature using
+
+```bash
+icinga2 feature enable influxdb
+```
+
+or
+
+```bash
+icinga2 feature enable influxdb2
+```
+
+By default the
+[InfluxdbWriter](09-object-types.md#objecttype-influxdbwriter)/[Influxdb2Writer](09-object-types.md#objecttype-influxdb2writer)
+features expect the InfluxDB daemon to listen at `127.0.0.1` on port `8086`.
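+
+If your InfluxDB v1 daemon runs on another host, a minimal sketch of the `influxdb`
+feature configuration could look like this (hostname, database and credentials are placeholders):
+
+```
+object InfluxdbWriter "influxdb" {
+  host = "influxdb.example.com" // placeholder for your InfluxDB host
+  port = 8086
+  database = "icinga2"
+  username = "icinga2"
+  password = "examplepassword"
+}
+```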
+
+Measurement names and tags are fully configurable by the end user. The Influxdb(2)Writer
+object will automatically add a `metric` tag to each data point. This correlates to the
+perfdata label. Fields (value, warn, crit, min, max, unit) are created from data if available
+and the configuration allows it. If a value associated with a tag cannot be
+resolved, it will be dropped and not sent to the target host.
+
+Backslashes are allowed in tag keys, tag values and field keys. However, they also act as
+escape characters when followed by a space or comma, and cannot be escaped themselves.
+As a result, all trailing backslashes in these fields are replaced with an underscore. This
+predominantly affects Windows paths, e.g. `C:\` becomes `C:_`.
+
+The database/bucket is assumed to exist, so this object currently makes no attempt to create it.
+
+If [SELinux](22-selinux.md#selinux) is enabled, it will not allow access for Icinga 2 to InfluxDB until the [boolean](22-selinux.md#selinux-policy-booleans)
+`icinga2_can_connect_all` is set to true as InfluxDB is not providing its own policy.
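+
+On such a system, the boolean can be set persistently, for example:
+
+```bash
+# allow Icinga 2 to connect to any port (required for InfluxDB)
+setsebool -P icinga2_can_connect_all true
+```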
+
+More configuration details can be found [here for v1](09-object-types.md#objecttype-influxdbwriter)
+and [here for v2](09-object-types.md#objecttype-influxdb2writer).
+
+#### Instance Tagging <a id="influxdb-writer-instance-tags"></a>
+
+Consider the following service check:
+
+```
+apply Service "disk" for (disk => attributes in host.vars.disks) {
+ import "generic-service"
+ check_command = "disk"
+ display_name = "Disk " + disk
+ vars.disk_partitions = disk
+ assign where host.vars.disks
+}
+```
+
+This is a typical pattern for checking individual disks, NICs, TLS certificates etc. associated
+with a host. What would be useful is to have the data points tagged with the specific instance
+for that check. This would allow you to query time series data for a check on a host and for a
+specific instance, e.g. /dev/sda. To do this, simply add the instance to the service variables:
+
+```
+apply Service "disk" for (disk => attributes in host.vars.disks) {
+ ...
+ vars.instance = disk
+ ...
+}
+```
+
+Then modify your writer configuration to add this tag to your data points if the instance variable
+is associated with the service:
+
+```
+object InfluxdbWriter "influxdb" {
+ ...
+ service_template = {
+ measurement = "$service.check_command$"
+ tags = {
+ hostname = "$host.name$"
+ service = "$service.name$"
+ instance = "$service.vars.instance$"
+ }
+ }
+ ...
+}
+```
+
+#### InfluxDB in Cluster HA Zones <a id="influxdb-writer-cluster-ha"></a>
+
+The InfluxDB feature supports [high availability](06-distributed-monitoring.md#distributed-monitoring-high-availability-features)
+in cluster zones since 2.11.
+
+By default, all endpoints in a zone will activate the feature and start
+writing metrics to the InfluxDB HTTP API. In HA-enabled scenarios,
+it is possible to set `enable_ha = true` in all feature configuration
+files. This allows each endpoint to calculate the feature authority;
+only one endpoint actively writes metrics while the other endpoints
+pause the feature.
+
+When the cluster connection breaks at some point, the remaining endpoint(s)
+in that zone will automatically resume the feature. This built-in failover
+mechanism ensures that metrics are written even if the cluster fails.
+
+The recommended way of running InfluxDB in this scenario is a dedicated server
+where the InfluxDB HTTP API or Telegraf as a proxy is running.
+
+### Elastic Stack Integration <a id="elastic-stack-integration"></a>
+
+[Icingabeat](https://icinga.com/products/integrations/elastic/) is an Elastic Beat that fetches data
+from the Icinga 2 API and sends it either directly to [Elasticsearch](https://www.elastic.co/products/elasticsearch)
+or [Logstash](https://www.elastic.co/products/logstash).
+
+More integrations:
+
+* [Logstash output](https://icinga.com/products/integrations/elastic/) for the Icinga 2 API.
+* [Logstash Grok Pattern](https://icinga.com/products/integrations/elastic/) for Icinga 2 logs.
+
+#### Elasticsearch Writer <a id="elasticsearch-writer"></a>
+
+This feature forwards check results, state changes and notification events
+to an [Elasticsearch](https://www.elastic.co/products/elasticsearch) installation over its HTTP API.
+
+The check results include parsed performance data metrics if enabled.
+
+> **Note**
+>
+> Elasticsearch 5.x or 6.x are required. This feature has been successfully tested with
+> Elasticsearch 5.6.7 and 6.3.1.
+
+
+
+Enable the feature and restart Icinga 2.
+
+```bash
+icinga2 feature enable elasticsearch
+```
+
+The default configuration expects an Elasticsearch instance running on `localhost` on port `9200`
+and writes to an index called `icinga2`.
+
+More configuration details can be found [here](09-object-types.md#objecttype-elasticsearchwriter).
+
+#### Current Elasticsearch Schema <a id="elastic-writer-schema"></a>
+
+The following event types are written to Elasticsearch:
+
+* icinga2.event.checkresult
+* icinga2.event.statechange
+* icinga2.event.notification
+
+Performance data metrics must be explicitly enabled with the `enable_send_perfdata`
+attribute.
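+
+For example, a minimal sketch of the feature configuration with performance data
+enabled (host, port and index reflect the defaults mentioned above):
+
+```
+object ElasticsearchWriter "elasticsearch" {
+  host = "127.0.0.1"
+  port = 9200
+  index = "icinga2"
+  enable_send_perfdata = true // also send parsed performance data metrics
+}
+```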
+
+Metric values are stored like this:
+
+```
+check_result.perfdata.<perfdata-label>.value
+```
+
+The following characters are escaped in perfdata labels:
+
+ Character | Escaped character
+ ------------|--------------------------
+ whitespace | _
+ \ | _
+ / | _
+ :: | .
+
+Note that perfdata labels may contain dots (`.`), allowing you to
+add more subsequent levels inside the tree.
+`::` adds support for [multi performance labels](http://my-plugin.de/wiki/projects/check_multi/configuration/performance)
+and is therefore replaced by `.`.
+
+Icinga 2 automatically adds the following threshold metrics
+if they exist:
+
+```
+check_result.perfdata.<perfdata-label>.min
+check_result.perfdata.<perfdata-label>.max
+check_result.perfdata.<perfdata-label>.warn
+check_result.perfdata.<perfdata-label>.crit
+```
+
+#### Elasticsearch in Cluster HA Zones <a id="elasticsearch-writer-cluster-ha"></a>
+
+The Elasticsearch feature supports [high availability](06-distributed-monitoring.md#distributed-monitoring-high-availability-features)
+in cluster zones since 2.11.
+
+By default, all endpoints in a zone will activate the feature and start
+writing events to the Elasticsearch HTTP API. In HA-enabled scenarios,
+it is possible to set `enable_ha = true` in all feature configuration
+files. This allows each endpoint to calculate the feature authority;
+only one endpoint actively writes events while the other endpoints
+pause the feature.
+
+When the cluster connection breaks at some point, the remaining endpoint(s)
+in that zone will automatically resume the feature. This built-in failover
+mechanism ensures that events are written even if the cluster fails.
+
+The recommended way of running Elasticsearch in this scenario is a dedicated server
+where you either have the Elasticsearch HTTP API, or a TLS secured HTTP proxy,
+or Logstash for additional filtering.
+
+### Graylog Integration <a id="graylog-integration"></a>
+
+#### GELF Writer <a id="gelfwriter"></a>
+
+The `Graylog Extended Log Format` (short: [GELF](https://docs.graylog.org/en/latest/pages/gelf.html))
+can be used to send application logs directly to a TCP socket.
+
+While it has been specified by the [Graylog](https://www.graylog.org) project as their
+[input resource standard](https://docs.graylog.org/en/latest/pages/sending_data.html), other tools such as
+[Logstash](https://www.elastic.co/products/logstash) also support `GELF` as
+[input type](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-gelf.html).
+
+You can enable the feature using
+
+```bash
+icinga2 feature enable gelf
+```
+
+By default the `GelfWriter` object expects the GELF receiver to listen at `127.0.0.1` on TCP port `12201`.
+The default `source` attribute is set to `icinga2`. You can customize that for your needs if required.
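+
+A minimal sketch overriding both defaults (hostname and source value are placeholders):
+
+```
+object GelfWriter "gelf" {
+  host = "graylog.example.com" // placeholder for your GELF receiver
+  port = 12201
+  source = "icinga2-master1"
+}
+```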
+
+Currently these events are processed:
+
+* Check results
+* State changes
+* Notifications
+
+#### Graylog/GELF in Cluster HA Zones <a id="gelf-writer-cluster-ha"></a>
+
+The Gelf feature supports [high availability](06-distributed-monitoring.md#distributed-monitoring-high-availability-features)
+in cluster zones since 2.11.
+
+By default, all endpoints in a zone will activate the feature and start
+writing events to the GELF receiver. In HA-enabled scenarios,
+it is possible to set `enable_ha = true` in all feature configuration
+files. This allows each endpoint to calculate the feature authority;
+only one endpoint actively writes events while the other endpoints
+pause the feature.
+
+When the cluster connection breaks at some point, the remaining endpoint(s)
+in that zone will automatically resume the feature. This built-in failover
+mechanism ensures that events are written even if the cluster fails.
+
+The recommended way of running Graylog in this scenario is a dedicated server
+where you have the Graylog HTTP API listening.
+
+### OpenTSDB Writer <a id="opentsdb-writer"></a>
+
+While there are some OpenTSDB collector scripts and daemons like tcollector available for
+Icinga 1.x, it's more reasonable to process the check and plugin performance data directly
+in memory in Icinga 2. Once there are new metrics available, Icinga 2 will directly
+write them to the defined TSDB TCP socket.
+
+You can enable the feature using
+
+```bash
+icinga2 feature enable opentsdb
+```
+
+By default the `OpenTsdbWriter` object expects the TSD to listen at
+`127.0.0.1` on port `4242`.
+
+The current default naming schema is:
+
+```
+icinga.host.<perfdata_metric_label>
+icinga.service.<servicename>.<perfdata_metric_label>
+```
+
+for host and service checks. The tag `host` is always applied.
+
+Icinga also sends perfdata warning, critical, minimum and maximum threshold values to OpenTSDB.
+These are stored as new OpenTSDB metric names appended with `_warn`, `_crit`, `_min`, `_max`.
+Values are only stored when the corresponding threshold exists in Icinga's perfdata.
+
+Example:
+```
+icinga.service.<servicename>.<perfdata_metric_label>
+icinga.service.<servicename>.<perfdata_metric_label>._warn
+icinga.service.<servicename>.<perfdata_metric_label>._crit
+icinga.service.<servicename>.<perfdata_metric_label>._min
+icinga.service.<servicename>.<perfdata_metric_label>._max
+```
+
+To make sure Icinga 2 writes a valid metric into OpenTSDB some characters are replaced
+with `_` in the target name:
+
+```
+\ : (and space)
+```
+
+The resulting name in OpenTSDB might look like:
+
+```
+www-01 / http-cert / response time
+icinga.http_cert.response_time
+```
+
+In addition to the performance data retrieved from the check plugin, Icinga 2 sends
+internal check statistic data to OpenTSDB:
+
+ metric | description
+ -------------------|------------------------------------------
+ current_attempt | current check attempt
+ max_check_attempts | maximum check attempts until the hard state is reached
+ reachable | checked object is reachable
+ downtime_depth | number of downtimes this object is in
+ acknowledgement | whether the object is acknowledged or not
+ execution_time | check execution time
+ latency | check latency
+ state | current state of the checked object
+ state_type | 0=SOFT, 1=HARD state
+
+While reachable, state and state_type are metrics for the host or service, the
+other metrics follow the current naming schema
+
+```
+icinga.check.<metricname>
+```
+
+with the following tags
+
+ tag | description
+ --------|------------------------------------------
+ type | the check type, one of [host, service]
+ host | hostname the check ran on
+ service | the service name (if type=service)
+
+> **Note**
+>
+> You might want to set the tsd.core.auto_create_metrics setting to `true`
+> in your opentsdb.conf configuration file.
+
+#### OpenTSDB Metric Prefix <a id="opentsdb-metric-prefix"></a>
+
+Functionality exists to modify the built-in OpenTSDB metric names that the plugin
+writes to. By default this is `icinga.host` and `icinga.service.<servicename>`.
+
+These prefixes can be modified as necessary to any arbitrary string. The prefix
+configuration also supports Icinga macros, so if you would rather use `<checkcommand>`
+or any other variable instead of `<servicename>`, you may do so.
+
+To configure OpenTSDB metric name prefixes, create or modify the `host_template` and/or
+`service_template` blocks in the `opentsdb.conf` file, to add a `metric` definition.
+These modifications go hand in hand with the **OpenTSDB Custom Tag Support** detailed below,
+and more information around macro use can be found there.
+
+Additionally, using custom Metric Prefixes or your own macros in the prefix may be
+helpful if you are using the **OpenTSDB Generic Metric** functionality detailed below.
+
+An example configuration which includes prefix name modification:
+
+```
+object OpenTsdbWriter "opentsdb" {
+ host = "127.0.0.1"
+ port = 4242
+ host_template = {
+ metric = "icinga.myhost"
+ tags = {
+ location = "$host.vars.location$"
+ checkcommand = "$host.check_command$"
+ }
+ }
+ service_template = {
+ metric = "icinga.service.$service.check_command$"
+ }
+}
+```
+
+The above configuration will output the following naming schema:
+
+```
+icinga.myhost.<perfdata_metric_label>
+icinga.service.<check_command_name>.<perfdata_metric_label>
+```
+
+Note how `<perfdata_metric_label>` is always appended in the default naming schema mode.
+
+#### OpenTSDB Generic Metric Naming Schema <a id="opentsdb-generic-metrics"></a>
+
+An alternate naming schema (`Generic Metrics`) is available where OpenTSDB metric names are more generic
+and do not include the Icinga perfdata label in the metric name. Instead,
+perfdata labels are stored in a tag `label` which is stored along with each perfdata value.
+
+This ultimately reduces the number of unique OpenTSDB metric names which may make
+querying aggregate data easier. This also allows you to store all perfdata values for a
+particular check inside one OpenTSDB metric name for each check.
+
+This alternate naming schema can be enabled by setting the following in the OpenTSDBWriter config:
+`enable_generic_metrics = true`
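+
+A minimal sketch of the writer configuration with this schema enabled
+(host and port are the defaults mentioned above):
+
+```
+object OpenTsdbWriter "opentsdb" {
+  host = "127.0.0.1"
+  port = 4242
+  enable_generic_metrics = true // store perfdata labels in the `label` tag
+}
+```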
+
+> **Tip**
+>
+> Consider using `Generic Metrics` along with the **OpenTSDB Metric Prefix** naming options
+> described above.
+
+An example of this naming schema when compared to the default is:
+
+```
+icinga.host
+icinga.service.<servicename>
+```
+
+> **Note**
+>
+> Note how `<perfdata_metric_label>` does not appear in the OpenTSDB metric name
+> when using `Generic Metrics`. Instead, a new tag `label` appears on each value written
+> to OpenTSDB which contains the perfdata label.
+
+#### Custom Tags <a id="opentsdb-custom-tags"></a>
+
+In addition to the default tags listed above, it is possible to send
+your own custom tags with your data to OpenTSDB.
+
+Note that custom tags are sent **in addition** to the default hostname,
+type and service name tags. If you do not include this section in the
+config file, no custom tags will be included.
+
+Custom tags can be custom attributes or built in attributes.
+
+Consider a host object:
+
+```
+object Host "my-server1" {
+ address = "10.0.0.1"
+ check_command = "hostalive"
+ vars.location = "Australia"
+}
+```
+
+and a service object:
+
+```
+object Service "ping" {
+ host_name = "localhost"
+ check_command = "my-ping"
+
+ vars.ping_packets = 10
+}
+```
+
+It is possible to send `vars.location` and `vars.ping_packets` along
+with performance data. Additionally, any other attribute can be sent
+as a tag, such as `check_command`.
+
+You can make use of the `host_template` and `service_template` blocks
+in the `opentsdb.conf` configuration file.
+
+An example OpenTSDB configuration file which makes use of custom tags:
+
+```
+object OpenTsdbWriter "opentsdb" {
+ host = "127.0.0.1"
+ port = 4242
+ host_template = {
+ tags = {
+ location = "$host.vars.location$"
+ checkcommand = "$host.check_command$"
+ }
+ }
+ service_template = {
+ tags = {
+ location = "$host.vars.location$"
+ pingpackets = "$service.vars.ping_packets$"
+ checkcommand = "$service.check_command$"
+ }
+ }
+}
+```
+
+The keyword the macro begins with determines which
+attributes are available in the macro context. The table below lists
+the available attributes with links to each object type.
+
+ start of macro | description
+ ---------------|------------------------------------------
+ \$host...$ | Attributes available on a [Host object](09-object-types.md#objecttype-host)
+ \$service...$ | Attributes available on a [Service object](09-object-types.md#objecttype-service)
+ \$icinga...$ | Attributes available on the [IcingaApplication object](09-object-types.md#objecttype-icingaapplication)
+
+> **Note**
+>
+> Ensure you do not name your custom attributes with a dot in the name.
+> Dots located inside a macro tell the interpreter to expand a
+> dictionary.
+>
+> Do not do this in your object configuration:
+>
+> `vars["my.attribute"]`
+>
+> as you will be unable to reference `my.attribute` because it is not a
+> dictionary.
+>
+> Instead, use underscores or another character:
+>
+> `vars.my_attribute` or `vars["my_attribute"]`
+
+
+
+#### OpenTSDB in Cluster HA Zones <a id="opentsdb-writer-cluster-ha"></a>
+
+The OpenTSDB feature supports [high availability](06-distributed-monitoring.md#distributed-monitoring-high-availability-features)
+in cluster zones since 2.11.
+
+By default, all endpoints in a zone will activate the feature and start
+writing events to the OpenTSDB listener. In HA-enabled scenarios,
+it is possible to set `enable_ha = true` in all feature configuration
+files. This allows each endpoint to calculate the feature authority;
+only one endpoint actively writes metrics while the other endpoints
+pause the feature.
+
+When the cluster connection breaks at some point, the remaining endpoint(s)
+in that zone will automatically resume the feature. This built-in failover
+mechanism ensures that metrics are written even if the cluster fails.
+
+The recommended way of running OpenTSDB in this scenario is a dedicated server
+where you have OpenTSDB running.
+
+
+### Writing Performance Data Files <a id="writing-performance-data-files"></a>
+
+PNP and Graphios use performance data collector daemons to fetch
+the current performance files for their backend updates.
+
+Therefore the Icinga 2 [PerfdataWriter](09-object-types.md#objecttype-perfdatawriter)
+feature allows you to define the output template format for hosts and services
+using Icinga 2 runtime macros.
+
+```
+host_format_template = "DATATYPE::HOSTPERFDATA\tTIMET::$icinga.timet$\tHOSTNAME::$host.name$\tHOSTPERFDATA::$host.perfdata$\tHOSTCHECKCOMMAND::$host.check_command$\tHOSTSTATE::$host.state$\tHOSTSTATETYPE::$host.state_type$"
+service_format_template = "DATATYPE::SERVICEPERFDATA\tTIMET::$icinga.timet$\tHOSTNAME::$host.name$\tSERVICEDESC::$service.name$\tSERVICEPERFDATA::$service.perfdata$\tSERVICECHECKCOMMAND::$service.check_command$\tHOSTSTATE::$host.state$\tHOSTSTATETYPE::$host.state_type$\tSERVICESTATE::$service.state$\tSERVICESTATETYPE::$service.state_type$"
+```
+
+The default templates are already provided with the Icinga 2 feature configuration
+which can be enabled using
+
+```bash
+icinga2 feature enable perfdata
+```
+
+By default all performance data files are rotated in a 15 second interval into
+the `/var/spool/icinga2/perfdata/` directory as `host-perfdata.<timestamp>` and
+`service-perfdata.<timestamp>`.
+External collectors need to parse the rotated performance data files and then
+remove the processed files.
+
+#### Perfdata Files in Cluster HA Zones <a id="perfdata-writer-cluster-ha"></a>
+
+The Perfdata feature supports [high availability](06-distributed-monitoring.md#distributed-monitoring-high-availability-features)
+in cluster zones since 2.11.
+
+By default, all endpoints in a zone will activate the feature and start
+writing metrics to the local spool directory. In HA-enabled scenarios,
+it is possible to set `enable_ha = true` in all feature configuration
+files. This allows each endpoint to calculate the feature authority;
+only one endpoint actively writes metrics while the other endpoints
+pause the feature.
+
+When the cluster connection breaks at some point, the remaining endpoint(s)
+in that zone will automatically resume the feature. This built-in failover
+mechanism ensures that metrics are written even if the cluster fails.
+
+The recommended way of running Perfdata is to mount the perfdata spool
+directory via NFS on a central server where PNP with the NPCD collector
+is running.
+
+
+
+
+
+## Deprecated Features <a id="deprecated-features"></a>
+
+### IDO Database (DB IDO) <a id="db-ido"></a>
+
+> **Note**
+>
+> This feature is DEPRECATED and may be removed in future releases.
+> Check the [roadmap](https://github.com/Icinga/icinga2/milestones).
+
+The IDO (Icinga Data Output) feature for Icinga 2 takes care of exporting all
+configuration and status information into a database. The IDO database is used
+by Icinga Web 2 as a data backend. You can either use a
+[MySQL](#ido-with-mysql) or [PostgreSQL](#ido-with-postgresql) database.
+
+#### IDO with MySQL <a id="ido-with-mysql"></a>
+
+##### Install IDO Feature <a id="installing-database-mysql-modules"></a>
+
+The next step is to install the `icinga2-ido-mysql` package using your
+distribution's package manager.
+
+###### Debian / Ubuntu
+
+```bash
+apt-get install icinga2-ido-mysql
+```
+
+!!! note
+
+ The packages provide a database configuration wizard by
+ default. You can skip the automated setup and install/upgrade the
+ database manually if you prefer.
+
+###### CentOS 7
+
+!!! info
+
+ Note that installing `icinga2-ido-mysql` is only supported on CentOS 7 as CentOS 8 is EOL.
+
+```bash
+yum install icinga2-ido-mysql
+```
+
+###### RHEL 8
+
+```bash
+dnf install icinga2-ido-mysql
+```
+
+###### RHEL 7
+
+```bash
+yum install icinga2-ido-mysql
+```
+
+###### SLES
+
+```bash
+zypper install icinga2-ido-mysql
+```
+
+###### Amazon Linux
+
+```bash
+yum install icinga2-ido-mysql
+```
+
+##### Set up MySQL database <a id="setting-up-mysql-db"></a>
+
+Set up a MySQL database for Icinga 2:
+
+```bash
+# mysql -u root -p
+
+CREATE DATABASE icinga;
+GRANT ALTER, CREATE, SELECT, INSERT, UPDATE, DELETE, DROP, CREATE VIEW, INDEX, EXECUTE ON icinga.* TO 'icinga'@'localhost' IDENTIFIED BY 'icinga';
+quit
+```
+
+Please note that the example above uses the very simple password 'icinga' (in `IDENTIFIED BY 'icinga'`).
+Please choose a better password for your installation.
+
+After creating the database you can import the Icinga 2 IDO schema using the
+following command. Enter the password for the `icinga` user at the prompt.
+
+```bash
+mysql -u icinga -p icinga < /usr/share/icinga2-ido-mysql/schema/mysql.sql
+```
+
+##### Enable the IDO MySQL feature <a id="enable-ido-mysql"></a>
+
+The package provides a new configuration file that is installed in
+`/etc/icinga2/features-available/ido-mysql.conf`. You can update
+the database credentials in this file.
+
+All available attributes are explained in the
+[IdoMysqlConnection object](09-object-types.md#objecttype-idomysqlconnection)
+chapter.
+
+Enable the `ido-mysql` feature configuration file using the `icinga2` command:
+
+```bash
+# icinga2 feature enable ido-mysql
+Module 'ido-mysql' was enabled.
+Make sure to restart Icinga 2 for these changes to take effect.
+```
+
+Restart Icinga 2.
+
+```bash
+systemctl restart icinga2
+```
+
+#### IDO with PostgreSQL <a id="ido-with-postgresql"></a>
+
+##### Install IDO Feature <a id="installing-database-postgresql-modules"></a>
+
+The next step is to install the `icinga2-ido-pgsql` package using your
+distribution's package manager.
+
+###### Debian / Ubuntu
+
+```bash
+apt-get install icinga2-ido-pgsql
+```
+
+!!! note
+
+ Upstream Debian packages provide a database configuration wizard by default.
+ You can skip the automated setup and install/upgrade the database manually
+ if you prefer that.
+
+###### CentOS 7
+
+!!! info
+
+ Note that installing `icinga2-ido-pgsql` is only supported on CentOS 7 as CentOS 8 is EOL.
+
+```bash
+yum install icinga2-ido-pgsql
+```
+
+###### RHEL 8
+
+```bash
+dnf install icinga2-ido-pgsql
+```
+
+###### RHEL 7
+
+```bash
+yum install icinga2-ido-pgsql
+```
+
+###### SLES
+
+```bash
+zypper install icinga2-ido-pgsql
+```
+
+###### Amazon Linux
+
+```bash
+yum install icinga2-ido-pgsql
+```
+
+##### Set up PostgreSQL database
+
+Set up a PostgreSQL database for Icinga 2:
+
+```bash
+cd /tmp
+sudo -u postgres psql -c "CREATE ROLE icinga WITH LOGIN PASSWORD 'icinga'"
+sudo -u postgres createdb -O icinga -E UTF8 icinga
+```
+
+!!! note
+
+ It is assumed here that your locale is set to utf-8, you may run into problems otherwise.
+
+Locate your `pg_hba.conf` configuration file, add the icinga user with `md5` as the authentication method
+and restart the PostgreSQL server. Common locations for `pg_hba.conf` are either
+`/etc/postgresql/*/main/pg_hba.conf` or `/var/lib/pgsql/data/pg_hba.conf`.
+
+```
+# icinga
+local icinga icinga md5
+host icinga icinga 127.0.0.1/32 md5
+host icinga icinga ::1/128 md5
+
+# "local" is for Unix domain socket connections only
+local all all ident
+# IPv4 local connections:
+host all all 127.0.0.1/32 ident
+# IPv6 local connections:
+host all all ::1/128 ident
+```
+
+Restart PostgreSQL:
+
+```bash
+systemctl restart postgresql
+```
+
+After creating the database and permissions you need to import the IDO database
+schema using the following command:
+
+```bash
+export PGPASSWORD=icinga
+psql -U icinga -d icinga < /usr/share/icinga2-ido-pgsql/schema/pgsql.sql
+```
+
+##### Enable the IDO PostgreSQL feature <a id="enable-ido-postgresql"></a>
+
+The package provides a new configuration file that is installed in
+`/etc/icinga2/features-available/ido-pgsql.conf`. You can update
+the database credentials in this file.
+
+All available attributes are explained in the
+[IdoPgsqlConnection object](09-object-types.md#objecttype-idopgsqlconnection)
+chapter.
+
+Enable the `ido-pgsql` feature configuration file using the `icinga2` command:
+
+```
+# icinga2 feature enable ido-pgsql
+Module 'ido-pgsql' was enabled.
+Make sure to restart Icinga 2 for these changes to take effect.
+```
+
+Restart Icinga 2.
+
+```bash
+systemctl restart icinga2
+```
+
+#### Configuration
+
+Details on the configuration can be found in the
+[IdoMysqlConnection](09-object-types.md#objecttype-idomysqlconnection) and
+[IdoPgsqlConnection](09-object-types.md#objecttype-idopgsqlconnection)
+object configuration documentation.
+
+#### DB IDO Health <a id="db-ido-health"></a>
+
+If the monitoring health indicator is critical in Icinga Web 2,
+you can use the following queries to manually check whether Icinga 2
+is actually updating the IDO database.
+
+Icinga 2 writes its current status to the `icinga_programstatus` table
+every 10 seconds. The query below checks 60 seconds into the past, which is a reasonable
+amount of time -- adjust it for your requirements. If the condition is not met,
+the query returns an empty result.
+
+> **Tip**
+>
+> Use [check plugins](05-service-monitoring.md#service-monitoring-plugins) to monitor the backend.
+
+Replace the `default` string with your instance name if different.
+
+Example for MySQL:
+
+```
+# mysql -u root -p icinga -e "SELECT status_update_time FROM icinga_programstatus ps
+ JOIN icinga_instances i ON ps.instance_id=i.instance_id
+ WHERE (UNIX_TIMESTAMP(ps.status_update_time) > UNIX_TIMESTAMP(NOW())-60)
+ AND i.instance_name='default';"
+
++---------------------+
+| status_update_time |
++---------------------+
+| 2014-05-29 14:29:56 |
++---------------------+
+```
+
+Example for PostgreSQL:
+
+```
+# export PGPASSWORD=icinga; psql -U icinga -d icinga -c "SELECT ps.status_update_time FROM icinga_programstatus AS ps
+ JOIN icinga_instances AS i ON ps.instance_id=i.instance_id
+ WHERE ((SELECT extract(epoch from status_update_time) FROM icinga_programstatus) > (SELECT extract(epoch from now())-60))
+ AND i.instance_name='default'";
+
+status_update_time
+------------------------
+ 2014-05-29 15:11:38+02
+(1 row)
+```
+
+A detailed list on the available table attributes can be found in the [DB IDO Schema documentation](24-appendix.md#schema-db-ido).
+
+#### DB IDO in Cluster HA Zones <a id="db-ido-cluster-ha"></a>
+
+The DB IDO feature supports [High Availability](06-distributed-monitoring.md#distributed-monitoring-high-availability-db-ido) in
+the Icinga 2 cluster.
+
+By default, both endpoints in a zone calculate which
+endpoint activates the feature; the other endpoint
+automatically pauses it. If the cluster connection
+breaks at some point, the paused IDO feature automatically
+does a failover.
+
+You can disable this behaviour by setting `enable_ha = false`
+in both feature configuration files.
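+
+For example, a sketch for the MySQL variant (connection settings are placeholders;
+the same attribute exists for `IdoPgsqlConnection`):
+
+```
+object IdoMysqlConnection "ido-mysql" {
+  host = "127.0.0.1"
+  user = "icinga"
+  password = "icinga"
+  database = "icinga"
+  enable_ha = false // disable the built-in HA failover for this feature
+}
+```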
+
+#### DB IDO Cleanup <a id="db-ido-cleanup"></a>
+
+Objects get deactivated when they are deleted from the configuration.
+This is visible with the `is_active` column in the `icinga_objects` table.
+Therefore all queries need to join this table and add `WHERE is_active=1` as
+condition. Deleted objects preserve their history table entries for later SLA
+reporting.
+
+Historical data isn't purged by default. You can configure the maximum
+age of data to keep via the `cleanup` configuration attribute for the
+IDO features [IdoMysqlConnection](09-object-types.md#objecttype-idomysqlconnection)
+and [IdoPgsqlConnection](09-object-types.md#objecttype-idopgsqlconnection).
+
+Example if you prefer to keep notification history for 30 days:
+
+```
+ cleanup = {
+ notifications_age = 30d
+ contactnotifications_age = 30d
+ }
+```
+
+The historical tables are populated depending on the data `categories` specified.
+Some tables are empty by default.
+
+#### DB IDO Tuning <a id="db-ido-tuning"></a>
+
+As with any application database, there are ways to optimize and tune the database performance.
+
+General tips for performance tuning:
+
+* [MariaDB KB](https://mariadb.com/kb/en/library/optimization-and-tuning/)
+* [PostgreSQL Wiki](https://wiki.postgresql.org/wiki/Performance_Optimization)
+
+Re-creation of indexes, changed column values, etc. will increase the database size. Ensure to
+add health checks for this, and monitor the trend in your Grafana dashboards.
+
+There are different approaches to optimizing the tables. Always make sure to have a
+current backup and schedule maintenance downtime for these kinds of tasks!
+
+MySQL:
+
+```
+mariadb> OPTIMIZE TABLE icinga_statehistory;
+```
+
+> **Important**
+>
+> Tables might not support optimization at runtime. This can take a **long** time.
+>
+> `Table does not support optimize, doing recreate + analyze instead`.
+
+If you want to optimize all tables in a specified database, there is a script called `mysqlcheck`.
+This also allows you to repair broken tables in an emergency.
+
+```bash
+mysqlcheck --optimize icinga
+```
+
+PostgreSQL:
+
+```
+icinga=# vacuum;
+VACUUM
+```
+
+> **Note**
+>
+> Don't use `VACUUM FULL` as this has a severe impact on performance.
+
+### Compat Log Files <a id="compat-logging"></a>
+
+> **Note**
+>
+> This feature is DEPRECATED and may be removed in future releases.
+> Check the [roadmap](https://github.com/Icinga/icinga2/milestones).
+
+The Icinga 1.x log format is provided as the `Compat Log`
+in Icinga 2 through the `CompatLogger` object.
+
+These logs are used for informational representation in
+external web interfaces parsing the logs, but also to generate
+SLA reports and trends.
+The [Livestatus](14-features.md#setting-up-livestatus) feature uses these logs
+for answering queries to historical tables.
+
+The `CompatLogger` object can be enabled with
+
+```bash
+icinga2 feature enable compatlog
+```
+
+By default, the Icinga 1.x log file called `icinga.log` is located
+in `/var/log/icinga2/compat`. Rotated log files are moved into
+`/var/log/icinga2/compat/archives`.
+
+### External Command Pipe <a id="external-commands"></a>
+
+> **Note**
+>
+> Please use the [REST API](12-icinga2-api.md#icinga2-api) as modern and secure alternative
+> for external actions.
+
+> **Note**
+>
+> This feature is DEPRECATED and may be removed in future releases.
+> Check the [roadmap](https://github.com/Icinga/icinga2/milestones).
+
+Icinga 2 provides an external command pipe for processing commands
+triggering specific actions (for example rescheduling a service check
+through the web interface).
+
+In order to enable the `ExternalCommandListener` configuration use the
+following command and restart Icinga 2 afterwards:
+
+```bash
+icinga2 feature enable command
+```
+
+Icinga 2 creates the command pipe file as `/var/run/icinga2/cmd/icinga2.cmd`
+using the default configuration.
+
+Web interfaces and other Icinga addons are able to send commands to
+Icinga 2 through the external command pipe, for example for rescheduling
+a forced service check:
+
+```
+# /bin/echo "[`date +%s`] SCHEDULE_FORCED_SVC_CHECK;localhost;ping4;`date +%s`" >> /var/run/icinga2/cmd/icinga2.cmd
+
+# tail -f /var/log/messages
+
+Oct 17 15:01:25 icinga-server icinga2: Executing external command: [1382014885] SCHEDULE_FORCED_SVC_CHECK;localhost;ping4;1382014885
+Oct 17 15:01:25 icinga-server icinga2: Rescheduling next check for service 'ping4'
+```
+
+A list of currently supported external commands can be found [here](24-appendix.md#external-commands-list-detail).
+
+Detailed information on the commands and their required parameters can be found
+on the [Icinga 1.x documentation](https://docs.icinga.com/latest/en/extcommands2.html).
+
+
+### Livestatus <a id="setting-up-livestatus"></a>
+
+> **Note**
+>
+> This feature is DEPRECATED and may be removed in future releases.
+> Check the [roadmap](https://github.com/Icinga/icinga2/milestones).
+
+The [MK Livestatus](https://mathias-kettner.de/checkmk_livestatus.html) project
+implements a query protocol that lets users query their Icinga instance for
+status information. It can also be used to send commands.
+
+The Livestatus component that is distributed as part of Icinga 2 is a
+re-implementation of the Livestatus protocol which is compatible with MK
+Livestatus.
+
+> **Tip**
+>
+> Only install the Livestatus feature if your web interface or addon requires
+> you to do so.
+> [Icinga Web 2](https://icinga.com/docs/icinga-web-2/latest/doc/02-Installation/) does not need
+> Livestatus.
+
+Details on the available tables and attributes with Icinga 2 can be found
+in the [Livestatus Schema](24-appendix.md#schema-livestatus) section.
+
+You can enable Livestatus using `icinga2 feature enable`:
+
+```bash
+icinga2 feature enable livestatus
+```
+
+After that you will have to restart Icinga 2:
+
+```bash
+systemctl restart icinga2
+```
+
+By default the Livestatus socket is available in `/var/run/icinga2/cmd/livestatus`.
+
+In order for queries and commands to work you will need to add your query user
+(e.g. your web server) to the `icingacmd` group:
+
+```bash
+usermod -a -G icingacmd www-data
+```
+
+The Debian packages use `nagios` as the user and group name. Make sure to change `icingacmd` to
+`nagios` if you're using Debian.
+
+Change `www-data` to the user you're using to run queries.
+
+In order to use the historical tables provided by the livestatus feature (for example, the
+`log` table) you need to have the `CompatLogger` feature enabled. By default these logs
+are expected to be in `/var/log/icinga2/compat`. A different path can be set using the
+`compat_log_path` configuration attribute.
+
+```bash
+icinga2 feature enable compatlog
+```
+
+#### Livestatus Sockets <a id="livestatus-sockets"></a>
+
+In contrast to the Icinga 1.x addon, Icinga 2 supports two socket types:
+
+* Unix socket (default)
+* TCP socket
+
+Details on the configuration can be found in the [LivestatusListener](09-object-types.md#objecttype-livestatuslistener)
+object configuration.
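+
+For example, a sketch of a TCP listener matching the port used in the query examples
+below (attribute names follow the LivestatusListener object type; adjust the bind
+address to your needs):
+
+```
+object LivestatusListener "livestatus-tcp" {
+  socket_type = "tcp"
+  bind_host = "127.0.0.1"
+  bind_port = "6558"
+}
+```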
+
+#### Livestatus GET Queries <a id="livestatus-get-queries"></a>
+
+> **Note**
+>
+> All Livestatus queries require an additional empty line as query end identifier.
+> The `nc` tool (`netcat`) provides the `-U` parameter to communicate using
+> a unix socket.
+
+There also is a Perl module available in CPAN for accessing the Livestatus socket
+programmatically: [Monitoring::Livestatus](https://metacpan.org/release/NIERLEIN/Monitoring-Livestatus-0.74)
+
+
+Example using the unix socket:
+
+```
+# echo -e "GET services\n" | /usr/bin/nc -U /var/run/icinga2/cmd/livestatus
+```
+
+Example using the tcp socket listening on port `6558`:
+
+```
+# echo -e 'GET services\n' | netcat 127.0.0.1 6558
+```
+
+Example using a query file and `netcat`:
+
+```
+# cat >servicegroups <<EOF
+GET servicegroups
+
+EOF
+
+(cat servicegroups; sleep 1) | netcat 127.0.0.1 6558
+```
+
+#### Livestatus COMMAND Queries <a id="livestatus-command-queries"></a>
+
+A list of available external commands and their parameters can be found [here](24-appendix.md#external-commands-list-detail)
+
+```bash
+echo -e 'COMMAND <externalcommandstring>' | netcat 127.0.0.1 6558
+```
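+
+Mirroring the external command pipe example earlier, rescheduling a forced check for the
+`ping4` service on `localhost` could look like this (a sketch):
+
+```bash
+echo -e "COMMAND [`date +%s`] SCHEDULE_FORCED_SVC_CHECK;localhost;ping4;`date +%s`\n" | netcat 127.0.0.1 6558
+```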
+
+#### Livestatus Filters <a id="livestatus-filters"></a>
+
+Multiple `Filter:` lines can be combined using the logical operators `And:`, `Or:` and `Negate:`
+(see the example after the table below).
+
+ Operator | Negate | Description
+ ----------|----------|-------------
+ = | != | Equality
+ ~ | !~ | Regex match
+ =~ | !=~ | Equality ignoring case
+ ~~ | !~~ | Regex ignoring case
+ < | | Less than
+ > | | Greater than
+ <= | | Less than or equal
+ >= | | Greater than or equal
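+
+For example, combining two filters with `Or:` (a minimal sketch; remember the trailing
+empty line as query end identifier):
+
+```
+GET services
+Filter: state = 1
+Filter: state = 2
+Or: 2
+OutputFormat: json
+ResponseHeader: fixed16
+```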
+
+
+#### Livestatus Stats <a id="livestatus-stats"></a>
+
+Schema: "Stats: aggregatefunction aggregateattribute"
+
+ Aggregate Function | Description
+ -------------------|--------------
+ sum | &nbsp;
+ min | &nbsp;
+ max | &nbsp;
+ avg | sum / count
+ std | standard deviation
+ suminv | sum (1 / value)
+ avginv | suminv / count
+ count              | default for any stats query if no aggregate function is defined
+
+Example:
+
+```
+GET hosts
+Filter: has_been_checked = 1
+Filter: check_type = 0
+Stats: sum execution_time
+Stats: sum latency
+Stats: sum percent_state_change
+Stats: min execution_time
+Stats: min latency
+Stats: min percent_state_change
+Stats: max execution_time
+Stats: max latency
+Stats: max percent_state_change
+OutputFormat: json
+ResponseHeader: fixed16
+```
+
+#### Livestatus Output <a id="livestatus-output"></a>
+
+* CSV
+
+CSV output uses two levels of array separators: the members array separator
+is a comma (1st level), while the separator for extra info and the host|service
+relation is a pipe (2nd level).
+
+Separators can be set using ASCII codes like:
+
+```
+Separators: 10 59 44 124
+```
+
+* JSON
+
+Default separators.
+
+#### Livestatus Error Codes <a id="livestatus-error-codes"></a>
+
+ Code | Description
+ ----------|--------------
+ 200 | OK
+ 404 | Table does not exist
+ 452 | Exception on query
+
+#### Livestatus Tables <a id="livestatus-tables"></a>
+
+ Table | Join |Description
+ --------------|-----------|----------------------------
+ hosts | &nbsp; | host config and status attributes, services counter
+ hostgroups | &nbsp; | hostgroup config, status attributes and host/service counters
+ services | hosts | service config and status attributes
+ servicegroups | &nbsp; | servicegroup config, status attributes and service counters
+ contacts | &nbsp; | contact config and status attributes
+ contactgroups | &nbsp; | contact config, members
+ commands | &nbsp; | command name and line
+ status | &nbsp; | programstatus, config and stats
+ comments | services | status attributes
+ downtimes | services | status attributes
+ timeperiods | &nbsp; | name and is inside flag
+ endpoints | &nbsp; | config and status attributes
+ log | services, hosts, contacts, commands | parses [compatlog](09-object-types.md#objecttype-compatlogger) and shows log attributes
+ statehist | hosts, services | parses [compatlog](09-object-types.md#objecttype-compatlogger) and aggregates state change attributes
+ hostsbygroup | hostgroups | host attributes grouped by hostgroup and its attributes
+ servicesbygroup | servicegroups | service attributes grouped by servicegroup and its attributes
+ servicesbyhostgroup | hostgroups | service attributes grouped by hostgroup and its attributes
+
+The `commands` table is populated with `CheckCommand`, `EventCommand` and `NotificationCommand` objects.
+
+A detailed list on the available table attributes can be found in the [Livestatus Schema documentation](24-appendix.md#schema-livestatus).
diff --git a/doc/15-troubleshooting.md b/doc/15-troubleshooting.md
new file mode 100644
index 0000000..4cc7338
--- /dev/null
+++ b/doc/15-troubleshooting.md
@@ -0,0 +1,1997 @@
+# Icinga 2 Troubleshooting <a id="troubleshooting"></a>
+
+## Required Information <a id="troubleshooting-information-required"></a>
+
+Please provide any details which may help to reproduce and understand your issue.
+Whether you ask on the [community channels](https://community.icinga.com) or you
+create an issue at [GitHub](https://github.com/Icinga), make sure
+that others can follow your explanations. If necessary, draw a picture and attach it for
+better illustration. This is especially helpful if you are troubleshooting a distributed
+setup.
+
+We've come across many community questions and compiled this list. Please add
+your own findings and details.
+
+* Describe the expected behavior in your own words.
+* Describe the actual behavior in one or two sentences.
+* Provide general information such as:
+ * How was Icinga 2 installed (and from which repository, if applicable) and which distribution you are using
+ * `icinga2 --version`
+ * `icinga2 feature list`
+ * `icinga2 daemon -C`
+ * [Icinga Web 2](https://icinga.com/products/icinga-web-2/) version (screenshot from System - About)
+ * [Icinga Web 2 modules](https://icinga.com/products/icinga-web-2-modules/) e.g. the Icinga Director (optional)
+* Configuration insights:
+ * Provide complete configuration snippets explaining your problem in detail
+ * Your [icinga2.conf](04-configuration.md#icinga2-conf) file
+ * If you run multiple Icinga 2 instances, the [zones.conf](04-configuration.md#zones-conf) file (or `icinga2 object list --type Endpoint` and `icinga2 object list --type Zone`) from all affected nodes.
+* Logs
+ * Relevant output from your main and [debug log](15-troubleshooting.md#troubleshooting-enable-debug-output) in `/var/log/icinga2`. Please add step-by-step explanations with timestamps if required.
+ * The newest Icinga 2 crash log if relevant, located in `/var/log/icinga2/crash`
+* Additional details
+ * If the check command failed, what's the output of your manual plugin tests?
+ * In case of [debugging](21-development.md#development) Icinga 2, the full back traces and outputs
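+
+For example, the general information listed above can be collected on Linux like this:
+
+```bash
+icinga2 --version
+icinga2 feature list
+icinga2 daemon -C
+```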
+
+## Analyze your Environment <a id="troubleshooting-analyze-environment"></a>
+
+There are many components involved on a server running Icinga 2. When you
+analyze a problem, keep in mind that basic system administration knowledge
+is also key to identify bottlenecks and issues.
+
+> **Tip**
+>
+> [Monitor Icinga 2](08-advanced-topics.md#monitoring-icinga) and use the hints for further analysis.
+
+* Analyze the system's performance and identify bottlenecks and issues.
+* Collect details about all applications (e.g. Icinga 2, MySQL, Apache, Graphite, Elastic, etc.).
+* If data is exchanged via the network (e.g. a central MySQL cluster), make sure to monitor the bandwidth capabilities too.
+* Add graphs from Grafana or Graphite as screenshots to your issue description.
+
+Install tools which help you to do so. Opinions differ, let us know if you have any additions here!
+
+### Analyse your Linux/Unix Environment <a id="troubleshooting-analyze-environment-linux"></a>
+
+[htop](https://hisham.hm/htop/) is a better replacement for `top` and helps to analyze processes
+interactively.
+
+```bash
+yum install htop
+apt-get install htop
+```
+
+If you are for example experiencing performance issues, open `htop` and take a screenshot.
+Add it to your question and/or bug report.
+
+Analyse disk I/O performance in Grafana, take a screenshot and obfuscate any sensitive details.
+Attach it when posting a question to the community channels.
+
+The [sysstat](https://github.com/sysstat/sysstat) package provides a number of tools to
+analyze the performance on Linux. On FreeBSD you could use `systat` for example.
+
+```bash
+yum install sysstat
+apt-get install sysstat
+```
+
+Example for `vmstat` (summary of memory, processes, etc.):
+
+```bash
+# summary
+vmstat -s
+# print timestamps, format in MB, stats every 1 second, 5 times
+vmstat -t -S M 1 5
+```
+
+Example for `iostat`:
+
+```bash
+watch -n 1 iostat
+```
+
+Example for `sar`:
+
+```bash
+sar # cpu
+sar -r # ram
+sar -q # load avg
+sar -b # I/O
+```
+
+`sysstat` also provides the `iostat` binary.
+
+If you are missing checks and metrics found in your analysis, add them to your monitoring!
+
+### Analyze your Windows Environment <a id="troubleshooting-analyze-environment-windows"></a>
+
+A good tip for Windows are the tools found inside the [Sysinternals Suite](https://technet.microsoft.com/en-us/sysinternals/bb842062.aspx).
+
+You can also start `perfmon` and analyze specific performance counters.
+Keep notes which could be important for your monitoring, and add service
+checks later on.
+
+> **Tip**
+>
+> Use an administrative Powershell to gain more insights.
+
+```
+cd C:\ProgramData\icinga2\var\log\icinga2
+
+Get-Content .\icinga2.log -tail 10 -wait
+```
+
+## Enable Debug Output <a id="troubleshooting-enable-debug-output"></a>
+
+### Enable Debug Output on Linux/Unix <a id="troubleshooting-enable-debug-output-linux"></a>
+
+Enable the `debuglog` feature:
+
+```bash
+icinga2 feature enable debuglog
+service icinga2 restart
+```
+
+The debug log file can be found in `/var/log/icinga2/debug.log`.
+
+You can tail the log files with an administrative shell:
+
+```bash
+cd /var/log/icinga2
+tail -f debug.log
+```
+
+Alternatively, you may run Icinga 2 in the foreground with debugging enabled. Specify the console
+log severity as an additional argument to `-x`.
+
+```bash
+/usr/sbin/icinga2 daemon -x notice
+```
+
+The [log severity](09-object-types.md#objecttype-filelogger) can be one of `critical`, `warning`, `information`, `notice`
+and `debug`.
+
+### Enable Debug Output on Windows <a id="troubleshooting-enable-debug-output-windows"></a>
+
+Open a Powershell with administrative privileges and enable the debug log feature.
+
+```
+C:\> cd C:\Program Files\ICINGA2\sbin
+
+C:\Program Files\ICINGA2\sbin> .\icinga2.exe feature enable debuglog
+```
+
+Ensure that the Icinga 2 service already writes the main log into `C:\ProgramData\icinga2\var\log\icinga2`.
+Restart the Icinga 2 service in an administrative Powershell and open the newly created `debug.log` file.
+
+```
+C:\> Restart-Service icinga2
+
+C:\> Get-Service icinga2
+```
+
+You can tail the log files with an administrative Powershell:
+
+```
+C:\> cd C:\ProgramData\icinga2\var\log\icinga2
+
+C:\ProgramData\icinga2\var\log\icinga2> Get-Content .\debug.log -tail 10 -wait
+```
+
+## Icinga starts/restarts/reloads very slowly
+
+### Try swapping out the allocator
+
+Icinga performs a lot of memory allocations, especially during startup.
+Swapping out the allocator may increase the startup performance.
+The following instructions assume you run Linux and systemd.
+
+On RHEL or derivatives, add the EPEL repository first (if not already done).
+Let your package manager search for package names containing "jemalloc".
+Preferably pick one named "libjemalloc" followed by a number, otherwise just
+"jemalloc", and install it, for example as sketched below.
+
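+For example (package names vary by distribution and release, so treat this as a sketch):
+
+```bash
+# RHEL and derivatives (with EPEL enabled)
+yum install jemalloc
+# Debian/Ubuntu
+apt-get install libjemalloc2
+```
+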
+Run `ldconfig -p |grep libjemalloc`. It should print something similar to:
+
+```
+ libjemalloc.so.2 (libc6,x86-64) => /lib/x86_64-linux-gnu/libjemalloc.so.2
+```
+
+I.e. a relative file name followed by an absolute one. Remember the latter.
+
+Measure how long Icinga needs to load its config without and with libjemalloc:
+
+```bash
+time icinga2 daemon -C
+
+time env LD_PRELOAD=/lib/x86_64-linux-gnu/libjemalloc.so.2 icinga2 daemon -C
+```
+
+Replace `/lib/x86_64-linux-gnu/libjemalloc.so.2` with the absolute path
+you actually got from `ldconfig -p`!
+
+Please do us a favor and share your results
+[with us](https://community.icinga.com/t/icinga-reloads-config-slowly-try-jemalloc/11032).
+
+If it's faster with libjemalloc, do the following to persist the change.
+
+Run `systemctl edit icinga2.service`. This will open an editor.
+Add the following, save the file and close the editor.
+
+```
+[Service]
+Environment=LD_PRELOAD=/lib/x86_64-linux-gnu/libjemalloc.so.2
+```
+
+Replace `/lib/x86_64-linux-gnu/libjemalloc.so.2` with the absolute path
+you actually got from `ldconfig -p`!
+
+Restart Icinga. Verify whether your changes took effect and enjoy the speed:
+
+```
+# lsof -p `cat /var/run/icinga2/icinga2.pid` |grep libjemalloc
+icinga2 7764 nagios mem REG 8,5 744776 2631636 /usr/lib/x86_64-linux-gnu/libjemalloc.so.2
+#
+```
+
+### Optimise apply rules and group assign conditions
+
+#### Remove actually unused apply rules
+
+If `icinga2 daemon -C` warns you as shown below and the respective apply rule
+should indeed create no objects, consider removing it, or at least comment it out.
+
+```
+[...] warning/ApplyRule: Apply rule '...' (...) for type '...' does not match anywhere!
+```
+
+In Director you can't comment out anything as in Icinga DSL,
+but you can disable apply rules (temporarily or permanently).
+
+The same applies to `assign where` conditions in groups. In this case, removing just
+the `assign where` line(s) is enough if you'd like to keep the group itself.
+
+#### Avoid creating single objects via apply rules
+
+If possible, replace constructs like the first example below with the second one.
+
+```
+apply Service "syspatch" {
+ // ...
+ assign where host.name == "firewall"
+}
+```
+
+```
+object Service "syspatch" {
+ // ...
+ host_name = "firewall"
+}
+```
+
+This way Icinga won't have to evaluate
+`host.name == "firewall"` for all of your hosts.
+At least upgrade to Icinga v2.13.6+ which optimizes apply rules as shown above.
+
+Group membership assign conditions aren't optimized by v2.13.6.
+But they can just be replaced with constructs like this one:
+
+```
+object Host "firewall" {
+ // ...
+ groups = [ "OpenBSD hosts" ]
+}
+```
+
+#### Reduce `assign where` filter complexity
+
+If neither removals, nor flat objects, nor Icinga v2.13.6+
+are an option, at least keep the filter as simple as possible.
+
+The less complex an `assign where` filter is,
+the more time Icinga saves while loading its config.
+**I.e. the following is a bad example.**
+
+```
+assign where ( match("MySQL*", host.name) || match("Maria*", host.name)
+ || match("Postgre*", host.name) || match("Redis*", host.name) || match("Elastic*", host.name) )
+```
+
+Preferably only a single custom var is checked. Ideally just whether it's set.
+(`assign where host.vars.db` or "is true (or set)" operator in Director.)
+Otherwise, prefer `==` over `match()`. (In Director avoid `*` wildcards.)
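+
+A minimal sketch of the preferred form (hypothetical service name; the `db` custom variable
+is assumed to be set on the matching hosts):
+
+```
+apply Service "db-backup" {
+  // ...
+  assign where host.vars.db // single boolean custom variable check
+}
+```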
+
+If you're using the Director, you can create such a custom var automatically and
+pattern-based via a _Sync rule_. Navigate to its _Properties_ and add a new property:
+
+* Destination Field: Custom variable (vars.)
+* Custom variable: e.g. `db`
+* Source Column: Custom expression
+* Source Expression: e.g. `true`
+* Set based on filter: Yes
+* Filter Expression: e.g. `host=MySQL*|host=Maria*|host=Postgre*|host=Redis*|host=Elastic*`
+* Merge Policy: replace
+
+At least reorder operands of an `&&` chain so that a mismatch
+terminates the whole chain as soon as possible. E.g. prefer
+`assign where host.vars.production && match("MySQL*", host.name)`,
+not `assign where match("MySQL*", host.name) && host.vars.production`.
+For all test hosts `host.vars.production` will be false and terminate
+the `&&` rather than also evaluating `match("MySQL*", host.name)`.
+
+### Try reducing concurrency (threads)
+
+!!! note
+
+    This section rather applies to systems with more than eight CPU cores.
+    If you have fewer, consider upgrading your hardware instead.
+
+Yes, reducing and not increasing. By default, Icinga 2 already starts as many
+threads as there are CPU cores according to the OS (unaware of SMT aka
+Hyper-Threading). So there's no point in increasing as Icinga would not gain
+additional CPU time.
+
+But more threads also require more synchronization between them. This may
+outweigh the CPU time gain and even worsen the performance. So reducing may
+indeed help or at least save CPU time and power at no cost.
+
+Start with benchmarking your Icinga 2 config with `time icinga2 daemon -C` on
+the node in question. The results will be most accurate during normal operation,
+i.e. while Icinga is running, but not reloading (e.g. due to config deployments).
+
+Icinga accepts the argument `-DConfiguration.Concurrency=` with the number (of
+threads) immediately after the "=". Start with 8 and finish with the number of
+CPU cores. Write down the times. I.e.:
+
+* `time icinga2 daemon -C -DConfiguration.Concurrency=8`
+* `time icinga2 daemon -C -DConfiguration.Concurrency=10`
+* `time icinga2 daemon -C -DConfiguration.Concurrency=12`
+* ...
+
+If significantly fewer threads than CPU cores noticeably reduce the time
+(reported as "real") Icinga 2 needs to load its configuration, pick the number
+with the best time and persist it in your init daemon. In case of systemd copy
+the `ExecStart=` line from output of `systemctl cat icinga2.service` first.
+Next, run `systemctl edit icinga2.service`. This will open an editor. Add
+`[Service]` (if not already present) and the copied `ExecStart=` line. Append
+`-DConfiguration.Concurrency=` and the chosen number so that the result looks
+like this:
+
+```
+[Service]
+ExecStart=/usr/sbin/icinga2 daemon --close-stdio -e ${ICINGA2_ERROR_LOG} -DConfiguration.Concurrency=42
+```
+
+Save the file and close the editor. Restart Icinga.
+Finally verify whether your changes took effect and enjoy the speed.
+
+## Configuration Troubleshooting <a id="troubleshooting-configuration"></a>
+
+### List Configuration Objects <a id="troubleshooting-list-configuration-objects"></a>
+
+The `icinga2 object list` CLI command can be used to list all configuration objects and their
+attributes. The tool also shows where each of the attributes was modified.
+
+> **Tip**
+>
+> Use the Icinga 2 API to access [config objects at runtime](12-icinga2-api.md#icinga2-api-config-objects) directly.
+
+That way you can also identify which objects have been created from your [apply rules](17-language-reference.md#apply).
+
+```
+# icinga2 object list
+
+Object 'localhost!ssh' of type 'Service':
+ * __name = 'localhost!ssh'
+ * check_command = 'ssh'
+ % = modified in '/etc/icinga2/conf.d/hosts/localhost/ssh.conf', lines 5:3-5:23
+ * check_interval = 60
+ % = modified in '/etc/icinga2/conf.d/templates.conf', lines 24:3-24:21
+ * host_name = 'localhost'
+ % = modified in '/etc/icinga2/conf.d/hosts/localhost/ssh.conf', lines 4:3-4:25
+ * max_check_attempts = 3
+ % = modified in '/etc/icinga2/conf.d/templates.conf', lines 23:3-23:24
+ * name = 'ssh'
+ * retry_interval = 30
+ % = modified in '/etc/icinga2/conf.d/templates.conf', lines 25:3-25:22
+ * templates = [ 'ssh', 'generic-service' ]
+ % += modified in '/etc/icinga2/conf.d/hosts/localhost/ssh.conf', lines 1:0-7:1
+ % += modified in '/etc/icinga2/conf.d/templates.conf', lines 22:1-26:1
+ * type = 'Service'
+ * vars
+ % += modified in '/etc/icinga2/conf.d/hosts/localhost/ssh.conf', lines 6:3-6:19
+ * sla = '24x7'
+ % = modified in '/etc/icinga2/conf.d/hosts/localhost/ssh.conf', lines 6:3-6:19
+
+[...]
+```
+
+On Windows, use an administrative Powershell:
+
+```
+C:\> cd C:\Program Files\ICINGA2\sbin
+
+C:\Program Files\ICINGA2\sbin> .\icinga2.exe object list
+```
+
+You can also filter by name and type:
+
+```
+# icinga2 object list --name *ssh* --type Service
+Object 'localhost!ssh' of type 'Service':
+ * __name = 'localhost!ssh'
+ * check_command = 'ssh'
+ % = modified in '/etc/icinga2/conf.d/hosts/localhost/ssh.conf', lines 5:3-5:23
+ * check_interval = 60
+ % = modified in '/etc/icinga2/conf.d/templates.conf', lines 24:3-24:21
+ * host_name = 'localhost'
+ % = modified in '/etc/icinga2/conf.d/hosts/localhost/ssh.conf', lines 4:3-4:25
+ * max_check_attempts = 3
+ % = modified in '/etc/icinga2/conf.d/templates.conf', lines 23:3-23:24
+ * name = 'ssh'
+ * retry_interval = 30
+ % = modified in '/etc/icinga2/conf.d/templates.conf', lines 25:3-25:22
+ * templates = [ 'ssh', 'generic-service' ]
+ % += modified in '/etc/icinga2/conf.d/hosts/localhost/ssh.conf', lines 1:0-7:1
+ % += modified in '/etc/icinga2/conf.d/templates.conf', lines 22:1-26:1
+ * type = 'Service'
+ * vars
+ % += modified in '/etc/icinga2/conf.d/hosts/localhost/ssh.conf', lines 6:3-6:19
+ * sla = '24x7'
+ % = modified in '/etc/icinga2/conf.d/hosts/localhost/ssh.conf', lines 6:3-6:19
+
+Found 1 Service objects.
+
+[2014-10-15 14:27:19 +0200] information/cli: Parsed 175 objects.
+```
+
+Configuration modifications are not immediately updated. Furthermore, there is a known issue with
+[group assign expressions](17-language-reference.md#group-assign) which are not reflected in the host object output.
+You need to run `icinga2 daemon -C --dump-objects` in order to update the `icinga2.debug` cache file.
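+
+For example:
+
+```bash
+icinga2 daemon -C --dump-objects
+icinga2 object list --type Host
+```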
+
+### Apply rules do not match <a id="apply-rules-do-not-match"></a>
+
+You can analyze apply rules and matching objects by using the [script debugger](20-script-debugger.md#script-debugger).
+
+### Where are the check command definitions? <a id="check-command-definitions"></a>
+
+Icinga 2 features a number of built-in [check command definitions](10-icinga-template-library.md#icinga-template-library) which are
+included with
+
+```
+include <itl>
+include <plugins>
+```
+
+in the [icinga2.conf](04-configuration.md#icinga2-conf) configuration file. These files are not considered
+configuration files and will be overridden on upgrade, so please send modifications as proposed patches upstream.
+The default include path is set to `/usr/share/icinga2/includes` with the constant `IncludeConfDir`.
+
+You should add your own command definitions to a new file in `conf.d/` called `commands.conf`
+or similar.
+
+### Configuration is ignored <a id="configuration-ignored"></a>
+
+* Make sure that the line(s) are not [commented out](17-language-reference.md#comments) (starting with `//` or `#`, or
+encapsulated by `/* ... */`).
+* Is the configuration file included in [icinga2.conf](04-configuration.md#icinga2-conf)?
+
+Run the [configuration validation](11-cli-commands.md#config-validation) and add `notice` as log severity.
+Search for the file which should be included i.e. using the `grep` CLI command.
+
+```bash
+icinga2 daemon -C -x notice | grep command
+```
+
+### Configuration attributes are inherited from <a id="configuration-attribute-inheritance"></a>
+
+Icinga 2 allows you to import templates using the [import](17-language-reference.md#template-imports) keyword. If these templates
+contain additional attributes, your objects will automatically inherit them. You can override
+or modify these attributes in the current object.
+
+The [object list](15-troubleshooting.md#troubleshooting-list-configuration-objects) CLI command allows you to verify the attribute origin.
+
+### Configuration Value with Single Dollar Sign <a id="configuration-value-dollar-sign"></a>
+
+In case your configuration validation fails with a missing closing dollar sign error message, you
+did not properly escape the single dollar sign preventing its usage as [runtime macro](03-monitoring-basics.md#runtime-macros).
+
+```
+critical/config: Error: Validation failed for Object 'ping4' (Type: 'Service') at /etc/icinga2/zones.d/global-templates/windows.conf:24: Closing $ not found in macro format string 'top-syntax=${list}'.
+```
+
+Correct the custom variable value to
+
+```
+"top-syntax=$${list}"
+```
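+
+In context, a corrected custom variable assignment could look like this (a sketch with a
+hypothetical variable name):
+
+```
+// hypothetical custom variable; the escaped $$ produces a literal $
+vars.nscp_arguments = "top-syntax=$${list}"
+```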
+
+
+## Checks Troubleshooting <a id="troubleshooting-checks"></a>
+
+### Executed Command for Checks <a id="checks-executed-command"></a>
+
+* Use the Icinga 2 API to [query](12-icinga2-api.md#icinga2-api-config-objects-query) host/service objects
+for their check result containing the executed shell command.
+* Use the Icinga 2 [console cli command](11-cli-commands.md#cli-command-console)
+to fetch the checkable object, its check result and the executed shell command.
+* Alternatively enable the [debug log](15-troubleshooting.md#troubleshooting-enable-debug-output) and look for the executed command.
+
+Example for a service object query using a [regex match](18-library-reference.md#global-functions-regex)
+on the name:
+
+```
+$ curl -k -s -u root:icinga -H 'Accept: application/json' -H 'X-HTTP-Method-Override: GET' -X POST 'https://localhost:5665/v1/objects/services' \
+-d '{ "filter": "regex(pattern, service.name)", "filter_vars": { "pattern": "^http" }, "attrs": [ "__name", "last_check_result" ], "pretty": true }'
+{
+ "results": [
+ {
+ "attrs": {
+ "__name": "example.localdomain!http",
+ "last_check_result": {
+ "active": true,
+ "check_source": "example.localdomain",
+ "command": [
+ "/usr/local/sbin/check_http",
+ "-I",
+ "127.0.0.1",
+ "-u",
+ "/"
+ ],
+
+ ...
+
+ }
+ },
+ "joins": {},
+ "meta": {},
+ "name": "example.localdomain!http",
+ "type": "Service"
+ }
+ ]
+}
+```
+
+Alternatively when using the Director, navigate into the Service Detail View
+in Icinga Web and pick `Inspect` to query the details.
+
+Example for using the `icinga2 console` CLI command evaluation functionality:
+
+```
+$ ICINGA2_API_PASSWORD=icinga icinga2 console --connect 'https://root@localhost:5665/' \
+--eval 'get_service("example.localdomain", "http").last_check_result.command' | python -m json.tool
+[
+ "/usr/local/sbin/check_http",
+ "-I",
+ "127.0.0.1",
+ "-u",
+ "/"
+]
+```
+
+Example for searching the debug log:
+
+```bash
+icinga2 feature enable debuglog
+systemctl restart icinga2
+tail -f /var/log/icinga2/debug.log | grep "notice/Process"
+```
+
+
+### Checks are not executed <a id="checks-not-executed"></a>
+
+* First off, decide whether the checks are executed locally, or remote in a distributed setup.
+
+If the master does not receive check results from the satellite, move your analysis to the satellite
+and verify why the checks are not executed there.
+
+* Check the [debug log](15-troubleshooting.md#troubleshooting-enable-debug-output) to see if the check command gets executed.
+* Verify that failed dependencies do not prevent command execution.
+* Make sure that the plugin is executable by the Icinga 2 user (run a manual test).
+* Make sure the [checker](11-cli-commands.md#enable-features) feature is enabled.
+* Use the Icinga 2 API [event streams](12-icinga2-api.md#icinga2-api-event-streams) to receive live check result streams.
+
+Test a plugin as icinga user.
+
+```bash
+sudo -u icinga /usr/lib/nagios/plugins/check_ping -4 -H 127.0.0.1 -c 5000,100% -w 3000,80%
+```
+
+> **Note**
+>
+> **Never test plugins as root, but the icinga daemon user.** The environment and permissions differ.
+>
+> Also, the daemon user **does not** spawn a terminal shell (Bash, etc.) so it won't read anything from .bashrc
+> and variants. The Icinga daemon only relies on sysconfig environment variables being set.
+
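+To compare environments, you can print what the daemon user actually sees (a sketch,
+assuming the daemon runs as the `icinga` user):
+
+```bash
+sudo -u icinga env | sort
+```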
+
+Enable the checker feature.
+
+```
+# icinga2 feature enable checker
+The feature 'checker' is already enabled.
+```
+
+Fetch all check result events matching the `event.service` name `random`:
+
+```bash
+curl -k -s -u root:icinga -H 'Accept: application/json' -X POST \
+ 'https://localhost:5665/v1/events?queue=debugchecks&types=CheckResult&filter=match%28%22random*%22,event.service%29'
+```
+
+
+### Analyze Check Source <a id="checks-check-source"></a>
+
+Sometimes checks are not executed on the remote host, but on the master and so on.
+This could lead into unwanted results or NOT-OK states.
+
+The `check_source` attribute is the best indication where a check command
+was actually executed. This could be a satellite with synced configuration
+or a client as remote command bridge -- both will return the check source
+as where the plugin is called.
+
+Example for retrieving the check source from all `disk` services using a
+[regex match](18-library-reference.md#global-functions-regex) on the name:
+
+```
+$ curl -k -s -u root:icinga -H 'Accept: application/json' -H 'X-HTTP-Method-Override: GET' -X POST 'https://localhost:5665/v1/objects/services' \
+-d '{ "filter": "regex(pattern, service.name)", "filter_vars": { "pattern": "^disk" }, "attrs": [ "__name", "last_check_result" ], "pretty": true }'
+{
+ "results": [
+ {
+ "attrs": {
+ "__name": "icinga2-agent1.localdomain!disk",
+ "last_check_result": {
+ "active": true,
+ "check_source": "icinga2-agent1.localdomain",
+
+ ...
+
+ }
+ },
+ "joins": {},
+ "meta": {},
+ "name": "icinga2-agent1.localdomain!disk",
+ "type": "Service"
+ }
+ ]
+}
+```
+
+Alternatively when using the Director, navigate into the Service Detail View
+in Icinga Web and pick `Inspect` to query the details.
+
+Example with the debug console:
+
+```
+$ ICINGA2_API_PASSWORD=icinga icinga2 console --connect 'https://root@localhost:5665/' \
+--eval 'get_service("icinga2-agent1.localdomain", "disk").last_check_result.check_source' | python -m json.tool
+
+"icinga2-agent1.localdomain"
+```
+
+
+### NSClient++ Check Errors with nscp-local <a id="nsclient-check-errors-nscp-local"></a>
+
+The [nscp-local](10-icinga-template-library.md#nscp-check-local) CheckCommand object definitions call the local `nscp.exe` command.
+If a Windows client service check fails to find the `nscp.exe` command, the log output would look like this:
+
+```
+Command ".\nscp.exe" "client" "-a" "drive=d" "-a" "show-all" "-b" "-q" "check_drivesize" failed to execute: 2, "The system cannot find the file specified."
+```
+
+or
+
+```
+Command ".
+scp.exe" "client" "-a" "drive=d" "-a" "show-all" "-b" "-q" "check_drivesize" failed to execute: 2, "The system cannot find the file specified."
+```
+
+The above actually prints `.\\nscp.exe` where the escaped `\n` character gets interpreted as new line.
+
+Both errors lead to the assumption that the `NscpPath` constant is empty or set to a `.` character.
+This could mean the following:
+
+* The command is **not executed on the Windows client**. Check the [check_source](15-troubleshooting.md#checks-check-source) attribute from the check result.
+* You are using an outdated NSClient++ version (0.3.x or 0.4.x) which is not compatible with Icinga 2.
+* You are using a custom NSClient++ installer which does not register the correct GUID for NSClient++
+
+More troubleshooting:
+
+Retrieve the `NscpPath` constant on your Windows client:
+
+```
+C:\Program Files\ICINGA2\sbin\icinga2.exe variable get NscpPath
+```
+
+If the variable is returned empty, manually test how Icinga 2 would resolve
+its path (this can be found inside the ITL):
+
+```
+C:\Program Files\ICINGA2\sbin\icinga2.exe console --eval "dirname(msi_get_component_path(\"{5C45463A-4AE9-4325-96DB-6E239C034F93}\"))"
+```
+
+If this command does not return anything, NSClient++ is not properly installed.
+Verify that inside the `Programs and Features` (`appwiz.cpl`) control panel.
+
+
+### Check Thresholds Not Applied <a id="check-thresholds-not-applied"></a>
+
+This could happen with [clients as command endpoint execution](06-distributed-monitoring.md#distributed-monitoring-top-down-command-endpoint).
+
+If you have, for example, a client host `icinga2-agent1.localdomain`
+and a `disk` service check defined on the master, the warning and
+critical thresholds are sometimes not applied and unwanted notification
+alerts are raised.
+
+This happens because the client itself includes a host object with
+its `NodeName` and a basic set of checks in the [conf.d](04-configuration.md#conf-d)
+directory, i.e. `disk` with the default thresholds.
+
+Clients which have the `checker` feature enabled will attempt
+to execute checks for local services and send their results
+back to the master.
+
+If you now have the same host and service objects on the
+master you will receive wrong check results from the client.
+
+Solution:
+
+* Disable the `checker` feature on clients: `icinga2 feature disable checker`.
+* Remove the inclusion of [conf.d](04-configuration.md#conf-d) as suggested in the [client setup docs](06-distributed-monitoring.md#distributed-monitoring-top-down-command-endpoint).
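+
+For example, on the agent (a sketch; the `conf.d` inclusion is the `include_recursive "conf.d"`
+line in [icinga2.conf](04-configuration.md#icinga2-conf)):
+
+```bash
+icinga2 feature disable checker
+# then comment out 'include_recursive "conf.d"' in /etc/icinga2/icinga2.conf and restart
+systemctl restart icinga2
+```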
+
+### Check Fork Errors <a id="check-fork-errors"></a>
+
+Newer versions of systemd on Linux limit spawned processes for
+services.
+
+* v227 introduces the `TasksMax` setting to units which allows to specify the spawned process limit.
+* v228 adds `DefaultTasksMax` in the global `systemd-system.conf` with a default setting of 512 processes.
+* v231 changes the default value to 15%.
+
+This can cause problems with Icinga 2 in large environments with many
+commands executed in parallel starting with systemd v228. Some distributions
+also may have changed the defaults.
+
+The error message could look like this:
+
+```
+2017-01-12T11:55:40.742685+01:00 icinga2-master1 kernel: [65567.582895] cgroup: fork rejected by pids controller in /system.slice/icinga2.service
+```
+
+In order to solve the problem, increase the value for `DefaultTasksMax`
+or set it to `infinity`.
+
+```bash
+mkdir /etc/systemd/system/icinga2.service.d
+cat >/etc/systemd/system/icinga2.service.d/limits.conf <<EOF
+[Service]
+DefaultTasksMax=infinity
+EOF
+
+systemctl daemon-reload
+systemctl restart icinga2
+```
+
+An example is available inside the GitHub repository in [etc/initsystem](https://github.com/Icinga/icinga2/tree/master/etc/initsystem).
+
+External Resources:
+
+* [Fork limit for cgroups](https://lwn.net/Articles/663873/)
+* [systemd changelog](https://github.com/systemd/systemd/blob/master/NEWS)
+* [Icinga 2 upstream issue](https://github.com/Icinga/icinga2/issues/5611)
+* [systemd upstream discussion](https://github.com/systemd/systemd/issues/3211)
+
+### Systemd Watchdog <a id="check-systemd-watchdog"></a>
+
+Usually Icinga 2 is a mission critical part of infrastructure and should be
+online at all times. In case of a recoverable crash (e.g. OOM) you may want to
+restart Icinga 2 automatically. With systemd it is as easy as overriding some
+settings of the Icinga 2 systemd service by creating
+`/etc/systemd/system/icinga2.service.d/override.conf` with the following
+content:
+
+```
+[Service]
+Restart=always
+RestartSec=1
+StartLimitInterval=10
+StartLimitBurst=3
+```
+
+Using the watchdog can also help with monitoring Icinga 2, to activate and use it add the following to the override:
+
+```
+WatchdogSec=30s
+```
+
+This way systemd will kill Icinga 2 if it does not notify for over 30 seconds. A timeout of less than 10 seconds is not
+recommended. When the watchdog is activated, `Restart=` can be set to `watchdog` to restart Icinga 2 in the case of a
+watchdog timeout.
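+
+A combined override using the watchdog could then look like this:
+
+```
+[Service]
+Restart=watchdog
+RestartSec=1
+StartLimitInterval=10
+StartLimitBurst=3
+WatchdogSec=30s
+```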
+
+Run `systemctl daemon-reload && systemctl restart icinga2` to apply the changes.
+Now systemd will always try to restart Icinga 2 (except if you run
+`systemctl stop icinga2`). After three failures in ten seconds it will stop
+trying because you probably have a problem that requires manual intervention.
+
+### Late Check Results <a id="late-check-results"></a>
+
+[Icinga Web 2](https://icinga.com/products/icinga-web-2/) provides
+a dashboard overview for `overdue checks`.
+
+The REST API provides the [status](12-icinga2-api.md#icinga2-api-status) URL endpoint with some generic metrics
+on Icinga and its features.
+
+```bash
+curl -k -s -u root:icinga 'https://localhost:5665/v1/status?pretty=1' | less
+```
+
+You can also calculate late check results via the REST API:
+
+* Fetch the `last_check` timestamp from each object
+* Compare the timestamp with the current time and add `check_interval` multiple times (change it to see which results are really late, like five times check_interval)
+
+You can use the [icinga2 console](11-cli-commands.md#cli-command-console) to connect to the instance, fetch all data
+and calculate the differences. More infos can be found in [this blogpost](https://icinga.com/2016/08/11/analyse-icinga-2-problems-using-the-console-api/).
+
+```
+# ICINGA2_API_USERNAME=root ICINGA2_API_PASSWORD=icinga icinga2 console --connect 'https://localhost:5665/'
+
+<1> => var res = []; for (s in get_objects(Service).filter(s => s.last_check < get_time() - 2 * s.check_interval)) { res.add([s.__name, DateTime(s.last_check).to_string()]) }; res
+
+[ [ "10807-host!10807-service", "2016-06-10 15:54:55 +0200" ], [ "mbmif.int.netways.de!disk /", "2016-01-26 16:32:29 +0100" ] ]
+```
+
+Or if you are just interested in numbers, call [len](18-library-reference.md#array-len) on the result array `res`:
+
+```
+<2> => var res = []; for (s in get_objects(Service).filter(s => s.last_check < get_time() - 2 * s.check_interval)) { res.add([s.__name, DateTime(s.last_check).to_string()]) }; res.len()
+
+2.000000
+```
+
+If you need to analyze that problem multiple times, just add the current formatted timestamp
+and repeat the commands.
+
+```
+<23> => DateTime(get_time()).to_string()
+
+"2017-04-04 16:09:39 +0200"
+
+<24> => var res = []; for (s in get_objects(Service).filter(s => s.last_check < get_time() - 2 * s.check_interval)) { res.add([s.__name, DateTime(s.last_check).to_string()]) }; res.len()
+
+8287.000000
+```
+
+More details about the Icinga 2 DSL and its possibilities can be
+found in the [language](17-language-reference.md#language-reference) and [library](18-library-reference.md#library-reference) reference chapters.
+
+### Late Check Results in Distributed Environments <a id="late-check-results-distributed"></a>
+
+When it comes to a distributed HA setup, each node is responsible for a load-balanced amount of checks.
+Host and Service objects provide the attribute `paused`. If this is set to `false`, the current node
+actively attempts to schedule and execute checks. Otherwise the node does not feel responsible.
+
+```
+<3> => var res = {}; for (s in get_objects(Service).filter(s => s.last_check < get_time() - 2 * s.check_interval)) { res[s.paused] += 1 }; res
+{
+ @false = 2.000000
+ @true = 1.000000
+}
+```
+
+You may ask why this analysis is important. Fair enough - if the numbers are not inverted in an HA zone
+with two members, this may give a hint that the cluster nodes are in a split-brain scenario, or you've
+found a bug in the cluster.
+
+
+If you are running a cluster setup where the master/satellite executes checks on the client via
+[top down command endpoint](06-distributed-monitoring.md#distributed-monitoring-top-down-command-endpoint) mode,
+you might want to know which zones are affected.
+
+This analysis assumes that clients which are not connected have the string `connected` in their
+service check result output and that their state is `UNKNOWN`.
+
+```
+<4> => var res = {}; for (s in get_objects(Service)) { if (s.state==3) { if (match("*connected*", s.last_check_result.output)) { res[s.zone] += [s.host_name] } } }; for (k => v in res) { res[k] = len(v.unique()) }; res
+
+{
+ Asia = 31.000000
+ Europe = 214.000000
+ USA = 207.000000
+}
+```
+
+The result set shows the configured zones and their affected hosts in a unique list. The output also just prints the numbers
+but you can adjust this by omitting the `len()` call inside the for loop.
+
+## Notifications Troubleshooting <a id="troubleshooting-notifications"></a>
+
+### Notifications are not sent <a id="troubleshooting-notifications-not-sent"></a>
+
+* Check the [debug log](15-troubleshooting.md#troubleshooting-enable-debug-output) to see if a notification is triggered.
+* If yes, verify that all conditions are satisfied.
+* Are any errors on the notification command execution logged?
+
+Please ensure to add these details with your own description
+to any question or issue posted to the community channels.
+
+Verify the following configuration:
+
+* Is the host/service `enable_notifications` attribute set, and if so, to which value?
+* Do the [notification](09-object-types.md#objecttype-notification) attributes `states`, `types`, `period` match the notification conditions?
+* Do the [user](09-object-types.md#objecttype-user) attributes `states`, `types`, `period` match the notification conditions?
+* Are there any notification `begin` and `end` times configured?
+* Make sure the [notification](11-cli-commands.md#enable-features) feature is enabled.
+* Does the referenced NotificationCommand work when executed as Icinga user on the shell?
+
+If notifications are to be sent via mail, make sure that the mail program specified inside the
+[NotificationCommand object](09-object-types.md#objecttype-notificationcommand) exists.
+The name and location depend on the distribution, so the preconfigured setting might have to be
+changed on your system.
+
+
+Examples:
+
+```
+# icinga2 feature enable notification
+The feature 'notification' is already enabled.
+```
+
+```bash
+icinga2 feature enable debuglog
+systemctl restart icinga2
+
+grep Notification /var/log/icinga2/debug.log > /root/analyze_notification_problem.log
+```
+
+You can use the Icinga 2 API [event streams](12-icinga2-api.md#icinga2-api-event-streams) to receive live notification streams:
+
+```bash
+curl -k -s -u root:icinga -H 'Accept: application/json' -X POST 'https://localhost:5665/v1/events?queue=debugnotifications&types=Notification'
+```
+
+
+## Feature Troubleshooting <a id="troubleshooting-features"></a>
+
+### Feature is not working <a id="feature-not-working"></a>
+
+* Make sure that the feature configuration is enabled by symlinking from `features-available/`
+to `features-enabled` and that the latter is included in [icinga2.conf](04-configuration.md#icinga2-conf).
+* Are the feature attributes set correctly according to the documentation?
+* Any errors on the logs?
+
+Look up the [object type](09-object-types.md#object-types) for the required feature and verify it is enabled:
+
+```bash
+icinga2 object list --type <feature object type>
+```
+
+Example for the `graphite` feature:
+
+```bash
+icinga2 object list --type GraphiteWriter
+```
+
+Look into the log and check whether the feature logs anything specific for this matter.
+
+```bash
+grep GraphiteWriter /var/log/icinga2/icinga2.log
+```
+
+## REST API Troubleshooting <a id="troubleshooting-api"></a>
+
+In order to analyse errors on API requests, you can explicitly enable the [verbose parameter](12-icinga2-api.md#icinga2-api-parameters-global).
+
+```
+$ curl -k -s -u root:icinga -H 'Accept: application/json' -X DELETE 'https://localhost:5665/v1/objects/hosts/example-cmdb?pretty=1&verbose=1'
+{
+ "diagnostic_information": "Error: Object does not exist.\n\n ....",
+ "error": 404.0,
+ "status": "No objects found."
+}
+```
+
+### REST API Troubleshooting: No Objects Found <a id="troubleshooting-api-no-objects-found"></a>
+
+Please note that the `404` status with no objects being found can also originate
+from missing or too strict object permissions for the authenticated user.
+
+This is a security feature to disable object name guessing. If this were not the
+case, restricted users would be able to get a list of names of your objects just by
+trying every character combination.
+
+In order to analyse and fix the problem, please check the following:
+
+- use an administrative account with full permissions to check whether the objects are actually there.
+- verify the permissions on the affected ApiUser object and fix them.
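+
+For example, inspecting the permissions of an ApiUser object on the shell (`root` is an example name):
+
+```bash
+icinga2 object list --type ApiUser --name root
+```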
+
+### Missing Runtime Objects (Hosts, Downtimes, etc.) <a id="troubleshooting-api-missing-runtime-objects"></a>
+
+Runtime objects consume the internal config packages shared with
+the REST API config packages. Each host, downtime, comment, service, etc. created
+via the REST API is stored in the `_api` package.
+
+This includes downtimes and comments, which were sometimes stored in the wrong
+directory path, because the active-stage file was empty/truncated/unreadable at
+that point.
+
+Wrong:
+
+```
+/var/lib/icinga2/api/packages/_api//conf.d/downtimes/1234-5678-9012-3456.conf
+```
+
+Correct:
+
+```
+/var/lib/icinga2/api/packages/_api/dbe0bef8-c72c-4cc9-9779-da7c4527c5b2/conf.d/downtimes/1234-5678-9012-3456.conf
+```
+
+At creation time, the object lives in memory but its storage is broken. Upon restart,
+it is missing and e.g. a missing downtime will re-enable unwanted notifications.
+
+`dbe0bef8-c72c-4cc9-9779-da7c4527c5b2` is the active stage name which wasn't correctly
+read by the Icinga daemon. This information is stored in `/var/lib/icinga2/api/packages/_api/active-stage`.
+
+2.11 now limits the direct active-stage file access (this is hidden from the user),
+and caches active stages for packages in-memory.
+
+It also tries to repair the broken package, and logs a new message:
+
+```
+systemctl restart icinga2
+
+tail -f /var/log/icinga2/icinga2.log
+
+[2019-05-10 12:27:15 +0200] information/ConfigObjectUtility: Repairing config package '_api' with stage 'dbe0bef8-c72c-4cc9-9779-da7c4527c5b2'.
+```
+
+If this does not happen, you can manually fix the broken config package and mark a deployed stage
+as active again. Carefully follow the steps below and create a backup first:
+
+Navigate into the API package prefix.
+
+```bash
+cd /var/lib/icinga2/api/packages
+```
+
+Change into the broken package directory and list all directories and files
+ordered by latest changes.
+
+```
+cd _api
+ls -lahtr
+
+drwx------ 4 michi wheel 128B Mar 27 14:39 ..
+-rw-r--r-- 1 michi wheel 25B Mar 27 14:39 include.conf
+-rw-r--r-- 1 michi wheel 405B Mar 27 14:39 active.conf
+drwx------ 7 michi wheel 224B Mar 27 15:01 dbe0bef8-c72c-4cc9-9779-da7c4527c5b2
+drwx------ 5 michi wheel 160B Apr 26 12:47 .
+```
+
+As you can see, the `active-stage` file is missing. If it exists, verify that its content
+points to the stage directory as follows.
+
+If you have more than one stage directory here, pick the latest modified
+directory. Copy the directory name (here `dbe0bef8-c72c-4cc9-9779-da7c4527c5b2`) and
+add it into a new file `active-stage`. This can be done like this:
+
+```bash
+echo "dbe0bef8-c72c-4cc9-9779-da7c4527c5b2" > active-stage
+```
+
+`active.conf` needs to have the correct active stage too, add it again
+like this. Note: This is deep down in the code, use with care!
+
+```bash
+sed -i 's/ActiveStages\["_api"\] = .*/ActiveStages\["_api"\] = "dbe0bef8-c72c-4cc9-9779-da7c4527c5b2"/g' /var/lib/icinga2/api/packages/_api/active.conf
+```
+
+Restart Icinga 2.
+
+```bash
+systemctl restart icinga2
+```
+
+
+> **Note**
+>
+> The internal `_api` config package structure may change in the future. Do not modify
+> things in there manually or with scripts unless guided here or asked by a developer.
+
+
+## Certificate Troubleshooting <a id="troubleshooting-certificate"></a>
+
+Tools for analysing certificates and TLS connections:
+
+- `openssl` binary on Linux/Unix, `openssl.exe` on Windows ([download](https://slproweb.com/products/Win32OpenSSL.html))
+- `sslscan` tool, available [here](https://github.com/rbsec/sslscan) (Linux/Windows)
+
+Note: You can also execute sslscan on Windows using Powershell.
+
+
+### Certificate Verification <a id="troubleshooting-certificate-verification"></a>
+
+Whenever the TLS handshake fails when a client connects to the cluster or the REST API,
+ensure to verify the used certificates.
+
+Print the CA and client certificate and ensure that the following attributes are set:
+
+* Version must be 3.
+* Serial number is a hex-encoded string.
+* Issuer should be your certificate authority (defaults to `Icinga CA` for all certificates generated by CLI commands and automated signing requests).
+* Validity: The certificate must not be expired.
+* Subject with the common name (CN) matches the client endpoint name and its FQDN.
+* v3 extensions must set the basic constraint for `CA:TRUE` (ca.crt) or `CA:FALSE` (client certificate).
+* Subject Alternative Name is set to the resolvable DNS name (required for REST API and browsers).
+
+Navigate into the local certificate store:
+
+```bash
+cd /var/lib/icinga2/certs/
+```
+
+Make sure to verify the agents' certificate and its stored `ca.crt` in `/var/lib/icinga2/certs` and ensure that
+all instances (master, satellite, agent) are signed by the **same CA**.
+
+Compare the `ca.crt` file from the agent node to your master's `ca.crt` file.
+
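+A quick way to compare them is via their fingerprints using the `openssl` binary, for example:
+
+```bash
+openssl x509 -in /var/lib/icinga2/certs/ca.crt -noout -fingerprint -sha256
+```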
+
+Since 2.12, you can use the built-in CLI command `pki verify` to perform TLS certificate validation tasks.
+
+> **Hint**
+>
+> The CLI command uses exit codes aligned to the [Plugin API specification](05-service-monitoring.md#service-monitoring-plugin-api).
+> Run the commands followed with `echo $?` to see the exit code.
+
+These CLI commands can be used on Windows agents too without requiring the OpenSSL binary.
+
+#### Print TLS Certificate <a id="troubleshooting-certificate-verification-print"></a>
+
+Pass the certificate file to the `--cert` CLI command parameter to print its details.
+This prints a shorter version of `openssl x509 -in <file> -text`.
+
+```
+$ icinga2 pki verify --cert icinga2-agent2.localdomain.crt
+
+information/cli: Printing certificate 'icinga2-agent2.localdomain.crt'
+
+ Version: 3
+ Subject: CN = icinga2-agent2.localdomain
+ Issuer: CN = Icinga CA
+ Valid From: Feb 14 11:29:36 2020 GMT
+ Valid Until: Feb 10 11:29:36 2035 GMT
+ Serial: 12:fe:a6:22:f5:e3:db:a2:95:8e:92:b2:af:1a:e3:01:44:c4:70:e0
+
+ Signature Algorithm: sha256WithRSAEncryption
+ Subject Alt Names: icinga2-agent2.localdomain
+ Fingerprint: 40 98 A0 77 58 4F CA D1 05 AC 18 53 D7 52 8D D7 9C 7F 5A 23 B4 AF 63 A4 92 9D DC FF 89 EF F1 4C
+```
+
+You can also print the `ca.crt` certificate without any further checks using the `--cert` parameter.
+
+#### Print and Verify CA Certificate <a id="troubleshooting-certificate-verification-print-verify-ca"></a>
+
+The `--cacert` CLI parameter allows you to check whether the given certificate file is a public CA certificate.
+
+```
+$ icinga2 pki verify --cacert ca.crt
+
+information/cli: Checking whether certificate 'ca.crt' is a valid CA certificate.
+
+ Version: 3
+ Subject: CN = Icinga CA
+ Issuer: CN = Icinga CA
+ Valid From: Jul 31 12:26:08 2019 GMT
+ Valid Until: Jul 27 12:26:08 2034 GMT
+ Serial: 89:fe:d6:12:66:25:3a:c5:07:c1:eb:d4:e6:f2:df:ca:13:6e:dc:e7
+
+ Signature Algorithm: sha256WithRSAEncryption
+ Subject Alt Names:
+ Fingerprint: 9A 11 29 A8 A3 89 F8 56 30 1A E4 0A B2 6B 28 46 07 F0 14 17 BD 19 A4 FC BD 41 40 B5 1A 8F BF 20
+
+information/cli: OK: CA certificate file 'ca.crt' was verified successfully.
+```
+
+In case you pass a wrong certificate, an error is shown and the exit code is `2` (Critical).
+
+```
+$ icinga2 pki verify --cacert icinga2-agent2.localdomain.crt
+
+information/cli: Checking whether certificate 'icinga2-agent2.localdomain.crt' is a valid CA certificate.
+
+ Version: 3
+ Subject: CN = icinga2-agent2.localdomain
+ Issuer: CN = Icinga CA
+ Valid From: Feb 14 11:29:36 2020 GMT
+ Valid Until: Feb 10 11:29:36 2035 GMT
+ Serial: 12:fe:a6:22:f5:e3:db:a2:95:8e:92:b2:af:1a:e3:01:44:c4:70:e0
+
+ Signature Algorithm: sha256WithRSAEncryption
+ Subject Alt Names: icinga2-agent2.localdomain
+ Fingerprint: 40 98 A0 77 58 4F CA D1 05 AC 18 53 D7 52 8D D7 9C 7F 5A 23 B4 AF 63 A4 92 9D DC FF 89 EF F1 4C
+
+critical/cli: CRITICAL: The file 'icinga2-agent2.localdomain.crt' does not seem to be a CA certificate file.
+```
+
+#### Verify Certificate is signed by CA Certificate <a id="troubleshooting-certificate-verification-signed-by-ca"></a>
+
+Pass the certificate file to the `--cert` CLI parameter, and the `ca.crt` file to the `--cacert` parameter.
+Common troubleshooting scenarios involve self-signed certificates and untrusted agents resulting in disconnects.
+
+```
+$ icinga2 pki verify --cert icinga2-agent2.localdomain.crt --cacert ca.crt
+
+information/cli: Verifying certificate 'icinga2-agent2.localdomain.crt'
+
+ Version: 3
+ Subject: CN = icinga2-agent2.localdomain
+ Issuer: CN = Icinga CA
+ Valid From: Feb 14 11:29:36 2020 GMT
+ Valid Until: Feb 10 11:29:36 2035 GMT
+ Serial: 12:fe:a6:22:f5:e3:db:a2:95:8e:92:b2:af:1a:e3:01:44:c4:70:e0
+
+ Signature Algorithm: sha256WithRSAEncryption
+ Subject Alt Names: icinga2-agent2.localdomain
+ Fingerprint: 40 98 A0 77 58 4F CA D1 05 AC 18 53 D7 52 8D D7 9C 7F 5A 23 B4 AF 63 A4 92 9D DC FF 89 EF F1 4C
+
+information/cli: with CA certificate 'ca.crt'.
+
+ Version: 3
+ Subject: CN = Icinga CA
+ Issuer: CN = Icinga CA
+ Valid From: Jul 31 12:26:08 2019 GMT
+ Valid Until: Jul 27 12:26:08 2034 GMT
+ Serial: 89:fe:d6:12:66:25:3a:c5:07:c1:eb:d4:e6:f2:df:ca:13:6e:dc:e7
+
+ Signature Algorithm: sha256WithRSAEncryption
+ Subject Alt Names:
+ Fingerprint: 9A 11 29 A8 A3 89 F8 56 30 1A E4 0A B2 6B 28 46 07 F0 14 17 BD 19 A4 FC BD 41 40 B5 1A 8F BF 20
+
+information/cli: OK: Certificate with CN 'icinga2-agent2.localdomain' is signed by CA.
+```
+
+#### Verify Certificate matches Common Name (CN) <a id="troubleshooting-certificate-verification-common-name-match"></a>
+
+This allows you to verify the common name inside the certificate against a given string parameter.
+Typical troubleshooting involves upper/lower case CNs (Windows).
+
+```
+$ icinga2 pki verify --cert icinga2-agent2.localdomain.crt --cn icinga2-agent2.localdomain
+
+information/cli: Verifying common name (CN) 'icinga2-agent2.localdomain in certificate 'icinga2-agent2.localdomain.crt'.
+
+ Version: 3
+ Subject: CN = icinga2-agent2.localdomain
+ Issuer: CN = Icinga CA
+ Valid From: Feb 14 11:29:36 2020 GMT
+ Valid Until: Feb 10 11:29:36 2035 GMT
+ Serial: 12:fe:a6:22:f5:e3:db:a2:95:8e:92:b2:af:1a:e3:01:44:c4:70:e0
+
+ Signature Algorithm: sha256WithRSAEncryption
+ Subject Alt Names: icinga2-agent2.localdomain
+ Fingerprint: 40 98 A0 77 58 4F CA D1 05 AC 18 53 D7 52 8D D7 9C 7F 5A 23 B4 AF 63 A4 92 9D DC FF 89 EF F1 4C
+
+information/cli: OK: CN 'icinga2-agent2.localdomain' matches certificate CN 'icinga2-agent2.localdomain'.
+```
+
+In the example below, the certificate uses an upper case CN.
+
+```
+$ icinga2 pki verify --cert icinga2-agent2.localdomain.crt --cn icinga2-agent2.localdomain
+
+information/cli: Verifying common name (CN) 'icinga2-agent2.localdomain in certificate 'icinga2-agent2.localdomain.crt'.
+
+ Version: 3
+ Subject: CN = ICINGA2-agent2.localdomain
+ Issuer: CN = Icinga CA
+ Valid From: Feb 14 11:29:36 2020 GMT
+ Valid Until: Feb 10 11:29:36 2035 GMT
+ Serial: 12:fe:a6:22:f5:e3:db:a2:95:8e:92:b2:af:1a:e3:01:44:c4:70:e0
+
+ Signature Algorithm: sha256WithRSAEncryption
+ Subject Alt Names: ICINGA2-agent2.localdomain
+ Fingerprint: 40 98 A0 77 58 4F CA D1 05 AC 18 53 D7 52 8D D7 9C 7F 5A 23 B4 AF 63 A4 92 9D DC FF 89 EF F1 4C
+
+critical/cli: CRITICAL: CN 'icinga2-agent2.localdomain' does NOT match certificate CN 'ICINGA2-agent2.localdomain'.
+```
+
+
+
+### Certificate Signing <a id="troubleshooting-certificate-signing"></a>
+
+Icinga offers two methods:
+
+* [CSR Auto-Signing](06-distributed-monitoring.md#distributed-monitoring-setup-csr-auto-signing) which uses a client (an agent or a satellite) ticket generated on the master as trust identifier.
+* [On-Demand CSR Signing](06-distributed-monitoring.md#distributed-monitoring-setup-on-demand-csr-signing) which allows to sign pending certificate requests on the master.
+
+Whenever a signed certificate is not received on the requesting clients, ensure to check the following:
+
+* The ticket was valid and the master's log shows nothing different (CSR Auto-Signing only)
+* If the agent/satellite is directly connected to the CA master, check whether the master actually has performance problems to process the request. If the connection is closed without certificate response, analyse the master's health. It is also advised to upgrade to v2.11 where network stack problems have been fixed.
+* If you're using a 3+ level cluster, check whether the satellite really forwarded the CSR signing request and the master processed it.
+
+Other common errors:
+
+* The generated ticket is invalid. The client receives this error message, as well as the master logs a warning message.
+* The [api](09-object-types.md#objecttype-apilistener) feature does not have the `ticket_salt` attribute set to the generated `TicketSalt` constant by the CLI wizards.
+
+In case you are using On-Demand CSR Signing, `icinga2 ca list` on the master only lists
+pending requests since v2.11. Add `--all` to also see signed requests. Keep in mind that
+old requests are purged after 1 week automatically.
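+
+For example:
+
+```bash
+icinga2 ca list
+icinga2 ca list --all
+```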
+
+
+### TLS Handshake: Ciphers <a id="troubleshooting-certificate-handshake-ciphers"></a>
+
+Starting with v2.11, the default configured ciphers have been hardened to modern
+standards. This includes TLS v1.2 as minimum protocol version too.
+
+In case the TLS handshake fails with `no shared cipher`, first analyse whether both
+instances support the same ciphers.
+
+#### Client connects to Server <a id="troubleshooting-certificate-handshake-ciphers-client"></a>
+
+Connect using `openssl s_client` and try to reproduce the connection problem.
+
+> **Important**
+>
+> The endpoint with the server role **accepting** the connection picks the preferred
+> cipher. E.g. when a satellite connects to the master, the master chooses the cipher.
+>
+> Keep this in mind when simulating the client role connecting to a server with
+> CLI tools such as `openssl s_client`.
+
+
+`openssl s_client` tells you about the supported and shared cipher suites
+on the remote server. `openssl ciphers` lists locally available ciphers.
+
+```
+$ openssl s_client -connect 192.168.33.5:5665
+...
+
+---
+SSL handshake has read 2899 bytes and written 786 bytes
+---
+New, TLSv1/SSLv3, Cipher is AES256-GCM-SHA384
+Server public key is 4096 bit
+Secure Renegotiation IS supported
+Compression: NONE
+Expansion: NONE
+No ALPN negotiated
+SSL-Session:
+ Protocol : TLSv1.2
+ Cipher : AES256-GCM-SHA384
+
+...
+```
+
+You can specifically use one cipher or a list with the `-cipher` parameter:
+
+```bash
+openssl s_client -connect 192.168.33.5:5665 -cipher 'ECDHE-RSA-AES256-GCM-SHA384'
+```
+
+In order to fully simulate a connecting client, provide the certificates too:
+
+```bash
+CERTPATH='/var/lib/icinga2/certs'
+HOSTNAME='icinga2.vagrant.demo.icinga.com'
+openssl s_client -connect 192.168.33.5:5665 -cert "${CERTPATH}/${HOSTNAME}.crt" -key "${CERTPATH}/${HOSTNAME}.key" -CAfile "${CERTPATH}/ca.crt" -cipher 'ECDHE-RSA-AES256-GCM-SHA384'
+```
+
+In case you need to change the default cipher list,
+set the [cipher_list](09-object-types.md#objecttype-apilistener) attribute
+in the `api` feature configuration accordingly.
+
+Beware of using insecure ciphers; this may become a
+security risk in your organisation.
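+
+A minimal sketch of such an override, assuming the default feature config path `/etc/icinga2/features-available/api.conf` and a reduced set taken from the hardened defaults:
+
+```
+object ApiListener "api" {
+  // keep only a few strong ciphers from the default list (adjust to your policy)
+  cipher_list = "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256"
+}
+```
+
+Validate the configuration with `icinga2 daemon -C` and restart Icinga afterwards.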
+
+#### Server Accepts Client <a id="troubleshooting-certificate-handshake-ciphers-server"></a>
+
+If the master node does not actively connect to the satellite/agent node(s), but instead
+the child node actively connects, you can still simulate a TLS handshake.
+
+Use `openssl s_server` instead of `openssl s_client` on the master during the connection
+attempt.
+
+```bash
+openssl s_server -accept 5665 -cert /var/lib/icinga2/certs/icinga2-master1.localdomain.crt -key /var/lib/icinga2/certs/icinga2-master1.localdomain.key
+```
+
+Since the server role chooses the preferred cipher suite in Icinga,
+you can test-drive the "agent connects to master" mode here, provided that
+the TCP connection is not blocked by the firewall. Note that `openssl s_server`
+listens on the given port itself, so stop the local Icinga service first or
+pick a different port for the test.
+
+
+#### Cipher Scan Tools <a id="troubleshooting-certificate-handshake-ciphers-scantools"></a>
+
+You can also use other tools to test the available cipher suites, similar to what SSL Labs and
+others provide for TLS-enabled websites. [This post](https://superuser.com/questions/109213/how-do-i-list-the-ssl-tls-cipher-suites-a-particular-website-offers)
+highlights some tools and scripts such as [sslscan](https://github.com/rbsec/sslscan) or [testssl.sh](https://github.com/drwetter/testssl.sh/).
+
+Example for sslscan on macOS against a Debian 10 Buster instance
+running v2.11:
+
+```
+$ brew install sslscan
+
+$ sslscan 192.168.33.22:5665
+Version: 1.11.13-static
+OpenSSL 1.0.2f 28 Jan 2016
+
+Connected to 192.168.33.22
+
+Testing SSL server 192.168.33.22 on port 5665 using SNI name 192.168.33.22
+
+ TLS Fallback SCSV:
+Server supports TLS Fallback SCSV
+
+ TLS renegotiation:
+Session renegotiation not supported
+
+ TLS Compression:
+Compression disabled
+
+ Heartbleed:
+TLS 1.2 not vulnerable to heartbleed
+TLS 1.1 not vulnerable to heartbleed
+TLS 1.0 not vulnerable to heartbleed
+
+ Supported Server Cipher(s):
+Preferred TLSv1.2 256 bits ECDHE-RSA-AES256-GCM-SHA384 Curve P-256 DHE 256
+Accepted TLSv1.2 128 bits ECDHE-RSA-AES128-GCM-SHA256 Curve P-256 DHE 256
+Accepted TLSv1.2 256 bits ECDHE-RSA-AES256-SHA384 Curve P-256 DHE 256
+Accepted TLSv1.2 128 bits ECDHE-RSA-AES128-SHA256 Curve P-256 DHE 256
+
+ SSL Certificate:
+Signature Algorithm: sha256WithRSAEncryption
+RSA Key Strength: 4096
+
+Subject: icinga2-debian10.vagrant.demo.icinga.com
+Altnames: DNS:icinga2-debian10.vagrant.demo.icinga.com
+Issuer: Icinga CA
+
+Not valid before: Jul 12 07:39:55 2019 GMT
+Not valid after: Jul 8 07:39:55 2034 GMT
+```
+
+## Distributed Troubleshooting <a id="troubleshooting-cluster"></a>
+
+This applies to any Icinga 2 node in a [distributed monitoring setup](06-distributed-monitoring.md#distributed-monitoring-scenarios).
+
+You should configure the [cluster health checks](06-distributed-monitoring.md#distributed-monitoring-health-checks) if you haven't
+done so already.
+
+> **Note**
+>
+> Some problems are simply caused by wrong file permissions or packet filters. Make
+> sure to check these first.
+
+### Cluster Troubleshooting Connection Errors <a id="troubleshooting-cluster-connection-errors"></a>
+
+General connection errors could be one of the following problems:
+
+* Incorrect network configuration
+* Packet loss
+* Firewall rules preventing traffic
+
+Use tools like `netstat`, `tcpdump`, `nmap`, etc. to make sure that the cluster communication
+works (default port is `5665`).
+
+```bash
+tcpdump -n port 5665 -i any
+
+netstat -tulpen | grep icinga
+
+nmap icinga2-agent1.localdomain
+```
+
+### Cluster Troubleshooting TLS Errors <a id="troubleshooting-cluster-tls-errors"></a>
+
+If the cluster communication fails with TLS/SSL error messages, make sure to check
+the following (a quick inspection command follows the list):
+
+* File permissions on the TLS certificate files
+* Does the used CA match for all cluster endpoints?
+ * Verify the `Issuer` being your trusted CA
+ * Verify the `Subject` containing your endpoint's common name (CN)
+ * Check the validity of the certificate itself
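+
+A quick way to inspect these fields is `openssl x509` (a sketch, assuming the default certificate path and the example agent certificate):
+
+```bash
+# print subject (CN), issuer and validity period of the local certificate
+openssl x509 -in /var/lib/icinga2/certs/icinga2-agent1.localdomain.crt -noout -subject -issuer -dates
+```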
+
+Try to manually connect from `icinga2-agent1.localdomain` to the master node `icinga2-master1.localdomain`:
+
+```
+$ openssl s_client -CAfile /var/lib/icinga2/certs/ca.crt -cert /var/lib/icinga2/certs/icinga2-agent1.localdomain.crt -key /var/lib/icinga2/certs/icinga2-agent1.localdomain.key -connect icinga2-master1.localdomain:5665
+
+CONNECTED(00000003)
+---
+...
+```
+
+If the connection attempt fails or your CA does not match, [verify the certificates](15-troubleshooting.md#troubleshooting-certificate-verification).
+
+
+#### Cluster Troubleshooting Unauthenticated Clients <a id="troubleshooting-cluster-unauthenticated-clients"></a>
+
+Unauthenticated nodes are able to connect. This is required for agent/satellite setups.
+
+Master:
+
+```
+[2015-07-13 18:29:25 +0200] information/ApiListener: New client connection for identity 'icinga2-agent1.localdomain' (unauthenticated)
+```
+
+Agent as command execution bridge:
+
+```
+[2015-07-13 18:29:26 +1000] notice/ClusterEvents: Discarding 'execute command' message from 'icinga2-master1.localdomain': Invalid endpoint origin (client not allowed).
+```
+
+If these messages do not go away, make sure to [verify the master and agent certificates](15-troubleshooting.md#troubleshooting-certificate-verification).
+
+
+### Cluster Troubleshooting Message Errors <a id="troubleshooting-cluster-message-errors"></a>
+
+When the network connection is broken or gone, the Icinga 2 instances will be disconnected.
+If the connection can't be re-established between endpoints in the same HA zone,
+they remain in split-brain mode and their history may differ.
+
+Although the Icinga 2 cluster protocol stores historical events in a [replay log](15-troubleshooting.md#troubleshooting-cluster-replay-log)
+for later synchronisation, you should make sure to check why the network connection failed.
+
+Ensure you set up [cluster health checks](06-distributed-monitoring.md#distributed-monitoring-health-checks)
+to monitor the connectivity of all endpoints and zones.
+
+
+### Cluster Troubleshooting Command Endpoint Errors <a id="troubleshooting-cluster-command-endpoint-errors"></a>
+
+Command endpoints can be used [for agents](06-distributed-monitoring.md#distributed-monitoring-top-down-command-endpoint)
+as well as inside a [High-Availability cluster](06-distributed-monitoring.md#distributed-monitoring-scenarios).
+
+There is no CLI command for manually executing the check, but you can verify
+the following (e.g. by invoking a forced check from the web interface):
+
+* `/var/log/icinga2/icinga2.log` shows connection and execution errors.
+ * The ApiListener is not enabled to [accept commands](06-distributed-monitoring.md#distributed-monitoring-top-down-command-endpoint). This is visible as `UNKNOWN` check result output.
+ * `CheckCommand` definition not found on the remote client. This is visible as `UNKNOWN` check result output.
+ * Referenced check plugin not found on the remote agent.
+ * Runtime warnings and errors, e.g. unresolved runtime macros or configuration problems.
+* Specific error messages are also populated into `UNKNOWN` check results including a detailed error message in their output.
+* Verify the [check source](15-troubleshooting.md#checks-check-source). This is populated by the node executing the check. You can see it in Icinga Web's detail view or by querying the REST API for this checkable object (see the example query after this list).
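+
+A query sketch for the check source via the REST API, assuming the example agent host and a hypothetical `disk` service on it:
+
+```bash
+# the check_source field is part of the last_check_result attribute
+curl -k -s -u root:icinga -H 'Accept: application/json' \
+ 'https://localhost:5665/v1/objects/services/icinga2-agent1.localdomain!disk?attrs=last_check_result'
+```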
+
+Additional tasks:
+
+* More verbose logs are found inside the [debug log](15-troubleshooting.md#troubleshooting-enable-debug-output).
+
+* Use the Icinga 2 API [event streams](12-icinga2-api.md#icinga2-api-event-streams) to receive live check result streams.
+
+Fetch all check result events matching the `event.service` name `remote-client`:
+
+```bash
+curl -k -s -u root:icinga -H 'Accept: application/json' -X POST 'https://localhost:5665/v1/events?queue=debugcommandendpoint&types=CheckResult&filter=match%28%22remote-client*%22,event.service%29'
+```
+
+
+#### Agent Hosts with Command Endpoint require a Zone <a id="troubleshooting-cluster-command-endpoint-errors-agent-hosts-command-endpoint-zone"></a>
+
+2.11 fixes bugs where agent host checks would never be scheduled on
+the master. One requirement is that the checkable host/service
+is put into a zone.
+
+By default, the Director puts the agent host in `zones.d/master`
+and you're good to go. If you manually manage the configuration,
+the config compiler now throws an error with `command_endpoint`
+being set but no `zone` defined.
+
+In case you previously managed the configuration outside of `zones.d`,
+follow along with the following instructions.
+
+The most convenient way with e.g. managing the objects in `conf.d`
+is to move them into the `master` zone.
+
+First, verify the name of your endpoint's zone. The CLI wizards
+use `master` by default.
+
+```
+vim /etc/icinga2/zones.conf
+
+object Zone "master" {
+ ...
+}
+```
+
+Then create a new directory in `zones.d` called `master`, if not existing.
+
+```bash
+mkdir -p /etc/icinga2/zones.d/master
+```
+
+Now move the directory tree from `conf.d` into the `master` zone.
+
+```bash
+mv conf.d/* /etc/icinga2/zones.d/master/
+```
+
+Validate the configuration and reload Icinga.
+
+```bash
+icinga2 daemon -C
+systemctl restart icinga2
+```
+
+Another method is to specify the `zone` attribute manually, but since
+this may lead to other unwanted "not checked" scenarios, we don't
+recommend this for your production environment.
+
+### Cluster Troubleshooting Config Sync <a id="troubleshooting-cluster-config-sync"></a>
+
+In order to troubleshoot this, remember the key facts about the config sync:
+
+* Within a config master zone, only one configuration master is allowed to have its config in `/etc/icinga2/zones.d`.
+ * The config master copies the zone configuration from `/etc/icinga2/zones.d` to `/var/lib/icinga2/api/zones`. This storage is the same for all cluster endpoints, and the source for all config syncs.
+  * The config master puts the `.authoritative` marker on these zone files locally. This is to ensure that it doesn't receive config updates from other endpoints. If you have copied the content from `/var/lib/icinga2/api/zones` to another node, ensure you remove these markers there.
+* During startup, the master validates the entire configuration and only syncs valid configuration to other zone endpoints.
+
+Satellites/Agents < 2.11 store the received configuration directly in `/var/lib/icinga2/api/zones`, then validate it and reload the daemon.
+Satellites/Agents >= 2.11 put the received configuration into the staging directory `/var/lib/icinga2/api/zones-stage` first, and only copy it to the production directory `/var/lib/icinga2/api/zones` once the validation has succeeded.
+
+The configuration sync logs the operations during startup with the `information` severity level. Received zone configuration is also logged.
+
+Typical errors are:
+
+* The api feature doesn't [accept config](06-distributed-monitoring.md#distributed-monitoring-top-down-config-sync). This is logged into `/var/lib/icinga2/icinga2.log`.
+* The received configuration zone is not configured in [zones.conf](04-configuration.md#zones-conf) and Icinga denies it. This is logged into `/var/lib/icinga2/icinga2.log`.
+* The satellite/agent has local configuration in `/etc/icinga2/zones.d` and thinks it is authoritative for this zone. It then denies the received update. Purge the content from `/etc/icinga2/zones.d` and `/var/lib/icinga2/api/zones/*`, then restart Icinga to fix this (see the commands below).
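+
+A sketch of that cleanup on the affected satellite/agent (not on the config master):
+
+```bash
+# remove locally held zone configuration which shadows the sync
+rm -rf /etc/icinga2/zones.d/*
+rm -rf /var/lib/icinga2/api/zones/*
+
+systemctl restart icinga2
+```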
+
+#### New configuration does not trigger a reload <a id="troubleshooting-cluster-config-sync-no-reload"></a>
+
+The debug/notice log dumps the calculated checksums for all files and the comparison. Analyse this to troubleshoot further.
+
+A complete sync for the `director-global` global zone can look like this:
+
+```
+[2019-08-01 09:20:25 +0200] notice/JsonRpcConnection: Received 'config::Update' message from 'icinga2-master1.localdomain'
+[2019-08-01 09:20:25 +0200] information/ApiListener: Applying config update from endpoint 'icinga2-master1.localdomain' of zone 'master'.
+[2019-08-01 09:20:25 +0200] notice/ApiListener: Creating config update for file '/var/lib/icinga2/api/zones/director-global/.checksums'.
+[2019-08-01 09:20:25 +0200] notice/ApiListener: Creating config update for file '/var/lib/icinga2/api/zones/director-global/.timestamp'.
+[2019-08-01 09:20:25 +0200] notice/ApiListener: Creating config update for file '/var/lib/icinga2/api/zones/director-global/director/001-director-basics.conf'.
+[2019-08-01 09:20:25 +0200] notice/ApiListener: Creating config update for file '/var/lib/icinga2/api/zones/director-global/director/host_templates.conf'.
+[2019-08-01 09:20:25 +0200] information/ApiListener: Received configuration for zone 'director-global' from endpoint 'icinga2-master1.localdomain'. Comparing the checksums.
+[2019-08-01 09:20:25 +0200] debug/ApiListener: Checking for config change between stage and production. Old (4): '{"/.checksums":"c4dd1237e36dcad9142f4d9a81324a7cae7d01543a672299
+b8c1bb08b629b7d1","/.timestamp":"f21c0e6551328812d9f5176e5e31f390de0d431d09800a85385630727b404d83","/director/001-director-basics.conf":"f86583eec81c9bf3a1823a761991fb53d640bd0dc
+6cd12bf8c5e6a275359970f","/director/host_templates.conf":"831e9b7e3ec1e33288e56a51e63c688da1d6316155349382a101f7fce6229ecc"}' vs. new (4): '{"/.checksums":"c4dd1237e36dcad9142f4d
+9a81324a7cae7d01543a672299b8c1bb08b629b7d1","/.timestamp":"f21c0e6551328812d9f5176e5e31f390de0d431d09800a85385630727b404d83","/director/001-director-basics.conf":"f86583eec81c9bf
+3a1823a761991fb53d640bd0dc6cd12bf8c5e6a275359970f","/director/host_templates.conf":"831e9b7e3ec1e33288e56a51e63c688da1d6316155349382a101f7fce6229ecc"}'.
+[2019-08-01 09:20:25 +0200] debug/ApiListener: Ignoring old internal file '/.checksums'.
+[2019-08-01 09:20:25 +0200] debug/ApiListener: Ignoring old internal file '/.timestamp'.
+[2019-08-01 09:20:25 +0200] debug/ApiListener: Checking /director/001-director-basics.conf for old checksum: f86583eec81c9bf3a1823a761991fb53d640bd0dc6cd12bf8c5e6a275359970f.
+[2019-08-01 09:20:25 +0200] debug/ApiListener: Checking /director/host_templates.conf for old checksum: 831e9b7e3ec1e33288e56a51e63c688da1d6316155349382a101f7fce6229ecc.
+[2019-08-01 09:20:25 +0200] debug/ApiListener: Ignoring new internal file '/.checksums'.
+[2019-08-01 09:20:25 +0200] debug/ApiListener: Ignoring new internal file '/.timestamp'.
+[2019-08-01 09:20:25 +0200] debug/ApiListener: Checking /director/001-director-basics.conf for new checksum: f86583eec81c9bf3a1823a761991fb53d640bd0dc6cd12bf8c5e6a275359970f.
+[2019-08-01 09:20:25 +0200] debug/ApiListener: Checking /director/host_templates.conf for new checksum: 831e9b7e3ec1e33288e56a51e63c688da1d6316155349382a101f7fce6229ecc.
+[2019-08-01 09:20:25 +0200] information/ApiListener: Stage: Updating received configuration file '/var/lib/icinga2/api/zones-stage/director-global//director/001-director-basics.c
+onf' for zone 'director-global'.
+[2019-08-01 09:20:25 +0200] information/ApiListener: Stage: Updating received configuration file '/var/lib/icinga2/api/zones-stage/director-global//director/host_templates.conf'
+for zone 'director-global'.
+[2019-08-01 09:20:25 +0200] information/ApiListener: Applying configuration file update for path '/var/lib/icinga2/api/zones-stage/director-global' (2209 Bytes).
+
+...
+
+[2019-08-01 09:20:25 +0200] information/ApiListener: Received configuration updates (4) from endpoint 'icinga2-master1.localdomain' are different to production, triggering validation and reload.
+[2019-08-01 09:20:25 +0200] notice/Process: Running command '/usr/lib/x86_64-linux-gnu/icinga2/sbin/icinga2' '--no-stack-rlimit' 'daemon' '--close-stdio' '-e' '/var/log/icinga2/e
+rror.log' '--validate' '--define' 'System.ZonesStageVarDir=/var/lib/icinga2/api/zones-stage/': PID 4532
+[2019-08-01 09:20:25 +0200] notice/Process: PID 4532 ('/usr/lib/x86_64-linux-gnu/icinga2/sbin/icinga2' '--no-stack-rlimit' 'daemon' '--close-stdio' '-e' '/var/log/icinga2/error.l
+og' '--validate' '--define' 'System.ZonesStageVarDir=/var/lib/icinga2/api/zones-stage/') terminated with exit code 0
+[2019-08-01 09:20:25 +0200] information/ApiListener: Config validation for stage '/var/lib/icinga2/api/zones-stage/' was OK, replacing into '/var/lib/icinga2/api/zones/' and trig
+gering reload.
+[2019-08-01 09:20:26 +0200] information/ApiListener: Copying file 'director-global//.checksums' from config sync staging to production zones directory.
+[2019-08-01 09:20:26 +0200] information/ApiListener: Copying file 'director-global//.timestamp' from config sync staging to production zones directory.
+[2019-08-01 09:20:26 +0200] information/ApiListener: Copying file 'director-global//director/001-director-basics.conf' from config sync staging to production zones directory.
+[2019-08-01 09:20:26 +0200] information/ApiListener: Copying file 'director-global//director/host_templates.conf' from config sync staging to production zones directory.
+
+...
+
+[2019-08-01 09:20:26 +0200] notice/Application: Got reload command, forwarding to umbrella process (PID 4236)
+```
+
+In case the received configuration updates are equal to what is running in production, a different message is logged and the validation/reload is skipped.
+
+```
+[2020-02-05 15:18:19 +0200] information/ApiListener: Received configuration updates (4) from endpoint 'icinga2-master1.localdomain' are equal to production, skipping validation and reload.
+```
+
+
+#### Syncing Binary Files is Denied <a id="troubleshooting-cluster-config-sync-binary-denied"></a>
+
+The config sync is built for syncing text configuration files, wrapped into JSON-RPC messages.
+Some users have started to use this as a binary file sync instead of using tools built for this purpose:
+rsync, git, Puppet, Ansible, etc.
+
+Starting with 2.11, this attempt is now prohibited and logged.
+
+```
+[2019-08-02 16:03:19 +0200] critical/ApiListener: Ignoring file '/etc/icinga2/zones.d/global-templates/forbidden.exe' for cluster config sync: Does not contain valid UTF8. Binary files are not supported.
+Context:
+ (0) Creating config update for file '/etc/icinga2/zones.d/global-templates/forbidden.exe'
+ (1) Activating object 'api' of type 'ApiListener'
+```
+
+In order to solve this problem, remove the mentioned files from `zones.d` and use an alternate way
+of syncing plugin binaries to your satellites and agents.
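+
+A minimal cleanup sketch on the config master, using the file from the log message above:
+
+```bash
+# remove the offending binary from the sync directory
+rm /etc/icinga2/zones.d/global-templates/forbidden.exe
+
+# validate the configuration and restart to trigger a clean sync
+icinga2 daemon -C
+systemctl restart icinga2
+```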
+
+
+#### Zones in Zones <a id="troubleshooting-cluster-config-zones-in-zones"></a>
+
+The cluster config sync works in such a way that any `/etc/icinga2/zones.d/` subdirectory is included only when it is
+named after a zone known to the local `Endpoint`.
+
+If you, for example, add some configuration to `zones.d/satellite` but forget to define the `satellite` zone
+in `zones.d/master` or outside in `/etc/icinga2/zones.conf`, the config compiler will not include
+the configuration from the `zones.d/satellite` zone directory.
+
+Since v2.11, the config compiler only includes directories for which a
+zone has been configured. Previously, it would also include renamed old zones,
+broken zones, etc.; those long-standing bugs have now been fixed.
+
+Here are some working examples:
+
+**Example: Everything in `zones.conf`**
+
+Each instance needs to know the `Zone` and `Endpoint` definitions for itself and all directly connected instances in order
+to successfully establish a connection with each other. This can be achieved by placing all `Endpoint` and `Zone` definitions
+of all Icinga 2 instances known to the local endpoint in this single file.
+
+```
+vim /etc/icinga2/zones.conf
+
+object Endpoint "icinga2-master1.localdomain" { ... }
+object Endpoint "icinga2-master2.localdomain" { ... }
+
+object Zone "master" {
+ endpoints = [ "icinga2-master1.localdomain", "icinga2-master2.localdomain" ]
+}
+
+object Endpoint "icinga2-satellite1.localdomain" { ... }
+object Endpoint "icinga2-satellite2.localdomain" { ... }
+
+object Zone "satellite" {
+  endpoints = [ "icinga2-satellite1.localdomain", "icinga2-satellite2.localdomain" ]
+ parent = "master"
+}
+```
+
+**Example: Child zones in `zones.d/`**
+
+An additional option that Icinga 2 offers is the possibility to outsource all *child* `Endpoint` definitions of the
+local Icinga 2 instance to the `zones.d/` directory. As an example, we can place the satellite `Zone` and `Endpoint` definition
+from the above example into `zones.d/` underneath a directory named exactly like the local endpoint `Zone` name, which
+in our case is `master`.
+
+```
+mkdir /etc/icinga2/zones.d/master
+vim /etc/icinga2/zones.d/master/satellite.conf
+
+object Endpoint "icinga2-satellite1.localdomain" { ... }
+object Endpoint "icinga2-satellite2.localdomain" { ... }
+
+object Zone "satellite" {
+  endpoints = [ "icinga2-satellite1.localdomain", "icinga2-satellite2.localdomain" ]
+ parent = "master"
+}
+
+...
+```
+
+Once done, you can start deploying actual monitoring objects into the satellite zone.
+
+```
+vim /etc/icinga2/zones.d/satellite/satellite-hosts.conf
+
+object Host "agent" { ... }
+```
+
+Keep in mind that the `agent` host object will never reach the satellite if the master does not have the
+`satellite` zone configured either in `zones.d/master` or outside the `zones.d` directory. That's also explained and
+described in the [documentation](06-distributed-monitoring.md#distributed-monitoring-scenarios-master-satellite-agents).
+
+One exception: for `command_endpoint` agents, as configured in the Director with
+Host -> Agent -> yes, there is no config sync for the agent zone in place. Therefore,
+it is valid to just sync their zone and endpoint definitions via the config sync.
+
+#### Director Changes
+
+The following restores the Zone/Endpoint objects as config objects outside of `zones.d`
+in your master/satellite's zones.conf, while rendering them as external objects in the Director.
+
+[Example](06-distributed-monitoring.md#distributed-monitoring-scenarios-master-satellite-agents)
+for a 3 level setup with the masters and satellites knowing about the zone hierarchy
+outside defined in [zones.conf](04-configuration.md#zones-conf):
+
+```
+object Endpoint "icinga-master1.localdomain" {
+ //define 'host' attribute to control the connection direction on each instance
+}
+
+object Endpoint "icinga-master2.localdomain" {
+ //...
+}
+
+object Endpoint "icinga-satellite1.localdomain" {
+ //...
+}
+
+object Endpoint "icinga-satellite2.localdomain" {
+ //...
+}
+
+//--------------
+// Zone hierarchy with endpoints, required for the trust relationship and that the cluster config sync knows which zone directory defined in zones.d needs to be synced to which endpoint.
+// That's no different to what is explained in the docs as basic zone trust hierarchy, and is intentionally managed outside in zones.conf there.
+
+object Zone "master" {
+ endpoints = [ "icinga-master1.localdomain", "icinga-master2.localdomain" ]
+}
+
+object Zone "satellite" {
+ endpoints = [ "icinga-satellite1.localdomain", "icinga-satellite2.localdomain" ]
+ parent = "master" // trust
+}
+```
+
+Prepare the above configuration on all affected nodes; satellites are likely up to date already.
+Then continue with the steps below.
+
+> * backup your database, just to be on the safe side
+> * create all non-external Zone/Endpoint-Objects on all related Icinga Master/Satellite-Nodes (manually in your local zones.conf)
+> * while doing so please do NOT restart Icinga, no deployments
+> * change the type in the Director DB:
+>
+> ```sql
+> UPDATE icinga_zone SET object_type = 'external_object' WHERE object_type = 'object';
+> UPDATE icinga_endpoint SET object_type = 'external_object' WHERE object_type = 'object';
+> ```
+>
+> * render and deploy a new configuration in the Director. It will state that there are no changes. Ignore it and deploy anyway.
+>
+> That's it. All nodes should automatically restart, triggered by the deployed configuration via cluster protocol.
+
+
+### Cluster Troubleshooting Overdue Check Results <a id="troubleshooting-cluster-check-results"></a>
+
+If your master does not receive check results (or any other events) from the child zones
+(satellites, clients, etc.), make sure to check whether the client sending the events
+is allowed to do so.
+
+> **Tip**
+>
+> General troubleshooting hints on late check results are documented [here](15-troubleshooting.md#late-check-results).
+
+The [distributed monitoring conventions](06-distributed-monitoring.md#distributed-monitoring-conventions)
+apply. So, if there's a mismatch between your client node's endpoint name and its provided
+certificate's CN, the master will deny all events.
+
+> **Tip**
+>
+> [Icinga Web 2](https://icinga.com/docs/icinga-web-2/latest/doc/01-About/) provides a dashboard view
+> for overdue check results.
+
+Enable the [debug log](15-troubleshooting.md#troubleshooting-enable-debug-output) on the master
+for more verbose insights.
+
+If the client cannot authenticate, it's a more general [problem](15-troubleshooting.md#troubleshooting-cluster-unauthenticated-clients).
+
+The client's endpoint is not configured on nor trusted by the master node:
+
+```
+Discarding 'check result' message from 'icinga2-agent1.localdomain': Invalid endpoint origin (client not allowed).
+```
+
+The check result message sent by the client does not belong to the zone the checkable object is
+in on the master:
+
+```
+Discarding 'check result' message from 'icinga2-agent1.localdomain': Unauthorized access.
+```
+
+
+### Cluster Troubleshooting Replay Log <a id="troubleshooting-cluster-replay-log"></a>
+
+If your `/var/lib/icinga2/api/log` directory grows, it generally means that your cluster
+cannot replay the log after a connection loss and re-establishment. A master node, for example,
+will store all events for disconnected endpoints in the same zone and in child zones.
+
+Check the following:
+
+* Are all clients connected? (e.g. check with the [cluster health checks](06-distributed-monitoring.md#distributed-monitoring-health-checks)).
+* Check your [connection](15-troubleshooting.md#troubleshooting-cluster-connection-errors) in general.
+* Does the log replay work, i.e. are all events processed and does the directory get cleared up over time?
+* Decrease the `log_duration` attribute value for that specific [endpoint](09-object-types.md#objecttype-endpoint) (see the sketch after this list).
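+
+A sketch for lowering the replay log window of a single endpoint (the duration value is an example):
+
+```
+object Endpoint "icinga2-agent1.localdomain" {
+  // keep at most 2 hours of replay log for this endpoint
+  log_duration = 2h
+}
+```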
+
+The cluster health checks also measure the `slave_lag` metric. Use this data to correlate
+graphs with other events (e.g. disk I/O, network problems, etc.).
+
+
+### Cluster Troubleshooting: Windows Agents <a id="troubleshooting-cluster-windows-agents"></a>
+
+
+#### Windows Service Exe Path <a id="troubleshooting-cluster-windows-agents-service-exe-path"></a>
+
+Icinga agents can be installed either as an x86 or x64 package. If you enable features, or wonder why
+logs are not written, the first step is to analyse which path the Windows service `icinga2` is using.
+
+Start a new administrative PowerShell and ensure that the `icinga2` service is running.
+
+```
+C:\Program Files\ICINGA2\sbin> net start icinga2
+```
+
+Use the `Get-WmiObject` cmdlet to extract the Windows service and its path name.
+
+```
+C:\Program Files\ICINGA2\sbin> Get-WmiObject win32_service | ?{$_.Name -like '*icinga*'} | select Name, DisplayName, State, PathName
+
+Name DisplayName State PathName
+---- ----------- ----- --------
+icinga2 Icinga 2 Running "C:\Program Files\ICINGA2\sbin\icinga2.exe" --scm "daemon"
+```
+
+If you have used the `icinga2.exe` from a different path to enable e.g. the `debuglog` feature,
+navigate into `C:\Program Files\ICINGA2\sbin\` and use the correct exe to control the feature set.
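+
+For example, to enable the `debuglog` feature with the service's own binary and restart the service afterwards (a sketch):
+
+```
+C:\Program Files\ICINGA2\sbin> .\icinga2.exe feature enable debuglog
+C:\Program Files\ICINGA2\sbin> Restart-Service icinga2
+```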
+
+
+#### Windows Agents consuming 100% CPU <a id="troubleshooting-cluster-windows-agents-cpu"></a>
+
+> **Note**
+>
+> The network stack was rewritten in 2.11. This fixes several hanging connections and threads
+> on older Windows agents and master/satellite nodes. Prior to testing the below, plan an upgrade.
+
+Icinga 2 requires the `NodeName` [constant](17-language-reference.md#constants) in various places to run.
+This includes loading the TLS certificates, setting the proper check source,
+and so on.
+
+Typically the Windows setup wizard and also the CLI commands populate the [constants.conf](04-configuration.md#constants-conf)
+file with the auto-detected or user-provided FQDN/Common Name.
+
+If this constant is not set during startup, Icinga will try to resolve the
+FQDN and, if that fails, fall back to the hostname. If everything fails, it logs
+an error and sets the constant to `localhost`. This results in undefined behaviour
+if ignored by the admin.
+
+Querying DNS when it is not reachable consumes a lot of CPU and may look like Icinga
+is doing lots of checks, while it actually is still starting up.
+
+In order to fix this, edit the `constants.conf` file and populate
+the `NodeName` constant with the FQDN. Ensure this is the same value
+as the local endpoint object name.
+
+```
+const NodeName = "windows-agent1.domain.com"
+```
+
+
+
+#### Windows blocking Icinga 2 with ephemeral port range <a id="troubleshooting-cluster-windows-agents-ephemeral-port-range"></a>
+
+When you see a message like this in your Windows agent logs:
+
+```
+critical/TcpSocket: Invalid socket: 10055, "An operation on a socket could not be performed because the system lacked sufficient buffer space or because a queue was full."
+```
+
+Windows is blocking Icinga 2, and as a result, no more TCP connection handling is possible.
+
+Depending on the version, patch level and installed applications, Windows changes its
+range of [ephemeral ports](https://en.wikipedia.org/wiki/Ephemeral_port#Range).
+
+In order to solve this, raise the `MaxUserPort` value in the registry.
+
+```
+HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\Tcpip\Parameters
+
+Value Name: MaxUserPort
+Value Type: DWORD
+Value data: 65534
+```
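+
+A sketch of setting this from an elevated prompt with the built-in `reg` tool (a reboot is usually required for the change to take effect):
+
+```
+C:\> reg add "HKLM\SYSTEM\CurrentControlSet\Services\Tcpip\Parameters" /v MaxUserPort /t REG_DWORD /d 65534 /f
+```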
+
+More details in [this blogpost](https://www.netways.de/blog/2019/01/24/windows-blocking-icinga-2-with-ephemeral-port-range/)
+and this [MS help entry](https://support.microsoft.com/en-us/help/196271/when-you-try-to-connect-from-tcp-ports-greater-than-5000-you-receive-t).
diff --git a/doc/16-upgrading-icinga-2.md b/doc/16-upgrading-icinga-2.md
new file mode 100644
index 0000000..698fb87
--- /dev/null
+++ b/doc/16-upgrading-icinga-2.md
@@ -0,0 +1,977 @@
+# Upgrading Icinga 2 <a id="upgrading-icinga-2"></a>
+
+Upgrading Icinga 2 is usually quite straightforward.
+Ordinarily the only manual steps involved
+are schema updates for the IDO database.
+
+Specific version upgrades are described below. Please note that version
+updates are incremental. An upgrade from v2.6 to v2.8 requires you to
+follow the instructions for v2.7 too.
+
+## Upgrading to v2.14 <a id="upgrading-to-2-14"></a>
+
+### Dependencies and Redundancy Groups <a id="upgrading-to-2-14-dependencies"></a>
+
+Before Icinga v2.12 all dependencies were cumulative.
+I.e. the child was considered reachable only if no dependency was violated.
+In v2.12 and v2.13, all dependencies were redundant.
+I.e. the child was considered unreachable only if no dependency was fulfilled.
+
+v2.14 restores the pre-v2.12 behavior, but allows you to override it.
+I.e. you can still make any number of your dependencies redundant, as you wish.
+For details, read the docs' [redundancy groups section](03-monitoring-basics.md#dependencies-redundancy-groups).
+
+### Email Notification Scripts <a id="upgrading-to-2-14-email-notification"></a>
+
+The email notification scripts shipped with Icinga 2 (`/etc/icinga2/scripts`)
+now link to Icinga DB Web, not the monitoring module.
+Both new and existing installations are affected unless you've altered the scripts.
+
+In the latter case, package managers won't upgrade those "config" files in place,
+but just put files with similar names into the same directory.
+This allows you to patch them yourself based on diff(1).
+
+On the other hand, if you want to stick with the monitoring module for now,
+add a comment (any modification) to the notification scripts before upgrading.
+This way package managers will treat them as changed and won't touch those files.
+
+## Upgrading to v2.13 <a id="upgrading-to-2-13"></a>
+
+### DB IDO Schema Update <a id="upgrading-to-2-13-db-ido"></a>
+
+There is an optional schema update on MySQL which increases the max length of object names from 128 to 255 characters.
+
+Please proceed here for the [MySQL upgrading docs](16-upgrading-icinga-2.md#upgrading-mysql-db).
+
+### Behavior changes <a id="upgrading-to-2-13-behavior-changes"></a>
+
+#### Deletion of child downtimes on services
+
+Service downtimes created while using the `all_services` flag on the [schedule-downtime](12-icinga2-api.md#schedule-downtime) API action
+will now automatically be deleted when deleting the hosts downtime.
+
+#### Windows Event Log
+
+Icinga 2.13 now supports logging to the Windows Event Log. Icinga will now also log messages from the early
+startup phase to the Windows Event Log. These were previously missing from the log file and you could only
+see them by manually starting Icinga in the foreground.
+
+This feature is now enabled and replaces the existing mainlog feature logging to a file. When upgrading, the installer
+will enable the windowseventlog feature and disable the mainlog feature. Logging to a file is still possible.
+If you don't want this configuration migration on upgrade, you can opt-out by installing
+the `%ProgramData%\icinga2\etc\icinga2\features-available\windowseventlog.conf` file before upgrading to Icinga 2.13.
+
+#### Broken API package name validation
+
+This version has replaced a broken regex in the API package validation code which results in package names
+now being validated correctly. Package names should now only consist of alphanumeric characters, dashes and underscores.
+
+This change only applies to newly created packages, in order to keep supporting already existing ones.
+
+## Upgrading to v2.12 <a id="upgrading-to-2-12"></a>
+
+* CLI
+ * New `pki verify` CLI command for better [TLS certificate troubleshooting](15-troubleshooting.md#troubleshooting-certificate-verification)
+
+### Behavior changes <a id="upgrading-to-2-12-behavior-changes"></a>
+
+The behavior of multi parent [dependencies](03-monitoring-basics.md#dependencies) was fixed to e.g. render hosts unreachable when both router uplinks are down.
+
+Previous behavior:
+
+1) parentHost1 DOWN, parentHost2 UP => childHost **not reachable**
+2) parentHost1 DOWN, parentHost2 DOWN => childHost **not reachable**
+
+New behavior:
+
+1) parentHost1 DOWN, parentHost2 UP => childHost **reachable**
+2) parentHost1 DOWN, parentHost2 DOWN => childHost **not reachable**
+
+Please review your [Dependency](09-object-types.md#objecttype-dependency) configuration, as case 1) may now lead to
+different results for:
+
+- `last_reachable` queried via the REST API
+- Notifications, which are no longer suppressed by a faulty reachability calculation
+
+### Breaking changes <a id="upgrading-to-2-12-breaking-changes"></a>
+
+As of v2.12 our [API](12-icinga2-api.md) URL endpoint [`/v1/actions/acknowledge-problem`](12-icinga2-api.md#icinga2-api-actions-acknowledge-problem) refuses to acknowledge an already acknowledged checkable by overwriting the existing acknowledgement.
+To replace an acknowledgement, you have to remove the old one before adding the new one (see the sketch below).
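+
+A sketch of that two-step replacement via the REST API, assuming a hypothetical `disk` service on the example agent host:
+
+```bash
+# remove the existing acknowledgement first
+curl -k -s -u root:icinga -H 'Accept: application/json' -X POST \
+ 'https://localhost:5665/v1/actions/remove-acknowledgement' \
+ -d '{ "type": "Service", "filter": "host.name==\"icinga2-agent1.localdomain\" && service.name==\"disk\"" }'
+
+# then add the new acknowledgement
+curl -k -s -u root:icinga -H 'Accept: application/json' -X POST \
+ 'https://localhost:5665/v1/actions/acknowledge-problem' \
+ -d '{ "type": "Service", "filter": "host.name==\"icinga2-agent1.localdomain\" && service.name==\"disk\"", "author": "admin", "comment": "replacing the previous acknowledgement" }'
+```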
+
+The deprecated parameters `--cert` and `--key` for the `pki save-cert` CLI command
+have been removed from the command and documentation.
+
+## Upgrading to v2.11 <a id="upgrading-to-2-11"></a>
+
+### Bugfixes for 2.11 <a id="upgrading-to-2-11-bugfixes"></a>
+
+2.11.1 on agents/satellites fixes a problem where 2.10.x as config master would send out an unwanted config marker file,
+thus causing the agent to think it is authoritative for the config, and never accepting any new
+config files for the zone(s). **If your config master is 2.11.x already, you are not affected by this problem.**
+
+In order to fix this, upgrade to at least 2.11.1, and purge away the local config sync storage once, then restart.
+
+```bash
+yum install icinga2
+
+rm -rf /var/lib/icinga2/api/zones/*
+rm -rf /var/lib/icinga2/api/zones-stage/*
+
+systemctl restart icinga2
+```
+
+2.11.2 fixes a problem where the newly introduced config sync "check-change-then-reload" functionality
+could cause endless reload loops with agents. The most visible parts are failing command endpoint checks
+with "not connected" UNKNOWN state. **Only applies to HA enabled zones with 2 masters and/or 2 satellites.**
+
+In order to fix this, upgrade all agents/satellites to at least 2.11.2 and restart them.
+
+### Packages <a id="upgrading-to-2-11-packages"></a>
+
+EOL distributions where no packages are available with this release:
+
+* SLES 11
+* Ubuntu 14 LTS
+* RHEL/CentOS 6 x86
+
+Raspbian Packages are available inside the `icinga-buster` repository
+on [https://packages.icinga.com](https://packages.icinga.com/raspbian/).
+Please note that Stretch is not supported due to compiler
+regressions. Upgrading to Raspbian Buster is highly recommended.
+
+#### Added: Boost 1.66+
+
+The rewrite of our core network stack for cluster and REST API
+requires newer Boost versions, specifically >= 1.66. For technical
+details, please continue reading in [this issue](https://github.com/Icinga/icinga2/issues/7041).
+
+Distribution | Repository providing Boost Dependencies
+---------------------|-------------------------------------
+CentOS 7 | [EPEL repository](02-installation.md#centos-repository)
+RHEL 7 | [EPEL repository](02-installation.md#rhel-repository)
+RHEL/CentOS 6 x64 | [packages.icinga.com](https://packages.icinga.com)
+Fedora | Fedora Upstream
+Debian 10 Buster | Debian Upstream
+Debian 9 Stretch | [Backports repository](02-installation.md#debian-backports-repository) **New since 2.11**
+Debian 8 Jessie | [packages.icinga.com](https://packages.icinga.com)
+Ubuntu 18 Bionic | [packages.icinga.com](https://packages.icinga.com)
+Ubuntu 16 Xenial | [packages.icinga.com](https://packages.icinga.com)
+SLES 15 | SUSE Upstream
+SLES 12 | [packages.icinga.com](https://packages.icinga.com) (replaces the SDK repository requirement)
+
+On platforms where EPEL or Backports cannot satisfy this dependency,
+we provide Boost as package on our [package repository](https://packages.icinga.com)
+for your convenience.
+
+After upgrade, you may remove the old Boost packages (1.53 or anything above)
+if you don't need them anymore.
+
+#### Added: .NET Framework 4.6
+
+We modernized the graphical Windows wizard to use the more recent .NET Framework 4.6. This requires that Windows versions
+older than Windows 10/Windows Server 2016 install at least [.NET Framework 4.6](https://www.microsoft.com/en-US/download/details.aspx?id=53344). Starting with Windows 10/Windows Server 2016, .NET Framework 4.6 or higher is installed by default.
+
+The MSI installer package checks whether .NET Framework 4.6 or higher is present; if not, the installation wizard aborts with an error message telling you to install at least .NET Framework 4.6.
+
+#### Removed: YAJL
+
+Our JSON library, namely [YAJL](https://github.com/lloyd/yajl), isn't maintained anymore
+and may cause [crashes](https://github.com/Icinga/icinga2/issues/6684).
+
+It is replaced by [JSON for Modern C++](https://github.com/nlohmann/json) by Niels Lohmann,
+compiled into the binary as a header-only include. It supports our move to C++11 and allows
+us to fix additional UTF8 issues more easily. Read more about its [design goals](https://github.com/nlohmann/json#design-goals)
+and [benchmarks](https://github.com/miloyip/nativejson-benchmark#parsing-time).
+
+### Core <a id="upgrading-to-2-11-core"></a>
+
+#### Reload Handling <a id="upgrading-to-2-11-core-reload-handling"></a>
+
+2.11 provides fixes for unwanted notifications during restarts.
+The updated systemd service file now uses the `KillMode=mixed` setting.
+
+The reload handling was improved with an umbrella process, which means
+that normal runtime operations include **3 processes**. You may need to
+adjust the local instance monitoring of the [procs](08-advanced-topics.md#monitoring-icinga) check.
+
+More details can be found in the [technical concepts](19-technical-concepts.md#technical-concepts-core-reload) chapter.
+
+#### Downtime Notifications <a id="upgrading-to-2-11-core-downtime-notifications"></a>
+
+Imagine that a host/service changes to a HARD NOT-OK state,
+and its check interval is set to a high interval e.g. 1 hour.
+
+A maintenance downtime prevents the notification being sent,
+but once it ends and the host/service is still in a downtime,
+no immediate notification is re-sent but you'll have to wait
+for the next check.
+
+Another scenario is with one-shot notifications (interval=0)
+which would never notify again after the downtime ends and
+the problem state being intact. The state change logic requires the object
+to recover and become HARD NOT-OK again before another notification is sent.
+
+In order to solve these problems with filtered/suppressed notifications
+in downtimes, v2.11 changes the behaviour like this:
+
+- If there was a notification suppressed in a downtime, the core stores that information
+- Once the downtime ends and the problem state is still intact, Icinga checks whether a re-notification should be sent immediately
+
+A new cluster message was added to keep this in sync amongst HA masters.
+
+> **Important**
+>
+> In order to properly use this new feature, all involved endpoints
+> must be upgraded to v2.11.
+
+### Network Stack <a id="upgrading-to-2-11-network-stack"></a>
+
+The core network stack has been rewritten in 2.11 (some say this could be Icinga 3).
+
+You can read the full story [here](https://github.com/Icinga/icinga2/issues/7041).
+
+The only visible changes for users are:
+
+- No more dead-locks with hanging TLS connections (Cluster, REST API)
+- Better log messages in error cases
+- More robust and stable with using external libraries instead of self-written socket I/O
+
+Coming with this release, we've also updated TLS specific requirements
+explained below.
+
+#### TLS 1.2 <a id="upgrading-to-2-11-network-stack-tls-1-2"></a>
+
+v2.11 raises the minimum required TLS version to 1.2.
+This is available since OpenSSL 1.0.1 (EL6 & Debian Jessie).
+
+Older Icinga satellites/agents need to support TLS 1.2 during the TLS
+handshake.
+
+The `api` feature attribute `tls_protocolmin` now only supports the
+value `TLSv1.2` being the default.
+
+#### Hardened Cipher List <a id="upgrading-to-2-11-network-stack-cipher-list"></a>
+
+The previous default cipher list allowed weak ciphers. There's no sane way
+to fix this other than explicitly setting the allowed ciphers.
+
+The new default sets this to:
+
+```
+ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384:AES128-GCM-SHA256
+```
+
+You can override this setting in the [api](09-object-types.md#objecttype-apilistener)
+feature with the `cipher_list` attribute.
+
+In case that one of these ciphers is marked as insecure in the future,
+please let us know with an issue on GitHub.
+
+### Cluster <a id="upgrading-to-2-11-cluster"></a>
+
+#### Agent Hosts with Command Endpoint require a Zone <a id="upgrading-to-2-11-cluster-agent-hosts-command-endpoint-zone"></a>
+
+2.11 fixes bugs where agent host checks would never be scheduled on
+the master. One definite requirement is that the checkable host/service
+is put into a zone.
+
+By default, the Director puts the agent host in `zones.d/master`
+and you're good to go. If you manually manage the configuration,
+the config compiler now throws an error with `command_endpoint`
+being set but no `zone` defined.
+
+The most convenient way with e.g. managing the objects in `conf.d`
+is to move them into the `master` zone. Please continue in the
+[troubleshooting docs](15-troubleshooting.md#troubleshooting-cluster-command-endpoint-errors-agent-hosts-command-endpoint-zone)
+for further instructions.
+
+#### Config Sync <a id="upgrading-to-2-11-cluster-config-sync"></a>
+
+2.11 overhauls the cluster config sync in many ways. This includes the
+following under the hood:
+
+- Synced configuration files are not immediately put into production, but left inside a stage.
+- Unsuccessful config validation never puts the config into production, additional logging and API states are available.
+- Zone directories which are not configured in zones.conf are not included anymore on secondary master/satellites/clients.
+- Synced config change calculation uses checksums instead of timestamps to trigger validation/reload. This is safer, and the usage of timestamps is now deprecated.
+- Don't allow parallel cluster syncs to avoid race conditions with overridden files.
+- Deleted directories and files are now purged, previous versions had a bug.
+
+Whenever a newer child endpoint receives a configuration update without
+checksums, it will log a warning.
+
+```
+Received configuration update without checksums from parent endpoint satellite1. This behaviour is deprecated. Please upgrade the parent endpoint to 2.11+
+```
+
+This is a gentle reminder to upgrade the master and satellites first,
+prior to installing new clients/agents.
+
+Technical details are available in the [technical concepts](19-technical-concepts.md#technical-concepts-cluster-config-sync) chapter.
+
+Since the config sync change detection now uses checksums, it may fail
+with anything other than configuration text files. Syncing binary
+files was never supported, but rumor has it that some users do so.
+
+This is now prohibited and logged.
+
+```
+[2019-08-02 16:03:19 +0200] critical/ApiListener: Ignoring file '/etc/icinga2/zones.d/global-templates/forbidden.exe' for cluster config sync: Does not contain valid UTF8. Binary files are not supported.
+Context:
+ (0) Creating config update for file '/etc/icinga2/zones.d/global-templates/forbidden.exe'
+ (1) Activating object 'api' of type 'ApiListener'
+```
+
+Such binaries wrapped into JSON-RPC cluster messages may always cause changes
+and trigger reload loops. In order to prevent such harm in production,
+use infrastructure tools such as Foreman, Puppet, Ansible, etc. to install
+plugins on the masters, satellites and agents.
+
+##### Config Sync: Zones in Zones <a id="upgrading-to-2-11-cluster-config-sync-zones-in-zones"></a>
+
+The cluster config sync works in such a way that configuration
+put into `/etc/icinga2/zones.d` is only included when the corresponding zone is configured
+outside in `/etc/icinga2/zones.conf`.
+
+If you, for example, create a "Zone Inception" by defining the
+`satellite` zone in `zones.d/master`, the config compiler does not
+re-run and include this zone's config recursively from `zones.d/satellite`.
+
+Since v2.11, the config compiler only includes directories for which a
+zone has been configured. Previously, it would also include renamed old zones,
+broken zones, etc.; those long-standing bugs have now been fixed.
+
+Please consult the [troubleshoot docs](15-troubleshooting.md#troubleshooting-cluster-config-zones-in-zones)
+for concrete examples and solutions.
+
+#### HA-aware Features <a id="upgrading-to-2-11-cluster-ha-aware-features"></a>
+
+v2.11 introduces additional HA functionality similar to the DB IDO feature.
+This enables the feature being active only on one endpoint while the other
+endpoint is paused. When one endpoint is shut down, automatic failover happens.
+
+This feature is turned off by default, keeping the current behaviour. If you need
+it active on just one endpoint at a time, set `enable_ha = true` on both endpoints in the
+feature configuration (see the sketch after the list below).
+
+This affects the following features:
+
+* [Elasticsearch](09-object-types.md#objecttype-elasticsearchwriter)
+* [Gelf](09-object-types.md#objecttype-gelfwriter)
+* [Graphite](09-object-types.md#objecttype-graphitewriter)
+* [InfluxDB](09-object-types.md#objecttype-influxdbwriter)
+* [OpenTsdb](09-object-types.md#objecttype-opentsdbwriter)
+* [Perfdata](09-object-types.md#objecttype-perfdatawriter) (for PNP)
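+
+A minimal sketch for one of these features, assuming a hypothetical Graphite host:
+
+```
+object GraphiteWriter "graphite" {
+  host = "graphite.example.com"
+  port = 2003
+
+  // run this feature on only one HA endpoint at a time
+  enable_ha = true
+}
+```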
+
+### HA Failover <a id="upgrading-to-2-11-ha-failover"></a>
+
+The reconnect failover has been improved, and the default `failover_timeout`
+for the DB IDO features has been lowered from 60 to 30 seconds.
+Object authority updates (required for balancing in the cluster) happen
+more frequently (was 30, now 10 seconds).
+Also the cold startup without object authority updates has been reduced
+from 60 to 30 seconds. This is to allow cluster reconnects (lowered from 60s to 10s in 2.10)
+before actually considering a failover/split brain scenario.
+
+The [IdoMysqlConnection](09-object-types.md#objecttype-idomysqlconnection) and [IdoPgsqlConnection](09-object-types.md#objecttype-idopgsqlconnection)
+objects provide a new attribute named `last_failover` which shows the last failover timestamp.
+This value also is available in the [ido](10-icinga-template-library.md#itl-icinga-ido) CheckCommand output.
+
+
+### CLI Commands <a id="upgrading-to-2-11-cli-commands"></a>
+
+The `troubleshoot` CLI command has been removed. It was never completed,
+and turned out not to provide the required details for GitHub issues anyway.
+
+We haven't asked users on GitHub/Discourse to run it in the past 2 years, nor endorsed
+its use, so we're removing it without a deprecation period.
+
+Issue templates, the troubleshooting docs and support knowledge have
+proven to be better.
+
+#### Permissions <a id="upgrading-to-2-11-cli-commands-permissions"></a>
+
+CLI commands such as `api setup`, `node wizard/setup`, `feature enable/disable/list`
+required root permissions previously. Since the file permissions allow
+the Icinga user to change things already, and users kept asking to
+run Icinga on their own webspace without root permissions, this is now possible
+with 2.11.
+
+If you are running the commands with a different user than the
+compiled `ICINGA_USER` and `ICINGA_GROUP` CMake settings (`icinga` everywhere,
+except Debian with `nagios` for historical reasons), ensure that this
+user has the capabilities to change to a different user.
+
+If you still encounter problems, run the aforementioned CLI commands as root,
+or with sudo.
+
+#### CA List Behaviour Change <a id="upgrading-to-2-11-cli-commands-ca-list"></a>
+
+`ca list` only shows the pending certificate signing requests by default.
+
+You can use the new `--all` parameter to show all signing requests.
+Note that Icinga automatically purges signed requests older than 1 week.
+
+#### New: CA Remove/Restore <a id="upgrading-to-2-11-cli-commands-ca-remove-restore"></a>
+
+`ca remove` allows you to remove pending signing requests. Once the
+master receives a CSR, and it is marked as removed, the request is
+denied.
+
+`ca restore` allows you to restore a removed signing request. You
+can list removed signing requests with the new `--removed` parameter
+for `ca list`.
+
+### Configuration <a id="upgrading-to-2-11-configuration"></a>
+
+The deprecated `concurrent_checks` attribute in the [checker feature](09-object-types.md#objecttype-checkercomponent)
+has no effect anymore if set. Please use the [MaxConcurrentChecks](17-language-reference.md#icinga-constants-global-config)
+constant in [constants.conf](04-configuration.md#constants-conf) instead.
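+
+A sketch of the replacement setting (the value is an example):
+
+```
+/* constants.conf */
+const MaxConcurrentChecks = 512
+```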
+
+### REST API <a id="upgrading-to-2-11-api"></a>
+
+#### Actions <a id="upgrading-to-2-11-api-actions"></a>
+
+The [schedule-downtime](12-icinga2-api.md#icinga2-api-actions-schedule-downtime-host-all-services)
+action supports the `all_services` parameter for Host types. It defaults to `false`.
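+
+A request sketch, assuming the example agent host and hypothetical timestamps:
+
+```bash
+curl -k -s -u root:icinga -H 'Accept: application/json' -X POST \
+ 'https://localhost:5665/v1/actions/schedule-downtime' \
+ -d '{ "type": "Host", "filter": "host.name==\"icinga2-agent1.localdomain\"", "author": "admin", "comment": "maintenance", "start_time": 1735689600, "end_time": 1735693200, "fixed": true, "all_services": true }'
+```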
+
+#### Config Packages <a id="upgrading-to-2-11-api-config-packages"></a>
+
+Deployed configuration packages require an active stage, with many previous
+stages allowed. This mechanism is used by the Icinga Director as an external consumer,
+and by Icinga itself for storing runtime-created objects inside the `_api`
+package.
+
+This includes downtimes and comments, which were sometimes stored in the wrong
+directory path, because the active-stage file was empty/truncated/unreadable at
+this point.
+
+2.11 makes this mechanism more stable and detects broken config packages.
+It will also attempt to fix them; the following log entry is perfectly fine.
+
+```
+[2019-05-10 12:12:09 +0200] information/ConfigObjectUtility: Repairing config package '_api' with stage 'dbe0bef8-c72c-4cc9-9779-da7c4527c5b2'.
+```
+
+If you still encounter problems, please follow [this troubleshooting entry](15-troubleshooting.md#troubleshooting-api-missing-runtime-objects).
+
+### DB IDO MySQL Schema <a id="upgrading-to-2-11-db-ido"></a>
+
+The schema for MySQL contains an optional update which
+drops unneeded indexes. You don't necessarily need to apply
+this update.
+
+### Documentation <a id="upgrading-to-2-11-documentation"></a>
+
+* `Custom attributes` have been renamed to `Custom variables` following the name `vars` and their usage in backends and web interfaces.
+The term `custom attribute` still applies, but referring from the web to the core docs is easier.
+* The distributed environment term `client` has been refined into `agent`. Wordings and images have been adjusted, and `client` is now only used as a
+general term when requesting something from a parent server role.
+* The images for basics, modes and scenarios in the distributed monitoring chapter have been re-created from scratch.
+* `02-getting-started.md` was renamed to `02-installation.md`, `04-configuring-icinga-2.md` into `04-configuration.md`. Apache redirects will be in place.
+
+## Upgrading to v2.10 <a id="upgrading-to-2-10"></a>
+
+### Path Constant Changes <a id="upgrading-to-2-10-path-constant-changes"></a>
+
+During package upgrades you may see a notice that the configuration
+content of features has changed. This is due to a more general approach
+with path constants in v2.10.
+
+The known constants `SysconfDir` and `LocalStateDir` stay intact and won't
+break on upgrade.
+If you are using these constants in your own custom command definitions
+or other objects, you are advised to revise them and update them according
+to the [documentation](17-language-reference.md#icinga-constants).
+
+Example diff:
+
+```
+object NotificationCommand "mail-service-notification" {
+- command = [ SysconfDir + "/icinga2/scripts/mail-service-notification.sh" ]
++ command = [ ConfigDir + "/scripts/mail-service-notification.sh" ]
+```
+
+If you have the `ICINGA2_RUN_DIR` environment variable configured in the
+sysconfig file, you need to rename it to `ICINGA2_INIT_RUN_DIR`. `ICINGA2_STATE_DIR`
+has been removed and this setting has no effect.
+
+> **Note**
+>
+> This is important if you rely on the sysconfig configuration in your own scripts.
+
+### New Constants <a id="upgrading-to-2-10-path-new-constants"></a>
+
+New [Icinga constants](17-language-reference.md#icinga-constants) have been added in this release.
+
+* `Environment` for specifying the Icinga environment. Defaults to not set.
+* `ApiBindHost` and `ApiBindPort` to allow overriding the default ApiListener values. This will be used for an Icinga addon only.
+
+### Configuration: Namespaces <a id="upgrading-to-2-10-configuration-namespaces"></a>
+
+The keywords `namespace` and `using` are now [reserved](17-language-reference.md#reserved-keywords) for the namespace functionality provided
+with v2.10. Read more about how it works [here](17-language-reference.md#namespaces).
+
+### Configuration: ApiListener <a id="upgrading-to-2-10-configuration-apilistener"></a>
+
+Anonymous JSON-RPC connections in the cluster can now be configured with the `max_anonymous_clients`
+attribute.
+The corresponding REST API results from `/v1/status/ApiListener` in `json_rpc` have been renamed
+from `clients` to `anonymous_clients` to better reflect their purpose. Authenticated clients
+are counted as connected endpoints. A similar change applies to the performance data metrics.
+
+The TLS handshake timeout defaults to 10 seconds since v2.8.2. This can now be configured
+with the configuration attribute `tls_handshake_timeout`. Beware of performance issues
+when setting this value too high.
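+
+A minimal sketch showing both new attributes inside the `api` feature; the values below are illustrative examples only, not recommendations:
+
+```
+object ApiListener "api" {
+  max_anonymous_clients = 10   // limit for anonymous JSON-RPC connections
+  tls_handshake_timeout = 30   // seconds
+}
+```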
+
+### API: schedule-downtime Action <a id="upgrading-to-2-10-api-schedule-downtime-action"></a>
+
+The attribute `child_options` previously accepted the values 0, 1 and 2 for specific child downtime settings.
+This behaviour stays intact, but the preferred way now is to use the constants `DowntimeNoChildren`, `DowntimeTriggeredChildren` and `DowntimeNonTriggeredChildren` as values.
+
+### Notifications: Recovery and Acknowledgement <a id="upgrading-to-2-10-notifications"></a>
+
+When a user should be notified on `Problem` and `Acknowledgement`, v2.10 now checks during
+the `Acknowledgement` notification event whether this user has been notified about a problem before.
+
+```
+ types = [ Problem, Acknowledgement, Recovery ]
+```
+
+If **no** `Problem` notification was sent, and the types filter includes problems for this user,
+the `Acknowledgement` notification is **not** sent.
+
+In contrast to that, the following configuration always sends `Acknowledgement` notifications.
+
+```
+ types = [ Acknowledgement, Recovery ]
+```
+
+This change also restores the old behaviour for `Recovery` notifications. The above configuration
+leaving out the `Problem` type can be used to only receive recovery notifications. If `Problem`
+is added to the types again, Icinga 2 checks whether it has notified a user of a problem when
+sending the recovery notification.
+
+More details can be found in [this PR](https://github.com/Icinga/icinga2/pull/6527).
+
+### Stricter Configuration Validation <a id="upgrading-to-2-10-stricter-configuration-validation"></a>
+
+Some configuration errors are now fatal. While such configuration never actually worked before, Icinga 2 now refuses to start with it.
+
+For example, the following configuration triggers a fatal error in 2.10:
+
+```
+ object Zone "XXX" {
+ endpoints = [ "master-server" ]
+ parent = "global-templates"
+ }
+```
+
+```
+critical/config: Error: Zone 'XXX' can not have a global zone as parent.
+```
+
+### Package Changes <a id="upgrading-to-2-10-package-changes"></a>
+
+Debian/Ubuntu drops the `libicinga2` package. `apt-get upgrade icinga2`
+won't remove this package, leaving the upgrade in an unsatisfied state.
+
+Please use `apt-get full-upgrade` or `apt-get dist-upgrade` instead, as explained [here](https://github.com/Icinga/icinga2/issues/6695#issuecomment-430585915).
+
+On RHEL/CentOS/Fedora, `icinga2-libs` has been obsoleted. Unfortunately yum's dependency
+resolver then doesn't allow installing versions older than 2.10. Please
+read [here](https://github.com/Icinga/icinga-packaging/issues/114#issuecomment-429264827)
+for details.
+
+RPM packages dropped the [Classic UI](16-upgrading-icinga-2.md#upgrading-to-2-8-removed-classicui-config-package)
+package in v2.8, but the Debian/Ubuntu packages were overlooked; this release removes them as well.
+Icinga 1.x is EOL by the end of 2018, so plan your migration to [Icinga Web 2](https://icinga.com/docs/icingaweb2/latest/).
+
+## Upgrading to v2.9 <a id="upgrading-to-2-9"></a>
+
+### Deprecation and Removal Notes <a id="upgrading-to-2-9-deprecation-removal-notes"></a>
+
+- Deprecation of 1.x compatibility features: `StatusDataWriter`, `CompatLogger`, `CheckResultReader`. Their removal is scheduled for 2.11.
+Icinga 1.x is EOL and will be out of support by the end of 2018.
+- Removal of Icinga Studio. It has always been experimental and did not satisfy our quality standards, so we removed it.
+
+### Sysconfig Changes <a id="upgrading-to-2-9-sysconfig-changes"></a>
+
+The security fixes in v2.8.2 required moving specific runtime settings
+into the sysconfig file and environment. As part of this, Icinga 2
+itself parsed this file and read the required variables. This generated
+numerous false-positive log messages and led to many support questions. v2.9.0
+changes this to the standard approach of reading these variables from the environment
+and using sane compile-time defaults.
+
+> **Note**
+>
+> In order to upgrade, remove everything in the sysconfig file and re-apply
+> your changes.
+
+There is a bug with existing sysconfig files where path variables are not expanded
+because systemd [does not support](https://github.com/systemd/systemd/issues/2123)
+shell variable expansion. This worked with SysVInit though.
+
+Edit the sysconfig file and either remove everything, or edit this line
+on RHEL 7. Modify the path for other distributions.
+
+```
+vim /etc/sysconfig/icinga2
+
+-ICINGA2_PID_FILE=$ICINGA2_RUN_DIR/icinga2/icinga2.pid
++ICINGA2_PID_FILE=/run/icinga2/icinga2.pid
+```
+
+If you want to adjust, for example, the number of open files for the Icinga application,
+just add the setting like this on RHEL 7:
+
+```
+vim /etc/sysconfig/icinga2
+
+ICINGA2_RLIMIT_FILES=50000
+```
+
+Restart Icinga 2 afterwards; the systemd service file automatically puts the
+value into the application's environment, where it is read on startup.
+
+### Setup Wizard Changes <a id="upgrading-to-2-9-setup-wizard-changes"></a>
+
+Client and satellite setups previously had the example configuration in `conf.d` included
+by default. This caused trouble on config sync, or with locally executed checks generating
+wrong check results for command endpoint clients.
+
+In v2.9.0 `node wizard`, `node setup` and the graphical Windows wizard will disable
+the inclusion by default. You can opt-out and explicitly enable it again if needed.
+
+In addition to the default global zones `global-templates` and `director-global`,
+the setup wizards also offer to specify your own custom global zones and generate
+the required configuration automatically.
+
+The setup wizards also use fully qualified names for Zone and Endpoint object generation,
+either the default values (FQDN for clients) or the user-supplied input. This removes
+the dependency on the `NodeName` and `ZoneName` constants and makes the parent-child
+relationship immediately visible. This also helps those doing support in production environments.
+
+### CLI Command Changes <a id="upgrading-to-2-9-cli-changes"></a>
+
+The [node setup](06-distributed-monitoring.md#distributed-monitoring-automation-cli-node-setup)
+parameter `--master_host` was deprecated and replaced with `--parent_host`.
+This parameter is now optional to allow connection-less client setups similar to the `node wizard`
+CLI command. The `parent_zone` parameter has been added to modify the parent zone name e.g.
+for client-to-satellite setups.
+
+The `api user` command released in v2.8.2 turned out to cause huge problems with
+configuration validation, Windows restarts and OpenSSL versions. It has therefore been removed in 2.9;
+the `password_hash` attribute for the ApiUser object stays intact but has no effect. This ensures
+that clients don't break on upgrade. We will revise this feature in future development iterations.
+
+### Configuration Changes <a id="upgrading-to-2-9-config-changes"></a>
+
+The CORS attributes `access_control_allow_credentials`, `access_control_allow_headers` and
+`access_control_allow_methods` are now controlled by Icinga 2 and cannot be changed anymore.
+
+### Unique Generated Names <a id="upgrading-to-2-9-unique-name-changes"></a>
+
+With the removal of RHEL 5 as a supported platform, we can finally use real unique IDs.
+This is reflected in generated names, e.g. API stage names. Previously these were a handcrafted
+mix of the local FQDN, timestamps and random numbers.
+
+### Custom Vars not updating <a id="upgrading-to-2-9-custom-vars-not-updating"></a>
+
+In rare cases, the custom vars of objects created prior to 2.9.0 may not be updated when changed. To
+remedy this, truncate the customvar tables and restart Icinga 2. The following is an example of how to do this with MySQL:
+
+```
+$ mysql -uroot -picinga icinga
+MariaDB [icinga]> truncate icinga_customvariables;
+Query OK, 0 rows affected (0.05 sec)
+MariaDB [icinga]> truncate icinga_customvariablestatus;
+Query OK, 0 rows affected (0.03 sec)
+MariaDB [icinga]> exit
+Bye
+$ sudo systemctl restart icinga2
+```
+
+Custom vars should now stay up to date.
+
+
+## Upgrading to v2.8.2 <a id="upgrading-to-2-8-2"></a>
+
+With version 2.8.2 the location of settings formerly found in `/etc/icinga2/init.conf` has changed. They are now
+located in the sysconfig, `/etc/sysconfig/icinga2` (RPM) or `/etc/default/icinga2` (DPKG) on most systems. The
+`init.conf` file has been removed and its settings will be ignored. These changes are only relevant if you edited the
+`init.conf`. Below is a table displaying the new names for the affected settings.
+
+ Old `init.conf` | New `sysconfig/icinga2`
+ ----------------|------------------------
+ RunAsUser | ICINGA2\_USER
+ RunAsGroup | ICINGA2\_GROUP
+ RLimitFiles | ICINGA2\_RLIMIT\_FILES
+ RLimitProcesses | ICINGA2\_RLIMIT\_PROCESSES
+ RLimitStack | ICINGA2\_RLIMIT\_STACK
+
+## Upgrading to v2.8 <a id="upgrading-to-2-8"></a>
+
+### DB IDO Schema Update to 2.8.0 <a id="upgrading-to-2-8-db-ido"></a>
+
+There are additional indexes and schema fixes which require an update.
+
+Please proceed here for [MySQL](16-upgrading-icinga-2.md#upgrading-mysql-db) or [PostgreSQL](16-upgrading-icinga-2.md#upgrading-postgresql-db).
+
+> **Note**
+>
+> `2.8.1.sql` fixes a unique constraint problem with fresh 2.8.0 installations.
+> You don't need this update if you are upgrading from an older version.
+
+### Changed Certificate Paths <a id="upgrading-to-2-8-certificate-paths"></a>
+
+The default certificate path was changed from `/etc/icinga2/pki` to
+`/var/lib/icinga2/certs`.
+
+ Old Path | New Path
+ ---------------------------------------------------|---------------------------------------------------
+ `/etc/icinga2/pki/icinga2-agent1.localdomain.crt` | `/var/lib/icinga2/certs/icinga2-agent1.localdomain.crt`
+ `/etc/icinga2/pki/icinga2-agent1.localdomain.key` | `/var/lib/icinga2/certs/icinga2-agent1.localdomain.key`
+ `/etc/icinga2/pki/ca.crt` | `/var/lib/icinga2/certs/ca.crt`
+
+This applies to Windows clients in the same way: `%ProgramData%\etc\icinga2\pki`
+was moved to `%ProgramData%\var\lib\icinga2\certs`.
+
+ Old Path | New Path
+ ----------------------------------------------------------------|----------------------------------------------------------------
+ `%ProgramData%\etc\icinga2\pki\icinga2-agent1.localdomain.crt` | `%ProgramData%\var\lib\icinga2\certs\icinga2-agent1.localdomain.crt`
+ `%ProgramData%\etc\icinga2\pki\icinga2-agent1.localdomain.key` | `%ProgramData%\var\lib\icinga2\certs\icinga2-agent1.localdomain.key`
+ `%ProgramData%\etc\icinga2\pki\ca.crt` | `%ProgramData%\var\lib\icinga2\certs\ca.crt`
+
+
+> **Note**
+>
+> The default expected path for client certificates is `/var/lib/icinga2/certs/ + NodeName + {.crt,.key}`.
+> The `NodeName` constant is usually the FQDN and certificate common name (CN). Check the [conventions](06-distributed-monitoring.md#distributed-monitoring-conventions)
+> section inside the Distributed Monitoring chapter.
+
+The [setup CLI commands](06-distributed-monitoring.md#distributed-monitoring-setup-master) and the
+default [ApiListener configuration](06-distributed-monitoring.md#distributed-monitoring-apilistener)
+have been adjusted to these paths too.
+
+The [ApiListener](09-object-types.md#objecttype-apilistener) object attributes `cert_path`, `key_path`
+and `ca_path` have been deprecated and removed from the example configuration.
+
+#### Migration Path <a id="upgrading-to-2-8-certificate-paths-migration-path"></a>
+
+> **Note**
+>
+> Icinga 2 automatically migrates the certificates to the new default location if they
+> are configured and detected in `/etc/icinga2/pki`.
+
+During startup, the migration kicks in and copies the certificates to the new
+location. This also happens if someone updates the certificate files in `/etc/icinga2/pki`,
+ensuring that the new certificate location always has the latest files.
+
+This has been implemented in the Icinga 2 binary to ensure it works on both Linux/Unix
+and the Windows platform.
+
+If you are not using the built-in CLI commands and setup wizards to deploy the client certificates,
+please ensure to update your deployment tools/scripts. This mainly affects
+
+* Puppet modules
+* Ansible playbooks
+* Chef cookbooks
+* Salt recipes
+* Custom scripts, e.g. Windows Powershell or self-made implementations
+
+In order to support a smooth migration between versions older than 2.8 and future releases,
+the built-in certificate migration path is planned to exist as long as the deprecated
+`ApiListener` object attributes exist.
+
+You are safe to use the existing configuration paths inside the `api` feature.
+
+**Example**
+
+Look at the following example taken from the Director Linux deployment script for clients.
+
+* Ensure that the default certificate path is changed from `/etc/icinga2/pki` to `/var/lib/icinga2/certs`.
+
+```
+-ICINGA2_SSL_DIR="${ICINGA2_CONF_DIR}/pki"
++ICINGA2_SSL_DIR="${ICINGA2_STATE_DIR}/lib/icinga2/certs"
+```
+
+* Remove the ApiListener configuration attributes.
+
+```
+object ApiListener "api" {
+- cert_path = SysconfDir + "/icinga2/pki/${ICINGA2_NODENAME}.crt"
+- key_path = SysconfDir + "/icinga2/pki/${ICINGA2_NODENAME}.key"
+- ca_path = SysconfDir + "/icinga2/pki/ca.crt"
+ accept_commands = true
+ accept_config = true
+}
+```
+
+Test the script with a fresh client installation before putting it into production.
+
+> **Tip**
+>
+> Please support module and script developers in their migration. If you find
+> any project which would require these changes, create an issue or a patchset in a PR
+> and help them out. Thanks in advance!
+
+### On-Demand Signing and CA Proxy <a id="upgrading-to-2-8-on-demand-signing-ca-proxy"></a>
+
+Icinga 2 v2.8 supports the following features inside the cluster:
+
+* Forward signing requests from clients through a satellite instance to a signing master ("CA Proxy").
+* Signing requests without a ticket. The master instance allows listing and signing CSRs ("On-Demand Signing").
+
+In order to use these features, **all instances must be upgraded to v2.8**.
+
+More details in [this chapter](06-distributed-monitoring.md#distributed-monitoring-setup-sign-certificates-master).
+
+### Windows Client <a id="upgrading-to-2-8-windows-client"></a>
+
+Windows versions older than Windows 10/Server 2016 require the [Universal C Runtime for Windows](https://support.microsoft.com/en-us/help/2999226/update-for-universal-c-runtime-in-windows).
+
+### Removed Bottom Up Client Mode <a id="upgrading-to-2-8-removed-bottom-up-client-mode"></a>
+
+This client mode was deprecated in 2.6 and was removed in 2.8.
+
+The node CLI command does not provide `list` or `update-config` anymore.
+
+> **Note**
+>
+> The old migration guide can be found on [GitHub](https://github.com/Icinga/icinga2/blob/v2.7.0/doc/06-distributed-monitoring.md#bottom-up-migration-to-top-down-).
+
+The clients don't need to have a local `conf.d` directory included.
+
+Icinga 2 continues to run with the generated and imported configuration.
+You are advised to [migrate](https://github.com/Icinga/icinga2/issues/4798)
+any existing configuration to the "top down" mode with the help of the
+Icinga Director or config management tools such as Puppet, Ansible, etc.
+
+
+### Removed Classic UI Config Package <a id="upgrading-to-2-8-removed-classicui-config-package"></a>
+
+The config meta package `classicui-config` and the configuration files
+have been removed. You need to manually configure
+this legacy interface. Create a backup of the configuration
+before upgrading and re-configure it afterwards.
+
+
+### Flapping Configuration <a id="upgrading-to-2-8-flapping-configuration"></a>
+
+Icinga 2 v2.8 implements a new flapping detection algorithm which splits the
+threshold configuration into low and high settings.
+
+`flapping_threshold` is deprecated and does not have any effect when flapping
+is enabled. Please remove `flapping_threshold` from your configuration. This
+attribute will be removed in v2.9.
+
+Instead you need to use the `flapping_threshold_low` and `flapping_threshold_high`
+attributes. More details can be found [here](08-advanced-topics.md#check-flapping).
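+
+A short sketch of the new attributes on a Host object; the threshold values shown here are just examples:
+
+```
+object Host "example-host" {
+  check_command = "hostalive"
+  address = "192.168.0.1"
+
+  enable_flapping = true
+  flapping_threshold_low = 25
+  flapping_threshold_high = 30
+}
+```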
+
+### Deprecated Configuration Attributes <a id="upgrading-to-2-8-deprecated-configuration"></a>
+
+ Object | Attribute
+ --------------|------------------
+ ApiListener | cert\_path (migration happens)
+ ApiListener | key\_path (migration happens)
+ ApiListener | ca\_path (migration happens)
+ Host, Service | flapping\_threshold (has no effect)
+
+## Upgrading to v2.7 <a id="upgrading-to-2-7"></a>
+
+v2.7.0 provided new notification scripts and commands. Please ensure to
+update your configuration accordingly. An advisory has been published [here](https://icinga.com/2017/08/23/advisory-for-icinga-2-v2-7-update-and-mail-notification-scripts/).
+
+In case you are having trouble with OpenSSL 1.1.0 and the
+public CA certificates, please read [this advisory](https://icinga.com/2017/08/30/advisory-for-ssl-problems-with-leading-zeros-on-openssl-1-1-0/)
+and check the [troubleshooting chapter](15-troubleshooting.md#troubleshooting).
+
+If Icinga 2 fails to start with an empty reference to `$ICINGA2_CACHE_DIR`
+ensure to set it inside `/etc/sysconfig/icinga2` (RHEL) or `/etc/default/icinga2` (Debian).
+
+RPM packages will create a file called `/etc/sysconfig/icinga2.rpmnew`
+if you have modified the original file.
+
+Example on CentOS 7:
+
+```
+vim /etc/sysconfig/icinga2
+
+ICINGA2_CACHE_DIR=/var/cache/icinga2
+
+systemctl restart icinga2
+```
+
+## Upgrading the MySQL database <a id="upgrading-mysql-db"></a>
+
+If you want to upgrade an existing Icinga 2 instance, check the
+`/usr/share/icinga2-ido-mysql/schema/upgrade` directory for incremental schema upgrade file(s).
+
+> **Note**
+>
+> If there isn't an upgrade file for your current version available, there's nothing to do.
+
+Apply all database schema upgrade files incrementally.
+
+```
+# mysql -u root -p icinga < /usr/share/icinga2-ido-mysql/schema/upgrade/<version>.sql
+```
+
+The Icinga 2 DB IDO feature checks the required database schema version on startup
+and generates a log message if it is not satisfied.
+
+
+**Example:** You are upgrading Icinga 2 from version `2.4.0` to `2.8.0`. Look into
+the `upgrade` directory:
+
+```
+$ ls /usr/share/icinga2-ido-mysql/schema/upgrade/
+2.0.2.sql 2.1.0.sql 2.2.0.sql 2.3.0.sql 2.4.0.sql 2.5.0.sql 2.6.0.sql 2.8.0.sql
+```
+
+There are three new upgrade files called `2.5.0.sql`, `2.6.0.sql` and `2.8.0.sql`
+which must be applied incrementally to your IDO database.
+
+```bash
+mysql -u root -p icinga < /usr/share/icinga2-ido-mysql/schema/upgrade/2.5.0.sql
+mysql -u root -p icinga < /usr/share/icinga2-ido-mysql/schema/upgrade/2.6.0.sql
+mysql -u root -p icinga < /usr/share/icinga2-ido-mysql/schema/upgrade/2.8.0.sql
+```
+
+## Upgrading the PostgreSQL database <a id="upgrading-postgresql-db"></a>
+
+If you want to upgrade an existing Icinga 2 instance, check the
+`/usr/share/icinga2-ido-pgsql/schema/upgrade` directory for incremental schema upgrade file(s).
+
+> **Note**
+>
+> If there isn't an upgrade file for your current version available, there's nothing to do.
+
+Apply all database schema upgrade files incrementally.
+
+```
+# export PGPASSWORD=icinga
+# psql -U icinga -d icinga < /usr/share/icinga2-ido-pgsql/schema/upgrade/<version>.sql
+```
+
+The Icinga 2 DB IDO feature checks the required database schema version on startup
+and generates a log message if it is not satisfied.
+
+**Example:** You are upgrading Icinga 2 from version `2.4.0` to `2.8.0`. Look into
+the `upgrade` directory:
+
+```
+$ ls /usr/share/icinga2-ido-pgsql/schema/upgrade/
+2.0.2.sql 2.1.0.sql 2.2.0.sql 2.3.0.sql 2.4.0.sql 2.5.0.sql 2.6.0.sql 2.8.0.sql
+```
+
+There are three new upgrade files called `2.5.0.sql`, `2.6.0.sql` and `2.8.0.sql`
+which must be applied incrementally to your IDO database.
+
+```bash
+export PGPASSWORD=icinga
+psql -U icinga -d icinga < /usr/share/icinga2-ido-pgsql/schema/upgrade/2.5.0.sql
+psql -U icinga -d icinga < /usr/share/icinga2-ido-pgsql/schema/upgrade/2.6.0.sql
+psql -U icinga -d icinga < /usr/share/icinga2-ido-pgsql/schema/upgrade/2.8.0.sql
+```
diff --git a/doc/17-language-reference.md b/doc/17-language-reference.md
new file mode 100644
index 0000000..5686d55
--- /dev/null
+++ b/doc/17-language-reference.md
@@ -0,0 +1,1371 @@
+# Language Reference <a id="language-reference"></a>
+
+## Object Definition <a id="object-definition"></a>
+
+Icinga 2 features an object-based configuration format. You can define new
+objects using the `object` keyword:
+
+```
+object Host "host1.example.org" {
+ display_name = "host1"
+
+ address = "192.168.0.1"
+ address6 = "2001:db8:1234::42"
+}
+```
+
+In general you need to write each statement on a new line. Expressions started
+with `{`, `(` and `[` extend until the matching closing character and can be broken
+up into multiple lines.
+
+Alternatively you can write multiple statements on a single line by separating
+them with a semicolon:
+
+```
+object Host "host1.example.org" {
+ display_name = "host1"
+
+ address = "192.168.0.1"; address6 = "2001:db8:1234::42"
+}
+```
+
+Each object is uniquely identified by its type (`Host`) and name
+(`host1.example.org`). Some types have composite names, e.g. the
+`Service` type which uses the `host_name` attribute and the name
+you specified to generate its object name.
+
+Exclamation marks (!) are not permitted in object names.
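+
+For example, the full name of a `Service` object is composed of its `host_name` attribute and the given service name, joined by an exclamation mark:
+
+```
+object Service "ping4" {
+  host_name = "host1.example.org"
+  check_command = "ping4"
+}
+```
+
+The resulting full object name is `host1.example.org!ping4`.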
+
+Objects can contain a comma-separated list of property
+declarations. Instead of commas, semicolons may also be used.
+The data types available for property values are described in the [Expressions](17-language-reference.md#expressions) chapter below.
+
+All objects have at least the following attributes:
+
+Attribute | Description
+---------------------|-----------------------------
+name | The name of the object. This attribute can be modified in the object definition to override the name specified with the `object` directive.
+type | The type of the object.
+
+## Expressions <a id="expressions"></a>
+
+The following expressions can be used on the right-hand side of assignments.
+
+### Numeric Literals <a id="numeric-literals"></a>
+
+A floating-point number.
+
+Example:
+
+```
+27.3
+```
+
+### Duration Literals <a id="duration-literals"></a>
+
+Similar to floating-point numbers except for the fact that they support
+suffixes to help with specifying time durations.
+
+Example:
+
+```
+2.5m
+```
+
+Supported suffixes include ms (milliseconds), s (seconds), m (minutes),
+h (hours) and d (days).
+
+Duration literals are converted to seconds by the config parser and
+are treated like numeric literals.
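+
+For example, the following two assignments are equivalent:
+
+```
+check_interval = 1.5m
+check_interval = 90
+```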
+
+### String Literals <a id="string-literals"></a>
+
+A string.
+
+Example:
+
+```
+"Hello World!"
+```
+
+#### String Literals Escape Sequences <a id="string-literals-escape-sequences"></a>
+
+Certain characters need to be escaped. The following escape sequences
+are supported:
+
+Character | Escape sequence
+--------------------------|------------------------------------
+" | \\"
+\\ | \\\\
+&lt;TAB&gt; | \\t
+&lt;CARRIAGE-RETURN&gt; | \\r
+&lt;LINE-FEED&gt; | \\n
+&lt;BEL&gt; | \\b
+&lt;FORM-FEED&gt; | \\f
+
+In addition to these pre-defined escape sequences you can specify
+arbitrary ASCII characters using the backslash character (\\) followed
+by an ASCII character in octal encoding.
+
+### Multi-line String Literals <a id="multiline-string-literals"></a>
+
+Strings spanning multiple lines can be specified by enclosing them in
+{{{ and }}}.
+
+Example:
+
+```
+{{{This
+is
+a multi-line
+string.}}}
+```
+
+Unlike in ordinary strings special characters do not have to be escaped
+in multi-line string literals.
+
+### Boolean Literals <a id="boolean-literals"></a>
+
+The keywords `true` and `false` are used to denote truth values.
+
+### Null Value <a id="null-value"></a>
+
+The `null` keyword can be used to specify an empty value.
+
+### Dictionary <a id="dictionary"></a>
+
+An unordered list of key-value pairs. Keys must be unique and are
+compared in a case-sensitive manner.
+
+Individual key-value pairs must either be comma-separated or on separate lines.
+The comma after the last key-value pair is optional.
+
+Example:
+
+```
+{
+ address = "192.168.0.1"
+ port = 443
+}
+```
+
+Identifiers may not contain certain characters (e.g. space) or start
+with certain characters (e.g. digits). If you want to use a dictionary
+key that is not a valid identifier, you can enclose the key in double
+quotes.
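+
+For example, a key containing a space must be quoted:
+
+```
+{
+  "free space" = "5%"
+  port = 443
+}
+```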
+
+### Array <a id="array"></a>
+
+An ordered list of values.
+
+Individual array elements must be comma-separated.
+The comma after the last element is optional.
+
+Example:
+
+```
+[ "hello", 42 ]
+```
+
+An array may simultaneously contain values of different types, such as
+strings and numbers.
+
+### Operators <a id="expression-operators"></a>
+
+The following operators are supported in expressions. The operators are sorted by descending precedence.
+
+Operator | Precedence | Examples (Result) | Description
+---------|------------|-----------------------------------------------|--------------------------------
+`()` | 1 | (3 + 3) * 5 | Groups sub-expressions
+`()` | 1 | Math.random() | Calls a function
+`[]` | 1 | a[3] | Array subscript
+`.` | 1 | a.b | Element access
+`!` | 2 | !"Hello" (false), !false (true) | Logical negation of the operand
+`~` | 2 | ~true (false) | Bitwise negation of the operand
+`+` | 2 | +3 | Unary plus
+`-` | 2 | -3 | Unary minus
+`&` | 2 | &var (reference to 'var') | Reference operator
+`*` | 2 | *var | Indirection operator
+`*` | 3 | 5m * 10 (3000) | Multiplies two numbers
+`/` | 3 | 5m / 5 (60) | Divides two numbers
+`%` | 3 | 17 % 12 (5) | Remainder after division
+`+` | 4 | 1 + 3 (4), "hello " + "world" ("hello world") | Adds two numbers; concatenates strings
+`-` | 4 | 3 - 1 (2) | Subtracts two numbers
+`<<` | 5 | 4 << 8 (1024) | Left shift
+`>>` | 5 | 1024 >> 4 (64) | Right shift
+`<` | 6 | 3 < 5 (true) | Less than
+`>` | 6 | 3 > 5 (false) | Greater than
+`<=` | 6 | 3 <= 3 (true) | Less than or equal
+`>=` | 6 | 3 >= 3 (true) | Greater than or equal
+`in` | 7 | "foo" in [ "foo", "bar" ] (true) | Element contained in array
+`!in` | 7 | "foo" !in [ "bar", "baz" ] (true) | Element not contained in array
+`==` | 8 | "hello" == "hello" (true), 3 == 5 (false) | Equal to
+`!=` | 8 | "hello" != "world" (true), 3 != 3 (false) | Not equal to
+`&` | 9 | 7 & 3 (3) | Binary AND
+`^` | 10 | 17 ^ 12 (29) | Bitwise XOR
+<code>&#124;</code> | 11 | 2 &#124; 3 (3) | Binary OR
+`&&` | 12 | true && false (false), 3 && 7 (7), 0 && 7 (0) | Logical AND
+<code>&#124;&#124;</code> | 13 | true &#124;&#124; false (true), 0 &#124;&#124; 7 (7)| Logical OR
+`=` | 14 | a = 3 | Assignment
+`=>` | 15 | x => x * x (function with arg x) | Lambda, for loop
+`?` | 16 | (2 * 3 > 5) ? 1 : 0 (1) | [Ternary operator](17-language-reference.md#conditional-statements-ternary)
+
+### References <a id="references"></a>
+
+A reference to a value can be obtained using the `&` operator. The `*` operator can be used
+to dereference a reference:
+
+```
+var value = "Hello!"
+var p = &value /* p refers to value */
+*p = "Hi!"
+log(value) // Prints "Hi!" because the variable was changed
+```
+
+### Namespaces <a id="namespaces"></a>
+
+Namespaces can be used to organize variables and functions. They are used to avoid name conflicts. The `namespace`
+keyword is used to create a new namespace:
+
+```
+namespace Utils {
+ function calculate() {
+ return 2 + 2
+ }
+}
+```
+
+The namespace is made available as a global variable which has the namespace's name (e.g. `Utils`):
+
+```
+Utils.calculate()
+```
+
+The `using` keyword can be used to make all attributes in a namespace available to a script without having to
+explicitly specify the namespace's name for each access:
+
+```
+using Utils
+calculate()
+```
+
+The `using` keyword only has an effect for the current file and only for code that follows the keyword:
+
+```
+calculate() // This will not work.
+using Utils
+```
+
+The following namespaces are automatically imported as if by using the `using` keyword:
+
+* System
+* System.Configuration
+* Types
+* Icinga
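+
+For example, assuming the `log()` function is part of the `System` namespace (see the [Library Reference](18-library-reference.md#library-reference)), the following two calls are equivalent because `System` is imported automatically:
+
+```
+System.log("Hello")
+log("Hello")
+```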
+
+### Function Calls <a id="function-calls"></a>
+
+Functions can be called using the `()` operator:
+
+```
+const MyGroups = [ "test1", "test" ]
+
+{
+ check_interval = len(MyGroups) * 1m
+}
+```
+
+A list of available functions is available in the [Library Reference](18-library-reference.md#library-reference) chapter.
+
+## Assignments <a id="dictionary-operators"></a>
+
+In addition to the `=` operator shown above a number of other operators
+to manipulate attributes are supported. Here's a list of all
+available operators (the outermost `{` `}` stand for a local variable scope):
+
+### Operator = <a id="operator-assignment"></a>
+
+Sets an attribute to the specified value.
+
+Example:
+
+```
+{
+ a = 5
+ a = 7
+}
+```
+
+In this example `a` has the value `7` after both instructions are executed.
+
+### Operator += <a id="operator-additive-assignment"></a>
+
+The += operator is a shortcut. The following expression:
+
+```
+{
+ a = [ "hello" ]
+ a += [ "world" ]
+}
+```
+
+is equivalent to:
+
+```
+{
+ a = [ "hello" ]
+ a = a + [ "world" ]
+}
+```
+
+### Operator -= <a id="operator-substractive-assignment"></a>
+
+The -= operator is a shortcut. The following expression:
+
+```
+{
+ a = 10
+ a -= 5
+}
+```
+
+is equivalent to:
+
+```
+{
+ a = 10
+ a = a - 5
+}
+```
+
+### Operator \*= <a id="operator-multiply-assignment"></a>
+
+The *= operator is a shortcut. The following expression:
+
+```
+{
+ a = 60
+ a *= 5
+}
+```
+
+is equivalent to:
+
+```
+{
+ a = 60
+ a = a * 5
+}
+```
+
+### Operator /= <a id="operator-dividing-assignment"></a>
+
+The /= operator is a shortcut. The following expression:
+
+```
+{
+ a = 300
+ a /= 5
+}
+```
+
+is equivalent to:
+
+```
+{
+ a = 300
+ a = a / 5
+}
+```
+
+## Indexer <a id="indexer"></a>
+
+The indexer syntax provides a convenient way to set dictionary elements.
+
+Example:
+
+```
+{
+ hello.key = "world"
+}
+```
+
+Example (alternative syntax):
+
+```
+{
+ hello["key"] = "world"
+}
+```
+
+This is equivalent to writing:
+
+```
+{
+ hello += {
+ key = "world"
+ }
+}
+```
+
+If the `hello` attribute does not already have a value, it is automatically initialized to an empty dictionary.
+
+## Template Imports <a id="template-imports"></a>
+
+Objects can import attributes from other objects.
+
+Example:
+
+```
+template Host "default-host" {
+ vars.colour = "red"
+}
+
+template Host "test-host" {
+ import "default-host"
+
+ vars.colour = "blue"
+}
+
+object Host "localhost" {
+ import "test-host"
+
+ address = "127.0.0.1"
+ address6 = "::1"
+}
+```
+
+The `default-host` and `test-host` objects are marked as templates
+using the `template` keyword. Unlike ordinary objects templates are not
+instantiated at run-time. Parent objects do not necessarily have to be
+templates, however in general they are.
+
+The `vars` dictionary for the `localhost` object is inherited from the imported templates;
+the custom variable `colour` has the value `"blue"` because `test-host` overrides the value set in `default-host`.
+
+Parent objects are resolved in the order they're specified using the
+`import` keyword.
+
+Default templates which are automatically imported into all object definitions
+can be specified using the `default` keyword:
+
+```
+template CheckCommand "plugin-check-command" default {
+ // ...
+}
+```
+
+Default templates are imported before any other user-specified statement in an
+object definition is evaluated.
+
+If there are multiple default templates the order in which they are imported
+is unspecified.
+
+## Constants <a id="constants"></a>
+
+Global constants can be set using the `const` keyword:
+
+```
+const VarName = "some value"
+```
+
+Once defined a constant can be accessed from any file. Constants cannot be changed
+once they are set.
+
+> **Tip**
+>
+> Best practice is to manage constants in the [constants.conf](04-configuration.md#constants-conf) file.
+
+### Icinga 2 Specific Constants <a id="icinga-constants"></a>
+
+Icinga 2 provides a number of special global constants. These include directory paths, global configuration
+and runtime parameters for the application version and (build) platform.
+
+#### Directory Path Constants <a id="icinga-constants-director-path"></a>
+
+Constant | Description
+--------------------|-------------------
+ConfigDir |**Read-only.** Main configuration directory. Usually set to `/etc/icinga2`.
+DataDir |**Read-only.** Runtime data for the Icinga daemon. Usually set to `/var/lib/icinga2`.
+LogDir |**Read-only.** Logfiles from the daemon. Usually set to `/var/log/icinga2`.
+CacheDir |**Read-only.** Cached status information of the daemon. Usually set to `/var/cache/icinga2`.
+SpoolDir |**Read-only.** Spool directory for certain data outputs. Usually set to `/var/spool/icinga2`.
+InitRunDir |**Read-only.** Directory for PID files and sockets in daemon mode. Usually set to `/run/icinga2`.
+ZonesDir |**Read-only.** Contains the path of the zones.d directory. Defaults to `ConfigDir + "/zones.d"`.
+
+#### Global Configuration Constants <a id="icinga-constants-global-config"></a>
+
+Constant | Description
+--------------------|-------------------
+Vars |**Read-write.** Contains a dictionary with global custom variables. Not set by default.
+NodeName |**Read-write.** Contains the cluster node name. Set to the local hostname by default.
+ReloadTimeout |**Read-write.** Defines the reload timeout for child processes. Defaults to `300s`.
+Environment |**Read-write.** The name of the Icinga environment. Included in the SNI host name for outbound connections. Not set by default.
+RunAsUser |**Read-write.** Defines the user the Icinga 2 daemon is running as. Set in the Icinga 2 sysconfig.
+RunAsGroup |**Read-write.** Defines the group the Icinga 2 daemon is running as. Set in the Icinga 2 sysconfig.
+MaxConcurrentChecks |**Read-write.** The number of max checks run simultaneously. Defaults to `512`.
+ApiBindHost |**Read-write.** Overrides the default value for the ApiListener `bind_host` attribute. Defaults to `::` if IPv6 is supported by the operating system and to `0.0.0.0` otherwise.
+ApiBindPort |**Read-write.** Overrides the default value for the ApiListener `bind_port` attribute. Not set by default.
+
+#### Application Runtime Constants <a id="icinga-constants-application-runtime"></a>
+
+Constant | Description
+--------------------|-------------------
+PlatformName |**Read-only.** The name of the operating system, e.g. `Ubuntu`.
+PlatformVersion |**Read-only.** The version of the operating system, e.g. `14.04.3 LTS`.
+PlatformKernel |**Read-only.** The name of the operating system kernel, e.g. `Linux`.
+PlatformKernelVersion|**Read-only.** The version of the operating system kernel, e.g. `3.13.0-63-generic`.
+BuildCompilerName |**Read-only.** The name of the compiler Icinga was built with, e.g. `Clang`.
+BuildCompilerVersion|**Read-only.** The version of the compiler Icinga was built with, e.g. `7.3.0.7030031`.
+BuildHostName |**Read-only.** The name of the host Icinga was built on, e.g. `acheron`.
+ApplicationVersion |**Read-only.** The application version, e.g. `2.9.0`.
+
+#### Additional Constants <a id="icinga-constants-additional"></a>
+
+Writable constants can be specified on the CLI using the `--define/-D` parameter.
+
+> **Note for v2.10+**
+>
+> Default paths which include `/etc` and `/var` as base directory continue to work
+> based on the `SysconfDir` and `LocalStateDir` constants respectively.
+
+In addition to that, the constants below are used to define specific file paths. You should never need
+to change them, as they are pre-compiled based on the constants above.
+
+Variable |Description
+--------------------|-------------------
+StatePath |**Read-write.** Contains the path of the Icinga 2 state file. Defaults to `DataDir + "/icinga2.state"`.
+ObjectsPath |**Read-write.** Contains the path of the Icinga 2 objects file. Defaults to `CacheDir + "/icinga2.debug"`.
+PidPath |**Read-write.** Contains the path of the Icinga 2 PID file. Defaults to `InitRunDir + "/icinga2.pid"`.
+PkgDataDir |**Read-only.** Contains the path of the package data directory. Defaults to `PrefixDir + "/share/icinga2"`.
+
+The constants below were used until Icinga v2.10 and are still intact. You don't need them
+for new builds and configuration; use the newly available constants above instead.
+
+Variable |Description
+--------------------|-------------------
+PrefixDir           |**Read-only.** Contains the installation prefix that was specified with `cmake -DCMAKE_INSTALL_PREFIX`. Defaults to `/usr/local`.
+SysconfDir |**Read-only.** Contains the path of the sysconf directory. Defaults to `PrefixDir + "/etc"`.
+LocalStateDir |**Read-only.** Contains the path of the local state directory. Defaults to `PrefixDir + "/var"`.
+RunDir |**Read-only.** Contains the path of the run directory. Defaults to `LocalStateDir + "/run"`.
+
+#### Advanced Constants and Variables <a id="icinga-constants-advanced"></a>
+
+Advanced runtime constants. Please only use them if advised by support or developers.
+
+Variable | Description
+---------------------------|-------------------
+EventEngine |**Read-write.** The name of the socket event engine, can be `poll` or `epoll`. The epoll interface is only supported on Linux.
+AttachDebugger |**Read-write.** Whether to attach a debugger when Icinga 2 crashes. Defaults to `false`.
+
+Advanced sysconfig environment variables, defined in `/etc/sysconfig/icinga2` (RHEL/SLES) or `/etc/default/icinga2` (Debian/Ubuntu).
+
+Variable | Description
+---------------------------|-------------------
+ICINGA2\_RLIMIT\_FILES |**Read-write.** Defines the resource limit for `RLIMIT_NOFILE` that should be set at start-up. Value cannot be set lower than the default `16 * 1024`. 0 disables the setting. Set in Icinga 2 sysconfig.
+ICINGA2\_RLIMIT\_PROCESSES |**Read-write.** Defines the resource limit for `RLIMIT_NPROC` that should be set at start-up. Value cannot be set lower than the default `16 * 1024`. 0 disables the setting. Set in Icinga 2 sysconfig.
+ICINGA2\_RLIMIT\_STACK |**Read-write.** Defines the resource limit for `RLIMIT_STACK` that should be set at start-up. Value cannot be set lower than the default `256 * 1024`. 0 disables the setting. Set in Icinga 2 sysconfig.
+
+#### Debug Constants and Variables <a id="icinga-constants-debug"></a>
+
+These constants are only available in debug builds for developers and help with tracing messages and attaching to debuggers.
+
+Variable | Description
+---------------------------|-------------------
+Internal.DebugJsonRpc | **Read-write.** Setting this to `1` prints the raw JSON-RPC message to STDOUT.
+Internal.DebugWorkerDelay | **Read-write.** Delays the main worker process by X seconds after forked from the umbrella process. This helps with attaching LLDB which cannot follow child forks like GDB.
+
+Example:
+
+```
+$ icinga2 daemon -DInternal.DebugWorkerDelay=120
+Closed FD 6 which we inherited from our parent process.
+[2020-01-29 12:22:33 +0100] information/cli: Icinga application loader (version: v2.11.0-477-gfe8701d77; debug)
+[2020-01-29 12:22:33 +0100] information/RunWorker: DEBUG: Current PID: 85253. Sleeping for 120 seconds to allow lldb/gdb -p <PID> attachment.
+
+$ lldb -p 85253
+(lldb) b icinga::Checkable::ProcessCheckResult
+(lldb) c
+```
+
+
+## Apply <a id="apply"></a>
+
+The `apply` keyword can be used to create new objects which are associated with
+another group of objects.
+
+```
+apply Service "ping" to Host {
+ import "generic-service"
+
+ check_command = "ping4"
+
+ assign where host.name == "localhost"
+}
+```
+
+In this example the `assign where` condition is a boolean expression which is
+evaluated for all objects of type `Host` and a new service with name "ping"
+is created for each matching host. [Expression operators](17-language-reference.md#expression-operators)
+may be used in `assign where` conditions.
+
+The `to` keyword and the target type may be omitted if there is only one target
+type, e.g. for the `Service` type.
+
+Depending on the object type used in the `apply` expression additional local
+variables may be available for use in the `where` condition:
+
+Source Type | Target Type | Variables
+------------------|-------------|--------------
+Service | Host | host
+Dependency | Host | host
+Dependency | Service | host, service
+Notification | Host | host
+Notification | Service | host, service
+ScheduledDowntime | Host | host
+ScheduledDowntime | Service | host, service
+
+Any valid config attribute can be accessed using the `host` and `service`
+variables. For example, `host.address` would return the value of the host's
+"address" attribute -- or null if that attribute isn't set.
+
+More usage examples are documented in the [monitoring basics](03-monitoring-basics.md#using-apply-expressions)
+chapter.
+
+## Apply For <a id="apply-for"></a>
+
+[Apply](17-language-reference.md#apply) rules can be extended with the
+[for loop](17-language-reference.md#for-loops) keyword.
+
+```
+apply Service "prefix-" for (key => value in host.vars.dictionary) to Host {
+ import "generic-service"
+
+ check_command = "ping4"
+ vars.host_value = value
+}
+```
+
+Any valid config attribute can be accessed using the `host` and `service`
+variables. The attribute must be of the Array or Dictionary type. In this example
+`host.vars.dictionary` is of the Dictionary type which needs a key-value-pair
+as iterator.
+
+In this example all generated service object names consist of `prefix-` and
+the value of the `key` iterator. The prefix string can be omitted if not required.
+
+The `key` and `value` variables can be used for object attribute assignment, e.g. for
+setting the `check_command` attribute or custom variables as command parameters.
+
+`apply for` rules are first evaluated against all objects matching the `for loop` list
+and afterwards the `assign where` and `ignore where` conditions are evaluated.
+
+It is not necessary to check attributes referenced in the `for loop` expression
+for their existence using an additional `assign where` condition.
+
+More usage examples are documented in the [monitoring basics](03-monitoring-basics.md#using-apply-for)
+chapter.
+
+## Group Assign <a id="group-assign"></a>
+
+Group objects can be assigned to specific member objects using the `assign where`
+and `ignore where` conditions.
+
+```
+object HostGroup "linux-servers" {
+ display_name = "Linux Servers"
+
+ assign where host.vars.os == "Linux"
+}
+```
+
+In this example the `assign where` condition is a boolean expression which is evaluated
+for all objects of the type `Host`. Each matching host is added as member to the host group
+with the name "linux-servers". Membership exclusion can be controlled using the `ignore where`
+condition. [Expression operators](17-language-reference.md#expression-operators) may be used in `assign where` and
+`ignore where` conditions.
+
+Source Type | Variables
+------------------|--------------
+HostGroup | host
+ServiceGroup | host, service
+UserGroup | user
+
+
+## Boolean Values <a id="boolean-values"></a>
+
+The `assign where`, `ignore where`, `if` and `while` statements, the `!` operator as
+well as the `bool()` function convert their arguments to a boolean value based on the
+following rules:
+
+Description | Example Value | Boolean Value
+---------------------|-------------------|--------------
+Empty value | null | false
+Zero | 0 | false
+Non-zero integer | -23945 | true
+Empty string | "" | false
+Non-empty string | "Hello" | true
+Empty array | [] | false
+Non-empty array | [ "Hello" ] | true
+Empty dictionary | {} | false
+Non-empty dictionary | { key = "value" } | true
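+
+For example, using the `bool()` function mentioned above:
+
+```
+bool("")          /* false */
+bool([ "Hello" ]) /* true */
+bool({})          /* false */
+```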
+
+For a list of supported expression operators for `assign where` and `ignore where`
+statements, see [expression operators](17-language-reference.md#expression-operators).
+
+## Comments <a id="comments"></a>
+
+The Icinga 2 configuration format supports C/C++-style and shell-style comments.
+
+Example:
+
+```
+/*
+ This is a comment.
+ */
+object Host "localhost" {
+ check_interval = 30 // this is also a comment.
+ retry_interval = 15 # yet another comment
+}
+```
+
+## Includes <a id="includes"></a>
+
+Other configuration files can be included using the `include` directive.
+Paths must be relative to the configuration file that contains the
+`include` directive.
+
+Example:
+
+```
+include "some/other/file.conf"
+include "conf.d/*.conf"
+```
+
+Wildcard includes are not recursive.
+
+Icinga also supports include search paths similar to how they work in a
+C/C++ compiler:
+
+```
+include <itl>
+```
+
+Note the use of angle brackets instead of double quotes. This causes the
+config compiler to search the include search paths for the specified
+file. By default $PREFIX/share/icinga2/include is included in the list of search
+paths. Additional include search paths can be added using
+[command-line options](11-cli-commands.md#config-include-path).
+
+Wildcards are not permitted when using angle brackets.
+
+## Recursive Includes <a id="recursive-includes"></a>
+
+The `include_recursive` directive can be used to recursively include all
+files in a directory which match a certain pattern.
+
+Example:
+
+```
+include_recursive "conf.d", "*.conf"
+include_recursive "templates"
+```
+
+The first parameter specifies the directory from which files should be
+recursively included.
+
+The file names need to match the pattern given in the second parameter.
+When no pattern is specified the default pattern "*.conf" is used.
+
+## Zone Includes <a id="zone-includes"></a>
+
+> **Note**
+>
+> This is an internal functionality consumed by Icinga itself.
+>
+> The preferred way for users managing configuration files in
+> zones is to use the [cluster config sync](06-distributed-monitoring.md#distributed-monitoring-top-down-config-sync)
+> or [REST API config packages](12-icinga2-api.md#icinga2-api-config-management).
+
+The `include_zones` recursively includes all subdirectories for the
+given path.
+
+In addition to that it sets the `zone` attribute for all objects created
+in these subdirectories to the name of the subdirectory.
+
+Example:
+
+```
+include_zones "etc", "zones.d", "*.conf"
+include_zones "puppet", "puppet-zones"
+```
+
+The first parameter specifies a tag name for this directive. Each `include_zones`
+invocation should use a unique tag name. When copying the zones' configuration
+files Icinga uses the tag name as the name for the destination directory in
+`/var/lib/icinga2/api/config`.
+
+The second parameter specifies the directory which contains the subdirectories.
+
+The file names need to match the pattern given in the third parameter.
+When no pattern is specified the default pattern "*.conf" is used.
+
+## Library directive <a id="library"></a>
+
+The `library` directive was used to manually load additional
+libraries. Starting with version 2.9 it is no longer necessary to explicitly load
+libraries and this directive has no effect.
+
+## Functions <a id="functions"></a>
+
+Functions can be defined using the `function` keyword.
+
+Example:
+
+```
+function multiply(a, b) {
+ return a * b
+}
+```
+
+When encountering the `return` keyword further execution of the function is terminated and
+the specified value is supplied to the caller of the function:
+
+```
+log(multiply(3, 5))
+```
+
+In this example the `multiply` function we declared earlier is invoked with two arguments (3 and 5).
+The function computes the product of those arguments and makes the result available to the
+function's caller.
+
+When no value is supplied for the `return` statement the function returns `null`.
+
+Functions which do not have a `return` statement have their return value set to the value of the
+last expression which was performed by the function. For example, we could have also written our
+`multiply` function like this:
+
+```
+function multiply(a, b) {
+ a * b
+}
+```
+
+Anonymous functions can be created by omitting the name in the function definition. The
+resulting function object can be used like any other value:
+
+```
+var fn = function() { 3 }
+
+fn() /* Returns 3 */
+```
+
+## Lambda Expressions <a id="lambdas"></a>
+
+Functions can also be declared using the alternative lambda syntax.
+
+Example:
+
+```
+f = (x) => x * x
+```
+
+Multiple statements can be used by putting the function body into braces:
+
+```
+f = (x) => {
+ log("Lambda called")
+ x * x
+}
+```
+
+Just like with ordinary functions the return value is the value of the last statement.
+
+For lambdas which take exactly one argument the braces around the arguments can be omitted:
+
+```
+f = x => x * x
+```
+
+### Lambda Expressions with Closures <a id="lambdas-closures"></a>
+
+Lambda expressions which take a given number of arguments may need additional
+variable values from the outer scope. When the lambda expression does not allow
+to change the interface, [closures](17-language-reference.md#closures) come into play.
+
+```
+var y
+
+f = ((x) use(y) => x == y)
+```
+
+Note that the braces around arguments are always required when using closures.
+
+A more concrete example:
+
+Within the DSL, you want to [filter](18-library-reference.md#array-filter) an array of HostGroup objects by their name.
+The filter function takes one argument being a function callback which either returns
+`true` or `false`. Matching items are collected into the result set.
+
+```
+get_objects(HostGroup).filter((hg) => hg.name == "linux-servers")
+```
+
+Instead of hardcoding the matching hostgroup name into the lambda scope, you may want
+to control the value from the outside, e.g. via a custom variable
+or a global constant.
+
+```
+var hg_filter_name = "linux-servers"
+
+get_objects(HostGroup).filter((hg) use(hg_filter_name) => hg.name == hg_filter_name)
+```
+
+You can also turn this example around and extract the host objects matching a specific
+host group name.
+
+```
+var hg_filter_name = "linux-servers"
+
+get_objects(Host).filter((h) use (hg_filter_name) => hg_filter_name in h.groups).map(h => h.name)
+```
+
+Note that this example makes use of the [map](18-library-reference.md#array-map) method for the Array type which
+extracts the host name attribute from the full object into a new array.
+
+## Abbreviated Lambda Syntax <a id="nullary-lambdas"></a>
+
+Lambdas which take no arguments can also be written using the abbreviated lambda syntax.
+
+Example:
+
+```
+f = {{ 3 }}
+```
+
+This creates a new function which returns the value 3.
+
+## Variable Scopes <a id="variable-scopes"></a>
+
+When setting a variable Icinga checks the following scopes in this order whether the variable
+already exists there:
+
+* Local Scope
+* `this` Scope
+* Global Scope
+
+The local scope contains variables which only exist during the invocation of the current function,
+object or apply statement. Local variables can be declared using the `var` keyword:
+
+```
+function multiply(a, b) {
+ var temp = a * b
+ return temp
+}
+```
+
+Each time the `multiply` function is invoked a new `temp` variable is used which is in no way
+related to previous invocations of the function.
+
+When setting a variable which has not previously been declared as local using the `var` keyword
+the `this` scope is used.
+
+The `this` scope refers to the current object which the function or object/apply statement
+operates on.
+
+```
+object Host "localhost" {
+ check_interval = 5m
+}
+```
+
+In this example the `this` scope refers to the "localhost" object. The `check_interval` attribute
+is set for this particular host.
+
+You can explicitly access the `this` scope using the `this` keyword:
+
+```
+object Host "localhost" {
+ var check_interval = 5m
+
+ /* This explicitly specifies that the attribute should be set
+ * for the host, if we had omitted `this.` the (poorly named)
+ * local variable `check_interval` would have been modified instead.
+ */
+ this.check_interval = 1m
+}
+```
+
+Similarly, the keywords `locals` and `globals` are available to access the local and global scope.
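+
+A rough sketch, assuming both scopes can be read like dictionaries (the variable name is arbitrary):
+
+```
+var answer = 42
+log(locals.answer)    /* explicitly reads the local variable */
+log(globals.NodeName) /* reads the global NodeName constant */
+```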
+
+Functions also have a `this` scope. However unlike for object/apply statements the `this` scope for
+a function is set to whichever object was used to invoke the function. Here's an example:
+
+```
+ hm = {
+ h_word = null
+
+ function init(word) {
+ h_word = word
+ }
+ }
+
+ /* Let's invoke the init() function */
+ hm.init("hello")
+```
+
+We're using `hm.init` to invoke the function which causes the value of `hm` to become the `this`
+scope for this function call.
+
+## Closures <a id="closures"></a>
+
+By default `function`s, `object`s and `apply` rules do not have access to variables declared
+outside of their scope (except for global variables).
+
+In order to access variables which are defined in the outer scope the `use` keyword can be used:
+
+```
+function MakeHelloFunction(name) {
+ return function() use(name) {
+ log("Hello, " + name)
+ }
+}
+```
+
+In this case a new variable `name` is created inside the inner function's scope which has the
+value of the `name` function argument.
+
+Alternatively a different value for the inner variable can be specified:
+
+```
+function MakeHelloFunction(name) {
+ return function() use (greeting = "Hello, " + name) {
+ log(greeting)
+ }
+}
+```
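+
+For example, the returned function can be stored and called later; it still uses the captured value:
+
+```
+var hello = MakeHelloFunction("World")
+hello() /* logs "Hello, World" */
+```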
+
+## Conditional Statements <a id="conditional-statements"></a>
+
+### Conditional Statements: if/else <a id="conditional-statements-if-else"></a>
+
+Sometimes it can be desirable to only evaluate statements when certain conditions are met. The if/else
+construct can be used to accomplish this.
+
+Example:
+
+```
+a = 3
+
+if (a < 5) {
+ a *= 7
+} else if (a > 10) {
+ a *= 5
+} else {
+ a *= 2
+}
+```
+
+An if/else construct can also be used in place of any other value. The value of an if/else statement
+is the value of the last statement which was evaluated for the branch which was taken:
+
+```
+a = if (true) {
+ log("Taking the 'true' branch")
+ 7 * 3
+} else {
+ log("Taking the 'false' branch")
+ 9
+}
+```
+
+This example prints the log message "Taking the 'true' branch" and the `a` variable is set to 21 (7 * 3).
+
+The value of an if/else construct is null if the condition evaluates to false and no else branch is given.
+
+### Conditional Statements: Ternary Operator <a id="conditional-statements-ternary"></a>
+
+Instead of if/else condition chains, you can also use the ternary operator `?`
+with assignments. Values are separated with a colon `:` character.
+
+```
+cond ? cond_val_true : cond_val_false
+```
+
+If the condition matches, the first value is returned; if not, the second value
+(the else branch) is returned.
+
+The following example evaluates a condition and either assigns `1` or `0`
+to the local variable.
+
+```
+<1> => var x = (2 * 3 > 5) ? 1 : 0
+null
+<2> => x
+1.000000
+<3> => var x = (2 * 3 > 7) ? 1 : 0
+null
+<4> => x
+0.000000
+```
+
+Additional examples with advanced condition chaining:
+
+```
+<1> => 1 ? 2 : 3 ? 4 : 5 ? 6 : 7
+2.000000
+<2> => 0 ? 2 : 3 ? 4 : 5 ? 6 : 7
+4.000000
+<3> => 0 ? 2 : 0 ? 4 : 5 ? 6 : 7
+6.000000
+<4> => 0 ? 2 : 0 ? 4 : 0 ? 6 : 7
+7.000000
+<5> => 1 + 0 ? 2 : 3 + 4
+2.000000
+<6> => 0 + 0 ? 2 : 3 + 4
+7.000000
+<7> => (()=>{ return 1 ? 2 : 3 })()
+2.000000
+<8> => var x = 1 ? 2 : 3
+null
+<9> => x
+2.000000
+```
+
+
+## While Loops <a id="while-loops"></a>
+
+The `while` statement checks a condition and executes the loop body when the condition evaluates to `true`.
+This is repeated until the condition is no longer true.
+
+Example:
+
+```
+var num = 5
+
+while (num > 0) {
+ log("Test")
+ num -= 1
+}
+```
+
+The `continue` and `break` keywords can be used to control how the loop is executed: The `continue` keyword
+skips over the remaining expressions for the loop body and begins the next loop evaluation. The `break` keyword
+breaks out of the loop.
+
+## For Loops <a id="for-loops"></a>
+
+The `for` statement can be used to iterate over arrays and dictionaries.
+
+Example:
+
+```
+var list = [ "a", "b", "c" ]
+
+for (var item in list) {
+ log("Item: " + item)
+}
+```
+
+The loop body is evaluated once for each item in the array. The variable `item` is declared as a local
+variable just as if the `var` keyword had been used.
+
+Iterating over dictionaries can be accomplished in a similar manner:
+
+```
+var dict = { a = 3, b = 7 }
+
+for (var key => var value in dict) {
+ log("Key: " + key + ", Value: " + value)
+}
+```
+
+The `continue` and `break` keywords can be used to control how the loop is executed: The `continue` keyword
+skips over the remaining expressions for the loop body and begins the next loop evaluation. The `break` keyword
+breaks out of the loop.
+
+The `var` keyword is optional when declaring variables in the loop's header. Variables declared without the `var`
+keyword are nonetheless local to the function.
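+
+For instance, the earlier array loop can also be written without the `var` keyword:
+
+```
+var list = [ "a", "b", "c" ]
+
+for (item in list) {
+  log("Item: " + item)
+}
+```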
+
+## Constructors <a id="constructor"></a>
+
+In order to create a new value of a specific type constructor calls may be used.
+
+Example:
+
+```
+var pd = PerfdataValue()
+pd.label = "test"
+pd.value = 10
+```
+
+You can also try to convert an existing value to another type by specifying it as an argument for the constructor call.
+
+Example:
+
+```
+var s = String(3) /* Sets s to "3". */
+```
+
+## Throwing Exceptions <a id="throw"></a>
+
+Built-in commands may throw exceptions to signal errors such as invalid arguments. User scripts can throw exceptions
+using the `throw` keyword.
+
+Example:
+
+```
+throw "An error occurred."
+```
+
+## Handling Exceptions <a id="try-except"></a>
+
+Exceptions can be handled using the `try` and `except` keywords. When an exception occurs while executing code in the
+`try` clause no further statements in the `try` clause are evaluated and the `except` clause is executed instead.
+
+Example:
+
+```
+try {
+ throw "Test"
+
+ log("This statement won't get executed.")
+} except {
+ log("An error occurred in the try clause.")
+}
+```
+
+## Breakpoints <a id="breakpoints"></a>
+
+The `debugger` keyword can be used to insert a breakpoint. It may be used at any place where an assignment would also be a valid expression.
+
+By default breakpoints have no effect unless Icinga is started with the `--script-debugger` command-line option. When the script debugger is enabled Icinga stops execution of the script when it encounters a breakpoint and spawns a console which lets the user inspect the current state of the execution environment.
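+
+A minimal sketch (the breakpoint only takes effect when the script debugger is enabled):
+
+```
+var x = 3
+
+debugger /* Execution pauses here when started with --script-debugger. */
+
+x += 1
+```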
+
+## Types <a id="types"></a>
+
+All values have a static type. The `typeof` function can be used to determine the type of a value:
+
+```
+typeof(3) /* Returns an object which represents the type for numbers */
+```
+
+The following built-in types are available:
+
+Type | Examples | Description
+-----------|-------------------|------------------------
+Number | 3.7 | A numerical value.
+Boolean | true, false | A boolean value.
+String | "hello" | A string.
+Array | [ "a", "b" ] | An array.
+Dictionary | { a = 3 } | A dictionary.
+
+Depending on which libraries are loaded, additional types may become available. The `icinga`
+library implements many additional [object types](09-object-types.md#object-types),
+e.g. Host, Service, CheckCommand, etc.
+
+Each type has an associated type object which describes the type's semantics. These
+type objects are made available using global variables which match the type's name:
+
+```
+/* This logs 'true' */
+log(typeof(3) == Number)
+```
+
+The type object's `prototype` property can be used to find out which methods a certain type
+supports:
+
+```
+/* This returns: ["contains","find","len","lower","replace","reverse","split","substr","to_string","trim","upper"] */
+keys(String.prototype)
+```
+
+Additional documentation on type methods is available in the
+[library reference](18-library-reference.md#library-reference).
+
+## Location Information <a id="location-information"></a>
+
+The location of the currently executing script can be obtained using the
+`current_filename` and `current_line` keywords.
+
+Example:
+
+```
+log("Hello from '" + current_filename + "' in line " + current_line)
+```
+
+## Reserved Keywords <a id="reserved-keywords"></a>
+
+These keywords are reserved and must not be used as constants or custom variables.
+
+```
+object
+template
+include
+include_recursive
+include_zones
+library
+null
+true
+false
+const
+var
+this
+globals
+locals
+use
+default
+ignore_on_error
+current_filename
+current_line
+apply
+to
+where
+import
+assign
+ignore
+function
+return
+break
+continue
+for
+if
+else
+while
+throw
+try
+except
+in
+using
+namespace
+```
+
+You can escape reserved keywords using the `@` character. The following example
+tries to set `vars.include`, which references a reserved keyword, and generates
+an error:
+
+```
+[2014-09-15 17:24:00 +0200] critical/config: Location:
+/etc/icinga2/conf.d/hosts/localhost.conf(13): vars.sla = "24x7"
+/etc/icinga2/conf.d/hosts/localhost.conf(14):
+/etc/icinga2/conf.d/hosts/localhost.conf(15): vars.include = "some cmdb export field"
+ ^^^^^^^
+/etc/icinga2/conf.d/hosts/localhost.conf(16): }
+/etc/icinga2/conf.d/hosts/localhost.conf(17):
+
+Config error: in /etc/icinga2/conf.d/hosts/localhost.conf: 15:8-15:14: syntax error, unexpected include (T_INCLUDE), expecting T_IDENTIFIER
+[2014-09-15 17:24:00 +0200] critical/config: 1 errors, 0 warnings.
+```
+
+You can escape the `include` keyword by prefixing it with an additional `@` character:
+
+```
+object Host "localhost" {
+ import "generic-host"
+
+ address = "127.0.0.1"
+ address6 = "::1"
+
+ vars.os = "Linux"
+ vars.sla = "24x7"
+
+ vars.@include = "some cmdb export field"
+}
+```
diff --git a/doc/18-library-reference.md b/doc/18-library-reference.md
new file mode 100644
index 0000000..feb5d5d
--- /dev/null
+++ b/doc/18-library-reference.md
@@ -0,0 +1,1961 @@
+# Library Reference <a id="library-reference"></a>
+
+## Global functions <a id="global-functions"></a>
+
+These functions are globally available in [assign/ignore where expressions](03-monitoring-basics.md#using-apply-expressions),
+[functions](17-language-reference.md#functions), [API filters](12-icinga2-api.md#icinga2-api-filters)
+and the [Icinga 2 debug console](11-cli-commands.md#cli-command-console).
+
+You can use the [Icinga 2 debug console](11-cli-commands.md#cli-command-console)
+as a sandbox to test these functions before implementing
+them in your scenarios.
+
+### basename <a id="global-functions-basename"></a>
+
+Signature:
+
+```
+function basename(path)
+```
+
+Returns the filename portion of the specified path.
+
+Example:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => var path = "/etc/icinga2/scripts/xmpp-notification.pl"
+null
+<2> => basename(path)
+"xmpp-notification.pl"
+```
+
+### bool <a id="global-functions-bool"></a>
+
+Signature:
+
+```
+function bool(value)
+```
+
+Converts the value to a bool.
+
+Example:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => bool(1)
+true
+<2> => bool(0)
+false
+```
+
+### cidr_match <a id="global-functions-cidr_match"></a>
+
+Signature:
+
+```
+function cidr_match(pattern, ip, mode)
+```
+
+Returns true if the CIDR pattern matches the IP address, false otherwise.
+
+IPv4 addresses are converted to IPv4-mapped IPv6 addresses before being
+matched against the pattern. The `mode` argument is optional and can be
+either `MatchAll` (in which case all elements for an array have to match) or `MatchAny`
+(in which case at least one element has to match). The default mode is `MatchAll`.
+
+Example for a single IP address:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => host.address = "192.168.56.101"
+null
+<2> => cidr_match("192.168.56.0/24", host.address)
+true
+<3> => cidr_match("192.168.56.0/26", host.address)
+false
+```
+
+Example for an array of IP addresses:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => host.vars.vhost_ips = [ "192.168.56.101", "192.168.56.102", "10.0.10.99" ]
+null
+<2> => cidr_match("192.168.56.0/24", host.vars.vhost_ips, MatchAll)
+false
+<3> => cidr_match("192.168.56.0/24", host.vars.vhost_ips, MatchAny)
+true
+```
+
+### dirname <a id="global-functions-dirname"></a>
+
+Signature:
+
+```
+function dirname(path)
+```
+
+Returns the directory portion of the specified path.
+
+Example:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => var path = "/etc/icinga2/scripts/xmpp-notification.pl"
+null
+<2> => dirname(path)
+"/etc/icinga2/scripts"
+```
+
+### escape_create_process_arg <a id="global-functions-escape_create_process_arg"></a>
+
+Signature:
+
+```
+function escape_create_process_arg(text)
+```
+
+Escapes a string for use as an argument for CreateProcess(). Windows only.
+
+### escape_shell_arg <a id="global-functions-escape_shell_arg"></a>
+
+Signature:
+
+```
+function escape_shell_arg(text)
+```
+
+Escapes a string for use as a single shell argument.
+
+Example:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => escape_shell_arg("'$host.name$' '$service.name$'")
+"''\\''$host.name$'\\'' '\\''$service.name$'\\'''"
+```
+
+### escape_shell_cmd <a id="global-functions-escape_shell_cmd"></a>
+
+Signature:
+
+```
+function escape_shell_cmd(text)
+```
+
+Escapes shell meta characters in a string.
+
+Example:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => escape_shell_cmd("/bin/echo 'shell test' $ENV")
+"/bin/echo 'shell test' \\$ENV"
+```
+
+### get_time <a id="global-functions-get_time"></a>
+
+Signature:
+
+```
+function get_time()
+```
+
+Returns the current UNIX timestamp as a floating point number.
+
+Example:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => get_time()
+1480072135.633008
+<2> => get_time()
+1480072140.401207
+```
+
+### getenv <a id="global-functions-getenv"></a>
+
+Signature:
+
+```
+function getenv(key)
+```
+
+Returns the value from the specified environment variable key.
+
+Example:
+
+```
+$ MY_ENV_VAR=icinga2 icinga2 console
+Icinga 2 (version: v2.11.0)
+Type $help to view available commands.
+<1> => getenv("MY_ENV_VAR")
+"icinga2"
+```
+
+### glob <a id="global-functions-glob"></a>
+
+Signature:
+
+```
+function glob(pathSpec, type)
+```
+
+Returns an array containing all paths which match the
+`pathSpec` argument.
+
+The `type` argument is optional and specifies which types
+of paths are matched. This can be a combination of the `GlobFile`
+and `GlobDirectory` constants. The default value is `GlobFile | GlobDirectory`.
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => var pathSpec = "/etc/icinga2/conf.d/*.conf"
+null
+<2> => glob(pathSpec)
+[ "/etc/icinga2/conf.d/app.conf", "/etc/icinga2/conf.d/commands.conf", ... ]
+```
+
+### glob\_recursive <a id="global-functions-glob-recursive"></a>
+
+Signature:
+
+```
+function glob_recursive(path, pattern, type)
+```
+
+Recursively descends into the specified directory and returns an array containing
+all paths which match the `pattern` argument.
+
+The `type` argument is optional and specifies which types
+of paths are matched. This can be a combination of the `GlobFile`
+and `GlobDirectory` constants. The default value is `GlobFile | GlobDirectory`.
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => var path = "/etc/icinga2/zones.d/"
+null
+<2> => var pattern = "*.conf"
+null
+<3> => glob_recursive(path, pattern)
+[ "/etc/icinga2/zones.d/global-templates/templates.conf", "/etc/icinga2/zones.d/master/hosts.conf", ... ]
+```
+
+### intersection <a id="global-functions-intersection"></a>
+
+Signature:
+
+```
+function intersection(array, array, ...)
+```
+
+Returns an array containing all unique elements which are common to all
+specified arrays.
+
+Example:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => var dev_notification_groups = [ "devs", "slack" ]
+null
+<2> => var host_notification_groups = [ "slack", "noc" ]
+null
+<3> => intersection(dev_notification_groups, host_notification_groups)
+[ "slack" ]
+```
+
+### keys <a id="global-functions-keys"></a>
+
+Signature:
+
+```
+function keys(dict)
+```
+
+Returns an array containing the dictionary's keys.
+
+**Note**: Instead of using this global function you are advised to use the type's
+prototype method: [Dictionary#keys](18-library-reference.md#dictionary-keys).
+
+Example:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => host.vars.disks["/"] = {}
+null
+<2> => host.vars.disks["/var"] = {}
+null
+<3> => host.vars.disks.keys()
+[ "/", "/var" ]
+```
+
+### len <a id="global-functions-len"></a>
+
+Signature:
+
+```
+function len(value)
+```
+
+Returns the length of the value, i.e. the number of elements for an array
+or dictionary, or the length of the string in bytes.
+
+**Note**: Instead of using this global function you are advised to use the type's
+prototype method: [Array#len](18-library-reference.md#array-len), [Dictionary#len](18-library-reference.md#dictionary-len) and
+[String#len](18-library-reference.md#string-len).
+
+Example:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => host.groups = [ "linux-servers", "db-servers" ]
+null
+<2> => host.groups.len()
+2.000000
+<3> => host.vars.disks["/"] = {}
+null
+<4> => host.vars.disks["/var"] = {}
+null
+<5> => host.vars.disks.len()
+2.000000
+<6> => host.vars.os_type = "Linux/Unix"
+null
+<7> => host.vars.os_type.len()
+10.000000
+```
+
+### log <a id="global-functions-log"></a>
+
+Signature:
+
+```
+function log(value)
+```
+
+Writes a message to the log. Non-string values are converted to a JSON string.
+
+Signature:
+
+```
+function log(severity, facility, value)
+```
+
+Writes a message to the log. `severity` can be one of `LogDebug`, `LogNotice`,
+`LogInformation`, `LogWarning`, and `LogCritical`.
+
+Non-string values are converted to a JSON string.
+
+Example:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => log(LogCritical, "Console", "First line")
+critical/Console: First line
+null
+<2> => var groups = [ "devs", "slack" ]
+null
+<3> => log(LogCritical, "Console", groups)
+critical/Console: ["devs","slack"]
+null
+```
+
+### match <a id="global-functions-match"></a>
+
+Signature:
+
+```
+function match(pattern, value, mode)
+```
+
+Returns true if the wildcard `pattern` (using the `?` and `*` wildcard characters) matches the `value`, false otherwise.
+The `value` can be of the type [String](18-library-reference.md#string-type) or [Array](18-library-reference.md#array-type) (which
+contains string elements).
+
+The `mode` argument is optional and can be either `MatchAll` (in which case all elements
+for an array have to match) or `MatchAny` (in which case at least one element has to match).
+The default mode is `MatchAll`.
+
+Example for string values:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => var name = "db-prod-sfo-657"
+null
+<2> => match("*prod-sfo*", name)
+true
+<3> => match("*-dev-*", name)
+false
+```
+
+Example for an array of string values:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0-28)
+<1> => host.vars.application_types = [ "web-wp", "web-rt", "db-local" ]
+null
+<2> => match("web-*", host.vars.application_types, MatchAll)
+false
+<3> => match("web-*", host.vars.application_types, MatchAny)
+true
+```
+
+### number <a id="global-functions-number"></a>
+
+Signature:
+
+```
+function number(value)
+```
+
+Converts the value to a number.
+
+Example:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => number(false)
+0.000000
+<2> => number("78")
+78.000000
+```
+
+### parse_performance_data <a id="global-functions-parse_performance_data"></a>
+
+Signature:
+
+```
+function parse_performance_data(pd)
+```
+
+Parses a single performance data value from a string and returns a
+[PerfdataValue](08-advanced-topics.md#advanced-value-types-perfdatavalue) object.
+
+Example:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => var pd = "'time'=1480074205.197363;;;"
+null
+<2> => parse_performance_data(pd)
+{
+ counter = false
+ crit = null
+ label = "time"
+ max = null
+ min = null
+ type = "PerfdataValue"
+ unit = ""
+ value = 1480074205.197363
+ warn = null
+}
+```
+
+### path\_exists <a id="global-functions-path-exists"></a>
+
+Signature:
+
+```
+function path_exists(path)
+```
+
+Returns true if the specified path exists, false otherwise.
+
+Example:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => var path = "/etc/icinga2/scripts/xmpp-notification.pl"
+null
+<2> => path_exists(path)
+true
+```
+
+### random <a id="global-functions-random"></a>
+
+Signature:
+
+```
+function random()
+```
+
+Returns a random value between 0 and RAND\_MAX (as defined in stdlib.h).
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => random()
+1263171996.000000
+<2> => random()
+108402530.000000
+```
+
+### range <a id="global-functions-range"></a>
+
+Signature:
+
+```
+function range(end)
+function range(start, end)
+function range(start, end, increment)
+```
+
+Returns an array of numbers in the specified range.
+If you specify only the end parameter, the numbers start at `0`, are incremented by `1`
+and stop before the specified end.
+If you specify both start and end numbers, the returned numbers are incremented by `1`,
+start at the specified start number and stop before the end number.
+Optionally you can specify the increment between numbers as the third parameter.
+
+Example:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => range(5)
+[ 0.000000, 1.000000, 2.000000, 3.000000, 4.000000 ]
+<2> => range(2,4)
+[ 2.000000, 3.000000 ]
+<3> => range(2,10,2)
+[ 2.000000, 4.000000, 6.000000, 8.000000 ]
+```
+
+### regex <a id="global-functions-regex"></a>
+
+Signature:
+
+```
+function regex(pattern, value, mode)
+```
+
+Returns true if the regular expression `pattern` matches the `value`, false otherwise.
+The `value` can be of the type [String](18-library-reference.md#string-type) or [Array](18-library-reference.md#array-type) (which
+contains string elements).
+
+The `mode` argument is optional and can be either `MatchAll` (in which case all elements
+for an array have to match) or `MatchAny` (in which case at least one element has to match).
+The default mode is `MatchAll`.
+
+**Tip**: In case you are looking for regular expression tests try [regex101](https://regex101.com).
+
+Example for string values:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => host.vars.os_type = "Linux/Unix"
+null
+<2> => regex("^Linux", host.vars.os_type)
+true
+<3> => regex("^Linux$", host.vars.os_type)
+false
+```
+
+Example for an array of string values:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => host.vars.databases = [ "db-prod1", "db-prod2", "db-dev" ]
+null
+<2> => regex("^db-prod\\d+", host.vars.databases, MatchAny)
+true
+<3> => regex("^db-prod\\d+", host.vars.databases, MatchAll)
+false
+```
+
+### sleep <a id="global-functions-sleep"></a>
+
+Signature:
+
+```
+function sleep(interval)
+```
+
+Sleeps for the specified amount of time (in seconds).
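+
+Example (illustrative):
+
+```
+log("before")
+sleep(0.5) /* Pauses script execution for half a second. */
+log("after")
+```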
+
+### string <a id="global-functions-string"></a>
+
+Signature:
+
+```
+function string(value)
+```
+
+Converts the value to a string.
+
+**Note**: Instead of using this global function you are advised to use the type's
+prototype method:
+
+* [Number#to_string](18-library-reference.md#number-to_string)
+* [Boolean#to_string](18-library-reference.md#boolean-to_string)
+* [String#to_string](18-library-reference.md#string-to_string)
+* [Object#to_string](18-library-reference.md#object-to-string) for Array and Dictionary types
+* [DateTime#to_string](18-library-reference.md#datetime-tostring)
+
+Example:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => 5.to_string()
+"5"
+<2> => false.to_string()
+"false"
+<3> => "abc".to_string()
+"abc"
+<4> => [ "dev", "slack" ].to_string()
+"[ \"dev\", \"slack\" ]"
+<5> => { "/" = {}, "/var" = {} }.to_string()
+"{\n\t\"/\" = {\n\t}\n\t\"/var\" = {\n\t}\n}"
+<6> => DateTime(2016, 11, 25).to_string()
+"2016-11-25 00:00:00 +0100"
+```
+
+### typeof <a id="global-functions-typeof"></a>
+
+Signature:
+
+```
+function typeof(value)
+```
+
+Returns the [Type](18-library-reference.md#type-type) object for a value.
+
+Example:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => typeof(3) == Number
+true
+<2> => typeof("str") == String
+true
+<3> => typeof(true) == Boolean
+true
+<4> => typeof([ 1, 2, 3]) == Array
+true
+<5> => typeof({ a = 2, b = 3 }) == Dictionary
+true
+```
+
+### union <a id="global-functions-union"></a>
+
+Signature:
+
+```
+function union(array, array, ...)
+```
+
+Returns an array containing all unique elements from the specified arrays.
+
+Example:
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => var dev_notification_groups = [ "devs", "slack" ]
+null
+<2> => var host_notification_groups = [ "slack", "noc" ]
+null
+<3> => union(dev_notification_groups, host_notification_groups)
+[ "devs", "noc", "slack" ]
+```
+
+## Scoped Functions <a id="scoped-functions"></a>
+
+This chapter describes functions which are only available
+in a specific scope.
+
+### macro <a id="scoped-functions-macro"></a>
+
+Signature:
+
+```
+function macro("$macro_name$")
+```
+
+The `macro` function can be used to resolve [runtime macro](03-monitoring-basics.md#runtime-macros)
+strings into their values.
+The returned value depends on the attribute value which is resolved
+from the specified runtime macro.
+
+This function is only available in runtime evaluated functions, e.g.
+for [custom variables](03-monitoring-basics.md#custom-variables-functions) which
+use the [abbreviated lambda syntax](17-language-reference.md#nullary-lambdas).
+
+This example sets the `snmp_address` custom variable
+based on `$address$` and `$address6$`.
+
+```
+ vars.snmp_address = {{
+ var addr_v4 = macro("$address$")
+ var addr_v6 = macro("$address6$")
+
+ if (addr_v4) {
+ return addr_v4
+ } else {
+ return "udp6:[" + addr_v6 + "]"
+ }
+ }}
+```
+
+More reference examples are available inside the [Icinga Template Library](10-icinga-template-library.md#icinga-template-library)
+and the [object accessors chapter](08-advanced-topics.md#access-object-attributes-at-runtime).
+
+## Object Accessor Functions <a id="object-accessor-functions"></a>
+
+These functions can be used to retrieve a reference to another object by name.
+
+### get_check_command <a id="objref-get_check_command"></a>
+
+Signature:
+
+```
+function get_check_command(name);
+```
+
+Returns the CheckCommand object with the specified name, or `null` if no such CheckCommand object exists.
+
+### get_event_command <a id="objref-get_event_command"></a>
+
+Signature:
+
+```
+function get_event_command(name);
+```
+
+Returns the EventCommand object with the specified name, or `null` if no such EventCommand object exists.
+
+### get_host <a id="objref-get_host"></a>
+
+Signature:
+
+```
+function get_host(host_name);
+```
+
+Returns the Host object with the specified name, or `null` if no such Host object exists.
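+
+Example in the [debug console](11-cli-commands.md#cli-command-console); the host name
+and output are illustrative:
+
+```
+$ ICINGA2_API_PASSWORD=icinga icinga2 console --connect 'https://root@localhost:5665/'
+Icinga 2 (version: v2.11.0)
+
+<1> => get_host(NodeName).name
+"icinga2-master1.localdomain"
+<2> => get_host("no-such-host")
+null
+```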
+
+### get_host_group <a id="objref-get_host_group"></a>
+
+Signature:
+
+```
+function get_host_group(name);
+```
+
+Returns the HostGroup object with the specified name, or `null` if no such HostGroup object exists.
+
+### get_notification_command <a id="objref-get_notification_command"></a>
+
+Signature:
+
+```
+function get_notification_command(name);
+```
+
+Returns the NotificationCommand object with the specified name, or `null` if no such NotificationCommand object exists.
+
+### get_object <a id="objref-get_object"></a>
+
+Signature:
+
+```
+function get_object(type, name);
+```
+
+Returns the object with the specified type and name, or `null` if no such object exists. `type` must refer
+to a type object.
+
+### get_objects <a id="objref-get_objects"></a>
+
+Signature:
+
+```
+function get_objects(type);
+```
+
+Returns an array of objects whose type matches the specified type. `type` must refer
+to a type object.
+
+### get_service <a id="objref-get_service"></a>
+
+Signature:
+
+```
+function get_service(host_name, service_name);
+function get_service(host, service_name);
+```
+
+Returns the Service object with the specified host name or object and service name pair,
+or `null` if no such Service object exists.
+
+Example in the [debug console](11-cli-commands.md#cli-command-console)
+which fetches the `disk` service object from the current Icinga 2 node:
+
+```
+$ ICINGA2_API_PASSWORD=icinga icinga2 console --connect 'https://root@localhost:5665/'
+Icinga 2 (version: v2.11.0)
+
+<1> => get_service(NodeName, "disk")
+<2> => get_service(NodeName, "disk").__name
+"icinga2-master1.localdomain!disk"
+
+<3> => get_service(get_host(NodeName), "disk").__name
+"icinga2-master1.localdomain!disk"
+```
+
+### get_service_group <a id="objref-get_service_group"></a>
+
+Signature:
+
+```
+function get_service_group(name);
+```
+
+Returns the ServiceGroup object with the specified name, or `null` if no such ServiceGroup object exists.
+
+### get_services <a id="objref-get_services"></a>
+
+Signature:
+
+```
+function get_services(host_name);
+function get_services(host);
+```
+
+Returns an [array](17-language-reference.md#array) of service objects for the specified host name or object,
+or `null` if no such host object exists.
+
+Example in the [debug console](11-cli-commands.md#cli-command-console)
+which fetches all service objects from the current Icinga 2 node:
+
+```
+$ ICINGA2_API_PASSWORD=icinga icinga2 console --connect 'https://root@localhost:5665/'
+Icinga 2 (version: v2.11.0)
+
+<1> => get_services(NodeName).map(s => s.name)
+[ "disk", "disk /", "http", "icinga", "load", "ping4", "ping6", "procs", "ssh", "users" ]
+```
+
+Note: [map](18-library-reference.md#array-map) takes a [lambda function](17-language-reference.md#lambdas) as argument. In this example
+we only want to collect and print the `name` attribute with `s => s.name`.
+
+This works in a similar fashion for a host object where you can extract all service states
+using the [map](18-library-reference.md#array-map) functionality:
+
+```
+<2> => get_services(get_host(NodeName)).map(s => s.state)
+[ 2.000000, 2.000000, 2.000000, 0.000000, 0.000000, 0.000000, 2.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000 ]
+```
+
+### get_template <a id="objref-get_template"></a>
+
+Signature:
+
+```
+function get_template(type, name);
+```
+
+Returns the template with the specified type and name, or `null` if no such object exists. `type` must refer
+to a type object.
+
+> **Note**
+>
+> Only the name and debug info attributes are available for templates accessed in the DSL.
+> Object attributes are not available in this scope.
+
+You can use this functionality to check whether a template exists e.g. on a satellite endpoint
+and if not, import a different template.
+
+```
+object Host "icinga-agent47.localdomain" {
+ if (get_template(Host, "master-host-tmpl")) {
+ import "master-host-tmpl"
+ } else {
+ import "generic-host"
+ }
+}
+```
+
+### get_templates <a id="objref-get_templates"></a>
+
+Signature:
+
+```
+function get_templates(type);
+```
+
+Returns an array of templates whose type matches the specified type. `type` must refer
+to a type object.
+
+> **Note**
+>
+> Only the name and debug info attributes are available for templates accessed in the DSL.
+> Object attributes are not available in this scope.
+
+You can use this function to iterate over all available template names, similar to what
+the [templates API URL endpoint](12-icinga2-api.md#icinga2-api-config-templates) provides.
+
+```
+<1> => get_templates(Host).map(n => n.name)
+[ "ssh-agent" ]
+```
+
+### get_time_period <a id="objref-get_time_period"></a>
+
+Signature:
+
+```
+function get_time_period(name);
+```
+
+Returns the TimePeriod object with the specified name, or `null` if no such TimePeriod object exists.
+
+### get_user <a id="objref-get_user"></a>
+
+Signature:
+
+```
+function get_user(name);
+```
+
+Returns the User object with the specified name, or `null` if no such User object exists.
+
+### get_user_group <a id="objref-get_user_group"></a>
+
+Signature:
+
+```
+function get_user_group(name);
+```
+
+Returns the UserGroup object with the specified name, or `null` if no such UserGroup object exists.
+
+## Json object <a id="json-object"></a>
+
+The global `Json` object can be used to encode and decode JSON.
+
+### Json.decode <a id="json-decode"></a>
+
+Signature:
+
+```
+function decode(x);
+```
+
+Decodes a JSON string.
+
+### Json.encode <a id="json-encode"></a>
+
+Signature:
+
+```
+function encode(x);
+```
+
+Encodes an arbitrary value into JSON.
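+
+A short example in the [debug console](11-cli-commands.md#cli-command-console); the output
+formatting is illustrative:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => Json.encode([ "devs", "slack" ])
+"[\"devs\",\"slack\"]"
+<2> => Json.decode("{\"a\":1}").a
+1.000000
+```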
+
+## Math object <a id="math-object"></a>
+
+The global `Math` object can be used to access a number of mathematical constants
+and functions.
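+
+A short example in the [debug console](11-cli-commands.md#cli-command-console); the output
+is illustrative:
+
+```
+$ icinga2 console
+Icinga 2 (version: v2.11.0)
+<1> => Math.round(2.7)
+3.000000
+<2> => Math.max(3, 7, 5)
+7.000000
+```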
+
+### Math.E <a id="math-e"></a>
+
+Euler's constant.
+
+### Math.LN2 <a id="math-ln2"></a>
+
+Natural logarithm of 2.
+
+### Math.LN10 <a id="math-ln10"></a>
+
+Natural logarithm of 10.
+
+### Math.LOG2E <a id="math-log2e"></a>
+
+Base 2 logarithm of E.
+
+### Math.PI <a id="math-pi"></a>
+
+The mathematical constant Pi.
+
+### Math.SQRT1_2 <a id="math-sqrt1_2"></a>
+
+Square root of 1/2.
+
+### Math.SQRT2 <a id="math-sqrt2"></a>
+
+Square root of 2.
+
+### Math.abs <a id="math-abs"></a>
+
+Signature:
+
+```
+function abs(x);
+```
+
+Returns the absolute value of `x`.
+
+### Math.acos <a id="math-acos"></a>
+
+Signature:
+
+```
+function acos(x);
+```
+
+Returns the arccosine of `x`.
+
+### Math.asin <a id="math-asin"></a>
+
+Signature:
+
+```
+function asin(x);
+```
+
+Returns the arcsine of `x`.
+
+### Math.atan <a id="math-atan"></a>
+
+Signature:
+
+```
+function atan(x);
+```
+
+Returns the arctangent of `x`.
+
+### Math.atan2 <a id="math-atan2"></a>
+
+Signature:
+
+```
+function atan2(y, x);
+```
+Returns the arctangent of the quotient of `y` and `x`.
+
+### Math.ceil <a id="math-ceil"></a>
+
+Signature:
+
+```
+function ceil(x);
+```
+
+Returns the smallest integer value not less than `x`.
+
+### Math.cos <a id="math-cos"></a>
+
+Signature:
+
+```
+function cos(x);
+```
+
+Returns the cosine of `x`.
+
+### Math.exp <a id="math-exp"></a>
+
+Signature:
+
+```
+function exp(x);
+```
+
+Returns E raised to the `x`th power.
+
+### Math.floor <a id="math-floor"></a>
+
+Signature:
+
+```
+function floor(x);
+```
+
+Returns the largest integer value not greater than `x`.
+
+### Math.isinf <a id="math-isinf"></a>
+
+Signature:
+
+```
+function isinf(x);
+```
+
+Returns whether `x` is infinite.
+
+### Math.isnan <a id="math-isnan"></a>
+
+Signature:
+
+```
+function isnan(x);
+```
+
+Returns whether `x` is NaN (not-a-number).
+
+### Math.log <a id="math-log"></a>
+
+Signature:
+
+```
+function log(x);
+```
+
+Returns the natural logarithm of `x`.
+
+### Math.max <a id="math-max"></a>
+
+Signature:
+
+```
+function max(...);
+```
+
+Returns the largest argument. A variable number of arguments can be specified.
+If no arguments are given, -Infinity is returned.
+
+### Math.min <a id="math-min"></a>
+
+Signature:
+
+```
+function min(...);
+```
+
+Returns the smallest argument. A variable number of arguments can be specified.
+If no arguments are given, +Infinity is returned.
+
+### Math.pow <a id="math-pow"></a>
+
+Signature:
+
+```
+function pow(x, y);
+```
+
+Returns `x` raised to the `y`th power.
+
+### Math.random <a id="math-random"></a>
+
+Signature:
+
+```
+function random();
+```
+
+Returns a pseudo-random number between 0 and 1.
+
+### Math.round <a id="math-round"></a>
+
+Signature:
+
+```
+function round(x);
+```
+
+Returns `x` rounded to the nearest integer value.
+
+### Math.sign <a id="math-sign"></a>
+
+Signature:
+
+```
+function sign(x);
+```
+
+Returns -1 if `x` is negative, 1 if `x` is positive
+and 0 if `x` is 0.
+
+### Math.sin <a id="math-sin"></a>
+
+Signature:
+
+```
+function sin(x);
+```
+
+Returns the sine of `x`.
+
+### Math.sqrt <a id="math-sqrt"></a>
+
+Signature:
+
+```
+function sqrt(x);
+```
+
+Returns the square root of `x`.
+
+### Math.tan <a id="math-tan"></a>
+
+Signature:
+
+```
+function tan(x);
+```
+
+Returns the tangent of `x`.
+
+## Array type <a id="array-type"></a>
+
+Inherits methods from the [Object type](18-library-reference.md#object-type).
+
+### Array#add <a id="array-add"></a>
+
+Signature:
+
+```
+function add(value);
+```
+
+Adds a new value after the last element in the array.
+
+### Array#all <a id="array-all"></a>
+
+Signature:
+
+```
+function all(func);
+```
+
+Returns true if the array contains only elements for which `func(element)`
+is true, false otherwise.
+
+### Array#any <a id="array-any"></a>
+
+Signature:
+
+```
+function any(func);
+```
+
+Returns true if the array contains at least one element for which `func(element)`
+is true, false otherwise.
+
+### Array#clear <a id="array-clear"></a>
+
+Signature:
+
+```
+function clear();
+```
+
+Removes all elements from the array.
+
+### Array#contains <a id="array-contains"></a>
+
+Signature:
+
+```
+function contains(value);
+```
+
+Returns true if the array contains the specified value, false otherwise.
+
+### Array#filter <a id="array-filter"></a>
+
+Signature:
+
+```
+function filter(func);
+```
+
+Returns a copy of the array containing only the elements for which `func(element)`
+is true.
+
+### Array#freeze <a id="array-freeze"></a>
+
+Signature:
+
+```
+function freeze()
+```
+
+Disallows further modifications to this array. Trying to modify the array will result in an exception.
+
+### Array#get <a id="array-get"></a>
+
+Signature:
+
+```
+function get(index);
+```
+
+Retrieves the element at the specified zero-based index.
+
+### Array#join <a id="array-join"></a>
+
+Signature:
+
+```
+function join(separator);
+```
+
+Joins all elements of the array using the specified separator.
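+
+Example (illustrative):
+
+```
+[ "a", "b", "c" ].join(",") /* Returns "a,b,c" */
+```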
+
+### Array#len <a id="array-len"></a>
+
+Signature:
+
+```
+function len();
+```
+
+Returns the number of elements contained in the array.
+
+### Array#map <a id="array-map"></a>
+
+Signature:
+
+```
+function map(func);
+```
+
+Calls `func(element)` for each of the elements in the array and returns
+a new array containing the return values of these function calls.
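+
+Example (illustrative):
+
+```
+[ 1, 2, 3 ].map(x => x * 2) /* Returns [ 2, 4, 6 ] */
+```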
+
+### Array#reduce <a id="array-reduce"></a>
+
+Signature:
+
+```
+function reduce(func);
+```
+
+Reduces the elements of the array into a single value by calling the provided
+function `func` as `func(a, b)` repeatedly where `a` and `b` are elements of the array
+or results from previous function calls.
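+
+A minimal sketch which sums the elements of an array (illustrative):
+
+```
+[ 1, 2, 3, 4 ].reduce((a, b) => a + b) /* Returns 10 */
+```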
+
+### Array#remove <a id="array-remove"></a>
+
+Signature:
+
+```
+function remove(index);
+```
+
+Removes the element at the specified zero-based index.
+
+### Array#reverse <a id="array-reverse"></a>
+
+Signature:
+
+```
+function reverse();
+```
+
+Returns a new array with all elements of the current array in reverse order.
+
+### Array#set <a id="array-set"></a>
+
+Signature:
+
+```
+function set(index, value);
+```
+
+Sets the element at the zero-based index to the specified value. The `index` must refer to an element
+which already exists in the array.
+
+### Array#shallow_clone <a id="array-shallow-clone"></a>
+
+```
+function shallow_clone();
+```
+
+Returns a copy of the array. Note that for elements which are reference values (e.g. objects such
+as arrays and dictionaries) only the references are copied.
+
+### Array#sort <a id="array-sort"></a>
+
+Signature:
+
+```
+function sort(less_cmp);
+```
+
+Returns a copy of the array where all items are sorted. The items are
+compared using the `<` (less-than) operator. A custom comparator function
+can be specified with the `less_cmp` argument.
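+
+Example (illustrative):
+
+```
+[ 3, 1, 2 ].sort() /* Returns [ 1, 2, 3 ] */
+[ 3, 1, 2 ].sort((a, b) => a > b) /* Returns [ 3, 2, 1 ] via a custom comparator */
+```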
+
+### Array#unique <a id="array-unique"></a>
+
+Signature:
+
+```
+function unique();
+```
+
+Returns a copy of the array with all duplicate elements removed. The original order
+of the array is not preserved.
+
+## Boolean type <a id="boolean-type"></a>
+
+### Boolean#to_string <a id="boolean-to_string"></a>
+
+Signature:
+
+```
+function to_string();
+```
+
+The `to_string` method returns a string representation of the boolean value.
+
+Example:
+
+```
+var example = true
+example.to_string() /* Returns "true" */
+```
+
+## DateTime type <a id="datetime-type"></a>
+
+Inherits methods from the [Object type](18-library-reference.md#object-type).
+
+### DateTime constructor <a id="datetime-ctor"></a>
+
+Signature:
+
+```
+function DateTime()
+function DateTime(unixTimestamp)
+function DateTime(year, month, day)
+function DateTime(year, month, day, hours, minutes, seconds)
+```
+
+Constructs a new DateTime object. When no arguments are specified for the constructor a new
+DateTime object representing the current time is created.
+
+Example:
+
+```
+var d1 = DateTime() /* current time */
+var d2 = DateTime(2016, 5, 21) /* midnight May 21st, 2016 (local time) */
+```
+
+### DateTime arithmetic <a id="datetime-arithmetic"></a>
+
+Subtracting two DateTime objects yields the interval between them, in seconds.
+
+Example:
+
+```
+var delta = DateTime() - DateTime(2016, 5, 21) /* seconds since midnight May 21st, 2016 */
+```
+
+Subtracting a number from a DateTime object yields a new DateTime object that is further in the past:
+
+Example:
+
+```
+var dt = DateTime() - 2 * 60 * 60 /* Current time minus 2 hours */
+```
+
+Adding a number to a DateTime object yields a new DateTime object that is in the future:
+
+Example:
+
+```
+var dt = DateTime() + 24 * 60 * 60 /* Current time plus 24 hours */
+```
+
+### DateTime#format <a id="datetime-format"></a>
+
+Signature:
+
+```
+function format(fmt)
+```
+
+Returns a string representation for the DateTime object using the specified format string.
+The format string may contain format conversion placeholders as specified in strftime(3).
+
+Example:
+
+```
+var s = DateTime(2016, 4, 21).format("%A") /* Sets s to "Thursday". */
+```
+
+### DateTime#to_string <a id="datetime-tostring"></a>
+
+Signature:
+
+```
+function to_string()
+```
+
+Returns a string representation for the DateTime object. Uses a suitable default format.
+
+Example:
+
+```
+var s = DateTime(2016, 4, 21).to_string() /* Sets s to "2016-04-21 00:00:00 +0200". */
+```
+
+## Dictionary type <a id="dictionary-type"></a>
+
+Inherits methods from the [Object type](18-library-reference.md#object-type).
+
+### Dictionary#clear <a id="dictionary-clear"></a>
+
+Signature:
+
+```
+function clear();
+```
+
+Removes all items from the dictionary.
+
+### Dictionary#contains <a id="dictionary-contains"></a>
+
+Signature:
+
+```
+function contains(key);
+```
+
+Returns true if a dictionary item with the specified `key` exists, false otherwise.
+
+### Dictionary#freeze <a id="dictionary-freeze"></a>
+
+Signature:
+
+```
+function freeze()
+```
+
+Disallows further modifications to this dictionary. Trying to modify the dictionary will result in an exception.
+
+### Dictionary#get <a id="dictionary-get"></a>
+
+Signature:
+
+```
+function get(key);
+```
+
+Retrieves the value for the specified `key`. Returns `null` if the `key` does not exist
+in the dictionary.
+
+### Dictionary#keys <a id="dictionary-keys"></a>
+
+Signature:
+
+```
+function keys();
+```
+
+Returns a list of keys for all items that are currently in the dictionary.
+
+### Dictionary#len <a id="dictionary-len"></a>
+
+Signature:
+
+```
+function len();
+```
+
+Returns the number of items contained in the dictionary.
+
+### Dictionary#remove <a id="dictionary-remove"></a>
+
+Signature:
+
+```
+function remove(key);
+```
+
+Removes the item with the specified `key`. Trying to remove an item which does not exist
+is a no-op.
+
+### Dictionary#set <a id="dictionary-set"></a>
+
+Signature:
+
+```
+function set(key, value);
+```
+
+Creates or updates an item with the specified `key` and `value`.
+
+### Dictionary#shallow_clone <a id="dictionary-shallow-clone"></a>
+
+Signature:
+
+```
+function shallow_clone();
+```
+
+Returns a copy of the dictionary. Note that for elements which are reference values (e.g. objects such
+as arrays and dictionaries) only the references are copied.
+
+### Dictionary#values <a id="dictionary-values"></a>
+
+Signature:
+
+```
+function values();
+```
+
+Returns a list of values for all items that are currently in the dictionary.
+
+## Function type <a id="scriptfunction-type"></a>
+
+Inherits methods from the [Object type](18-library-reference.md#object-type).
+
+### Function#call <a id="scriptfunction-call"></a>
+
+Signature:
+
+```
+function call(thisArg, ...);
+```
+
+Invokes the function using an alternative `this` scope. The `thisArg` argument specifies the `this`
+scope for the function. All other arguments are passed directly to the function.
+
+Example:
+
+```
+function set_x(val) {
+ this.x = val
+}
+
+dict = {}
+
+set_x.call(dict, 7) /* Invokes set_x using `dict` as `this` */
+```
+
+### Function#callv <a id="scriptfunction-callv"></a>
+
+Signature:
+
+```
+function callv(thisArg, args);
+```
+
+Invokes the function using an alternative `this` scope. The `thisArg` argument specifies the `this`
+scope for the function. The items in the `args` array are passed to the function as individual arguments.
+
+Example:
+
+```
+function set_x(val) {
+ this.x = val
+}
+
+var dict = {}
+
+var args = [ 7 ]
+
+set_x.callv(dict, args) /* Invokes set_x using `dict` as `this` */
+```
+
+## Number type <a id="number-type"></a>
+
+### Number#to_string <a id="number-to_string"></a>
+
+Signature:
+
+```
+function to_string();
+```
+
+The `to_string` method returns a string representation of the number.
+
+Example:
+
+```
+var example = 7
+example.to_string() /* Returns "7" */
+```
+
+## Object type <a id="object-type"></a>
+
+This is the base type for all types in the Icinga application.
+
+### Object#clone <a id="object-clone"></a>
+
+Signature:
+
+```
+ function clone();
+```
+
+Returns a copy of the object. Note that for object elements which are
+reference values (e.g. objects such as arrays or dictionaries) the entire
+object is recursively copied.
+
+### Object#to_string <a id="object-to-string"></a>
+
+Signature:
+
+```
+function to_string();
+```
+
+Returns a string representation for the object. Unless overridden this returns a string
+of the format "Object of type '<typename>'" where <typename> is the name of the
+object's type.
+
+Example:
+
+```
+[ 3, true ].to_string() /* Returns "[ 3.000000, true ]" */
+```
+
+### Object#type <a id="object-type-field"></a>
+
+Signature:
+
+String type;
+
+Returns the object's type name. This attribute is read-only.
+
+Example:
+
+```
+get_host("localhost").type /* Returns "Host" */
+```
+
+## String type <a id="string-type"></a>
+
+### String#contains <a id="string-contains"></a>
+
+Signature:
+
+```
+function contains(str);
+```
+
+Returns `true` if the string `str` was found in the string. If the string
+was not found, `false` is returned. Use [find](18-library-reference.md#string-find)
+for getting the index instead.
+
+Example:
+
+```
+"Hello World".contains("World") /* Returns true */
+```
+
+### String#find <a id="string-find"></a>
+
+Signature:
+
+```
+function find(str, start);
+```
+
+Returns the zero-based index at which the string `str` was found in the string. If the string
+was not found, -1 is returned. `start` specifies the zero-based index at which `find` should
+start looking for the string (defaults to 0 when not specified).
+
+Example:
+
+```
+"Hello World".find("World") /* Returns 6 */
+```
+
+### String#len <a id="string-len"></a>
+
+Signature:
+
+```
+function len();
+```
+
+Returns the length of the string in bytes. Note that depending on the encoding type of the string
+this is not necessarily the number of characters.
+
+Example:
+
+```
+"Hello World".len() /* Returns 11 */
+```
+
+### String#lower <a id="string-lower"></a>
+
+Signature:
+
+```
+function lower();
+```
+
+Returns a copy of the string with all of its characters converted to lower-case.
+
+Example:
+
+```
+"Hello World".lower() /* Returns "hello world" */
+```
+
+### String#replace <a id="string-replace"></a>
+
+Signature:
+
+```
+function replace(search, replacement);
+```
+
+Returns a copy of the string with all occurrences of the string specified in `search` replaced
+with the string specified in `replacement`.
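+
+Example (illustrative):
+
+```
+"Hello World".replace("World", "Icinga") /* Returns "Hello Icinga" */
+```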
+
+### String#reverse <a id="string-reverse"></a>
+
+Signature:
+
+```
+function reverse();
+```
+
+Returns a copy of the string in reverse order.
+
+### String#split <a id="string-split"></a>
+
+Signature:
+
+```
+function split(delimiters);
+```
+
+Splits a string into individual parts and returns them as an array. The `delimiters` argument
+specifies the characters which should be used as delimiters between parts.
+
+Example:
+
+```
+"x-7,y".split("-,") /* Returns [ "x", "7", "y" ] */
+```
+
+### String#substr <a id="string-substr"></a>
+
+Signature:
+
+```
+function substr(start, len);
+```
+
+Returns a part of a string. The `start` argument specifies the zero-based index at which the part begins.
+The optional `len` argument specifies the length of the part ("until the end of the string" if omitted).
+
+Example:
+
+```
+"Hello World".substr(6) /* Returns "World" */
+```
+
+### String#to_string <a id="string-to_string"></a>
+
+Signature:
+
+```
+function to_string();
+```
+
+Returns a copy of the string.
+
+### String#trim <a id="string-trim"></a>
+
+Signature:
+
+```
+function trim();
+```
+
+Removes trailing whitespaces and returns the string.
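+
+Example (illustrative):
+
+```
+"Hello World   ".trim() /* Returns "Hello World" */
+```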
+
+### String#upper <a id="string-upper"></a>
+
+Signature:
+
+```
+function upper();
+```
+
+Returns a copy of the string with all of its characters converted to upper-case.
+
+Example:
+
+```
+"Hello World".upper() /* Returns "HELLO WORLD" */
+```
+
+## Type type <a id="type-type"></a>
+
+Inherits methods from the [Object type](18-library-reference.md#object-type).
+
+The `Type` type provides information about the underlying type of an object or scalar value.
+
+All types are registered as global variables. For example, in order to obtain a reference to the `String` type the global variable `String` can be used.
+
+### Type#base <a id="type-base"></a>
+
+Signature:
+
+```
+Type base;
+```
+
+Returns a reference to the type's base type. This attribute is read-only.
+
+Example:
+
+```
+Dictionary.base == Object /* Returns true, because the Dictionary type inherits directly from the Object type. */
+```
+
+### Type#name <a id="type-name"></a>
+
+Signature:
+
+```
+String name;
+```
+
+Returns the name of the type.
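+
+Example (illustrative):
+
+```
+String.name /* Returns "String" */
+```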
+
+### Type#prototype <a id="type-prototype"></a>
+
+Signature:
+
+```
+Object prototype;
+```
+
+Returns the prototype object for the type. When an attribute is accessed on an object that doesn't exist the prototype object is checked to see if an attribute with the requested name exists. If it does, the attribute's value is returned.
+
+The prototype functionality is used to implement methods.
+
+Example:
+
+```
+3.to_string() /* Even though '3' does not have a to_string property the Number type's prototype object does. */
+```
diff --git a/doc/19-technical-concepts.md b/doc/19-technical-concepts.md
new file mode 100644
index 0000000..b3a0561
--- /dev/null
+++ b/doc/19-technical-concepts.md
@@ -0,0 +1,2287 @@
+# Technical Concepts <a id="technical-concepts"></a>
+
+This chapter provides technical concepts and design insights
+into specific Icinga 2 components such as:
+
+* [Application](19-technical-concepts.md#technical-concepts-application)
+* [Configuration](19-technical-concepts.md#technical-concepts-configuration)
+* [Features](19-technical-concepts.md#technical-concepts-features)
+* [Check Scheduler](19-technical-concepts.md#technical-concepts-check-scheduler)
+* [Checks](19-technical-concepts.md#technical-concepts-checks)
+* [Cluster](19-technical-concepts.md#technical-concepts-cluster)
+* [TLS Network IO](19-technical-concepts.md#technical-concepts-tls-network-io)
+
+## Application <a id="technical-concepts-application"></a>
+
+### CLI Commands <a id="technical-concepts-application-cli-commands"></a>
+
+The Icinga 2 application is managed with different CLI sub commands.
+`daemon` takes care of loading the configuration files, running the
+application as a daemon, etc.
+Other sub commands allow you to enable features, generate and request
+TLS certificates, or enter the debug console.
+
+The main entry point for each CLI command parses the command line
+parameters and then triggers the required actions.
+
+### daemon CLI command <a id="technical-concepts-application-cli-commands-daemon"></a>
+
+This CLI command loads the configuration files, starting with `icinga2.conf`.
+The [configuration compiler](19-technical-concepts.md#technical-concepts-configuration) parses the
+file and detects additional file includes, constants, and any other DSL
+specific declaration.
+
+At this stage, the configuration will already be checked against the
+defined grammar in the scanner, and custom object validators will also be
+checked.
+
+If the user provided `-C/--validate`, the CLI command returns with the
+validation exit code.
+
+When running as daemon, additional parameters are checked, e.g. whether
+this application was triggered by a reload, needs to daemonize with fork()
+involved and update the object's authority. The latter is important for
+HA-enabled cluster zones.
+
+## Configuration <a id="technical-concepts-configuration"></a>
+
+### Lexer <a id="technical-concepts-configuration-lexer"></a>
+
+The lexer stage does not understand the DSL itself, it only
+maps specific character sequences into identifiers.
+
+This allows Icinga to detect the beginning of a string with `"`,
+reading the following characters and determining the end of the
+string with again `"`.
+
+Other parts covered by the lexer are escape sequences inside a string,
+e.g. `"\"abc"`.
+
+The lexer also identifies logical operators, e.g. `&` or `in`,
+specific keywords like `object`, `import`, etc., and comment blocks.
+
+Please check `lib/config/config_lexer.ll` for details.
+
+Icinga uses [Flex](https://github.com/westes/flex) in the first stage.
+
+> Flex (The Fast Lexical Analyzer)
+>
+> Flex is a fast lexical analyser generator. It is a tool for generating programs
+> that perform pattern-matching on text. Flex is a free (but non-GNU) implementation
+> of the original Unix lex program.
+
+### Parser <a id="technical-concepts-configuration-parser"></a>
+
+The parser stage puts the identifiers from the lexer into more
+context with flow control and sequences.
+
+The following comparison is parsed into a left term, an operator
+and a right term.
+
+```
+x > 5
+```
+
+The DSL contains many elements which require a specific order,
+and sometimes only a left term for example.
+
+The parser also takes care of parsing an object declaration for
+example. It already knows from the lexer that `object` marks the
+beginning of an object. It then expects a type string afterwards,
+and the object name - which can be either a string with double quotes
+or a previously defined constant.
+
+An opening bracket `{` in this specific context starts the object
+scope, which also is stored for later scope specific variable access.
+
+If there's an apply rule defined, this follows the same principle.
+The config parser detects the scope of an apply rule and generates
+Icinga 2 C++ code for the parsed string tokens.
+
+```
+assign where host.vars.sla == "24x7"
+```
+
+is parsed into an assign token identifier, and the string expression
+is compiled into a new `ApplyExpression` object.
+
+The flow control inside the parser ensures that for example `ignore where`
+can only be defined when a previous `assign where` was given - or when
+inside an apply for rule.
+
+Another example is specific object types which allow assign expressions,
+specifically group objects. Other objects must throw a configuration error.
+
+Please check `lib/config/config_parser.yy` for more details,
+and the [language reference](17-language-reference.md#language-reference) chapter for
+documented DSL keywords and sequences.
+
+> Icinga uses [Bison](https://en.wikipedia.org/wiki/GNU_bison) as parser generator
+> which reads a specification of a context-free language, warns about any parsing
+> ambiguities, and generates a parser in C++ which reads sequences of tokens and
+> decides whether the sequence conforms to the syntax specified by the grammar.
+
+
+### Compiler <a id="technical-concepts-configuration-compiler"></a>
+
+The config compiler initializes the scanner inside the [lexer](19-technical-concepts.md#technical-concepts-configuration-lexer)
+stage.
+
+The configuration files are parsed into memory from inside the [daemon CLI command](19-technical-concepts.md#technical-concepts-application-cli-commands-daemon)
+which invokes the config validation in `ValidateConfigFiles()`. This compiles the
+files into an AST expression which is executed.
+
+At this stage, the expressions generate so-called "config items" which
+are a pre-stage of the later compiled object.
+
+`ConfigItem::CommitItems` takes care of committing the items, and doing a
+rollback on failure. It also checks against matching apply rules from the previous run
+and generates statistics about the objects which can be seen by the config validation.
+
+`ConfigItem::CommitNewItems` collects the registered types and items,
+and checks for a specific required order, e.g. a service object needs
+a host object first.
+
+The following stages happen then:
+
+- **Commit**: A workqueue then commits the items in a parallel fashion for this specific type. The object gets its name, and the AST expression is executed. The resulting object is then registered in the item's `m_Object` member as a reference.
+- **OnAllConfigLoaded**: Special signal for each object to pre-load required object attributes, resolve group membership, initialize functions and timers.
+- **CreateChildObjects**: Run apply rules for this specific type.
+- **CommitNewItems**: Apply rules may generate new config items, this is to ensure that they again run through the stages.
+
+Note that the items are now committed and the configuration is validated and loaded
+into memory. The final config objects are not yet activated though.
+
+This only happens after the validation, when the application is about to be run
+with `ConfigItem::ActivateItems`.
+
+Each item has an object created in `m_Object` which is checked in a loop.
+Again, the dependency order of activated objects is important here, e.g. logger features come first, then
+config objects and last the checker, api, etc. features. This is done by sorting the objects
+based on their type specific activation priority.
+
+The following signals are triggered in the stages:
+
+- **PreActivate**: Setting the `active` flag for the config object.
+- **Activate**: Calls `Start()` on the object, sets the local HA authority and notifies subscribers that this object is now activated (e.g. for config updates in the DB backend).
+
+
+### References <a id="technical-concepts-configuration-references"></a>
+
+* [The Icinga Config Compiler: An Overview](https://www.netways.de/blog/2018/07/12/the-icinga-config-compiler-an-overview/)
+* [A parser/lexer/compiler for the Leonardo language](https://github.com/EmilGedda/Leonardo)
+* [I wrote a programming language. Here’s how you can, too.](https://medium.freecodecamp.org/the-programming-language-pipeline-91d3f449c919)
+* [http://onoffswitch.net/building-a-custom-lexer/](http://onoffswitch.net/building-a-custom-lexer/)
+* [Writing an Interpreter with Lex, Yacc, and Memphis](http://memphis.compilertools.net/interpreter.html)
+* [Flex](https://github.com/westes/flex)
+* [GNU Bison](https://www.gnu.org/software/bison/)
+
+## Core <a id="technical-concepts-core"></a>
+
+### Core: Reload Handling <a id="technical-concepts-core-reload"></a>
+
+The initial design of the reload state machine looks like this:
+
+* receive reload signal SIGHUP
+* fork a child process, start configuration validation in parallel work queues
+* parent process continues with old configuration objects and the event scheduling
+(doing checks, replicating cluster events, triggering alert notifications, etc.)
+* validation NOT ok: child process terminates, parent process continues with old configuration state
+* validation ok: child process signals parent process to terminate and save its current state (all events until now) into the icinga2 state file
+* parent process shuts down writing icinga2.state file
+* child process waits for parent process gone, reads the icinga2 state file and synchronizes all historical and status data
+* child becomes the new session leader
+
+Since Icinga 2.6, there are two processes when checked with `ps aux | grep icinga2` or `pidof icinga2`.
+This was to ensure that feature file descriptors don't leak into the plugin process (e.g. DB IDO MySQL sockets).
+
+Icinga 2.9 changed the reload handling a bit with SIGUSR2 signals
+and systemd notifies.
+
+With systemd, it could occur that the process tree was broken, resulting
+in all remaining processes being killed on stop instead of a clean exit.
+You can read the full story [here](https://github.com/Icinga/icinga2/issues/7309).
+
+With 2.11 you'll now see 3 processes:
+
+- The umbrella process, which takes care of signal handling and process spawning/stopping
+- The main process with the check scheduler, notifications, etc.
+- The execution helper process
+
+During reload, the umbrella process spawns a new reload process which validates the configuration.
+Once successful, the new reload process signals the umbrella process that it is finished.
+The umbrella process forwards the signal and tells the old main process to shutdown.
+The old main process writes the icinga2.state file. The umbrella process signals
+the reload process that the main process terminated.
+
+The reload process, which was idling until now, continues by reading the written
+state file and running the event loop (checks, notifications, "events", ...). The reload
+process itself also spawns the execution helper process again.
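+
+Summarized as a simplified sketch of the flow described above:
+
+```
+umbrella -- SIGHUP ---------------> spawns reload process (config validation)
+umbrella <- "validation OK" ------- reload process
+umbrella -- "shut down" ----------> old main process (writes icinga2.state, exits)
+umbrella -- "main process gone" --> reload process
+reload process: reads icinga2.state, runs the event loop, spawns the execution helper again
+```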
+
+
+## Features <a id="technical-concepts-features"></a>
+
+Features are implemented in specific libraries and can be enabled
+using CLI commands.
+
+Features either write specific data or receive data.
+
+Examples for writing data: [DB IDO](14-features.md#db-ido), [Graphite](14-features.md#graphite-carbon-cache-writer), [InfluxDB](14-features.md#influxdb-writer), [GELF](14-features.md#gelfwriter), etc.
+Examples for receiving data: [REST API](12-icinga2-api.md#icinga2-api), etc.
+
+The implementation of features makes use of existing libraries
+and functionality. This makes the code more abstract, but shorter
+and easier to read.
+
+Features register callback functions on specific events they want
+to handle. For example the `GraphiteWriter` feature subscribes to
+new CheckResult events.
+
+Each time Icinga 2 receives and processes a new check result, this
+event is triggered and forwarded to all subscribers.
+
+The registered function in the GraphiteWriter feature is then called and processes
+the received data. Features which connect Icinga 2 to external interfaces
+normally parse and reformat the received data into an applicable format.
+
+Since this check result signal is blocking, many of the features include a work queue
+with asynchronous task handling.
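+
+A condensed sketch of that subscription pattern (member and handler names are simplified, not the literal `GraphiteWriter` code):
+
+```cpp
+/* Sketch: a writer feature subscribes to the check result signal and defers
+ * the actual processing to its own work queue, keeping the signal handler non-blocking. */
+void GraphiteWriter::Start(bool runtimeCreated)
+{
+	ObjectImpl<GraphiteWriter>::Start(runtimeCreated);
+
+	Checkable::OnNewCheckResult.connect(
+		[this](const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, const MessageOrigin::Ptr&) {
+			// Only enqueue here; the TCP write to the carbon cache happens asynchronously.
+			m_WorkQueue.Enqueue([this, checkable, cr]() { InternalCheckResultHandler(checkable, cr); });
+		});
+}
+```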
+
+The GraphiteWriter uses a TCP socket to communicate with the carbon cache
+daemon of Graphite. The InfluxDBWriter is instead writing bulk metric messages
+to InfluxDB's HTTP API, similar to Elasticsearch.
+
+
+## Check Scheduler <a id="technical-concepts-check-scheduler"></a>
+
+The check scheduler starts a thread which loops forever. It waits for
+check events to be inserted into `m_IdleCheckables`.
+
+If the number of pending check events is larger than the configured
+maximum of concurrent checks, the thread waits until execution slots become available again.
+
+In addition, further checks on enabled checks, check periods, etc. are
+performed. Once all conditions have passed, the next check timestamp is
+calculated and updated. This is also the timestamp by which Icinga expects
+a new check result ("freshness check").
+
+The object is removed from idle checkables, and inserted into the
+pending checkables list. This can be seen via REST API metrics for the
+checker component feature as well.
+
+The actual check execution happens asynchronously using the application's
+thread pool.
+
+Once the check returns, it is removed from pending checkables and again
+inserted into idle checkables. This ensures that the scheduler takes this
+checkable event into account in the next iteration.
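+
+A rough pseudocode sketch of that loop (the helper names are purely illustrative, not the actual `CheckerComponent` methods):
+
+```cpp
+// Illustrative pseudocode of the scheduler loop described above.
+for (;;) {
+	WaitForIdleCheckableOrWakeup();              // blocks on m_IdleCheckables
+
+	if (GetPendingCheckables() >= GetMaxConcurrentChecks())
+		continue;                                // wait until execution slots free up
+
+	Checkable::Ptr checkable = PopNextDueIdleCheckable();
+	if (!checkable)
+		continue;
+
+	checkable->UpdateNextCheck();                // also defines the freshness deadline
+	MoveToPendingCheckables(checkable);          // visible via the checker's REST API metrics
+
+	// Execute asynchronously in the application's thread pool;
+	// on completion the checkable is moved back to the idle list.
+	RunCheckAsync(checkable);
+}
+```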
+
+### Start <a id="technical-concepts-check-scheduler-start"></a>
+
+When checkable objects get activated during the startup phase,
+the checker feature registers a handler for this event. This is due
+to the fact that the `checker` feature is fully optional, and e.g. not
+used on command endpoint clients.
+
+Whenever such an object activation signal is triggered, Icinga 2 checks
+whether it is [authoritative for this object](19-technical-concepts.md#technical-concepts-cluster-ha-object-authority).
+This means that inside an HA enabled zone with two endpoints, only non-paused checkable objects are
+actively inserted into the idle checkable list for the check scheduler.
+
+### Initial Check <a id="technical-concepts-check-scheduler-initial"></a>
+
+When a new checkable object (host or service) is initially added to the
+configuration, Icinga 2 performs the following during startup:
+
+* `Checkable::Start()` is called and calculates the first check time
+* With a spread delta, the next check time is actually set.
+
+If the next check should happen within a time frame of 60 seconds,
+Icinga 2 calculates a delta from a random value. The minimum of `check_interval`
+and 60 seconds is used as the basis, multiplied by a random value between 0 and 1.
+
+In the best case, this check gets executed immediately after application start.
+In the worst case, the check is scheduled at the latest 60 seconds
+after start.
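+
+A minimal sketch of that calculation, assuming the behavior described above:
+
+```cpp
+double now = Utility::GetTime();
+
+if (GetNextCheck() < now + 60) {
+	// Spread the first check over at most min(check_interval, 60) seconds.
+	double delta = std::min(GetCheckInterval(), 60.0);
+	delta *= static_cast<double>(std::rand()) / RAND_MAX;
+
+	SetNextCheck(now + delta);
+}
+```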
+
+The reason for delaying and spreading checks during startup is that
+the application typically needs more resources at this time (cluster connections,
+feature warmup, initial syncs, etc.). Immediate execution of
+thousands of checks could lead to performance problems and additional
+events for each received check result.
+
+Therefore the initial check window is 60 seconds on application startup,
+randomly seeded for all checkables. This is not predictable over multiple restarts
+for specific checkable objects; the delta changes every time.
+
+### Scheduling Offset <a id="technical-concepts-check-scheduler-offset"></a>
+
+There's a high chance that many checkable objects get executed at the same time
+and interval after startup. The initial scheduling spreads that a little, but
+Icinga 2 also attempts to keep fixed intervals, even with high check latency.
+
+During startup, Icinga 2 calculates the scheduling offset from a random number:
+
+* `Checkable::Checkable()` calls `SetSchedulingOffset()` with `Utility::Random()`
+* The offset is a pseudo-random integral value between `0` and `RAND_MAX`.
+
+Whenever the next check time is updated with `Checkable::UpdateNextCheck()`,
+the scheduling offset is taken into account.
+
+Depending on the state type (SOFT or HARD), either the `retry_interval` or `check_interval`
+is used. If the interval is greater than 1 second, the time adjustment is calculated in the
+following way:
+
+`now * 100 + offset` divided by `interval * 100`, using the remainder (that's what `fmod()` is for),
+which is then divided by 100 again.
+
+Example: offset is 6500, interval 300, now is 1542190472.
+
+```
+1542190472 * 100 + 6500 = 154219053700
+300 * 100 = 30000
+154219053700 / 30000 = 5140635.123333
+
+(5140635.123333 - 5140635.0) * 30000 = 3700
+3700 / 100 = 37
+```
+
+37 seconds as an offset would be far too much, so a second, smaller adjustment is calculated
+based on 5 times the actual interval.
+
+Again, the remainder is calculated from the offset and `interval * 5`. This is divided by 100 again,
+with an additional 0.5 seconds delay added.
+
+Example: offset is 6500, interval 300.
+
+```
+6500 / 1500 = 4.333333333333333
+(4.333333333333333 - 4.0) * 1500 = 500
+500 / 100 = 5
+5 + 0.5 = 5.5
+```
+
+The minimum of the first adjustment and this second offset calculation based on the interval is
+taken; in the above example `5.5` wins.
+
+The actual next check time subtracts the adjustment from `now` plus the interval to provide
+a more widespread scheduling time among all checkable objects.
+
+`nextCheck = now - adj + interval`
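+
+Condensed, the whole adjustment roughly looks like this (a sketch along the lines of `Checkable::UpdateNextCheck()`, not a verbatim copy):
+
+```cpp
+double interval = (GetStateType() == StateTypeSoft && GetLastCheckResult())
+	? GetRetryInterval()
+	: GetCheckInterval();
+
+double now = Utility::GetTime();
+double adj = 0;
+
+if (interval > 1)
+	adj = fmod(now * 100 + GetSchedulingOffset(), interval * 100) / 100.0;
+
+if (adj != 0.0)
+	adj = std::min(0.5 + fmod(GetSchedulingOffset(), interval * 5) / 100.0, adj);
+
+double nextCheck = now - adj + interval;
+```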
+
+You may wonder which other values can result from this offset calculation. Consider calculating more examples
+with different interval settings.
+
+Example: offset is 34567, interval 60, now is 1542190472.
+
+```
+1542190472 * 100 + 34567 = 154219081767
+60 * 100 = 6000
+154219081767 / 6000 = 25703180.2945
+(25703180.2945 - 25703180.0) * 6000 / 100 = 17.67
+
+34567 / 300 = 115.223333333333333
+(115.223333333333333 - 115.0) * 300 / 100 + 0.5 = 1.17
+```
+
+`1m` interval starts at `now + 1.2s`.
+
+Example: offset is 12345, interval 86400, now is 1542190472.
+
+```
+1542190472 * 100 + 12345 = 154219059545
+86400 * 100 = 8640000
+154219059545 / 8640000 = 17849.428188078703704
+(17849.428188078703704 - 17849) * 8640000 = 3699545
+3699545 / 100 = 36995.45
+
+12345 / 432000 = 0.028576388888889
+0.028576388888889 * 432000 / 100 + 0.5 = 123.95
+```
+
+`1d` interval starts at `now + 2m4s`.
+
+> **Note**
+>
+> In case you have a better algorithm at hand, feel free to discuss this in a PR on GitHub.
+> It needs to fulfill two things: 1) spread and shuffle execution times on each `next_check` update
+> 2) not too narrow a window for both long and short intervals.
+> Application startup and initial checks need to be handled with care in a slightly different
+> fashion.
+
+When `SetNextCheck()` is called, registered signal handlers are triggered. One of them is
+`CheckerComponent::NextCheckChangedHandler()` inside the `CheckerComponent` class, which
+removes and re-inserts the next check event in the scheduling queue. This queue basically
+is a list with multiple indexes, keyed by the scheduling info and the object.
+
+
+## Checks <a id="technical-concepts-checks"></a>
+
+### Check Latency and Execution Time <a id="technical-concepts-checks-latency"></a>
+
+Each check command execution logs the start and end time, from which
+Icinga 2 (and the end user) can calculate the plugin execution time.
+
+```cpp
+GetExecutionEnd() - GetExecutionStart()
+```
+
+The higher the execution time, the higher the command timeout must be set. Furthermore
+users and developers are encouraged to look into plugin optimizations to minimize the
+execution time. Sometimes it is better to let an external daemon/script do the checks
+and feed them back via REST API.
+
+Icinga 2 stores the scheduled start and end time for a check. If the actual
+check execution time differs from the scheduled time, e.g. due to performance
+problems or limited execution slots (concurrent checks), these values are stored
+inside the check result and the delta can be computed from them.
+
+The difference between the two deltas is called `check latency`.
+
+```cpp
+(GetScheduleEnd() - GetScheduleStart()) - CalculateExecutionTime()
+```
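+
+A small numeric example with made-up values:
+
+```cpp
+// Illustrative values only:
+double scheduleStart  = 1000.0; // check was due
+double executionStart = 1005.0; // plugin actually started (waited 5s for a slot)
+double executionEnd   = 1010.0; // plugin finished
+double scheduleEnd    = 1010.5; // result was processed
+
+double executionTime = executionEnd - executionStart;          // 5.0 seconds
+double latency = (scheduleEnd - scheduleStart) - executionTime; // 5.5 seconds
+```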
+
+### Severity <a id="technical-concepts-checks-severity"></a>
+
+The severity attribute was introduced with Icinga v2.11 and provides
+a value calculated from a bit mask based on specific checkable object states.
+
+The severity value is pre-calculated for visualization interfaces
+such as Icinga Web which sorts the problem dashboard by severity by default.
+
+The higher the severity number is, the more important the problem is.
+However, the formula can change across Icinga 2 releases.
+
+
+## Cluster <a id="technical-concepts-cluster"></a>
+
+This documentation refers to technical roles between cluster
+endpoints.
+
+- The `server` or `parent` role accepts incoming connection attempts and handles requests
+- The `client` role actively connects to remote endpoints, receiving config/commands, requesting certificates, etc.
+
+A client role is not necessarily bound to the Icinga agent.
+It may also be a satellite which actively connects to the
+master.
+
+### Communication <a id="technical-concepts-cluster-communication"></a>
+
+Icinga 2 uses its own certificate authority (CA) by default. The
+public and private CA keys can be generated on the signing master.
+
+Each node certificate must be signed by the private CA key.
+
+Note: The following description uses `parent node` and `child node`.
+This also applies to nodes in the same cluster zone.
+
+During the connection attempt, a TLS handshake is performed.
+If the public certificate of a child node is not signed by the same
+CA, the child node is not trusted and the connection will be closed.
+
+If the TLS handshake succeeds, the parent node reads the
+certificate's common name (CN) of the child node and looks for
+a local Endpoint object name configuration.
+
+If there is no Endpoint object found, further communication
+(runtime and config sync, etc.) is terminated.
+
+The child node also checks the CN from the parent node's public
+certificate. If the child node does not find any local Endpoint
+object name configuration, it will not trust the parent node.
+
+Both checks prevent accepting cluster messages from an untrusted
+source endpoint.
+
+If an Endpoint match was found, there is one additional security
+mechanism in place: Endpoints belong to a Zone hierarchy.
+
+Several cluster messages can only be sent "top down", others like
+check results are allowed to be sent from the child to the parent node.
+
+Once this check succeeds the cluster messages are exchanged and processed.
+
+
+### CSR Signing <a id="technical-concepts-cluster-csr-signing"></a>
+
+In order to make things easier, Icinga 2 provides built-in methods
+to allow child nodes to request a signed certificate from the
+signing master.
+
+Icinga 2 v2.8 introduced the possibility to request certificates
+from indirectly connected nodes. This is required for multi-level
+cluster environments with masters, satellites and agents.
+
+CSR Signing in general starts with the master setup. This step
+ensures that the master is in a working CSR signing state with:
+
+* public and private CA key in `/var/lib/icinga2/ca`
+* private `TicketSalt` constant defined inside the `api` feature
+* Cluster communication is ready and Icinga 2 listens on port 5665
+
+The child node setup which is run with CLI commands will now
+attempt to connect to the parent node. This is not necessarily
+the signing master instance, but could also be a parent satellite node.
+
+During this process the child node asks the user to verify the
+parent node's public certificate to prevent MITM attacks.
+
+There are two methods to request signed certificates:
+
+* Add the ticket into the request. This ticket was generated on the master
+beforehand and contains hashed details of the client it has been created for.
+The signing master uses this information to automatically sign the certificate
+request (see the example after this list).
+
+* Do not add a ticket into the request. It will be sent to the signing master
+which stores the pending request. Manual user interaction with CLI commands
+is necessary to sign the request.
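+
+For the ticket method, the ticket can be generated beforehand on the signing master, e.g. for an example agent CN:
+
+```
+icinga2 pki ticket --cn 'icinga-agent1.localdomain'
+```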
+
+The certificate request is sent as `pki::RequestCertificate` cluster
+message to the parent node.
+
+If the parent node is not the signing master, it stores the request
+in `/var/lib/icinga2/certificate-requests` and forwards the
+cluster message to its parent node.
+
+Once the message arrives on the signing master, it first verifies that
+the sent certificate request is valid. This is to prevent unwanted errors
+or modified requests from the "proxy" node.
+
+After verification, the signing master checks if the request contains
+a valid signing ticket. It hashes the certificate's common name and
+compares the value to the received ticket number.
+
+If the ticket is valid, the certificate request is immediately signed
+with the CA key. The request is sent back to the client inside a `pki::UpdateCertificate`
+cluster message.
+
+If the child node was not the certificate request origin, it only updates
+the cached request for the child node and sends another cluster message
+down to its child node (e.g. from a satellite to an agent).
+
+
+If no ticket was specified, the signing master waits until the
+certificate request is manually signed via the `ca sign` CLI command.
+
+> **Note**
+>
+> Push notifications for manual request signing are not yet implemented (TODO).
+
+Once the child node reconnects, it synchronizes all signed certificate requests.
+This can take some minutes and requires all nodes to reconnect to each other.
+
+
+#### CSR Signing: Clients without parent connection <a id="technical-concepts-cluster-csr-signing-clients-no-connection"></a>
+
+There is an additional scenario: The setup on a child node does
+not necessarily need a connection to the parent node.
+
+This mode leaves the node in a semi-configured state. You need
+to manually copy the master's public CA key into `/var/lib/icinga2/certs/ca.crt`
+on the client before starting Icinga 2.
+
+> **Note**
+>
+> The `client` in this case can be either a satellite or an agent.
+
+The parent node needs to actively connect to the child node.
+Once this connection succeeds, the child node will actively
+request a signed certificate.
+
+The update procedure works the same way as above.
+
+### High Availability <a id="technical-concepts-cluster-ha"></a>
+
+General high availability is automatically enabled between two endpoints in the same
+cluster zone.
+
+**This requires the same configuration and enabled features on both nodes.**
+
+HA zone members trust each other and share event updates as cluster messages.
+This includes for example check results, next check timestamp updates, acknowledgements
+or notifications.
+
+This ensures that both nodes are synchronized. If one node goes away, the
+remaining node takes over and continues as normal.
+
+#### High Availability: Object Authority <a id="technical-concepts-cluster-ha-object-authority"></a>
+
+Cluster nodes automatically determine the authority for configuration
+objects. By default, all config objects are set to `HARunEverywhere` and
+as such the object authority is true for any config object on any instance.
+
+Specific objects can override and influence this setting, e.g. with `HARunOnce`
+instead, prior to config object activation.
+
+This is done when the daemon starts and in a regular interval inside
+the ApiListener class, specifically calling `ApiListener::UpdateObjectAuthority()`.
+
+The algorithm works like this:
+
+* Determine whether this instance is assigned to a local zone and endpoint.
+* Collect all connected endpoints in this zone.
+* If there are two endpoints, but we only see ourselves and the application start was less than 60 seconds ago, do nothing (wait for the cluster reconnect to take place, grace period).
+* Sort the collected endpoints by name.
+* Iterate over all config types and their respective objects
+ * Ignore !active objects
+ * Ignore objects which are !HARunOnce. This means, they can run multiple times in a zone and don't need an authority update.
+ * If this instance doesn't have a local zone, set authority to true. This is for non-clustered standalone environments where everything belongs to this instance.
+ * Calculate the object authority based on the connected endpoint names.
+ * Set the authority (true or false)
+
+The object authority calculation works "offline" without any message exchange.
+Each instance calculates the SDBM hash of the config object name and takes it
+modulo the number of connected endpoints.
+This index is used to look up the corresponding endpoint in the connected endpoints array,
+including the local endpoint. If the local endpoint is equal to the selected endpoint,
+the authority is set to `true`, otherwise to `false`.
+
+```cpp
+authority = endpoints[Utility::SDBM(object->GetName()) % endpoints.size()] == my_endpoint;
+```
+
+`ConfigObject::SetAuthority(bool authority)` triggers the following events:
+
+* Authority is true and object currently paused: Resume the object and set `paused` to `false`.
+* Authority is false and object not paused: Pause the object and set `paused` to `true`.
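+
+A minimal sketch of that pause/resume logic (simplified, not the literal implementation):
+
+```cpp
+void ConfigObject::SetAuthority(bool authority)
+{
+	if (authority && GetPaused()) {
+		Resume();
+		SetPaused(false);
+	} else if (!authority && !GetPaused()) {
+		SetPaused(true);
+		Pause();
+	}
+}
+```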
+
+**This results in activated but paused objects on one endpoint.** You can verify
+that by querying the `paused` attribute for all objects via REST API
+or debug console on both endpoints.
+
+Endpoints inside a HA zone calculate the object authority independent from each other.
+This object authority is important for selected features explained below.
+
+Since features are configuration objects too, you must ensure that all nodes
+inside the HA zone share the same enabled features. If configured otherwise,
+one might have a checker feature on the left node and nothing on the right node.
+This leads to late check results because the right node holds half of the
+object authorities but does not execute those checks.
+
+By default, features are enabled to "Run-Everywhere". Specific features which
+support HA awareness, provide the `enable_ha` configuration attribute. When `enable_ha`
+is set to `true` (usually the default), "Run-Once" is set and the feature pauses on one side.
+
+```
+vim /etc/icinga2/features-enabled/graphite.conf
+
+object GraphiteWriter "graphite" {
+ ...
+ enable_ha = true
+}
+```
+
+Once such a feature is paused, there won't be any more event handling, e.g. the Elasticsearch
+feature won't process any check results nor write to the Elasticsearch REST API.
+
+When the cluster connection drops, the feature configuration object is updated with
+the new object authority by the ApiListener timer and resumes its operation. You can see
+that by grepping the log file for `resumed` and `paused`.
+
+```
+[2018-10-24 13:28:28 +0200] information/GraphiteWriter: 'g-ha' paused.
+```
+
+```
+[2018-10-24 13:28:28 +0200] information/GraphiteWriter: 'g-ha' resumed.
+```
+
+Specific features with HA capabilities are explained below.
+
+#### High Availability: Checker <a id="technical-concepts-cluster-ha-checker"></a>
+
+The `checker` feature only executes checks for `Checkable` objects (Host, Service)
+where it is authoritative.
+
+That way each node only executes checks for a segment of the overall configuration objects.
+
+The cluster message routing ensures that all check results are synchronized
+to nodes which are not authoritative for this configuration object.
+
+
+#### High Availability: Notifications <a id="technical-concepts-cluster-notifications"></a>
+
+The `notification` feature only sends notifications for `Notification` objects
+where it is authoritative.
+
+That way each node only executes notifications for a segment of all notification objects.
+
+Notified users and other event details are synchronized throughout the cluster.
+This is required if for example the DB IDO feature is active on the other node.
+
+#### High Availability: DB IDO <a id="technical-concepts-cluster-ha-ido"></a>
+
+If you don't have HA enabled for the IDO feature, both nodes will
+write their status and historical data to their own separate database
+backends.
+
+In order to avoid data separation and a split view (each node would require its
+own Icinga Web 2 installation on top), the high availability option was added
+to the DB IDO feature. This is enabled by default with the `enable_ha` setting.
+
+This requires a central database backend. Best practice is to use a MySQL cluster
+with a virtual IP.
+
+Both Icinga 2 nodes require the connection and credential details configured in
+their DB IDO feature.
+
+During startup Icinga 2 calculates whether the feature configuration object
+is authoritative on this node or not. The order is an alpha-numeric
+comparison, e.g. if you have `master1` and `master2`, Icinga 2 will enable
+the DB IDO feature on `master2` by default.
+
+If the connection between endpoints drops, the object authority is re-calculated.
+
+In order to prevent data duplication in a split-brain scenario where both
+nodes would write into the same database, there is another safety mechanism
+in place.
+
+The split-brain decision which node will write to the database is calculated
+from a quorum inside the `programstatus` table. On database connect, each node
+verifies whether the `endpoint_name` column contains an endpoint other than itself.
+In addition to that, the DB IDO feature compares the `last_update_time` column
+against the current timestamp minus the configured `failover_timeout` offset.
+
+That way only one active DB IDO feature writes to the database, even if they
+are not currently connected in a cluster zone. This prevents data duplication
+in historical tables.
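+
+A pseudocode sketch of that decision (function and parameter names are illustrative, not the actual DB IDO implementation):
+
+```cpp
+// Another instance may only take over writing when the currently active
+// endpoint's heartbeat in the programstatus table has become stale.
+bool CanTakeOverIdoWrites(const std::string& activeEndpoint, double lastUpdateTime,
+	const std::string& localEndpoint, double failoverTimeout, double now)
+{
+	if (activeEndpoint == localEndpoint)
+		return true; // we already own the programstatus row
+
+	// The active endpoint stopped updating programstatus longer ago than failover_timeout.
+	return lastUpdateTime < now - failoverTimeout;
+}
+```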
+
+### Health Checks <a id="technical-concepts-cluster-health-checks"></a>
+
+#### cluster-zone <a id="technical-concepts-cluster-health-checks-cluster-zone"></a>
+
+This built-in check provides the possibility to check for connectivity between
+zones.
+
+If you for example need to know whether the `master` zone is connected and processing
+messages with the child zone called `satellite` in this example, you can configure
+the [cluster-zone](10-icinga-template-library.md#itl-icinga-cluster-zone) check as a new service on all `master` zone hosts.
+
+```
+vim /etc/zones.d/master/host1.conf
+
+object Service "cluster-zone-satellite" {
+ check_command = "cluster-zone"
+ host_name = "host1"
+
+ vars.cluster_zone = "satellite"
+}
+```
+
+The check itself changes to NOT-OK if one or more child endpoints in the child zone
+are not connected to parent zone endpoints.
+
+In addition to the overall connectivity check, the log lag is calculated based
+on the to-be-sent replay log. Each instance stores that for its configured endpoint
+objects.
+
+This health check iterates over the target zone (`cluster_zone`) and their endpoints.
+
+The log lag is greater than zero if
+
+* the replay log synchronization is in progress and not yet finished or
+* the endpoint is not connected, and no replay log sync happened (obviously).
+
+The final log lag value is the worst value detected. If satellite1 has a log lag of
+`1.5` and satellite2 only has `0.5`, the computed value will be `1.5`.
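+
+A minimal sketch of that worst-value selection (the per-endpoint lag helper is illustrative):
+
+```cpp
+double worstLag = 0;
+
+// targetZone is the zone given via cluster_zone; GetEndpointLogLag() is an illustrative helper.
+for (const Endpoint::Ptr& endpoint : targetZone->GetEndpoints()) {
+	// Replay log lag of this endpoint: how far behind the to-be-sent log is.
+	worstLag = std::max(worstLag, GetEndpointLogLag(endpoint));
+}
+```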
+
+You can control the check state by using optional warning and critical thresholds
+for the log lag value.
+
+If this service exists multiple times, e.g. for each master host object, the log lag
+may differ based on the execution time. This happens for example on restart of
+an instance when the log replay is in progress and a health check is executed at different
+times.
+If the endpoint is not connected, both master instances may have saved a different log replay
+position from the last synchronisation.
+
+The lag value is returned as performance metric key `slave_lag`.
+
+Icinga 2 v2.9+ adds more performance metrics for these values:
+
+* `last_messages_sent` and `last_messages_received` as UNIX timestamp
+* `sum_messages_sent_per_second` and `sum_messages_received_per_second`
+* `sum_bytes_sent_per_second` and `sum_bytes_received_per_second`
+
+
+### Config Sync <a id="technical-concepts-cluster-config-sync"></a>
+
+The visible feature for the user is to put configuration files in `/etc/icinga2/zones.d/<zonename>`
+and have them synced automatically to all involved zones and endpoints.
+
+This not only includes host and service objects being checked
+in a satellite zone, but also additional config objects such as
+commands, groups, timeperiods and also templates.
+
+Additional thoughts and complexity added:
+
+- Putting files into zone directory names removes the burden of setting the `zone` attribute on each object in this directory. This is done automatically by the config compiler.
+- Inclusion of `zones.d` happens automatically, the user shouldn't be bothered about this.
+- Before the REST API was created, only static configuration files in `/etc/icinga2/zones.d` existed. With the addition of config packages, additional `zones.d` targets must be registered (e.g. used by the Director)
+- Only one config master is allowed. This one identifies itself with configuration files in `/etc/icinga2/zones.d`. This is not necessarily the zone master seen in the debug logs, that one is important for message routing internally.
+- Objects and templates which cannot be bound into a specific zone (e.g. hosts in the satellite zone) must be made available "globally".
+- Users must be able to deny the synchronisation of specific zones, e.g. for security reasons.
+
+#### Config Sync: Config Master <a id="technical-concepts-cluster-config-sync-config-master"></a>
+
+All zones must be configured and included in the `zones.conf` config file beforehand.
+The zone names are the identifier for the directories underneath the `/etc/icinga2/zones.d`
+directory. If a zone is not configured, it will not be included in the config sync - keep this
+in mind for troubleshooting.
+
+When the config master starts, the content of `/etc/icinga2/zones.d` is automatically
+included. There's no need for an additional entry in `icinga2.conf` like `conf.d`.
+You can verify this by running the config validation on debug level:
+
+```
+icinga2 daemon -C -x debug | grep 'zones.d'
+
+[2019-06-19 15:16:19 +0200] notice/ConfigCompiler: Compiling config file: /etc/icinga2/zones.d/global-templates/commands.conf
+```
+
+Once the config validation succeeds, the startup routine for the daemon
+copies the files into the "production" directory in `/var/lib/icinga2/api/zones`.
+This directory is used on all endpoints to store the received configuration,
+with the exception of the config master, which reads it from `/etc/icinga2/zones.d` instead.
+
+These operations are logged for better visibility.
+
+```
+[2019-06-19 15:26:38 +0200] information/ApiListener: Copying 1 zone configuration files for zone 'global-templates' to '/var/lib/icinga2/api/zones/global-templates'.
+[2019-06-19 15:26:38 +0200] information/ApiListener: Updating configuration file: /var/lib/icinga2/api/zones/global-templates//_etc/commands.conf
+```
+
+The master is finished at this point. Depending on the cluster configuration,
+the next step happens once an endpoint connects after a successful TLS handshake and certificate
+authentication.
+
+It calls `SendConfigUpdate(client)` which sends the [config::Update](19-technical-concepts.md#technical-concepts-json-rpc-messages-config-update)
+JSON-RPC message including all required zones and their configuration file content.
+
+
+#### Config Sync: Receive Config <a id="technical-concepts-cluster-config-sync-receive-config"></a>
+
+The secondary master endpoint and endpoints in a child zone will be connected to the config
+master. The endpoint receives the [config::Update](19-technical-concepts.md#technical-concepts-json-rpc-messages-config-update)
+JSON-RPC message and processes the content in `ConfigUpdateHandler()`. This method checks
+whether config should be accepted. In addition to that, it locks a local mutex to avoid race conditions
+with multiple syncs in parallel.
+
+After that, the received configuration content is analysed.
+
+> **Note**
+>
+> The cluster design allows that satellite endpoints may connect to the secondary master first.
+> There is no immediate need to always connect to the config master first, especially since
+> the satellite endpoints don't know that.
+>
+> The secondary master not only stores the master zone config files, but also all child zones.
+> This is also the case for any HA enabled zone with more than one endpoint.
+
+
+2.11 puts the received configuration files into a staging directory in
+`/var/lib/icinga2/api/zones-stage`. Previous versions directly wrote the
+files into production which could have led to broken configuration on the
+next manual restart.
+
+```
+[2019-06-19 16:08:29 +0200] information/ApiListener: New client connection for identity 'master1' to [127.0.0.1]:5665
+[2019-06-19 16:08:30 +0200] information/ApiListener: Applying config update from endpoint 'master1' of zone 'master'.
+[2019-06-19 16:08:30 +0200] information/ApiListener: Received configuration for zone 'agent' from endpoint 'master1'. Comparing the checksums.
+[2019-06-19 16:08:30 +0200] information/ApiListener: Stage: Updating received configuration file '/var/lib/icinga2/api/zones-stage/agent//_etc/host.conf' for zone 'agent'.
+[2019-06-19 16:08:30 +0200] information/ApiListener: Applying configuration file update for path '/var/lib/icinga2/api/zones-stage/agent' (176 Bytes).
+[2019-06-19 16:08:30 +0200] information/ApiListener: Received configuration for zone 'master' from endpoint 'master1'. Comparing the checksums.
+[2019-06-19 16:08:30 +0200] information/ApiListener: Applying configuration file update for path '/var/lib/icinga2/api/zones-stage/master' (17 Bytes).
+[2019-06-19 16:08:30 +0200] information/ApiListener: Received configuration from endpoint 'master1' is different to production, triggering validation and reload.
+```
+
+It then validates the received configuration in its own config stage. There is
+a parameter override in place which disables the automatic inclusion of the production
+config in `/var/lib/icinga2/api/zones`.
+
+Once completed, the reload is triggered. This follows the same configurable timeout
+as with the global reload.
+
+```
+[2019-06-19 16:52:26 +0200] information/ApiListener: Config validation for stage '/var/lib/icinga2/api/zones-stage/' was OK, replacing into '/var/lib/icinga2/api/zones/' and triggering reload.
+[2019-06-19 16:52:27 +0200] information/Application: Got reload command: Started new instance with PID '19945' (timeout is 300s).
+[2019-06-19 16:52:28 +0200] information/Application: Reload requested, letting new process take over.
+```
+
+Whenever the staged configuration validation fails, Icinga logs this including a reference
+to the startup log file which includes additional errors.
+
+```
+[2019-06-19 15:45:27 +0200] critical/ApiListener: Config validation failed for staged cluster config sync in '/var/lib/icinga2/api/zones-stage/'. Aborting. Logs: '/var/lib/icinga2/api/zones-stage//startup.log'
+```
+
+
+#### Config Sync: Changes and Reload <a id="technical-concepts-cluster-config-sync-changes-reload"></a>
+
+Whenever a new configuration is received, it is validated and upon success, the
+daemon automatically reloads. While the daemon continues with checks, the reload
+cannot hand over open TCP connections. That being said, reloading the daemon every time
+a configuration is synchronized would lead to many disconnected endpoints.
+
+Therefore the cluster config sync checks whether the configuration files actually
+changed, and will only trigger a reload when such a change happened.
+
+2.11 calculates a checksum from each file's content and compares this to the
+production configuration. Previous versions used additional metadata with timestamps from
+files which sometimes led to problems with out-of-sync timestamps.
+
+> **Note**
+>
+> For compatibility reasons, the timestamp metadata algorithm is still intact, e.g.
+> when the client is 2.11 already, but the parent endpoint is still on 2.10.
+
+Icinga logs a warning when this happens.
+
+```
+Received configuration update without checksums from parent endpoint satellite1. This behaviour is deprecated. Please upgrade the parent endpoint to 2.11+
+```
+
+
+The debug log provides more details on the actual checksums and checks. Future output
+may change; use this solely for troubleshooting and debugging whenever the cluster
+config sync fails.
+
+```
+[2019-06-19 16:13:16 +0200] information/ApiListener: Received configuration for zone 'agent' from endpoint 'master1'. Comparing the checksums.
+[2019-06-19 16:13:16 +0200] debug/ApiListener: Checking for config change between stage and production. Old (3): '{"/.checksums":"7ede1276a9a32019c1412a52779804a976e163943e268ec4066e6b6ec4d15d73","/.timestamp":"ec4354b0eca455f7c2ca386fddf5b9ea810d826d402b3b6ac56ba63b55c2892c","/_etc/host.conf":"35d4823684d83a5ab0ca853c9a3aa8e592adfca66210762cdf2e54339ccf0a44"}' vs. new (3): '{"/.checksums":"84a586435d732327e2152e7c9b6d85a340cc917b89ae30972042f3dc344ea7cf","/.timestamp":"0fd6facf35e49ab1b2a161872fa7ad794564eba08624373d99d31c32a7a4c7d3","/_etc/host.conf":"0d62075e89be14088de1979644b40f33a8f185fcb4bb6ff1f7da2f63c7723fcb"}'.
+[2019-06-19 16:13:16 +0200] debug/ApiListener: Checking /_etc/host.conf for checksum: 35d4823684d83a5ab0ca853c9a3aa8e592adfca66210762cdf2e54339ccf0a44
+[2019-06-19 16:13:16 +0200] debug/ApiListener: Path '/_etc/host.conf' doesn't match old checksum '0d62075e89be14088de1979644b40f33a8f185fcb4bb6ff1f7da2f63c7723fcb' with new checksum '35d4823684d83a5ab0ca853c9a3aa8e592adfca66210762cdf2e54339ccf0a44'.
+```
+
+
+#### Config Sync: Trust <a id="technical-concepts-cluster-config-sync-trust"></a>
+
+The config sync follows the "top down" approach, where the master endpoint in the master
+zone is allowed to synchronize configuration to the child zone, e.g. the satellite zone.
+
+Endpoints in the same zone, e.g. a secondary master, receive configuration for the same
+zone and all child zones.
+
+Endpoints in the satellite zone trust the parent zone, and will accept the pushed
+configuration via JSON-RPC cluster messages. By default, this is disabled and must
+be enabled with the `accept_config` attribute in the ApiListener feature (manually or with CLI
+helpers).
+
+The satellite zone will not only accept zone configuration for its own zone, but also
+all configured child zones. That is why it is important to configure the zone hierarchy
+on the satellite as well.
+
+Child zones are not allowed to sync configuration up to the parent zone. Each Icinga instance
+evaluates this at startup and knows on endpoint connect which config zones need to be synced.
+
+
+Global zones have a special trust relationship: They are synced to all child zones, be it
+a satellite zone or agent zone. Since checkable objects such as a Host or a Service object
+must have only one endpoint as authority, they cannot be put into a global zone (denied by
+the config compiler).
+
+Apply rules and templates are allowed, since they are evaluated in the endpoint which received
+the synced configuration. Keep in mind that there may be differences on the master and the satellite
+when e.g. hostgroup membership is used for assign where expressions, but the groups are only
+available on the master.
+
+
+### Cluster: Message Routing <a id="technical-concepts-cluster-message-routing"></a>
+
+One fundamental part of the cluster message routing is the MessageOrigin object.
+This is created when a new JSON-RPC message is received in `JsonRpcConnection::MessageHandler()`.
+
+It contains
+
+- FromZone being extracted from the endpoint object which owns the JsonRpcConnection
+- FromClient being the JsonRpcConnection bound to the endpoint object
+
+These attributes are checked in the message receive API handlers for access control, e.g. whether a
+message origin is from a child zone which is not allowed, etc.
+This is explained in the [JSON-RPC messages](19-technical-concepts.md#technical-concepts-json-rpc-messages) chapter.
+
+Whenever such a message is processed on the client, it may trigger additional cluster events
+which are sent back to other endpoints. Therefore it is key to always pass the MessageOrigin
+`origin` when processing these messages locally.
+
+Example:
+
+- Client receives a CheckResult from another endpoint in the same zone, call it `sender` for now
+- Calls ProcessCheckResult() to store the CR and calculate states, notifications, etc.
+- Calls the OnNewCheckResult() signal to trigger IDO updates
+
+OnNewCheckResult() also calls a registered cluster handler which forwards the CheckResult to other cluster members.
+
+Without any origin details, this CheckResult would be relayed to the `sender` endpoint again,
+which would process the message, call ProcessCheckResult() and OnNewCheckResult(), send it back, and so on.
+
+That creates a loop which our cluster protocol needs to prevent at all costs.
+
+RelayMessageOne() takes care of the routing. This involves fetching the targetZone for this message and its endpoints.
+
+- Don't relay messages to ourselves.
+- Don't relay messages to disconnected endpoints.
+- Don't relay the message to the zone through more than one endpoint unless this is our own zone.
+- Don't relay messages back to the endpoint which we got the message from. **THIS**
+- Don't relay messages back to the zone which we got the message from.
+- Only relay message to the zone master if we're not currently the zone master.
+
+```
+ e1 is zone master, e2 and e3 are zone members.
+
+ Message is sent from e2 or e3:
+ !isMaster == true
+ targetEndpoint e1 is zone master -> send the message
+ targetEndpoint e3 is not zone master -> skip it, avoid routing loops
+
+ Message is sent from e1:
+ !isMaster == false -> send the messages to e2 and e3 being the zone routing master.
+```
+
+By passing the `origin`, the following condition prevents sending a message back to the sender:
+
+```cpp
+if (origin && origin->FromClient && targetEndpoint == origin->FromClient->GetEndpoint()) {
+```
+
+This message then simply gets skipped for this specific Endpoint and is never sent.
+
+This analysis originates from a long-lasting [downtime loop bug](https://github.com/Icinga/icinga2/issues/7198).
+
+## TLS Network IO <a id="technical-concepts-tls-network-io"></a>
+
+### TLS Connection Handling <a id="technical-concepts-tls-network-io-connection-handling"></a>
+
+Icinga supports two connection directions, controlled via the `host` attribute
+inside the Endpoint objects:
+
+* Outgoing connection attempts
+* Incoming connection handling
+
+Once the connection is established, higher layers can exchange JSON-RPC and
+HTTP messages. It doesn't matter in which direction these messages go.
+
+This offers a big advantage over single-direction connections, such as
+polling via HTTP only. Also, connections are kept alive as long as data
+is transmitted.
+
+When the master connects to the child zone member(s), this requires more
+resources there. Keep this in mind when endpoints are not reachable: the
+TCP timeout blocks other resources. Moving a satellite zone into the middle
+between masters and agents helps to split the tasks: the master
+processes and stores data, deploys configuration and serves the API. The
+satellites schedule the checks, connect to the agents and receive
+check results.
+
+Agents/Clients can also connect to the parent endpoints - be it a master or
+a satellite. This is the preferred way out of a DMZ, and also reduces the
+overhead of connecting to e.g. 2000 agents on the master. You can
+benchmark this when TCP connections are broken and timeouts are encountered.
+
+#### Master Processes Incoming Connection <a id="technical-concepts-tls-network-io-connection-handling-incoming"></a>
+
+* The node starts a new ApiListener, this invokes `AddListener()`
+ * Setup TLS Context (SslContext)
+ * Initialize global I/O engine and create a TCP acceptor
+ * Resolve bind host/port (optional)
+ * Listen on IPv4 and IPv6
+ * Re-use socket address and port
+ * Listen on port 5665 with `INT_MAX` possible sockets
+* Spawn a new Coroutine which listens for new incoming connections as 'TCP server' pattern
+ * Accept new connections asynchronously
+ * Spawn a new Coroutine which handles the new client connection in a different context, Role: Server
+
+#### Master Connects Outgoing <a id="technical-concepts-tls-network-io-connection-handling-outgoing"></a>
+
+* The node starts a timer in a 10 seconds interval with `ApiReconnectTimerHandler()` as callback
+ * Loop over all configured zones, exclude global zones and not direct parent/child zones
+ * Get the endpoints configured in the zones, exclude: local endpoint, no 'host' attribute, already connected or in progress
+ * Call `AddConnection()`
+* Spawn a new Coroutine after making the TLS context
+ * Use the global I/O engine for socket I/O
+ * Create TLS stream
+ * Connect to endpoint host/port details
+ * Handle the client connection, Role: Client
+
+#### TLS Handshake <a id="technical-concepts-tls-network-io-connection-handling-handshake"></a>
+
+* Create a TLS connection in sslConn and perform an asynchronous TLS handshake
+* Get the peer certificate
+* Verify the presented certificate: `ssl::verify_peer` and `ssl::verify_client_once`
+* Get the certificate CN and compare it against the endpoint name - if not matching, return and close the connection
+
+#### Data Exchange <a id="technical-concepts-tls-network-io-connection-data-exchange"></a>
+
+Everything runs through TLS; we don't use any "raw" connections or plain message handling.
+
+HTTP and JSON-RPC messages share the same port and API, so additional handling is required.
+
+On a new connection and successful TLS handshake, the first byte is read. This either
+is a JSON-RPC message in Netstring format starting with a number, or plain HTTP.
+
+```
+HTTP/1.1
+
+2:{}
+```
+
+Depending on this, `ClientJsonRpc` or `ClientHttp` are assigned.
+
+JSON-RPC:
+
+* Create a new JsonRpcConnection object
+ * When the endpoint object is configured, spawn a Coroutine which takes care of syncing the client (file and runtime config, replay log, etc.)
+ * If there is no endpoint, this connection is treated as an anonymous client, with a configurable limit. Such a client may send a CSR signing request, for example.
+ * Start the JsonRpcConnection - this spawns Coroutines to HandleIncomingMessages, WriteOutgoingMessages, HandleAndWriteHeartbeats and CheckLiveness
+
+HTTP:
+
+* Create a new HttpServerConnection
+ * Start the HttpServerConnection - this spawns Coroutines to ProcessMessages and CheckLiveness
+
+
+All the mentioned Coroutines run asynchronously using the global I/O engine's context.
+More details on this topic can be found in [this blogpost](https://www.netways.de/blog/2019/04/04/modern-c-programming-coroutines-with-boost/).
+
+The lower levels of context switching and sharing or event polling are
+hidden in Boost ASIO, Beast, Coroutine and Context libraries.
+
+#### Data Exchange: Coroutines and I/O Engine <a id="technical-concepts-tls-network-io-connection-data-exchange-coroutines"></a>
+
+Light-weight and fast operations such as connection handling or TLS handshakes
+are performed in the default `IoBoundWorkSlot` pool inside the I/O engine.
+
+The I/O engine has another pool available: `CpuBoundWork`.
+
+This is used for processing CPU intensive tasks, such as handling an HTTP request.
+Depending on the available CPU cores, this is limited to `std::thread::hardware_concurrency() * 3u / 2u`.
+
+```
+1 core * 3 / 2 = 1
+2 cores * 3 / 2 = 3
+8 cores * 3 / 2 = 12
+16 cores * 3 / 2 = 24
+```
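+
+A standalone snippet to reproduce this calculation for a given machine:
+
+```cpp
+#include <iostream>
+#include <thread>
+
+int main()
+{
+	unsigned int cores = std::thread::hardware_concurrency();
+
+	// Same integer arithmetic as the CpuBoundWork limit described above.
+	std::cout << "CPU-bound work slots: " << cores * 3u / 2u << '\n';
+}
+```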
+
+The I/O engine itself is used for all network I/O in Icinga, not only the cluster
+and the REST API. Features such as Graphite, InfluxDB, etc. also consume its functionality.
+
+There are `2 * CPU cores` threads available which run the event loop
+in the I/O engine. This polls the I/O service with `m_IoService.run();`
+and triggers asynchronous event progress for waiting coroutines.
+
+<!--
+## REST API <a id="technical-concepts-rest-api"></a>
+
+Icinga 2 provides its own HTTP server which shares the port 5665 with
+the JSON-RPC cluster protocol.
+-->
+
+## JSON-RPC Message API <a id="technical-concepts-json-rpc-messages"></a>
+
+**The JSON-RPC message API is not a public API for end users.** In case you want
+to interact with Icinga, use the [REST API](12-icinga2-api.md#icinga2-api).
+
+This section describes the internal cluster messages exchanged between endpoints.
+
+> **Tip**
+>
+> Debug builds with `icinga2 daemon -DInternal.DebugJsonRpc=1` unveils the JSON-RPC messages.
+
+### Registered Handler Functions
+
+Functions by example:
+
+Event Sender: `Checkable::OnNewCheckResult`
+
+```
+On<xyz>.connect(&xyzHandler)
+```
+
+Event Receiver (Client): `CheckResultAPIHandler` in `REGISTER_APIFUNCTION`
+
+```
+<xyz>APIHandler()
+```
+
+### Messages
+
+#### icinga::Hello <a id="technical-concepts-json-rpc-messages-icinga-hello"></a>
+
+> Location: `apilistener.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | icinga::Hello
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+---------------------|-------------|------------------
+capabilities | Number | Bitmask, see `lib/remote/apilistener.hpp`.
+version | Number | Icinga 2 version, e.g. 21300 for v2.13.0.
+
+##### Functions
+
+Event Sender: When a new client connects in `NewClientHandlerInternal()`.
+Event Receiver: `HelloAPIHandler`
+
+##### Permissions
+
+None, this is a required message.
+
+#### event::Heartbeat <a id="technical-concepts-json-rpc-messages-event-heartbeat"></a>
+
+> Location: `jsonrpcconnection-heartbeat.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | event::Heartbeat
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+----------|---------------|------------------
+timeout | Number | Heartbeat timeout, sender sets 120s.
+
+
+##### Functions
+
+Event Sender: `JsonRpcConnection::HeartbeatTimerHandler`
+Event Receiver: `HeartbeatAPIHandler`
+
+Both sender and receiver exchange this heartbeat message. If the sender detects
+that a client endpoint hasn't sent anything in the updated timeout span, it disconnects
+the client. This is to avoid stale connections with no message processing.
+
+##### Permissions
+
+None, this is a required message.
+
+#### event::CheckResult <a id="technical-concepts-json-rpc-messages-event-checkresult"></a>
+
+> Location: `clusterevents.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | event::CheckResult
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+----------|---------------|------------------
+host | String | Host name
+service | String | Service name
+cr | Serialized CR | Check result
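+
+Put together, such a message could look like this (shortened; host and service names are made up and the serialized check result is abbreviated):
+
+```
+{
+  "jsonrpc": "2.0",
+  "method": "event::CheckResult",
+  "params": {
+    "host": "icinga-agent1.localdomain",
+    "service": "disk",
+    "cr": { "...": "serialized CheckResult attributes" }
+  }
+}
+```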
+
+##### Functions
+
+Event Sender: `Checkable::OnNewCheckResult`
+Event Receiver: `CheckResultAPIHandler`
+
+##### Permissions
+
+The receiver will not process messages from not configured endpoints.
+
+Message updates will be dropped when:
+
+* Hosts/services do not exist
+* Origin is a remote command endpoint different from the configured one, and its zone is not allowed to access this checkable.
+
+#### event::SetNextCheck <a id="technical-concepts-json-rpc-messages-event-setnextcheck"></a>
+
+> Location: `clusterevents.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | event::SetNextCheck
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+------------|---------------|------------------
+host | String | Host name
+service | String | Service name
+next\_check | Timestamp | Next scheduled time as UNIX timestamp.
+
+##### Functions
+
+Event Sender: `Checkable::OnNextCheckChanged`
+Event Receiver: `NextCheckChangedAPIHandler`
+
+##### Permissions
+
+The receiver will not process messages from not configured endpoints.
+
+Message updates will be dropped when:
+
+* Checkable does not exist.
+* Origin endpoint's zone is not allowed to access this checkable.
+
+#### event::SetLastCheckStarted <a id="technical-concepts-json-rpc-messages-event-setlastcheckstarted"></a>
+
+> Location: `clusterevents.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | event::SetLastCheckStarted
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+---------------------|-----------|------------------
+host | String | Host name
+service | String | Service name
+last\_check\_started | Timestamp | Last check's start time as UNIX timestamp.
+
+##### Functions
+
+Event Sender: `Checkable::OnLastCheckStartedChanged`
+Event Receiver: `LastCheckStartedChangedAPIHandler`
+
+##### Permissions
+
+The receiver will not process messages from not configured endpoints.
+
+Message updates will be dropped when:
+
+* Checkable does not exist.
+* Origin endpoint's zone is not allowed to access this checkable.
+
+#### event::SetStateBeforeSuppression <a id="technical-concepts-json-rpc-messages-event-setstatebeforesuppression"></a>
+
+> Location: `clusterevents.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------------------------------
+jsonrpc | 2.0
+method | event::SetStateBeforeSuppression
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+---------------------------|--------|-----------------------------------------------
+host | String | Host name
+service | String | Service name
+state\_before\_suppression | Number | Checkable state before the current suppression
+
+##### Functions
+
+Event Sender: `Checkable::OnStateBeforeSuppressionChanged`
+Event Receiver: `StateBeforeSuppressionChangedAPIHandler`
+
+Used to sync the checkable state from before a notification suppression (for example
+because the checkable is in a downtime) started within the same HA zone.
+
+##### Permissions
+
+The receiver will not process messages from not configured endpoints.
+
+Message updates will be dropped when:
+
+* Checkable does not exist.
+* Origin endpoint is not within the local zone.
+
+#### event::SetSuppressedNotifications <a id="technical-concepts-json-rpc-messages-event-setsupressednotifications"></a>
+
+> Location: `clusterevents.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | event::SetSuppressedNotifications
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+-------------------------|---------------|------------------
+host | String | Host name
+service | String | Service name
+supressed\_notifications | Number | Bitmask for suppressed notifications.
+
+##### Functions
+
+Event Sender: `Checkable::OnSuppressedNotificationsChanged`
+Event Receiver: `SuppressedNotificationsChangedAPIHandler`
+
+Used to sync the notification state of a host or service object within the same HA zone.
+
+##### Permissions
+
+The receiver will not process messages from not configured endpoints.
+
+Message updates will be dropped when:
+
+* Checkable does not exist.
+* Origin endpoint is not within the local zone.
+
+#### event::SetSuppressedNotificationTypes <a id="technical-concepts-json-rpc-messages-event-setsuppressednotificationtypes"></a>
+
+> Location: `clusterevents.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | event::SetSuppressedNotificationTypes
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+-------------------------|--------|------------------
+notification | String | Notification name
+supressed\_notifications | Number | Bitmask for suppressed notifications.
+
+Used to sync the state of a notification object within the same HA zone.
+
+##### Functions
+
+Event Sender: `Notification::OnSuppressedNotificationsChanged`
+Event Receiver: `SuppressedNotificationTypesChangedAPIHandler`
+
+##### Permissions
+
+The receiver will not process messages from not configured endpoints.
+
+Message updates will be dropped when:
+
+* Notification does not exist.
+* Origin endpoint is not within the local zone.
+
+
+#### event::SetNextNotification <a id="technical-concepts-json-rpc-messages-event-setnextnotification"></a>
+
+> Location: `clusterevents.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | event::SetNextNotification
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+-------------------|---------------|------------------
+host | String | Host name
+service | String | Service name
+notification | String | Notification name
+next\_notification | Timestamp | Next scheduled notification time as UNIX timestamp.
+
+##### Functions
+
+Event Sender: `Notification::OnNextNotificationChanged`
+Event Receiver: `NextNotificationChangedAPIHandler`
+
+##### Permissions
+
+The receiver will not process messages from not configured endpoints.
+
+Message updates will be dropped when:
+
+* Notification does not exist.
+* Origin endpoint's zone is not allowed to access this checkable.
+
+#### event::UpdateLastNotifiedStatePerUser <a id="technical-concepts-json-rpc-messages-event-updatelastnotifiedstateperuser"></a>
+
+> Location: `clusterevents.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | event::UpdateLastNotifiedStatePerUser
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+-------------|--------|------------------
+notification | String | Notification name
+user | String | User name
+state | Number | Checkable state the user just got a problem notification for
+
+Used to sync the state of a notification object within the same HA zone.
+
+##### Functions
+
+Event Sender: `Notification::OnLastNotifiedStatePerUserUpdated`
+Event Receiver: `LastNotifiedStatePerUserUpdatedAPIHandler`
+
+##### Permissions
+
+The receiver will not process messages from not configured endpoints.
+
+Message updates will be dropped when:
+
+* Notification does not exist.
+* Origin endpoint is not within the local zone.
+
+#### event::ClearLastNotifiedStatePerUser <a id="technical-concepts-json-rpc-messages-event-clearlastnotifiedstateperuser"></a>
+
+> Location: `clusterevents.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | event::ClearLastNotifiedStatePerUser
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+-------------|--------|------------------
+notification | String | Notification name
+
+Used to sync the state of a notification object within the same HA zone.
+
+##### Functions
+
+Event Sender: `Notification::OnLastNotifiedStatePerUserCleared`
+Event Receiver: `LastNotifiedStatePerUserClearedAPIHandler`
+
+##### Permissions
+
+The receiver will not process messages from not configured endpoints.
+
+Message updates will be dropped when:
+
+* Notification does not exist.
+* Origin endpoint is not within the local zone.
+
+#### event::SetForceNextCheck <a id="technical-concepts-json-rpc-messages-event-setforcenextcheck"></a>
+
+> Location: `clusterevents.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | event::SetForceNextCheck
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+----------|---------------|------------------
+host | String | Host name
+service | String | Service name
+forced | Boolean | Forced next check (execute now)
+
+##### Functions
+
+Event Sender: `Checkable::OnForceNextCheckChanged`
+Event Receiver: `ForceNextCheckChangedAPIHandler`
+
+##### Permissions
+
+The receiver will not process messages from not configured endpoints.
+
+Message updates will be dropped when:
+
+* Checkable does not exist.
+* Origin endpoint's zone is not allowed to access this checkable.
+
+#### event::SetForceNextNotification <a id="technical-concepts-json-rpc-messages-event-setforcenextnotification"></a>
+
+> Location: `clusterevents.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | event::SetForceNextNotification
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+----------|---------------|------------------
+host | String | Host name
+service | String | Service name
+forced    | Boolean       | Forced next notification (send now)
+
+##### Functions
+
+Event Sender: `Checkable::SetForceNextNotification`
+Event Receiver: `ForceNextNotificationChangedAPIHandler`
+
+##### Permissions
+
+The receiver will not process messages from endpoints which are not configured.
+
+Message updates will be dropped when:
+
+* Checkable does not exist.
+* Origin endpoint's zone is not allowed to access this checkable.
+
+#### event::SetAcknowledgement <a id="technical-concepts-json-rpc-messages-event-setacknowledgement"></a>
+
+> Location: `clusterevents.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | event::SetAcknowledgement
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+-----------|---------------|------------------
+host | String | Host name
+service | String | Service name
+author | String | Acknowledgement author name.
+comment | String | Acknowledgement comment content.
+acktype | Number | Acknowledgement type (0=None, 1=Normal, 2=Sticky)
+notify | Boolean | Notification should be sent.
+persistent | Boolean | Whether the comment is persistent.
+expiry | Timestamp | Optional expire time as UNIX timestamp.
+
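+For illustration, such a message could look like this; all values are made up:
+
+```
+{
+  "jsonrpc": "2.0",
+  "method": "event::SetAcknowledgement",
+  "params": {
+    "host": "icinga2-agent1.localdomain",
+    "service": "http",
+    "author": "icingaadmin",
+    "comment": "Known issue, working on it.",
+    "acktype": 2,
+    "notify": true,
+    "persistent": false,
+    "expiry": 1714562400
+  }
+}
+```
+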
+##### Functions
+
+Event Sender: `Checkable::OnAcknowledgementSet`
+Event Receiver: `AcknowledgementSetAPIHandler`
+
+##### Permissions
+
+The receiver will not process messages from endpoints which are not configured.
+
+Message updates will be dropped when:
+
+* Checkable does not exist.
+* Origin endpoint's zone is not allowed to access this checkable.
+
+#### event::ClearAcknowledgement <a id="technical-concepts-json-rpc-messages-event-clearacknowledgement"></a>
+
+> Location: `clusterevents.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | event::ClearAcknowledgement
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+----------|---------------|------------------
+host | String | Host name
+service | String | Service name
+
+##### Functions
+
+Event Sender: `Checkable::OnAcknowledgementCleared`
+Event Receiver: `AcknowledgementClearedAPIHandler`
+
+##### Permissions
+
+The receiver will not process messages from endpoints which are not configured.
+
+Message updates will be dropped when:
+
+* Checkable does not exist.
+* Origin endpoint's zone is not allowed to access this checkable.
+
+#### event::SendNotifications <a id="technical-concepts-json-rpc-messages-event-sendnotifications"></a>
+
+> Location: `clusterevents.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | event::SendNotifications
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+----------|---------------|------------------
+host | String | Host name
+service | String | Service name
+cr | Serialized CR | Check result
+type | Number | enum NotificationType, same as `types` for notification objects.
+author | String | Author name
+text | String | Notification text
+
+##### Functions
+
+Event Sender: `Checkable::OnNotificationsRequested`
+Event Receiver: `SendNotificationsAPIHandler`
+
+Signals that notifications have to be sent within the same HA zone. This is relevant if the checkable and its
+notifications are active on different endpoints.
+
+##### Permissions
+
+The receiver will not process messages from endpoints which are not configured.
+
+Message updates will be dropped when:
+
+* Checkable does not exist.
+* Origin endpoint is not within the local zone.
+
+#### event::NotificationSentUser <a id="technical-concepts-json-rpc-messages-event-notificationsentuser"></a>
+
+> Location: `clusterevents.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | event::NotificationSentUser
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+--------------|-----------------|------------------
+host | String | Host name
+service | String | Service name
+notification | String | Notification name.
+user | String | Notified user name.
+type | Number | enum NotificationType, same as `types` in Notification objects.
+cr | Serialized CR | Check result.
+author | String | Notification author (for specific types)
+text | String | Notification text (for specific types)
+command | String | Notification command name.
+
+##### Functions
+
+Event Sender: `Checkable::OnNotificationSentToUser`
+Event Receiver: `NotificationSentUserAPIHandler`
+
+##### Permissions
+
+The receiver will not process messages from endpoints which are not configured.
+
+Message updates will be dropped when:
+
+* Checkable does not exist.
+* Origin endpoint's zone is not the same as the receiver's. This binds notification messages to the HA zone.
+
+#### event::NotificationSentToAllUsers <a id="technical-concepts-json-rpc-messages-event-notificationsenttoallusers"></a>
+
+> Location: `clusterevents.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | event::NotificationSentToAllUsers
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+----------------------------|-----------------|------------------
+host | String | Host name
+service | String | Service name
+notification | String | Notification name.
+users | Array of String | Notified user names.
+type | Number | enum NotificationType, same as `types` in Notification objects.
+cr | Serialized CR | Check result.
+author | String | Notification author (for specific types)
+text | String | Notification text (for specific types)
+last\_notification | Timestamp | Last notification time as UNIX timestamp.
+next\_notification | Timestamp | Next scheduled notification time as UNIX timestamp.
+notification\_number | Number | Current notification number in problem state.
+last\_problem\_notification | Timestamp | Last problem notification time as UNIX timestamp.
+no\_more\_notifications | Boolean | Whether to send future notifications when this notification becomes active on this HA node.
+
+##### Functions
+
+Event Sender: `Checkable::OnNotificationSentToAllUsers`
+Event Receiver: `NotificationSentToAllUsersAPIHandler`
+
+##### Permissions
+
+The receiver will not process messages from endpoints which are not configured.
+
+Message updates will be dropped when:
+
+* Checkable does not exist.
+* Origin endpoint's zone is not the same as the receiver's. This binds notification messages to the HA zone.
+
+#### event::ExecuteCommand <a id="technical-concepts-json-rpc-messages-event-executecommand"></a>
+
+> Location: `clusterevents-check.cpp` and `checkable-check.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | event::ExecuteCommand
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+---------------|---------------|------------------
+host | String | Host name.
+service | String | Service name.
+command\_type | String | `check_command` or `event_command`.
+command | String | CheckCommand or EventCommand name.
+check\_timeout | Number | Check timeout of the checkable object, if specified as `check_timeout` attribute.
+macros | Dictionary | Command arguments as key/value pairs for remote execution.
+endpoint | String | The endpoint to execute the command on.
+deadline | Number | A Unix timestamp indicating the execution deadline
+source | String | The execution UUID
+
+
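+An illustrative message for a remote check execution could look like this. All names, macros and
+IDs below are made up, and a real message may carry additional resolved macros:
+
+```
+{
+  "jsonrpc": "2.0",
+  "method": "event::ExecuteCommand",
+  "params": {
+    "host": "icinga2-agent1.localdomain",
+    "service": "disk",
+    "command_type": "check_command",
+    "command": "disk",
+    "check_timeout": 60,
+    "macros": {
+      "disk_wfree": "10%",
+      "disk_cfree": "5%"
+    },
+    "endpoint": "icinga2-agent1.localdomain",
+    "deadline": 1714476300,
+    "source": "7a4bcd20-3f23-4ba6-9cbe-8ad57f315f95"
+  }
+}
+```
+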
+##### Functions
+
+**Event Sender:** This gets constructed directly in `Checkable::ExecuteCheck()`, `Checkable::ExecuteEventHandler()` or `ApiActions::ExecuteCommand()` when a remote command endpoint is configured.
+
+* `Get{CheckCommand,EventCommand}()->Execute()` simulates an execution and extracts all command arguments into the `macro` dictionary (inside lib/methods tasks).
+* When the endpoint is connected, the message is constructed and sent directly.
+* When the endpoint is not connected, is not syncing replay logs, and more than 5 minutes have passed since application start, an UNKNOWN check result is generated for the user ("not connected").
+
+**Event Receiver:** `ExecuteCommandAPIHandler`
+
+Special handling: this calls `ClusterEvents::EnqueueCheck()` for command endpoint checks.
+This function enqueues check tasks into a queue which is controlled in `RemoteCheckThreadProc()`.
+If the `endpoint` parameter is specified and is not equal to the local endpoint, the message is forwarded to the correct endpoint zone.
+
+##### Permissions
+
+The receiver will not process messages from endpoints which are not configured.
+
+Message updates will be dropped when:
+
+* Origin endpoint's zone is not a parent zone of the receiver endpoint.
+* `accept_commands = false` is set in the `api` feature configuration. In this case, an UNKNOWN check result is sent back to the sender.
+
+The receiver constructs a virtual host object and looks for the local CheckCommand object.
+
+Returns UNKNOWN as check result to the sender
+
+* when the CheckCommand object does not exist.
+* when there was an exception triggered from check execution, e.g. the plugin binary could not be executed or similar.
+
+The returned messages are synced directly to the sender's endpoint, no cluster broadcast.
+
+> **Note**: EventCommand errors are just logged on the remote endpoint.
+
+#### event::UpdateExecutions <a id="technical-concepts-json-rpc-messages-event-updateexecutions"></a>
+
+> Location: `clusterevents.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | event::UpdateExecutions
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+---------------|---------------|------------------
+host | String | Host name.
+service | String | Service name.
+executions | Dictionary | Executions to be updated
+
+##### Functions
+
+**Event Sender:** `ClusterEvents::ExecutedCommandAPIHandler`, `ClusterEvents::UpdateExecutionsAPIHandler`, `ApiActions::ExecuteCommand`
+**Event Receiver:** `ClusterEvents::UpdateExecutionsAPIHandler`
+
+##### Permissions
+
+The receiver will not process messages from endpoints which are not configured.
+
+Message updates will be dropped when:
+
+* Checkable does not exist.
+* Origin endpoint's zone is not allowed to access this checkable.
+
+#### event::ExecutedCommand <a id="technical-concepts-json-rpc-messages-event-executedcommand"></a>
+
+> Location: `clusterevents.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | event::ExecutedCommand
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+---------------|---------------|------------------
+host | String | Host name.
+service | String | Service name.
+execution | String | The execution ID executed.
+exitStatus | Number | The command exit status.
+output | String | The command output.
+start | Number | The unix timestamp at the start of the command execution
+end | Number | The unix timestamp at the end of the command execution
+
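+For illustration, a result message could look like this; all values are made up:
+
+```
+{
+  "jsonrpc": "2.0",
+  "method": "event::ExecutedCommand",
+  "params": {
+    "host": "icinga2-agent1.localdomain",
+    "service": "disk",
+    "execution": "7a4bcd20-3f23-4ba6-9cbe-8ad57f315f95",
+    "exitStatus": 0,
+    "output": "DISK OK - free space: / 45000 MiB (85%)",
+    "start": 1714476242,
+    "end": 1714476243
+  }
+}
+```
+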
+##### Functions
+
+**Event Sender:** `ClusterEvents::ExecuteCheckFromQueue`, `ClusterEvents::ExecuteCommandAPIHandler`
+**Event Receiver:** `ClusterEvents::ExecutedCommandAPIHandler`
+
+##### Permissions
+
+The receiver will not process messages from endpoints which are not configured.
+
+Message updates will be dropped when:
+
+* Checkable does not exist.
+* Origin endpoint's zone is not allowed to access this checkable.
+
+#### event::SetRemovalInfo <a id="technical-concepts-json-rpc-messages-event-setremovalinfo"></a>
+
+> Location: `clusterevents.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | event::SetRemovalInfo
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+---------------|-------------|---------------------------------
+object\_type | String | Object type (`"Comment"` or `"Downtime"`)
+object\_name | String | Object name
+removed\_by | String | Name of the removal requestor
+remove\_time | Timestamp | Time of the remove operation
+
+##### Functions
+
+**Event Sender**: `Comment::OnRemovalInfoChanged` and `Downtime::OnRemovalInfoChanged`
+**Event Receiver**: `SetRemovalInfoAPIHandler`
+
+This message is used to synchronize information about manual comment and downtime removals before deleting the
+corresponding object.
+
+##### Permissions
+
+This message is only accepted from the local zone and from parent zones.
+
+#### config::Update <a id="technical-concepts-json-rpc-messages-config-update"></a>
+
+> Location: `apilistener-filesync.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | config::Update
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+-----------|---------------|------------------
+update | Dictionary | Config file paths and their content.
+update\_v2 | Dictionary | Additional meta config files introduced in 2.4+ for compatibility reasons.
+
+##### Functions
+
+**Event Sender:** `SendConfigUpdate()` called in `ApiListener::SyncClient()` when a new client endpoint connects.
+**Event Receiver:** `ConfigUpdateHandler` reads the config update content and stores it in `/var/lib/icinga2/api`.
+When it detects a configuration change, the function requests an application restart.
+
+##### Permissions
+
+The receiver will not process messages from endpoints which are not configured.
+
+Message updates will be dropped when:
+
+* The origin sender is not in a parent zone of the receiver.
+* `api` feature does not accept config.
+
+Config updates will be ignored when:
+
+* The zone is not configured on the receiver endpoint.
+* The zone is authoritative on this instance (this only happens on a master which has `/etc/icinga2/zones.d` populated, and prevents sync loops)
+
+#### config::UpdateObject <a id="technical-concepts-json-rpc-messages-config-updateobject"></a>
+
+> Location: `apilistener-configsync.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | config::UpdateObject
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+---------------------|-------------|------------------
+name | String | Object name.
+type | String | Object type name.
+version | Number | Object version.
+config | String | Config file content for `_api` packages.
+modified\_attributes | Dictionary | Modified attributes at runtime as key value pairs.
+original\_attributes | Array | Original attributes as array of keys.
+
+
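+An illustrative message for a runtime created service could look like this. The names, version and
+config content are made up:
+
+```
+{
+  "jsonrpc": "2.0",
+  "method": "config::UpdateObject",
+  "params": {
+    "name": "icinga2-agent1.localdomain!http",
+    "type": "Service",
+    "version": 1714476245.123456,
+    "config": "object Service \"http\" {\n\thost_name = \"icinga2-agent1.localdomain\"\n\tcheck_command = \"http\"\n}\n",
+    "modified_attributes": {
+      "notes": "Managed via the REST API"
+    },
+    "original_attributes": [
+      "notes"
+    ]
+  }
+}
+```
+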
+##### Functions
+
+**Event Sender:** Either on client connect (full sync), or when an object is created/updated at runtime.
+
+`ApiListener::SendRuntimeConfigObjects()` gets called when a new endpoint is connected
+and runtime created config objects need to be synced. This invokes a call to `UpdateConfigObject()`
+to only sync this JsonRpcConnection client.
+
+`ConfigObject::OnActiveChanged` (created or deleted) or `ConfigObject::OnVersionChanged` (updated)
+also call `UpdateConfigObject()`.
+
+**Event Receiver:** `ConfigUpdateObjectAPIHandler` calls `ConfigObjectUtility::CreateObject()` in order
+to create the object if it does not already exist. Afterwards, all modified attributes are applied
+and, where necessary, original attributes are restored. The object version is set as well, keeping it in sync
+with the sender.
+
+##### Permissions
+
+###### Sender
+
+When a client (receiver) connects:
+
+The sender only syncs config object updates to a client which can access
+the config object, in `ApiListener::SendRuntimeConfigObjects()`.
+
+In addition to that, the client endpoint's zone is checked to determine whether it may access
+the config object.
+
+Runtime updated object:
+
+Only if the config object belongs to the `_api` package.
+
+
+###### Receiver
+
+The receiver will not process messages from endpoints which are not configured.
+
+Message updates will be dropped when:
+
+* Origin sender endpoint's zone is in a child zone.
+* `api` feature does not accept config
+* The received config object type does not exist (this is to prevent failures with older nodes and new object types).
+
+Error handling:
+
+* Log an error if `CreateObject` fails (only if the object does not already exist)
+* Local object version is newer than the received version, object will not be updated.
+* Compare modified and original attributes and restore any type of change here.
+
+
+#### config::DeleteObject <a id="technical-concepts-json-rpc-messages-config-deleteobject"></a>
+
+> Location: `apilistener-configsync.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | config::DeleteObject
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+--------------------|-------------|------------------
+name | String | Object name.
+type | String | Object type name.
+version | Number | Object version.
+
+##### Functions
+
+**Event Sender:**
+
+`ConfigObject::OnActiveChanged` (created or deleted) or `ConfigObject::OnVersionChanged` (updated)
+call `DeleteConfigObject()`.
+
+**Event Receiver:** `ConfigDeleteObjectAPIHandler`
+
+##### Permissions
+
+###### Sender
+
+Runtime deleted object:
+
+Only if the config object belongs to the `_api` package.
+
+###### Receiver
+
+The receiver will not process messages from endpoints which are not configured.
+
+Message updates will be dropped when:
+
+* Origin sender endpoint's zone is in a child zone.
+* `api` feature does not accept config
+* The received config object type does not exist (this is to prevent failures with older nodes and new object types).
+* The object in question was not created at runtime, i.e. it does not belong to the `_api` package.
+
+Error handling:
+
+* Log an error if `DeleteObject` fails (only if the object does not already exist)
+
+#### pki::RequestCertificate <a id="technical-concepts-json-rpc-messages-pki-requestcertificate"></a>
+
+> Location: `jsonrpcconnection-pki.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | pki::RequestCertificate
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+--------------|---------------|------------------
+ticket | String | Own ticket, or as satellite in CA proxy from local store.
+cert\_request | String | Certificate request content from local store, optional.
+
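+For illustration, such a request could look like this; the ticket value is made up and the CSR
+content is truncated:
+
+```
+{
+  "jsonrpc": "2.0",
+  "method": "pki::RequestCertificate",
+  "params": {
+    "ticket": "4f75d2ecd253575fe9180938ebff7cbca262f96e",
+    "cert_request": "-----BEGIN CERTIFICATE REQUEST-----\n...\n-----END CERTIFICATE REQUEST-----\n"
+  }
+}
+```
+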
+##### Functions
+
+Event Sender: `RequestCertificateHandler`
+Event Receiver: `RequestCertificateHandler`
+
+##### Permissions
+
+This is an anonymous request, and the number of anonymous clients can be configured
+in the `api` feature.
+
+Only valid certificate request messages are processed, and valid signed certificates
+won't be signed again.
+
+#### pki::UpdateCertificate <a id="technical-concepts-json-rpc-messages-pki-updatecertificate"></a>
+
+> Location: `jsonrpcconnection-pki.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | pki::UpdateCertificate
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+---------------------|---------------|------------------
+status\_code | Number | Status code, 0=ok.
+cert | String | Signed certificate content.
+ca | String | Public CA certificate content.
+fingerprint\_request | String | Certificate fingerprint from the CSR.
+
+
+##### Functions
+
+**Event Sender:**
+
+* When a client requests a certificate in `RequestCertificateHandler` and the satellite
+already has a signed certificate, the `pki::UpdateCertificate` message is constructed and sent back.
+* When the endpoint holding the master's CA private key (and TicketSalt private key) is able to sign
+the request, the `pki::UpdateCertificate` message is constructed and sent back.
+
+**Event Receiver:** `UpdateCertificateHandler`
+
+##### Permissions
+
+Message updates are dropped when
+
+* The origin sender is not in a parent zone of the receiver.
+* The certificate fingerprint is in an invalid format.
+
+#### log::SetLogPosition <a id="technical-concepts-json-rpc-messages-log-setlogposition"></a>
+
+> Location: `apilistener.cpp` and `jsonrpcconnection.cpp`
+
+##### Message Body
+
+Key | Value
+----------|---------
+jsonrpc | 2.0
+method | log::SetLogPosition
+params | Dictionary
+
+##### Params
+
+Key | Type | Description
+--------------------|---------------|------------------
+log\_position | Timestamp | The endpoint's log position as UNIX timestamp.
+
+
+##### Functions
+
+**Event Sender:**
+
+During log replay to a client endpoint in `ApiListener::ReplayLog()`, each processed
+file generates a message which updates the log position timestamp.
+
+`ApiListener::ApiTimerHandler()` invokes a check to keep all connected endpoints and
+their log positions in sync during log replay.
+
+**Event Receiver:** `SetLogPositionHandler`
+
+##### Permissions
+
+The receiver will not process messages from endpoints which are not configured.
diff --git a/doc/20-script-debugger.md b/doc/20-script-debugger.md
new file mode 100644
index 0000000..e8ee6db
--- /dev/null
+++ b/doc/20-script-debugger.md
@@ -0,0 +1,177 @@
+# Script Debugger <a id="script-debugger"></a>
+
+You can run the Icinga 2 daemon with the `-X` (`--script-debugger`)
+parameter to enable the script debugger:
+
+```bash
+icinga2 daemon -X
+```
+
+When an exception occurs or the [debugger](17-language-reference.md#breakpoints)
+keyword is encountered in a user script, Icinga 2 launches a console that
+allows the user to debug the script.
+
+You can also attach the script debugger to the [configuration validation](11-cli-commands.md#config-validation):
+
+```bash
+icinga2 daemon -C -X
+```
+
+Here is a list of common errors which can be diagnosed with the script debugger:
+
+* Configuration errors e.g. [apply rules](03-monitoring-basics.md#using-apply)
+* Errors in user-defined [functions](17-language-reference.md#functions)
+
+## Debugging Configuration Errors <a id="script-debugger-config-errors"></a>
+
+The following example illustrates the problem of a service [apply rule](03-monitoring-basics.md#using-apply-for)
+which expects a dictionary value for `config`, but the host custom variable only
+provides a string value:
+
+```
+object Host "script-debugger-host" {
+ check_command = "icinga"
+
+ vars.http_vhosts["example.org"] = "192.168.1.100" // a string value
+}
+
+apply Service for (http_vhost => config in host.vars.http_vhosts) {
+ import "generic-service"
+
+ vars += config // expects a dictionary
+
+ check_command = "http"
+}
+```
+
+The error message on config validation will warn about the wrong value type,
+but does not provide any context about which objects are affected.
+
+Enable the script debugger and run the config validation:
+
+```
+# icinga2 daemon -C -X
+
+Breakpoint encountered in /etc/icinga2/conf.d/services.conf: 59:67-65:1
+Exception: Error: Error while evaluating expression: Cannot convert value of type 'String' to an object.
+Location:
+/etc/icinga2/conf.d/services.conf(62): check_command = "http"
+/etc/icinga2/conf.d/services.conf(63):
+/etc/icinga2/conf.d/services.conf(64): vars += config
+ ^^^^^^^^^^^^^^
+/etc/icinga2/conf.d/services.conf(65): }
+/etc/icinga2/conf.d/services.conf(66):
+You can inspect expressions (such as variables) by entering them at the prompt.
+To leave the debugger and continue the program use "$continue".
+<1> =>
+```
+
+You can print the variables `vars` and `config` to get an idea about
+their values:
+
+```
+<1> => vars
+null
+<2> => config
+"192.168.1.100"
+<3> =>
+```
+
+The `vars` attribute has to be a dictionary. Trying to set this attribute to a string caused
+the error in our configuration example.
+
+In order to determine the name of the host where the value of the `config` variable came from
+you can inspect attributes of the service object:
+
+```
+<3> => host_name
+"script-debugger-host-01"
+<4> => name
+"http"
+```
+
+Additionally you can view the service object attributes by printing the value of `this`.
+
+## Using Breakpoints <a id="script-debugger-breakpoints"></a>
+
+In order to halt execution in a script you can use the `debugger` keyword:
+
+```
+object Host "script-debugger-host-02" {
+ check_command = "dummy"
+ check_interval = 5s
+
+ vars.dummy_text = {{
+ var text = "Hello from " + macro("$name$")
+ debugger
+ return text
+ }}
+}
+```
+
+Icinga 2 will spawn a debugger console every time the function is executed:
+
+```
+# icinga2 daemon -X
+...
+Breakpoint encountered in /etc/icinga2/tests/script-debugger.conf: 7:5-7:12
+You can inspect expressions (such as variables) by entering them at the prompt.
+To leave the debugger and continue the program use "$continue".
+<1> => text
+"Hello from script-debugger-host-02"
+<2> => $continue
+```
+
+## Debugging API Filters <a id="script-debugger-api-filters"></a>
+
+Queries against the [Icinga 2 REST API](12-icinga2-api.md#icinga2-api) can use
+filters, just like available in `assign where` expressions. If these filters cause
+an internal error, they return an empty result to the caller.
+
+In order to analyse these server-side errors, you can use the script debugger.
+
+The following example tries to filter for all host objects where the custom variable
+`os` is set. There are various possibilities to check that; one of them would be
+`host.vars.os != ""`. Another idea is to use the [contains](18-library-reference.md#dictionary-contains) method on the custom
+attribute dictionary like this: `host.vars.contains("os")`.
+
+```bash
+curl -k -s -u root:icinga -H 'Accept: application/json' -H 'X-HTTP-Method-Override: GET' \
+ -X POST 'https://localhost:5665/v1/objects/services' \
+ -d '{ "filter": "host.vars.contains(\"os\")", "attrs": [ "__name" ], "joins": [ "host.name", "host.vars" ], "pretty": true }'
+```
+
+This will fail on all hosts which don't have any custom variable specified.
+
+```
+# icinga2 daemon -X
+
+Breakpoint encountered.
+Exception: Error: Argument is not a callable object.
+Location: in <API query>: 1:0-1:23
+You can inspect expressions (such as variables) by entering them at the prompt.
+To leave the debugger and continue the program use "$continue".
+
+<1> => this.host
+
+...
+
+ vars = null
+
+<2> => $continue
+```
+
+By definition, a type method can only be invoked on an actual object.
+
+In order to stay safe, add more checks to the API filter:
+
+- `host.vars && host.vars.contains("os")` or
+- `host.vars && typeof(host.vars) == Dictionary && host.vars.contains("os")`
+
+Example:
+
+```bash
+curl -k -s -u root:icinga -H 'Accept: application/json' -H 'X-HTTP-Method-Override: GET' \
+ -X POST 'https://localhost:5665/v1/objects/services' \
+ -d '{ "filter": "host.vars && typeof(host.vars) == Dictionary && host.vars.contains(\"os\")", "attrs": [ "__name" ], "joins": [ "host.name", "host.vars" ], "pretty": true }'
+```
diff --git a/doc/21-development.md b/doc/21-development.md
new file mode 100644
index 0000000..10cf765
--- /dev/null
+++ b/doc/21-development.md
@@ -0,0 +1,2680 @@
+# Development <a id="development"></a>
+
+This chapter provides hints on Icinga 2 debugging,
+development, package builds and tests.
+
+* [Debug Icinga 2](21-development.md#development-debug)
+ * [GDB Backtrace](21-development.md#development-debug-gdb-backtrace)
+ * [Core Dump](21-development.md#development-debug-core-dump)
+* [Test Icinga 2](21-development.md#development-tests)
+ * [Snapshot Packages (Nightly Builds)](21-development.md#development-tests-snapshot-packages)
+* [Develop Icinga 2](21-development.md#development-develop)
+ * [Preparations](21-development.md#development-develop-prepare)
+ * [Design Patterns](21-development.md#development-develop-design-patterns)
+ * [Build Tools](21-development.md#development-develop-builds-tools)
+ * [Unit Tests](21-development.md#development-develop-tests)
+ * [Style Guide](21-development.md#development-develop-styleguide)
+* [Development Environment](21-development.md#development-environment)
+ * [Linux Dev Environment](21-development.md#development-linux-dev-env)
+ * [macOS Dev Environment](21-development.md#development-macos-dev-env)
+ * [Windows Dev Environment](21-development.md#development-windows-dev-env)
+* [Package Builds](21-development.md#development-package-builds)
+ * [RPM](21-development.md#development-package-builds-rpms)
+ * [DEB](21-development.md#development-package-builds-deb)
+ * [Windows](21-development.md#development-package-builds-windows)
+* [Continuous Integration](21-development.md#development-ci)
+* [Advanced Tips](21-development.md#development-advanced)
+
+<!-- mkdocs requires 4 spaces indent for nested lists: https://github.com/Python-Markdown/markdown/issues/3 -->
+
+## Debug Icinga 2 <a id="development-debug"></a>
+
+This chapter targets all users who have been asked by developers to provide
+a stack trace or coredump if the application crashed. It is also useful
+for developers working with different debuggers.
+
+> **Note:**
+>
+> This is intentionally mentioned before any development insights
+> as debugging is a more frequent and commonly asked question.
+
+### Debug Requirements <a id="debug-requirements"></a>
+
+Make sure that the debug symbols are available for Icinga 2.
+The Icinga 2 packages provide a debug package which must be
+installed separately for all involved binaries, like `icinga2-bin`
+or `icinga2-ido-mysql`.
+
+Distribution | Command
+-------------------|------------------------------------------
+Debian/Ubuntu | `apt-get install icinga2-dbg`
+RHEL/CentOS | `yum install icinga2-debuginfo`
+Fedora | `dnf install icinga2-debuginfo icinga2-bin-debuginfo icinga2-ido-mysql-debuginfo`
+SLES/openSUSE | `zypper install icinga2-bin-debuginfo icinga2-ido-mysql-debuginfo`
+
+Furthermore, you may also have to install debug symbols for Boost and your C++ library.
+
+If you're building your own binaries, you should use the `-DCMAKE_BUILD_TYPE=Debug` cmake
+build flag for debug builds.
+
+
+### GDB as Debugger <a id="development-debug-gdb"></a>
+
+Install GDB in your development environment.
+
+Distribution | Command
+-------------------|------------------------------------------
+Debian/Ubuntu | `apt-get install gdb`
+RHEL/CentOS | `yum install gdb`
+Fedora | `dnf install gdb`
+SLES/openSUSE | `zypper install gdb`
+
+#### GDB Run <a id="development-debug-gdb-run"></a>
+
+Run the icinga2 binary `/usr/lib{,64}/icinga2/sbin/icinga2` with gdb; `/usr/bin/icinga2` is a shell wrapper.
+
+```
+gdb --args /usr/lib/icinga2/sbin/icinga2 daemon
+
+(gdb) set follow-fork-mode child
+```
+
+When gdb halts on SIGUSR2, press `c` to continue. This signal originates from the umbrella
+process and can safely be ignored.
+
+
+> **Note**
+>
+> Since v2.11, running gdb against `/usr/lib/icinga2/sbin/icinga2` directly attaches to the umbrella process,
+> therefore rather attach to an already running process.
+>
+```bash
+# Typically the order of PIDs is: 1) umbrella 2) spawn helper 3) main process
+pidof icinga2
+
+gdb -p $(pidof icinga2 | cut -d ' ' -f3)
+```
+
+> **Note**
+>
+> If gdb tells you it's missing debug symbols, quit gdb and install
+> them: `Missing separate debuginfos, use: debuginfo-install ...`
+
+Run/restart the application.
+
+```
+(gdb) r
+```
+
+Kill the running application.
+
+```
+(gdb) k
+```
+
+Continue after breakpoint.
+
+```
+(gdb) c
+```
+
+#### GDB Core Dump <a id="development-debug-gdb-coredump"></a>
+
+Either attach to the running process using `gdb -p PID` or start
+a new gdb run.
+
+```
+(gdb) r
+(gdb) generate-core-file
+```
+
+#### GDB Backtrace <a id="development-debug-gdb-backtrace"></a>
+
+If Icinga 2 aborted its operation abnormally, generate a backtrace.
+
+> **Note**
+>
+> Please install the [required debug symbols](21-development.md#debug-requirements)
+> prior to generating a backtrace.
+
+`thread apply all` is important here since this includes all running threads.
+We need this information when e.g. debugging deadlocks and hanging features.
+
+```
+(gdb) bt
+(gdb) thread apply all bt full
+```
+
+If gdb stops at a SIGPIPE signal, please disable the signal before
+running Icinga 2. This isn't an error, but we need to work around it.
+
+```
+(gdb) handle SIGPIPE nostop noprint pass
+(gdb) r
+```
+
+If you create a [new issue](https://github.com/Icinga/icinga2/issues),
+make sure to attach as much detail as possible.
+
+#### GDB Backtrace from Running Process <a id="development-debug-gdb-backtrace-running"></a>
+
+If Icinga 2 is still running, generate a full backtrace from the running
+process and store it into a new file (e.g. for debugging dead locks).
+
+> **Note**
+>
+> Please install the [required debug symbols](21-development.md#debug-requirements)
+> prior to generating a backtrace.
+
+Icinga 2 runs with 2 processes: main and command executor. Therefore, generate two backtrace logs
+and add them to the GitHub issue.
+
+```bash
+for pid in $(pidof icinga2); do gdb -p $pid -batch -ex "thread apply all bt full" -ex "detach" -ex "q" > gdb_bt_${pid}_`date +%s`.log; done
+```
+
+#### GDB Thread List from Running Process <a id="development-debug-gdb-thread-list-running"></a>
+
+Instead of a full backtrace, you sometimes just need a list of running threads.
+
+```bash
+for pid in $(pidof icinga2); do gdb -p $pid -batch -ex "info threads" -ex "detach" -ex "q" > gdb_threads_${pid}_`date +%s`.log; done
+```
+
+#### GDB Backtrace Stepping <a id="development-debug-gdb-backtrace-stepping"></a>
+
+Identifying the problem may require stepping into the backtrace, analysing
+the current scope, attributes, and possible unmet requirements. `p` prints
+the value of the selected variable or function call result.
+
+```
+(gdb) up
+(gdb) down
+(gdb) p checkable
+(gdb) p checkable.px->m_Name
+```
+
+#### GDB Breakpoints <a id="development-debug-gdb-breakpoint"></a>
+
+To set a breakpoint at a specific function call or at a specific line in a file:
+
+```
+(gdb) b checkable.cpp:125
+(gdb) b icinga::Checkable::SetEnablePerfdata
+```
+
+GDB will ask about loading the required symbols later; select `yes` instead
+of `no`.
+
+Then run Icinga 2 until it reaches the first breakpoint. Continue with `c`
+afterwards.
+
+```
+(gdb) run
+(gdb) c
+```
+
+In case you want to step into the next line of code, use `n`. If there is a
+function call where you want to step into, use `s`.
+
+```
+(gdb) n
+
+(gdb) s
+```
+
+If you want to delete all breakpoints, use `d` and select `yes`.
+
+```
+(gdb) d
+```
+
+> **Tip**
+>
+> When debugging exceptions, set your breakpoint like this: `b __cxa_throw`.
+
+Breakpoint Example:
+
+```
+(gdb) b __cxa_throw
+(gdb) r
+(gdb) up
+....
+(gdb) up
+#11 0x00007ffff7cbf9ff in icinga::Utility::GlobRecursive(icinga::String const&, icinga::String const&, boost::function<void (icinga::String const&)> const&, int) (path=..., pattern=..., callback=..., type=1)
+ at /home/michi/coding/icinga/icinga2/lib/base/utility.cpp:609
+609 callback(cpath);
+(gdb) l
+604
+605 #endif /* _WIN32 */
+606
+607 std::sort(files.begin(), files.end());
+608 BOOST_FOREACH(const String& cpath, files) {
+609 callback(cpath);
+610 }
+611
+612 std::sort(dirs.begin(), dirs.end());
+613 BOOST_FOREACH(const String& cpath, dirs) {
+(gdb) p files
+$3 = std::vector of length 11, capacity 16 = {{static NPos = 18446744073709551615, m_Data = "/etc/icinga2/conf.d/agent.conf"}, {static NPos = 18446744073709551615,
+ m_Data = "/etc/icinga2/conf.d/commands.conf"}, {static NPos = 18446744073709551615, m_Data = "/etc/icinga2/conf.d/downtimes.conf"}, {static NPos = 18446744073709551615,
+ m_Data = "/etc/icinga2/conf.d/groups.conf"}, {static NPos = 18446744073709551615, m_Data = "/etc/icinga2/conf.d/notifications.conf"}, {static NPos = 18446744073709551615,
+ m_Data = "/etc/icinga2/conf.d/satellite.conf"}, {static NPos = 18446744073709551615, m_Data = "/etc/icinga2/conf.d/services.conf"}, {static NPos = 18446744073709551615,
+ m_Data = "/etc/icinga2/conf.d/templates.conf"}, {static NPos = 18446744073709551615, m_Data = "/etc/icinga2/conf.d/test.conf"}, {static NPos = 18446744073709551615,
+ m_Data = "/etc/icinga2/conf.d/timeperiods.conf"}, {static NPos = 18446744073709551615, m_Data = "/etc/icinga2/conf.d/users.conf"}}
+```
+
+
+### Core Dump <a id="development-debug-core-dump"></a>
+
+When the Icinga 2 daemon crashes with a `SIGSEGV` signal
+a core dump file should be written. This will help
+developers to analyze and fix the problem.
+
+#### Core Dump File Size Limit <a id="development-debug-core-dump-limit"></a>
+
+This requires setting the core dump file size to `unlimited`.
+
+
+##### Systemd
+
+```
+systemctl edit icinga2.service
+
+[Service]
+...
+LimitCORE=infinity
+
+systemctl daemon-reload
+
+systemctl restart icinga2
+```
+
+##### Init Script
+
+```
+vim /etc/init.d/icinga2
+...
+ulimit -c unlimited
+
+service icinga2 restart
+```
+
+##### Verify
+
+Verify that the Icinga 2 process core file size limit is set to `unlimited`.
+
+```
+for pid in $(pidof icinga2); do cat /proc/$pid/limits; done
+
+...
+Max core file size unlimited unlimited bytes
+```
+
+
+#### Core Dump Kernel Format <a id="development-debug-core-dump-format"></a>
+
+The Icinga 2 daemon runs with the SUID bit set. Therefore you need
+to explicitly enable core dumps for SUID on Linux.
+
+```bash
+sysctl -w fs.suid_dumpable=2
+```
+
+Adjust the coredump kernel format and file location on Linux:
+
+```bash
+sysctl -w kernel.core_pattern=/var/lib/cores/core.%e.%p
+
+install -m 1777 -d /var/lib/cores
+```
+
+MacOS:
+
+```bash
+sysctl -w kern.corefile=/cores/core.%P
+
+chmod 777 /cores
+```
+
+#### Core Dump Analysis <a id="development-debug-core-dump-analysis"></a>
+
+Once Icinga 2 crashes again a new coredump file will be written. Please
+attach this file to your bug report in addition to the general details.
+
+Simple test case for a `SIGSEGV` simulation with `sleep`:
+
+```
+ulimit -c unlimited
+sleep 1800&
+[1] <PID>
+kill -SEGV <PID>
+gdb `which sleep` /var/lib/cores/core.sleep.<PID>
+(gdb) bt
+rm /var/lib/cores/core.sleep.*
+```
+
+Analyzing Icinga 2:
+
+```
+gdb /usr/lib64/icinga2/sbin/icinga2 core.icinga2.<PID>
+(gdb) bt
+```
+
+### LLDB as Debugger <a id="development-debug-lldb"></a>
+
+LLDB is available on macOS with the Xcode command line tools.
+
+```bash
+xcode-select --install
+```
+
+In order to run Icinga 2 with LLDB, you need to pass the binary as an argument.
+Since v2.11 this would attach to the umbrella process, therefore rather
+attach to an already running process.
+
+```bash
+# Typically the order of PIDs is: 1) umbrella 2) spawn helper 3) main process
+pidof icinga2
+
+lldb -p $(pidof icinga2 | cut -d ' ' -f3)
+```
+
+In case you'll need to attach to the main process immediately, you can delay
+the forked child process and attach to the printed PID.
+
+```
+$ icinga2 daemon -DInternal.DebugWorkerDelay=120
+Closed FD 6 which we inherited from our parent process.
+[2020-01-29 12:22:33 +0100] information/cli: Icinga application loader (version: v2.11.0-477-gfe8701d77; debug)
+[2020-01-29 12:22:33 +0100] information/RunWorker: DEBUG: Current PID: 85253. Sleeping for 120 seconds to allow lldb/gdb -p <PID> attachment.
+```
+
+```bash
+lldb -p 85253
+```
+
+When lldb halts on SIGUSR2, press `c` to continue. This signal originates from the umbrella
+process and can safely be ignored.
+
+
+Breakpoint:
+
+```
+> b checkable.cpp:57
+> b icinga::Checkable::ProcessCheckResult
+```
+
+Full backtrace:
+
+```
+> bt all
+```
+
+Select thread:
+
+```
+> thr sel 5
+```
+
+Step into:
+
+```
+> s
+```
+
+Next step:
+
+```
+> n
+```
+
+Continue:
+
+```
+> c
+```
+
+Up/down in stacktrace:
+
+```
+> up
+> down
+```
+
+
+### Debug on Windows <a id="development-debug-windows"></a>
+
+
+Whenever the application crashes, the Windows error reporting (WER) can be [configured](https://docs.microsoft.com/en-gb/windows/win32/wer/collecting-user-mode-dumps)
+to create user-mode dumps.
+
+
+Tail the log file with Powershell:
+
+```
+Get-Content .\icinga2.log -tail 10 -wait
+```
+
+
+#### Debug on Windows: Dependencies <a id="development-debug-windows-dependencies"></a>
+
+Similar to `ldd` or `nm` on Linux/Unix.
+
+Extract the dependent DLLs from a binary with Visual Studio's `dumpbin` tool
+in Powershell:
+
+```
+C:> &'C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.22.27905\bin\Hostx64\x64\dumpbin.exe' /dependents .\debug\Bin\Debug\Debug\boosttest-test-base.exe
+DEBUG: 1+ >>>> &'C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.22.27905\bin\Hostx64\x64\dumpbin.exe' /dependents .\debug\Bin\Debug\Debug\boosttest-test-base.exe
+Microsoft (R) COFF/PE Dumper Version 14.22.27905.0
+Copyright (C) Microsoft Corporation. All rights reserved.
+
+
+Dump of file .\debug\Bin\Debug\Debug\boosttest-test-base.exe
+
+File Type: EXECUTABLE IMAGE
+
+ Image has the following dependencies:
+
+ boost_coroutine-vc142-mt-gd-x64-1_84.dll
+ boost_date_time-vc142-mt-gd-x64-1_84.dll
+ boost_filesystem-vc142-mt-gd-x64-1_84.dll
+ boost_thread-vc142-mt-gd-x64-1_84.dll
+ boost_regex-vc142-mt-gd-x64-1_84.dll
+ libssl-3_0-x64.dll
+ libcrypto-3_0-x64.dll
+ WS2_32.dll
+ dbghelp.dll
+ SHLWAPI.dll
+ msi.dll
+ boost_unit_test_framework-vc142-mt-gd-x64-1_84.dll
+ KERNEL32.dll
+ SHELL32.dll
+ ADVAPI32.dll
+ MSVCP140D.dll
+ MSWSOCK.dll
+ bcrypt.dll
+ VCRUNTIME140D.dll
+ ucrtbased.dll
+
+ Summary
+
+ 1000 .00cfg
+ 68000 .data
+ B000 .idata
+ 148000 .pdata
+ 69C000 .rdata
+ 25000 .reloc
+ 1000 .rsrc
+ E7A000 .text
+ 1000 .tls
+```
+
+
+## Test Icinga 2 <a id="development-tests"></a>
+
+### Snapshot Packages (Nightly Builds) <a id="development-tests-snapshot-packages"></a>
+
+Icinga provides snapshot packages as nightly builds from [Git master](https://github.com/icinga/icinga2).
+
+These packages contain development code which should be considered "work in progress".
+While developers ensure that tests are running fine with CI actions on PRs,
+things might break, or changes are not yet documented in the changelog.
+
+You can help the developers and test the snapshot packages, e.g. when larger
+changes or rewrites are taking place for a new major version. Your feedback
+is very much appreciated.
+
+Snapshot packages are available for all supported platforms including
+Linux and Windows and can be obtained from [https://packages.icinga.com](https://packages.icinga.com).
+
+The [Vagrant boxes](https://github.com/Icinga/icinga-vagrant) also use
+the Icinga snapshot packages to allow easier integration tests. It is also
+possible to use Docker with base OS images and installing the snapshot
+packages.
+
+If you encounter a problem, please [open a new issue](https://github.com/Icinga/icinga2/issues/new/choose)
+on GitHub and mention that you're testing the snapshot packages.
+
+#### RHEL/CentOS <a id="development-tests-snapshot-packages-rhel"></a>
+
+2.11+ requires the EPEL repository for Boost 1.66+.
+
+In addition to that, the `icinga-rpm-release` package already provides the `icinga-snapshot-builds`
+repository but it is disabled by default.
+
+```bash
+yum -y install https://packages.icinga.com/epel/icinga-rpm-release-7-latest.noarch.rpm
+yum -y install epel-release
+yum makecache
+
+yum install --enablerepo=icinga-snapshot-builds icinga2
+```
+
+#### Debian <a id="development-tests-snapshot-packages-debian"></a>
+
+2.11+ requires Boost 1.66+ which either is provided by the OS, backports or Icinga stable repositories.
+It is advised to configure both Icinga repositories, stable and snapshot, and selectively
+choose the repository with the `-t` flag on `apt-get install`.
+
+```bash
+apt-get update
+apt-get -y install apt-transport-https wget gnupg
+
+wget -O - https://packages.icinga.com/icinga.key | apt-key add -
+
+DIST=$(awk -F"[)(]+" '/VERSION=/ {print $2}' /etc/os-release); \
+ echo "deb https://packages.icinga.com/debian icinga-${DIST} main" > \
+ /etc/apt/sources.list.d/${DIST}-icinga.list
+ echo "deb-src https://packages.icinga.com/debian icinga-${DIST} main" >> \
+ /etc/apt/sources.list.d/${DIST}-icinga.list
+
+DIST=$(awk -F"[)(]+" '/VERSION=/ {print $2}' /etc/os-release); \
+ echo "deb http://packages.icinga.com/debian icinga-${DIST}-snapshots main" > \
+ /etc/apt/sources.list.d/${DIST}-icinga-snapshots.list
+ echo "deb-src http://packages.icinga.com/debian icinga-${DIST}-snapshots main" >> \
+ /etc/apt/sources.list.d/${DIST}-icinga-snapshots.list
+
+apt-get update
+```
+
+On Debian Stretch, you'll also need to add Debian Backports.
+
+```bash
+DIST=$(awk -F"[)(]+" '/VERSION=/ {print $2}' /etc/os-release); \
+ echo "deb https://deb.debian.org/debian ${DIST}-backports main" > \
+ /etc/apt/sources.list.d/${DIST}-backports.list
+
+apt-get update
+```
+
+Then install the snapshot packages.
+
+```bash
+DIST=$(awk -F"[)(]+" '/VERSION=/ {print $2}' /etc/os-release); \
+apt-get install -t icinga-${DIST}-snapshots icinga2
+```
+
+#### Ubuntu <a id="development-tests-snapshot-packages-ubuntu"></a>
+
+```bash
+apt-get update
+apt-get -y install apt-transport-https wget gnupg
+
+wget -O - https://packages.icinga.com/icinga.key | apt-key add -
+
+. /etc/os-release; if [ ! -z ${UBUNTU_CODENAME+x} ]; then DIST="${UBUNTU_CODENAME}"; else DIST="$(lsb_release -c| awk '{print $2}')"; fi; \
+ echo "deb https://packages.icinga.com/ubuntu icinga-${DIST} main" > \
+ /etc/apt/sources.list.d/${DIST}-icinga.list
+ echo "deb-src https://packages.icinga.com/ubuntu icinga-${DIST} main" >> \
+ /etc/apt/sources.list.d/${DIST}-icinga.list
+
+. /etc/os-release; if [ ! -z ${UBUNTU_CODENAME+x} ]; then DIST="${UBUNTU_CODENAME}"; else DIST="$(lsb_release -c| awk '{print $2}')"; fi; \
+ echo "deb https://packages.icinga.com/ubuntu icinga-${DIST}-snapshots main" > \
+ /etc/apt/sources.list.d/${DIST}-icinga-snapshots.list
+ echo "deb-src https://packages.icinga.com/ubuntu icinga-${DIST}-snapshots main" >> \
+ /etc/apt/sources.list.d/${DIST}-icinga-snapshots.list
+
+apt-get update
+```
+
+Then install the snapshot packages.
+
+```bash
+. /etc/os-release; if [ ! -z ${UBUNTU_CODENAME+x} ]; then DIST="${UBUNTU_CODENAME}"; else DIST="$(lsb_release -c| awk '{print $2}')"; fi; \
+apt-get install -t icinga-${DIST}-snapshots icinga2
+```
+
+#### SLES <a id="development-tests-snapshot-packages-sles"></a>
+
+The required Boost packages are provided with the stable release repository.
+
+```bash
+rpm --import https://packages.icinga.com/icinga.key
+
+zypper ar https://packages.icinga.com/SUSE/ICINGA-release.repo
+zypper ref
+
+zypper ar https://packages.icinga.com/SUSE/ICINGA-snapshot.repo
+zypper ref
+```
+
+Selectively install the snapshot packages using the `-r` parameter.
+
+```bash
+zypper in -r icinga-snapshot-builds icinga2
+```
+
+
+### Unit Tests <a id="development-tests-unit"></a>
+
+Build the binaries and run the tests.
+
+
+```bash
+make -j4 -C debug
+make test -C debug
+```
+
+Run a specific boost test:
+
+```bash
+debug/Bin/Debug/boosttest-test-base --run_test=remote_url
+```
+
+
+
+## Develop Icinga 2 <a id="development-develop"></a>
+
+Icinga 2 can be built on many platforms such as Linux, Unix and Windows.
+There are limitations in terms of support, e.g. Windows is only supported for agents,
+not a full-featured master or satellite.
+
+Before you start with actual development, there are a couple of prerequisites.
+
+### Preparations <a id="development-develop-prepare"></a>
+
+#### Choose your Editor <a id="development-develop-choose-editor"></a>
+
+Icinga 2 can be developed with your favorite editor. Icinga developers prefer
+these tools:
+
+- vim
+- CLion (macOS, Linux)
+- MS Visual Studio (Windows)
+- Atom
+
+Editors differ in functionality. The more helpers you get for C++ development,
+the faster your development workflow will be.
+
+#### Get to know the architecture <a id="development-develop-get-to-know-the-architecture"></a>
+
+Icinga 2 can run standalone or in distributed environments. It contains a whole lot
+more than a simple check execution engine.
+
+Read more about it in the [Technical Concepts](19-technical-concepts.md#technical-concepts) chapter.
+
+#### Get to know the code <a id="development-develop-get-to-know-the-code"></a>
+
+First off, you really need to know C++ and portions of C++17 and the boost libraries.
+It is best to start with a book or online tutorial to get into the basics.
+Icinga developers gained their knowledge through studies, training and self-teaching:
+trying out code and asking senior developers for guidance.
+
+Here's a few books we can recommend:
+
+* [Accelerated C++: Practical Programming by Example](https://www.amazon.com/Accelerated-C-Practical-Programming-Example/dp/020170353X) (Andrew Koenig, Barbara E. Moo)
+* [Effective C++](https://www.amazon.com/Effective-Specific-Improve-Programs-Designs/dp/0321334876) (Scott Meyers)
+* [Boost C++ Application Development Cookbook - Second Edition: Recipes to simplify your application development](https://www.amazon.com/dp/1787282244/ref=cm_sw_em_r_mt_dp_U_dN1OCbERS00EQ) (Antony Polukhin)
+* [Der C++ Programmierer](https://www.amazon.de/Programmierer-lernen-Professionell-anwenden-L%C3%B6sungen/dp/3446416447), German (Ulrich Breymann)
+* [C++11 programmieren](https://www.amazon.de/gp/product/3836217325/), German (Torsten T. Will)
+
+In addition, it is a good bet to also know SQL when diving into backend development.
+
+* [SQL Performance Explained](https://www.amazon.de/gp/product/3950307826/) (Markus Winand)
+
+Last but not least, if you are developing on Windows, get to know the internals about services and the Win32 API.
+
+### Design Patterns <a id="development-develop-design-patterns"></a>
+
+Icinga 2 heavily relies on object-oriented programming and encapsulates common
+functionality into classes and objects. It also uses modern programming techniques
+to e.g. work with shared pointer memory management.
+
+Icinga 2 consists of libraries bundled into the main binary. Therefore you'll
+find many code parts in the `lib/` directory whereas the actual application is
+built from `icinga-app/`. Accompanying Icinga 2, there are the Windows plugins
+which are standalone and compiled from `plugins/`.
+
+Library | Description
+---------------|------------------------------------
+base           | Objects, values, types, streams, sockets, TLS, utilities, etc.
+config | Configuration compiler, expressions, etc.
+cli | CLI (sub) commands and helpers.
+icinga | Icinga specific objects and event handling.
+remote | Cluster and HTTP client/server and REST API related code.
+checker | Checker feature, check scheduler.
+notification | Notification feature, notification scheduler.
+methods | Command execution methods, plugins and built-in checks.
+perfdata | Performance data related, including Graphite, Elastic, etc.
+db\_ido | IDO database abstraction layer.
+db\_ido\_mysql | IDO database driver for MySQL.
+db\_ido\_pgsql | IDO database driver for PgSQL.
+mysql\_shim    | Library stub for linking against the MySQL client libraries.
+pgsql\_shim | Library stub for linking against the PgSQL client libraries.
+
+#### Class Compiler <a id="development-develop-design-patterns-class-compiler"></a>
+
+Another thing you will recognize are the `.ti` files which are compiled
+by our own class compiler into actual source code. The meta language allows
+developers to easily add object attributes and specify their behaviour.
+
+Some object attributes need to be stored over restarts in the state file
+and therefore have the `state` attribute set. Others are treated as `config`
+attribute and automatically get configuration validation functions created.
+Hidden or read-only REST API attributes are marked with `no_user_view` and
+`no_user_modify`.
+
+The most beneficial part is that getters and setters are generated. The actual object
+inherits from `ObjectImpl<TYPE>` and therefore gets them "for free".
+
+Example:
+
+```
+vim lib/perfdata/gelfwriter.ti
+
+ [config] enable_tls;
+
+vim lib/perfdata/gelfwriter.cpp
+
+ if (GetEnableTls()) {
+```
+
+The logic is hidden in `tools/mkclass/` in case you want to learn more about it.
+The first steps during CMake & make also tell you about code generation.
+
+### Build Tools <a id="development-develop-builds-tools"></a>
+
+#### CMake <a id="development-develop-builds-cmake"></a>
+
+In its early development stages in 2012, Icinga 2 was built with autoconf/automake
+and separate Windows project files. We found this very fragile, and have switched
+to CMake as our build tool.
+
+The most common benefits:
+
+* Everything is described in CMakeLists.txt in each directory
+* CMake only needs to know that a sub directory needs to be included.
+* The global CMakeLists.txt acts as main entry point for requirement checks and library/header includes.
+* Separate binary build directories, the actual source tree stays clean.
+* CMake automatically generates a Visual Studio project file `icinga2.sln` on Windows.
+
+#### Unity Builds <a id="development-develop-builds-unity-builds"></a>
+
+Another thing you should be aware of is toggling Unity builds on and off.
+
+Typically, we already use caching mechanisms to reduce recompile time with ccache.
+For release builds, there's always a new build needed as the difference is huge compared
+to a previous (major) release.
+
+Therefore we've introduced the Unity builds, which basically concatenate all source files
+into one big library source code file. The compiler then doesn't need to load the many small
+files but compiles and links this huge one.
+
+Unity builds require more memory which is why you should disable them for development
+builds in small sized VMs (Linux, Windows) and also Docker containers.
+
+There are a couple of header files which are included everywhere. If you touch/edit them,
+the cache is invalidated and you then need to recompile a lot more files. `base/utility.hpp`
+and `remote/zone.hpp` are good candidates for this.
+
+### Unit Tests <a id="development-develop-tests"></a>
+
+New functions and classes must be covered by new unit tests. Whenever
+you decide to add new functions, ensure that you don't need a complex
+mock or runtime attributes in order to test them. It is better to isolate
+code into function interfaces which can be invoked in the Boost test
+framework.
+
+Look into the existing tests in the [test/](https://github.com/Icinga/icinga2/tree/master/test) directory
+and adopt new test cases.
+
+Specific tests require special time windows; they are only
+enabled in debug builds for developers. This is the case e.g.
+for testing the flapping algorithm with expected state change
+detection at a specific point from now.
+
+
+### Style Guide <a id="development-develop-styleguide"></a>
+
+Overview of project files:
+
+File Type | File Name/Extension | Description
+---------------|---------------------|-----------------------------
+Header | .hpp | Classes, enums, typedefs inside the icinga Namespace.
+Source | .cpp | Method implementation for class functions, static/global variables.
+CMake | CMakeLists.txt | Build configuration, source and header file references.
+CMake Source | .cmake | Source/Header files generated from CMake placeholders.
+ITL/conf.d | .conf | Template library and example files as configuration
+Class Compiler | .ti | Object classes in our own language, generates source code as `<filename>-ti.{c,h}pp`.
+Lexer/Parser | .ll, .yy | Flex/Bison code generated into source code from CMake builds.
+Docs | .md | Markdown docs and READMEs.
+
+Everything else consists of additional tools and scripts for developers and build systems.
+
+All files must include the copyright header. We don't use the
+current year as this implies yearly updates we don't want.
+
+Depending on the file type, this must be a comment.
+
+```cpp
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+```
+
+```bash
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+```
+
+#### Code Formatting <a id="development-develop-code-formatting"></a>
+
+**Tabs instead of spaces.** Inside Visual Studio, choose to keep tabs instead of
+spaces. Tabs should be displayed with an indent of 4 spaces by default, depending on your preference.
+
+We follow the clang format, with some exceptions.
+
+- Curly braces for functions and classes always start at a new line.
+
+```cpp
+String ConfigObjectUtility::EscapeName(const String& name)
+{
+//...
+}
+
+String ConfigObjectUtility::CreateObjectConfig(const Type::Ptr& type, const String& fullName,
+ bool ignoreOnError, const Array::Ptr& templates, const Dictionary::Ptr& attrs)
+{
+//...
+}
+```
+
+- Lines that are too long break at a parameter; the new line needs a tab indent.
+
+```cpp
+ static String CreateObjectConfig(const Type::Ptr& type, const String& fullName,
+ bool ignoreOnError, const Array::Ptr& templates, const Dictionary::Ptr& attrs);
+```
+
+- Conditions require curly braces unless it is a single `if` with just one line.
+
+
+```cpp
+ if (s == "OK") {
+ //...
+ } else {
+ //...
+ }
+
+ if (!n)
+ return;
+```
+
+- There's a space between `if` and the opening parenthesis `(`, and also between the closing parenthesis `)` and the opening curly brace `{`.
+- Negation with `!` doesn't need an extra space.
+- Else branches always start on the same line after the closing curly brace.
+
+
+#### Code Comments <a id="development-develop-code-comments"></a>
+
+Add comments wherever you think another developer will have a hard
+time understanding a complex algorithm, or where you yourself might
+struggle with it again in a year. Also use comments to highlight specific
+stages in a function. Generally speaking, make things easier for the
+team and external contributors.
+
+Comments can also be used to mark additional references and TODOs.
+If there is a specific GitHub issue or an ongoing discussion,
+summarize it in the comment and deliberately link to it.
+
+- Single line comments may use `//` or `/* ... */`
+- Multi line comments must use this format:
+
+```cpp
+/* Ensure to check for XY
+ * This relies on the fact that ABC has been set before.
+ */
+```
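+
+A reference/TODO comment might look like this (the issue number is only a placeholder):
+
+```cpp
+// TODO: Remove this workaround once the linked GitHub issue is resolved, see #1234 (placeholder).
+```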
+
+#### Function Docs <a id="development-develop-function-docs"></a>
+
+Function header documentation must be added. The current code base
+still needs rework here; new functions must provide this.
+
+Editors like CLion or Visual Studio allow you to type `/**` followed
+by Enter and generate the skeleton from the implemented function.
+
+Add a short summary in the first line about the function's purpose.
+Edit the `param` section with a short description of each parameter's intention.
+The `return` entry should describe the value type and additional details.
+
+Example:
+
+```cpp
+/**
+ * Reads a message from the connected peer.
+ *
+ * @param stream ASIO TLS Stream
+ * @param yc Yield Context for ASIO
+ * @param maxMessageLength maximum size of bytes read.
+ *
+ * @return A JSON string
+ */
+String JsonRpc::ReadMessage(const std::shared_ptr<AsioTlsStream>& stream, boost::asio::yield_context yc, ssize_t maxMessageLength)
+```
+
+While we can generate code docs from it, the main idea behind it is
+to provide on-point docs to fully understand all parameters and the
+function's purpose in the same spot.
+
+
+#### Header <a id="development-develop-styleguide-header"></a>
+
+Only include other headers which are mandatory for the header definitions.
+If the source file requires additional headers, add them there to avoid
+include loops.
+
+The included header order is important.
+
+- First, include the library header `i2-<libraryname>.hpp`, e.g. `i2-base.hpp`.
+- Second, include all headers from Icinga itself, e.g. `remote/apilistener.hpp`. `base` before `icinga` before `remote`, etc.
+- Third, include third-party and external library headers, e.g. openssl and boost.
+- Fourth, include STL headers.
+
+#### Source <a id="development-develop-styleguide-source"></a>
+
+The included header order is important.
+
+- First, include the header whose methods are implemented.
+- Second, include all headers from Icinga itself, e.g. `remote/apilistener.hpp`. `base` before `icinga` before `remote`, etc.
+- Third, include third-party and external library headers, e.g. openssl and boost.
+- Fourth, include STL headers.
+
+Always use an empty line after the header include parts.
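+
+For example, a source file in the `remote` library might order its includes
+like this (the file name `foo.cpp` and the selected headers are illustrative):
+
+```cpp
+#include "remote/foo.hpp"                 // 1. header whose methods are implemented
+#include "base/logger.hpp"                // 2. Icinga headers: base before icinga before remote
+#include "remote/apilistener.hpp"
+#include <boost/asio/ssl/context.hpp>     // 3. third-party/external libraries
+#include <openssl/ssl.h>
+#include <map>                            // 4. STL headers
+#include <vector>
+
+using namespace icinga;
+```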
+
+#### Namespace <a id="development-develop-styleguide-namespace"></a>
+
+The icinga namespace is used globally, as otherwise we would need to write `icinga::Utility::FormatDateTime()`.
+
+```cpp
+using namespace icinga;
+```
+
+Other namespaces must be declared in the scope they are used. Typically
+this is inside the function where `boost::asio` and variants would
+complicate the code.
+
+```cpp
+ namespace ssl = boost::asio::ssl;
+
+ auto context (std::make_shared<ssl::context>(ssl::context::sslv23));
+```
+
+#### Functions <a id="development-develop-styleguide-functions"></a>
+
+Ensure that values and pointers are passed as const reference. By default,
+values are copied into the function scope, and we want to avoid this
+wherever possible.
+
+```cpp
+std::vector<EventQueue::Ptr> EventQueue::GetQueuesForType(const String& type)
+```
+
+C++ only allows returning a single value. This can be abstracted by
+returning a specific class object, or by using a map/set. Array and
+Dictionary objects increase the memory footprint; use them only where needed.
+
+A common use case for Icinga value types is where a function can return
+different values - an object, an array, a boolean, etc. This happens in the
+inner parts of the config compiler expressions, or config validation.
+
+The function caller is responsible for determining the correct value type
+and handling possible errors.
+
+Specific algorithms may require populating a list, which can be passed
+by reference to the function. The inner function can then append values,
+as sketched below. Do not use a global shared resource here, unless it is locked by the caller.
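+
+A minimal sketch of such an out-parameter (the function name and message are made up):
+
+```cpp
+static void CollectProblems(const Dictionary::Ptr& attrs, std::vector<String>& problems)
+{
+	if (!attrs)
+		problems.emplace_back("attributes are missing");
+}
+```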
+
+
+#### Conditions and Cases <a id="development-develop-styleguide-conditions"></a>
+
+Prefer if-else-if-else branches. When integers are involved,
+switch-case statements increase readability. Don't forget about `break` though!
+
+Avoid using ternary operators where possible. Putting a condition
+after an assignment complicates reading the source. The compiler
+optimizes this anyway.
+
+Wrong:
+
+```cpp
+	int res = s == "OK" ? 0 : s == "WARNING" ? 1 : 3;
+
+	return res;
+```
+
+Better:
+
+```cpp
+ int res = 3;
+
+ if (s == "OK") {
+ res = 0;
+ } else if (s == "WARNING") {
+ res = 1;
+ }
+```
+
+Even better: Create a lookup map instead of if branches. The complexity
+is reduced to O(log(n)).
+
+```cpp
+	std::map<String, unsigned int> stateMap = {
+		{ "OK", 0 },
+		{ "WARNING", 1 }
+	};
+
+	auto it = stateMap.find(s);
+
+	if (it == stateMap.end()) {
+		return 3;
+	}
+
+	return it->second;
+```
+
+The code is not as short as with a ternary operator, but one can re-use
+this design pattern for other generic definitions with e.g. moving the
+lookup into a utility class.
+
+Once a unit test covers this mapping, it is verified to keep working as expected in the future.
+
+#### Locks and Guards <a id="development-develop-locks-guards"></a>
+
+Lock access to resources where multiple threads can read and write.
+Icinga objects can be locked with the `ObjectLock` class.
+
+Object locks and guards must be limited to the scope where they are needed. Otherwise we could create deadlocks.
+
+```cpp
+ {
+ ObjectLock olock(frame.Locals);
+ for (const Dictionary::Pair& kv : frame.Locals) {
+ AddSuggestion(matches, word, kv.first);
+ }
+ }
+```
+
+#### Objects and Pointers <a id="development-develop-objects-pointers"></a>
+
+Use shared pointers for objects. Icinga objects provide the `Ptr`
+typedef, an `intrusive_ptr` to the class object (see `object.hpp`).
+This also ensures reference counting for the object's lifetime.
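+
+For example (a minimal sketch):
+
+```cpp
+	Dictionary::Ptr attrs = new Dictionary();   // reference counted via intrusive_ptr
+	attrs->Set("address", "192.0.2.10");        // the object lives as long as a Ptr references it
+```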
+
+Use raw pointers with care!
+
+Some methods and classes require specific shared pointers, especially
+when interacting with the Boost library.
+
+#### Value Types <a id="development-develop-styleguide-value-types"></a>
+
+Icinga has its own value types. These provide methods which allow
+generic serialization into JSON, for example, and other type methods
+which are also made available in the DSL.
+
+- Always use `String` instead of `std::string`. If you need a C-string, use the `CStr()` method.
+- Avoid casts and rather use the `Convert` class methods.
+
+```cpp
+ double s = static_cast<double>(v); //Wrong
+
+ double s = Convert::ToDouble(v); //Correct, ToDouble also provides overloads with different value types
+```
+
+- Prefer STL containers for internal non-user interfaces. Icinga value types add a small overhead which may decrease performance if e.g. the function is called 100k times.
+- `Array::FromVector` and variants implement conversions; use them, as shown in the sketch below.
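+
+For example, converting an internal STL container into an `Array` for a
+user-facing interface (a sketch using the helper mentioned above):
+
+```cpp
+	std::vector<String> names { "web01", "web02" };   // internal STL container
+	Array::Ptr arr = Array::FromVector(names);        // converted for the user-facing interface
+```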
+
+#### Utilities <a id="development-develop-styleguide-utilities"></a>
+
+Don't re-invent the wheel. The `Utility` class provides
+many helper functions which allow you e.g. to format Unix timestamps
+or search filesystem paths.
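+
+For instance, formatting the current timestamp could look like this
+(a sketch; check `base/utility.hpp` for the exact signatures):
+
+```cpp
+	double now = Utility::GetTime();                               // Unix timestamp as double
+	String ts = Utility::FormatDateTime("%Y-%m-%d %H:%M:%S", now);
+```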
+
+Also inspect the Icinga objects; they provide helper functions
+for formatting, splitting strings, joining arrays into strings, etc.
+
+#### Libraries <a id="development-develop-styleguide-libraries"></a>
+
+Since 2.11, Icinga depends on [Boost 1.66](https://www.boost.org/doc/libs/1_66_0/).
+Use the existing libraries and header-only includes
+for this specific version.
+
+Note: Prefer modern C++ features (up to C++17) where possible, e.g. `std::atomic` and lambda expressions.
+
+General:
+
+- [exception](https://www.boost.org/doc/libs/1_66_0/libs/exception/doc/boost-exception.html) (header only)
+- [algorithm](https://www.boost.org/doc/libs/1_66_0/libs/algorithm/doc/html/index.html) (header only)
+- [lexical_cast](https://www.boost.org/doc/libs/1_66_0/doc/html/boost_lexical_cast.html) (header only)
+- [regex](https://www.boost.org/doc/libs/1_66_0/libs/regex/doc/html/index.html)
+- [uuid](https://www.boost.org/doc/libs/1_66_0/libs/uuid/doc/uuid.html) (header only)
+- [range](https://www.boost.org/doc/libs/1_66_0/libs/range/doc/html/index.html) (header only)
+- [variant](https://www.boost.org/doc/libs/1_66_0/doc/html/variant.html) (header only)
+- [multi_index](https://www.boost.org/doc/libs/1_66_0/libs/multi_index/doc/index.html) (header only)
+- [function_types](https://www.boost.org/doc/libs/1_66_0/libs/function_types/doc/html/index.html) (header only)
+- [circular_buffer](https://www.boost.org/doc/libs/1_66_0/doc/html/circular_buffer.html) (header only)
+- [math](https://www.boost.org/doc/libs/1_66_0/libs/math/doc/html/index.html) (header only)
+- [stacktrace](https://www.boost.org/doc/libs/1_66_0/doc/html/stacktrace.html) (header only)
+
+Events and Runtime:
+
+- [system](https://www.boost.org/doc/libs/1_66_0/libs/system/doc/index.html)
+- [thread](https://www.boost.org/doc/libs/1_66_0/doc/html/thread.html)
+- [signals2](https://www.boost.org/doc/libs/1_66_0/doc/html/signals2.html) (header only)
+- [program_options](https://www.boost.org/doc/libs/1_66_0/doc/html/program_options.html)
+- [date_time](https://www.boost.org/doc/libs/1_66_0/doc/html/date_time.html)
+- [filesystem](https://www.boost.org/doc/libs/1_66_0/libs/filesystem/doc/index.htm)
+
+Network I/O:
+
+- [asio](https://www.boost.org/doc/libs/1_66_0/doc/html/boost_asio.html) (header only)
+- [beast](https://www.boost.org/doc/libs/1_66_0/libs/beast/doc/html/index.html) (header only)
+- [coroutine](https://www.boost.org/doc/libs/1_66_0/libs/coroutine/doc/html/index.html)
+- [context](https://www.boost.org/doc/libs/1_66_0/libs/context/doc/html/index.html)
+
+Consider abstracting their usage into `*utility.{c,h}pp` files with
+wrapping existing Icinga types. That also allows later changes without
+rewriting large code parts.
+
+> **Note**
+>
+> A new Boost library should be explained in a PR and discussed with the team.
+>
+> This requires package dependency changes.
+
+If you consider an external library or code to be included with Icinga, the following
+requirements must be fulfilled:
+
+- The license is compatible with GPLv2+. The Boost license and MIT work; Apache does not.
+- C++17 is supported.
+- Header-only implementations are preferred, as external libraries require packages on every distribution.
+- No additional frameworks; Boost is the only one allowed.
+- The code is proven to be robust and the GitHub repository is alive or has 1k+ stars. Good libraries also provide a list of users; if e.g. Ceph is using it, this is a good candidate.
+
+
+#### Log <a id="development-develop-styleguide-log"></a>
+
+Icinga allows the user to configure logging backends, e.g. syslog or file.
+
+Any log message inside the code must use the `Log()` function.
+
+- The first parameter is the severity level; use the levels with care.
+- The second parameter defines the location/scope where the log
+message originated. Typically we use the class name here, to better analyse
+the logs users provide in GitHub issues and on the community
+channels.
+- The third parameter takes the log message string.
+
+If the message string needs to be computed from existing values,
+everything must be converted to the String type beforehand.
+This conversion of every value is very expensive, which is why
+we try to avoid it.
+
+Instead, use Log() with the shift operator where everything is written
+on the stream and conversions are explicitly done with templates
+in the background.
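+
+For example (a sketch; the facility name and the streamed variable `identity` are illustrative):
+
+```cpp
+/* Fixed message: pass the string as the third parameter. */
+Log(LogWarning, "ApiListener", "Certificate validation failed.");
+
+/* Computed message: stream the values, they are converted lazily. */
+Log(LogInformation, "ApiListener")
+	<< "New client connection for identity '" << identity << "'.";
+```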
+
+The trick here is that the Log object is destroyed immediately
+after being constructed. The destructor actually
+evaluates the values and sends the message to the registered loggers.
+
+Since flushing the stream every time a log entry occurs is
+very expensive, a timer takes care of flushing the stream
+every second.
+
+> **Tip**
+>
+> If logging stopped, the flush timer thread may be dead.
+> Inspect that with gdb/lldb.
+
+Avoid log messages which could irritate the user. During
+implementation, developers can raise log levels to better
+see what's going on, but remember to change this back to `debug`
+or remove the message entirely.
+
+
+#### Goto <a id="development-develop-styleguide-goto"></a>
+
+Avoid using `goto` statements. There are rare occasions where
+they are allowed:
+
+- The code would become overly complicated within nested loops and conditions.
+- Event processing and C interfaces.
+- Question/Answer loops within interactive CLI commands.
+
+#### Typedef and Auto Keywords <a id="development-develop-styleguide-typedef-auto"></a>
+
+Typedefs allow developers to use shorter names for specific types,
+classes and structs.
+
+```cpp
+ typedef std::map<String, std::shared_ptr<NamespaceValue> >::iterator Iterator;
+```
+
+These typedefs should be part of the class definition in the header,
+or may be defined in the source scope where they are needed.
+
+Avoid declaring global typedefs, unless necessary.
+
+Using the `auto` keyword allows you to omit the specific value type.
+This comes in handy with maps/sets where no specific access
+is required.
+
+The following example iterates over a map returned from `GetTypes()`.
+
+```cpp
+ for (const auto& kv : GetTypes()) {
+ result.insert(kv.second);
+ }
+```
+
+The long example would require us to define a map iterator, and a slightly
+different algorithm.
+
+```cpp
+	typedef std::map<String, DbType::Ptr> TypeMap;
+	typedef std::map<String, DbType::Ptr>::const_iterator TypeMapIterator;
+
+	TypeMap types = GetTypes();
+
+	for (TypeMapIterator it = types.begin(); it != types.end(); it++) {
+		result.insert(it->second);
+	}
+```
+
+We could also use a pair here, but that requires knowing
+the specific types of the map keys and values.
+
+```cpp
+ typedef std::pair<String, DbType::Ptr> kv_pair;
+
+ for (const kv_pair& kv : GetTypes()) {
+ result.insert(kv.second);
+ }
+```
+
+After all, `auto` shortens the code and one does not always need to know
+about the specific types. Function documentation for `GetTypes()` is
+required though.
+
+
+
+#### Whitespace Cleanup <a id="development-develop-choose-editor-whitespaces"></a>
+
+Patches must be cleaned up and follow the indent style (tabs instead of spaces).
+You should also remove any trailing whitespace.
+
+`git diff` can highlight such whitespace:
+
+```
+vim $HOME/.gitconfig
+
+[color "diff"]
+ whitespace = red reverse
+[core]
+ whitespace=fix,-indent-with-non-tab,trailing-space,cr-at-eol
+```
+
+`vim` can also match these and visually alert you to remove them.
+
+```
+vim $HOME/.vimrc
+
+highlight ExtraWhitespace ctermbg=red guibg=red
+match ExtraWhitespace /\s\+$/
+autocmd BufWinEnter * match ExtraWhitespace /\s\+$/
+autocmd InsertEnter * match ExtraWhitespace /\s\+\%#\@<!$/
+autocmd InsertLeave * match ExtraWhitespace /\s\+$/
+autocmd BufWinLeave * call clearmatches()
+```
+
+
+## Development Environment <a id="development-environment"></a>
+
+### Linux Dev Environment <a id="development-linux-dev-env"></a>
+
+Based on CentOS 7, we have an early draft available inside the Icinga Vagrant boxes:
+[centos7-dev](https://github.com/Icinga/icinga-vagrant/tree/master/centos7-dev).
+
+If you're compiling Icinga 2 natively without any virtualization layer in between,
+this usually is faster. This is also the reason why developers on macOS prefer native builds
+over Linux or Windows VMs. Don't forget to test the actual code on Linux later! Linux-specific
+functionality such as `epoll` is not available on other Unix kernels.
+
+Depending on your workstation and environment, you may either develop and run locally,
+use a container deployment pipeline or put everything into a remote VM with plenty of resources.
+
+Fork https://github.com/Icinga/icinga2 into your own repository, e.g. `https://github.com/dnsmichi/icinga2`.
+
+Create two build directories for different binary builds.
+
+* `debug` contains the debug build binaries. They contain more debug information and run tremendously slower than release builds from packages. Don't use them for benchmarks.
+* `release` contains the release build binaries, as you would install them on a live system. This helps comparing specific scenarios for race conditions and more.
+
+```bash
+mkdir -p release debug
+```
+
+Proceed with the specific distribution examples below. Keep in mind that these instructions
+are best effort and sometimes out-of-date. The Git master branch may contain updates.
+
+* [CentOS 7](21-development.md#development-linux-dev-env-centos)
+* [Debian 10 Buster](21-development.md#development-linux-dev-env-debian)
+* [Ubuntu 18 Bionic](21-development.md#development-linux-dev-env-ubuntu)
+
+
+#### CentOS 7 <a id="development-linux-dev-env-centos"></a>
+
+```bash
+yum -y install gdb vim git bash-completion htop centos-release-scl
+
+yum -y install rpmdevtools ccache \
+ cmake make devtoolset-11-gcc-c++ flex bison \
+ openssl-devel boost169-devel systemd-devel \
+ mysql-devel postgresql-devel libedit-devel \
+ devtoolset-11-libstdc++-devel
+
+groupadd icinga
+groupadd icingacmd
+useradd -c "icinga" -s /sbin/nologin -G icingacmd -g icinga icinga
+
+ln -s /bin/ccache /usr/local/bin/gcc
+ln -s /bin/ccache /usr/local/bin/g++
+
+git clone https://github.com/icinga/icinga2.git && cd icinga2
+```
+
+The debug build binaries contain specific code which runs
+slower but allows for better debugging insights.
+
+For benchmarks, change `CMAKE_BUILD_TYPE` to `RelWithDebInfo` and
+build inside the `release` directory.
+
+First off, export some generic CMake variables for Boost.
+
+```bash
+export I2_BOOST="-DBoost_NO_BOOST_CMAKE=TRUE -DBoost_NO_SYSTEM_PATHS=TRUE -DBOOST_LIBRARYDIR=/usr/lib64/boost169 -DBOOST_INCLUDEDIR=/usr/include/boost169 -DBoost_ADDITIONAL_VERSIONS='1.69;1.69.0'"
+```
+
+Second, add the prefix path to it.
+
+```bash
+export I2_GENERIC="$I2_BOOST -DCMAKE_INSTALL_PREFIX=/usr/local/icinga2"
+```
+
+Third, define the two build types with their specific CMake variables.
+
+```bash
+export I2_DEBUG="-DCMAKE_BUILD_TYPE=Debug -DICINGA2_UNITY_BUILD=OFF $I2_GENERIC"
+export I2_RELEASE="-DCMAKE_BUILD_TYPE=RelWithDebInfo -DICINGA2_WITH_TESTS=ON -DICINGA2_UNITY_BUILD=ON $I2_GENERIC"
+```
+
+Fourth, depending on your preference, you may add bash aliases for building,
+or invoke the contained commands manually:
+
+```bash
+alias i2_debug="cd /root/icinga2; mkdir -p debug; cd debug; scl enable devtoolset-11 -- cmake $I2_DEBUG ..; make -j2; sudo make -j2 install; cd .."
+alias i2_release="cd /root/icinga2; mkdir -p release; cd release; scl enable devtoolset-11 -- cmake $I2_RELEASE ..; make -j2; sudo make -j2 install; cd .."
+```
+
+This is taken from the [centos7-dev](https://github.com/Icinga/icinga-vagrant/tree/master/centos7-dev) Vagrant box.
+
+
+The source installation doesn't set proper permissions; this is
+handled in the package builds, which are officially supported.
+
+```bash
+chown -R icinga:icinga /usr/local/icinga2/var/
+
+/usr/local/icinga2/lib/icinga2/prepare-dirs /usr/local/icinga2/etc/sysconfig/icinga2
+/usr/local/icinga2/sbin/icinga2 api setup
+vim /usr/local/icinga2/etc/icinga2/conf.d/api-users.conf
+
+/usr/local/icinga2/lib/icinga2/sbin/icinga2 daemon
+```
+
+#### Debian 10 <a id="development-linux-dev-env-debian"></a>
+
+Debian Buster doesn't need updated Boost packages from packages.icinga.com,
+the distribution already provides 1.66+. For older versions such as Stretch,
+include the release repository for packages.icinga.com as shown in the [setup instructions](02-installation.md).
+
+```bash
+docker run -ti debian:buster bash
+
+apt-get update
+apt-get -y install apt-transport-https wget gnupg
+
+apt-get -y install gdb vim git cmake make ccache build-essential libssl-dev bison flex default-libmysqlclient-dev libpq-dev libedit-dev monitoring-plugins
+apt-get -y install libboost-all-dev
+```
+
+```bash
+ln -s /usr/bin/ccache /usr/local/bin/gcc
+ln -s /usr/bin/ccache /usr/local/bin/g++
+
+groupadd icinga
+groupadd icingacmd
+useradd -c "icinga" -s /sbin/nologin -G icingacmd -g icinga icinga
+
+git clone https://github.com/icinga/icinga2.git && cd icinga2
+
+mkdir debug release
+
+export I2_DEB="-DBoost_NO_BOOST_CMAKE=TRUE -DBoost_NO_SYSTEM_PATHS=TRUE -DBOOST_LIBRARYDIR=/usr/lib/x86_64-linux-gnu -DBOOST_INCLUDEDIR=/usr/include -DCMAKE_INSTALL_RPATH=/usr/lib/x86_64-linux-gnu"
+export I2_GENERIC="-DCMAKE_INSTALL_PREFIX=/usr/local/icinga2 -DICINGA2_PLUGINDIR=/usr/local/sbin"
+export I2_DEBUG="$I2_DEB $I2_GENERIC -DCMAKE_BUILD_TYPE=Debug -DICINGA2_UNITY_BUILD=OFF"
+
+cd debug
+cmake .. $I2_DEBUG
+cd ..
+
+make -j2 install -C debug
+```
+
+
+The source installation doesn't set proper permissions; this is
+handled in the package builds, which are officially supported.
+
+```bash
+chown -R icinga:icinga /usr/local/icinga2/var/
+
+/usr/local/icinga2/lib/icinga2/prepare-dirs /usr/local/icinga2/etc/sysconfig/icinga2
+/usr/local/icinga2/sbin/icinga2 api setup
+vim /usr/local/icinga2/etc/icinga2/conf.d/api-users.conf
+
+/usr/local/icinga2/lib/icinga2/sbin/icinga2 daemon
+```
+
+
+#### Ubuntu 18 Bionic <a id="development-linux-dev-env-ubuntu"></a>
+
+Requires Boost packages from packages.icinga.com.
+
+```bash
+docker run -ti ubuntu:bionic bash
+
+apt-get update
+apt-get -y install apt-transport-https wget gnupg
+
+wget -O - https://packages.icinga.com/icinga.key | apt-key add -
+
+. /etc/os-release; if [ ! -z ${UBUNTU_CODENAME+x} ]; then DIST="${UBUNTU_CODENAME}"; else DIST="$(lsb_release -c| awk '{print $2}')"; fi; \
+ echo "deb https://packages.icinga.com/ubuntu icinga-${DIST} main" > \
+ /etc/apt/sources.list.d/${DIST}-icinga.list
+ echo "deb-src https://packages.icinga.com/ubuntu icinga-${DIST} main" >> \
+ /etc/apt/sources.list.d/${DIST}-icinga.list
+
+apt-get update
+```
+
+```bash
+apt-get -y install gdb vim git cmake make ccache build-essential libssl-dev bison flex default-libmysqlclient-dev libpq-dev libedit-dev monitoring-plugins
+
+apt-get install -y libboost1.67-icinga-all-dev
+
+ln -s /usr/bin/ccache /usr/local/bin/gcc
+ln -s /usr/bin/ccache /usr/local/bin/g++
+
+groupadd icinga
+groupadd icingacmd
+useradd -c "icinga" -s /sbin/nologin -G icingacmd -g icinga icinga
+
+git clone https://github.com/icinga/icinga2.git && cd icinga2
+
+mkdir debug release
+
+export I2_DEB="-DBoost_NO_BOOST_CMAKE=TRUE -DBoost_NO_SYSTEM_PATHS=TRUE -DBOOST_LIBRARYDIR=/usr/lib/x86_64-linux-gnu/icinga-boost -DBOOST_INCLUDEDIR=/usr/include/icinga-boost -DCMAKE_INSTALL_RPATH=/usr/lib/x86_64-linux-gnu/icinga-boost"
+export I2_GENERIC="-DCMAKE_INSTALL_PREFIX=/usr/local/icinga2 -DICINGA2_PLUGINDIR=/usr/local/sbin"
+export I2_DEBUG="$I2_DEB $I2_GENERIC -DCMAKE_BUILD_TYPE=Debug -DICINGA2_UNITY_BUILD=OFF"
+
+cd debug
+cmake .. $I2_DEBUG
+cd ..
+```
+
+```bash
+make -j2 install -C debug
+```
+
+The source installation doesn't set proper permissions; this is
+handled in the package builds, which are officially supported.
+
+```bash
+chown -R icinga:icinga /usr/local/icinga2/var/
+
+/usr/local/icinga2/lib/icinga2/prepare-dirs /usr/local/icinga2/etc/sysconfig/icinga2
+/usr/local/icinga2/sbin/icinga2 api setup
+vim /usr/local/icinga2/etc/icinga2/conf.d/api-users.conf
+
+/usr/local/icinga2/lib/icinga2/sbin/icinga2 daemon
+```
+
+### macOS Dev Environment <a id="development-macos-dev-env"></a>
+
+It is advised to use Homebrew to install the required build dependencies.
+MacPorts has been reported to work as well, but you'll typically get more help
+with Homebrew from Icinga developers.
+
+The idea is to run Icinga with the current user, avoiding root permissions.
+This requires at least v2.11.
+
+> **Note**
+>
+> This is a pure development setup for Icinga developers reducing the compile
+> time in contrast to VMs. There are no packages, startup scripts or dependency management involved.
+>
+> **macOS agents are not officially supported.**
+>
+> macOS uses its own TLS implementation, Icinga relies on extra OpenSSL packages
+> requiring updates apart from vendor security updates.
+
+#### Requirements
+
+Explicitly use OpenSSL 1.1.x, older versions are out of support.
+
+```bash
+brew install ccache boost cmake bison flex openssl@1.1 mysql-connector-c++ postgresql libpq
+```
+
+##### ccache
+
+```bash
+sudo mkdir /opt/ccache
+
+sudo ln -s `which ccache` /opt/ccache/clang
+sudo ln -s `which ccache` /opt/ccache/clang++
+
+vim $HOME/.bash_profile
+
+# ccache is managed with symlinks to avoid collision with cgo
+export PATH="/opt/ccache:$PATH"
+
+source $HOME/.bash_profile
+```
+
+#### Builds
+
+Icinga is built as release (optimized build for packages) and debug (more symbols and details for debugging). Debug builds
+typically run slower than release builds and must not be used for performance benchmarks.
+
+The preferred installation prefix is `/usr/local/icinga/icinga2`. This allows you to put e.g. Icinga Web 2 into the `/usr/local/icinga` directory as well.
+
+```bash
+mkdir -p release debug
+
+export I2_USER=$(id -u -n)
+export I2_GROUP=$(id -g -n)
+export I2_GENERIC="-DCMAKE_INSTALL_PREFIX=/usr/local/icinga/icinga2 -DICINGA2_USER=$I2_USER -DICINGA2_GROUP=$I2_GROUP -DOPENSSL_INCLUDE_DIR=/usr/local/opt/openssl@1.1/include -DOPENSSL_SSL_LIBRARY=/usr/local/opt/openssl@1.1/lib/libssl.dylib -DOPENSSL_CRYPTO_LIBRARY=/usr/local/opt/openssl@1.1/lib/libcrypto.dylib -DICINGA2_PLUGINDIR=/usr/local/sbin -DICINGA2_WITH_PGSQL=OFF -DCMAKE_EXPORT_COMPILE_COMMANDS=ON"
+export I2_DEBUG="-DCMAKE_BUILD_TYPE=Debug -DICINGA2_UNITY_BUILD=OFF $I2_GENERIC"
+export I2_RELEASE="-DCMAKE_BUILD_TYPE=RelWithDebInfo -DICINGA2_WITH_TESTS=ON -DICINGA2_UNITY_BUILD=ON $I2_GENERIC"
+
+cd debug
+cmake $I2_DEBUG ..
+cd ..
+
+make -j4 -C debug
+make -j4 install -C debug
+```
+
+In order to run Icinga without any path prefix and to use Bash completion, it is advised
+to source a few additions into the local dev environment.
+
+```bash
+export PATH=/usr/local/icinga/icinga2/sbin/:$PATH
+
+test -f /usr/local/icinga/icinga2/etc/bash_completion.d/icinga2 && source /usr/local/icinga/icinga2/etc/bash_completion.d/icinga2
+```
+
+##### Build Aliases
+
+This is derived from [dnsmichi's flavour](https://github.com/dnsmichi/dotfiles) and not generally best practice.
+
+```bash
+vim $HOME/.bash_profile
+
+export I2_USER=$(id -u -n)
+export I2_GROUP=$(id -g -n)
+export I2_GENERIC="-DCMAKE_INSTALL_PREFIX=/usr/local/icinga/icinga2 -DICINGA2_USER=$I2_USER -DICINGA2_GROUP=$I2_GROUP -DOPENSSL_INCLUDE_DIR=/usr/local/opt/openssl@1.1/include -DOPENSSL_SSL_LIBRARY=/usr/local/opt/openssl@1.1/lib/libssl.dylib -DOPENSSL_CRYPTO_LIBRARY=/usr/local/opt/openssl@1.1/lib/libcrypto.dylib -DICINGA2_PLUGINDIR=/usr/local/sbin -DICINGA2_WITH_PGSQL=OFF -DCMAKE_EXPORT_COMPILE_COMMANDS=ON"
+
+export I2_DEBUG="-DCMAKE_BUILD_TYPE=Debug -DICINGA2_UNITY_BUILD=OFF $I2_GENERIC"
+export I2_RELEASE="-DCMAKE_BUILD_TYPE=RelWithDebInfo -DICINGA2_WITH_TESTS=ON -DICINGA2_UNITY_BUILD=ON $I2_GENERIC"
+
+alias i2_debug="mkdir -p debug; cd debug; cmake $I2_DEBUG ..; make -j4; make -j4 install; cd .."
+alias i2_release="mkdir -p release; cd release; cmake $I2_RELEASE ..; make -j4; make -j4 install; cd .."
+
+export PATH=/usr/local/icinga/icinga2/sbin/:$PATH
+test -f /usr/local/icinga/icinga2/etc/bash_completion.d/icinga2 && source /usr/local/icinga/icinga2/etc/bash_completion.d/icinga2
+
+
+source $HOME/.bash_profile
+```
+
+#### Permissions
+
+`make install` doesn't set all required permissions, so fix them manually.
+
+```bash
+chown -R $I2_USER:$I2_GROUP /usr/local/icinga/icinga2
+```
+
+#### Run
+
+Start Icinga in foreground.
+
+```bash
+icinga2 daemon
+```
+
+Reloads triggered via SIGHUP or cluster config syncs just put the process into the background.
+
+#### Plugins
+
+```bash
+brew install monitoring-plugins
+
+sudo vim /usr/local/icinga/icinga2/etc/icinga2/constants.conf
+```
+
+```
+const PluginDir = "/usr/local/sbin"
+```
+
+#### Backends: Redis
+
+```bash
+brew install redis
+brew services start redis
+```
+
+#### Databases: MariaDB
+
+```bash
+brew install mariadb
+mkdir -p /usr/local/etc/my.cnf.d
+brew services start mariadb
+
+mysql_secure_installation
+```
+
+```
+vim $HOME/.my.cnf
+
+[client]
+user = root
+password = supersecurerootpassword
+
+sudo -i
+ln -s /Users/michi/.my.cnf $HOME/.my.cnf
+exit
+```
+
+```bash
+mysql -e 'create database icinga;'
+mysql -e "grant all on icinga.* to 'icinga'@'localhost' identified by 'icinga';"
+mysql icinga < $HOME/dev/icinga/icinga2/lib/db_ido_mysql/schema/mysql.sql
+```
+
+#### API
+
+```bash
+icinga2 api setup
+cd /usr/local/icinga/icinga2/var/lib/icinga2/certs
+HOST_NAME=mbpmif.int.netways.de
+icinga2 pki new-cert --cn ${HOST_NAME} --csr ${HOST_NAME}.csr --key ${HOST_NAME}.key
+icinga2 pki sign-csr --csr ${HOST_NAME}.csr --cert ${HOST_NAME}.crt
+echo "const NodeName = \"${HOST_NAME}\"" >> /usr/local/icinga/icinga2/etc/icinga2/constants.conf
+```
+
+#### Web
+
+While it is recommended to use Docker or the Icinga Web 2 development VM pointing to the shared IDO database resource/REST API, you can also install it locally on macOS.
+
+The required steps are described in [this script](https://github.com/dnsmichi/dotfiles/blob/master/icingaweb2.sh).
+
+
+
+### Windows Dev Environment <a id="development-windows-dev-env"></a>
+
+The following sections explain how to setup the required build tools
+and how to run and debug the code.
+
+#### TL;DR
+
+If you're going to set up a dev environment on a fresh Windows machine
+and don't care about the details,
+
+1. ensure there are 35 GB free space on C:
+2. run the following in an administrative Powershell:
+ 1. `Enable-WindowsOptionalFeature -FeatureName "NetFx3" -Online`
+ (reboot when asked!)
+ 2. `powershell -NoProfile -ExecutionPolicy Bypass -Command "Invoke-Expression (New-Object Net.WebClient).DownloadString('https://raw.githubusercontent.com/Icinga/icinga2/master/doc/win-dev.ps1')"`
+ (will take some time)
+
+This installs everything needed for cloning and building Icinga 2
+on the command line (Powershell) as follows:
+
+(Don't forget to open a new Powershell window
+to be able to use the newly installed Git.)
+
+```
+git clone https://github.com/Icinga/icinga2.git
+cd .\icinga2\
+mkdir build
+cd .\build\
+
+& "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\Common7\IDE\CommonExtensions\Microsoft\CMake\CMake\bin\cmake.exe" `
+ -DICINGA2_UNITY_BUILD=OFF -DBoost_INCLUDE_DIR=C:\local\boost_1_84_0-Win64 `
+ -DBISON_EXECUTABLE=C:\ProgramData\chocolatey\lib\winflexbison3\tools\win_bison.exe `
+ -DFLEX_EXECUTABLE=C:\ProgramData\chocolatey\lib\winflexbison3\tools\win_flex.exe ..
+
+& "C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\MSBuild\Current\Bin\MSBuild.exe" .\icinga2.sln
+```
+
+Building icinga2.sln via Visual Studio itself seems to require a reboot
+after installing the build tools.
+
+#### Chocolatey
+
+Open an administrative command prompt (Win key, type “cmd”, right-click and “run as administrator”) and paste the following instructions:
+
+```
+@powershell -NoProfile -ExecutionPolicy Bypass -Command "iex ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1'))" && SET PATH=%PATH%;%ALLUSERSPROFILE%\chocolatey\bin
+```
+
+#### Git, Posh and Vim
+
+In case you are used to `vim`, start a new administrative Powershell:
+
+```
+choco install -y vim
+```
+
+The same applies for Git integration in Powershell:
+
+```
+choco install -y poshgit
+```
+
+![Powershell Posh Git](images/development/windows_powershell_posh_git.png)
+
+In order to fix the colors for commands like `git status` or `git diff`,
+edit `$HOME/.gitconfig` in your Powershell and add the following lines:
+
+```
+vim $HOME/.gitconfig
+
+[color "status"]
+ changed = cyan bold
+ untracked = yellow bold
+ added = green bold
+ branch = cyan bold
+ unmerged = red bold
+
+[color "diff"]
+ frag = cyan
+ new = green bold
+ commit = yellow
+ old = red white
+
+[color "branch"]
+ current = yellow reverse
+ local = yellow
+ remote = green bold
+ remote = red bold
+```
+
+#### Visual Studio
+
+Microsoft provides Visual Studio Community Edition free of charge for
+open source projects such as Icinga.
+The installation requires ~9GB disk space. [Download](https://www.visualstudio.com/downloads/)
+the web installer and start the installation.
+
+Note: Only Visual Studio 2019 is covered here. Older versions are not supported.
+
+You need a free Microsoft account to download and also store your preferences.
+
+Install the following complete workloads:
+
+* C++ Desktop Development
+* .NET Desktop Development
+
+In addition also choose these individual components on Visual Studio:
+
+* .NET
+ * .NET Framework 4.x targeting packs
+ * .NET Framework 4.x.y SDKs
+* Code tools
+ * Git for Windows
+ * GitHub Extension for Visual Studio
+ * NuGet package manager
+* Compilers, build tools and runtimes
+ * C# and Visual Basic Roslyn compilers
+ * C++ 2019 Redistributable Update
+ * C++ CMake tools for Windows
+ * C++/CLI Support for v142 build tools (14.22)
+ * MSBuild
+ * MSVC v142 - VS 2019 C++ x64/x86 build tools (v14.22)
+* Debugging and testing
+ * .NET profiling tools
+ * C++ profiling tools
+ * Just-in-Time debugger
+* Development activities
+ * C# and Visual Basic
+ * C++ core features
+ * IntelliCode
+ * Live Share
+* Games and Graphics
+ * Graphics debugger and GPU profiler for DirectX (required by C++ profiling tools)
+* SDKs, libraries and frameworks
+ * Windows 10 SDK (10.0.18362.0 or later)
+ * Windows Universal C Runtime
+
+![Visual Studio Installer](images/development/windows_visual_studio_installer_01.png)
+![Visual Studio Installer](images/development/windows_visual_studio_installer_02.png)
+![Visual Studio Installer](images/development/windows_visual_studio_installer_03.png)
+
+After a while, Visual Studio will be ready.
+
+##### Style Guide for Visual Studio
+
+Navigate into `Tools > Options > Text Editor` and repeat the following for
+
+- C++
+- C#
+
+Navigate into `Tabs` and set:
+
+- Indenting: Smart (default)
+- Tab size: 4
+- Indent size: 4
+- Keep tabs (instead of spaces)
+
+![Visual Studio Tabs](images/development/windows_visual_studio_tabs_c++.png)
+
+
+#### Flex and Bison
+
+Install it using [chocolatey](https://www.wireshark.org/docs/wsdg_html_chunked/ChSetupWin32.html):
+
+```
+choco install -y winflexbison
+```
+
+Chocolatey installs these tools into the hidden directory `C:\ProgramData\chocolatey\lib\winflexbison\tools`.
+
+#### OpenSSL
+
+Icinga 2 requires the OpenSSL library. [Download](https://slproweb.com/products/Win32OpenSSL.html) the Win64 package
+and install it into `c:\local\OpenSSL-Win64`.
+
+Once asked for `Copy OpenSSLs DLLs to` select `The Windows system directory`. That way CMake/Visual Studio
+will automatically detect them for builds and packaging.
+
+> **Note**
+>
+> We cannot use the chocolatey package as this one does not provide any development headers.
+>
+> Choose 1.1.1 LTS from manual downloads for best compatibility.
+
+#### Boost
+
+Icinga needs the development header and library files from the Boost library.
+
+Visual Studio translates into the following compiler versions:
+
+- `msvc-14.2` = Visual Studio 2019
+
+##### Pre-built Binaries
+
+Prefer the pre-built package over self-compiling, if the newest version already exists.
+
+Download the [boost-binaries](https://sourceforge.net/projects/boost/files/boost-binaries/) for
+
+- msvc-14.2 is Visual Studio 2019
+- 64 for 64 bit builds
+
+```
+https://sourceforge.net/projects/boost/files/boost-binaries/1.84.0/boost_1_84_0-msvc-14.2-64.exe/download
+```
+
+Run the installer and leave the default installation path in `C:\local\boost_1_84_0`.
+
+
+##### Source & Compile
+
+In order to use the boost development header and library files you need to [download](https://www.boost.org/users/download/)
+Boost and then extract it to e.g. `C:\local\boost_1_84_0`.
+
+> **Note**
+>
+> Just use `C:\local`, the zip file already contains the sub folder. Extraction takes a while,
+> the archive contains more than 70k files.
+
+In order to integrate Boost into Visual Studio, open the `Developer Command Prompt` from the start menu,
+and navigate to `C:\local\boost_1_84_0`.
+
+Execute `bootstrap.bat` first.
+
+```
+cd C:\local\boost_1_84_0
+bootstrap.bat
+```
+
+Once finished, specify the required `toolset` to compile boost against Visual Studio.
+This takes quite some time in a Windows VM. Boost Context uses Assembler code,
+which isn't treated as exception safe by the VS compiler. Therefore set the
+additional compilation flag according to [this entry](https://lists.boost.org/Archives/boost/2015/08/224570.php).
+
+```
+b2 --toolset=msvc-14.2 link=static threading=multi runtime-link=static address-model=64 asmflags=\safeseh
+```
+
+![Windows Boost Build in VS Development Console](images/development/windows_boost_build_dev_cmd.png)
+
+#### TortoiseGit
+
+TortoiseGit provides a graphical integration into the Windows Explorer. This makes it easier to check out, commit
+and so on.
+
+[Download](https://tortoisegit.org/download/) TortoiseGit on your system.
+
+In order to clone via Git SSH you also need to create a new directory called `.ssh`
+inside your user's home directory.
+Therefore open a command prompt (win key, type `cmd`, enter) and run `mkdir .ssh`.
+Add your `id_rsa` private key and `id_rsa.pub` public key files into that directory.
+
+Start the setup routine and choose `OpenSSH` as default secure transport when asked.
+
+Open a Windows Explorer window and navigate into
+
+```
+cd %HOMEPATH%\source\repos
+```
+
+Right click and select `Git Clone` from the context menu.
+
+Use `ssh://git@github.com/icinga/icinga2.git` for SSH clones, `https://github.com/icinga/icinga2.git` otherwise.
+
+#### Packages
+
+CMake uses CPack and NSIS to create the setup executable including all binaries and libraries
+in addition to setup dialogues and configuration. Therefore we’ll need to install [NSIS](http://nsis.sourceforge.net/Download)
+first.
+
+We also need to install the Windows Installer XML (WiX) toolset. It depends on .NET 3.5, which might require a
+system reboot that Chocolatey does not handle properly. Therefore enable .NET 3.5 first and reboot when asked.
+
+```
+Enable-WindowsOptionalFeature -FeatureName "NetFx3" -Online
+choco install -y wixtoolset
+```
+
+#### CMake
+
+Icinga 2 uses CMake to manage the build environment. You can generate the Visual Studio project files
+using CMake. [Download](https://cmake.org/download/) and install CMake. Select to add it to PATH for all users
+when asked.
+
+> **Note**
+>
+> In order to properly detect the Boost libraries and VS 2019, install CMake 3.15.2+.
+>
+> **Tip**
+>
+> Cheatsheet: https://www.brianlheim.com/2018/04/09/cmake-cheat-sheet.html
+
+Once setup is completed, open a command prompt and navigate to
+
+```
+cd %HOMEPATH%\source\repos
+```
+
+Build Icinga with specific CMake variables. This generates a new Visual Studio project file called `icinga2.sln`.
+
+Visual Studio translates into the following:
+
+- `msvc-14.2` = Visual Studio 2019
+
+You need to specify the previously installed component paths.
+
+Variable | Value | Description
+----------------------|----------------------------------------------------------------------|-------------------------------------------------------
+`BOOST_ROOT` | `C:\local\boost_1_84_0` | Root path where you've extracted and compiled Boost.
+`BOOST_LIBRARYDIR` | Binary: `C:\local\boost_1_84_0\lib64-msvc-14.2`, Source: `C:\local\boost_1_84_0\stage` | Path to the static compiled Boost libraries, directory must contain `lib`.
+`BISON_EXECUTABLE` | `C:\ProgramData\chocolatey\lib\winflexbison\tools\win_bison.exe` | Path to the Bison executable.
+`FLEX_EXECUTABLE` | `C:\ProgramData\chocolatey\lib\winflexbison\tools\win_flex.exe` | Path to the Flex executable.
+`ICINGA2_UNITY_BUILD` | OFF | Disable unity builds for development environments.
+
+Tip: If you have previously opened a terminal, run `refreshenv` to re-read updated PATH variables.
+
+##### Build Scripts
+
+Icinga provides the build scripts inside the Git repository.
+
+Open a new Powershell and navigate into the cloned Git repository. Set
+specific environment variables and run the build scripts.
+
+```
+cd %HOMEPATH%\source\repos\icinga2
+
+.\tools\win32\configure-dev.ps1
+.\tools\win32\build.ps1
+.\tools\win32\test.ps1
+```
+
+The debug MSI package is located in the `debug` directory.
+
+If you did not follow the above steps with Boost binaries and OpenSSL
+paths, you can still modify the environment variables.
+
+```
+$env:CMAKE_GENERATOR='Visual Studio 16 2019'
+$env:CMAKE_GENERATOR_PLATFORM='x64'
+
+$env:ICINGA2_INSTALLPATH = 'C:\Program Files\Icinga2-debug'
+$env:ICINGA2_BUILDPATH='debug'
+$env:CMAKE_BUILD_TYPE='Debug'
+$env:OPENSSL_ROOT_DIR='C:\OpenSSL-Win64'
+$env:BOOST_ROOT='C:\local\boost_1_84_0'
+$env:BOOST_LIBRARYDIR='C:\local\boost_1_84_0\lib64-msvc-14.2'
+```
+
+#### Icinga 2 in Visual Studio
+
+This requires running the configure script once.
+
+Navigate to
+
+```
+cd %HOMEPATH%\source\repos\icinga2\debug
+```
+
+Open `icinga2.sln`. Log into Visual Studio when asked.
+
+On the right panel, select to build the `Bin/icinga-app` solution.
+
+The executable binaries are located in `Bin\Release\Debug` in your `icinga2`
+project directory.
+
+Navigate there and run `icinga2.exe --version`.
+
+```
+cd %HOMEPATH%\source\repos\icinga2\Bin\Release\Debug
+icinga2.exe --version
+```
+
+
+#### Release Package
+
+This is part of the build process script. Override the build type and pick a different
+build directory.
+
+```
+cd %HOMEPATH%\source\repos\icinga2
+
+$env:ICINGA2_BUILDPATH='release'
+$env:CMAKE_BUILD_TYPE='RelWithDebInfo'
+
+.\tools\win32\configure-dev.ps1
+.\tools\win32\build.ps1
+.\tools\win32\test.ps1
+```
+
+The release MSI package is located in the `release` directory.
+
+
+### Embedded Dev Env: Pi <a id="development-embedded-dev-env"></a>
+
+> **Note**
+>
+> This isn't officially supported yet, just a few hints how you can do it yourself.
+
+The following examples are based on armhf on a Raspberry Pi.
+
+#### ccache
+
+```bash
+apt install -y ccache
+
+/usr/sbin/update-ccache-symlinks
+
+echo 'export PATH="/usr/lib/ccache:$PATH"' | tee -a ~/.bashrc
+
+source ~/.bashrc && echo $PATH
+```
+
+#### Build
+
+Copy the icinga2 source code into `$HOME/icinga2`. Clone the `deb-icinga2` repository into `debian/`.
+
+```bash
+git clone https://github.com/Icinga/icinga2 $HOME/icinga2
+git clone https://github.com/Icinga/deb-icinga2 $HOME/icinga2/debian
+```
+
+Then build a Debian package and install it like normal.
+
+```bash
+dpkg-buildpackage -uc -us
+```
+
+## Package Builds <a id="development-package-builds"></a>
+
+This documentation is explicitly meant for packagers and the Icinga
+build infrastructure.
+
+The following requirements need to be fulfilled in order to build the
+Icinga application using a dist tarball (including notes for distributions):
+
+* cmake >= 2.6
+* GNU make (make) or ninja-build
+* C++ compiler which supports C++17
+ * RHEL/Fedora/SUSE: gcc-c++ >= 7 (extra Developer Tools on RHEL7 see below)
+ * Debian/Ubuntu: build-essential
+ * Alpine: build-base
+ * you can also use clang++
+* pkg-config
+* OpenSSL library and header files >= 1.0.1
+ * RHEL/Fedora: openssl-devel
+ * SUSE: libopenssl-devel
+ * Debian/Ubuntu: libssl-dev
+ * Alpine: libressl-dev
+* Boost library and header files >= 1.66.0
+ * RHEL/Fedora: boost166-devel
+ * Debian/Ubuntu: libboost-all-dev
+ * Alpine: boost-dev
+* GNU bison (bison)
+* GNU flex (flex) >= 2.5.35
+* systemd headers
+ * Only required when using systemd
+ * Debian/Ubuntu: libsystemd-dev
+ * RHEL/Fedora: systemd-devel
+
+### Optional features <a id="development-package-builds-optional-features"></a>
+
+* MySQL (disable with CMake variable `ICINGA2_WITH_MYSQL` to `OFF`)
+ * RHEL/Fedora: mysql-devel
+ * SUSE: libmysqlclient-devel
+ * Debian/Ubuntu: default-libmysqlclient-dev | libmysqlclient-dev
+ * Alpine: mariadb-dev
+* PostgreSQL (disable with CMake variable `ICINGA2_WITH_PGSQL` to `OFF`)
+ * RHEL/Fedora: postgresql-devel
+ * Debian/Ubuntu: libpq-dev
+ * postgresql-dev on Alpine
+* libedit (CLI console)
+ * RHEL/Fedora: libedit-devel on CentOS (RHEL requires rhel-7-server-optional-rpms)
+ * Debian/Ubuntu/Alpine: libedit-dev
+* Termcap (only required if libedit doesn't already link against termcap/ncurses)
+ * RHEL/Fedora: libtermcap-devel
+ * Debian/Ubuntu: (not necessary)
+
+### Special requirements <a id="development-package-builds-special-requirements"></a>
+
+**FreeBSD**: libexecinfo (automatically used when Icinga 2 is installed via port or package)
+
+**RHEL6**: Requires a newer boost version which is available on packages.icinga.com
+with a version suffixed name.
+
+### Runtime user environment <a id="development-package-builds-runtime-user-env"></a>
+
+By default Icinga will run as user `icinga` and group `icinga`. Additionally the
+external command pipe and livestatus features require a dedicated command group
+`icingacmd`. You can choose your own user/group names and pass them to CMake
+using the `ICINGA2_USER`, `ICINGA2_GROUP` and `ICINGA2_COMMAND_GROUP` variables.
+
+```bash
+groupadd icinga
+groupadd icingacmd
+useradd -c "icinga" -s /sbin/nologin -G icingacmd -g icinga icinga
+```
+
+On Alpine (which uses ash busybox) you can run:
+
+```bash
+addgroup -S icinga
+addgroup -S icingacmd
+adduser -S -D -H -h /var/spool/icinga2 -s /sbin/nologin -G icinga -g icinga icinga
+adduser icinga icingacmd
+```
+
+Add the web server user to the icingacmd group in order to grant it write
+permissions to the external command pipe and livestatus socket:
+
+```bash
+usermod -a -G icingacmd www-data
+```
+
+Make sure to replace "www-data" with the name of the user your web server
+is running as.
+
+### Building Icinga 2: Example <a id="development-package-builds-example"></a>
+
+Once you have installed all the necessary build requirements you can build
+Icinga 2 using the following commands:
+
+```bash
+mkdir release && cd release
+cmake ..
+cd ..
+make -C release
+make install -C release
+```
+
+You can specify an alternative installation prefix using `-DCMAKE_INSTALL_PREFIX`:
+
+```bash
+cmake .. -DCMAKE_INSTALL_PREFIX=/tmp/icinga2
+```
+
+### CMake Variables <a id="development-package-builds-cmake-variables"></a>
+
+In addition to `CMAKE_INSTALL_PREFIX`, here are most of the supported Icinga-specific CMake variables.
+
+For all variables regarding default paths in CMake, see
+[GNUInstallDirs](https://cmake.org/cmake/help/latest/module/GNUInstallDirs.html).
+
+Also see `CMakeLists.txt` for details.
+
+#### System Environment
+
+* `CMAKE_INSTALL_SYSCONFDIR`: The configuration directory; defaults to `CMAKE_INSTALL_PREFIX/etc`
+* `CMAKE_INSTALL_LOCALSTATEDIR`: The state directory; defaults to `CMAKE_INSTALL_PREFIX/var`
+* `ICINGA2_CONFIGDIR`: Main config directory; defaults to `CMAKE_INSTALL_SYSCONFDIR/icinga2` usually `/etc/icinga2`
+* `ICINGA2_CACHEDIR`: Directory for cache files; defaults to `CMAKE_INSTALL_LOCALSTATEDIR/cache/icinga2` usually `/var/cache/icinga2`
+* `ICINGA2_DATADIR`: Data directory for the daemon; defaults to `CMAKE_INSTALL_LOCALSTATEDIR/lib/icinga2` usually `/var/lib/icinga2`
+* `ICINGA2_LOGDIR`: Logfiles of the daemon; defaults to `CMAKE_INSTALL_LOCALSTATEDIR/log/icinga2` usually `/var/log/icinga2`
+* `ICINGA2_SPOOLDIR`: Spooling directory; defaults to `CMAKE_INSTALL_LOCALSTATEDIR/spool/icinga2` usually `/var/spool/icinga2`
+* `ICINGA2_INITRUNDIR`: Runtime data for the init system; defaults to `CMAKE_INSTALL_LOCALSTATEDIR/run/icinga2` usually `/run/icinga2`
+* `ICINGA2_GIT_VERSION_INFO`: Whether to use Git to determine the version number; defaults to `ON`
+* `ICINGA2_USER`: The user Icinga 2 should run as; defaults to `icinga`
+* `ICINGA2_GROUP`: The group Icinga 2 should run as; defaults to `icinga`
+* `ICINGA2_COMMAND_GROUP`: The command group Icinga 2 should use; defaults to `icingacmd`
+* `ICINGA2_SYSCONFIGFILE`: Where to put the config file the initscript/systemd pulls its dirs from;
+  defaults to `CMAKE_INSTALL_PREFIX/etc/sysconfig/icinga2`
+* `ICINGA2_PLUGINDIR`: The path for the Monitoring Plugins project binaries; defaults to `/usr/lib/nagios/plugins`
+
+#### Build Optimization
+
+* `ICINGA2_UNITY_BUILD`: Whether to perform a unity build; defaults to `ON`. Note: This requires additional memory and is not advised when building inside VMs, Docker for Mac or on embedded hardware.
+* `ICINGA2_LTO_BUILD`: Whether to use link time optimization (LTO); defaults to `OFF`
+
+#### Init System
+
+* `USE_SYSTEMD=ON|OFF`: Use systemd or a classic SysV initscript; defaults to `OFF`
+* `INSTALL_SYSTEMD_SERVICE_AND_INITSCRIPT=ON|OFF` Force install both the systemd service definition file
+ and the SysV initscript in parallel, regardless of how `USE_SYSTEMD` is set.
+ Only use this for special packaging purposes and if you know what you are doing.
+ Defaults to `OFF`.
+
+#### Features
+
+* `ICINGA2_WITH_CHECKER`: Determines whether the checker module is built; defaults to `ON`
+* `ICINGA2_WITH_COMPAT`: Determines whether the compat module is built; defaults to `ON`
+* `ICINGA2_WITH_LIVESTATUS`: Determines whether the Livestatus module is built; defaults to `ON`
+* `ICINGA2_WITH_NOTIFICATION`: Determines whether the notification module is built; defaults to `ON`
+* `ICINGA2_WITH_PERFDATA`: Determines whether the perfdata module is built; defaults to `ON`
+* `ICINGA2_WITH_TESTS`: Determines whether the unit tests are built; defaults to `ON`
+
+#### MySQL or MariaDB
+
+The following settings can be tuned for the MySQL / MariaDB IDO feature.
+
+* `ICINGA2_WITH_MYSQL`: Determines whether the MySQL IDO module is built; defaults to `ON`
+* `MYSQL_CLIENT_LIBS`: Client implementation used (mysqlclient / mariadbclient); by default searches for `mysqlclient` and `mariadbclient`
+* `MYSQL_INCLUDE_DIR`: Directory containing include files for the mysqlclient; default empty -
+ checking multiple paths like `/usr/include/mysql`
+
+See [FindMySQL.cmake](https://github.com/Icinga/icinga2/blob/master/third-party/cmake/FindMySQL.cmake)
+for implementation details.
+
+#### PostgreSQL
+
+The following settings can be tuned for the PostgreSQL IDO feature.
+
+* `ICINGA2_WITH_PGSQL`: Determines whether the PostgreSQL IDO module is built; defaults to `ON`
+* `PostgreSQL_INCLUDE_DIR`: Top-level directory containing the PostgreSQL include directories
+* `PostgreSQL_LIBRARY`: File path to the PostgreSQL library: `libpq.so` (or `libpq.so.[ver]` file)
+
+See [FindPostgreSQL.cmake](https://github.com/Icinga/icinga2/blob/master/third-party/cmake/FindPostgreSQL.cmake)
+for implementation details.
+
+#### Version detection
+
+CMake determines the Icinga 2 version number using `git describe` if the
+source directory is contained in a Git repository. Otherwise the version number
+is extracted from the [ICINGA2_VERSION](ICINGA2_VERSION) file. This behavior can be
+overridden by creating a file called `icinga-version.h.force` in the source
+directory. Alternatively the `-DICINGA2_GIT_VERSION_INFO=OFF` option for CMake
+can be used to disable the usage of `git describe`.
+
+
+### Building RPMs <a id="development-package-builds-rpms"></a>
+
+#### Build Environment on RHEL, CentOS, Fedora, Amazon Linux
+
+Setup your build environment:
+
+```bash
+yum -y install rpmdevtools
+```
+
+#### Build Environment on SuSE/SLES
+
+SLES:
+
+```bash
+zypper addrepo http://download.opensuse.org/repositories/devel:tools/SLE_12_SP4/devel:tools.repo
+zypper refresh
+zypper install rpmdevtools spectool
+```
+
+OpenSuSE:
+
+```bash
+zypper addrepo http://download.opensuse.org/repositories/devel:tools/openSUSE_Leap_15.0/devel:tools.repo
+zypper refresh
+zypper install rpmdevtools spectool
+```
+
+#### Package Builds <a id="development-package-builds-rpms-package-builds"></a>
+
+Prepare the rpmbuild directory tree:
+
+```bash
+cd $HOME
+rpmdev-setuptree
+```
+
+Snapshot builds:
+
+```bash
+curl https://raw.githubusercontent.com/Icinga/rpm-icinga2/master/icinga2.spec -o $HOME/rpmbuild/SPECS/icinga2.spec
+```
+
+> **Note**
+>
+> The above command builds snapshot packages. Change to the `release` branch
+> for release package builds.
+
+Copy the tarball to `rpmbuild/SOURCES` e.g. by using the `spectool` binary
+provided with `rpmdevtools`:
+
+```bash
+cd $HOME/rpmbuild/SOURCES
+spectool -g ../SPECS/icinga2.spec
+
+cd $HOME/rpmbuild
+```
+
+Install the build dependencies. Example for CentOS 7:
+
+```bash
+yum -y install libedit-devel ncurses-devel gcc-c++ libstdc++-devel openssl-devel \
+cmake flex bison boost-devel systemd mysql-devel postgresql-devel httpd \
+selinux-policy-devel checkpolicy selinux-policy selinux-policy-doc
+```
+
+Note: If you are using Amazon Linux, systemd is not required.
+
+A shorter way is available using the `yum-builddep` command on RHEL based systems:
+
+```bash
+yum-builddep SPECS/icinga2.spec
+```
+
+Build the RPM:
+
+```bash
+rpmbuild -ba SPECS/icinga2.spec
+```
+
+#### Additional Hints <a id="development-package-builds-rpms-additional-hints"></a>
+
+##### SELinux policy module
+
+The following packages are required to build the SELinux policy module:
+
+* checkpolicy
+* selinux-policy (selinux-policy on CentOS 6, selinux-policy-devel on CentOS 7)
+* selinux-policy-doc
+
+##### RHEL/CentOS 7
+
+The Red Hat Developer Toolset must be installed beforehand for building Icinga 2.
+It provides a C++ compiler which supports C++17 features.
+
+```bash
+yum install centos-release-scl
+```
+
+The RPM spec declares dependencies on devtoolset-11, so the correct tools
+are used for building.
+
+##### Amazon Linux
+
+If you prefer to build packages offline, a suitable Vagrant box is located
+[here](https://atlas.hashicorp.com/mvbcoding/boxes/awslinux/).
+
+### Build Debian/Ubuntu packages <a id="development-package-builds-deb"></a>
+
+Setup your build environment on Debian/Ubuntu, copy the 'debian' directory from
+the Debian packaging Git repository (https://github.com/Icinga/deb-icinga2)
+into your source tree and run the following command:
+
+```bash
+dpkg-buildpackage -uc -us
+```
+
+### Build Alpine Linux packages <a id="development-package-builds-alpine"></a>
+
+A simple way to setup a build environment is installing Alpine in a chroot.
+In this way, you can set up an Alpine build environment in a chroot under a
+different Linux distro.
+There is a script that simplifies these steps with just two commands, and
+can be found [here](https://github.com/alpinelinux/alpine-chroot-install).
+
+Once the build environment is installed, you can setup the system to build
+the packages by following [this document](https://wiki.alpinelinux.org/wiki/Creating_an_Alpine_package).
+
+### Build Post Install Tasks <a id="development-package-builds-post-install-tasks"></a>
+
+After building Icinga 2 yourself, your package build system should at least run the following post
+install requirements:
+
+* enable the `checker`, `notification` and `mainlog` feature by default
+* run 'icinga2 api setup' in order to enable the `api` feature and generate TLS certificates for the node
+
+### Run Icinga 2 <a id="development-package-builds-run-icinga"></a>
+
+Icinga 2 comes with a binary that takes care of loading all the relevant
+components (e.g. for check execution, notifications, etc.):
+
+```
+icinga2 daemon
+
+[2016-12-08 16:44:24 +0100] information/cli: Icinga application loader (version: v2.5.4-231-gb10a6b7; debug)
+[2016-12-08 16:44:24 +0100] information/cli: Loading configuration file(s).
+[2016-12-08 16:44:25 +0100] information/ConfigItem: Committing config item(s).
+...
+```
+
+#### Init Script <a id="development-package-builds-init-script"></a>
+
+Icinga 2 can be started as a daemon using the provided init script:
+
+```
+/etc/init.d/icinga2
+Usage: /etc/init.d/icinga2 {start|stop|restart|reload|checkconfig|status}
+```
+
+#### Systemd <a id="development-package-builds-systemd"></a>
+
+If your distribution uses systemd:
+
+```
+systemctl {start|stop|reload|status|enable|disable} icinga2
+```
+
+In case the distribution is running systemd >227, you'll also
+need to package and install the `etc/initsystem/icinga2.service.limits.conf`
+file into `/etc/systemd/system/icinga2.service.d`.
+
+#### openrc <a id="development-package-builds-openrc"></a>
+
+Or if your distribution uses openrc (like Alpine):
+
+```
+rc-service icinga2
+Usage: /etc/init.d/icinga2 {start|stop|restart|reload|checkconfig|status}
+```
+
+Note: The openrc init script is not shipped by default.
+A working init script for openrc can be found here: https://git.alpinelinux.org/cgit/aports/plain/community/icinga2/icinga2.initd. If you have customized any paths, edit the file and adjust them according to your setup.
+Then follow these steps:
+
+```bash
+wget https://git.alpinelinux.org/cgit/aports/plain/community/icinga2/icinga2.initd
+mv icinga2.initd /etc/init.d/icinga2
+chmod +x /etc/init.d/icinga2
+```
+
+Icinga 2 reads a single configuration file which is used to specify all
+configuration settings (global settings, hosts, services, etc.). The
+configuration format is explained in detail in the [doc/](doc/) directory.
+
+By default `make install` installs example configuration files in
+`/usr/local/etc/icinga2` unless you have specified a different prefix or
+sysconfdir.
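+
+If you want the configuration to end up in `/etc/icinga2` instead, pass a
+different prefix and sysconfdir to CMake. A sketch (double-check the variable
+names against `CMakeLists.txt`):
+
+```bash
+cmake .. -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_SYSCONFDIR=/etc
+make
+make install
+```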
+
+
+### Windows Builds <a id="development-package-builds-windows"></a>
+
+The Windows MSI packages are located at https://packages.icinga.com/windows/
+
+The build infrastructure is based on GitLab CI and an Ansible provisioned
+Windows VM running in OpenStack.
+
+The runner uses the scripts located in `tools/win32` to configure, build
+and test the packages. Uploading them to the package repository is a
+separate step. For manual package creation, please refer to [this chapter](21-development.md#development-windows-dev-env).
+
+![Windows build pipeline in GitLab](images/development/windows_builds_gitlab_pipeline.png)
+
+
+## Continuous Integration <a id="development-ci"></a>
+
+Icinga uses the integrated CI capabilities on GitHub in the development workflow.
+This ensures that incoming pull requests and branches are built on create/push events.
+Contributors and developers can immediately see whether builds fail or succeed,
+which helps with the final reviews.
+
+* For Linux, we are currently using Travis CI.
+* For Windows, AppVeyor has been integrated.
+
+Future plans involve making use of GitHub Actions.
+
+In addition to our development platform on GitHub,
+we are using GitLab's CI platform to build binary packages for
+all supported operating systems and distributions.
+These CI pipelines provide even more detailed insights into
+specific platform failures, allowing developers to react faster.
+
+### CI: Travis CI
+
+[Travis CI](https://travis-ci.org/Icinga/icinga2) provides Ubuntu as the base
+distribution, where Icinga is compiled from source, followed by running the
+unit tests and a config validation check.
+
+For details, please refer to the [.travis.yml](https://github.com/Icinga/icinga2/blob/master/.travis.yml)
+configuration file.
+
+### CI: AppVeyor
+
+[AppVeyor](https://ci.appveyor.com/project/icinga/icinga2) provides Windows
+as a platform where Visual Studio and the Boost libraries come pre-installed.
+
+Icinga is built using the Powershell scripts located in `tools/win32`.
+In addition to that, the unit tests are run.
+
+Please check the [appveyor.yml](https://github.com/Icinga/icinga2/blob/master/appveyor.yml) configuration
+file for details.
+
+
+## Advanced Development Tips <a id="development-advanced"></a>
+
+### GDB Pretty Printers <a id="development-advanced-gdb-pretty-printer"></a>
+
+Install the `boost`, `python` and `icinga2` pretty printers. Absolute paths are required,
+so please make sure to update the installation paths accordingly (`pwd`).
+
+```bash
+mkdir -p ~/.gdb_printers && cd ~/.gdb_printers
+```
+
+Boost Pretty Printers compatible with Python 3:
+
+```
+$ git clone https://github.com/mateidavid/Boost-Pretty-Printer.git && cd Boost-Pretty-Printer
+$ git checkout python-3
+$ pwd
+/home/michi/.gdb_printers/Boost-Pretty-Printer
+```
+
+Python Pretty Printers:
+
+```bash
+cd ~/.gdb_printers
+svn co svn://gcc.gnu.org/svn/gcc/trunk/libstdc++-v3/python
+```
+
+Icinga 2 Pretty Printers:
+
+```bash
+mkdir -p ~/.gdb_printers/icinga2 && cd ~/.gdb_printers/icinga2
+wget https://raw.githubusercontent.com/Icinga/icinga2/master/tools/debug/gdb/icingadbg.py
+```
+
+Now you'll need to set up your `~/.gdbinit` configuration file.
+You can download the one from Icinga 2 and adjust all paths.
+
+Example on Fedora 22:
+
+```
+$ wget https://raw.githubusercontent.com/Icinga/icinga2/master/tools/debug/gdb/gdbinit -O ~/.gdbinit
+$ vim ~/.gdbinit
+
+set print pretty on
+
+python
+import sys
+sys.path.insert(0, '/home/michi/.gdb_printers/icinga2')
+from icingadbg import register_icinga_printers
+register_icinga_printers()
+end
+
+python
+import sys
+sys.path.insert(0, '/home/michi/.gdb_printers/python')
+from libstdcxx.v6.printers import register_libstdcxx_printers
+try:
+ register_libstdcxx_printers(None)
+except:
+ pass
+end
+
+python
+import sys
+sys.path.insert(0, '/home/michi/.gdb_printers/Boost-Pretty-Printer')
+import boost_print
+boost_print.register_printers()
+end
+```
+
+If you are getting the following error when running gdb, the `libstdcxx`
+printers are already preloaded in your environment and you can remove
+the duplicate import in your `~/.gdbinit` file.
+
+```
+RuntimeError: pretty-printer already registered: libstdc++-v6
+```
diff --git a/doc/22-selinux.md b/doc/22-selinux.md
new file mode 100644
index 0000000..6c64c6f
--- /dev/null
+++ b/doc/22-selinux.md
@@ -0,0 +1,312 @@
+# SELinux <a id="selinux"></a>
+
+## Introduction <a id="selinux-introduction"></a>
+
+SELinux is a mandatory access control (MAC) system on Linux which adds a fine-grained permission system for access to all system resources such as files, devices, networks and inter-process communication.
+
+The most important questions are answered briefly in the [FAQ of the SELinux Project](https://selinuxproject.org/page/FAQ). For more details on SELinux and how to actually use and administrate it on your system have a look at [Red Hat Enterprise Linux 7 - SELinux User's and Administrator's Guide](https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/SELinux_Users_and_Administrators_Guide/index.html). For a simplified (and funny) introduction download the [SELinux Coloring Book](https://github.com/mairin/selinux-coloring-book).
+
+This documentation will use a format similar to the SELinux User's and Administrator's Guide.
+
+### Policy <a id="selinux-policy"></a>
+
+Icinga 2 provides its own SELinux policy. The development target is a policy package for Red Hat Enterprise Linux 7 and derivatives running the targeted policy, which confines Icinga 2 with all features and all checks executed. All other distributions will require some tweaks.
+
+### Installation <a id="selinux-policy-installation"></a>
+
+There are two ways of installing the SELinux Policy for Icinga 2 on Enterprise Linux 7. The preferred way is to install the package. The other option involves installing the SELinux policy manually which might be necessary if you need some fixes which haven't made their way into a release yet.
+
+If the system runs in enforcing mode and you encounter problems you can set Icinga 2's domain to permissive mode.
+
+```
+# sestatus
+SELinux status: enabled
+SELinuxfs mount: /sys/fs/selinux
+SELinux root directory: /etc/selinux
+Loaded policy name: targeted
+Current mode: enforcing
+Mode from config file: enforcing
+Policy MLS status: enabled
+Policy deny_unknown status: allowed
+Max kernel policy version: 28
+```
+
+You can change the configured mode by editing `/etc/selinux/config` and the current mode by executing `setenforce 0`.
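+
+For example, to switch to permissive mode right away and keep it across reboots
+(a sketch; editing `/etc/selinux/config` by hand works just as well):
+
+```bash
+# change the current mode until the next reboot
+setenforce 0
+# change the configured mode persistently
+sed -i 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config
+```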
+
+#### Package installation <a id="selinux-policy-installation-package"></a>
+
+Simply add the `icinga2-selinux` package to your installation.
+
+```bash
+yum install icinga2-selinux
+```
+
+Ensure that the `icinga2` process is running in its own `icinga2_t` domain after installing the policy package:
+
+```
+# systemctl restart icinga2.service
+# ps -eZ | grep icinga2
+system_u:system_r:icinga2_t:s0 2825 ? 00:00:00 icinga2
+```
+
+#### Manual installation <a id="selinux-policy-installation-manual"></a>
+
+This section describes the installation to support development and testing. It assumes that Icinga 2 is already installed from packages and running on the system.
+
+As a prerequisite install the `git`, `selinux-policy-devel` and `audit` packages. Enable and start the audit daemon afterwards:
+
+```bash
+yum install git selinux-policy-devel audit
+systemctl enable auditd.service
+systemctl start auditd.service
+```
+
+After that clone the icinga2 git repository:
+
+```bash
+git clone https://github.com/icinga/icinga2
+```
+
+To create and install the policy package, run the installation script, which also labels the resources. (The script assumes Icinga 2 was started once after system startup; the port labeling only happens once and will fail on subsequent runs.)
+
+```bash
+cd icinga2/tools/selinux/
+./icinga.sh
+```
+
+After that, restart Icinga 2 and verify that it is running in its own domain `icinga2_t`.
+
+```
+# systemctl restart icinga2.service
+# ps -eZ | grep icinga2
+system_u:system_r:icinga2_t:s0 2825 ? 00:00:00 icinga2
+```
+
+### General <a id="selinux-policy-general"></a>
+
+When the SELinux policy package for Icinga 2 is installed, the Icinga 2 daemon (icinga2) runs in its own domain `icinga2_t` and is separated from other confined services.
+
+Files have to be labeled correctly in order for Icinga 2 to be able to access them. For example, the Icinga 2 log files have to carry the `icinga2_log_t` label and the API port is labeled with `icinga2_port_t`. Furthermore Icinga 2 can open high ports and UNIX sockets to connect to databases and features like Graphite. It executes the Nagios plugins and transitions to their context if those are labeled, for example, `nagios_services_plugin_exec_t` or `nagios_system_plugin_exec_t`.
+
+Additionally the Apache web server is allowed to connect to Icinga 2's command pipe in order to allow web interfaces to send commands to icinga2. This will perhaps change later on while investigating Icinga Web 2 for SELinux!
+
+### Types <a id="selinux-policy-types"></a>
+
+The command pipe is labeled `icinga2_command_t` and other services can request access to it by using the interface `icinga2_send_commands`.
+
+The nagios plugins use their own contexts and icinga2 will transition to them. This means plugins have to be labeled correctly for their required permissions. Plugins installed from packages get their permissions set by the corresponding policy module, and you can restore them using `restorecon -R -v /usr/lib64/nagios/plugins/`. To label your own plugins use `chcon -t type /path/to/plugin`; for the type have a look at the table below.
+
+Type | Domain | Use case | Provided by policy package
+----------------------------------|------------------------------|------------------------------------------------------------------|---------------------------
+nagios_admin_plugin_exec_t        | nagios_admin_plugin_t        | Plugins which require read access on all file attributes          | nagios
+nagios_checkdisk_plugin_exec_t | nagios_checkdisk_plugin_t | Plugins which require read access to all filesystem attributes | nagios
+nagios_mail_plugin_exec_t | nagios_mail_plugin_t | Plugins which access the local mail service | nagios
+nagios_services_plugin_exec_t | nagios_services_plugin_t | Plugins monitoring network services | nagios
+nagios_system_plugin_exec_t | nagios_system_plugin_t | Plugins checking local system state | nagios
+nagios_unconfined_plugin_exec_t | nagios_unconfined_plugin_t | Plugins running without confinement | nagios
+nagios_eventhandler_plugin_exec_t | nagios_eventhandler_plugin_t | Eventhandler (actually running unconfined) | nagios
+nagios_openshift_plugin_exec_t | nagios_openshift_plugin_t | Plugins monitoring openshift | nagios
+nagios_notification_plugin_exec_t | nagios_notification_plugin_t | Notification commands | icinga (will be moved later)
+
+If one of those plugin domains causes problems you can set it to permissive by executing `semanage permissive -a domain`.
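+
+For example, to make the services plugin domain permissive and to revert that
+once the policy issue is fixed:
+
+```bash
+semanage permissive -a nagios_services_plugin_t
+semanage permissive -d nagios_services_plugin_t
+```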
+
+The policy provides a role `icinga2adm_r` for confining a user, which enables an administrative user to manage only Icinga 2 on the system. This user will also execute the plugins in their domain instead of the user's one, so you can verify their execution with the same restrictions as they have when executed by icinga2.
+
+### Booleans <a id="selinux-policy-booleans"></a>
+
+SELinux is based on the least level of access required for a service to run. Using booleans you can grant more access in a defined way. The Icinga 2 policy package provides the following booleans.
+
+**icinga2_can_connect_all**
+
+Having this boolean enabled allows icinga2 to connect to all ports. This can be necessary if you use features which connect to unconfined services, for example the [influxdb writer](14-features.md#influxdb-writer).
+
+**icinga2_run_sudo**
+
+To allow Icinga 2 to execute plugins via sudo you can toggle this boolean. It is disabled by default, resulting in error messages like `execvpe(sudo) failed: Permission denied`.
+
+**httpd_can_write_icinga2_command**
+
+To allow httpd to write to the command pipe of icinga2 this boolean has to be enabled. This is enabled by default; if not needed you can disable it for more security.
+
+**httpd_can_connect_icinga2_api**
+
+Enabling this boolean allows httpd to connect to the API of icinga2 (ports labeled `icinga2_port_t`). This is enabled by default; if not needed you can disable it for more security.
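+
+For instance, to persistently turn both booleans off if no web interface runs
+on this host:
+
+```bash
+setsebool -P httpd_can_write_icinga2_command off
+setsebool -P httpd_can_connect_icinga2_api off
+```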
+
+### Configuration Examples <a id="selinux-policy-examples"></a>
+
+#### Run the icinga2 service permissive <a id="selinux-policy-examples-permissive"></a>
+
+If problems occur while running the system in enforcing mode and those problems are only caused by the policy of the icinga2 domain, you can set this domain to permissive instead of the complete system. This can be done by executing `semanage permissive -a icinga2_t`.
+
+Make sure to report the bugs in the policy afterwards.
+
+#### Confining a plugin <a id="selinux-policy-examples-plugin"></a>
+
+Download and install a plugin, for example check_mysql_health.
+
+```bash
+wget https://labs.consol.de/download/shinken-nagios-plugins/check_mysql_health-2.1.9.2.tar.gz
+tar xvzf check_mysql_health-2.1.9.2.tar.gz
+cd check_mysql_health-2.1.9.2/
+./configure --libexecdir /usr/lib64/nagios/plugins
+make
+make install
+```
+
+It is labeled `nagios_unconfined_plugin_exec_t` by default, so it runs without restrictions.
+
+```
+# ls -lZ /usr/lib64/nagios/plugins/check_mysql_health
+-rwxr-xr-x. root root system_u:object_r:nagios_unconfined_plugin_exec_t:s0 /usr/lib64/nagios/plugins/check_mysql_health
+```
+
+In this case the plugin is monitoring a service, so it should be labeled `nagios_services_plugin_exec_t` to restrict its permissions.
+
+```
+# chcon -t nagios_services_plugin_exec_t /usr/lib64/nagios/plugins/check_mysql_health
+# ls -lZ /usr/lib64/nagios/plugins/check_mysql_health
+-rwxr-xr-x. root root system_u:object_r:nagios_services_plugin_exec_t:s0 /usr/lib64/nagios/plugins/check_mysql_health
+```
+
+The plugin still runs fine, but if someone modifies the script to do something it should not, SELinux will prevent it.
+
+#### Allow icinga to connect to all ports <a id="selinux-policy-examples-connectall"></a>
+
+You are running graphite on a different port than `2003` and want `icinga2` to connect to it.
+
+Change the port value for the graphite feature according to your graphite installation before enabling it.
+
+```
+# cat /etc/icinga2/features-enabled/graphite.conf
+/**
+ * The GraphiteWriter type writes check result metrics and
+ * performance data to a graphite tcp socket.
+ */
+
+library "perfdata"
+
+object GraphiteWriter "graphite" {
+ //host = "127.0.0.1"
+ //port = 2003
+ port = 2004
+}
+# icinga2 feature enable graphite
+```
+
+Before you restart the icinga2 service, allow it to connect to all ports by enabling the boolean `icinga2_can_connect_all` (both immediately and permanently).
+
+```bash
+setsebool icinga2_can_connect_all true
+setsebool -P icinga2_can_connect_all true
+```
+
+If you restart the daemon now it will successfully connect to graphite.
+
+#### Running plugins requiring sudo <a id="selinux-policy-examples-sudo"></a>
+
+Some plugins require privileged access to the system and are designed to be executed via `sudo` to get these privileges.
+
+In this case it is the CheckCommand [running_kernel](10-icinga-template-library.md#plugin-contrib-command-running_kernel) which is set to use `sudo`.
+
+```
+# cat /etc/icinga2/conf.d/services.conf
+apply Service "kernel" {
+  import "generic-service"
+
+  check_command = "running_kernel"
+
+  vars.running_kernel_use_sudo = true
+
+  assign where host.name == NodeName
+}
+```
+
+Having this Service defined will result in an UNKNOWN state and the error message `execvpe(sudo) failed: Permission denied`, because SELinux denies the execution.
+
+Switching the boolean `icinga2_run_sudo` to allow the execution will result in the check being executed successfully.
+
+```
+# setsebool icinga2_run_sudo true
+# setsebool -P icinga2_run_sudo true
+```
+
+#### Confining a user <a id="selinux-policy-examples-user"></a>
+
+If you want to have an administrative account capable of only managing icinga2 and not the complete system, you can restrict the privileges by confining
+this user. This is completely optional!
+
+Start by adding the Icinga 2 administrator role `icinga2adm_r` to the administrative SELinux user `staff_u`.
+
+```bash
+semanage user -m -R "staff_r sysadm_r system_r unconfined_r icinga2adm_r" staff_u
+```
+
+Confine your user login and create a sudo rule.
+
+```bash
+semanage login -a dirk -s staff_u
+echo "dirk ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/dirk
+```
+
+Log in to the system using ssh and verify your id.
+
+```
+$ id -Z
+staff_u:staff_r:staff_t:s0-s0:c0.c1023
+```
+
+Try to execute some commands as root using sudo.
+
+```
+$ sudo id -Z
+staff_u:staff_r:staff_t:s0-s0:c0.c1023
+$ sudo vi /etc/icinga2/icinga2.conf
+"/etc/icinga2/icinga2.conf" [Permission Denied]
+$ sudo cat /var/log/icinga2/icinga2.log
+cat: /var/log/icinga2/icinga2.log: Permission denied
+$ sudo systemctl reload icinga2.service
+Failed to get D-Bus connection: No connection to service manager.
+```
+
+Those commands fail because you only switch to root but do not change your SELinux role. Try again, but tell sudo to also switch the SELinux role and type.
+
+```
+$ sudo -r icinga2adm_r -t icinga2adm_t id -Z
+staff_u:icinga2adm_r:icinga2adm_t:s0-s0:c0.c1023
+$ sudo -r icinga2adm_r -t icinga2adm_t vi /etc/icinga2/icinga2.conf
+"/etc/icinga2/icinga2.conf"
+$ sudo -r icinga2adm_r -t icinga2adm_t cat /var/log/icinga2/icinga2.log
+[2015-03-26 20:48:14 +0000] information/DynamicObject: Dumping program state to file '/var/lib/icinga2/icinga2.state'
+$ sudo -r icinga2adm_r -t icinga2adm_t systemctl reload icinga2.service
+```
+
+Now the commands will work, but you always have to remember to add the arguments, so change the sudo rule to set them by default.
+
+```bash
+echo "dirk ALL=(ALL) ROLE=icinga2adm_r TYPE=icinga2adm_t NOPASSWD: ALL" > /etc/sudoers.d/dirk
+```
+
+Now try the commands again without providing the role and type and they will work. However, if you try to read Apache logs or restart Apache, for example, it will still fail.
+
+```
+$ sudo cat /var/log/httpd/error_log
+/bin/cat: /var/log/httpd/error_log: Permission denied
+$ sudo systemctl reload httpd.service
+Failed to issue method call: Access denied
+```
+
+## Bugreports <a id="selinux-bugreports"></a>
+
+If you experience any problems while running in enforcing mode, try to reproduce them in permissive mode. If the problem persists, it is not related to SELinux, because in permissive mode SELinux will not deny anything.
+
+After some feedback, Icinga 2 is now running in an enforced domain, but it still adds some rules for other necessary services, so no problems should occur at all. You can help to enhance the policy by testing Icinga 2 running confined by SELinux.
+
+Please add the following information to [bug reports](https://icinga.com/community/):
+
+* Versions, configuration snippets, etc.
+* Output of `semodule -l | grep -e icinga2 -e nagios -e apache`
+* Output of `ps -eZ | grep icinga2`
+* Output of `semanage port -l | grep icinga2`
+* Output of `audit2allow -li /var/log/audit/audit.log`
+
+If access to a file is blocked and you can tell which one, please provide the output of `ls -lZ /path/to/file` (and perhaps of the directory above).
+
+If asked for the full audit.log, add `-w /etc/shadow -p w` to `/etc/audit/rules.d/audit.rules`, restart the audit daemon, reproduce the problem and add `/var/log/audit/audit.log` to the bug report. With the added audit rule the log will include the path of the files access was denied to.
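+
+Roughly (a sketch; on Enterprise Linux 7 the audit daemon is usually restarted
+via the legacy service command):
+
+```bash
+echo "-w /etc/shadow -p w" >> /etc/audit/rules.d/audit.rules
+service auditd restart
+```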
+
+If asked to provide the full audit log with dontaudit rules disabled, execute `semodule -DB` before reproducing the problem. Afterwards enable the rules again by executing `semodule -B` to prevent auditd from spamming your logfile.
diff --git a/doc/23-migrating-from-icinga-1x.md b/doc/23-migrating-from-icinga-1x.md
new file mode 100644
index 0000000..62b5667
--- /dev/null
+++ b/doc/23-migrating-from-icinga-1x.md
@@ -0,0 +1,1585 @@
+# Migration from Icinga 1.x <a id="migration"></a>
+
+## Configuration Migration <a id="configuration-migration"></a>
+
+The Icinga 2 configuration format introduces plenty of behavioural changes. In
+order to ease migration from Icinga 1.x, this section provides hints and tips
+on your migration requirements.
+
+
+### Automated Config Migration <a id="automated-config-migration"></a>
+
+Depending on your previous setup, you may have already used different sources
+for generating the 1.x configuration files. If this is the case,
+we strongly recommend using these sources in combination with
+the [Icinga Director](https://icinga.com/docs/director/latest/doc/01-Introduction/).
+
+This can be for example:
+
+* A CMDB or RDBMS which provides host details and facts
+* PuppetDB
+* CSV/XSL/JSON files
+* Cloud resources (AWS, etc.)
+
+In case you have been using Icinga Web 1.x or an addon requiring
+the underlying IDO database, you can use this as a database resource
+to import the host details.
+
+Talks:
+
+* [This talk from OSMC 2016](https://www.youtube.com/watch?v=T6GBsfeXIZI) shares more insights (German).
+* [Automated Monitoring in heterogeneous environments](https://www.youtube.com/watch?v=bkUlS5rlHzM&list=PLeoxx10paaAn_xHJ5wBhnBJyW_d5G7-Bl&index=8)
+
+Continue reading more about [Import Sources](https://icinga.com/docs/director/latest/doc/70-Import-and-Sync/)
+for the Icinga Director.
+
+### Manual Config Migration <a id="manual-config-migration"></a>
+
+For a long-term migration of your configuration you should consider re-creating
+your configuration based on the proposed Icinga 2 configuration paradigm.
+
+Please read the [next chapter](23-migrating-from-icinga-1x.md#differences-1x-2) to find out more about the differences
+between 1.x and 2.
+
+### Manual Config Migration Hints <a id="manual-config-migration-hints"></a>
+
+These hints should provide you with enough details for manually migrating your configuration,
+or to adapt your configuration export tool to dump Icinga 2 configuration instead of
+Icinga 1.x configuration.
+
+The examples are taken from Icinga 1.x test and production environments and converted
+straight into a possible Icinga 2 format. If you found a different strategy, please
+let us know!
+
+If you require in-depth explanations, please check the [next chapter](23-migrating-from-icinga-1x.md#differences-1x-2).
+
+#### Manual Config Migration Hints for Intervals <a id="manual-config-migration-hints-Intervals"></a>
+
+By default all intervals without any duration literal are interpreted as seconds. Therefore
+all existing Icinga 1.x `*_interval` attributes require an additional `m` duration literal.
+
+Icinga 1.x:
+
+```
+define service {
+ service_description service1
+ host_name localhost1
+ check_command test_customvar
+ use generic-service
+ check_interval 5
+ retry_interval 1
+}
+```
+
+Icinga 2:
+
+```
+object Service "service1" {
+ import "generic-service"
+ host_name = "localhost1"
+ check_command = "test_customvar"
+ check_interval = 5m
+ retry_interval = 1m
+}
+```
+
+#### Manual Config Migration Hints for Services <a id="manual-config-migration-hints-services"></a>
+
+If you have used the `host_name` attribute in Icinga 1.x with one or more host names this service
+belongs to, you can migrate this to the [apply rules](03-monitoring-basics.md#using-apply) syntax.
+
+Icinga 1.x:
+
+```
+define service {
+ service_description service1
+ host_name localhost1,localhost2
+ check_command test_check
+ use generic-service
+}
+```
+
+Icinga 2:
+
+```
+apply Service "service1" {
+ import "generic-service"
+ check_command = "test_check"
+
+ assign where host.name in [ "localhost1", "localhost2" ]
+}
+```
+
+In Icinga 1.x you would have organized your services with hostgroups using the `hostgroup_name` attribute
+like the following example:
+
+```
+define service {
+ service_description servicewithhostgroups
+ hostgroup_name hostgroup1,hostgroup3
+ check_command test_check
+ use generic-service
+}
+```
+
+Using Icinga 2 you can migrate this to the [apply rules](03-monitoring-basics.md#using-apply) syntax:
+
+```
+apply Service "servicewithhostgroups" {
+ import "generic-service"
+ check_command = "test_check"
+
+ assign where "hostgroup1" in host.groups
+ assign where "hostgroup3" in host.groups
+}
+```
+
+#### Manual Config Migration Hints for Group Members <a id="manual-config-migration-hints-group-members"></a>
+
+The Icinga 1.x hostgroup `hg1` has two members `host1` and `host2`. The hostgroup `hg2` has `host3` as
+a member and includes all members of the `hg1` hostgroup.
+
+```
+define hostgroup {
+ hostgroup_name hg1
+ members host1,host2
+}
+
+define hostgroup {
+ hostgroup_name hg2
+ members host3
+ hostgroup_members hg1
+}
+```
+
+This can be migrated to Icinga 2 [using group assign](17-language-reference.md#group-assign). The additional nested hostgroup
+`hg1` is included into `hg2` with the `groups` attribute.
+
+```
+object HostGroup "hg1" {
+ groups = [ "hg2" ]
+ assign where host.name in [ "host1", "host2" ]
+}
+
+object HostGroup "hg2" {
+ assign where host.name == "host3"
+}
+```
+
+These assign rules can be applied for all groups: `HostGroup`, `ServiceGroup` and `UserGroup`
+(requires renaming from `contactgroup`).
+
+> **Tip**
+>
+> Define custom variables and assign/ignore members based on these attribute pattern matches.
+
+
+
+#### Manual Config Migration Hints for Check Command Arguments <a id="manual-config-migration-hints-check-command-arguments"></a>
+
+Host and service check command arguments are separated by a `!` in Icinga 1.x. Their order is important and they
+are referenced as `$ARGn$` where `n` is the argument counter.
+
+```
+define command {
+    command_name    my-ping-check
+ command_line $USER1$/check_ping -H $HOSTADDRESS$ -w $ARG1$ -c $ARG2$ -p 5
+}
+
+define service {
+ use generic-service
+ host_name my-server
+ service_description my-ping
+ check_command my-ping-check!100.0,20%!500.0,60%
+}
+```
+
+You could manually migrate this as follows (please note the new generic command arguments and default argument values!):
+
+```
+object CheckCommand "my-ping-check" {
+ command = [
+ PluginDir + "/check_ping", "-4"
+ ]
+
+ arguments = {
+ "-H" = "$ping_address$"
+ "-w" = "$ping_wrta$,$ping_wpl$%"
+ "-c" = "$ping_crta$,$ping_cpl$%"
+ "-p" = "$ping_packets$"
+ "-t" = "$ping_timeout$"
+ }
+
+ vars.ping_address = "$address$"
+ vars.ping_wrta = 100
+ vars.ping_wpl = 5
+ vars.ping_crta = 200
+ vars.ping_cpl = 15
+}
+
+object Service "my-ping" {
+ import "generic-service"
+ host_name = "my-server"
+ check_command = "my-ping-check"
+
+ vars.ping_wrta = 100
+ vars.ping_wpl = 20
+ vars.ping_crta = 500
+ vars.ping_cpl = 60
+}
+```
+
+#### Manual Config Migration Hints for Runtime Macros <a id="manual-config-migration-hints-runtime-macros"></a>
+
+Runtime macros have been renamed. A detailed comparison table can be found [here](23-migrating-from-icinga-1x.md#differences-1x-2-runtime-macros).
+
+For example, accessing the service check output looks like the following in Icinga 1.x:
+
+```
+$SERVICEOUTPUT$
+```
+
+In Icinga 2 you will need to write:
+
+```
+$service.output$
+```
+
+Another example referencing the host's address attribute in Icinga 1.x:
+
+```
+$HOSTADDRESS$
+```
+
+In Icinga 2 you'd just use the following macro to access all `address` attributes (even overridden from the service objects):
+
+```
+$address$
+```
+
+#### Manual Config Migration Hints for Runtime Custom Variables <a id="manual-config-migration-hints-runtime-custom-variables"></a>
+
+Custom variables from Icinga 1.x are available as Icinga 2 custom variables.
+
+```
+define command {
+ command_name test_customvar
+ command_line echo "Host CV: $_HOSTCVTEST$ Service CV: $_SERVICECVTEST$\n"
+}
+
+define host {
+ host_name localhost1
+ check_command test_customvar
+ use generic-host
+ _CVTEST host cv value
+}
+
+define service {
+ service_description service1
+ host_name localhost1
+ check_command test_customvar
+ use generic-service
+ _CVTEST service cv value
+}
+```
+
+Can be written as the following in Icinga 2:
+
+```
+object CheckCommand "test_customvar" {
+ command = "echo "Host CV: $host.vars.CVTEST$ Service CV: $service.vars.CVTEST$\n""
+}
+
+object Host "localhost1" {
+ import "generic-host"
+ check_command = "test_customvar"
+ vars.CVTEST = "host cv value"
+}
+
+object Service "service1" {
+ host_name = "localhost1"
+ check_command = "test_customvar"
+ vars.CVTEST = "service cv value"
+}
+```
+
+If you are just defining `$CVTEST$` in your command definition, its value depends on the
+execution scope -- the host check command will fetch the host attribute value of `vars.CVTEST`
+while the service check command resolves its value to the service attribute `vars.CVTEST`.
+
+> **Note**
+>
+> Custom variables in Icinga 2 are case-sensitive. `vars.CVTEST` is not the same as `vars.CvTest`.
+
+#### Manual Config Migration Hints for Contacts (Users) <a id="manual-config-migration-hints-contacts-users"></a>
+
+Contacts in Icinga 1.x act as users in Icinga 2, but do not have any notification commands specified.
+This migration part is explained in the [next chapter](23-migrating-from-icinga-1x.md#manual-config-migration-hints-notifications).
+
+```
+define contact{
+ contact_name testconfig-user
+ use generic-user
+ alias Icinga Test User
+ service_notification_options c,f,s,u
+ email icinga@localhost
+}
+```
+
+The `service_notification_options` can be [mapped](23-migrating-from-icinga-1x.md#manual-config-migration-hints-notification-filters)
+into generic `state` and `type` filters, if additional notification filtering is required. `alias` gets
+renamed to `display_name`.
+
+```
+object User "testconfig-user" {
+ import "generic-user"
+ display_name = "Icinga Test User"
+ email = "icinga@localhost"
+}
+```
+
+This user can be put into usergroups (former contactgroups) or referenced in newly migrated notification
+objects.
+
+#### Manual Config Migration Hints for Notifications <a id="manual-config-migration-hints-notifications"></a>
+
+If you are migrating a host or service notification, you'll need to extract the following information from
+your existing Icinga 1.x configuration objects:
+
+* host/service attribute `contacts` and `contact_groups`
+* host/service attribute `notification_options`
+* host/service attribute `notification_period`
+* host/service attribute `notification_interval`
+
+The clean approach is to refactor your current contacts and their notification command methods into a
+generic strategy:
+
+* host or service has a notification type (for example mail)
+* which contacts (users) are notified by mail?
+* do the notification filters, periods, intervals still apply for them? (do a cleanup during migration)
+* assign users and groups to these notifications
+* Redesign the notifications into generic [apply rules](03-monitoring-basics.md#using-apply-notifications)
+
+
+The ugly workaround solution could look like this:
+
+Extract all contacts from the remaining groups, and create a unique list. This is required for determining
+the host and service notification commands involved.
+
+* contact attributes `host_notification_commands` and `service_notification_commands` (can be a comma separated list)
+* get the command line for each notification command and store them for later
+* create a new notification name and command name
+
+Generate a new notification object based on these values. Import the generic template based on the type (`host` or `service`).
+Assign it to the host or service and set the newly generated notification command name as `command` attribute.
+
+```
+object Notification "<notificationname>" {
+ import "mail-host-notification"
+ host_name = "<thishostname>"
+ command = "<notificationcommandname>"
+```
+
+Convert the `notification_options` attribute from Icinga 1.x to Icinga 2 `states` and `types`. Details
+[here](23-migrating-from-icinga-1x.md#manual-config-migration-hints-notification-filters). Add the notification period.
+
+```
+ states = [ OK, Warning, Critical ]
+ types = [ Recovery, Problem, Custom ]
+ period = "24x7"
+```
+
+The current contact acts as `users` attribute.
+
+```
+ users = [ "<contactwithnotificationcommand>" ]
+}
+```
+
+Do this in a loop for all notification commands (depending on whether it is a host or service contact). Once done, dump the
+collected notification commands.
+
+The result of this migration is lots of unnecessary notification objects and commands, but it will unroll
+the Icinga 1.x logic into the revamped Icinga 2 notification object schema. If you are looking for code
+examples, try [LConf](https://www.netways.org).
+
+
+
+#### Manual Config Migration Hints for Notification Filters <a id="manual-config-migration-hints-notification-filters"></a>
+
+Icinga 1.x defines all notification filters in an attribute called `notification_options`. Using Icinga 2 you will
+have to split these values into the `states` and `types` attributes.
+
+> **Note**
+>
+> `Recovery` type requires the `Ok` state.
+> `Custom` and `Problem` should always be set as `type` filter.
+
+ Icinga 1.x option | Icinga 2 state | Icinga 2 type
+ ----------------------|-----------------------|-------------------
+ o | OK (Up for hosts) |
+ w | Warning | Problem
+ c | Critical | Problem
+ u | Unknown | Problem
+ d | Down | Problem
+ s | . | DowntimeStart / DowntimeEnd / DowntimeRemoved
+ r | Ok | Recovery
+ f | . | FlappingStart / FlappingEnd
+ n | 0 (none) | 0 (none)
+ . | . | Custom
+
+
+
+#### Manual Config Migration Hints for Escalations <a id="manual-config-migration-hints-escalations"></a>
+
+Escalations in Icinga 1.x are a bit tricky. By default service escalations can be applied to hosts and
+hostgroups and require a defined service object.
+
+The following example applies a service escalation to the service `dep_svc01` and all hosts in the `hg_svcdep2`
+hostgroup. The default `notification_interval` is set to `10` minutes, notifying the `cg_admin` contactgroup.
+After 20 minutes (`10*2`, notification_interval * first_notification) the notification is escalated to the
+`cg_ops` contactgroup until 60 minutes (`10*6`) have passed.
+
+```
+define service {
+ service_description dep_svc01
+ host_name dep_hostsvc01,dep_hostsvc03
+ check_command test2
+ use generic-service
+ notification_interval 10
+ contact_groups cg_admin
+}
+
+define hostgroup {
+ hostgroup_name hg_svcdep2
+ members dep_hostsvc03
+}
+
+# with hostgroup_name and service_description
+define serviceescalation {
+ hostgroup_name hg_svcdep2
+ service_description dep_svc01
+ first_notification 2
+ last_notification 6
+ contact_groups cg_ops
+}
+```
+
+In Icinga 2 the service and hostgroup definition will look quite the same. Save the `notification_interval`
+and `contact_groups` attribute for an additional notification.
+
+```
+apply Service "dep_svc01" {
+ import "generic-service"
+
+ check_command = "test2"
+
+ assign where host.name == "dep_hostsvc01"
+ assign where host.name == "dep_hostsvc03"
+}
+
+object HostGroup "hg_svcdep2" {
+ assign where host.name == "dep_hostsvc03"
+}
+
+apply Notification "email" to Service {
+ import "service-mail-notification"
+
+ interval = 10m
+ user_groups = [ "cg_admin" ]
+
+ assign where service.name == "dep_svc01" && (host.name == "dep_hostsvc01" || host.name == "dep_hostsvc03")
+}
+```
+
+Calculate the begin and end time for the newly created escalation notification:
+
+* begin = first_notification * notification_interval = 2 * 10m = 20m
+* end = last_notification * notification_interval = 6 * 10m = 60m = 1h
+
+Assign the notification escalation to the service `dep_svc01` on all hosts in the hostgroup `hg_svcdep2`.
+
+```
+apply Notification "email-escalation" to Service {
+ import "service-mail-notification"
+
+ interval = 10m
+ user_groups = [ "cg_ops" ]
+
+ times = {
+ begin = 20m
+ end = 1h
+ }
+
+ assign where service.name == "dep_svc01" && "hg_svcdep2" in host.groups
+}
+```
+
+The assign rule could be made more generic, so that the notification is applied to more than
+just this service belonging to hosts in the matched hostgroup.
+
+
+> **Note**
+>
+> When the notification is escalated, Icinga 1.x suppresses notifications to the default contacts.
+> In Icinga 2 an escalation is an additional notification with a defined begin and end time. The
+> `email` notification will continue as normal.
+
+
+
+#### Manual Config Migration Hints for Dependencies <a id="manual-config-migration-hints-dependencies"></a>
+
+There are some dependency examples already in the [basics chapter](03-monitoring-basics.md#dependencies). Dependencies in
+Icinga 1.x can be confusing in terms of which host/service is the parent and which host/service acts
+as the child.
+
+While Icinga 1.x defines `notification_failure_criteria` and `execution_failure_criteria` as dependency
+filters, this behaviour has changed in Icinga 2. There is no 1:1 migration but generally speaking
+the state filter defined in the `execution_failure_criteria` defines the Icinga 2 `state` attribute.
+If the state filter matches, you can define whether to disable checks and notifications or not.
+
+The following example describes service dependencies. If you migrate from Icinga 1.x, you will only
+want to use the classic `Host-to-Host` and `Service-to-Service` dependency relationships.
+
+```
+define service {
+ service_description dep_svc01
+ hostgroup_name hg_svcdep1
+ check_command test2
+ use generic-service
+}
+
+define service {
+ service_description dep_svc02
+ hostgroup_name hg_svcdep2
+ check_command test2
+ use generic-service
+}
+
+define hostgroup {
+ hostgroup_name hg_svcdep2
+ members host2
+}
+
+define host{
+ use linux-server-template
+ host_name host1
+ address 192.168.1.10
+}
+
+# with hostgroup_name and service_description
+define servicedependency {
+ host_name host1
+ dependent_hostgroup_name hg_svcdep2
+ service_description dep_svc01
+ dependent_service_description *
+ execution_failure_criteria u,c
+ notification_failure_criteria w,u,c
+ inherits_parent 1
+}
+```
+
+Map the dependency attributes accordingly.
+
+ Icinga 1.x | Icinga 2
+ ----------------------|---------------------
+ host_name | parent_host_name
+ dependent_host_name | child_host_name (used in assign/ignore)
+ dependent_hostgroup_name | all child hosts in group (used in assign/ignore)
+ service_description | parent_service_name
+ dependent_service_description | child_service_name (used in assign/ignore)
+
+And migrate the host and services.
+
+```
+object Host "host1" {
+ import "linux-server-template"
+ address = "192.168.1.10"
+}
+
+object HostGroup "hg_svcdep2" {
+ assign where host.name == "host2"
+}
+
+apply Service "dep_svc01" {
+ import "generic-service"
+ check_command = "test2"
+
+  assign where "hg_svcdep1" in host.groups
+}
+
+apply Service "dep_svc02" {
+ import "generic-service"
+ check_command = "test2"
+
+  assign where "hg_svcdep2" in host.groups
+}
+```
+
+When it comes to the `execution_failure_criteria` and `notification_failure_criteria` attribute migration,
+you will need to map the most common values, in this example `u,c` (`Unknown` and `Critical` will cause the
+dependency to fail). Therefore the `Dependency` is considered fulfilled when the parent is in the Ok or
+Warning state. `inherits_parent` is always enabled.
+
+```
+apply Dependency "all-svc-for-hg-hg_svcdep2-on-host1-dep_svc01" to Service {
+ parent_host_name = "host1"
+ parent_service_name = "dep_svc01"
+
+ states = [ Ok, Warning ]
+ disable_checks = true
+ disable_notifications = true
+
+ assign where "hg_svcdep2" in host.groups
+}
+```
+
+Host dependencies are explained in the [next chapter](23-migrating-from-icinga-1x.md#manual-config-migration-hints-host-parents).
+
+
+
+#### Manual Config Migration Hints for Host Parents <a id="manual-config-migration-hints-host-parents"></a>
+
+Host parents from Icinga 1.x are migrated into `Host-to-Host` dependencies in Icinga 2.
+
+The following example defines the `vmware-master` host as parent host for the guest
+virtual machines `vmware-vm1` and `vmware-vm2`.
+
+```
+define host{
+ use linux-server-template
+ host_name vmware-master
+ hostgroups vmware
+ address 192.168.1.10
+}
+
+define host{
+ use linux-server-template
+ host_name vmware-vm1
+ hostgroups vmware
+ address 192.168.27.1
+ parents vmware-master
+}
+
+define host{
+ use linux-server-template
+ host_name vmware-vm2
+ hostgroups vmware
+ address 192.168.28.1
+ parents vmware-master
+}
+```
+
+By default all hosts in the hostgroup `vmware` should get the parent assigned (but not the `vmware-master`
+host itself). This isn't really solvable with Icinga 1.x parents, but only with host dependencies as shown
+below:
+
+```
+define hostdependency {
+ dependent_hostgroup_name vmware
+ dependent_host_name !vmware-master
+ host_name vmware-master
+ inherits_parent 1
+ notification_failure_criteria d,u
+ execution_failure_criteria d,u
+ dependency_period testconfig-24x7
+}
+```
+
+When migrating to Icinga 2, the parents must be changed to a newly created host dependency.
+
+
+Map the following attributes
+
+ Icinga 1.x | Icinga 2
+ ----------------------|---------------------
+ host_name | parent_host_name
+ dependent_host_name | child_host_name (used in assign/ignore)
+ dependent_hostgroup_name | all child hosts in group (used in assign/ignore)
+
+The Icinga 2 configuration looks like this:
+
+```
+object Host "vmware-master" {
+ import "linux-server-template"
+ groups += [ "vmware" ]
+ address = "192.168.1.10"
+ vars.is_vmware_master = true
+}
+
+object Host "vmware-vm1" {
+ import "linux-server-template"
+ groups += [ "vmware" ]
+ address = "192.168.27.1"
+}
+
+object Host "vmware-vm2" {
+ import "linux-server-template"
+ groups += [ "vmware" ]
+ address = "192.168.28.1"
+}
+
+apply Dependency "vmware-master" to Host {
+ parent_host_name = "vmware-master"
+
+ assign where "vmware" in host.groups
+ ignore where host.vars.is_vmware_master
+ ignore where host.name == "vmware-master"
+}
+```
+
+For easier identification you could add the `vars.is_vmware_master` attribute to the `vmware-master`
+host and let the dependency ignore that instead of the hardcoded host name. That differs from
+the Icinga 1.x example and is a best practice hint only.
+
+
+Another way to express the same configuration would be something like:
+
+```
+object Host "vmware-master" {
+ import "linux-server-template"
+ groups += [ "vmware" ]
+ address = "192.168.1.10"
+}
+
+object Host "vmware-vm1" {
+ import "linux-server-template"
+ groups += [ "vmware" ]
+ address = "192.168.27.1"
+ vars.parents = [ "vmware-master" ]
+}
+
+object Host "vmware-vm2" {
+ import "linux-server-template"
+ groups += [ "vmware" ]
+ address = "192.168.28.1"
+ vars.parents = [ "vmware-master" ]
+}
+
+apply Dependency "host-to-parent-" for (parent in host.vars.parents) to Host {
+ parent_host_name = parent
+}
+```
+
+This example allows finer-grained host-to-host dependencies, as well as support for multiple dependencies.
+
+#### Manual Config Migration Hints for Distributed Setups <a id="manual-config-migration-hints-distributed-setup"></a>
+
+* Icinga 2 does not use active/passive instances calling OCSP commands and requiring the NSCA
+daemon for passing check results between instances.
+* Icinga 2 does not support any 1.x NEB addons for check load distribution
+
+* If your current setup consists of instances distributing the check load, you should consider
+building a [load distribution](06-distributed-monitoring.md#distributed-monitoring-scenarios) setup with Icinga 2.
+* If your current setup includes active/passive clustering with external tools like Pacemaker/DRBD,
+consider the [High Availability](06-distributed-monitoring.md#distributed-monitoring-scenarios) setup.
+* If you have built your own custom configuration deployment and check result collecting mechanism,
+you should re-design your setup and re-evaluate your requirements, and how they may be fulfilled
+using the Icinga 2 cluster capabilities.
+
+
+## Differences between Icinga 1.x and 2 <a id="differences-1x-2"></a>
+
+### Configuration Format <a id="differences-1x-2-configuration-format"></a>
+
+Icinga 1.x supports two configuration formats: key-value-based settings in the
+`icinga.cfg` configuration file and object-based in included files (`cfg_dir`,
+`cfg_file`). The path to the `icinga.cfg` configuration file must be passed to
+the Icinga daemon at startup.
+
+icinga.cfg:
+
+```
+enable_notifications=1
+```
+
+objects.cfg:
+
+```
+define service {
+ notifications_enabled 0
+}
+```
+
+Icinga 2 supports objects and (global) variables, but does not distinguish
+between the main configuration file and any other included file.
+
+icinga2.conf:
+
+```
+const EnableNotifications = true
+
+object Service "test" {
+ enable_notifications = false
+}
+```
+
+#### Sample Configuration and ITL <a id="differences-1x-2-sample-configuration-itl"></a>
+
+While Icinga 1.x ships sample configuration and templates spread in various
+object files, Icinga 2 moves all templates into the Icinga Template Library (ITL)
+and includes them in the sample configuration.
+
+Additional plugin check commands are shipped with Icinga 2 as well.
+
+The ITL will be updated on every release and must not be edited by the user.
+
+There are still generic templates available for your convenience which may or may
+not be re-used in your configuration. For instance, `generic-service` includes
+all required attributes except `check_command` for a service.
+
+Sample configuration files are located in the `conf.d/` directory which is
+included in `icinga2.conf` by default.
+
+> **Note**
+>
+> Add your own custom templates in the `conf.d/` directory as well, e.g. inside
+> the [templates.conf](04-configuration.md#templates-conf) file.
+
+### Main Config File <a id="differences-1x-2-main-config"></a>
+
+In Icinga 1.x there are many global configuration settings available in `icinga.cfg`.
+Icinga 2 only uses a small set of [global constants](17-language-reference.md#constants) allowing
+you to specify certain settings, such as the `NodeName` in a cluster scenario.
+
+Aside from that, the [icinga2.conf](04-configuration.md#icinga2-conf) should take care of including
+global constants, enabled [features](11-cli-commands.md#enable-features) and the object configuration.
+
+### Include Files and Directories <a id="differences-1x-2-include-files-dirs"></a>
+
+In Icinga 1.x the `icinga.cfg` file contains `cfg_file` and `cfg_dir`
+directives. The `cfg_dir` directive recursively includes all files with a `.cfg`
+suffix in the given directory. Only absolute paths may be used. The `cfg_file`
+and `cfg_dir` directives can include the same file twice which leads to
+configuration errors in Icinga 1.x.
+
+```
+cfg_file=/etc/icinga/objects/commands.cfg
+cfg_dir=/etc/icinga/objects
+```
+
+Icinga 2 supports wildcard includes and relative paths, e.g. for including
+`conf.d/*.conf` in the same directory.
+
+```
+include "conf.d/*.conf"
+```
+
+If you want to include files and directories recursively, you need to define
+a separate option and add the directory and an optional pattern.
+
+```
+include_recursive "conf.d"
+```
+
+A global search path for includes is available for advanced features like
+the Icinga Template Library (ITL) or additional monitoring plugin check
+command configuration.
+
+```
+include <itl>
+include <plugins>
+```
+
+By convention the `.conf` suffix is used for Icinga 2 configuration files.
+
+### Resource File and Global Macros <a id="differences-1x-2-resource-file-global-macros"></a>
+
+Global macros such as for the plugin directory, usernames and passwords can be
+set in the `resource.cfg` configuration file in Icinga 1.x. By convention the
+`USER1` macro is used to define the directory for the plugins.
+
+Icinga 2 uses global constants instead. In the default config these are
+set in the `constants.conf` configuration file:
+
+```
+/**
+ * This file defines global constants which can be used in
+ * the other configuration files. At a minimum the
+ * PluginDir constant should be defined.
+ */
+
+const PluginDir = "/usr/lib/nagios/plugins"
+```
+
+[Global macros](17-language-reference.md#constants) can only be defined once. Trying to modify a
+global constant will result in an error.
+
+### Configuration Comments <a id="differences-1x-2-configuration-comments"></a>
+
+In Icinga 1.x comments are made using a leading hash (`#`) or a semi-colon (`;`)
+for inline comments.
+
+In Icinga 2 comments can either be encapsulated by `/*` and `*/` (allowing for
+multi-line comments) or start with two slashes (`//`). A leading hash (`#`)
+can also be used.
+
+### Object Names <a id="differences-1x-2-object-names"></a>
+
+Object names must not contain an exclamation mark (`!`). Use the `display_name` attribute
+to specify user-friendly names which should be shown in UIs (supported by
+Icinga Web 2 for example).
+
+Object names are not specified using attributes (e.g. `service_description` for
+services) like in Icinga 1.x but directly after their type definition.
+
+```
+define service {
+ host_name localhost
+ service_description ping4
+}
+
+object Service "ping4" {
+ host_name = "localhost"
+}
+```
+
+### Templates <a id="differences-1x-2-templates"></a>
+
+In Icinga 1.x templates are identified using the `register 0` setting. Icinga 2
+uses the `template` identifier:
+
+```
+template Service "ping4-template" { }
+```
+
+Icinga 1.x objects inherit from templates using the `use` attribute.
+Icinga 2 uses the keyword `import` with template names in double quotes.
+
+```
+define service {
+ service_description testservice
+ use tmpl1,tmpl2,tmpl3
+}
+
+object Service "testservice" {
+ import "tmpl1"
+ import "tmpl2"
+ import "tmpl3"
+}
+```
+
+The last template overrides previously set values.
+
+### Object attributes <a id="differences-1x-2-object-attributes"></a>
+
+Icinga 1.x separates attribute and value pairs with whitespaces/tabs. Icinga 2
+requires an equal sign (=) between them.
+
+```
+define service {
+ check_interval 5
+}
+
+object Service "test" {
+ check_interval = 5m
+}
+```
+
+Please note that the default time value is seconds if no duration literal
+is given. `check_interval = 5` behaves the same as `check_interval = 5s`.
+
+All strings require double quotes in Icinga 2. Therefore a double quote
+must be escaped by a backslash (e.g. in command line).
+If an attribute identifier starts with a number, it must be enclosed
+in double quotes as well.
+
+#### Alias vs. Display Name <a id="differences-1x-2-alias-display-name"></a>
+
+In Icinga 1.x a host can have an `alias` and a `display_name` attribute used
+for a more descriptive name. A service can only have a `display_name` attribute.
+The `alias` is used for group, timeperiod, etc. objects too.
+Icinga 2 only supports the `display_name` attribute which is also taken into
+account by Icinga web interfaces.
+
+### Custom Variables <a id="differences-1x-2-custom-variables"></a>
+
+Icinga 2 allows you to define custom variables in the `vars` dictionary.
+The `notes`, `notes_url`, `action_url`, `icon_image`, `icon_image_alt`
+attributes for host and service objects are still available in Icinga 2.
+
+`2d_coords` and `statusmap_image` are not supported in Icinga 2.
+
+Icinga 1.x custom variable attributes must be prefixed using an underscore (`_`).
+In Icinga 2 these attributes must be added to the `vars` dictionary as custom variables.
+
+```
+vars.dn = "cn=icinga2-dev-host,ou=icinga,ou=main,ou=IcingaConfig,ou=LConf,dc=icinga,dc=org"
+vars.cv = "my custom cmdb description"
+```
+
+These custom variables are also used as [command parameters](03-monitoring-basics.md#command-passing-parameters).
+
+While Icinga 1.x only supports numbers and strings as custom variable values,
+Icinga 2 extends that to arrays and (nested) dictionaries. For more details
+look [here](03-monitoring-basics.md#custom-variables).
+
+### Host Service Relation <a id="differences-1x-2-host-service-relation"></a>
+
+In Icinga 1.x a service object is associated with a host by defining the
+`host_name` attribute in the service definition. Alternate methods refer
+to `hostgroup_name` or behaviour-changing regular expressions.
+
+The preferred way of associating hosts with services in Icinga 2 is by
+using the [apply](03-monitoring-basics.md#using-apply) keyword.
+
+Direct object relations between a service and a host still allow you to use
+the `host_name` [Service](09-object-types.md#objecttype-service) object attribute.
+
+### Users <a id="differences-1x-2-users"></a>
+
+Contacts have been renamed to users (same for groups). A contact does not
+only provide (custom) attributes and notification commands used for notifications,
+but is also used for authorization checks in Icinga 1.x.
+
+Icinga 2 changes that behavior and makes the user an attribute provider only.
+These attributes can be accessed using [runtime macros](03-monitoring-basics.md#runtime-macros)
+inside notification command definitions.
+
+In Icinga 2 notification commands are not directly associated with users.
+Instead the notification command is specified inside `Notification` objects next to
+user and user group relations.
+
+The `IdoMySqlConnection` and `LivestatusListener` types will
+provide the contact and contactgroups attributes for services for compatibility
+reasons. These values are calculated from all services, their notifications,
+and their users.
+
+### Macros <a id="differences-1x-2-macros"></a>
+
+Various object attributes and runtime variables can be accessed as macros in
+commands in Icinga 1.x -- Icinga 2 supports all required [custom variables](03-monitoring-basics.md#custom-variables).
+
+#### Command Arguments <a id="differences-1x-2-command-arguments"></a>
+
+If you have previously used Icinga 1.x, you may already be familiar with
+user and argument definitions (e.g., `USER1` or `ARG1`). Unlike in Icinga 1.x
+the Icinga 2 custom variables may have arbitrary names and arguments are no
+longer specified in the `check_command` setting.
+
+In Icinga 1.x arguments are specified in the `check_command` attribute and
+are separated from the command name using an exclamation mark (`!`).
+
+Please check the migration hints for a detailed
+[migration example](23-migrating-from-icinga-1x.md#manual-config-migration-hints-check-command-arguments).
+
+> **Note**
+>
+> The Icinga 1.x feature named `Command Expander` does not work with Icinga 2.
+
+#### Environment Macros <a id="differences-1x-2-environment-macros"></a>
+
+The global configuration setting `enable_environment_macros` does not exist in
+Icinga 2.
+
+Macros exported into the [environment](03-monitoring-basics.md#command-environment-variables)
+can be set using the `env` attribute in command objects.
+
+#### Runtime Macros <a id="differences-1x-2-runtime-macros"></a>
+
+Icinga 2 requires an object specific namespace when accessing configuration
+and stateful runtime macros. Custom variables can be accessed directly.
+
+If a runtime macro from Icinga 1.x is not listed here, it is not supported
+by Icinga 2.
+
+Changes to user (contact) runtime macros
+
+ Icinga 1.x | Icinga 2
+ -----------------------|----------------------
+ CONTACTNAME | user.name
+ CONTACTALIAS | user.display_name
+ CONTACTEMAIL | user.email
+ CONTACTPAGER | user.pager
+
+`CONTACTADDRESS*` is not supported but can be accessed as `$user.vars.address1$`
+if set.
+
+Changes to service runtime macros
+
+ Icinga 1.x | Icinga 2
+ -----------------------|----------------------
+ SERVICEDESC | service.name
+ SERVICEDISPLAYNAME | service.display_name
+ SERVICECHECKCOMMAND | service.check_command
+ SERVICESTATE | service.state
+ SERVICESTATEID | service.state_id
+ SERVICESTATETYPE | service.state_type
+ SERVICEATTEMPT | service.check_attempt
+ MAXSERVICEATTEMPT | service.max_check_attempts
+ LASTSERVICESTATE | service.last_state
+ LASTSERVICESTATEID | service.last_state_id
+ LASTSERVICESTATETYPE | service.last_state_type
+ LASTSERVICESTATECHANGE | service.last_state_change
+ SERVICEDOWNTIME | service.downtime_depth
+ SERVICEDURATIONSEC | service.duration_sec
+ SERVICELATENCY | service.latency
+ SERVICEEXECUTIONTIME | service.execution_time
+ SERVICEOUTPUT | service.output
+ SERVICEPERFDATA | service.perfdata
+ LASTSERVICECHECK | service.last_check
+ SERVICENOTES | service.notes
+ SERVICENOTESURL | service.notes_url
+ SERVICEACTIONURL | service.action_url
+
+
+Changes to host runtime macros:
+
+ Icinga 1.x | Icinga 2
+ -----------------------|----------------------
+ HOSTNAME | host.name
+ HOSTADDRESS | host.address
+ HOSTADDRESS6 | host.address6
+ HOSTDISPLAYNAME | host.display_name
+ HOSTALIAS | (use `host.display_name` instead)
+ HOSTCHECKCOMMAND | host.check_command
+ HOSTSTATE | host.state
+ HOSTSTATEID | host.state_id
+ HOSTSTATETYPE | host.state_type
+ HOSTATTEMPT | host.check_attempt
+ MAXHOSTATTEMPT | host.max_check_attempts
+ LASTHOSTSTATE | host.last_state
+ LASTHOSTSTATEID | host.last_state_id
+ LASTHOSTSTATETYPE | host.last_state_type
+ LASTHOSTSTATECHANGE | host.last_state_change
+ HOSTDOWNTIME | host.downtime_depth
+ HOSTDURATIONSEC | host.duration_sec
+ HOSTLATENCY | host.latency
+ HOSTEXECUTIONTIME | host.execution_time
+ HOSTOUTPUT | host.output
+ HOSTPERFDATA | host.perfdata
+ LASTHOSTCHECK | host.last_check
+ HOSTNOTES | host.notes
+ HOSTNOTESURL | host.notes_url
+ HOSTACTIONURL | host.action_url
+ TOTALSERVICES | host.num_services
+ TOTALSERVICESOK | host.num_services_ok
+ TOTALSERVICESWARNING | host.num_services_warning
+ TOTALSERVICESUNKNOWN | host.num_services_unknown
+ TOTALSERVICESCRITICAL | host.num_services_critical
+
+Changes to command runtime macros:
+
+ Icinga 1.x | Icinga 2
+ -----------------------|----------------------
+ COMMANDNAME | command.name
+
+Changes to notification runtime macros:
+
+ Icinga 1.x | Icinga 2
+ -----------------------|----------------------
+ NOTIFICATIONTYPE | notification.type
+ NOTIFICATIONAUTHOR | notification.author
+ NOTIFICATIONCOMMENT | notification.comment
+ NOTIFICATIONAUTHORNAME | (use `notification.author`)
+ NOTIFICATIONAUTHORALIAS | (use `notification.author`)
+
+
+Changes to global runtime macros:
+
+ Icinga 1.x | Icinga 2
+ -----------------------|----------------------
+ TIMET | icinga.timet
+ LONGDATETIME | icinga.long_date_time
+ SHORTDATETIME | icinga.short_date_time
+ DATE | icinga.date
+ TIME | icinga.time
+ PROCESSSTARTTIME | icinga.uptime
+
+Changes to global statistic macros:
+
+ Icinga 1.x | Icinga 2
+ ----------------------------------|----------------------
+ TOTALHOSTSUP | icinga.num_hosts_up
+ TOTALHOSTSDOWN | icinga.num_hosts_down
+ TOTALHOSTSUNREACHABLE | icinga.num_hosts_unreachable
+ TOTALHOSTSDOWNUNHANDLED | --
+ TOTALHOSTSUNREACHABLEUNHANDLED | --
+ TOTALHOSTPROBLEMS | down
+ TOTALHOSTPROBLEMSUNHANDLED | down-(downtime+acknowledged)
+ TOTALSERVICESOK | icinga.num_services_ok
+ TOTALSERVICESWARNING | icinga.num_services_warning
+ TOTALSERVICESCRITICAL | icinga.num_services_critical
+ TOTALSERVICESUNKNOWN | icinga.num_services_unknown
+ TOTALSERVICESWARNINGUNHANDLED | --
+ TOTALSERVICESCRITICALUNHANDLED | --
+ TOTALSERVICESUNKNOWNUNHANDLED | --
+ TOTALSERVICEPROBLEMS | ok+warning+critical+unknown
+ TOTALSERVICEPROBLEMSUNHANDLED | warning+critical+unknown-(downtime+acknowledged)
+
+
+
+
+### External Commands <a id="differences-1x-2-external-commands"></a>
+
+`CHANGE_CUSTOM_CONTACT_VAR` was renamed to `CHANGE_CUSTOM_USER_VAR`.
+
+The following external commands are not supported:
+
+```
+CHANGE_*MODATTR
+CHANGE_CONTACT_HOST_NOTIFICATION_TIMEPERIOD
+CHANGE_HOST_NOTIFICATION_TIMEPERIOD
+CHANGE_SVC_NOTIFICATION_TIMEPERIOD
+DEL_DOWNTIME_BY_HOSTGROUP_NAME
+DEL_DOWNTIME_BY_START_TIME_COMMENT
+DISABLE_ALL_NOTIFICATIONS_BEYOND_HOST
+DISABLE_CONTACT_HOST_NOTIFICATIONS
+DISABLE_CONTACT_SVC_NOTIFICATIONS
+DISABLE_CONTACTGROUP_HOST_NOTIFICATIONS
+DISABLE_CONTACTGROUP_SVC_NOTIFICATIONS
+DISABLE_FAILURE_PREDICTION
+DISABLE_HOST_AND_CHILD_NOTIFICATIONS
+DISABLE_HOST_FRESHNESS_CHECKS
+DISABLE_NOTIFICATIONS_EXPIRE_TIME
+DISABLE_SERVICE_FRESHNESS_CHECKS
+ENABLE_ALL_NOTIFICATIONS_BEYOND_HOST
+ENABLE_CONTACT_HOST_NOTIFICATIONS
+ENABLE_CONTACT_SVC_NOTIFICATIONS
+ENABLE_CONTACTGROUP_HOST_NOTIFICATIONS
+ENABLE_CONTACTGROUP_SVC_NOTIFICATIONS
+ENABLE_FAILURE_PREDICTION
+ENABLE_HOST_AND_CHILD_NOTIFICATIONS
+ENABLE_HOST_FRESHNESS_CHECKS
+ENABLE_SERVICE_FRESHNESS_CHECKS
+READ_STATE_INFORMATION
+SAVE_STATE_INFORMATION
+SET_HOST_NOTIFICATION_NUMBER
+SET_SVC_NOTIFICATION_NUMBER
+START_ACCEPTING_PASSIVE_HOST_CHECKS
+START_ACCEPTING_PASSIVE_SVC_CHECKS
+START_OBSESSING_OVER_HOST
+START_OBSESSING_OVER_HOST_CHECKS
+START_OBSESSING_OVER_SVC
+START_OBSESSING_OVER_SVC_CHECKS
+STOP_ACCEPTING_PASSIVE_HOST_CHECKS
+STOP_ACCEPTING_PASSIVE_SVC_CHECKS
+STOP_OBSESSING_OVER_HOST
+STOP_OBSESSING_OVER_HOST_CHECKS
+STOP_OBSESSING_OVER_SVC
+STOP_OBSESSING_OVER_SVC_CHECKS
+```
+
+### Asynchronous Event Execution <a id="differences-1x-2-async-event-execution"></a>
+
+Unlike Icinga 1.x, Icinga 2 does not block while waiting for a command to be
+executed -- whether it is a check, a notification, an event
+handler, a performance data write, etc. As a result you will see
+low to zero (check) latencies with Icinga 2.
+
+### Checks <a id="differences-1x-2-checks"></a>
+
+#### Check Output <a id="differences-1x-2-check-output"></a>
+
+Icinga 2 does not distinguish between `output` (first line) and
+`long_output` (remaining lines) as Icinga 1.x does. Performance data is
+provided separately.
+
+There is no output length restriction as known from Icinga 1.x with its
+[8KB static buffer](https://docs.icinga.com/latest/en/pluginapi.html#outputlengthrestrictions).
+
+The `IdoMysqlConnection` and `LivestatusListener` types
+split the raw output into `output` (first line) and `long_output` (remaining
+lines) for compatibility reasons.
+
+#### Initial State <a id="differences-1x-2-initial-state"></a>
+
+Icinga 1.x uses the `max_service_check_spread` setting to specify a time range
+in which the initial state checks must have happened. Icinga 2 uses the
+`retry_interval` setting instead, or `check_interval` divided by 5 if
+`retry_interval` is not defined.
+
+### Comments <a id="differences-1x-2-comments"></a>
+
+Icinga 2 doesn't support non-persistent comments.
+
+### Commands <a id="differences-1x-2-commands"></a>
+
+Unlike in Icinga 1.x, there are three different command types in Icinga 2:
+`CheckCommand`, `NotificationCommand`, and `EventCommand`.
+
+In Icinga 1.x, for example, it is possible to accidentally use a notification
+command as an event handler, which might cause problems depending on which
+runtime macros are used in the notification command.
+
+In Icinga 2 these command types are separated and will generate an error during
+configuration validation if used in the wrong context.
+
+While Icinga 2 still supports the complete command line in command objects, it's
+recommended to use [command arguments](03-monitoring-basics.md#command-arguments)
+with optional and conditional command line parameters instead.
+
+It's also possible to define default argument values for the command itself,
+which can then be overridden by the host or service.
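+
+A hedged sketch (command, variable, and object names are illustrative) of a command
+defining default argument values which a service overrides:
+
+```
+object CheckCommand "http-vhost" {
+  command = [ PluginDir + "/check_http" ]
+
+  arguments = {
+    "-H" = "$http_vhost$"
+    "-u" = "$http_uri$"
+  }
+
+  // defaults defined on the command itself
+  vars.http_vhost = "$host.name$"
+  vars.http_uri = "/"
+}
+
+object Service "http" {
+  host_name = "web-dev"
+  check_command = "http-vhost"
+
+  vars.http_uri = "/health" // overrides the command default
+}
+```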
+
+#### Command Timeouts <a id="differences-1x-2-commands-timeouts"></a>
+
+In Icinga 1.x there were two global options defining a host and a service check
+timeout. This was problematic when only a couple of check plugins
+required their command timeouts to be extended.
+
+Icinga 2 allows you to specify the command timeout directly on the command. So,
+if your VMware check plugin takes 15 minutes, [increase the timeout](09-object-types.md#objecttype-checkcommand)
+accordingly.
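+
+For example (a sketch with an assumed custom command name), the timeout can be raised
+for that single command only:
+
+```
+object CheckCommand "vmware-health" {
+  command = [ PluginDir + "/check_vmware_health" ]
+
+  timeout = 15m // extended timeout for this command only
+}
+```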
+
+
+### Groups <a id="differences-1x-2-groups"></a>
+
+In Icinga 2 hosts, services, and users are added to groups using the `groups`
+attribute in the object. The old way of listing all group members in the group's
+`members` attribute has been replaced by `assign where` and `ignore where`
+expressions, see [group assign](03-monitoring-basics.md#group-assign-intro).
+
+```
+object Host "web-dev" {
+ import "generic-host"
+}
+
+object HostGroup "dev-hosts" {
+ display_name = "Dev Hosts"
+ assign where match("*-dev", host.name)
+}
+```
+
+#### Add Service to Hostgroup where Host is Member <a id="differences-1x-2-service-hostgroup-host"></a>
+
+In order to associate a service with all hosts in a host group the [apply](03-monitoring-basics.md#using-apply)
+keyword can be used:
+
+```
+apply Service "ping4" {
+ import "generic-service"
+
+ check_command = "ping4"
+
+ assign where "dev-hosts" in host.groups
+}
+```
+
+### Notifications <a id="differences-1x-2-notifications"></a>
+
+Notifications are a new object type in Icinga 2. Imagine the following
+notification configuration problem in Icinga 1.x:
+
+* Service A should notify contact X via SMS
+* Service B should notify contact X via Mail
+* Service C should notify contact Y via Mail and SMS
+* Contact X and Y should also be used for authorization
+
+The only way to achieve a semi-clean solution is to:
+
+* Create contact X-sms, set service_notification_command for sms, assign contact
+ to service A
+* Create contact X-mail, set service_notification_command for mail, assign
+ contact to service B
+* Create contact Y, set service_notification_command for sms and mail, assign
+ contact to service C
+* Create contact X without notification commands, assign to service A and B
+
+Basically you are required to create duplicate contacts, either one for each
+notification method or one used for authorization only.
+
+Icinga 2 attempts to solve that problem in the following way:
+
+* Create user X, set SMS and Mail attributes, used for authorization
+* Create user Y, set SMS and Mail attributes, used for authorization
+* Create notification A-SMS, set command for sms, add user X,
+ assign notification A-SMS to service A
+* Create notification B-Mail, set command for mail, add user X,
+  assign notification B-Mail to service B
+* Create notification C-SMS, set command for sms, add user Y,
+ assign notification C-SMS to service C
+* Create notification C-Mail, set command for mail, add user Y,
+ assign notification C-Mail to service C
+
+Previously in Icinga 1.x it looked like this:
+
+```
+service -> (contact, contactgroup) -> notification command
+```
+
+In Icinga 2 it will look like this:
+
+```
+Service -> Notification -> NotificationCommand
+ -> User, UserGroup
+```
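+
+A hedged configuration sketch of that relation (object and command names are
+illustrative):
+
+```
+object User "x" {
+  email = "x@example.org"
+  vars.mobile = "+49123456789"
+}
+
+apply Notification "A-SMS" to Service {
+  command = "sms-service-notification"
+  users = [ "x" ]
+
+  assign where service.name == "A"
+}
+
+apply Notification "B-Mail" to Service {
+  command = "mail-service-notification"
+  users = [ "x" ]
+
+  assign where service.name == "B"
+}
+```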
+
+#### Escalations <a id="differences-1x-2-escalations"></a>
+
+Escalations in Icinga 1.x require a separate object matching existing
+objects. Escalations happen between a defined start and end time which is
+calculated from the notification_interval:
+
+```
+start = notification start + (notification_interval * first_notification)
+end = notification start + (notification_interval * last_notification)
+```
+
+In theory first_notification and last_notification can be set to readable
+numbers. In practice users manipulate those attributes in combination
+with notification_interval in order to get a start and end time.
+
+In Icinga 2 the notification object can be used as a notification escalation
+if the start and end times are defined within the `times` attribute using
+duration literals (e.g. 30m).
+
+The Icinga 2 escalation does not replace the currently running notification.
+In Icinga 1.x it's required to copy the contacts from the service notification
+to the escalation to guarantee the normal notifications once an escalation
+happens.
+That's not necessary with Icinga 2, which only requires an additional notification
+object for the escalation itself.
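+
+A minimal sketch of such an escalation notification (names and times are
+illustrative):
+
+```
+apply Notification "escalation-sms" to Service {
+  command = "sms-service-notification"
+  users = [ "oncall-manager" ]
+
+  // notify only between 30 minutes and 2 hours after the problem notification started
+  times = {
+    begin = 30m
+    end = 2h
+  }
+
+  assign where service.vars.sla == "24x7"
+}
+```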
+
+#### Notification Options <a id="differences-1x-2-notification-options"></a>
+
+Unlike Icinga 1.x with its `notification_options` attribute and comma-separated
+state and type filters, Icinga 2 uses two configuration attributes for that.
+All state and type filters use long names and are listed in arrays:
+
+```
+notification_options w,u,c,r,f,s
+
+states = [ Warning, Unknown, Critical ]
+types = [ Problem, Recovery, FlappingStart, FlappingEnd, DowntimeStart, DowntimeEnd, DowntimeRemoved ]
+```
+
+Icinga 2 adds more fine-grained type filters for acknowledgements, downtime,
+and flapping type (start, end, ...).
+
+### Dependencies and Parents <a id="differences-1x-2-dependencies-parents"></a>
+
+In Icinga 1.x it's possible to define host parents to determine network reachability
+and keep a host's state unreachable rather than down.
+Furthermore there are host and service dependencies preventing unnecessary checks and
+notifications. A host must not depend on a service, and vice versa. All dependencies
+are configured as separate objects and cannot be set directly on the host or service
+object.
+
+A service can now depend on a host, and vice versa. A service has an implicit
+dependency (parent) on its host. A host-to-host dependency acts implicitly as a host parent relation.
+
+The former `host_name` and `dependent_host_name` have been renamed to `parent_host_name`
+and `child_host_name` (same for the service attribute). When using apply rules the
+child attributes may be omitted.
+
+For detailed examples on how to use the dependencies please check the [dependencies](03-monitoring-basics.md#dependencies)
+chapter.
+
+Dependencies can be applied to hosts or services using the [apply rules](17-language-reference.md#apply).
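+
+For example (a sketch assuming a parent host named `router` and a `behind_router`
+custom variable):
+
+```
+apply Dependency "reachability" to Host {
+  parent_host_name = "router"
+
+  disable_checks = true
+  disable_notifications = true
+
+  assign where host.vars.behind_router == true
+}
+```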
+
+The `IdoMysqlConnection` and `LivestatusListener` types
+support the Icinga 1.x schema with dependencies and parent attributes for
+compatibility reasons.
+
+### Flapping <a id="differences-1x-2-flapping"></a>
+
+The Icinga 1.x flapping detection uses the last 21 states of a service. This
+value is hardcoded and cannot be changed. The algorithm for determining a flapping state
+is as follows:
+
+```
+flapping value = (number of actual state changes / number of possible state changes)
+```
+
+The flapping value is then compared to the low and high flapping thresholds.
+
+The algorithm used in Icinga 2 does not store the past states but calculates the
+flapping value from counters and half-life values. Icinga 2 compares
+this value with a single flapping threshold configuration attribute.
+
+### Check Result Freshness <a id="differences-1x-2-check-result-freshness"></a>
+
+Freshness of check results must be enabled explicitly in Icinga 1.x. The attribute
+`freshness_threshold` defines the threshold in seconds. Once the threshold is exceeded, an
+active freshness check defined by the `check_command` attribute is executed. Both check
+methods (active and passive) use the same freshness check method.
+
+In Icinga 2 active check freshness is determined by the `check_interval` attribute: the
+check result is stale if no result arrived within that period of time (last check + check interval).
+Passive check freshness is calculated from the `check_interval` attribute if set. There is no extra
+`freshness_threshold` attribute in Icinga 2. If a check result becomes stale, a new
+service check is forced.
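+
+A minimal sketch of a passively checked service whose freshness is derived from
+`check_interval` (names are illustrative):
+
+```
+object Service "backup-status" {
+  host_name = "web-dev"
+  check_command = "dummy"
+
+  enable_active_checks = false // results are submitted passively
+  check_interval = 1h          // a result is expected at least once per hour
+  retry_interval = 1h
+}
+```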
+
+### Real Reload <a id="differences-1x-2-real-reload"></a>
+
+In Nagios / Icinga 1.x a daemon reload does the following:
+
+* receive reload signal SIGHUP
+* stop all events (checks, notifications, etc.)
+* read the configuration from disk and validate all config objects in a single threaded fashion
+* validation NOT ok: stop the daemon (cannot restore old config state)
+* validation ok: start with new objects, dump status.dat / ido
+
+Unlike Icinga 1.x the Icinga 2 daemon reload does not block any event
+execution during config validation:
+
+* receive reload signal SIGHUP
+* fork a child process, start configuration validation in parallel work queues
+* parent process continues with old configuration objects and the event scheduling
+(doing checks, replicating cluster events, triggering alert notifications, etc.)
+* validation NOT ok: child process terminates, parent process continues with old configuration state
+(this is **essential** for the [cluster config synchronisation](06-distributed-monitoring.md#distributed-monitoring-top-down-config-sync))
+* validation ok: child process signals parent process to terminate and save its current state
+(all events until now) into the icinga2 state file
+* parent process shuts down, writing the icinga2.state file
+* child process waits until the parent process has terminated, reads the icinga2 state file and synchronizes all historical and status data
+* child becomes the new session leader
+
+The DB IDO configuration dump and status/historical event updates use a queue
+and do not block event execution. The same goes for any other enabled feature.
+The configuration validation itself runs in parallel, allowing fast verification checks.
+
+That way your monitoring does not stop during a configuration reload.
+
+
+### State Retention <a id="differences-1x-2-state-retention"></a>
+
+Icinga 1.x uses the `retention.dat` file to save its state in order to be able
+to reload it after a restart. In Icinga 2 this file is called `icinga2.state`.
+
+The format is **not** compatible with Icinga 1.x.
+
+### Logging <a id="differences-1x-2-logging"></a>
+
+Icinga 1.x supports syslog facilities and writes its own `icinga.log` log file
+and archives. These logs are used in Icinga 1.x to generate
+historical reports.
+
+The Icinga 2 compat library provides the `CompatLogger` object, which writes icinga.log and archives
+in the Icinga 1.x format in order to stay compatible with addons.
+
+The native Icinga 2 logging facilities are split into three configuration objects: `SyslogLogger`,
+`FileLogger`, and `StreamLogger`. Each of them has its own severity and target configuration.
+
+The Icinga 2 daemon log does not log any alerts but is considered an application log only.
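+
+For example (the severity values are assumptions about a typical setup):
+
+```
+object SyslogLogger "syslog" {
+  severity = "warning"
+}
+
+object FileLogger "debug-file" {
+  severity = "debug"
+  path = LogDir + "/debug.log"
+}
+```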
+
+### Broker Modules and Features <a id="differences-1x-2-broker-modules-features"></a>
+
+Icinga 1.x broker modules are incompatible with Icinga 2.
+
+In order to provide compatibility with Icinga 1.x the functionality of several
+popular broker modules was implemented for Icinga 2:
+
+* IDOUtils
+* Livestatus
+* Cluster (allows for high availability and load balancing)
+
+
+### Distributed Monitoring <a id="differences-1x-2-distributed-monitoring"></a>
+
+Icinga 1.x uses the native "obsess over host/service" method which requires the NSCA addon
+to pass the slave's check results passively to the master's external command pipe.
+While this method may be used for check load distribution, it does not provide any configuration
+distribution out-of-the-box. Furthermore comments, downtimes, and other stateful runtime data are
+not synced between the master and slave nodes. There are addons available which solve the check
+and configuration distribution problems Icinga 1.x distributed monitoring suffers from.
+
+Icinga 2 implements a new built-in
+[distributed monitoring architecture](06-distributed-monitoring.md#distributed-monitoring-scenarios),
+including config and check distribution, IPv4/IPv6 support, TLS certificates and zone support for DMZ.
+High availability and load balancing are also part of the Icinga 2 cluster feature, alongside local replay
+logs on connection loss which ensure that the event history is kept in sync.
diff --git a/doc/24-appendix.md b/doc/24-appendix.md
new file mode 100644
index 0000000..e0f0b2f
--- /dev/null
+++ b/doc/24-appendix.md
@@ -0,0 +1,695 @@
+# Appendix <a id="appendix"></a>
+
+## External Commands List <a id="external-commands-list-detail"></a>
+
+Additional details can be found in the [Icinga 1.x Documentation](https://docs.icinga.com/latest/en/extcommands2.html).
+
+ Command name | Parameters | Description
+ ------------------------------------------|-----------------------------------|--------------------------
+ PROCESS_HOST_CHECK_RESULT | ;&lt;host_name&gt;;&lt;status_code&gt;;&lt;plugin_output&gt; (3) | -
+ PROCESS_SERVICE_CHECK_RESULT | ;&lt;host_name&gt;;&lt;service_name&gt;;&lt;return_code&gt;;&lt;plugin_output&gt; (4) | -
+ SCHEDULE_HOST_CHECK | ;&lt;host_name&gt;;&lt;check_time&gt; (2) | -
+ SCHEDULE_FORCED_HOST_CHECK | ;&lt;host_name&gt;;&lt;check_time&gt; (2) | -
+ SCHEDULE_SVC_CHECK | ;&lt;host_name&gt;;&lt;service_name&gt;;&lt;check_time&gt; (3) | -
+ SCHEDULE_FORCED_SVC_CHECK | ;&lt;host_name&gt;;&lt;service_name&gt;;&lt;check_time&gt; (3) | -
+ ENABLE_HOST_CHECK | ;&lt;host_name&gt; (1) | -
+ DISABLE_HOST_CHECK | ;&lt;host_name&gt; (1) | -
+ ENABLE_SVC_CHECK | ;&lt;host_name&gt;;&lt;service_name&gt; (2) | -
+ DISABLE_SVC_CHECK | ;&lt;host_name&gt;;&lt;service_name&gt; (2) | -
+ SHUTDOWN_PROCESS | - | -
+ RESTART_PROCESS | - | -
+ SCHEDULE_FORCED_HOST_SVC_CHECKS | ;&lt;host_name&gt;;&lt;check_time&gt; (2) | -
+ SCHEDULE_HOST_SVC_CHECKS | ;&lt;host_name&gt;;&lt;check_time&gt; (2) | -
+ ENABLE_HOST_SVC_CHECKS | ;&lt;host_name&gt; (1) | -
+ DISABLE_HOST_SVC_CHECKS | ;&lt;host_name&gt; (1) | -
+ ACKNOWLEDGE_SVC_PROBLEM | ;&lt;host_name&gt;;&lt;service_name&gt;;&lt;sticky&gt;;&lt;notify&gt;;&lt;persistent&gt;;&lt;author&gt;;&lt;comment&gt; (7) | Note: Icinga 2 treats all comments as persistent.
+ ACKNOWLEDGE_SVC_PROBLEM_EXPIRE | ;&lt;host_name&gt;;&lt;service_name&gt;;&lt;sticky&gt;;&lt;notify&gt;;&lt;persistent&gt;;&lt;timestamp&gt;;&lt;author&gt;;&lt;comment&gt; (8) | Note: Icinga 2 treats all comments as persistent.
+ REMOVE_SVC_ACKNOWLEDGEMENT | ;&lt;host_name&gt;;&lt;service_name&gt; (2) | -
+ ACKNOWLEDGE_HOST_PROBLEM | ;&lt;host_name&gt;;&lt;sticky&gt;;&lt;notify&gt;;&lt;persistent&gt;;&lt;author&gt;;&lt;comment&gt; (6) | Note: Icinga 2 treats all comments as persistent.
+ ACKNOWLEDGE_HOST_PROBLEM_EXPIRE | ;&lt;host_name&gt;;&lt;sticky&gt;;&lt;notify&gt;;&lt;persistent&gt;;&lt;timestamp&gt;;&lt;author&gt;;&lt;comment&gt; (7) | Note: Icinga 2 treats all comments as persistent.
+ REMOVE_HOST_ACKNOWLEDGEMENT | ;&lt;host_name&gt; (1) | -
+ DISABLE_HOST_FLAP_DETECTION | ;&lt;host_name&gt; (1) | -
+ ENABLE_HOST_FLAP_DETECTION | ;&lt;host_name&gt; (1) | -
+ DISABLE_SVC_FLAP_DETECTION | ;&lt;host_name&gt;;&lt;service_name&gt; (2) | -
+ ENABLE_SVC_FLAP_DETECTION | ;&lt;host_name&gt;;&lt;service_name&gt; (2) | -
+ ENABLE_HOSTGROUP_SVC_CHECKS | ;&lt;hostgroup_name&gt; (1) | -
+ DISABLE_HOSTGROUP_SVC_CHECKS | ;&lt;hostgroup_name&gt; (1) | -
+ ENABLE_SERVICEGROUP_SVC_CHECKS | ;&lt;servicegroup_name&gt; (1) | -
+ DISABLE_SERVICEGROUP_SVC_CHECKS | ;&lt;servicegroup_name&gt; (1) | -
+ ENABLE_PASSIVE_HOST_CHECKS | ;&lt;host_name&gt; (1) | -
+ DISABLE_PASSIVE_HOST_CHECKS | ;&lt;host_name&gt; (1) | -
+ ENABLE_PASSIVE_SVC_CHECKS | ;&lt;host_name&gt;;&lt;service_name&gt; (2) | -
+ DISABLE_PASSIVE_SVC_CHECKS | ;&lt;host_name&gt;;&lt;service_name&gt; (2) | -
+ ENABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS | ;&lt;servicegroup_name&gt; (1) | -
+ DISABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS | ;&lt;servicegroup_name&gt; (1) | -
+ ENABLE_HOSTGROUP_PASSIVE_SVC_CHECKS | ;&lt;hostgroup_name&gt; (1) | -
+ DISABLE_HOSTGROUP_PASSIVE_SVC_CHECKS | ;&lt;hostgroup_name&gt; (1) | -
+ PROCESS_FILE | ;&lt;file_name&gt;;&lt;delete&gt; (2) | -
+ SCHEDULE_SVC_DOWNTIME | ;&lt;host_name&gt;;&lt;service_name&gt;;&lt;start_time&gt;;&lt;end_time&gt;;&lt;fixed&gt;;&lt;trigger_id&gt;;&lt;duration&gt;;&lt;author&gt;;&lt;comment&gt; (9) | -
+ DEL_SVC_DOWNTIME | ;&lt;downtime_id&gt; (1) | -
+ SCHEDULE_AND_PROPAGATE_HOST_DOWNTIME | ;&lt;host_name&gt;;&lt;start_time&gt;;&lt;end_time&gt;;&lt;fixed&gt;;&lt;trigger_id&gt;;&lt;duration&gt;;&lt;author&gt;;&lt;comment&gt; (8) | -
+ SCHEDULE_AND_PROPAGATE_TRIGGERED_HOST_DOWNTIME | ;&lt;host_name&gt;;&lt;start_time&gt;;&lt;end_time&gt;;&lt;fixed&gt;;&lt;trigger_id&gt;;&lt;duration&gt;;&lt;author&gt;;&lt;comment&gt; (8) | -
+ SCHEDULE_HOST_DOWNTIME | ;&lt;host_name&gt;;&lt;start_time&gt;;&lt;end_time&gt;;&lt;fixed&gt;;&lt;trigger_id&gt;;&lt;duration&gt;;&lt;author&gt;;&lt;comment&gt; (8) | -
+ DEL_HOST_DOWNTIME | ;&lt;downtime_id&gt; (1) | -
+ DEL_DOWNTIME_BY_HOST_NAME | ;&lt;host_name&gt;[;&lt;service_name;&gt;[;&lt;start_time;&gt;[;&lt;comment_text;&gt;]]] (1) | -
+ SCHEDULE_HOST_SVC_DOWNTIME | ;&lt;host_name&gt;;&lt;start_time&gt;;&lt;end_time&gt;;&lt;fixed&gt;;&lt;trigger_id&gt;;&lt;duration&gt;;&lt;author&gt;;&lt;comment&gt; (8) | -
+ SCHEDULE_HOSTGROUP_HOST_DOWNTIME | ;&lt;hostgroup_name&gt;;&lt;start_time&gt;;&lt;end_time&gt;;&lt;fixed&gt;;&lt;trigger_id&gt;;&lt;duration&gt;;&lt;author&gt;;&lt;comment&gt; (8) | -
+ SCHEDULE_HOSTGROUP_SVC_DOWNTIME | ;&lt;hostgroup_name&gt;;&lt;start_time&gt;;&lt;end_time&gt;;&lt;fixed&gt;;&lt;trigger_id&gt;;&lt;duration&gt;;&lt;author&gt;;&lt;comment&gt; (8) | -
+ SCHEDULE_SERVICEGROUP_HOST_DOWNTIME | ;&lt;servicegroup_name&gt;;&lt;start_time&gt;;&lt;end_time&gt;;&lt;fixed&gt;;&lt;trigger_id&gt;;&lt;duration&gt;;&lt;author&gt;;&lt;comment&gt; (8) | -
+ SCHEDULE_SERVICEGROUP_SVC_DOWNTIME | ;&lt;servicegroup_name&gt;;&lt;start_time&gt;;&lt;end_time&gt;;&lt;fixed&gt;;&lt;trigger_id&gt;;&lt;duration&gt;;&lt;author&gt;;&lt;comment&gt; (8) | -
+ ADD_HOST_COMMENT | ;&lt;host_name&gt;;&lt;persistent&gt;;&lt;author&gt;;&lt;comment&gt; (4) | Note: Icinga 2 treats all comments as persistent.
+ DEL_HOST_COMMENT | ;&lt;comment_id&gt; (1) | -
+ ADD_SVC_COMMENT | ;&lt;host_name&gt;;&lt;service_name&gt;;&lt;persistent&gt;;&lt;author&gt;;&lt;comment&gt; (5) | Note: Icinga 2 treats all comments as persistent.
+ DEL_SVC_COMMENT | ;&lt;comment_id&gt; (1) | -
+ DEL_ALL_HOST_COMMENTS | ;&lt;host_name&gt; (1) | -
+ DEL_ALL_SVC_COMMENTS | ;&lt;host_name&gt;;&lt;service_name&gt; (2) | -
+ SEND_CUSTOM_HOST_NOTIFICATION | ;&lt;host_name&gt;;&lt;options&gt;;&lt;author&gt;;&lt;comment&gt; (4) | -
+ SEND_CUSTOM_SVC_NOTIFICATION | ;&lt;host_name&gt;;&lt;service_name&gt;;&lt;options&gt;;&lt;author&gt;;&lt;comment&gt; (5) | -
+ DELAY_HOST_NOTIFICATION | ;&lt;host_name&gt;;&lt;notification_time&gt; (2) | -
+ DELAY_SVC_NOTIFICATION | ;&lt;host_name&gt;;&lt;service_name&gt;;&lt;notification_time&gt; (3) | -
+ ENABLE_HOST_NOTIFICATIONS | ;&lt;host_name&gt; (1) | -
+ DISABLE_HOST_NOTIFICATIONS | ;&lt;host_name&gt; (1) | -
+ ENABLE_SVC_NOTIFICATIONS | ;&lt;host_name&gt;;&lt;service_name&gt; (2) | -
+ DISABLE_SVC_NOTIFICATIONS | ;&lt;host_name&gt;;&lt;service_name&gt; (2) | -
+ ENABLE_HOST_SVC_NOTIFICATIONS | ;&lt;host_name&gt; (1) | -
+ DISABLE_HOST_SVC_NOTIFICATIONS | ;&lt;host_name&gt; (1) | -
+ DISABLE_HOSTGROUP_HOST_CHECKS | ;&lt;hostgroup_name&gt; (1) | -
+ DISABLE_HOSTGROUP_PASSIVE_HOST_CHECKS | ;&lt;hostgroup_name&gt; (1) | -
+ DISABLE_SERVICEGROUP_HOST_CHECKS | ;&lt;servicegroup_name&gt; (1) | -
+ DISABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS | ;&lt;servicegroup_name&gt; (1) | -
+ ENABLE_HOSTGROUP_HOST_CHECKS | ;&lt;hostgroup_name&gt; (1) | -
+ ENABLE_HOSTGROUP_PASSIVE_HOST_CHECKS | ;&lt;hostgroup_name&gt; (1) | -
+ ENABLE_SERVICEGROUP_HOST_CHECKS | ;&lt;servicegroup_name&gt; (1) | -
+ ENABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS | ;&lt;servicegroup_name&gt; (1) | -
+ ENABLE_NOTIFICATIONS | - | -
+ DISABLE_NOTIFICATIONS | - | -
+ ENABLE_FLAP_DETECTION | - | -
+ DISABLE_FLAP_DETECTION | - | -
+ ENABLE_EVENT_HANDLERS | - | -
+ DISABLE_EVENT_HANDLERS | - | -
+ ENABLE_PERFORMANCE_DATA | - | -
+ DISABLE_PERFORMANCE_DATA | - | -
+ START_EXECUTING_HOST_CHECKS | - | -
+ STOP_EXECUTING_HOST_CHECKS | - | -
+ START_EXECUTING_SVC_CHECKS | - | -
+ STOP_EXECUTING_SVC_CHECKS | - | -
+ CHANGE_NORMAL_SVC_CHECK_INTERVAL | ;&lt;host_name&gt;;&lt;service_name&gt;;&lt;check_interval&gt; (3) | -
+ CHANGE_NORMAL_HOST_CHECK_INTERVAL | ;&lt;host_name&gt;;&lt;check_interval&gt; (2) | -
+ CHANGE_RETRY_SVC_CHECK_INTERVAL | ;&lt;host_name&gt;;&lt;service_name&gt;;&lt;check_interval&gt; (3) | -
+ CHANGE_RETRY_HOST_CHECK_INTERVAL | ;&lt;host_name&gt;;&lt;check_interval&gt; (2) | -
+ ENABLE_HOST_EVENT_HANDLER | ;&lt;host_name&gt; (1) | -
+ DISABLE_HOST_EVENT_HANDLER | ;&lt;host_name&gt; (1) | -
+ ENABLE_SVC_EVENT_HANDLER | ;&lt;host_name&gt;;&lt;service_name&gt; (2) | -
+ DISABLE_SVC_EVENT_HANDLER | ;&lt;host_name&gt;;&lt;service_name&gt; (2) | -
+ CHANGE_HOST_EVENT_HANDLER | ;&lt;host_name&gt;;&lt;event_command_name&gt; (2) | -
+ CHANGE_SVC_EVENT_HANDLER | ;&lt;host_name&gt;;&lt;service_name&gt;;&lt;event_command_name&gt; (3) | -
+ CHANGE_HOST_CHECK_COMMAND | ;&lt;host_name&gt;;&lt;check_command_name&gt; (2) | -
+ CHANGE_SVC_CHECK_COMMAND | ;&lt;host_name&gt;;&lt;service_name&gt;;&lt;check_command_name&gt; (3) | -
+ CHANGE_MAX_HOST_CHECK_ATTEMPTS | ;&lt;host_name&gt;;&lt;check_attempts&gt; (2) | -
+ CHANGE_MAX_SVC_CHECK_ATTEMPTS | ;&lt;host_name&gt;;&lt;service_name&gt;;&lt;check_attempts&gt; (3) | -
+ CHANGE_HOST_CHECK_TIMEPERIOD | ;&lt;host_name&gt;;&lt;timeperiod_name&gt; (2) | -
+ CHANGE_SVC_CHECK_TIMEPERIOD | ;&lt;host_name&gt;;&lt;service_name&gt;;&lt;timeperiod_name&gt; | -
+ CHANGE_CUSTOM_HOST_VAR | ;&lt;host_name&gt;;&lt;var_name&gt;;&lt;var_value&gt; (3) | -
+ CHANGE_CUSTOM_SVC_VAR | ;&lt;host_name&gt;;&lt;service_name&gt;;&lt;var_name&gt;;&lt;var_value&gt; (4) | -
+ CHANGE_CUSTOM_USER_VAR | ;&lt;user_name&gt;;&lt;var_name&gt;;&lt;var_value&gt; (3) | -
+ CHANGE_CUSTOM_CHECKCOMMAND_VAR | ;&lt;check_command_name&gt;;&lt;var_name&gt;;&lt;var_value&gt; (3) | -
+ CHANGE_CUSTOM_EVENTCOMMAND_VAR | ;&lt;event_command_name&gt;;&lt;var_name&gt;;&lt;var_value&gt; (3) | -
+ CHANGE_CUSTOM_NOTIFICATIONCOMMAND_VAR | ;&lt;notification_command_name&gt;;&lt;var_name&gt;;&lt;var_value&gt; (3) | -
+ ENABLE_HOSTGROUP_HOST_NOTIFICATIONS | ;&lt;hostgroup_name&gt; (1) | -
+ ENABLE_HOSTGROUP_SVC_NOTIFICATIONS | ;&lt;hostgroup_name&gt; (1) | -
+ DISABLE_HOSTGROUP_HOST_NOTIFICATIONS | ;&lt;hostgroup_name&gt; (1) | -
+ DISABLE_HOSTGROUP_SVC_NOTIFICATIONS | ;&lt;hostgroup_name&gt; (1) | -
+ ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS | ;&lt;servicegroup_name&gt; (1) | -
+ DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS | ;&lt;servicegroup_name&gt; (1) | -
+ ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS | ;&lt;servicegroup_name&gt; (1) | -
+ DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS | ;&lt;servicegroup_name&gt; (1) | -
+
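+As a usage sketch (assuming the default command pipe path `/var/run/icinga2/cmd/icinga2.cmd`),
+a passive service check result could be submitted like this:
+
+```
+# submit a passive check result via the external command pipe
+printf "[%lu] PROCESS_SERVICE_CHECK_RESULT;web-dev;ping4;0;PING OK\n" "$(date +%s)" \
+  > /var/run/icinga2/cmd/icinga2.cmd
+```
+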
+
+## Schemas <a id="schemas"></a>
+
+By convention `CheckCommand`, `EventCommand`, and `NotificationCommand` objects
+are exported using a prefix. This is mandatory for unique objects in the
+command tables.
+
+Object | Prefix
+------------------------|------------------------
+CheckCommand | check\_
+EventCommand | event\_
+NotificationCommand | notification\_
+
+### DB IDO Schema <a id="schema-db-ido"></a>
+
+There is detailed documentation for the Icinga IDOUtils 1.x
+database schema available at [https://docs.icinga.com/latest/en/db_model.html](https://docs.icinga.com/latest/en/db_model.html).
+
+#### DB IDO Schema Extensions <a id="schema-db-ido-extensions"></a>
+
+Icinga 2 specific extensions are shown below:
+
+New table: `endpoints`
+
+ Table | Column | Type | Default | Description
+ --------------------|--------------------|----------|---------|-------------
+ endpoints | endpoint_object_id | bigint | NULL | FK: objects table
+ endpoints | identity | TEXT | NULL | endpoint name
+ endpoints | node | TEXT | NULL | local node name
+ endpoints | zone_object_id | bigint | NULL | zone object where this endpoint is a member of
+
+New table: `endpointstatus`
+
+ Table | Column | Type | Default | Description
+ --------------------|--------------------|----------|---------|-------------
+ endpointstatus | endpoint_object_id | bigint | NULL | FK: objects table
+ endpointstatus | identity | TEXT | NULL | endpoint name
+ endpointstatus | node | TEXT | NULL | local node name
+ endpointstatus | is_connected | smallint | 0 | update on endpoint connect/disconnect
+ endpointstatus | zone_object_id | bigint | NULL | zone object where this endpoint is a member of
+
+New tables: `zones` and `zonestatus`:
+
+ Table | Column | Type | Default | Description
+ --------------------|--------------------|----------|---------|-------------
+ zones | zone_object_id | bigint | NULL | FK: objects table
+ zones | parent_zone_object_id | bigint | NULL | FK: zones table
+ zones | is_global | smallint | 0 | zone is global
+
+
+New columns:
+
+ Table | Column | Type | Default | Description
+ --------------------|-------------------------|----------|---------|-------------
+ all status/history | endpoint_object_id | bigint | NULL | FK: objects table
+ servicestatus | check_source | TEXT | NULL | node name where check was executed
+ hoststatus | check_source | TEXT | NULL | node name where check was executed
+ statehistory | check_source | TEXT | NULL | node name where check was executed
+ servicestatus | is_reachable | integer | NULL | object reachability
+ hoststatus | is_reachable | integer | NULL | object reachability
+ logentries | object_id | bigint | NULL | FK: objects table (service associated with column)
+ {host,service}group | notes | TEXT | NULL | -
+ {host,service}group | notes_url | TEXT | NULL | -
+ {host,service}group | action_url | TEXT | NULL | -
+ customvariable* | is_json | integer | 0 | Defines whether `varvalue` is a json encoded string from custom variables, or not
+ servicestatus | original_attributes | TEXT | NULL | JSON encoded dictionary of original attributes if modified at runtime.
+ hoststatus | original_attributes | TEXT | NULL | JSON encoded dictionary of original attributes if modified at runtime.
+
+Additional command custom variables are populated from the `vars` dictionary.
+Additional global custom variables are populated from the `Vars` constant (object_id is NULL).
+
+### Livestatus Schema <a id="schema-livestatus"></a>
+
+#### Livestatus Schema Extensions <a id="schema-livestatus-extensions"></a>
+
+Icinga 2 specific extensions are shown below:
+
+New table: `endpoints`:
+
+ Table | Column
+ ----------|--------------
+ endpoints | name
+ endpoints | identity
+ endpoints | node
+ endpoints | is_connected
+ endpoints | zone
+
+New table: `zones`:
+
+ Table | Column
+ ----------|--------------
+ zone | name
+ zone | endpoints
+ zone | parent
+ zone | global
+
+New columns:
+
+ Table | Column
+ ----------|--------------
+ hosts | is_reachable
+ services | is_reachable
+ hosts | cv_is_json
+ services | cv_is_json
+ contacts | cv_is_json
+ hosts | check_source
+ services | check_source
+ downtimes | triggers
+ downtimes | trigger_time
+ commands | custom_variable_names
+ commands | custom_variable_values
+ commands | custom_variables
+ commands | modified_attributes
+ commands | modified_attributes_list
+ status | custom_variable_names
+ status | custom_variable_values
+ status | custom_variables
+ hosts | original_attributes
+ services | original_attributes
+
+Command custom variables reflect the local `vars` dictionary.
+Status custom variables reflect the global `Vars` constant.
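+
+A hedged query sketch against the new `endpoints` table using the Livestatus protocol:
+
+```
+GET endpoints
+Columns: name zone is_connected
+OutputFormat: json
+ResponseHeader: fixed16
+```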
+
+#### Livestatus Hosts Table Attributes <a id="schema-livestatus-hosts-table-attributes"></a>
+
+ Key | Type | Note
+ ----------------------|-----------|-------------------------
+ name | string | .
+ display_name | string | .
+ alias | string | same as display_name.
+ address | string | .
+ address6 | string | NEW in Icinga.
+ check_command | string | .
+ check_command_expanded | string | .
+ event_handler | string | .
+ notification_period | string | host with notifications: period.
+ check_period | string | .
+ notes | string | .
+ notes_expanded | string | .
+ notes_url | string | .
+ notes_url_expanded | string | .
+ action_url | string | .
+ action_url_expanded | string | .
+ plugin_output | string | .
+ perf_data | string | .
+ icon_image | string | .
+ icon_image_expanded | string | .
+ icon_image_alt        | string    | .
+ statusmap_image | string | .
+ long_plugin_output | string | .
+ max_check_attempts | int | .
+ flap_detection_enabled | int | .
+ check_freshness | int | .
+ process_performance_data | int | .
+ accept_passive_checks | int | .
+ event_handler_enabled | int | .
+ acknowledgement_type | int | Only 0 or 1.
+ check_type | int | .
+ last_state | int | .
+ last_hard_state | int | .
+ current_attempt | int | .
+ last_notification | int | host with notifications: last notification.
+ next_notification | int | host with notifications: next notification.
+ next_check | int | .
+ last_hard_state_change | int | .
+ has_been_checked | int | .
+ current_notification_number | int | host with notifications: number.
+ total_services | int | .
+ checks_enabled | int | .
+ notifications_enabled | int | .
+ acknowledged | int | .
+ state | int | .
+ state_type | int | .
+ no_more_notifications | int | notification_interval == 0 && volatile == false.
+ last_check | int | .
+ last_state_change | int | .
+ last_time_up | int | .
+ last_time_down | int | .
+ last_time_unreachable | int | .
+ is_flapping | int | .
+ scheduled_downtime_depth | int | .
+ active_checks_enabled | int | .
+ modified_attributes | array | .
+ modified_attributes_list | array | .
+ check_interval | double | .
+ retry_interval | double | .
+ notification_interval | double | host with notifications: smallest interval.
+ low_flap_threshold | double | flapping_threshold
+ high_flap_threshold | double | flapping_threshold
+ latency | double | .
+ execution_time | double | .
+ percent_state_change | double | flapping.
+ in_notification_period | int | host with notifications: matching period.
+ in_check_period | int | .
+ contacts | array | host with notifications, users and user groups.
+ downtimes | array | id.
+ downtimes_with_info | array | id+author+comment.
+ comments | array | id.
+ comments_with_info | array | id+author+comment.
+ comments_with_extra_info | array | id+author+comment+entry_type+entry_time.
+ custom_variable_names | array | .
+ custom_variable_values | array | .
+ custom_variables | array | Array of custom variable array pair.
+ parents | array | Direct host parents.
+ childs                | array     | Direct host children (Note: `childs` is inherited from the original MK_Livestatus protocol).
+ num_services | int | .
+ worst_service_state | int | All services and their worst state.
+ num_services_ok | int | All services with Ok state.
+ num_services_warn | int | All services with Warning state.
+ num_services_crit | int | All services with Critical state.
+ num_services_unknown | int | All services with Unknown state.
+ worst_service_hard_state | int | All services and their worst hard state.
+ num_services_hard_ok | int | All services in a hard state with Ok state.
+ num_services_hard_warn | int | All services in a hard state with Warning state.
+ num_services_hard_crit | int | All services in a hard state with Critical state.
+ num_services_hard_unknown | int | All services in a hard state with Unknown state.
+ hard_state            | int       | Returns OK if the state is OK. Returns the current state if the state type is hard. Returns the last hard state otherwise.
+ staleness | int | Indicates time since last check normalized onto the check_interval.
+ groups | array | All hostgroups this host is a member of.
+ contact_groups | array | All usergroups associated with this host through notifications.
+ services | array | All services associated with this host.
+ services_with_state | array | All services associated with this host with state and hasbeenchecked.
+ services_with_info | array | All services associated with this host with state, hasbeenchecked and output.
+
+Not supported: `initial_state`, `pending_flex_downtime`, `check_flapping_recovery_notification`,
+`is_executing`, `check_options`, `obsess_over_host`, `first_notification_delay`, `x_3d`, `y_3d`, `z_3d`,
+`x_2d`, `y_2d`, `filename`, `pnpgraph_present`.
+
+#### Livestatus Hostgroups Table Attributes <a id="schema-livestatus-hostgroups-table-attributes"></a>
+
+ Key | Type | Note
+ ----------------------|-----------|-------------------------
+ name | string | .
+ alias | string | `display_name` attribute.
+ notes | string | .
+ notes_url | string | .
+ action_url | string | .
+ members | array | .
+ members_with_state | array | Host name and state.
+ worst_host_state | int | Of all group members.
+ num_hosts | int | In this group.
+ num_hosts_pending | int | .
+ num_hosts_up | int | .
+ num_hosts_down | int | .
+ num_hosts_unreach | int | .
+ num_services | int | Number of services associated with hosts in this hostgroup.
+ worst_services_state | int | .
+ num_services_pending | int | .
+ num_services_ok | int | .
+ num_services_warn | int | .
+ num_services_crit | int | .
+ num_services_unknown | int | .
+ worst_service_hard_state | int | .
+ num_services_hard_ok | int | .
+ num_services_hard_warn | int | .
+ num_services_hard_crit | int | .
+ num_services_hard_unknown | int | .
+
+#### Livestatus Services Table Attributes <a id="schema-livestatus-services-table-attributes"></a>
+
+ Key | Type | Note
+ ----------------------|-----------|-------------------------
+ description | string | .
+ display_name | string | .
+ alias | string | same as display_name.
+ check_command | string | .
+ check_command_expanded | string | .
+ event_handler | string | .
+ notification_period   | string    | service with notifications: period.
+ check_period | string | .
+ notes | string | .
+ notes_expanded | string | .
+ notes_url | string | .
+ notes_url_expanded | string | .
+ action_url | string | .
+ action_url_expanded | string | .
+ plugin_output | string | .
+ perf_data | string | .
+ icon_image | string | .
+ icon_image_expanded | string | .
+ icon_image_alt        | string    | .
+ statusmap_image | string | .
+ long_plugin_output | string | .
+ max_check_attempts | int | .
+ flap_detection_enabled | int | .
+ check_freshness | int | .
+ process_performance_data | int | .
+ accept_passive_checks | int | .
+ event_handler_enabled | int | .
+ acknowledgement_type | int | Only 0 or 1.
+ check_type | int | .
+ last_state | int | .
+ last_hard_state | int | .
+ current_attempt | int | .
+ last_notification | int | service with notifications: last notification.
+ next_notification | int | service with notifications: next notification.
+ next_check | int | .
+ last_hard_state_change | int | .
+ has_been_checked | int | .
+ current_notification_number | int | service with notifications: number.
+ checks_enabled | int | .
+ notifications_enabled | int | .
+ acknowledged | int | .
+ state | int | .
+ state_type | int | .
+ no_more_notifications | int | notification_interval == 0 && volatile == false.
+ last_check | int | .
+ last_state_change | int | .
+ last_time_ok | int | .
+ last_time_warning | int | .
+ last_time_critical | int | .
+ last_time_unknown | int | .
+ is_flapping | int | .
+ scheduled_downtime_depth | int | .
+ active_checks_enabled | int | .
+ modified_attributes | array | .
+ modified_attributes_list | array | .
+ check_interval | double | .
+ retry_interval | double | .
+ notification_interval | double | service with notifications: smallest interval.
+ low_flap_threshold | double | flapping_threshold
+ high_flap_threshold | double | flapping_threshold
+ latency | double | .
+ execution_time | double | .
+ percent_state_change | double | flapping.
+ in_notification_period | int | service with notifications: matching period.
+ in_check_period | int | .
+ contacts | array | service with notifications, users and user groups.
+ downtimes | array | id.
+ downtimes_with_info | array | id+author+comment.
+ comments | array | id.
+ comments_with_info | array | id+author+comment.
+ comments_with_extra_info | array | id+author+comment+entry_type+entry_time.
+ custom_variable_names | array | .
+ custom_variable_values | array | .
+ custom_variables | array | Array of custom variable array pair.
+ hard_state            | int       | Returns OK if the state is OK. Returns the current state if the state type is hard. Returns the last hard state otherwise.
+ staleness | int | Indicates time since last check normalized onto the check_interval.
+ groups                | array     | All servicegroups this service is a member of.
+ contact_groups        | array     | All usergroups associated with this service through notifications.
+ host_ | join | Prefix for attributes from implicit join with hosts table.
+
+Not supported: `initial_state`, `is_executing`, `check_options`, `obsess_over_service`, `first_notification_delay`,
+`pnpgraph_present`.
+
+#### Livestatus Servicegroups Table Attributes <a id="schema-livestatus-servicegroups-table-attributes"></a>
+
+ Key | Type | Note
+ ----------------------|-----------|-------------------------
+ name | string | .
+ alias | string | `display_name` attribute.
+ notes | string | .
+ notes_url | string | .
+ action_url | string | .
+ members | array | CSV format uses `host|service` syntax.
+ members_with_state | array | Host, service, hoststate, servicestate.
+ worst_service_state | int | .
+ num_services | int | .
+ num_services_pending | int | .
+ num_services_ok | int | .
+ num_services_warn | int | .
+ num_services_crit | int | .
+ num_services_unknown | int | .
+ num_services_hard_ok | int | .
+ num_services_hard_warn | int | .
+ num_services_hard_crit | int | .
+ num_services_hard_unknown | int | .
+
+#### Livestatus Contacts Table Attributes <a id="schema-livestatus-contacts-table-attributes"></a>
+
+ Key | Type | Note
+ ----------------------|-----------|-------------------------
+ name | string | .
+ alias | string | `display_name` attribute.
+ email | string | .
+ pager | string | .
+ host_notification_period | string | .
+ service_notification_period | string | .
+ host_notifications_enabled | int | .
+ service_notifications_enabled | int | .
+ in_host_notification_period | int | .
+ in_service_notification_period | int | .
+ custom_variable_names | array | .
+ custom_variable_values | array | .
+ custom_variables | array | Array of customvariable array pairs.
+ modified_attributes | array | .
+ modified_attributes_list | array | .
+
+
+Not supported: `can_submit_commands`.
+
+#### Livestatus Contactgroups Table Attributes <a id="schema-livestatus-contactgroups-table-attributes"></a>
+
+ Key | Type | Note
+ ----------------------|-----------|-------------------------
+ name | string | .
+ alias | string | `display_name` attribute.
+ members | array | .
+
+
+#### Livestatus Commands Table Attributes <a id="schema-livestatus-commands-table-attributes"></a>
+
+ Key | Type | Note
+ ----------------------|-----------|-------------------------
+ name | string | 3 types of commands in Icinga 2.
+ line | string | .
+
+
+#### Livestatus Status Table Attributes <a id="schema-livestatus-status-table-attributes"></a>
+
+ Key | Type | Note
+ ----------------------|-----------|-------------------------
+ connections | int | Since application start.
+ connections_rate | double | .
+ service_checks | int | Since application start.
+ service_checks_rate | double | .
+ host_checks | int | Since application start.
+ host_checks_rate | double | .
+ external_commands | int | Since application start.
+ external_commands_rate | double | .
+ nagios_pid | string | Application PID.
+ enable_notifications | int | .
+ execute_service_checks | int | .
+ accept_passive_service_checks | int | .
+ execute_host_checks | int | .
+ accept_passive_host_checks | int | .
+ enable_event_handlers | int | .
+ check_service_freshness | int | .
+ check_host_freshness | int | .
+ enable_flap_detection | int | .
+ process_performance_data | int | .
+ check_external_commands | int | Always enabled.
+ program_start | int | In seconds.
+ last_command_check | int | Always.
+ interval_length | int | Compatibility mode: 60.
+ num_hosts | int | .
+ num_services | int | .
+ program_version | string | 2.0.
+ livestatus_active_connections | string | .
+
+Not supported: `neb_callbacks`, `neb_callbacks_rate`, `requests`, `requests_rate`, `forks`, `forks_rate`,
+`log_messages`, `log_messages_rate`, `livechecks`, `livechecks_rate`, `livecheck_overflows`,
+`livecheck_overflows_rate`, `obsess_over_services`, `obsess_over_hosts`, `last_log_rotation`,
+`external_command_buffer_slots`, `external_command_buffer_usage`, `external_command_buffer_max`,
+`cached_log_messages`, `livestatus_queued_connections`, `livestatus_threads`.
+
+
+#### Livestatus Comments Table Attributes <a id="schema-livestatus-comments-table-attributes"></a>
+
+ Key | Type | Note
+ ----------------------|-----------|-------------------------
+ author | string | .
+ comment | string | .
+ id | int | legacy_id.
+ entry_time | string | Seconds.
+ type | int | 1=host, 2=service.
+ is_service | int | .
+ persistent | int | Always.
+ source | string | Always external (1).
+ entry_type | int | .
+ expires | int | .
+ expire_time | string | Seconds.
+ service_ | join | Prefix for attributes from implicit join with services table.
+ host_ | join | Prefix for attributes from implicit join with hosts table.
+
+
+#### Livestatus Downtimes Table Attributes <a id="schema-livestatus-downtimes-table-attributes"></a>
+
+ Key | Type | Note
+ ----------------------|-----------|-------------------------
+ author | string | .
+ comment | string | .
+ id | int | legacy_id.
+ entry_time | string | Seconds.
+ type | int | 1=active, 0=pending.
+ is_service | int | .
+ start_time | string | Seconds.
+ end_time | string | Seconds.
+ fixed | int | 0=flexible, 1=fixed.
+ duration | int | .
+ triggered_by | int | legacy_id.
+ triggers | int | NEW in Icinga 2.
+ trigger_time | string | NEW in Icinga 2.
+ service_ | join | Prefix for attributes from implicit join with services table.
+ host_ | join | Prefix for attributes from implicit join with hosts table.
+
+
+#### Livestatus Timeperiods Table Attributes <a id="schema-livestatus-timeperiods-table-attributes"></a>
+
+ Key | Type | Note
+ ----------------------|-----------|-------------------------
+ name | string | .
+ alias | string | `display_name` attribute.
+ in | int | Current time is in timeperiod or not.
+
+#### Livestatus Log Table Attributes <a id="schema-livestatus-log-table-attributes"></a>
+
+ Key | Type | Note
+ ----------------------|-----------|-------------------------
+ time | int | Time of log event (unix timestamp).
+ lineno | int | Line number in `CompatLogger` log file.
+ class | int | Log message class: 0=info, 1=state, 2=program, 3=notification, 4=passive, 5=command.
+ message | string | Complete message line.
+ type | string | Text before the colon `:`.
+ options | string | Text after the colon `:`.
+ comment | string | Comment if available.
+ plugin_output | string | Check output if available.
+ state | int | Host or service state.
+ state_type | int | State type if available.
+ attempt | int | Current check attempt.
+ service_description | string | .
+ host_name | string | .
+ contact_name | string | .
+ command_name | string | .
+ current_service_ | join | Prefix for attributes from implicit join with services table.
+ current_host_ | join | Prefix for attributes from implicit join with hosts table.
+ current_contact_ | join | Prefix for attributes from implicit join with contacts table.
+ current_command_ | join | Prefix for attributes from implicit join with commands table.
+
+#### Livestatus Statehist Table Attributes <a id="schema-livestatus-statehist-table-attributes"></a>
+
+ Key | Type | Note
+ ----------------------|-----------|-------------------------
+ time | int | Time of log event (unix timestamp).
+ lineno | int | Line number in `CompatLogger` log file.
+ from | int | Start timestamp (unix timestamp).
+ until | int | End timestamp (unix timestamp).
+ duration | int | until-from.
+ duration_part | double | duration / query_part.
+ state | int | State: 0=ok, 1=warn, 2=crit, 3=unknown, -1=notmonitored.
+ host_down | int | Host associated with the service is down or not.
+ in_downtime | int | Host/service is in downtime.
+ in_host_downtime | int | Host associated with the service is in a downtime or not.
+ is_flapping | int | Host/service is flapping.
+ in_notification_period | int | Host/service notification periods match or not.
+ notification_period | string | Host/service notification period.
+ host_name | string | .
+ service_description | string | .
+ log_output | string | Log file output for this state.
+ duration_ok | int | until-from for OK state.
+ duration_part_ok | double | .
+ duration_warning | int | until-from for Warning state.
+ duration_part_warning | double | .
+ duration_critical | int | until-from for Critical state.
+ duration_part_critical | double | .
+ duration_unknown | int | until-from for Unknown state.
+ duration_part_unknown | double | .
+ duration_unmonitored | int | until-from for Not-Monitored state.
+ duration_part_unmonitored | double | .
+ current_service_ | join | Prefix for attributes from implicit join with services table.
+ current_host_ | join | Prefix for attributes from implicit join with hosts table.
+
+Not supported: `debug_info`.
+
+#### Livestatus Hostsbygroup Table Attributes <a id="schema-livestatus-hostsbygroup-table-attributes"></a>
+
+All [hosts](24-appendix.md#schema-livestatus-hosts-table-attributes) table attributes grouped with
+the [hostgroups](24-appendix.md#schema-livestatus-hostgroups-table-attributes) table prefixed with `hostgroup_`.
+
+#### Livestatus Servicesbygroup Table Attributes <a id="schema-livestatus-servicesbygroup-table-attributes"></a>
+
+All [services](24-appendix.md#schema-livestatus-services-table-attributes) table attributes grouped with
+the [servicegroups](24-appendix.md#schema-livestatus-servicegroups-table-attributes) table prefixed with `servicegroup_`.
+
+#### Livestatus Servicesbyhostgroup Table Attributes <a id="schema-livestatus-servicesbyhostgroup-table-attributes"></a>
+
+All [services](24-appendix.md#schema-livestatus-services-table-attributes) table attributes grouped with
+the [hostgroups](24-appendix.md#schema-livestatus-hostgroups-table-attributes) table prefixed with `hostgroup_`.
+
diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt
new file mode 100644
index 0000000..3be5b58
--- /dev/null
+++ b/doc/CMakeLists.txt
@@ -0,0 +1,20 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+file(GLOB DOCSRCS "*.md")
+
+if(UNIX OR CYGWIN)
+ install(
+ FILES icinga2.8
+ DESTINATION ${CMAKE_INSTALL_MANDIR}/man8
+ )
+endif()
+
+install(
+ FILES ${DOCSRCS}
+ DESTINATION ${CMAKE_INSTALL_DOCDIR}/markdown
+)
+
+install(
+ DIRECTORY images
+ DESTINATION ${CMAKE_INSTALL_DOCDIR}/markdown
+)
diff --git a/doc/icinga2.8 b/doc/icinga2.8
new file mode 100644
index 0000000..dfc062f
--- /dev/null
+++ b/doc/icinga2.8
@@ -0,0 +1,99 @@
+.TH ICINGA2 "8" "October 2015" "icinga2 - The Icinga 2 network monitoring daemon"
+.SH NAME
+icinga2 \- The Icinga 2 network monitoring daemon
+
+.SH SYNOPSIS
+.B icinga2
+.I command
+[
+.I command options
+][
+.I global options
+]
+
+.I command
+:= [
+.B api | ca | console | daemon | feature | node | object | pki | variable
+]
+.B --help
+
+.SH DESCRIPTION
+
+Icinga 2 is an open source monitoring system which checks the availability of your network resources, notifies users of outages, and generates performance data for reporting.
+
+Scalable and extensible, Icinga 2 can monitor large, complex environments across multiple locations.
+
+.SH OPTIONS
+Details for specific command options can be viewed by invoking the command name with
+.B --help
+parameter.
+
+.SS Global options
+.TP
+.B -h,--help
+Show this help message.
+.TP
+.B -V,--version
+Show version information.
+.TP
+.B --color
+Use VT100 color codes even when stdout is not a terminal.
+.TP
+.BI "-D, --define" " arg"
+Define a constant.
+.TP
+.BI "-l, --library" " arg"
+Load a library.
+.TP
+.BI "-I, --include" " arg"
+Add include search directory.
+.TP
+.BI "-x, --log-level" " [ debug | notice | information | warning | critical ]"
+Specify the log level for the console log, default is
+.B information.
+.TP
+.BI "-X, --script-debugger"
+Enables the script debugger. When an exception occurs or the 'debugger' keyword
+is encountered in a user script, Icinga 2 launches the script debugger that
+allows the user to debug the script.
+
+.SS daemon options
+The CLI command daemon provides the functionality to start/stop Icinga 2.
+Furthermore, it provides configuration validation.
+
+.TP
+.BI "-c, --config" " arg"
+Using this option you can specify one or more configuration files.
+Config files are processed in the order they are specified on the command-line.
+
+When no configuration file is specified and the
+.B --no-config
+is not used, Icinga 2 automatically falls back to using the configuration file
+.B ConfigDir + "/icinga2.conf"
+(where ConfigDir is usually
+.BI "/etc/icinga2" ")."
+
+.TP
+.B "-z, --no-config"
+Start without a configuration file.
+.TP
+.B "-C, --validate"
+This option can be used to check if your configuration files contain errors.
+If any errors are found the exit status is 1, otherwise 0 is returned.
+.TP
+.BI "-e, --errorlog" " arg"
+Log fatal errors to the specified log file (only works in combination with
+.BR "--daemonize" ")."
+.TP
+.B "-d, --daemonize"
+Detach from the controlling terminal.
+.SH "REPORTING BUGS"
+Report bugs at <https://github.com/Icinga/icinga2>
+.br
+Icinga home page: <https://icinga.com/>
+.SH COPYRIGHT
+Copyright \(co 2012 Icinga GmbH
+License GPLv2+: GNU GPL version 2 or later <https://gnu.org/licenses/gpl2.html>
+.br
+This is free software: you are free to change and redistribute it.
+There is NO WARRANTY, to the extent permitted by law.
diff --git a/doc/images/addons/dashing_icinga2.png b/doc/images/addons/dashing_icinga2.png
new file mode 100644
index 0000000..d3e0e42
--- /dev/null
+++ b/doc/images/addons/dashing_icinga2.png
Binary files differ
diff --git a/doc/images/addons/icinga_certificate_monitoring.png b/doc/images/addons/icinga_certificate_monitoring.png
new file mode 100644
index 0000000..d1be34b
--- /dev/null
+++ b/doc/images/addons/icinga_certificate_monitoring.png
Binary files differ
diff --git a/doc/images/addons/icinga_reporting.png b/doc/images/addons/icinga_reporting.png
new file mode 100644
index 0000000..4b561a3
--- /dev/null
+++ b/doc/images/addons/icinga_reporting.png
Binary files differ
diff --git a/doc/images/addons/icingaweb2_businessprocess.png b/doc/images/addons/icingaweb2_businessprocess.png
new file mode 100644
index 0000000..7824ded
--- /dev/null
+++ b/doc/images/addons/icingaweb2_businessprocess.png
Binary files differ
diff --git a/doc/images/addons/icingaweb2_grafana.png b/doc/images/addons/icingaweb2_grafana.png
new file mode 100644
index 0000000..0861543
--- /dev/null
+++ b/doc/images/addons/icingaweb2_grafana.png
Binary files differ
diff --git a/doc/images/addons/icingaweb2_graphite.png b/doc/images/addons/icingaweb2_graphite.png
new file mode 100644
index 0000000..4147ba5
--- /dev/null
+++ b/doc/images/addons/icingaweb2_graphite.png
Binary files differ
diff --git a/doc/images/addons/icingaweb2_maps.png b/doc/images/addons/icingaweb2_maps.png
new file mode 100644
index 0000000..5564eda
--- /dev/null
+++ b/doc/images/addons/icingaweb2_maps.png
Binary files differ
diff --git a/doc/images/addons/nano-syntax.png b/doc/images/addons/nano-syntax.png
new file mode 100644
index 0000000..d89b2af
--- /dev/null
+++ b/doc/images/addons/nano-syntax.png
Binary files differ
diff --git a/doc/images/addons/vim-syntax.png b/doc/images/addons/vim-syntax.png
new file mode 100644
index 0000000..ebe116f
--- /dev/null
+++ b/doc/images/addons/vim-syntax.png
Binary files differ
diff --git a/doc/images/advanced-topics/flapping-state-graph.png b/doc/images/advanced-topics/flapping-state-graph.png
new file mode 100644
index 0000000..2f78057
--- /dev/null
+++ b/doc/images/advanced-topics/flapping-state-graph.png
Binary files differ
diff --git a/doc/images/advanced-topics/icinga2_external_checks_freshness_icingaweb2.png b/doc/images/advanced-topics/icinga2_external_checks_freshness_icingaweb2.png
new file mode 100644
index 0000000..b46db68
--- /dev/null
+++ b/doc/images/advanced-topics/icinga2_external_checks_freshness_icingaweb2.png
Binary files differ
diff --git a/doc/images/advanced-topics/icingaweb2_downtime_handled.png b/doc/images/advanced-topics/icingaweb2_downtime_handled.png
new file mode 100644
index 0000000..3fe2690
--- /dev/null
+++ b/doc/images/advanced-topics/icingaweb2_downtime_handled.png
Binary files differ
diff --git a/doc/images/api/icinga2_api_powershell_ise.png b/doc/images/api/icinga2_api_powershell_ise.png
new file mode 100644
index 0000000..41acbdd
--- /dev/null
+++ b/doc/images/api/icinga2_api_powershell_ise.png
Binary files differ
diff --git a/doc/images/configuration/icinga_web_local_server.png b/doc/images/configuration/icinga_web_local_server.png
new file mode 100644
index 0000000..3dac92d
--- /dev/null
+++ b/doc/images/configuration/icinga_web_local_server.png
Binary files differ
diff --git a/doc/images/development/windows_boost_build_dev_cmd.png b/doc/images/development/windows_boost_build_dev_cmd.png
new file mode 100644
index 0000000..1a3c30c
--- /dev/null
+++ b/doc/images/development/windows_boost_build_dev_cmd.png
Binary files differ
diff --git a/doc/images/development/windows_builds_gitlab_pipeline.png b/doc/images/development/windows_builds_gitlab_pipeline.png
new file mode 100644
index 0000000..8110c53
--- /dev/null
+++ b/doc/images/development/windows_builds_gitlab_pipeline.png
Binary files differ
diff --git a/doc/images/development/windows_powershell_posh_git.png b/doc/images/development/windows_powershell_posh_git.png
new file mode 100644
index 0000000..48014a4
--- /dev/null
+++ b/doc/images/development/windows_powershell_posh_git.png
Binary files differ
diff --git a/doc/images/development/windows_visual_studio_installer_01.png b/doc/images/development/windows_visual_studio_installer_01.png
new file mode 100644
index 0000000..a8cb449
--- /dev/null
+++ b/doc/images/development/windows_visual_studio_installer_01.png
Binary files differ
diff --git a/doc/images/development/windows_visual_studio_installer_02.png b/doc/images/development/windows_visual_studio_installer_02.png
new file mode 100644
index 0000000..0369970
--- /dev/null
+++ b/doc/images/development/windows_visual_studio_installer_02.png
Binary files differ
diff --git a/doc/images/development/windows_visual_studio_installer_03.png b/doc/images/development/windows_visual_studio_installer_03.png
new file mode 100644
index 0000000..c29f57f
--- /dev/null
+++ b/doc/images/development/windows_visual_studio_installer_03.png
Binary files differ
diff --git a/doc/images/development/windows_visual_studio_tabs_c++.png b/doc/images/development/windows_visual_studio_tabs_c++.png
new file mode 100644
index 0000000..d511469
--- /dev/null
+++ b/doc/images/development/windows_visual_studio_tabs_c++.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_distributed_monitoring_agent_checks_command_endpoint.png b/doc/images/distributed-monitoring/icinga2_distributed_monitoring_agent_checks_command_endpoint.png
new file mode 100644
index 0000000..d55278e
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_distributed_monitoring_agent_checks_command_endpoint.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_distributed_monitoring_endpoints.png b/doc/images/distributed-monitoring/icinga2_distributed_monitoring_endpoints.png
new file mode 100644
index 0000000..aa37f60
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_distributed_monitoring_endpoints.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_distributed_monitoring_roles.png b/doc/images/distributed-monitoring/icinga2_distributed_monitoring_roles.png
new file mode 100644
index 0000000..d9018f8
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_distributed_monitoring_roles.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_distributed_monitoring_satellite_config_sync.png b/doc/images/distributed-monitoring/icinga2_distributed_monitoring_satellite_config_sync.png
new file mode 100644
index 0000000..92dcda9
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_distributed_monitoring_satellite_config_sync.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_distributed_monitoring_scenario_ha_masters_with_agents.png b/doc/images/distributed-monitoring/icinga2_distributed_monitoring_scenario_ha_masters_with_agents.png
new file mode 100644
index 0000000..c45df2c
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_distributed_monitoring_scenario_ha_masters_with_agents.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_distributed_monitoring_scenarios_master_satellites_agents.png b/doc/images/distributed-monitoring/icinga2_distributed_monitoring_scenarios_master_satellites_agents.png
new file mode 100644
index 0000000..8535993
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_distributed_monitoring_scenarios_master_satellites_agents.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_distributed_monitoring_scenarios_master_with_agents.png b/doc/images/distributed-monitoring/icinga2_distributed_monitoring_scenarios_master_with_agents.png
new file mode 100644
index 0000000..fe7ac4d
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_distributed_monitoring_scenarios_master_with_agents.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_distributed_monitoring_zones.png b/doc/images/distributed-monitoring/icinga2_distributed_monitoring_zones.png
new file mode 100644
index 0000000..84b42f8
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_distributed_monitoring_zones.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_distributed_windows_client_disk_icingaweb2.png b/doc/images/distributed-monitoring/icinga2_distributed_windows_client_disk_icingaweb2.png
new file mode 100644
index 0000000..de13ad7
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_distributed_windows_client_disk_icingaweb2.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_distributed_windows_nscp_api_drivesize_icingaweb2.png b/doc/images/distributed-monitoring/icinga2_distributed_windows_nscp_api_drivesize_icingaweb2.png
new file mode 100644
index 0000000..9409025
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_distributed_windows_nscp_api_drivesize_icingaweb2.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_distributed_windows_nscp_counter_icingaweb2.png b/doc/images/distributed-monitoring/icinga2_distributed_windows_nscp_counter_icingaweb2.png
new file mode 100644
index 0000000..a7383e2
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_distributed_windows_nscp_counter_icingaweb2.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_windows_running_service.png b/doc/images/distributed-monitoring/icinga2_windows_running_service.png
new file mode 100644
index 0000000..53b851b
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_windows_running_service.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_windows_setup_installer_01.png b/doc/images/distributed-monitoring/icinga2_windows_setup_installer_01.png
new file mode 100644
index 0000000..8460dc6
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_windows_setup_installer_01.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_windows_setup_installer_02.png b/doc/images/distributed-monitoring/icinga2_windows_setup_installer_02.png
new file mode 100644
index 0000000..476b6d2
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_windows_setup_installer_02.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_windows_setup_installer_03.png b/doc/images/distributed-monitoring/icinga2_windows_setup_installer_03.png
new file mode 100644
index 0000000..35aad83
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_windows_setup_installer_03.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_windows_setup_installer_04.png b/doc/images/distributed-monitoring/icinga2_windows_setup_installer_04.png
new file mode 100644
index 0000000..4d314e6
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_windows_setup_installer_04.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_windows_setup_installer_05.png b/doc/images/distributed-monitoring/icinga2_windows_setup_installer_05.png
new file mode 100644
index 0000000..7b2c3d8
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_windows_setup_installer_05.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_01.png b/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_01.png
new file mode 100644
index 0000000..1b7a1f1
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_01.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_02.png b/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_02.png
new file mode 100644
index 0000000..73f5559
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_02.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_02_global_zone.png b/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_02_global_zone.png
new file mode 100644
index 0000000..1b4ad27
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_02_global_zone.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_03.png b/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_03.png
new file mode 100644
index 0000000..ab07134
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_03.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_04.png b/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_04.png
new file mode 100644
index 0000000..9f1a5d0
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_04.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_06_finish_no_ticket.png b/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_06_finish_no_ticket.png
new file mode 100644
index 0000000..1c91f2d
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_06_finish_no_ticket.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_06_finish_with_ticket.png b/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_06_finish_with_ticket.png
new file mode 100644
index 0000000..3d60237
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_06_finish_with_ticket.png
Binary files differ
diff --git a/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_examine_config.png b/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_examine_config.png
new file mode 100644
index 0000000..f568776
--- /dev/null
+++ b/doc/images/distributed-monitoring/icinga2_windows_setup_wizard_examine_config.png
Binary files differ
diff --git a/doc/images/icingadb/icingadb-architecture.png b/doc/images/icingadb/icingadb-architecture.png
new file mode 100644
index 0000000..3d55ff7
--- /dev/null
+++ b/doc/images/icingadb/icingadb-architecture.png
Binary files differ
diff --git a/doc/images/icingadb/icingadb-daemon.png b/doc/images/icingadb/icingadb-daemon.png
new file mode 100644
index 0000000..de3f4c7
--- /dev/null
+++ b/doc/images/icingadb/icingadb-daemon.png
Binary files differ
diff --git a/doc/images/icingadb/icingadb-icinga2.png b/doc/images/icingadb/icingadb-icinga2.png
new file mode 100644
index 0000000..7b7aafa
--- /dev/null
+++ b/doc/images/icingadb/icingadb-icinga2.png
Binary files differ
diff --git a/doc/images/icingadb/icingadb-redis.png b/doc/images/icingadb/icingadb-redis.png
new file mode 100644
index 0000000..d6eafab
--- /dev/null
+++ b/doc/images/icingadb/icingadb-redis.png
Binary files differ
diff --git a/doc/scroll.js b/doc/scroll.js
new file mode 100644
index 0000000..bad2ef6
--- /dev/null
+++ b/doc/scroll.js
@@ -0,0 +1,16 @@
+$(document).ready(function() {
+
+ $('a[href^="#"]').on('click',function (e) {
+ e.preventDefault();
+
+ var target = this.hash;
+ var $target = $(target);
+
+ $('html, body').stop().animate({
+ 'scrollTop': $target.offset().top
+ }, 900, 'swing', function () {
+ window.location.hash = target;
+ });
+ });
+
+});
diff --git a/doc/update-links.py b/doc/update-links.py
new file mode 100755
index 0000000..765d4a0
--- /dev/null
+++ b/doc/update-links.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+import os
+import sys
+import re
+
+if len(sys.argv) < 2:
+ print "Syntax: %s <md-files>" % sys.argv[0]
+ print ""
+ print "Updates inter-chapter links in the specified Markdown files."
+ sys.exit(1)
+
+anchors = {}
+
+for file in sys.argv[1:]:
+ text = open(file).read()
+ for match in re.finditer(r"<a id=\"(?P<id>.*?)\">", text):
+ id = match.group("id")
+
+ if id in anchors:
+ print "Error: Anchor '%s' is used multiple times: in %s and %s" % (id, file, anchors[id])
+
+ anchors[match.group("id")] = file
+
+def update_anchor(match):
+ id = match.group("id")
+
+ try:
+ file = os.path.basename(anchors[id])
+ except KeyError:
+ print "Error: Unmatched anchor: %s" % (id)
+ file = ""
+
+ return "[%s](%s#%s)" % (match.group("text"), file, id)
+
+for file in sys.argv[1:]:
+ text = open(file).read()
+ print "> Processing file '%s'..." % (file)
+ new_text = re.sub(r"\[(?P<text>.*?)\]\((?P<file>[0-9-a-z\.]+)?#(?P<id>[^#\)]+)\)", update_anchor, text)
+ open(file, "w").write(new_text)
diff --git a/doc/win-dev.ps1 b/doc/win-dev.ps1
new file mode 100644
index 0000000..8077928
--- /dev/null
+++ b/doc/win-dev.ps1
@@ -0,0 +1,99 @@
+Set-PSDebug -Trace 1
+
+Set-StrictMode -Version Latest
+$ErrorActionPreference = 'Stop'
+$PSDefaultParameterValues['*:ErrorAction'] = 'Stop'
+
+function ThrowOnNativeFailure {
+ if (-not $?) {
+ throw 'Native failure'
+ }
+}
+
+
+$VsVersion = 2019
+$MsvcVersion = '14.2'
+$BoostVersion = @(1, 84, 0)
+$OpensslVersion = '3_0_12'
+
+switch ($Env:BITS) {
+ 32 { }
+ 64 { }
+ default {
+ $Env:BITS = 64
+ }
+}
+
+
+function Install-Exe {
+ param (
+ [string]$Url,
+ [string]$Dir
+ )
+
+ $TempDir = Join-Path ([System.IO.Path]::GetTempPath()) ([System.Guid]::NewGuid().Guid)
+ $ExeFile = Join-Path $TempDir inst.exe
+
+ New-Item -ItemType Directory -Path $TempDir
+
+ for ($trial = 1;; ++$trial) {
+ try {
+ Invoke-WebRequest -Uri $Url -OutFile $ExeFile -UseBasicParsing
+ } catch {
+ if ($trial -ge 2) {
+ throw
+ }
+
+ continue
+ }
+
+ break
+ }
+
+ Start-Process -Wait -FilePath $ExeFile -ArgumentList @('/VERYSILENT', '/INSTALL', '/PASSIVE', '/NORESTART', "/DIR=${Dir}")
+ ThrowOnNativeFailure
+
+ Remove-Item -Recurse -Path $TempDir
+}
+
+
+try {
+ Get-Command choco
+} catch {
+ Invoke-Expression (New-Object Net.WebClient).DownloadString('https://chocolatey.org/install.ps1')
+ ThrowOnNativeFailure
+
+ $RegEnv = 'Registry::HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\Environment'
+ $ChocoPath = ";$(Join-Path $Env:AllUsersProfile chocolatey\bin)"
+
+ Set-ItemProperty -Path $RegEnv -Name Path -Value ((Get-ItemProperty -Path $RegEnv -Name Path).Path + $ChocoPath)
+ $Env:Path += $ChocoPath
+}
+
+# GitHub Actions uses an image that comes with most dependencies preinstalled. Don't install them twice.
+if (-not $Env:GITHUB_ACTIONS) {
+ choco install -y `
+ "visualstudio${VsVersion}community" `
+ "visualstudio${VsVersion}-workload-netcoretools" `
+ "visualstudio${VsVersion}-workload-vctools" `
+ "visualstudio${VsVersion}-workload-manageddesktop" `
+ "visualstudio${VsVersion}-workload-nativedesktop" `
+ "visualstudio${VsVersion}-workload-universal" `
+ "visualstudio${VsVersion}buildtools" `
+ git `
+ cmake `
+ winflexbison3 `
+ windows-sdk-8.1 `
+ wixtoolset
+ ThrowOnNativeFailure
+} else {
+ choco install -y winflexbison3
+ ThrowOnNativeFailure
+}
+
+# Disable the progress bar for downloads from the Web, which will speed up the entire download process
+$Global:ProgressPreference = 'SilentlyContinue';
+
+Install-Exe -Url "https://packages.icinga.com/windows/dependencies/boost_$($BoostVersion -join '_')-msvc-${MsvcVersion}-${Env:BITS}.exe" -Dir "C:\local\boost_$($BoostVersion -join '_')-Win${Env:BITS}"
+
+Install-Exe -Url "https://packages.icinga.com/windows/dependencies/Win${Env:BITS}OpenSSL-${OpensslVersion}.exe" -Dir "C:\local\OpenSSL_${OpensslVersion}-Win${Env:BITS}"
diff --git a/etc/CMakeLists.txt b/etc/CMakeLists.txt
new file mode 100644
index 0000000..40e181a
--- /dev/null
+++ b/etc/CMakeLists.txt
@@ -0,0 +1,69 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+if(NOT WIN32)
+ configure_file(icinga2/constants.conf.cmake ${CMAKE_CURRENT_BINARY_DIR}/icinga2/constants.conf @ONLY)
+endif()
+
+if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
+ configure_file(logrotate.d/icinga2.cmake ${CMAKE_CURRENT_BINARY_DIR}/logrotate.d/icinga2 @ONLY)
+endif()
+
+if(NOT WIN32)
+ install_if_not_exists(${CMAKE_CURRENT_BINARY_DIR}/icinga2/constants.conf ${ICINGA2_CONFIGDIR})
+ install_if_not_exists(icinga2/icinga2.conf ${ICINGA2_CONFIGDIR})
+else()
+ install_if_not_exists(${CMAKE_CURRENT_SOURCE_DIR}/icinga2/win32/constants.conf ${ICINGA2_CONFIGDIR})
+ install_if_not_exists(icinga2/win32/icinga2.conf ${ICINGA2_CONFIGDIR})
+endif()
+
+install_if_not_exists(icinga2/zones.conf ${ICINGA2_CONFIGDIR})
+install_if_not_exists(icinga2/conf.d/app.conf ${ICINGA2_CONFIGDIR}/conf.d)
+install_if_not_exists(icinga2/conf.d/commands.conf ${ICINGA2_CONFIGDIR}/conf.d)
+install_if_not_exists(icinga2/conf.d/downtimes.conf ${ICINGA2_CONFIGDIR}/conf.d)
+install_if_not_exists(icinga2/conf.d/groups.conf ${ICINGA2_CONFIGDIR}/conf.d)
+
+if(NOT WIN32)
+ install_if_not_exists(icinga2/conf.d/hosts.conf ${ICINGA2_CONFIGDIR}/conf.d)
+ install_if_not_exists(icinga2/conf.d/services.conf ${ICINGA2_CONFIGDIR}/conf.d)
+else()
+ install_if_not_exists(icinga2/conf.d/win32/hosts.conf ${ICINGA2_CONFIGDIR}/conf.d)
+ install_if_not_exists(icinga2/conf.d/win32/services.conf ${ICINGA2_CONFIGDIR}/conf.d)
+endif()
+
+install_if_not_exists(icinga2/conf.d/notifications.conf ${ICINGA2_CONFIGDIR}/conf.d)
+install_if_not_exists(icinga2/conf.d/templates.conf ${ICINGA2_CONFIGDIR}/conf.d)
+install_if_not_exists(icinga2/conf.d/timeperiods.conf ${ICINGA2_CONFIGDIR}/conf.d)
+install_if_not_exists(icinga2/conf.d/users.conf ${ICINGA2_CONFIGDIR}/conf.d)
+install_if_not_exists(icinga2/features-available/api.conf ${ICINGA2_CONFIGDIR}/features-available)
+install_if_not_exists(icinga2/features-available/debuglog.conf ${ICINGA2_CONFIGDIR}/features-available)
+install_if_not_exists(icinga2/features-available/mainlog.conf ${ICINGA2_CONFIGDIR}/features-available)
+if(NOT WIN32)
+ install_if_not_exists(icinga2/features-available/syslog.conf ${ICINGA2_CONFIGDIR}/features-available)
+ if(HAVE_SYSTEMD)
+ install_if_not_exists(icinga2/features-available/journald.conf ${ICINGA2_CONFIGDIR}/features-available)
+ endif()
+else()
+ install_if_not_exists(icinga2/features-available/windowseventlog.conf ${ICINGA2_CONFIGDIR}/features-available)
+endif()
+install_if_not_exists(icinga2/scripts/mail-host-notification.sh ${ICINGA2_CONFIGDIR}/scripts)
+install_if_not_exists(icinga2/scripts/mail-service-notification.sh ${ICINGA2_CONFIGDIR}/scripts)
+install_if_not_exists(icinga2/zones.d/README ${ICINGA2_CONFIGDIR}/zones.d)
+
+if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
+ install_if_not_exists(${CMAKE_CURRENT_BINARY_DIR}/logrotate.d/icinga2 ${LOGROTATE_DIR})
+endif()
+
+if(NOT WIN32)
+ install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_CONFIGDIR}/features-enabled\")")
+ install(CODE "execute_process(COMMAND \"${CMAKE_COMMAND}\" -E create_symlink ../features-available/mainlog.conf \"\$ENV{DESTDIR}${ICINGA2_FULL_CONFIGDIR}/features-enabled/mainlog.conf\")")
+
+ install(FILES bash_completion.d/icinga2 DESTINATION ${BASHCOMPLETION_DIR})
+else()
+ install_if_not_exists(icinga2/features-enabled/windowseventlog.conf ${ICINGA2_CONFIGDIR}/features-enabled)
+endif()
+
+if(${CMAKE_SYSTEM_NAME} MATCHES "(Linux|Solaris|SunOS)")
+ add_subdirectory(initsystem)
+endif()
+
+set(CPACK_NSIS_EXTRA_INSTALL_COMMANDS "${CPACK_NSIS_EXTRA_INSTALL_COMMANDS}" PARENT_SCOPE)
diff --git a/etc/bash_completion.d/icinga2 b/etc/bash_completion.d/icinga2
new file mode 100644
index 0000000..5c49ab0
--- /dev/null
+++ b/etc/bash_completion.d/icinga2
@@ -0,0 +1,17 @@
+_icinga2()
+{
+ local cur opts
+ opts="${COMP_WORDS[*]}"
+ cur="${COMP_WORDS[COMP_CWORD]}"
+ COMPREPLY=($(icinga2 --autocomplete $COMP_CWORD ${COMP_WORDS[*]} < /dev/null))
+ case $COMPREPLY in
+ */|*=)
+ compopt -o nospace
+ ;;
+ esac
+
+ return 0
+}
+
+complete -F _icinga2 icinga2
+
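A short sketch of how the completion hooks in once this file is installed (the path below follows the BASHCOMPLETION_DIR install target and may differ on your system):

    # load the completion in the current shell
    source /etc/bash_completion.d/icinga2
    # <TAB> after a partial command now queries `icinga2 --autocomplete`,
    # e.g. "icinga2 fea<TAB>" completes to "icinga2 feature"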
diff --git a/etc/icinga2/conf.d/app.conf b/etc/icinga2/conf.d/app.conf
new file mode 100644
index 0000000..3e4be0d
--- /dev/null
+++ b/etc/icinga2/conf.d/app.conf
@@ -0,0 +1 @@
+object IcingaApplication "app" { }
diff --git a/etc/icinga2/conf.d/commands.conf b/etc/icinga2/conf.d/commands.conf
new file mode 100644
index 0000000..e7d555c
--- /dev/null
+++ b/etc/icinga2/conf.d/commands.conf
@@ -0,0 +1,196 @@
+/* Command objects */
+
+/* Notification Commands
+ *
+ * Please check the documentation for all required and
+ * optional parameters.
+ */
+
+object NotificationCommand "mail-host-notification" {
+ command = [ ConfigDir + "/scripts/mail-host-notification.sh" ]
+
+ arguments += {
+ "-4" = "$notification_address$"
+ "-6" = "$notification_address6$"
+ "-b" = "$notification_author$"
+ "-c" = "$notification_comment$"
+ "-d" = {
+ required = true
+ value = "$notification_date$"
+ }
+ "-f" = {
+ value = "$notification_from$"
+ description = "Set from address. Requires GNU mailutils (Debian/Ubuntu) or mailx (RHEL/SUSE)"
+ }
+ "-i" = "$notification_icingaweb2url$"
+ "-l" = {
+ required = true
+ value = "$notification_hostname$"
+ }
+ "-n" = {
+ required = true
+ value = "$notification_hostdisplayname$"
+ }
+ "-o" = {
+ required = true
+ value = "$notification_hostoutput$"
+ }
+ "-r" = {
+ required = true
+ value = "$notification_useremail$"
+ }
+ "-s" = {
+ required = true
+ value = "$notification_hoststate$"
+ }
+ "-X" = "$notification_hostnotes$"
+ "-t" = {
+ required = true
+ value = "$notification_type$"
+ }
+ "-v" = "$notification_logtosyslog$"
+ }
+
+ vars += {
+ notification_address = "$address$"
+ notification_address6 = "$address6$"
+ notification_author = "$notification.author$"
+ notification_comment = "$notification.comment$"
+ notification_type = "$notification.type$"
+ notification_date = "$icinga.long_date_time$"
+ notification_hostname = "$host.name$"
+ notification_hostdisplayname = "$host.display_name$"
+ notification_hostoutput = "$host.output$"
+ notification_hoststate = "$host.state$"
+ notification_useremail = "$user.email$"
+ notification_hostnotes = "$host.notes$"
+ }
+}
+
+object NotificationCommand "mail-service-notification" {
+ command = [ ConfigDir + "/scripts/mail-service-notification.sh" ]
+
+ arguments += {
+ "-4" = "$notification_address$"
+ "-6" = "$notification_address6$"
+ "-b" = "$notification_author$"
+ "-c" = "$notification_comment$"
+ "-d" = {
+ required = true
+ value = "$notification_date$"
+ }
+ "-e" = {
+ required = true
+ value = "$notification_servicename$"
+ }
+ "-f" = {
+ value = "$notification_from$"
+ description = "Set from address. Requires GNU mailutils (Debian/Ubuntu) or mailx (RHEL/SUSE)"
+ }
+ "-i" = "$notification_icingaweb2url$"
+ "-l" = {
+ required = true
+ value = "$notification_hostname$"
+ }
+ "-n" = {
+ required = true
+ value = "$notification_hostdisplayname$"
+ }
+ "-o" = {
+ required = true
+ value = "$notification_serviceoutput$"
+ }
+ "-r" = {
+ required = true
+ value = "$notification_useremail$"
+ }
+ "-s" = {
+ required = true
+ value = "$notification_servicestate$"
+ }
+ "-t" = {
+ required = true
+ value = "$notification_type$"
+ }
+ "-X" = "$notification_hostnotes$"
+ "-x" = "$notification_servicenotes$"
+ "-u" = {
+ required = true
+ value = "$notification_servicedisplayname$"
+ }
+ "-v" = "$notification_logtosyslog$"
+ }
+
+ vars += {
+ notification_address = "$address$"
+ notification_address6 = "$address6$"
+ notification_author = "$notification.author$"
+ notification_comment = "$notification.comment$"
+ notification_type = "$notification.type$"
+ notification_date = "$icinga.long_date_time$"
+ notification_hostname = "$host.name$"
+ notification_hostdisplayname = "$host.display_name$"
+ notification_servicename = "$service.name$"
+ notification_serviceoutput = "$service.output$"
+ notification_servicestate = "$service.state$"
+ notification_useremail = "$user.email$"
+ notification_servicedisplayname = "$service.display_name$"
+ notification_hostnotes = "$host.notes$"
+ notification_servicenotes = "$service.notes$"
+ }
+}
+
+/*
+ * If you prefer to use the notification scripts with environment
+ * variables instead of command line parameters, you can use
+ * the following commands. They have been updated from versions prior to 2.7
+ * to support the new notification scripts and should help
+ * with an upgrade.
+ * Remove the comment markers and comment out the notification commands above.
+ */
+
+/*
+
+object NotificationCommand "mail-host-notification" {
+ command = [ ConfigDir + "/scripts/mail-host-notification.sh" ]
+
+ env = {
+ NOTIFICATIONTYPE = "$notification.type$"
+ HOSTDISPLAYNAME = "$host.display_name$"
+ HOSTNAME = "$host.name$"
+ HOSTADDRESS = "$address$"
+ HOSTSTATE = "$host.state$"
+ LONGDATETIME = "$icinga.long_date_time$"
+ HOSTOUTPUT = "$host.output$"
+ NOTIFICATIONAUTHORNAME = "$notification.author$"
+ NOTIFICATIONCOMMENT = "$notification.comment$"
+ USEREMAIL = "$user.email$"
+ HOSTNOTES = "$host.notes$"
+ }
+}
+
+object NotificationCommand "mail-service-notification" {
+ command = [ ConfigDir + "/scripts/mail-service-notification.sh" ]
+
+ env = {
+ NOTIFICATIONTYPE = "$notification.type$"
+ SERVICENAME = "$service.name$"
+ HOSTNAME = "$host.name$"
+ HOSTDISPLAYNAME = "$host.display_name$"
+ HOSTADDRESS = "$address$"
+ SERVICESTATE = "$service.state$"
+ LONGDATETIME = "$icinga.long_date_time$"
+ SERVICEOUTPUT = "$service.output$"
+ NOTIFICATIONAUTHORNAME = "$notification.author$"
+ NOTIFICATIONCOMMENT = "$notification.comment$"
+ SERVICEDISPLAYNAME = "$service.display_name$"
+ USEREMAIL = "$user.email$"
+ HOSTNOTES = "$host.notes$"
+ SERVICENOTES = "$service.notes$"
+ }
+}
+
+*/
+
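To make the argument mapping above concrete, a hypothetical invocation that Icinga 2 could build from the "mail-host-notification" command for a host in a DOWN state might look roughly like this (values are illustrative only, and ConfigDir is assumed to be /etc/icinga2):

    /etc/icinga2/scripts/mail-host-notification.sh \
        -d "2024-01-01 00:00:00 +0000" -l "web01" -n "web01" \
        -o "PING CRITICAL - Packet loss = 100%" -r "icinga@localhost" \
        -s "DOWN" -t "PROBLEM"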
diff --git a/etc/icinga2/conf.d/downtimes.conf b/etc/icinga2/conf.d/downtimes.conf
new file mode 100644
index 0000000..0bed647
--- /dev/null
+++ b/etc/icinga2/conf.d/downtimes.conf
@@ -0,0 +1,20 @@
+/**
+ * The example downtime apply rule.
+ */
+
+apply ScheduledDowntime "backup-downtime" to Service {
+ author = "icingaadmin"
+ comment = "Scheduled downtime for backup"
+
+ ranges = {
+ monday = service.vars.backup_downtime
+ tuesday = service.vars.backup_downtime
+ wednesday = service.vars.backup_downtime
+ thursday = service.vars.backup_downtime
+ friday = service.vars.backup_downtime
+ saturday = service.vars.backup_downtime
+ sunday = service.vars.backup_downtime
+ }
+
+ assign where service.vars.backup_downtime != ""
+}
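
This rule only matches services that set `vars.backup_downtime` (the "load" service in `services.conf` does). A rough sketch for confirming which downtime objects were generated:

    icinga2 daemon -C
    icinga2 object list --type ScheduledDowntime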
diff --git a/etc/icinga2/conf.d/groups.conf b/etc/icinga2/conf.d/groups.conf
new file mode 100644
index 0000000..e6004a3
--- /dev/null
+++ b/etc/icinga2/conf.d/groups.conf
@@ -0,0 +1,37 @@
+/**
+ * Host group examples.
+ */
+
+object HostGroup "linux-servers" {
+ display_name = "Linux Servers"
+
+ assign where host.vars.os == "Linux"
+}
+
+object HostGroup "windows-servers" {
+ display_name = "Windows Servers"
+
+ assign where host.vars.os == "Windows"
+}
+
+/**
+ * Service group examples.
+ */
+
+object ServiceGroup "ping" {
+ display_name = "Ping Checks"
+
+ assign where match("ping*", service.name)
+}
+
+object ServiceGroup "http" {
+ display_name = "HTTP Checks"
+
+ assign where match("http*", service.check_command)
+}
+
+object ServiceGroup "disk" {
+ display_name = "Disk Checks"
+
+ assign where match("disk*", service.check_command)
+}
diff --git a/etc/icinga2/conf.d/hosts.conf b/etc/icinga2/conf.d/hosts.conf
new file mode 100644
index 0000000..e54d01d
--- /dev/null
+++ b/etc/icinga2/conf.d/hosts.conf
@@ -0,0 +1,52 @@
+/*
+ * Host definitions with object attributes
+ * used for apply rules for Service, Notification,
+ * Dependency and ScheduledDowntime objects.
+ *
+ * Tip: Use `icinga2 object list --type Host` to
+ * list all host objects after running
+ * configuration validation (`icinga2 daemon -C`).
+ */
+
+/*
+ * This is an example host based on your
+ * local host's FQDN. Specify the NodeName
+ * constant in `constants.conf` or use your
+ * own description, e.g. "db-host-1".
+ */
+
+object Host NodeName {
+ /* Import the default host template defined in `templates.conf`. */
+ import "generic-host"
+
+ /* Specify the address attributes for checks e.g. `ssh` or `http`. */
+ address = "127.0.0.1"
+ address6 = "::1"
+
+ /* Set custom variable `os` for hostgroup assignment in `groups.conf`. */
+ vars.os = "Linux"
+
+ /* Define http vhost attributes for service apply rules in `services.conf`. */
+ vars.http_vhosts["http"] = {
+ http_uri = "/"
+ }
+ /* Uncomment if you've successfully installed Icinga Web 2. */
+ //vars.http_vhosts["Icinga Web 2"] = {
+ // http_uri = "/icingaweb2"
+ //}
+
+ /* Define disks and attributes for service apply rules in `services.conf`. */
+ vars.disks["disk"] = {
+ /* No parameters. */
+ }
+ vars.disks["disk /"] = {
+ disk_partitions = "/"
+ }
+
+ /* Define notification mail attributes for notification apply rules in `notifications.conf`. */
+ vars.notification["mail"] = {
+ /* The UserGroup `icingaadmins` is defined in `users.conf`. */
+ groups = [ "icingaadmins" ]
+ }
+}
+
diff --git a/etc/icinga2/conf.d/notifications.conf b/etc/icinga2/conf.d/notifications.conf
new file mode 100644
index 0000000..ac65875
--- /dev/null
+++ b/etc/icinga2/conf.d/notifications.conf
@@ -0,0 +1,33 @@
+/**
+ * The example notification apply rules.
+ *
+ * Only applied if host/service objects have
+ * the custom variable `notification` defined,
+ * containing `mail` as a key.
+ *
+ * Check `hosts.conf` for an example.
+ */
+
+apply Notification "mail-icingaadmin" to Host {
+ import "mail-host-notification"
+ user_groups = host.vars.notification.mail.groups
+ users = host.vars.notification.mail.users
+
+ //interval = 2h
+
+ //vars.notification_logtosyslog = true
+
+ assign where host.vars.notification.mail
+}
+
+apply Notification "mail-icingaadmin" to Service {
+ import "mail-service-notification"
+ user_groups = host.vars.notification.mail.groups
+ users = host.vars.notification.mail.users
+
+ //interval = 2h
+
+ //vars.notification_logtosyslog = true
+
+ assign where host.vars.notification.mail
+}
diff --git a/etc/icinga2/conf.d/services.conf b/etc/icinga2/conf.d/services.conf
new file mode 100644
index 0000000..c8e1b3c
--- /dev/null
+++ b/etc/icinga2/conf.d/services.conf
@@ -0,0 +1,117 @@
+/*
+ * Service apply rules.
+ *
+ * The CheckCommand objects `ping4`, `ping6`, etc
+ * are provided by the plugin check command templates.
+ * Check the documentation for details.
+ *
+ * Tip: Use `icinga2 object list --type Service` to
+ * list all service objects after running
+ * configuration validation (`icinga2 daemon -C`).
+ */
+
+/*
+ * The service apply rules below reference the
+ * example host defined in `hosts.conf`, which is
+ * based on your local host's FQDN (the NodeName
+ * constant from `constants.conf`).
+ */
+
+/*
+ * These are generic `ping4` and `ping6`
+ * checks applied to all hosts that have the
+ * `address` or `address6` attribute
+ * defined, respectively.
+ */
+apply Service "ping4" {
+ import "generic-service"
+
+ check_command = "ping4"
+
+ assign where host.address
+}
+
+apply Service "ping6" {
+ import "generic-service"
+
+ check_command = "ping6"
+
+ assign where host.address6
+}
+
+/*
+ * Apply the `ssh` service to all hosts
+ * with the `address` attribute defined and
+ * the custom variable `os` set to `Linux`.
+ */
+apply Service "ssh" {
+ import "generic-service"
+
+ check_command = "ssh"
+
+ assign where (host.address || host.address6) && host.vars.os == "Linux"
+}
+
+
+
+apply Service for (http_vhost => config in host.vars.http_vhosts) {
+ import "generic-service"
+
+ check_command = "http"
+
+ vars += config
+}
+
+apply Service for (disk => config in host.vars.disks) {
+ import "generic-service"
+
+ check_command = "disk"
+
+ vars += config
+}
+
+apply Service "icinga" {
+ import "generic-service"
+
+ check_command = "icinga"
+
+ assign where host.name == NodeName
+}
+
+apply Service "load" {
+ import "generic-service"
+
+ check_command = "load"
+
+ /* Used by the ScheduledDowntime apply rule in `downtimes.conf`. */
+ vars.backup_downtime = "02:00-03:00"
+
+ assign where host.name == NodeName
+}
+
+apply Service "procs" {
+ import "generic-service"
+
+ check_command = "procs"
+
+ assign where host.name == NodeName
+}
+
+apply Service "swap" {
+ import "generic-service"
+
+ check_command = "swap"
+
+ assign where host.name == NodeName
+}
+
+apply Service "users" {
+ import "generic-service"
+
+ check_command = "users"
+
+ assign where host.name == NodeName
+}
+
+
+
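The two `apply Service for` loops above generate one service per entry in `host.vars.http_vhosts` and `host.vars.disks` of each matching host. A quick sketch for inspecting the generated objects after validation:

    icinga2 daemon -C
    icinga2 object list --type Service --name 'disk*'
    icinga2 object list --type Service --name 'http*'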
diff --git a/etc/icinga2/conf.d/templates.conf b/etc/icinga2/conf.d/templates.conf
new file mode 100644
index 0000000..5655e3f
--- /dev/null
+++ b/etc/icinga2/conf.d/templates.conf
@@ -0,0 +1,83 @@
+/*
+ * Generic template examples.
+ */
+
+
+/**
+ * Provides default settings for hosts. By convention
+ * all hosts should import this template.
+ *
+ * The CheckCommand object `hostalive` is provided by
+ * the plugin check command templates.
+ * Check the documentation for details.
+ */
+template Host "generic-host" {
+ max_check_attempts = 3
+ check_interval = 1m
+ retry_interval = 30s
+
+ check_command = "hostalive"
+}
+
+/**
+ * Provides default settings for services. By convention
+ * all services should import this template.
+ */
+template Service "generic-service" {
+ max_check_attempts = 5
+ check_interval = 1m
+ retry_interval = 30s
+}
+
+/**
+ * Provides default settings for users. By convention
+ * all users should inherit from this template.
+ */
+
+template User "generic-user" {
+
+}
+
+/**
+ * Provides default settings for host notifications.
+ * By convention all host notifications should import
+ * this template.
+ */
+template Notification "mail-host-notification" {
+ command = "mail-host-notification"
+
+ states = [ Up, Down ]
+ types = [ Problem, Acknowledgement, Recovery, Custom,
+ FlappingStart, FlappingEnd,
+ DowntimeStart, DowntimeEnd, DowntimeRemoved ]
+
+ vars += {
+ // notification_icingaweb2url = "https://www.example.com/icingaweb2"
+ // notification_from = "Icinga 2 Host Monitoring <icinga@example.com>"
+ notification_logtosyslog = false
+ }
+
+ period = "24x7"
+}
+
+/**
+ * Provides default settings for service notifications.
+ * By convention all service notifications should import
+ * this template.
+ */
+template Notification "mail-service-notification" {
+ command = "mail-service-notification"
+
+ states = [ OK, Warning, Critical, Unknown ]
+ types = [ Problem, Acknowledgement, Recovery, Custom,
+ FlappingStart, FlappingEnd,
+ DowntimeStart, DowntimeEnd, DowntimeRemoved ]
+
+ vars += {
+ // notification_icingaweb2url = "https://www.example.com/icingaweb2"
+ // notification_from = "Icinga 2 Service Monitoring <icinga@example.com>"
+ notification_logtosyslog = false
+ }
+
+ period = "24x7"
+}
diff --git a/etc/icinga2/conf.d/timeperiods.conf b/etc/icinga2/conf.d/timeperiods.conf
new file mode 100644
index 0000000..ea162ff
--- /dev/null
+++ b/etc/icinga2/conf.d/timeperiods.conf
@@ -0,0 +1,35 @@
+/**
+ * Sample timeperiods for Icinga 2.
+ * Check the documentation for details.
+ */
+
+object TimePeriod "24x7" {
+ display_name = "Icinga 2 24x7 TimePeriod"
+ ranges = {
+ "monday" = "00:00-24:00"
+ "tuesday" = "00:00-24:00"
+ "wednesday" = "00:00-24:00"
+ "thursday" = "00:00-24:00"
+ "friday" = "00:00-24:00"
+ "saturday" = "00:00-24:00"
+ "sunday" = "00:00-24:00"
+ }
+}
+
+object TimePeriod "9to5" {
+ display_name = "Icinga 2 9to5 TimePeriod"
+ ranges = {
+ "monday" = "09:00-17:00"
+ "tuesday" = "09:00-17:00"
+ "wednesday" = "09:00-17:00"
+ "thursday" = "09:00-17:00"
+ "friday" = "09:00-17:00"
+ }
+}
+
+object TimePeriod "never" {
+ display_name = "Icinga 2 never TimePeriod"
+ ranges = {
+ }
+}
+
diff --git a/etc/icinga2/conf.d/users.conf b/etc/icinga2/conf.d/users.conf
new file mode 100644
index 0000000..515ef21
--- /dev/null
+++ b/etc/icinga2/conf.d/users.conf
@@ -0,0 +1,17 @@
+/**
+ * The example user 'icingaadmin' and the example
+ * group 'icingaadmins'.
+ */
+
+object User "icingaadmin" {
+ import "generic-user"
+
+ display_name = "Icinga 2 Admin"
+ groups = [ "icingaadmins" ]
+
+ email = "icinga@localhost"
+}
+
+object UserGroup "icingaadmins" {
+ display_name = "Icinga 2 Admin Group"
+}
diff --git a/etc/icinga2/conf.d/win32/hosts.conf b/etc/icinga2/conf.d/win32/hosts.conf
new file mode 100644
index 0000000..ecee11a
--- /dev/null
+++ b/etc/icinga2/conf.d/win32/hosts.conf
@@ -0,0 +1,43 @@
+/*
+ * Host definitions with object attributes
+ * used for apply rules for Service, Notification,
+ * Dependency and ScheduledDowntime objects.
+ *
+ * Tip: Use `icinga2 object list --type Host` to
+ * list all host objects after running
+ * configuration validation (`icinga2 daemon -C`).
+ */
+
+/*
+ * This is an example host based on your
+ * local host's FQDN. Specify the NodeName
+ * constant in `constants.conf` or use your
+ * own description, e.g. "db-host-1".
+ */
+
+object Host NodeName {
+ /* Import the default host template defined in `templates.conf`. */
+ import "generic-host"
+
+ /* Specify the address attributes for checks e.g. `ssh` or `http`. */
+ address = "127.0.0.1"
+ address6 = "::1"
+
+ /* Set custom variable `os` for hostgroup assignment in `groups.conf`. */
+ vars.os = "Windows"
+
+ /* Define disks and attributes for service apply rules in `services.conf`. */
+ vars.disks["disk"] = {
+ /* No parameters. */
+ }
+ vars.disks["disk C:"] = {
+ disk_win_path = "C:"
+ }
+
+ /* Define notification mail attributes for notification apply rules in `notifications.conf`. */
+ vars.notification["mail"] = {
+ /* The UserGroup `icingaadmins` is defined in `users.conf`. */
+ groups = [ "icingaadmins" ]
+ }
+}
+
diff --git a/etc/icinga2/conf.d/win32/services.conf b/etc/icinga2/conf.d/win32/services.conf
new file mode 100644
index 0000000..929be54
--- /dev/null
+++ b/etc/icinga2/conf.d/win32/services.conf
@@ -0,0 +1,92 @@
+/*
+ * Service apply rules.
+ *
+ * The CheckCommand objects `ping4`, `ping6`, etc
+ * are provided by the plugin check command templates.
+ * Check the documentation for details.
+ *
+ * Tip: Use `icinga2 object list --type Service` to
+ * list all service objects after running
+ * configuration validation (`icinga2 daemon -C`).
+ */
+
+/*
+ * The service apply rules below reference the
+ * example host defined in `hosts.conf`, which is
+ * based on your local host's FQDN (the NodeName
+ * constant from `constants.conf`).
+ */
+
+/*
+ * These are generic `ping4` and `ping6`
+ * checks applied to all hosts that have the
+ * `address` or `address6` attribute
+ * defined, respectively.
+ */
+apply Service "ping4" {
+ import "generic-service"
+
+ check_command = "ping4-windows"
+
+ assign where host.address
+}
+
+apply Service "ping6" {
+ import "generic-service"
+
+ check_command = "ping6-windows"
+
+ assign where host.address6
+}
+
+apply Service for (disk => config in host.vars.disks) {
+ import "generic-service"
+
+ check_command = "disk-windows"
+
+ vars += config
+}
+
+apply Service "icinga" {
+ import "generic-service"
+
+ check_command = "icinga"
+
+ assign where host.name == NodeName
+}
+
+apply Service "load" {
+ import "generic-service"
+
+ check_command = "load-windows"
+
+ /* Used by the ScheduledDowntime apply rule in `downtimes.conf`. */
+ vars.backup_downtime = "02:00-03:00"
+
+ assign where host.name == NodeName
+}
+
+apply Service "procs" {
+ import "generic-service"
+
+ check_command = "procs-windows"
+
+ assign where host.name == NodeName
+}
+
+apply Service "swap" {
+ import "generic-service"
+
+ check_command = "swap-windows"
+
+ assign where host.name == NodeName
+}
+
+apply Service "users" {
+ import "generic-service"
+
+ check_command = "users-windows"
+
+ assign where host.name == NodeName
+}
+
diff --git a/etc/icinga2/constants.conf.cmake b/etc/icinga2/constants.conf.cmake
new file mode 100644
index 0000000..1dae310
--- /dev/null
+++ b/etc/icinga2/constants.conf.cmake
@@ -0,0 +1,28 @@
+/**
+ * This file defines global constants which can be used in
+ * the other configuration files.
+ */
+
+/* The directory which contains the plugins from the Monitoring Plugins project. */
+const PluginDir = "@ICINGA2_PLUGINDIR@"
+
+/* The directory which contains the Manubulon plugins.
+ * Check the documentation, chapter "SNMP Manubulon Plugin Check Commands", for details.
+ */
+const ManubulonPluginDir = "@ICINGA2_PLUGINDIR@"
+
+/* The directory used to store additional plugins for which the ITL provides user-contributed command definitions.
+ * Check the documentation, chapter "Plugins Contribution", for details.
+ */
+const PluginContribDir = "@ICINGA2_PLUGINDIR@"
+
+/* Our local instance name. By default this is the server's hostname as returned by `hostname --fqdn`.
+ * This should be the common name from the API certificate.
+ */
+//const NodeName = "localhost"
+
+/* Our local zone name. */
+const ZoneName = NodeName
+
+/* Secret key for remote node tickets */
+const TicketSalt = ""
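
TicketSalt is only needed for CSR auto-signing in distributed setups; the master uses it to derive per-agent tickets. Assuming a non-empty salt has been configured (the node/API setup wizards generate one), a ticket for an agent could be requested like this (the CN is a placeholder for the agent's FQDN):

    icinga2 pki ticket --cn icinga2-agent1.localdomain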
diff --git a/etc/icinga2/features-available/api.conf b/etc/icinga2/features-available/api.conf
new file mode 100644
index 0000000..b072a44
--- /dev/null
+++ b/etc/icinga2/features-available/api.conf
@@ -0,0 +1,10 @@
+/**
+ * The API listener is used for distributed monitoring setups.
+ */
+
+object ApiListener "api" {
+ //accept_config = false
+ //accept_commands = false
+
+ ticket_salt = TicketSalt
+}
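
A minimal sketch for enabling the listener on a single node: the `api setup` CLI command creates the local CA and node certificates and enables this feature (adjust the service command for your init system):

    icinga2 api setup
    systemctl restart icinga2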
diff --git a/etc/icinga2/features-available/checker.conf b/etc/icinga2/features-available/checker.conf
new file mode 100644
index 0000000..6f003b8
--- /dev/null
+++ b/etc/icinga2/features-available/checker.conf
@@ -0,0 +1,5 @@
+/**
+ * The checker component takes care of executing service checks.
+ */
+
+object CheckerComponent "checker" { }
diff --git a/etc/icinga2/features-available/command.conf b/etc/icinga2/features-available/command.conf
new file mode 100644
index 0000000..ffdd143
--- /dev/null
+++ b/etc/icinga2/features-available/command.conf
@@ -0,0 +1,7 @@
+/**
+ * The ExternalCommandListener implements support for the external
+ * commands pipe.
+ */
+
+object ExternalCommandListener "command" { }
+
diff --git a/etc/icinga2/features-available/compatlog.conf b/etc/icinga2/features-available/compatlog.conf
new file mode 100644
index 0000000..6f5cf5f
--- /dev/null
+++ b/etc/icinga2/features-available/compatlog.conf
@@ -0,0 +1,7 @@
+/**
+ * The CompatLogger type is responsible for writing log files in a format
+ * that is compatible with Icinga 1.x.
+ */
+
+object CompatLogger "compatlog" { }
+
diff --git a/etc/icinga2/features-available/debuglog.conf b/etc/icinga2/features-available/debuglog.conf
new file mode 100644
index 0000000..e66518f
--- /dev/null
+++ b/etc/icinga2/features-available/debuglog.conf
@@ -0,0 +1,10 @@
+/**
+ * The FileLogger type writes log information to a log file.
+ * Unlike the mainlog feature this sets up a logger
+ * with severity "debug".
+ */
+
+object FileLogger "debug-file" {
+ severity = "debug"
+ path = LogDir + "/debug.log"
+}
diff --git a/etc/icinga2/features-available/elasticsearch.conf b/etc/icinga2/features-available/elasticsearch.conf
new file mode 100644
index 0000000..8014913
--- /dev/null
+++ b/etc/icinga2/features-available/elasticsearch.conf
@@ -0,0 +1,8 @@
+object ElasticsearchWriter "elasticsearch" {
+ //host = "127.0.0.1"
+ //port = 9200
+ //index = "icinga2"
+ //enable_send_perfdata = false
+ //flush_threshold = 1024
+ //flush_interval = 10s
+}
diff --git a/etc/icinga2/features-available/gelf.conf b/etc/icinga2/features-available/gelf.conf
new file mode 100644
index 0000000..c14c864
--- /dev/null
+++ b/etc/icinga2/features-available/gelf.conf
@@ -0,0 +1,10 @@
+/**
+ * The GelfWriter type writes event log entries
+ * to a GELF TCP socket provided by Graylog,
+ * Logstash or any other receiver.
+ */
+
+object GelfWriter "gelf" {
+ //host = "127.0.0.1"
+ //port = 12201
+}
diff --git a/etc/icinga2/features-available/graphite.conf b/etc/icinga2/features-available/graphite.conf
new file mode 100644
index 0000000..ae8a928
--- /dev/null
+++ b/etc/icinga2/features-available/graphite.conf
@@ -0,0 +1,9 @@
+/**
+ * The GraphiteWriter type writes check result metrics and
+ * performance data to a Graphite TCP socket.
+ */
+
+object GraphiteWriter "graphite" {
+ //host = "127.0.0.1"
+ //port = 2003
+}
diff --git a/etc/icinga2/features-available/icingadb.conf b/etc/icinga2/features-available/icingadb.conf
new file mode 100644
index 0000000..9951c83
--- /dev/null
+++ b/etc/icinga2/features-available/icingadb.conf
@@ -0,0 +1,5 @@
+object IcingaDB "icingadb" {
+ //host = "127.0.0.1"
+ //port = 6380
+ //password = "xxx"
+}
diff --git a/etc/icinga2/features-available/ido-mysql.conf b/etc/icinga2/features-available/ido-mysql.conf
new file mode 100644
index 0000000..7b44c23
--- /dev/null
+++ b/etc/icinga2/features-available/ido-mysql.conf
@@ -0,0 +1,11 @@
+/**
+ * The IdoMysqlConnection type implements MySQL support
+ * for DB IDO.
+ */
+
+object IdoMysqlConnection "ido-mysql" {
+ //user = "icinga"
+ //password = "icinga"
+ //host = "localhost"
+ //database = "icinga"
+}
diff --git a/etc/icinga2/features-available/ido-pgsql.conf b/etc/icinga2/features-available/ido-pgsql.conf
new file mode 100644
index 0000000..9f3c132
--- /dev/null
+++ b/etc/icinga2/features-available/ido-pgsql.conf
@@ -0,0 +1,11 @@
+/**
+ * The IdoPgsqlConnection type implements PostgreSQL support
+ * for DB IDO.
+ */
+
+object IdoPgsqlConnection "ido-pgsql" {
+ //user = "icinga"
+ //password = "icinga"
+ //host = "localhost"
+ //database = "icinga"
+}
diff --git a/etc/icinga2/features-available/influxdb.conf b/etc/icinga2/features-available/influxdb.conf
new file mode 100644
index 0000000..f0af37b
--- /dev/null
+++ b/etc/icinga2/features-available/influxdb.conf
@@ -0,0 +1,25 @@
+/**
+ * The InfluxdbWriter type writes check result metrics and
+ * performance data to an InfluxDB v1 HTTP API
+ */
+
+object InfluxdbWriter "influxdb" {
+ //host = "127.0.0.1"
+ //port = 8086
+ //database = "icinga2"
+ //flush_threshold = 1024
+ //flush_interval = 10s
+ //host_template = {
+ // measurement = "$host.check_command$"
+ // tags = {
+ // hostname = "$host.name$"
+ // }
+ //}
+ //service_template = {
+ // measurement = "$service.check_command$"
+ // tags = {
+ // hostname = "$host.name$"
+ // service = "$service.name$"
+ // }
+ //}
+}
diff --git a/etc/icinga2/features-available/influxdb2.conf b/etc/icinga2/features-available/influxdb2.conf
new file mode 100644
index 0000000..53f7a21
--- /dev/null
+++ b/etc/icinga2/features-available/influxdb2.conf
@@ -0,0 +1,27 @@
+/**
+ * The Influxdb2Writer type writes check result metrics and
+ * performance data to an InfluxDB v2 HTTP API
+ */
+
+object Influxdb2Writer "influxdb2" {
+ //host = "127.0.0.1"
+ //port = 8086
+ //organization = "monitoring"
+ //bucket = "icinga2"
+ //auth_token = "ABCDEvwxyz0189-_"
+ //flush_threshold = 1024
+ //flush_interval = 10s
+ //host_template = {
+ // measurement = "$host.check_command$"
+ // tags = {
+ // hostname = "$host.name$"
+ // }
+ //}
+ //service_template = {
+ // measurement = "$service.check_command$"
+ // tags = {
+ // hostname = "$host.name$"
+ // service = "$service.name$"
+ // }
+ //}
+}
diff --git a/etc/icinga2/features-available/journald.conf b/etc/icinga2/features-available/journald.conf
new file mode 100644
index 0000000..e0b36f7
--- /dev/null
+++ b/etc/icinga2/features-available/journald.conf
@@ -0,0 +1,7 @@
+/**
+ * The JournaldLogger type writes log information to the systemd journal.
+ */
+
+object JournaldLogger "journald" {
+ severity = "warning"
+}
diff --git a/etc/icinga2/features-available/livestatus.conf b/etc/icinga2/features-available/livestatus.conf
new file mode 100644
index 0000000..246c085
--- /dev/null
+++ b/etc/icinga2/features-available/livestatus.conf
@@ -0,0 +1,6 @@
+/**
+ * The LivestatusListener type implements the Livestatus query protocol.
+ */
+
+object LivestatusListener "livestatus" { }
+
diff --git a/etc/icinga2/features-available/mainlog.conf b/etc/icinga2/features-available/mainlog.conf
new file mode 100644
index 0000000..a3bb19d
--- /dev/null
+++ b/etc/icinga2/features-available/mainlog.conf
@@ -0,0 +1,8 @@
+/**
+ * The FileLogger type writes log information to a file.
+ */
+
+object FileLogger "main-log" {
+ severity = "information"
+ path = LogDir + "/icinga2.log"
+}
diff --git a/etc/icinga2/features-available/notification.conf b/etc/icinga2/features-available/notification.conf
new file mode 100644
index 0000000..3f9a88e
--- /dev/null
+++ b/etc/icinga2/features-available/notification.conf
@@ -0,0 +1,5 @@
+/**
+ * The notification component is responsible for sending notifications.
+ */
+
+object NotificationComponent "notification" { }
diff --git a/etc/icinga2/features-available/opentsdb.conf b/etc/icinga2/features-available/opentsdb.conf
new file mode 100644
index 0000000..471a622
--- /dev/null
+++ b/etc/icinga2/features-available/opentsdb.conf
@@ -0,0 +1,25 @@
+/**
+ * The OpenTsdbWriter type writes check result metrics and
+ * performance data to an OpenTSDB TCP socket.
+ */
+
+object OpenTsdbWriter "opentsdb" {
+ //host = "127.0.0.1"
+ //port = 4242
+ //enable_generic_metrics = false
+
+ // Custom Tagging, refer to Icinga object type documentation for
+ // OpenTsdbWriter
+ //host_template = {
+ // metric = "icinga.host"
+ // tags = {
+ // zone = "$host.zone$"
+ // }
+ //}
+ //service_template = {
+ // metric = "icinga.service.$service.check_command$"
+ // tags = {
+ // zone = "$service.zone$"
+ // }
+ //}
+}
diff --git a/etc/icinga2/features-available/perfdata.conf b/etc/icinga2/features-available/perfdata.conf
new file mode 100644
index 0000000..3ba8635
--- /dev/null
+++ b/etc/icinga2/features-available/perfdata.conf
@@ -0,0 +1,6 @@
+/**
+ * The PerfdataWriter type writes performance data files and rotates
+ * them in a regular interval.
+ */
+
+object PerfdataWriter "perfdata" { }
diff --git a/etc/icinga2/features-available/syslog.conf b/etc/icinga2/features-available/syslog.conf
new file mode 100644
index 0000000..3b794b4
--- /dev/null
+++ b/etc/icinga2/features-available/syslog.conf
@@ -0,0 +1,8 @@
+/**
+ * The SyslogLogger type writes log information to syslog.
+ */
+
+object SyslogLogger "syslog" {
+ severity = "warning"
+}
+
diff --git a/etc/icinga2/features-available/windowseventlog.conf b/etc/icinga2/features-available/windowseventlog.conf
new file mode 100644
index 0000000..8e5c0ae
--- /dev/null
+++ b/etc/icinga2/features-available/windowseventlog.conf
@@ -0,0 +1,8 @@
+/**
+ * The WindowsEventLogLogger type writes log information to the Windows Event Log.
+ */
+
+object WindowsEventLogLogger "windowseventlog" {
+ severity = "information"
+}
+
diff --git a/etc/icinga2/features-enabled/checker.conf b/etc/icinga2/features-enabled/checker.conf
new file mode 100644
index 0000000..0b39650
--- /dev/null
+++ b/etc/icinga2/features-enabled/checker.conf
@@ -0,0 +1 @@
+include "../features-available/checker.conf"
diff --git a/etc/icinga2/features-enabled/notification.conf b/etc/icinga2/features-enabled/notification.conf
new file mode 100644
index 0000000..b549ad4
--- /dev/null
+++ b/etc/icinga2/features-enabled/notification.conf
@@ -0,0 +1 @@
+include "../features-available/notification.conf"
diff --git a/etc/icinga2/features-enabled/windowseventlog.conf b/etc/icinga2/features-enabled/windowseventlog.conf
new file mode 100644
index 0000000..8e57873
--- /dev/null
+++ b/etc/icinga2/features-enabled/windowseventlog.conf
@@ -0,0 +1 @@
+include "../features-available/windowseventlog.conf"
diff --git a/etc/icinga2/icinga2.conf b/etc/icinga2/icinga2.conf
new file mode 100644
index 0000000..17513a3
--- /dev/null
+++ b/etc/icinga2/icinga2.conf
@@ -0,0 +1,57 @@
+/**
+ * Icinga 2 configuration file
+ * - this is where you define settings for the Icinga application, including
+ * which hosts/services to check.
+ *
+ * For an overview of all available configuration options please refer
+ * to the documentation that is distributed as part of Icinga 2.
+ */
+
+/**
+ * The constants.conf defines global constants.
+ */
+include "constants.conf"
+
+/**
+ * The zones.conf defines zones for a cluster setup.
+ * Not required for single instance setups.
+ */
+include "zones.conf"
+
+/**
+ * The Icinga Template Library (ITL) provides a number of useful templates
+ * and command definitions.
+ * Common monitoring plugin command definitions are included separately.
+ */
+include <itl>
+include <plugins>
+include <plugins-contrib>
+include <manubulon>
+
+/**
+ * This includes the Icinga 2 Windows plugins. These command definitions
+ * are required on a master node when a client is used as a command endpoint.
+ */
+include <windows-plugins>
+
+/**
+ * This includes the NSClient++ check commands. These command definitions
+ * are required on a master node when a client is used as a command endpoint.
+ */
+include <nscp>
+
+/**
+ * The features-available directory contains a number of configuration
+ * files for features which can be enabled and disabled using the
+ * icinga2 feature enable / icinga2 feature disable CLI commands.
+ * These commands work by creating and removing symbolic links in
+ * the features-enabled directory.
+ */
+include "features-enabled/*.conf"
+
+/**
+ * Although in theory you could define all your objects in this file,
+ * the preferred way is to create separate directories and files in the conf.d
+ * directory. Each of these files must have the file extension ".conf".
+ */
+include_recursive "conf.d"
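
A sketch of the feature mechanism described above (paths assume the default /etc/icinga2 configuration directory):

    icinga2 feature list
    icinga2 feature enable graphite   # creates features-enabled/graphite.conf -> ../features-available/graphite.conf
    systemctl reload icinga2          # features take effect after a reload/restart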
diff --git a/etc/icinga2/scripts/mail-host-notification.sh b/etc/icinga2/scripts/mail-host-notification.sh
new file mode 100755
index 0000000..70d3b50
--- /dev/null
+++ b/etc/icinga2/scripts/mail-host-notification.sh
@@ -0,0 +1,177 @@
+#!/usr/bin/env bash
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+# Except for the urlencode function, which is Copyright (C) Brian White (brian@aljex.com), used under the MIT license
+
+PROG="`basename $0`"
+ICINGA2HOST="`hostname`"
+MAILBIN="mail"
+
+if [ -z "`which $MAILBIN`" ] ; then
+ echo "$MAILBIN not found in \$PATH. Consider installing it."
+ exit 1
+fi
+
+## Function helpers
+Usage() {
+cat << EOF
+
+Required parameters:
+ -d LONGDATETIME (\$icinga.long_date_time\$)
+ -l HOSTNAME (\$host.name\$)
+ -n HOSTDISPLAYNAME (\$host.display_name\$)
+ -o HOSTOUTPUT (\$host.output\$)
+ -r USEREMAIL (\$user.email\$)
+ -s HOSTSTATE (\$host.state\$)
+ -t NOTIFICATIONTYPE (\$notification.type\$)
+
+Optional parameters:
+ -4 HOSTADDRESS (\$address\$)
+ -6 HOSTADDRESS6 (\$address6\$)
+ -X HOSTNOTES (\$host.notes\$)
+ -b NOTIFICATIONAUTHORNAME (\$notification.author\$)
+ -c NOTIFICATIONCOMMENT (\$notification.comment\$)
+ -i ICINGAWEB2URL (\$notification_icingaweb2url\$, Default: unset)
+ -f MAILFROM (\$notification_mailfrom\$, requires GNU mailutils (Debian/Ubuntu) or mailx (RHEL/SUSE))
+ -v (\$notification_sendtosyslog\$, Default: false)
+
+EOF
+}
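+
+## Illustrative invocation only (values are placeholders, not real macro output):
+##   ./mail-host-notification.sh -d "2024-01-01 10:00:00 +0000" -l web01.example.com \
+##     -n "Web Server" -o "PING CRITICAL - 100% packet loss" \
+##     -r admin@example.com -s DOWN -t PROBLEM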
+
+Help() {
+ Usage;
+ exit 0;
+}
+
+Error() {
+ if [ "$1" ]; then
+ echo $1
+ fi
+ Usage;
+ exit 1;
+}
+
+urlencode() {
+ local LANG=C i=0 c e s="$1"
+
+ while [ $i -lt ${#1} ]; do
+ [ "$i" -eq 0 ] || s="${s#?}"
+ c=${s%"${s#?}"}
+ [ -z "${c#[[:alnum:].~_-]}" ] || c=$(printf '%%%02X' "'$c")
+ e="${e}${c}"
+ i=$((i + 1))
+ done
+ echo "$e"
+}
+
+## Main
+while getopts 4:6:b:c:d:f:hi:l:n:o:r:s:t:v:X: opt
+do
+ case "$opt" in
+ 4) HOSTADDRESS=$OPTARG ;;
+ 6) HOSTADDRESS6=$OPTARG ;;
+ b) NOTIFICATIONAUTHORNAME=$OPTARG ;;
+ c) NOTIFICATIONCOMMENT=$OPTARG ;;
+ d) LONGDATETIME=$OPTARG ;; # required
+ f) MAILFROM=$OPTARG ;;
+ h) Help ;;
+ i) ICINGAWEB2URL=$OPTARG ;;
+ l) HOSTNAME=$OPTARG ;; # required
+ n) HOSTDISPLAYNAME=$OPTARG ;; # required
+ o) HOSTOUTPUT=$OPTARG ;; # required
+ X) HOSTNOTES=$OPTARG ;;
+ r) USEREMAIL=$OPTARG ;; # required
+ s) HOSTSTATE=$OPTARG ;; # required
+ t) NOTIFICATIONTYPE=$OPTARG ;; # required
+ v) VERBOSE=$OPTARG ;;
+ \?) echo "ERROR: Invalid option -$OPTARG" >&2
+ Error ;;
+ :) echo "Missing option argument for -$OPTARG" >&2
+ Error ;;
+ *) echo "Unimplemented option: -$OPTARG" >&2
+ Error ;;
+ esac
+done
+
+shift $((OPTIND - 1))
+
+## Keep formatting in sync with mail-service-notification.sh
+for P in LONGDATETIME HOSTNAME HOSTDISPLAYNAME HOSTOUTPUT HOSTSTATE USEREMAIL NOTIFICATIONTYPE ; do
+ eval "PAR=\$${P}"
+
+ if [ ! "$PAR" ] ; then
+ Error "Required parameter '$P' is missing."
+ fi
+done
+
+## Build the message's subject
+SUBJECT="[$NOTIFICATIONTYPE] Host $HOSTDISPLAYNAME is $HOSTSTATE!"
+
+## Build the notification message
+NOTIFICATION_MESSAGE=`cat << EOF
+***** Host Monitoring on $ICINGA2HOST *****
+
+$HOSTDISPLAYNAME is $HOSTSTATE!
+
+Info: $HOSTOUTPUT
+
+When: $LONGDATETIME
+Host: $HOSTNAME
+EOF
+`
+
+## Check whether IPv4 was specified.
+if [ -n "$HOSTADDRESS" ] ; then
+ NOTIFICATION_MESSAGE="$NOTIFICATION_MESSAGE
+IPv4: $HOSTADDRESS"
+fi
+
+## Check whether IPv6 was specified.
+if [ -n "$HOSTADDRESS6" ] ; then
+ NOTIFICATION_MESSAGE="$NOTIFICATION_MESSAGE
+IPv6: $HOSTADDRESS6"
+fi
+
+## Check whether host notes were specified.
+if [ -n "$HOSTNOTES" ] ; then
+ NOTIFICATION_MESSAGE="$NOTIFICATION_MESSAGE
+Host notes: $HOSTNOTES"
+fi
+
+## Check whether author and comment were specified.
+if [ -n "$NOTIFICATIONCOMMENT" ] ; then
+ NOTIFICATION_MESSAGE="$NOTIFICATION_MESSAGE
+
+Comment by $NOTIFICATIONAUTHORNAME:
+ $NOTIFICATIONCOMMENT"
+fi
+
+## Check whether Icinga Web 2 URL was specified.
+if [ -n "$ICINGAWEB2URL" ] ; then
+ NOTIFICATION_MESSAGE="$NOTIFICATION_MESSAGE
+
+$ICINGAWEB2URL/icingadb/host?name=$(urlencode "$HOSTNAME")"
+fi
+
+## Check whether verbose mode was enabled and log to syslog.
+if [ "$VERBOSE" = "true" ] ; then
+ logger "$PROG sends $SUBJECT => $USEREMAIL"
+fi
+
+## Send the mail using the $MAILBIN command.
+## If an explicit sender was specified, try to set it.
+if [ -n "$MAILFROM" ] ; then
+
+ ## Modify this for your own needs!
+
+ ## Debian/Ubuntu use mailutils which requires `-a` to append the header
+ if [ -f /etc/debian_version ]; then
+ /usr/bin/printf "%b" "$NOTIFICATION_MESSAGE" | $MAILBIN -a "From: $MAILFROM" -s "$SUBJECT" $USEREMAIL
+ ## Other distributions (RHEL/SUSE/etc.) prefer mailx which sets a sender address with `-r`
+ else
+ /usr/bin/printf "%b" "$NOTIFICATION_MESSAGE" | $MAILBIN -r "$MAILFROM" -s "$SUBJECT" $USEREMAIL
+ fi
+
+else
+ /usr/bin/printf "%b" "$NOTIFICATION_MESSAGE" \
+ | $MAILBIN -s "$SUBJECT" $USEREMAIL
+fi
diff --git a/etc/icinga2/scripts/mail-service-notification.sh b/etc/icinga2/scripts/mail-service-notification.sh
new file mode 100755
index 0000000..31d9137
--- /dev/null
+++ b/etc/icinga2/scripts/mail-service-notification.sh
@@ -0,0 +1,190 @@
+#!/usr/bin/env bash
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+# Except for the urlencode function, which is Copyright (C) Brian White (brian@aljex.com), used under the MIT license
+
+PROG="`basename $0`"
+ICINGA2HOST="`hostname`"
+MAILBIN="mail"
+
+if [ -z "`which $MAILBIN`" ] ; then
+ echo "$MAILBIN not found in \$PATH. Consider installing it."
+ exit 1
+fi
+
+## Function helpers
+Usage() {
+cat << EOF
+
+Required parameters:
+ -d LONGDATETIME (\$icinga.long_date_time\$)
+ -e SERVICENAME (\$service.name\$)
+ -l HOSTNAME (\$host.name\$)
+ -n HOSTDISPLAYNAME (\$host.display_name\$)
+ -o SERVICEOUTPUT (\$service.output\$)
+ -r USEREMAIL (\$user.email\$)
+ -s SERVICESTATE (\$service.state\$)
+ -t NOTIFICATIONTYPE (\$notification.type\$)
+ -u SERVICEDISPLAYNAME (\$service.display_name\$)
+
+Optional parameters:
+ -4 HOSTADDRESS (\$address\$)
+ -6 HOSTADDRESS6 (\$address6\$)
+ -X HOSTNOTES (\$host.notes\$)
+ -x SERVICENOTES (\$service.notes\$)
+ -b NOTIFICATIONAUTHORNAME (\$notification.author\$)
+ -c NOTIFICATIONCOMMENT (\$notification.comment\$)
+ -i ICINGAWEB2URL (\$notification_icingaweb2url\$, Default: unset)
+ -f MAILFROM (\$notification_mailfrom\$, requires GNU mailutils (Debian/Ubuntu) or mailx (RHEL/SUSE))
+ -v (\$notification_sendtosyslog\$, Default: false)
+
+EOF
+}
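+
+## Illustrative invocation only (placeholder values):
+##   ./mail-service-notification.sh -d "2024-01-01 10:00:00 +0000" -l web01.example.com \
+##     -n "Web Server" -e http -u "HTTP" -o "HTTP CRITICAL - connection refused" \
+##     -r admin@example.com -s CRITICAL -t PROBLEM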
+
+Help() {
+ Usage;
+ exit 0;
+}
+
+Error() {
+ if [ "$1" ]; then
+ echo $1
+ fi
+ Usage;
+ exit 1;
+}
+
+urlencode() {
+ local LANG=C i=0 c e s="$1"
+
+ while [ $i -lt ${#1} ]; do
+ [ "$i" -eq 0 ] || s="${s#?}"
+ c=${s%"${s#?}"}
+ [ -z "${c#[[:alnum:].~_-]}" ] || c=$(printf '%%%02X' "'$c")
+ e="${e}${c}"
+ i=$((i + 1))
+ done
+ echo "$e"
+}
+
+## Main
+while getopts 4:6:b:c:d:e:f:hi:l:n:o:r:s:t:u:v:X:x: opt
+do
+ case "$opt" in
+ 4) HOSTADDRESS=$OPTARG ;;
+ 6) HOSTADDRESS6=$OPTARG ;;
+ b) NOTIFICATIONAUTHORNAME=$OPTARG ;;
+ c) NOTIFICATIONCOMMENT=$OPTARG ;;
+ d) LONGDATETIME=$OPTARG ;; # required
+ e) SERVICENAME=$OPTARG ;; # required
+ f) MAILFROM=$OPTARG ;;
+	h) Help ;;
+ i) ICINGAWEB2URL=$OPTARG ;;
+ l) HOSTNAME=$OPTARG ;; # required
+ n) HOSTDISPLAYNAME=$OPTARG ;; # required
+ X) HOSTNOTES=$OPTARG ;;
+ x) SERVICENOTES=$OPTARG ;;
+ o) SERVICEOUTPUT=$OPTARG ;; # required
+ r) USEREMAIL=$OPTARG ;; # required
+ s) SERVICESTATE=$OPTARG ;; # required
+ t) NOTIFICATIONTYPE=$OPTARG ;; # required
+ u) SERVICEDISPLAYNAME=$OPTARG ;; # required
+ v) VERBOSE=$OPTARG ;;
+ \?) echo "ERROR: Invalid option -$OPTARG" >&2
+	Error ;;
+ :) echo "Missing option argument for -$OPTARG" >&2
+	Error ;;
+ *) echo "Unimplemented option: -$OPTARG" >&2
+	Error ;;
+ esac
+done
+
+shift $((OPTIND - 1))
+
+## Keep formatting in sync with mail-host-notification.sh
+for P in LONGDATETIME HOSTNAME HOSTDISPLAYNAME SERVICENAME SERVICEDISPLAYNAME SERVICEOUTPUT SERVICESTATE USEREMAIL NOTIFICATIONTYPE ; do
+ eval "PAR=\$${P}"
+
+ if [ ! "$PAR" ] ; then
+ Error "Required parameter '$P' is missing."
+ fi
+done
+
+## Build the message's subject
+SUBJECT="[$NOTIFICATIONTYPE] $SERVICEDISPLAYNAME on $HOSTDISPLAYNAME is $SERVICESTATE!"
+
+## Build the notification message
+NOTIFICATION_MESSAGE=`cat << EOF
+***** Service Monitoring on $ICINGA2HOST *****
+
+$SERVICEDISPLAYNAME on $HOSTDISPLAYNAME is $SERVICESTATE!
+
+Info: $SERVICEOUTPUT
+
+When: $LONGDATETIME
+Service: $SERVICENAME
+Host: $HOSTNAME
+EOF
+`
+
+## Check whether IPv4 was specified.
+if [ -n "$HOSTADDRESS" ] ; then
+ NOTIFICATION_MESSAGE="$NOTIFICATION_MESSAGE
+IPv4: $HOSTADDRESS"
+fi
+
+## Check whether IPv6 was specified.
+if [ -n "$HOSTADDRESS6" ] ; then
+ NOTIFICATION_MESSAGE="$NOTIFICATION_MESSAGE
+IPv6: $HOSTADDRESS6"
+fi
+
+## Check whether host notes were specified.
+if [ -n "$HOSTNOTES" ] ; then
+ NOTIFICATION_MESSAGE="$NOTIFICATION_MESSAGE
+Host notes: $HOSTNOTES"
+fi
+
+## Check whether service notes were specified.
+if [ -n "$SERVICENOTES" ] ; then
+ NOTIFICATION_MESSAGE="$NOTIFICATION_MESSAGE
+Service notes: $SERVICENOTES"
+fi
+
+## Check whether author and comment were specified.
+if [ -n "$NOTIFICATIONCOMMENT" ] ; then
+ NOTIFICATION_MESSAGE="$NOTIFICATION_MESSAGE
+
+Comment by $NOTIFICATIONAUTHORNAME:
+ $NOTIFICATIONCOMMENT"
+fi
+
+## Check whether Icinga Web 2 URL was specified.
+if [ -n "$ICINGAWEB2URL" ] ; then
+ NOTIFICATION_MESSAGE="$NOTIFICATION_MESSAGE
+
+$ICINGAWEB2URL/icingadb/service?name=$(urlencode "$SERVICENAME")&host.name=$(urlencode "$HOSTNAME")"
+fi
+
+## Check whether verbose mode was enabled and log to syslog.
+if [ "$VERBOSE" = "true" ] ; then
+ logger "$PROG sends $SUBJECT => $USEREMAIL"
+fi
+
+## Send the mail using the $MAILBIN command.
+## If an explicit sender was specified, try to set it.
+if [ -n "$MAILFROM" ] ; then
+
+ ## Modify this for your own needs!
+
+ ## Debian/Ubuntu use mailutils which requires `-a` to append the header
+ if [ -f /etc/debian_version ]; then
+ /usr/bin/printf "%b" "$NOTIFICATION_MESSAGE" | $MAILBIN -a "From: $MAILFROM" -s "$SUBJECT" $USEREMAIL
+ ## Other distributions (RHEL/SUSE/etc.) prefer mailx which sets a sender address with `-r`
+ else
+ /usr/bin/printf "%b" "$NOTIFICATION_MESSAGE" | $MAILBIN -r "$MAILFROM" -s "$SUBJECT" $USEREMAIL
+ fi
+
+else
+ /usr/bin/printf "%b" "$NOTIFICATION_MESSAGE" \
+ | $MAILBIN -s "$SUBJECT" $USEREMAIL
+fi
diff --git a/etc/icinga2/win32/constants.conf b/etc/icinga2/win32/constants.conf
new file mode 100644
index 0000000..f793d1d
--- /dev/null
+++ b/etc/icinga2/win32/constants.conf
@@ -0,0 +1,28 @@
+/**
+ * This file defines global constants which can be used in
+ * the other configuration files.
+ */
+
+/* The directory which contains the plugins from the Monitoring Plugins project. */
+const PluginDir = PrefixDir + "/sbin"
+
+/* The directory which contains the Manubulon plugins.
+ * Check the documentation, chapter "SNMP Manubulon Plugin Check Commands", for details.
+ */
+const ManubulonPluginDir = PrefixDir + "/sbin"
+
+/* The directory used to store additional plugins for which the ITL provides user-contributed command definitions.
+ * Check the documentation, chapter "Plugins Contribution", for details.
+ */
+const PluginContribDir = PrefixDir + "/sbin"
+
+/* Our local instance name. By default this is the server's hostname as returned by `hostname --fqdn`.
+ * This should be the common name from the API certificate.
+ */
+//const NodeName = "localhost"
+
+/* Our local zone name. */
+const ZoneName = NodeName
+
+/* Secret key for remote node tickets */
+const TicketSalt = ""
diff --git a/etc/icinga2/win32/icinga2.conf b/etc/icinga2/win32/icinga2.conf
new file mode 100644
index 0000000..cc47c41
--- /dev/null
+++ b/etc/icinga2/win32/icinga2.conf
@@ -0,0 +1,55 @@
+/**
+ * Icinga 2 configuration file
+ * - this is where you define settings for the Icinga application including
+ * which hosts/services to check.
+ *
+ * For an overview of all available configuration options please refer
+ * to the documentation that is distributed as part of Icinga 2.
+ */
+
+/**
+ * The constants.conf defines global constants.
+ */
+include "constants.conf"
+
+/**
+ * The zones.conf defines zones for a cluster setup.
+ * Not required for single instance setups.
+ */
+include "zones.conf"
+
+/**
+ * The Icinga Template Library (ITL) provides a number of useful templates
+ * and command definitions.
+ * Common monitoring plugin command definitions are included separately.
+ */
+include <itl>
+include <plugins>
+include <plugins-contrib>
+include <manubulon>
+
+/**
+ * This includes the Icinga 2 Windows plugins.
+ */
+include <windows-plugins>
+
+/**
+ * This includes the NSClient++ check commands.
+ */
+include <nscp>
+
+/**
+ * The features-available directory contains a number of configuration
+ * files for features which can be enabled and disabled using the
+ * icinga2 feature enable / icinga2 feature disable CLI commands.
+ * These commands work by creating and removing symbolic links in
+ * the features-enabled directory.
+ */
+include "features-enabled/*.conf"
+
+/**
+ * Although in theory you could define all your objects in this file,
+ * the preferred way is to create separate directories and files in the conf.d
+ * directory. Each of these files must have the file extension ".conf".
+ */
+include_recursive "conf.d"
diff --git a/etc/icinga2/zones.conf b/etc/icinga2/zones.conf
new file mode 100644
index 0000000..70ac766
--- /dev/null
+++ b/etc/icinga2/zones.conf
@@ -0,0 +1,63 @@
+/*
+ * Endpoint and Zone configuration for a cluster setup
+ * This local example requires `NodeName` defined in
+ * constants.conf.
+ */
+
+object Endpoint NodeName {
+ host = NodeName
+}
+
+object Zone ZoneName {
+ endpoints = [ NodeName ]
+}
+
+/*
+ * Defines a global zone for distributed setups with masters,
+ * satellites and clients.
+ * This is required to sync configuration commands,
+ * templates, apply rules, etc. to satellites and clients.
+ * All nodes require the same configuration and must
+ * have `accept_config` enabled in the `api` feature.
+ */
+
+object Zone "global-templates" {
+ global = true
+}
+
+/*
+ * Defines a global zone for the Icinga Director.
+ * This is required to sync configuration commands,
+ * templates, apply rules, etc. to satellites and clients.
+ * All nodes require the same configuration and must
+ * have `accept_config` enabled in the `api` feature.
+ */
+
+object Zone "director-global" {
+ global = true
+}
+
+/*
+ * Read the documentation on how to configure
+ * a cluster setup with multiple zones.
+ */
+
+/*
+object Endpoint "master.example.org" {
+ host = "master.example.org"
+}
+
+object Endpoint "satellite.example.org" {
+ host = "satellite.example.org"
+}
+
+object Zone "master" {
+ endpoints = [ "master.example.org" ]
+}
+
+object Zone "satellite" {
+ parent = "master"
+ endpoints = [ "satellite.example.org" ]
+}
+*/
+
diff --git a/etc/icinga2/zones.d/README b/etc/icinga2/zones.d/README
new file mode 100644
index 0000000..e378eae
--- /dev/null
+++ b/etc/icinga2/zones.d/README
@@ -0,0 +1,2 @@
+Please check the documentation for more details:
+https://icinga.com/docs/icinga2/latest/doc/06-distributed-monitoring/
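+
+Illustrative layout only (zone and file names are examples, not defaults):
+
+  zones.d/
+    master/hosts.conf                synced to endpoints in the "master" zone
+    global-templates/templates.conf  synced to all nodes via the global zone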
diff --git a/etc/initsystem/CMakeLists.txt b/etc/initsystem/CMakeLists.txt
new file mode 100644
index 0000000..eb0f9f2
--- /dev/null
+++ b/etc/initsystem/CMakeLists.txt
@@ -0,0 +1,44 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+
+if(NOT WIN32)
+ configure_file(icinga2.sysconfig.cmake ${CMAKE_CURRENT_BINARY_DIR}/initsystem/icinga2.sysconfig @ONLY)
+ get_filename_component(ICINGA2_SYSCONFIGFILE_NAME ${ICINGA2_SYSCONFIGFILE} NAME)
+ get_filename_component(ICINGA2_SYSCONFIGFILE_DIR ${ICINGA2_SYSCONFIGFILE} PATH)
+ install(
+ FILES ${CMAKE_CURRENT_BINARY_DIR}/initsystem/icinga2.sysconfig
+ DESTINATION ${ICINGA2_SYSCONFIGFILE_DIR}
+ RENAME ${ICINGA2_SYSCONFIGFILE_NAME}
+ PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ
+ )
+
+ configure_file(prepare-dirs.cmake ${CMAKE_CURRENT_BINARY_DIR}/initsystem/prepare-dirs @ONLY)
+ configure_file(safe-reload.cmake ${CMAKE_CURRENT_BINARY_DIR}/initsystem/safe-reload @ONLY)
+ install(
+ FILES ${CMAKE_CURRENT_BINARY_DIR}/initsystem/prepare-dirs ${CMAKE_CURRENT_BINARY_DIR}/initsystem/safe-reload
+ DESTINATION ${CMAKE_INSTALL_PREFIX}/lib/icinga2
+ PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE
+ )
+
+ # required for packaging on Gentoo, see Bug #6498
+ option (INSTALL_SYSTEMD_SERVICE_AND_INITSCRIPT
+ "Force install both the systemd service definition file and the SysV initscript in parallel, regardless of how USE_SYSTEMD is set. Only use this for special packaging purposes and if you know what you are doing" OFF)
+
+ if (NOT USE_SYSTEMD OR INSTALL_SYSTEMD_SERVICE_AND_INITSCRIPT)
+ configure_file(icinga2.init.d.cmake ${CMAKE_CURRENT_BINARY_DIR}/initsystem/icinga2 @ONLY)
+ install(
+ FILES ${CMAKE_CURRENT_BINARY_DIR}/initsystem/icinga2
+ DESTINATION ${CMAKE_INSTALL_SYSCONFDIR}/init.d
+ PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE
+ )
+ endif()
+
+ if (USE_SYSTEMD OR INSTALL_SYSTEMD_SERVICE_AND_INITSCRIPT)
+ configure_file(icinga2.service.cmake ${CMAKE_CURRENT_BINARY_DIR}/initsystem/icinga2.service @ONLY)
+ install(
+ FILES ${CMAKE_CURRENT_BINARY_DIR}/initsystem/icinga2.service
+ DESTINATION ${DESTDIR}/usr/lib/systemd/system
+ PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ
+ )
+ endif()
+endif()
diff --git a/etc/initsystem/icinga2.init.d.cmake b/etc/initsystem/icinga2.init.d.cmake
new file mode 100644
index 0000000..56588a7
--- /dev/null
+++ b/etc/initsystem/icinga2.init.d.cmake
@@ -0,0 +1,198 @@
+#!/bin/sh
+#
+# chkconfig: 35 90 12
+# description: Icinga 2
+#
+### BEGIN INIT INFO
+# Provides: icinga2
+# Required-Start: $remote_fs $syslog $network
+# Required-Stop: $remote_fs $syslog $network
+# Should-Start: mysql postgresql
+# Should-Stop: mysql postgresql
+# Default-Start: 2 3 5
+# Default-Stop: 0 1 6
+# Short-Description: icinga2 host/service/network monitoring and management system
+# Description: Icinga 2 is a monitoring and management system for hosts, services and networks.
+### END INIT INFO
+
+# Get function from functions library
+if [ -f /etc/rc.d/init.d/functions ]; then
+ . /etc/rc.d/init.d/functions
+elif [ -f /etc/init.d/functions ]; then
+ . /etc/init.d/functions
+fi
+
+# load system specific defines
+SYSCONFIGFILE=@ICINGA2_SYSCONFIGFILE@
+if [ -f "$SYSCONFIGFILE" ]; then
+ . "$SYSCONFIGFILE"
+else
+ echo "Couldn't load system specific defines from $SYSCONFIGFILE. Using defaults."
+fi
+
+# Set defaults, to overwrite see "@ICINGA2_SYSCONFIGFILE@"
+
+: "${ICINGA2_USER:="@ICINGA2_USER@"}"
+: "${ICINGA2_GROUP:="@ICINGA2_GROUP@"}"
+: "${ICINGA2_COMMAND_GROUP:="@ICINGA2_COMMAND_GROUP@"}"
+: "${DAEMON:="@CMAKE_INSTALL_FULL_SBINDIR@/icinga2"}"
+: "${ICINGA2_CONFIG_FILE:="@ICINGA2_CONFIGDIR@/icinga2.conf"}"
+: "${ICINGA2_ERROR_LOG:=@ICINGA2_LOGDIR@/error.log}"
+: "${ICINGA2_STARTUP_LOG:=@ICINGA2_LOGDIR@/startup.log}"
+: "${ICINGA2_PID_FILE:="@ICINGA2_INITRUNDIR@/icinga2.pid"}"
+
+# Load extra environment variables
+if [ -f /etc/default/icinga2 ]; then
+ . /etc/default/icinga2
+fi
+
+test -x "$DAEMON" || exit 5
+
+if [ ! -e "$ICINGA2_CONFIG_FILE" ]; then
+ echo "Config file '$ICINGA2_CONFIG_FILE' does not exist."
+ exit 6
+fi
+
+if ! getent passwd "$ICINGA2_USER" >/dev/null 2>&1; then
+ echo "Icinga user '$ICINGA2_USER' does not exist. Exiting."
+ exit 6
+fi
+
+if ! getent group "$ICINGA2_GROUP" >/dev/null 2>&1; then
+ echo "Icinga group '$ICINGA2_GROUP' does not exist. Exiting."
+ exit 6
+fi
+
+if ! getent group "$ICINGA2_COMMAND_GROUP" >/dev/null 2>&1; then
+ echo "Icinga command group '$ICINGA2_COMMAND_GROUP' does not exist. Exiting."
+ exit 6
+fi
+
+# Start Icinga 2
+start() {
+ printf "Starting Icinga 2: "
+ @CMAKE_INSTALL_PREFIX@/lib/icinga2/prepare-dirs "$SYSCONFIGFILE"
+
+ if ! "$DAEMON" daemon -c "$ICINGA2_CONFIG_FILE" -d -e "$ICINGA2_ERROR_LOG" > "$ICINGA2_STARTUP_LOG" 2>&1; then
+ echo "Error starting Icinga. Check '$ICINGA2_STARTUP_LOG' for details."
+ exit 1
+ else
+ echo "Done"
+ fi
+}
+
+# Stop Icinga 2
+stop() {
+ printf "Stopping Icinga 2: "
+
+ if [ ! -e "$ICINGA2_PID_FILE" ]; then
+ echo "The PID file '$ICINGA2_PID_FILE' does not exist."
+ if [ "x$1" = "xnofail" ]; then
+ return
+ else
+ exit 7
+ fi
+ fi
+
+ pid=`cat "$ICINGA2_PID_FILE"`
+
+ if icinga2 internal signal -s SIGINT -p "$pid" >/dev/null 2>&1; then
+ for i in 1 2 3 4 5 6 7 8 9 10; do
+ if ! icinga2 internal signal -s SIGCHLD -p "$pid" >/dev/null 2>&1; then
+ break
+ fi
+
+ printf '.'
+ sleep 3
+ done
+ fi
+
+ if icinga2 internal signal -s SIGCHLD -p "$pid" >/dev/null 2>&1; then
+ icinga2 internal signal -s SIGKILL -p "$pid" >/dev/null 2>&1
+ fi
+
+ echo "Done"
+}
+
+# Reload Icinga 2
+reload() {
+ exec @CMAKE_INSTALL_PREFIX@/lib/icinga2/safe-reload "$SYSCONFIGFILE"
+}
+
+# Check the Icinga 2 configuration
+checkconfig() {
+ printf "Checking configuration: "
+
+ if ! "$DAEMON" daemon -c "$ICINGA2_CONFIG_FILE" -C > "$ICINGA2_STARTUP_LOG" 2>&1; then
+ if [ "x$1" = "x" ]; then
+ cat "$ICINGA2_STARTUP_LOG"
+ echo "Icinga 2 detected configuration errors. Check '$ICINGA2_STARTUP_LOG' for details."
+ exit 1
+ else
+ echo "Not ${1}ing Icinga 2 due to configuration errors. Check '$ICINGA2_STARTUP_LOG' for details."
+ if [ "x$2" = "xfail" ]; then
+ exit 1
+ fi
+ fi
+ fi
+
+ echo "Done"
+ # no arguments requires full output
+ if [ "x$1" = "x" ]; then
+ cat "$ICINGA2_STARTUP_LOG"
+ fi
+}
+
+# Print status for Icinga 2
+status() {
+ printf "Icinga 2 status: "
+
+ if [ ! -e "$ICINGA2_PID_FILE" ]; then
+ echo "Not running"
+ exit 3
+ fi
+
+ pid=`cat "$ICINGA2_PID_FILE"`
+ if icinga2 internal signal -s SIGCHLD -p "$pid" >/dev/null 2>&1; then
+ echo "Running"
+ else
+ echo "Not running"
+ exit 3
+ fi
+}
+
+### main logic ###
+case "$1" in
+ start)
+ checkconfig start fail
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ status)
+ status
+ ;;
+ restart)
+ checkconfig restart fail
+ stop nofail
+ start
+ ;;
+ condrestart)
+ status > /dev/null 2>&1 || exit 0
+ checkconfig restart fail
+ stop nofail
+ start
+ ;;
+ reload)
+ reload
+ ;;
+ checkconfig)
+ checkconfig
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|restart|reload|checkconfig|status}"
+ exit 3
+esac
+
+exit 0
diff --git a/etc/initsystem/icinga2.service.cmake b/etc/initsystem/icinga2.service.cmake
new file mode 100644
index 0000000..ca85930
--- /dev/null
+++ b/etc/initsystem/icinga2.service.cmake
@@ -0,0 +1,30 @@
+[Unit]
+Description=Icinga host/service/network monitoring system
+Requires=network-online.target
+After=syslog.target network-online.target icingadb-redis.service postgresql.service mariadb.service carbon-cache.service carbon-relay.service
+
+[Service]
+Type=notify
+NotifyAccess=all
+Environment="ICINGA2_ERROR_LOG=@ICINGA2_LOGDIR@/error.log"
+EnvironmentFile=@ICINGA2_SYSCONFIGFILE@
+ExecStartPre=@CMAKE_INSTALL_PREFIX@/lib/icinga2/prepare-dirs @ICINGA2_SYSCONFIGFILE@
+ExecStart=@CMAKE_INSTALL_FULL_SBINDIR@/icinga2 daemon --close-stdio -e ${ICINGA2_ERROR_LOG}
+PIDFile=@ICINGA2_INITRUNDIR@/icinga2.pid
+ExecReload=@CMAKE_INSTALL_PREFIX@/lib/icinga2/safe-reload @ICINGA2_SYSCONFIGFILE@
+TimeoutStartSec=30m
+KillMode=mixed
+
+# systemd > 228 enforces a lower default limit on the number of processes
+# (tasks) for services. Depending on the distribution and systemd version,
+# this limit must be raised explicitly. Packages will set the needed values
+# into /etc/systemd/system/icinga2.service.d/limits.conf
+#
+# Please check the troubleshooting documentation for further details.
+# The values below can be used as examples for customized service files.
+
+#TasksMax=infinity
+#LimitNPROC=62883
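+#
+# Illustrative commands only (shell, not part of this unit): such a drop-in
+# can be created with
+#   systemctl edit icinga2
+# and takes effect after the service is restarted.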
+
+[Install]
+WantedBy=multi-user.target
diff --git a/etc/initsystem/icinga2.service.limits.conf b/etc/initsystem/icinga2.service.limits.conf
new file mode 100644
index 0000000..dea0cde
--- /dev/null
+++ b/etc/initsystem/icinga2.service.limits.conf
@@ -0,0 +1,9 @@
+# Icinga 2 sets Systemd default values to extend OS defaults.
+#
+# Please check the troubleshooting documentation for further details.
+
+[Service]
+TasksMax=infinity
+
+# Uncomment this setting in case of further problems.
+#LimitNPROC=62883
diff --git a/etc/initsystem/icinga2.sysconfig.cmake b/etc/initsystem/icinga2.sysconfig.cmake
new file mode 100644
index 0000000..167c125
--- /dev/null
+++ b/etc/initsystem/icinga2.sysconfig.cmake
@@ -0,0 +1,15 @@
+# This is the default environment Icinga 2 runs with.
+# Make your changes here.
+
+#DAEMON=@CMAKE_INSTALL_FULL_SBINDIR@/icinga2
+#ICINGA2_CONFIG_FILE=@ICINGA2_CONFIGDIR@/icinga2.conf
+#ICINGA2_INIT_RUN_DIR=@ICINGA2_INITRUNDIR@
+#ICINGA2_PID_FILE=@ICINGA2_INITRUNDIR@/icinga2.pid
+#ICINGA2_LOG_DIR=@ICINGA2_LOGDIR@
+#ICINGA2_ERROR_LOG=@ICINGA2_LOGDIR@/error.log
+#ICINGA2_STARTUP_LOG=@ICINGA2_LOGDIR@/startup.log
+#ICINGA2_LOG=@ICINGA2_LOGDIR@/icinga2.log
+#ICINGA2_CACHE_DIR=@ICINGA2_CACHEDIR@
+#ICINGA2_USER=@ICINGA2_USER@
+#ICINGA2_GROUP=@ICINGA2_GROUP@
+#ICINGA2_COMMAND_GROUP=@ICINGA2_COMMAND_GROUP@
diff --git a/etc/initsystem/prepare-dirs.cmake b/etc/initsystem/prepare-dirs.cmake
new file mode 100644
index 0000000..0053b90
--- /dev/null
+++ b/etc/initsystem/prepare-dirs.cmake
@@ -0,0 +1,52 @@
+#!/bin/sh
+#
+# This script prepares the directories and files needed for running Icinga 2
+#
+
+# Load sysconf on systems where the initsystem does not pass the environment
+if [ "$1" != "" ]; then
+ if [ -r "$1" ]; then
+ . "$1"
+ else
+ echo "Unable to read sysconf from '$1'. Exiting." && exit 6
+ fi
+fi
+
+# Set defaults, to overwrite see "@ICINGA2_SYSCONFIGFILE@"
+
+: "${ICINGA2_USER:="@ICINGA2_USER@"}"
+: "${ICINGA2_GROUP:="@ICINGA2_GROUP@"}"
+: "${ICINGA2_COMMAND_GROUP:="@ICINGA2_COMMAND_GROUP@"}"
+: "${ICINGA2_INIT_RUN_DIR:="@ICINGA2_FULL_INITRUNDIR@"}"
+: "${ICINGA2_LOG_DIR:="@ICINGA2_FULL_LOGDIR@"}"
+: "${ICINGA2_CACHE_DIR:="@ICINGA2_FULL_CACHEDIR@"}"
+
+if ! getent passwd "$ICINGA2_USER" >/dev/null 2>&1; then
+ echo "Icinga user '$ICINGA2_USER' does not exist. Exiting."
+ exit 6
+fi
+
+if ! getent group "$ICINGA2_GROUP" >/dev/null 2>&1; then
+ echo "Icinga group '$ICINGA2_GROUP' does not exist. Exiting."
+ exit 6
+fi
+
+if ! getent group "$ICINGA2_COMMAND_GROUP" >/dev/null 2>&1; then
+ echo "Icinga command group '$ICINGA2_COMMAND_GROUP' does not exist. Exiting."
+ exit 6
+fi
+
+if [ ! -e "$ICINGA2_INIT_RUN_DIR" ]; then
+ mkdir -m 755 "$ICINGA2_INIT_RUN_DIR"
+ mkdir -m 2750 "$ICINGA2_INIT_RUN_DIR"/cmd
+fi
+
+chown -R "$ICINGA2_USER:$ICINGA2_COMMAND_GROUP" "$ICINGA2_INIT_RUN_DIR"
+
+test -e "$ICINGA2_LOG_DIR" || install -m 750 -o "$ICINGA2_USER" -g "$ICINGA2_COMMAND_GROUP" -d "$ICINGA2_LOG_DIR"
+
+if type restorecon >/dev/null 2>&1; then
+ restorecon -R "$ICINGA2_INIT_RUN_DIR"/
+fi
+
+test -e "$ICINGA2_CACHE_DIR" || install -m 750 -o "$ICINGA2_USER" -g "$ICINGA2_COMMAND_GROUP" -d "$ICINGA2_CACHE_DIR"
diff --git a/etc/initsystem/safe-reload.cmake b/etc/initsystem/safe-reload.cmake
new file mode 100644
index 0000000..0cba415
--- /dev/null
+++ b/etc/initsystem/safe-reload.cmake
@@ -0,0 +1,52 @@
+#!/bin/sh
+
+# Load sysconf on systems where the initsystem does not pass the environment
+if [ "$1" != "" ]; then
+ if [ -r "$1" ]; then
+ . "$1"
+ else
+ echo "Unable to read sysconf from '$1'. Exiting."
+ exit 6
+ fi
+fi
+
+# Set defaults, to overwrite see "@ICINGA2_SYSCONFIGFILE@"
+
+: "${ICINGA2_PID_FILE:="@ICINGA2_FULL_INITRUNDIR@/icinga2.pid"}"
+: "${DAEMON:="@CMAKE_INSTALL_FULL_SBINDIR@/icinga2"}"
+
+printf "Validating config files: "
+
+OUTPUTFILE=`mktemp`
+
+if type selinuxenabled >/dev/null 2>&1; then
+ if selinuxenabled; then
+ chcon -t icinga2_tmp_t "$OUTPUTFILE" >/dev/null 2>&1
+ fi
+fi
+
+if ! "$DAEMON" daemon --validate --color > "$OUTPUTFILE"; then
+ echo "Failed"
+
+ cat "$OUTPUTFILE"
+ rm -f "$OUTPUTFILE"
+ exit 1
+fi
+
+echo "Done"
+rm -f "$OUTPUTFILE"
+
+printf "Reloading Icinga 2: "
+
+if [ ! -e "$ICINGA2_PID_FILE" ]; then
+ exit 7
+fi
+
+pid=`cat "$ICINGA2_PID_FILE"`
+if ! kill -HUP "$pid" >/dev/null 2>&1; then
+ echo "Error: Icinga not running"
+ exit 7
+fi
+
+echo "Done"
+exit 0
diff --git a/etc/logrotate.d/icinga2.cmake b/etc/logrotate.d/icinga2.cmake
new file mode 100644
index 0000000..f0a9e59
--- /dev/null
+++ b/etc/logrotate.d/icinga2.cmake
@@ -0,0 +1,21 @@
+@ICINGA2_LOGDIR@/icinga2.log @ICINGA2_LOGDIR@/debug.log {
+ daily
+ rotate 7@LOGROTATE_USE_SU@
+ compress
+ delaycompress
+ missingok
+ notifempty@LOGROTATE_CREATE@
+ postrotate
+ /bin/kill -USR1 $(cat @ICINGA2_INITRUNDIR@/icinga2.pid 2> /dev/null) 2> /dev/null || true
+ endscript
+}
+
+@ICINGA2_LOGDIR@/error.log {
+ daily
+ rotate 90@LOGROTATE_USE_SU@
+ compress
+ delaycompress
+ missingok
+ notifempty@LOGROTATE_CREATE@
+ # TODO: figure out how to get Icinga to re-open this log file
+}
diff --git a/icinga-app/CMakeLists.txt b/icinga-app/CMakeLists.txt
new file mode 100644
index 0000000..ef71ad9
--- /dev/null
+++ b/icinga-app/CMakeLists.txt
@@ -0,0 +1,100 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+if(MSVC)
+ set(WindowsSources icinga.rc)
+else()
+ set(WindowsSources "")
+endif()
+
+set(icingaloader_SOURCES
+ icinga.cpp
+ ${WindowsSources}
+)
+
+add_library(icingaloader OBJECT ${icingaloader_SOURCES})
+add_dependencies(icingaloader base config cli)
+
+set_target_properties (
+ icingaloader PROPERTIES
+ FOLDER Lib
+)
+
+include_directories(${Boost_INCLUDE_DIRS})
+
+if(ICINGA2_WITH_CHECKER)
+ list(APPEND icinga_app_SOURCES $<TARGET_OBJECTS:checker>)
+endif()
+
+if(ICINGA2_WITH_COMPAT)
+ list(APPEND icinga_app_SOURCES $<TARGET_OBJECTS:compat>)
+endif()
+
+if(ICINGA2_WITH_MYSQL OR ICINGA2_WITH_PGSQL)
+ list(APPEND icinga_app_SOURCES $<TARGET_OBJECTS:db_ido>)
+endif()
+
+if(ICINGA2_WITH_MYSQL)
+ list(APPEND icinga_app_SOURCES $<TARGET_OBJECTS:db_ido_mysql>)
+endif()
+
+if(ICINGA2_WITH_PGSQL)
+ list(APPEND icinga_app_SOURCES $<TARGET_OBJECTS:db_ido_pgsql>)
+endif()
+
+if(ICINGA2_WITH_LIVESTATUS)
+ list(APPEND icinga_app_SOURCES $<TARGET_OBJECTS:livestatus>)
+endif()
+
+if(ICINGA2_WITH_NOTIFICATION)
+ list(APPEND icinga_app_SOURCES $<TARGET_OBJECTS:notification>)
+endif()
+
+if(ICINGA2_WITH_PERFDATA)
+ list(APPEND icinga_app_SOURCES $<TARGET_OBJECTS:perfdata>)
+endif()
+
+if(ICINGA2_WITH_ICINGADB)
+ list(APPEND icinga_app_SOURCES $<TARGET_OBJECTS:icingadb>)
+endif()
+
+add_executable(icinga-app
+ $<TARGET_OBJECTS:icingaloader>
+ ${base_OBJS}
+ $<TARGET_OBJECTS:config>
+ $<TARGET_OBJECTS:remote>
+ $<TARGET_OBJECTS:cli>
+ $<TARGET_OBJECTS:icinga>
+ $<TARGET_OBJECTS:methods>
+ ${icinga_app_SOURCES}
+)
+
+target_link_libraries(icinga-app ${base_DEPS})
+
+set_target_properties (
+ icinga-app PROPERTIES
+ FOLDER Bin
+ OUTPUT_NAME icinga2
+)
+
+if(WIN32)
+ set(InstallPath "${CMAKE_INSTALL_SBINDIR}")
+else()
+ configure_file(icinga2.cmake ${CMAKE_CURRENT_BINARY_DIR}/icinga2 @ONLY)
+
+ install(
+ FILES ${CMAKE_CURRENT_BINARY_DIR}/icinga2
+ DESTINATION ${CMAKE_INSTALL_SBINDIR}
+ PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE
+ )
+
+ set(InstallPath "${CMAKE_INSTALL_LIBDIR}/icinga2/sbin")
+endif()
+
+install(
+ TARGETS icinga-app
+ RUNTIME DESTINATION ${InstallPath}
+)
+
+install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_LOGDIR}\")")
+install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_DATADIR}\")")
+install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_INITRUNDIR}\")")
diff --git a/icinga-app/icinga.cpp b/icinga-app/icinga.cpp
new file mode 100644
index 0000000..1811c8e
--- /dev/null
+++ b/icinga-app/icinga.cpp
@@ -0,0 +1,949 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/clicommand.hpp"
+#include "config/configcompilercontext.hpp"
+#include "config/configcompiler.hpp"
+#include "config/configitembuilder.hpp"
+#include "config/expression.hpp"
+#include "base/application.hpp"
+#include "base/configuration.hpp"
+#include "base/logger.hpp"
+#include "base/timer.hpp"
+#include "base/utility.hpp"
+#include "base/loader.hpp"
+#include "base/exception.hpp"
+#include "base/convert.hpp"
+#include "base/scriptglobal.hpp"
+#include "base/context.hpp"
+#include "base/console.hpp"
+#include "base/process.hpp"
+#include "config.h"
+#include <boost/program_options.hpp>
+#include <boost/algorithm/string/split.hpp>
+#include <thread>
+
+#ifndef _WIN32
+# include <sys/types.h>
+# include <pwd.h>
+# include <grp.h>
+#else
+# include <windows.h>
+# include <Lmcons.h>
+# include <Shellapi.h>
+# include <tchar.h>
+#endif /* _WIN32 */
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+#ifdef _WIN32
+static SERVICE_STATUS l_SvcStatus;
+static SERVICE_STATUS_HANDLE l_SvcStatusHandle;
+static HANDLE l_Job;
+#endif /* _WIN32 */
+
+static std::vector<String> GetLogLevelCompletionSuggestions(const String& arg)
+{
+ std::vector<String> result;
+
+ String debugLevel = "debug";
+ if (debugLevel.Find(arg) == 0)
+ result.push_back(debugLevel);
+
+ String noticeLevel = "notice";
+ if (noticeLevel.Find(arg) == 0)
+ result.push_back(noticeLevel);
+
+ String informationLevel = "information";
+ if (informationLevel.Find(arg) == 0)
+ result.push_back(informationLevel);
+
+ String warningLevel = "warning";
+ if (warningLevel.Find(arg) == 0)
+ result.push_back(warningLevel);
+
+ String criticalLevel = "critical";
+ if (criticalLevel.Find(arg) == 0)
+ result.push_back(criticalLevel);
+
+ return result;
+}
+
+static std::vector<String> GlobalArgumentCompletion(const String& argument, const String& word)
+{
+ if (argument == "include")
+ return GetBashCompletionSuggestions("directory", word);
+ else if (argument == "log-level")
+ return GetLogLevelCompletionSuggestions(word);
+ else
+ return std::vector<String>();
+}
+
+static void HandleLegacyDefines()
+{
+#ifdef _WIN32
+ String dataPrefix = Utility::GetIcingaDataPath();
+#endif /* _WIN32 */
+
+ Value localStateDir = Configuration::LocalStateDir;
+
+ if (!localStateDir.IsEmpty()) {
+ Log(LogWarning, "icinga-app")
+ << "Please do not set the deprecated 'LocalStateDir' constant,"
+ << " use the 'DataDir', 'LogDir', 'CacheDir' and 'SpoolDir' constants instead!"
+ << " For compatibility reasons, these are now set based on the 'LocalStateDir' constant.";
+
+#ifdef _WIN32
+ Configuration::DataDir = localStateDir + "\\lib\\icinga2";
+ Configuration::LogDir = localStateDir + "\\log\\icinga2";
+ Configuration::CacheDir = localStateDir + "\\cache\\icinga2";
+ Configuration::SpoolDir = localStateDir + "\\spool\\icinga2";
+ } else {
+ Configuration::LocalStateDir = dataPrefix + "\\var";
+#else /* _WIN32 */
+ Configuration::DataDir = localStateDir + "/lib/icinga2";
+ Configuration::LogDir = localStateDir + "/log/icinga2";
+ Configuration::CacheDir = localStateDir + "/cache/icinga2";
+ Configuration::SpoolDir = localStateDir + "/spool/icinga2";
+ } else {
+ Configuration::LocalStateDir = ICINGA_LOCALSTATEDIR;
+#endif /* _WIN32 */
+ }
+
+ Value sysconfDir = Configuration::SysconfDir;
+ if (!sysconfDir.IsEmpty()) {
+ Log(LogWarning, "icinga-app")
+ << "Please do not set the deprecated 'Sysconfdir' constant, use the 'ConfigDir' constant instead! For compatibility reasons, their value is set based on the 'SysconfDir' constant.";
+
+#ifdef _WIN32
+ Configuration::ConfigDir = sysconfDir + "\\icinga2";
+ } else {
+ Configuration::SysconfDir = dataPrefix + "\\etc";
+#else /* _WIN32 */
+ Configuration::ConfigDir = sysconfDir + "/icinga2";
+ } else {
+ Configuration::SysconfDir = ICINGA_SYSCONFDIR;
+#endif /* _WIN32 */
+ }
+
+ Value runDir = Configuration::RunDir;
+ if (!runDir.IsEmpty()) {
+ Log(LogWarning, "icinga-app")
+ << "Please do not set the deprecated 'RunDir' constant, use the 'InitRunDir' constant instead! For compatibility reasons, their value is set based on the 'RunDir' constant.";
+
+#ifdef _WIN32
+ Configuration::InitRunDir = runDir + "\\icinga2";
+ } else {
+ Configuration::RunDir = dataPrefix + "\\var\\run";
+#else /* _WIN32 */
+ Configuration::InitRunDir = runDir + "/icinga2";
+ } else {
+ Configuration::RunDir = ICINGA_RUNDIR;
+#endif /* _WIN32 */
+ }
+}
+
+static int Main()
+{
+ int argc = Application::GetArgC();
+ char **argv = Application::GetArgV();
+
+ bool autocomplete = false;
+ int autoindex = 0;
+
+ if (argc >= 4 && strcmp(argv[1], "--autocomplete") == 0) {
+ autocomplete = true;
+
+ try {
+ autoindex = Convert::ToLong(argv[2]);
+ } catch (const std::invalid_argument&) {
+ Log(LogCritical, "icinga-app")
+ << "Invalid index for --autocomplete: " << argv[2];
+ return EXIT_FAILURE;
+ }
+
+ argc -= 3;
+ argv += 3;
+ }
+
+ /* Set thread title. */
+ Utility::SetThreadName("Main Thread", false);
+
+ /* Install exception handlers to make debugging easier. */
+ Application::InstallExceptionHandlers();
+
+#ifdef _WIN32
+ bool builtinPaths = true;
+
+	/* Program install location, C:/Program Files/Icinga2 */
+	String binaryPrefix = Utility::GetIcingaInstallPath();
+	/* Returns the data path for daemons, %PROGRAMDATA%/icinga2 */
+ String dataPrefix = Utility::GetIcingaDataPath();
+
+ if (!binaryPrefix.IsEmpty() && !dataPrefix.IsEmpty()) {
+ Configuration::ProgramData = dataPrefix;
+
+ Configuration::ConfigDir = dataPrefix + "\\etc\\icinga2";
+
+ Configuration::DataDir = dataPrefix + "\\var\\lib\\icinga2";
+ Configuration::LogDir = dataPrefix + "\\var\\log\\icinga2";
+ Configuration::CacheDir = dataPrefix + "\\var\\cache\\icinga2";
+ Configuration::SpoolDir = dataPrefix + "\\var\\spool\\icinga2";
+
+ Configuration::PrefixDir = binaryPrefix;
+
+ /* Internal constants. */
+ Configuration::PkgDataDir = binaryPrefix + "\\share\\icinga2";
+ Configuration::IncludeConfDir = binaryPrefix + "\\share\\icinga2\\include";
+
+ Configuration::InitRunDir = dataPrefix + "\\var\\run\\icinga2";
+ } else {
+ Log(LogWarning, "icinga-app", "Registry key could not be read. Falling back to built-in paths.");
+
+#endif /* _WIN32 */
+ Configuration::ConfigDir = ICINGA_CONFIGDIR;
+
+ Configuration::DataDir = ICINGA_DATADIR;
+ Configuration::LogDir = ICINGA_LOGDIR;
+ Configuration::CacheDir = ICINGA_CACHEDIR;
+ Configuration::SpoolDir = ICINGA_SPOOLDIR;
+
+ Configuration::PrefixDir = ICINGA_PREFIX;
+
+ /* Internal constants. */
+ Configuration::PkgDataDir = ICINGA_PKGDATADIR;
+ Configuration::IncludeConfDir = ICINGA_INCLUDECONFDIR;
+
+ Configuration::InitRunDir = ICINGA_INITRUNDIR;
+
+#ifdef _WIN32
+ }
+#endif /* _WIN32 */
+
+ Configuration::ZonesDir = Configuration::ConfigDir + "/zones.d";
+
+ String icingaUser = Utility::GetFromEnvironment("ICINGA2_USER");
+ if (icingaUser.IsEmpty())
+ icingaUser = ICINGA_USER;
+
+ String icingaGroup = Utility::GetFromEnvironment("ICINGA2_GROUP");
+ if (icingaGroup.IsEmpty())
+ icingaGroup = ICINGA_GROUP;
+
+ Configuration::RunAsUser = icingaUser;
+ Configuration::RunAsGroup = icingaGroup;
+
+ if (!autocomplete) {
+#ifdef RLIMIT_NOFILE
+ String rLimitFiles = Utility::GetFromEnvironment("ICINGA2_RLIMIT_FILES");
+ if (rLimitFiles.IsEmpty())
+ Configuration::RLimitFiles = Application::GetDefaultRLimitFiles();
+ else {
+ try {
+ Configuration::RLimitFiles = Convert::ToLong(rLimitFiles);
+ } catch (const std::invalid_argument& ex) {
+ std::cout
+ << "Error setting \"ICINGA2_RLIMIT_FILES\": " << ex.what() << '\n';
+ return EXIT_FAILURE;
+ }
+ }
+#endif /* RLIMIT_NOFILE */
+
+#ifdef RLIMIT_NPROC
+ String rLimitProcesses = Utility::GetFromEnvironment("ICINGA2_RLIMIT_PROCESSES");
+ if (rLimitProcesses.IsEmpty())
+ Configuration::RLimitProcesses = Application::GetDefaultRLimitProcesses();
+ else {
+ try {
+ Configuration::RLimitProcesses = Convert::ToLong(rLimitProcesses);
+ } catch (const std::invalid_argument& ex) {
+ std::cout
+ << "Error setting \"ICINGA2_RLIMIT_PROCESSES\": " << ex.what() << '\n';
+ return EXIT_FAILURE;
+ }
+ }
+#endif /* RLIMIT_NPROC */
+
+#ifdef RLIMIT_STACK
+ String rLimitStack = Utility::GetFromEnvironment("ICINGA2_RLIMIT_STACK");
+ if (rLimitStack.IsEmpty())
+ Configuration::RLimitStack = Application::GetDefaultRLimitStack();
+ else {
+ try {
+ Configuration::RLimitStack = Convert::ToLong(rLimitStack);
+ } catch (const std::invalid_argument& ex) {
+ std::cout
+ << "Error setting \"ICINGA2_RLIMIT_STACK\": " << ex.what() << '\n';
+ return EXIT_FAILURE;
+ }
+ }
+#endif /* RLIMIT_STACK */
+ }
+
+ if (!autocomplete)
+ Application::SetResourceLimits();
+
+ LogSeverity logLevel = Logger::GetConsoleLogSeverity();
+ Logger::SetConsoleLogSeverity(LogWarning);
+
+ po::options_description visibleDesc("Global options");
+
+ visibleDesc.add_options()
+ ("help,h", "show this help message")
+ ("version,V", "show version information")
+#ifndef _WIN32
+ ("color", "use VT100 color codes even when stdout is not a terminal")
+#endif /* _WIN32 */
+ ("define,D", po::value<std::vector<std::string> >(), "define a constant")
+ ("include,I", po::value<std::vector<std::string> >(), "add include search directory")
+ ("log-level,x", po::value<std::string>(), "specify the log level for the console log.\n"
+ "The valid value is either debug, notice, information (default), warning, or critical")
+ ("script-debugger,X", "whether to enable the script debugger");
+
+ po::options_description hiddenDesc("Hidden options");
+
+ hiddenDesc.add_options()
+ ("no-stack-rlimit", "used internally, do not specify manually")
+ ("arg", po::value<std::vector<std::string> >(), "positional argument");
+
+ po::positional_options_description positionalDesc;
+ positionalDesc.add("arg", -1);
+
+ String cmdname;
+ CLICommand::Ptr command;
+ po::variables_map vm;
+
+ try {
+ CLICommand::ParseCommand(argc, argv, visibleDesc, hiddenDesc, positionalDesc,
+ vm, cmdname, command, autocomplete);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "icinga-app")
+ << "Error while parsing command-line options: " << ex.what();
+ return EXIT_FAILURE;
+ }
+
+#ifdef _WIN32
+ char username[UNLEN + 1];
+ DWORD usernameLen = UNLEN + 1;
+ GetUserName(username, &usernameLen);
+
+ std::ifstream userFile;
+
+ /* The implicit string assignment is needed for Windows builds. */
+ String configDir = Configuration::ConfigDir;
+ userFile.open(configDir + "/user");
+
+ if (userFile && command && !Application::IsProcessElevated()) {
+ std::string userLine;
+ if (std::getline(userFile, userLine)) {
+ userFile.close();
+
+ std::vector<std::string> strs;
+ boost::split(strs, userLine, boost::is_any_of("\\"));
+
+ if (username != strs[1] && command->GetImpersonationLevel() == ImpersonationLevel::ImpersonateIcinga
+ || command->GetImpersonationLevel() == ImpersonationLevel::ImpersonateRoot) {
+ TCHAR szPath[MAX_PATH];
+
+ if (GetModuleFileName(nullptr, szPath, ARRAYSIZE(szPath))) {
+ SHELLEXECUTEINFO sei = { sizeof(sei) };
+ sei.lpVerb = _T("runas");
+ sei.lpFile = "cmd.exe";
+ sei.fMask = SEE_MASK_NOCLOSEPROCESS | SEE_MASK_NOASYNC | SEE_MASK_FLAG_NO_UI;
+ sei.nShow = SW_SHOW;
+
+ std::stringstream parameters;
+
+ parameters << "/C " << "\"" << szPath << "\"" << " ";
+
+ for (int i = 1; i < argc; i++) {
+ if (i != 1)
+ parameters << " ";
+ parameters << argv[i];
+ }
+
+ parameters << " & SET exitcode=%errorlevel%";
+ parameters << " & pause";
+ parameters << " & EXIT /B %exitcode%";
+
+ std::string str = parameters.str();
+ LPCSTR cstr = str.c_str();
+
+ sei.lpParameters = cstr;
+
+ if (!ShellExecuteEx(&sei)) {
+ DWORD dwError = GetLastError();
+ if (dwError == ERROR_CANCELLED)
+ Application::Exit(0);
+ } else {
+ WaitForSingleObject(sei.hProcess, INFINITE);
+
+ DWORD exitCode;
+ GetExitCodeProcess(sei.hProcess, &exitCode);
+
+ CloseHandle(sei.hProcess);
+
+ Application::Exit(exitCode);
+ }
+ }
+ }
+ } else {
+ userFile.close();
+ }
+ }
+#endif /* _WIN32 */
+
+#ifndef _WIN32
+ if (vm.count("color")) {
+ Console::SetType(std::cout, Console_VT100);
+ Console::SetType(std::cerr, Console_VT100);
+ }
+#endif /* _WIN32 */
+
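+	/* Apply constants given via -D/--define before the configuration is compiled.
+	 * A key such as "a.b" is translated into nested indexers below. Illustrative
+	 * invocation only (not taken from this file):
+	 *   icinga2 daemon -C -D "Environment=staging"
+	 * defines the global constant Environment for the validation run. */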
+ if (vm.count("define")) {
+ for (const String& define : vm["define"].as<std::vector<std::string> >()) {
+ String key, value;
+ size_t pos = define.FindFirstOf('=');
+ if (pos != String::NPos) {
+ key = define.SubStr(0, pos);
+ value = define.SubStr(pos + 1);
+ } else {
+ key = define;
+ value = "1";
+ }
+
+ std::vector<String> keyTokens = key.Split(".");
+
+ std::unique_ptr<Expression> expr;
+ std::unique_ptr<VariableExpression> varExpr{new VariableExpression(keyTokens[0], {}, DebugInfo())};
+ expr = std::move(varExpr);
+
+ for (size_t i = 1; i < keyTokens.size(); i++) {
+ std::unique_ptr<IndexerExpression> indexerExpr{new IndexerExpression(std::move(expr), MakeLiteral(keyTokens[i]))};
+ indexerExpr->SetOverrideFrozen();
+ expr = std::move(indexerExpr);
+ }
+
+ std::unique_ptr<SetExpression> setExpr{new SetExpression(std::move(expr), OpSetLiteral, MakeLiteral(value))};
+ setExpr->SetOverrideFrozen();
+
+ try {
+ ScriptFrame frame(true);
+ setExpr->Evaluate(frame);
+ } catch (const ScriptError& e) {
+ Log(LogCritical, "icinga-app") << "cannot set '" << key << "': " << e.what();
+ return EXIT_FAILURE;
+ }
+ }
+ }
+
+ Configuration::SetReadOnly(true);
+
+ if (!Configuration::ConcurrencyWasModified) {
+ Configuration::Concurrency = std::thread::hardware_concurrency();
+ }
+
+ Application::GetTP().Restart();
+
+ /* Ensure that all defined constants work in the way we expect them. */
+ HandleLegacyDefines();
+
+ if (vm.count("script-debugger"))
+ Application::SetScriptDebuggerEnabled(true);
+
+ Configuration::StatePath = Configuration::DataDir + "/icinga2.state";
+ Configuration::ModAttrPath = Configuration::DataDir + "/modified-attributes.conf";
+ Configuration::ObjectsPath = Configuration::CacheDir + "/icinga2.debug";
+ Configuration::VarsPath = Configuration::CacheDir + "/icinga2.vars";
+ Configuration::PidPath = Configuration::InitRunDir + "/icinga2.pid";
+
+ ConfigCompiler::AddIncludeSearchDir(Configuration::IncludeConfDir);
+
+ if (!autocomplete && vm.count("include")) {
+ for (const String& includePath : vm["include"].as<std::vector<std::string> >()) {
+ ConfigCompiler::AddIncludeSearchDir(includePath);
+ }
+ }
+
+ if (!autocomplete) {
+ Logger::SetConsoleLogSeverity(logLevel);
+
+ if (vm.count("log-level")) {
+ String severity = vm["log-level"].as<std::string>();
+
+ LogSeverity logLevel = LogInformation;
+ try {
+ logLevel = Logger::StringToSeverity(severity);
+ } catch (std::exception&) {
+ /* Inform user and exit */
+ Log(LogCritical, "icinga-app", "Invalid log level set. Default is 'information'.");
+ return EXIT_FAILURE;
+ }
+
+ Logger::SetConsoleLogSeverity(logLevel);
+ }
+
+ if (!command || vm.count("help") || vm.count("version")) {
+ String appName;
+
+ try {
+ appName = Utility::BaseName(Application::GetArgV()[0]);
+ } catch (const std::bad_alloc&) {
+ Log(LogCritical, "icinga-app", "Allocation failed.");
+ return EXIT_FAILURE;
+ }
+
+ if (appName.GetLength() > 3 && appName.SubStr(0, 3) == "lt-")
+ appName = appName.SubStr(3, appName.GetLength() - 3);
+
+ std::cout << appName << " " << "- The Icinga 2 network monitoring daemon (version: "
+ << ConsoleColorTag(vm.count("version") ? Console_ForegroundRed : Console_Normal)
+ << Application::GetAppVersion()
+#ifdef I2_DEBUG
+ << "; debug"
+#endif /* I2_DEBUG */
+ << ConsoleColorTag(Console_Normal)
+ << ")" << std::endl << std::endl;
+
+ if ((!command || vm.count("help")) && !vm.count("version")) {
+ std::cout << "Usage:" << std::endl
+ << " " << Utility::BaseName(argv[0]) << " ";
+
+ if (cmdname.IsEmpty())
+ std::cout << "<command>";
+ else
+ std::cout << cmdname;
+
+ std::cout << " [<arguments>]" << std::endl;
+
+ if (command) {
+ std::cout << std::endl
+ << command->GetDescription() << std::endl;
+ }
+ }
+
+ if (vm.count("version")) {
+ std::cout << "Copyright (c) 2012-" << Utility::FormatDateTime("%Y", Utility::GetTime())
+ << " Icinga GmbH (https://icinga.com/)" << std::endl
+ << "License GPLv2+: GNU GPL version 2 or later <https://gnu.org/licenses/gpl2.html>" << std::endl
+ << "This is free software: you are free to change and redistribute it." << std::endl
+ << "There is NO WARRANTY, to the extent permitted by law.";
+ }
+
+ std::cout << std::endl;
+
+ if (vm.count("version")) {
+ std::cout << std::endl;
+
+ Application::DisplayInfoMessage(std::cout, true);
+
+ return EXIT_SUCCESS;
+ }
+ }
+
+ if (!command || vm.count("help")) {
+ if (!command)
+ CLICommand::ShowCommands(argc, argv, nullptr);
+
+ std::cout << visibleDesc << std::endl
+ << "Report bugs at <https://github.com/Icinga/icinga2>" << std::endl
+ << "Get support: <https://icinga.com/support/>" << std::endl
+ << "Documentation: <https://icinga.com/docs/>" << std::endl
+ << "Icinga home page: <https://icinga.com/>" << std::endl;
+ return EXIT_SUCCESS;
+ }
+ }
+
+ int rc = 1;
+
+ if (autocomplete) {
+ CLICommand::ShowCommands(argc, argv, &visibleDesc, &hiddenDesc,
+ &GlobalArgumentCompletion, true, autoindex);
+ rc = 0;
+ } else if (command) {
+ Logger::DisableTimestamp();
+#ifndef _WIN32
+ if (command->GetImpersonationLevel() == ImpersonateRoot) {
+ if (getuid() != 0) {
+ Log(LogCritical, "cli", "This command must be run as root.");
+ return 0;
+ }
+ } else if (command && command->GetImpersonationLevel() == ImpersonateIcinga) {
+ String group = Configuration::RunAsGroup;
+ String user = Configuration::RunAsUser;
+
+ errno = 0;
+ struct group *gr = getgrnam(group.CStr());
+
+ if (!gr) {
+ if (errno == 0) {
+ Log(LogCritical, "cli")
+ << "Invalid group specified: " << group;
+ return EXIT_FAILURE;
+ } else {
+ Log(LogCritical, "cli")
+ << "getgrnam() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+ return EXIT_FAILURE;
+ }
+ }
+
+ if (getgid() != gr->gr_gid) {
+ if (!vm.count("reload-internal") && setgroups(0, nullptr) < 0) {
+ Log(LogCritical, "cli")
+ << "setgroups() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+ Log(LogCritical, "cli")
+ << "Please re-run this command as a privileged user or using the \"" << user << "\" account.";
+ return EXIT_FAILURE;
+ }
+
+ if (setgid(gr->gr_gid) < 0) {
+ Log(LogCritical, "cli")
+ << "setgid() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+ return EXIT_FAILURE;
+ }
+ }
+
+ errno = 0;
+ struct passwd *pw = getpwnam(user.CStr());
+
+ if (!pw) {
+ if (errno == 0) {
+ Log(LogCritical, "cli")
+ << "Invalid user specified: " << user;
+ return EXIT_FAILURE;
+ } else {
+ Log(LogCritical, "cli")
+ << "getpwnam() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+ return EXIT_FAILURE;
+ }
+ }
+
+			// also activate the additional groups the configured user is a member of
+ if (getuid() != pw->pw_uid) {
+ if (!vm.count("reload-internal") && initgroups(user.CStr(), pw->pw_gid) < 0) {
+ Log(LogCritical, "cli")
+ << "initgroups() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+ Log(LogCritical, "cli")
+ << "Please re-run this command as a privileged user or using the \"" << user << "\" account.";
+ return EXIT_FAILURE;
+ }
+
+ if (setuid(pw->pw_uid) < 0) {
+ Log(LogCritical, "cli")
+ << "setuid() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+ Log(LogCritical, "cli")
+ << "Please re-run this command as a privileged user or using the \"" << user << "\" account.";
+ return EXIT_FAILURE;
+ }
+ }
+ }
+#endif /* _WIN32 */
+
+ std::vector<std::string> args;
+ if (vm.count("arg"))
+ args = vm["arg"].as<std::vector<std::string> >();
+
+ if (static_cast<int>(args.size()) < command->GetMinArguments()) {
+ Log(LogCritical, "cli")
+ << "Too few arguments. Command needs at least " << command->GetMinArguments()
+ << " argument" << (command->GetMinArguments() != 1 ? "s" : "") << ".";
+ return EXIT_FAILURE;
+ }
+
+ if (command->GetMaxArguments() >= 0 && static_cast<int>(args.size()) > command->GetMaxArguments()) {
+ Log(LogCritical, "cli")
+ << "Too many arguments. At most " << command->GetMaxArguments()
+ << " argument" << (command->GetMaxArguments() != 1 ? "s" : "") << " may be specified.";
+ return EXIT_FAILURE;
+ }
+
+ rc = command->Run(vm, args);
+ }
+
+ return rc;
+}
+
+#ifdef _WIN32
+static int SetupService(bool install, int argc, char **argv)
+{
+ SC_HANDLE schSCManager = OpenSCManager(nullptr, nullptr, SC_MANAGER_ALL_ACCESS);
+
+ if (!schSCManager) {
+ printf("OpenSCManager failed (%d)\n", GetLastError());
+ return 1;
+ }
+
+ TCHAR szPath[MAX_PATH];
+
+ if (!GetModuleFileName(nullptr, szPath, MAX_PATH)) {
+ printf("Cannot install service (%d)\n", GetLastError());
+ return 1;
+ }
+
+ String szArgs;
+ szArgs = Utility::EscapeShellArg(szPath) + " --scm";
+
+ std::string scmUser = "NT AUTHORITY\\NetworkService";
+ std::ifstream initf(Utility::GetIcingaDataPath() + "\\etc\\icinga2\\user");
+ if (initf.good()) {
+ std::getline(initf, scmUser);
+ }
+ initf.close();
+
+ for (int i = 0; i < argc; i++) {
+ if (!strcmp(argv[i], "--scm-user") && i + 1 < argc) {
+ scmUser = argv[i + 1];
+ i++;
+ } else
+ szArgs += " " + Utility::EscapeShellArg(argv[i]);
+ }
+
+ SC_HANDLE schService = OpenService(schSCManager, "icinga2", SERVICE_ALL_ACCESS);
+
+ if (schService) {
+ SERVICE_STATUS status;
+ ControlService(schService, SERVICE_CONTROL_STOP, &status);
+
+ double start = Utility::GetTime();
+ while (status.dwCurrentState != SERVICE_STOPPED) {
+ double end = Utility::GetTime();
+
+ if (end - start > 30) {
+ printf("Could not stop the service.\n");
+ break;
+ }
+
+ Utility::Sleep(5);
+
+ if (!QueryServiceStatus(schService, &status)) {
+ printf("QueryServiceStatus failed (%d)\n", GetLastError());
+ return 1;
+ }
+ }
+ } else if (install) {
+ schService = CreateService(
+ schSCManager,
+ "icinga2",
+ "Icinga 2",
+ SERVICE_ALL_ACCESS,
+ SERVICE_WIN32_OWN_PROCESS,
+ SERVICE_DEMAND_START,
+ SERVICE_ERROR_NORMAL,
+ szArgs.CStr(),
+ nullptr,
+ nullptr,
+ nullptr,
+ scmUser.c_str(),
+ nullptr);
+
+ if (!schService) {
+ printf("CreateService failed (%d)\n", GetLastError());
+ CloseServiceHandle(schSCManager);
+ return 1;
+ }
+ } else {
+ printf("Service isn't installed.\n");
+ CloseServiceHandle(schSCManager);
+ return 0;
+ }
+
+ if (!install) {
+ if (!DeleteService(schService)) {
+ printf("DeleteService failed (%d)\n", GetLastError());
+ CloseServiceHandle(schService);
+ CloseServiceHandle(schSCManager);
+ return 1;
+ }
+
+ printf("Service uninstalled successfully\n");
+ } else {
+ if (!ChangeServiceConfig(schService, SERVICE_NO_CHANGE, SERVICE_AUTO_START,
+ SERVICE_ERROR_NORMAL, szArgs.CStr(), nullptr, nullptr, nullptr, scmUser.c_str(), nullptr, nullptr)) {
+ printf("ChangeServiceConfig failed (%d)\n", GetLastError());
+ CloseServiceHandle(schService);
+ CloseServiceHandle(schSCManager);
+ return 1;
+ }
+
+ SERVICE_DESCRIPTION sdDescription = { "The Icinga 2 monitoring application" };
+ if(!ChangeServiceConfig2(schService, SERVICE_CONFIG_DESCRIPTION, &sdDescription)) {
+ printf("ChangeServiceConfig2 failed (%d)\n", GetLastError());
+ CloseServiceHandle(schService);
+ CloseServiceHandle(schSCManager);
+ return 1;
+ }
+
+ if (!StartService(schService, 0, nullptr)) {
+ printf("StartService failed (%d)\n", GetLastError());
+ CloseServiceHandle(schService);
+ CloseServiceHandle(schSCManager);
+ return 1;
+ }
+
+ std::cout << "Service successfully installed for user '" << scmUser << "'\n";
+
+ String userFilePath = Utility::GetIcingaDataPath() + "\\etc\\icinga2\\user";
+
+ std::ofstream fuser(userFilePath.CStr(), std::ios::out | std::ios::trunc);
+ if (fuser)
+ fuser << scmUser;
+ else
+ std::cout << "Could not write user to " << userFilePath << "\n";
+ }
+
+ CloseServiceHandle(schService);
+ CloseServiceHandle(schSCManager);
+
+ return 0;
+}
+
+static VOID ReportSvcStatus(DWORD dwCurrentState,
+ DWORD dwWin32ExitCode,
+ DWORD dwWaitHint)
+{
+ static DWORD dwCheckPoint = 1;
+
+ l_SvcStatus.dwCurrentState = dwCurrentState;
+ l_SvcStatus.dwWin32ExitCode = dwWin32ExitCode;
+ l_SvcStatus.dwWaitHint = dwWaitHint;
+
+ if (dwCurrentState == SERVICE_START_PENDING)
+ l_SvcStatus.dwControlsAccepted = 0;
+ else
+ l_SvcStatus.dwControlsAccepted = SERVICE_ACCEPT_STOP;
+
+ if ((dwCurrentState == SERVICE_RUNNING) ||
+ (dwCurrentState == SERVICE_STOPPED))
+ l_SvcStatus.dwCheckPoint = 0;
+ else
+ l_SvcStatus.dwCheckPoint = dwCheckPoint++;
+
+ SetServiceStatus(l_SvcStatusHandle, &l_SvcStatus);
+}
+
+static VOID WINAPI ServiceControlHandler(DWORD dwCtrl)
+{
+ if (dwCtrl == SERVICE_CONTROL_STOP) {
+ ReportSvcStatus(SERVICE_STOP_PENDING, NO_ERROR, 0);
+ TerminateJobObject(l_Job, 0);
+ }
+}
+
+static VOID WINAPI ServiceMain(DWORD argc, LPSTR *argv)
+{
+ l_SvcStatusHandle = RegisterServiceCtrlHandler(
+ "icinga2",
+ ServiceControlHandler);
+
+ l_SvcStatus.dwServiceType = SERVICE_WIN32_OWN_PROCESS;
+ l_SvcStatus.dwServiceSpecificExitCode = 0;
+
+ ReportSvcStatus(SERVICE_RUNNING, NO_ERROR, 0);
+ l_Job = CreateJobObject(nullptr, nullptr);
+
+ for (;;) {
+ LPSTR arg = argv[0];
+ String args;
+ int uargc = Application::GetArgC();
+ char **uargv = Application::GetArgV();
+
+ args += Utility::EscapeShellArg(Application::GetExePath(uargv[0]));
+
+ for (int i = 2; i < uargc && uargv[i]; i++) {
+ if (args != "")
+ args += " ";
+
+ args += Utility::EscapeShellArg(uargv[i]);
+ }
+
+ STARTUPINFO si = { sizeof(si) };
+ PROCESS_INFORMATION pi;
+
+ char *uargs = strdup(args.CStr());
+
+ BOOL res = CreateProcess(nullptr, uargs, nullptr, nullptr, FALSE, 0, nullptr, nullptr, &si, &pi);
+
+ free(uargs);
+
+ if (!res)
+ break;
+
+ CloseHandle(pi.hThread);
+
+ AssignProcessToJobObject(l_Job, pi.hProcess);
+
+ if (WaitForSingleObject(pi.hProcess, INFINITE) != WAIT_OBJECT_0)
+ break;
+
+ DWORD exitStatus;
+
+ if (!GetExitCodeProcess(pi.hProcess, &exitStatus))
+ break;
+
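+		// Only exit code 7 keeps the loop running and re-spawns the process; any other exit code stops the service.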
+ if (exitStatus != 7)
+ break;
+ }
+
+ TerminateJobObject(l_Job, 0);
+
+ CloseHandle(l_Job);
+
+ ReportSvcStatus(SERVICE_STOPPED, NO_ERROR, 0);
+
+ Application::Exit(0);
+}
+#endif /* _WIN32 */
+
+/**
+ * Entry point for the Icinga application.
+ *
+ * @param argc Number of command line arguments.
+ * @param argv Command line arguments.
+ * @returns The application's exit status.
+ */
+int main(int argc, char **argv)
+{
+#ifndef _WIN32
+ String keepFDs = Utility::GetFromEnvironment("ICINGA2_KEEP_FDS");
+ if (keepFDs.IsEmpty()) {
+#ifdef I2_DEBUG
+ Utility::CloseAllFDs({0, 1, 2}, [](int fd) {
+ std::cerr << "Closed FD " << fd << " which we inherited from our parent process." << std::endl;
+ });
+#else /* I2_DEBUG */
+ Utility::CloseAllFDs({0, 1, 2});
+#endif /* I2_DEBUG */
+ }
+#endif /* _WIN32 */
+
+ /* must be called before using any other libbase functions */
+ Application::InitializeBase();
+
+ /* Set command-line arguments. */
+ Application::SetArgC(argc);
+ Application::SetArgV(argv);
+
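+	/* Windows service control: --scm-install/--scm-uninstall manage the SCM registration, --scm runs as the registered service. */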
+#ifdef _WIN32
+ if (argc > 1 && strcmp(argv[1], "--scm-install") == 0) {
+ return SetupService(true, argc - 2, &argv[2]);
+ }
+
+ if (argc > 1 && strcmp(argv[1], "--scm-uninstall") == 0) {
+ return SetupService(false, argc - 2, &argv[2]);
+ }
+
+ if (argc > 1 && strcmp(argv[1], "--scm") == 0) {
+ SERVICE_TABLE_ENTRY dispatchTable[] = {
+ { "icinga2", ServiceMain },
+ { nullptr, nullptr }
+ };
+
+ StartServiceCtrlDispatcher(dispatchTable);
+ Application::Exit(EXIT_FAILURE);
+ }
+#endif /* _WIN32 */
+
+ int rc = Main();
+
+ Application::Exit(rc);
+}
diff --git a/icinga-app/icinga.ico b/icinga-app/icinga.ico
new file mode 100644
index 0000000..9be324c
--- /dev/null
+++ b/icinga-app/icinga.ico
Binary files differ
diff --git a/icinga-app/icinga.rc b/icinga-app/icinga.rc
new file mode 100644
index 0000000..ac4836e
--- /dev/null
+++ b/icinga-app/icinga.rc
@@ -0,0 +1,34 @@
+#include <windows.h>
+#include "icinga-version.h"
+
+LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US
+
+100 ICON "icinga.ico"
+
+VS_VERSION_INFO VERSIONINFO
+FILEVERSION 1,0,0,0
+PRODUCTVERSION 1,0,0,0
+FILEOS VOS__WINDOWS32
+FILETYPE VFT_APP
+FILESUBTYPE VFT2_UNKNOWN
+BEGIN
+ BLOCK "StringFileInfo"
+ BEGIN
+ BLOCK "040904E4"
+ BEGIN
+ VALUE "CompanyName", "Icinga GmbH"
+ VALUE "FileDescription", "Icinga 2"
+ VALUE "FileVersion", ICINGA2_VERSION
+ VALUE "InternalName", "icinga2.exe"
+            VALUE "LegalCopyright", "© Icinga GmbH"
+ VALUE "OriginalFilename", "icinga2.exe"
+ VALUE "ProductName", "Icinga 2"
+ VALUE "ProductVersion", ICINGA2_VERSION
+ END
+ END
+
+ BLOCK "VarFileInfo"
+ BEGIN
+ VALUE "Translation", 0x409, 0x04E4
+ END
+END
diff --git a/icinga-app/icinga2.cmake b/icinga-app/icinga2.cmake
new file mode 100644
index 0000000..f55f22a
--- /dev/null
+++ b/icinga-app/icinga2.cmake
@@ -0,0 +1,29 @@
+#!/bin/sh
+ICINGA2_BIN=@CMAKE_INSTALL_FULL_LIBDIR@/icinga2/sbin/icinga2
+
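+# If icinga2 is linked against libedit, preload a matching libreadline from the
+# same library directory (if one exists) before starting the interactive console.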
+if test "x`uname -s`" = "xLinux" -a "x$1" = "xconsole"; then
+ libedit_line=`ldd $ICINGA2_BIN 2>&1 | grep libedit`
+ if test $? -eq 0; then
+ libedit_path=`echo $libedit_line | cut -f3 -d' '`
+ if test -n "$libedit_path"; then
+ libdir=`dirname -- $libedit_path`
+ found=0
+ for libreadline_path in `ls -1r -- $libdir/libreadline.so.* 2>/dev/null`; do
+ found=1
+ break
+ done
+ if test $found -eq 0; then
+ libdir2=/`echo $libdir | cut -f3- -d/`
+ for libreadline_path in `ls -1r -- $libdir2/libreadline.so.* 2>/dev/null`; do
+ found=1
+ break
+ done
+ fi
+ if test $found -gt 0; then
+ export LD_PRELOAD="$libreadline_path:$LD_PRELOAD"
+ fi
+ fi
+ fi
+fi
+
+exec $ICINGA2_BIN "$@"
diff --git a/icinga-installer/CMakeLists.txt b/icinga-installer/CMakeLists.txt
new file mode 100644
index 0000000..6ac5e1f
--- /dev/null
+++ b/icinga-installer/CMakeLists.txt
@@ -0,0 +1,47 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
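+# Build the installer against the static MSVC runtime (/MT) instead of the DLL runtime (/MD).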
+foreach(flag_var
+ CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
+ CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
+ if(${flag_var} MATCHES "/MD")
+ string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
+ endif(${flag_var} MATCHES "/MD")
+endforeach(flag_var)
+
+set(icinga_installer_SOURCES
+ icinga-installer.cpp
+)
+
+add_executable(icinga-installer ${icinga_installer_SOURCES})
+
+set_target_properties(
+ icinga-installer PROPERTIES
+ FOLDER Bin
+ OUTPUT_NAME icinga2-installer
+ LINK_FLAGS "/SUBSYSTEM:WINDOWS"
+)
+
+target_link_libraries(icinga-installer shlwapi)
+
+install(CODE "
+ execute_process(COMMAND \${CMAKE_COMMAND} -E copy \"${CMAKE_CURRENT_BINARY_DIR}/icinga2.wixpatch.\${BUILD_TYPE}\"
+ \"${CMAKE_CURRENT_BINARY_DIR}/icinga2.wixpatch\"
+ RESULT_VARIABLE copy_result
+ ERROR_VARIABLE error_output)
+ if(copy_result)
+ message(FATAL_ERROR \${error_output})
+ endif()"
+)
+
+file(
+ GENERATE
+ OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/icinga2.wixpatch.$<CONFIG>"
+ INPUT "${CMAKE_CURRENT_SOURCE_DIR}/icinga2.wixpatch.cmake"
+)
+
+set(InstallPath "${CMAKE_INSTALL_SBINDIR}")
+
+install(
+ TARGETS icinga-installer
+ RUNTIME DESTINATION ${InstallPath}
+)
diff --git a/icinga-installer/bannrbmp.bmp b/icinga-installer/bannrbmp.bmp
new file mode 100644
index 0000000..0f68e62
--- /dev/null
+++ b/icinga-installer/bannrbmp.bmp
Binary files differ
diff --git a/icinga-installer/dlgbmp.bmp b/icinga-installer/dlgbmp.bmp
new file mode 100644
index 0000000..e46566f
--- /dev/null
+++ b/icinga-installer/dlgbmp.bmp
Binary files differ
diff --git a/icinga-installer/icinga-installer.cpp b/icinga-installer/icinga-installer.cpp
new file mode 100644
index 0000000..4dc050d
--- /dev/null
+++ b/icinga-installer/icinga-installer.cpp
@@ -0,0 +1,312 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include <string>
+#include <vector>
+#include <fstream>
+#include <direct.h>
+#include <windows.h>
+#include <shlwapi.h>
+#include <shellapi.h>
+#include <shlobj.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+static std::string GetIcingaInstallPath(void)
+{
+ char szFileName[MAX_PATH];
+ if (!GetModuleFileName(nullptr, szFileName, sizeof(szFileName)))
+ return "";
+
+ if (!PathRemoveFileSpec(szFileName))
+ return "";
+
+ if (!PathRemoveFileSpec(szFileName))
+ return "";
+
+ return szFileName;
+}
+
+
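+// Run an external program with a hidden window, wait for it to finish and treat only exit code 0 as success.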
+static bool ExecuteCommand(const std::string& app, const std::string& arguments)
+{
+ SHELLEXECUTEINFO sei = {};
+ sei.cbSize = sizeof(sei);
+ sei.fMask = SEE_MASK_NOCLOSEPROCESS;
+ sei.lpFile = app.c_str();
+ sei.lpParameters = arguments.c_str();
+ sei.nShow = SW_HIDE;
+ if (!ShellExecuteEx(&sei))
+ return false;
+
+ if (!sei.hProcess)
+ return false;
+
+ WaitForSingleObject(sei.hProcess, INFINITE);
+
+ DWORD exitCode;
+ BOOL res = GetExitCodeProcess(sei.hProcess, &exitCode);
+ CloseHandle(sei.hProcess);
+
+ if (!res)
+ return false;
+
+ return exitCode == 0;
+}
+
+static bool ExecuteIcingaCommand(const std::string& arguments)
+{
+ return ExecuteCommand(GetIcingaInstallPath() + "\\sbin\\icinga2.exe", arguments);
+}
+
+static std::string DirName(const std::string& path)
+{
+ char *spath = strdup(path.c_str());
+
+ if (!PathRemoveFileSpec(spath)) {
+ free(spath);
+ throw std::runtime_error("PathRemoveFileSpec failed");
+ }
+
+ std::string result = spath;
+
+ free(spath);
+
+ return result;
+}
+
+static bool PathExists(const std::string& path)
+{
+ struct _stat statbuf;
+ return (_stat(path.c_str(), &statbuf) >= 0);
+}
+
+static std::string GetIcingaDataPath(void)
+{
+ char path[MAX_PATH];
+ if (!SUCCEEDED(SHGetFolderPath(nullptr, CSIDL_COMMON_APPDATA, nullptr, 0, path)))
+ throw std::runtime_error("SHGetFolderPath failed");
+ return std::string(path) + "\\icinga2";
+}
+
+static void MkDir(const std::string& path)
+{
+ if (mkdir(path.c_str()) < 0 && errno != EEXIST)
+ throw std::runtime_error("mkdir failed");
+}
+
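+// Create a directory including all missing parents; both '/' and '\\' are accepted as separators.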
+static void MkDirP(const std::string& path)
+{
+ size_t pos = 0;
+
+ while (pos != std::string::npos) {
+ pos = path.find_first_of("/\\", pos + 1);
+
+ std::string spath = path.substr(0, pos + 1);
+ struct _stat statbuf;
+ if (_stat(spath.c_str(), &statbuf) < 0 && errno == ENOENT)
+ MkDir(path.substr(0, pos));
+ }
+}
+
+static std::string GetNSISInstallPath(void)
+{
+ HKEY hKey;
+ //TODO: Change hardcoded key
+ if (RegOpenKeyEx(HKEY_LOCAL_MACHINE, "SOFTWARE\\Icinga Development Team\\ICINGA2", 0,
+ KEY_QUERY_VALUE | KEY_WOW64_32KEY, &hKey) == ERROR_SUCCESS) {
+ BYTE pvData[MAX_PATH];
+ DWORD cbData = sizeof(pvData) - 1;
+ DWORD lType;
+		if (RegQueryValueEx(hKey, nullptr, nullptr, &lType, pvData, &cbData) == ERROR_SUCCESS && lType == REG_SZ) {
+			pvData[cbData] = '\0';
+
+			RegCloseKey(hKey);
+
+			return (char *)pvData;
+		}
+
+ RegCloseKey(hKey);
+ }
+
+ return "";
+}
+
+static bool CopyDirectory(const std::string& source, const std::string& destination)
+{
+ // SHFileOperation requires file names to be terminated with two \0s
+ std::string tmpSource = source + std::string(1, '\0');
+ std::string tmpDestination = destination + std::string(1, '\0');
+
+	SHFILEOPSTRUCT fop = {};
+ fop.wFunc = FO_COPY;
+ fop.pFrom = tmpSource.c_str();
+ fop.pTo = tmpDestination.c_str();
+ fop.fFlags = FOF_NO_UI;
+
+ return (SHFileOperation(&fop) == 0);
+}
+
+static bool DeleteDirectory(const std::string& dir)
+{
+ // SHFileOperation requires file names to be terminated with two \0s
+ std::string tmpDir = dir + std::string(1, '\0');
+
+	SHFILEOPSTRUCT fop = {};
+ fop.wFunc = FO_DELETE;
+ fop.pFrom = tmpDir.c_str();
+ fop.fFlags = FOF_NO_UI;
+
+ return (SHFileOperation(&fop) == 0);
+}
+
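+// Take over an installation made by the legacy NSIS installer: copy etc/ and var/
+// into the ProgramData directory (unless it already exists), run the old
+// uninstaller silently and remove the leftovers.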
+static int UpgradeNSIS(void)
+{
+ std::string installPath = GetNSISInstallPath();
+
+ if (installPath.empty())
+ return 0;
+
+ std::string uninstallerPath = installPath + "\\uninstall.exe";
+
+ if (!PathExists(uninstallerPath))
+ return 0;
+
+ std::string dataPath = GetIcingaDataPath();
+
+ if (dataPath.empty())
+ return 1;
+
+ bool moveUserData = !PathExists(dataPath);
+
+ /* perform open heart surgery on the user's data dirs - yay */
+ if (moveUserData) {
+ MkDir(dataPath.c_str());
+
+ std::string oldNameEtc = installPath + "\\etc";
+ std::string newNameEtc = dataPath + "\\etc";
+ if (!CopyDirectory(oldNameEtc, newNameEtc))
+ return 1;
+
+ std::string oldNameVar = installPath + "\\var";
+ std::string newNameVar = dataPath + "\\var";
+ if (!CopyDirectory(oldNameVar, newNameVar))
+ return 1;
+ }
+
+ ExecuteCommand(uninstallerPath, "/S _?=" + installPath);
+
+ _unlink(uninstallerPath.c_str());
+
+ if (moveUserData) {
+ std::string oldNameEtc = installPath + "\\etc";
+ if (!DeleteDirectory(oldNameEtc))
+ return 1;
+
+ std::string oldNameVar = installPath + "\\var";
+ if (!DeleteDirectory(oldNameVar))
+ return 1;
+
+ _rmdir(installPath.c_str());
+ }
+
+ return 0;
+}
+
+static int InstallIcinga(void)
+{
+ std::string installDir = GetIcingaInstallPath();
+ std::string skelDir = installDir + "\\share\\skel";
+ std::string dataDir = GetIcingaDataPath();
+
+ if (!PathExists(dataDir)) {
+ std::string sourceDir = skelDir + std::string(1, '\0');
+ std::string destinationDir = dataDir + std::string(1, '\0');
+
+		SHFILEOPSTRUCT fop = {};
+ fop.wFunc = FO_COPY;
+ fop.pFrom = sourceDir.c_str();
+ fop.pTo = destinationDir.c_str();
+ fop.fFlags = FOF_NO_UI | FOF_NOCOPYSECURITYATTRIBS;
+
+ if (SHFileOperation(&fop) != 0)
+ return 1;
+
+ MkDirP(dataDir + "/etc/icinga2/pki");
+ MkDirP(dataDir + "/var/cache/icinga2");
+ MkDirP(dataDir + "/var/lib/icinga2/certs");
+ MkDirP(dataDir + "/var/lib/icinga2/certificate-requests");
+ MkDirP(dataDir + "/var/lib/icinga2/agent/inventory");
+ MkDirP(dataDir + "/var/lib/icinga2/api/config");
+ MkDirP(dataDir + "/var/lib/icinga2/api/log");
+ MkDirP(dataDir + "/var/lib/icinga2/api/zones");
+ MkDirP(dataDir + "/var/log/icinga2/compat/archive");
+ MkDirP(dataDir + "/var/log/icinga2/crash");
+ MkDirP(dataDir + "/var/run/icinga2/cmd");
+ MkDirP(dataDir + "/var/spool/icinga2/perfdata");
+ MkDirP(dataDir + "/var/spool/icinga2/tmp");
+ }
+
+	// Upgrade from versions older than 2.13 by making the windowseventlog feature available,
+	// enabling it by default and disabling the old mainlog feature.
+ if (!PathExists(dataDir + "/etc/icinga2/features-available/windowseventlog.conf")) {
+ // Disable the old mainlog feature as it is replaced by windowseventlog by default.
+ std::string mainlogEnabledFile = dataDir + "/etc/icinga2/features-enabled/mainlog.conf";
+ if (PathExists(mainlogEnabledFile)) {
+ if (DeleteFileA(mainlogEnabledFile.c_str()) == 0) {
+ throw std::runtime_error("deleting '" + mainlogEnabledFile + "' failed");
+ }
+ }
+
+ // Install the new windowseventlog feature. As features-available/windowseventlog.conf is used as a marker file,
+ // copy it as the last step, so that this is run again should the upgrade be interrupted.
+ for (const std::string& d : {"features-enabled", "features-available"}) {
+ std::string sourceFile = skelDir + "/etc/icinga2/" + d + "/windowseventlog.conf";
+ std::string destinationFile = dataDir + "/etc/icinga2/" + d + "/windowseventlog.conf";
+
+ if (CopyFileA(sourceFile.c_str(), destinationFile.c_str(), false) == 0) {
+ throw std::runtime_error("copying '" + sourceFile + "' to '" + destinationFile + "' failed");
+ }
+ }
+ }
+
+ // TODO: In Icinga 2.14, rename features-available/mainlog.conf to mainlog.conf.deprecated
+ // so that it's no longer listed as an available feature.
+
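+	// Grant the NetworkService account (S-1-5-20) modify access to the data directory and restrict etc\ to NetworkService and Administrators (S-1-5-32-544).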
+ ExecuteCommand("icacls", "\"" + dataDir + "\" /grant *S-1-5-20:(oi)(ci)m");
+ ExecuteCommand("icacls", "\"" + dataDir + "\\etc\" /inheritance:r /grant:r *S-1-5-20:(oi)(ci)m *S-1-5-32-544:(oi)(ci)f");
+
+ ExecuteIcingaCommand("--scm-install daemon");
+
+ return 0;
+}
+
+static int UninstallIcinga(void)
+{
+ ExecuteIcingaCommand("--scm-uninstall");
+
+ return 0;
+}
+
+/**
+ * Entry point for the installer application.
+ */
+int CALLBACK WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nShowCmd)
+{
+ CoInitializeEx(nullptr, COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE);
+
+ //AllocConsole();
+ int rc;
+
+ if (strcmp(lpCmdLine, "install") == 0) {
+ rc = InstallIcinga();
+ } else if (strcmp(lpCmdLine, "uninstall") == 0) {
+ rc = UninstallIcinga();
+ } else if (strcmp(lpCmdLine, "upgrade-nsis") == 0) {
+ rc = UpgradeNSIS();
+ } else {
+ MessageBox(nullptr, "This application should only be run by the MSI installer package.", "Icinga 2 Installer", MB_ICONWARNING);
+ rc = 1;
+ }
+
+ //::Sleep(3000s);
+
+ return rc;
+}
diff --git a/icinga-installer/icinga2.wixpatch.cmake b/icinga-installer/icinga2.wixpatch.cmake
new file mode 100644
index 0000000..eee4027
--- /dev/null
+++ b/icinga-installer/icinga2.wixpatch.cmake
@@ -0,0 +1,52 @@
+<CPackWiXPatch>
+ <CPackWiXFragment Id="#PRODUCT">
+ <Property Id="ALLUSERS">1</Property>
+ <Property Id="MSIRESTARTMANAGERCONTROL">Disable</Property>
+
+ <PropertyRef Id="WIX_IS_NETFRAMEWORK_46_OR_LATER_INSTALLED" />
+ <Condition Message='This application requires .NET Framework 4.6 or higher. Please install the .NET Framework then run this installer again.'>
+ <![CDATA[Installed OR WIX_IS_NETFRAMEWORK_46_OR_LATER_INSTALLED]]>
+ </Condition>
+
+ <CustomAction Id="XtraUpgradeNSIS" BinaryKey="icinga2_installer" ExeCommand="upgrade-nsis" Execute="deferred" Impersonate="no" />
+ <CustomAction Id="XtraInstall" FileKey="CM_FP_sbin.icinga2_installer.exe" ExeCommand="install" Execute="deferred" Impersonate="no" />
+ <CustomAction Id="XtraUninstall" FileKey="CM_FP_sbin.icinga2_installer.exe" ExeCommand="uninstall" Execute="deferred" Impersonate="no" />
+
+ <Binary Id="icinga2_installer" SourceFile="$<TARGET_FILE:icinga-installer>" />
+
+ <InstallExecuteSequence>
+ <Custom Action="XtraUpgradeNSIS" After="InstallInitialize">$CM_CP_sbin.icinga2_installer.exe&gt;2 AND NOT SUPPRESS_XTRA</Custom>
+ <Custom Action="XtraInstall" Before="InstallFinalize">$CM_CP_sbin.icinga2_installer.exe&gt;2 AND NOT SUPPRESS_XTRA</Custom>
+ <Custom Action="XtraUninstall" Before="RemoveExistingProducts">$CM_CP_sbin.icinga2_installer.exe=2 AND NOT SUPPRESS_XTRA</Custom>
+ </InstallExecuteSequence>
+
+ <!--
+ Write the path to eventprovider.dll to the registry so that the Event Viewer is able to find
+ the message definitions and properly displays our log messages.
+
+ See also: https://docs.microsoft.com/en-us/windows/win32/eventlog/reporting-an-event
+ -->
+ <FeatureRef Id="ProductFeature" IgnoreParent="yes">
+ <Component Id="EventProviderRegistryEntry" Guid="*" Directory="INSTALL_ROOT">
+ <RegistryKey Root="HKLM" Key="SYSTEM\CurrentControlSet\Services\EventLog\Application\Icinga 2" Action="createAndRemoveOnUninstall">
+ <RegistryValue Name="EventMessageFile" Type="string" Value="[#CM_FP_sbin.eventprovider.dll]" />
+ </RegistryKey>
+ </Component>
+ </FeatureRef>
+
+ <Property Id="WIXUI_EXITDIALOGOPTIONALCHECKBOXTEXT" Value="Run Icinga 2 setup wizard" />
+
+ <Property Id="WixShellExecTarget" Value="[#CM_FP_sbin.Icinga2SetupAgent.exe]" />
+ <CustomAction Id="LaunchIcinga2Wizard"
+ BinaryKey="WixCA"
+ DllEntry="WixShellExec"
+ Impersonate="no" />
+
+ <UI>
+ <Publish Dialog="ExitDialog"
+ Control="Finish"
+ Event="DoAction"
+ Value="LaunchIcinga2Wizard">WIXUI_EXITDIALOGOPTIONALCHECKBOX = 1 and NOT Installed</Publish>
+ </UI>
+ </CPackWiXFragment>
+</CPackWiXPatch>
diff --git a/icinga-spec-version.h.cmake b/icinga-spec-version.h.cmake
new file mode 100644
index 0000000..60a77bf
--- /dev/null
+++ b/icinga-spec-version.h.cmake
@@ -0,0 +1 @@
+#define SPEC_VERSION "${SPEC_VERSION}"
diff --git a/icinga-version.h.cmake b/icinga-version.h.cmake
new file mode 100644
index 0000000..44acd8c
--- /dev/null
+++ b/icinga-version.h.cmake
@@ -0,0 +1,2 @@
+#define VERSION "${GIT_VERSION}"
+#define ICINGA2_VERSION "${ICINGA2_VERSION}"
diff --git a/itl/CMakeLists.txt b/itl/CMakeLists.txt
new file mode 100644
index 0000000..b302aa3
--- /dev/null
+++ b/itl/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+add_subdirectory(plugins-contrib.d)
+
+install(
+ FILES itl command-icinga.conf hangman plugins command-plugins.conf manubulon command-plugins-manubulon.conf windows-plugins command-plugins-windows.conf nscp command-nscp-local.conf plugins-contrib
+ DESTINATION ${ICINGA2_INCLUDEDIR}
+)
diff --git a/itl/command-icinga.conf b/itl/command-icinga.conf
new file mode 100644
index 0000000..74523a4
--- /dev/null
+++ b/itl/command-icinga.conf
@@ -0,0 +1,57 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+object CheckCommand "icinga" {
+ import "icinga-check-command"
+}
+
+object CheckCommand "cluster" {
+ import "cluster-check-command"
+}
+
+object CheckCommand "cluster-zone" {
+ import "cluster-zone-check-command"
+
+ vars.cluster_zone = "$host.name$"
+}
+
+object CheckCommand "dummy" {
+ import "dummy-check-command"
+
+ vars.dummy_state = 0
+ vars.dummy_text = "Check was successful."
+}
+
+object CheckCommand "passive" {
+ import "dummy"
+
+ vars.dummy_state = 3
+ vars.dummy_text = "No Passive Check Result Received."
+}
+
+object CheckCommand "random" {
+ import "random-check-command"
+}
+
+object CheckCommand "exception" {
+ import "exception-check-command"
+}
+
+object CheckCommand "sleep" {
+ import "sleep-check-command"
+}
+
+object CheckCommand "ifw-api" {
+ import "ifw-api-check-command"
+
+ vars.ifw_api_command = "$command.name$"
+ vars.ifw_api_arguments = {}
+ vars.ifw_api_host = null
+ vars.ifw_api_port = 5668
+ vars.ifw_api_expected_san = "$ifw_api_host$"
+ vars.ifw_api_cert = null
+ vars.ifw_api_key = null
+ vars.ifw_api_ca = null
+ vars.ifw_api_crl = null
+ vars.ifw_api_username = null
+ vars.ifw_api_password = null
+}
diff --git a/itl/command-nscp-local.conf b/itl/command-nscp-local.conf
new file mode 100644
index 0000000..8498d68
--- /dev/null
+++ b/itl/command-nscp-local.conf
@@ -0,0 +1,347 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
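+/* Locate the NSClient++ installation directory via its MSI component GUID unless NscpPath was set explicitly. */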
+if (!globals.contains("NscpPath")) {
+ globals.NscpPath = dirname(msi_get_component_path("{5C45463A-4AE9-4325-96DB-6E239C034F93}"))
+}
+
+object CheckCommand "nscp-local" {
+ command = [ NscpPath + "\\nscp.exe", "client" ]
+
+ arguments = {
+ "--log" = {
+ value = "$nscp_log_level$"
+ description = "The log level to use"
+ }
+ "--load-all" = {
+			set_if = "$nscp_load_all$"
+ description = "Load all plugins (currently only used with generate)"
+ }
+ "--module" = {
+ value = "$nscp_modules$"
+ description = "Specify which NSClient++ modules are required. 'nscp client' just needs 'CheckSystem' by default."
+ repeat_key = true
+ }
+ "-q" = {
+ value = "$nscp_query$"
+ description = "Run a query with a given name"
+ required = true
+ }
+ "-b" = {
+ set_if = "$nscp_boot$"
+			description = "Boot the client before executing the command (similar to running the command in test mode)"
+ }
+ "-a" = {
+ value = "$nscp_arguments$"
+ repeat_key = true
+			description = "List of arguments (arguments get '--' prefixed automatically; '--argument foo=bar' is the same as setting '--foo bar')"
+ }
+ "--show-all" = {
+ set_if = "$nscp_showall$"
+ description = ""
+ }
+ }
+
+ vars.nscp_log_level = "critical"
+ vars.nscp_load_all = false
+ vars.nscp_boot = true
+ vars.nscp_showall = false
+ vars.nscp_modules = [ "CheckSystem" ]
+}
+
+object CheckCommand "nscp-local-cpu" {
+ import "nscp-local"
+
+ arguments += {
+ "--time" = {
+ value = "$nscp_cpu_time$"
+ repeat_key = true
+ description = "The time to check"
+ }
+ "--warning" = {
+ value = "load>$nscp_cpu_warning$"
+ }
+ "--critical" = {
+ value = "load>$nscp_cpu_critical$"
+ }
+ "-a" = {
+ value = "$nscp_cpu_arguments$"
+ repeat_key = true
+ }
+ }
+
+ vars.nscp_query = "check_cpu"
+ vars.nscp_showall = "$nscp_cpu_showall$"
+
+ vars.nscp_cpu_time = [ "1m", "5m", "15m" ]
+ vars.nscp_cpu_showall = true
+ vars.nscp_cpu_warning = 80
+ vars.nscp_cpu_critical = 90
+}
+
+object CheckCommand "nscp-local-memory" {
+ import "nscp-local"
+
+ arguments += {
+ "--type=committed" = {
+ set_if = "$nscp_memory_committed$"
+ description = "Total memory (RAM+PAGE)"
+ }
+ "--type=physical" = {
+ set_if = "$nscp_memory_physical$"
+ description = "Physical memory (RAM)"
+ }
+ "--warning" = {
+ value = "$nscp_memory_op$ $nscp_memory_warning$"
+ }
+ "--critical" = {
+ value = "$nscp_memory_op$ $nscp_memory_critical$"
+ }
+ "-a" = {
+ value = "$nscp_memory_arguments$"
+ repeat_key = true
+ }
+ }
+
+ vars.nscp_query = "check_memory"
+ vars.nscp_showall = "$nscp_memory_showall$"
+
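+	/* Threshold operator: compare free memory ("free <") when $nscp_memory_free$ is set, otherwise used memory ("used >"). */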
+ vars.nscp_memory_op = {{
+ if (!macro("$nscp_memory_free$")) {
+ return "used >"
+ } else {
+ return "free <"
+ }
+ }}
+
+ vars.nscp_memory_committed = false
+ vars.nscp_memory_physical = true
+ vars.nscp_memory_free = true
+ vars.nscp_memory_warning = {{
+ if (!macro("$nscp_memory_free$")) {
+ return 80
+ } else {
+ return 20
+ }
+ }}
+ vars.nscp_memory_critical = {{
+ if (!macro("$nscp_memory_free$")) {
+ return 90
+ } else {
+ return 10
+ }
+ }}
+ vars.nscp_memory_showall = false
+}
+
+object CheckCommand "nscp-local-os-version" {
+ import "nscp-local"
+
+ vars.nscp_query = "check_os_version"
+}
+
+object CheckCommand "nscp-local-pagefile" {
+ import "nscp-local"
+
+ vars.nscp_query = "check_pagefile"
+}
+
+object CheckCommand "nscp-local-process" {
+ import "nscp-local"
+
+ vars.nscp_query = "check_process"
+}
+
+object CheckCommand "nscp-local-service" {
+ import "nscp-local"
+
+ arguments += {
+ "--service" = {
+ value = "$nscp_service_name$"
+ repeat_key = true
+ }
+ "--ok" = {
+ value = "$nscp_service_otype$='$nscp_service_ok$'"
+ }
+ "--warning" = {
+ value = "$nscp_service_wtype$='$nscp_service_warning$'"
+ }
+ "--critical" = {
+ value = "$nscp_service_ctype$='$nscp_service_critical$'"
+ }
+ "-a" = {
+ value = "$nscp_service_arguments$"
+ repeat_key = true
+ }
+ }
+
+ vars.nscp_query = "check_service"
+ vars.nscp_showall = "$nscp_service_showall$"
+
+ vars.nscp_service_showall = true
+ vars.nscp_service_type = "state"
+ vars.nscp_service_otype = vars.nscp_service_type
+ vars.nscp_service_wtype = vars.nscp_service_type
+ vars.nscp_service_ctype = vars.nscp_service_type
+}
+
+object CheckCommand "nscp-local-uptime" {
+ import "nscp-local"
+
+ vars.nscp_query = "check_uptime"
+}
+
+object CheckCommand "nscp-local-version" {
+ import "nscp-local"
+
+ vars.nscp_query = "check_version"
+ vars.nscp_modules = [ "CheckHelpers" ]
+}
+
+object CheckCommand "nscp-local-disk" {
+ import "nscp-local"
+
+ arguments += {
+ "--drive" = {
+ value = "$nscp_disk_drive$"
+ repeat_key = true
+ }
+ "--exclude" = {
+ value = "$nscp_disk_exclude$"
+ repeat_key = true
+ }
+ "--warning" = {
+ value = "$nscp_disk_op$ $nscp_disk_warning$"
+ }
+ "--critical" = {
+ value = "$nscp_disk_op$ $nscp_disk_critical$"
+ }
+ "-a" = {
+ value = "$nscp_disk_arguments$"
+ repeat_key = true
+ }
+ }
+
+ vars.nscp_query = "check_drivesize"
+ vars.nscp_showall = "$nscp_disk_showall$"
+
+ vars.nscp_disk_op = {{
+ if (!macro("$nscp_disk_free$")) {
+ return "used >"
+ } else {
+ return "free <"
+ }
+ }}
+
+ vars.nscp_disk_showall = true
+ vars.nscp_disk_free = false
+ vars.nscp_disk_warning = {{
+ if (!macro("$nscp_disk_free$")) { return 80 } else { return 20 }
+ }}
+ vars.nscp_disk_critical = {{
+ if (!macro("$nscp_disk_free$")) { return 90 } else { return 10 }
+ }}
+
+ vars.nscp_modules = [ "CheckDisk" ]
+}
+
+object CheckCommand "nscp-local-counter" {
+ import "nscp-local"
+
+ arguments += {
+ "--counter" = {
+ value = "$nscp_counter_name$"
+ repeat_key = true
+ }
+ "--warning" = {
+ value = "value $nscp_counter_op$ $nscp_counter_warning$"
+ }
+ "--critical" = {
+ value = "value $nscp_counter_op$ $nscp_counter_critical$"
+ }
+ "--perf-syntax" = {
+ value = "$nscp_counter_perfsyntax$"
+ }
+ "-a" = {
+ value = "$nscp_counter_arguments$"
+ repeat_key = true
+ }
+ }
+
+ vars.nscp_counter_op = {{
+ if (!macro("$nscp_counter_less$")) {
+ return ">"
+ } else {
+ return "<"
+ }
+ }}
+
+ vars.nscp_query = "check_pdh"
+ vars.nscp_showall = "$nscp_counter_showall$"
+ vars.nscp_counter_less = false
+ vars.nscp_counter_perfsyntax = "$nscp_counter_name$"
+}
+
+object CheckCommand "nscp-local-tasksched" {
+ import "nscp-local"
+
+ arguments += {
+ "--filter" = {
+ set_if = {{
+ var scheduler_name = macro("$nscp_tasksched_name$")
+ if (len(scheduler_name) > 0 ) {
+ return true
+ } else {
+ return false
+ }
+ }}
+ value = "title='$nscp_tasksched_name$'"
+ description = "Name of the task to check."
+ }
+ "--folder" = {
+ value = "$nscp_tasksched_folder$"
+ description = "The folder in which the tasks to check reside."
+ }
+ "--hidden" = {
+ set_if = "$nscp_tasksched_hidden$"
+ description = "Look for hidden tasks."
+ }
+ "--recursive" = {
+ value = "$nscp_tasksched_recursive$"
+			description = "Recurse sub folders (defaults to true)."
+ }
+ "--warning" = {
+ value = "$nscp_tasksched_warning$"
+			description = "Filter which marks items that generate a warning state."
+ }
+ "--critical" = {
+ value = "$nscp_tasksched_critical$"
+			description = "Filter which marks items that generate a critical state."
+ }
+ "--empty-state" = {
+ value = "$nscp_tasksched_emptystate$"
+ description = "Return status to use when nothing matched filter."
+ }
+ "--perf-syntax" = {
+ value = "$nscp_tasksched_perfsyntax$"
+ description = "Performance alias syntax."
+ }
+ "--detail-syntax" = {
+ value = "$nscp_tasksched_detailsyntax$"
+ description = "Detail level syntax."
+ }
+ "-a" = {
+ value = "$nscp_tasksched_arguments$"
+ repeat_key = true
+ }
+ }
+
+ vars.nscp_modules = "CheckTaskSched"
+ vars.nscp_query = "check_tasksched"
+ vars.nscp_showall = "$nscp_tasksched_showall$"
+ vars.nscp_tasksched_recursive = true
+ vars.nscp_tasksched_perfsyntax = "%(title)"
+ vars.nscp_tasksched_detailsyntax = "%(folder)/%(title): %(exit_code) != 0"
+ vars.nscp_tasksched_warning = "exit_code != 0"
+ vars.nscp_tasksched_critical = "exit_code < 0"
+
+}
diff --git a/itl/command-plugins-manubulon.conf b/itl/command-plugins-manubulon.conf
new file mode 100644
index 0000000..add365b
--- /dev/null
+++ b/itl/command-plugins-manubulon.conf
@@ -0,0 +1,407 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+/**
+ * main snmp-manubulon template
+ */
+
+template CheckCommand "snmp-manubulon-command" {
+ import "ipv4-or-ipv6"
+
+ arguments = {
+ "-H" = {
+ value = "$snmp_address$"
+ description = "Name or IP address of host to check"
+ }
+ "-C" = {
+ set_if = "$snmp_nocrypt$"
+ value = "$snmp_community$"
+ description = "Community name for the host's SNMP agent (implies v1 protocol)"
+ }
+ "-p" = {
+ value = "$snmp_port$"
+ description = "SNMP port (Default 161)"
+ }
+ "-2" = {
+ set_if = "$snmp_v2$"
+ description = "Use snmp v2c"
+ }
+ "-l" = {
+ set_if = "$snmp_v3$"
+ value = "$snmp_login$"
+ description = "Login and auth password for snmpv3 authentication"
+ }
+ "-x" = {
+ set_if = "$snmp_v3$"
+ value = "$snmp_password$"
+ description = "Priv password"
+ }
+ "-L" = {
+ set_if = "$snmp_v3_use_authprotocol$"
+ value = "$snmp_authprotocol$"
+			description = "<authproto>,<privproto> - <Authentication protocol (md5|sha : default md5)>,<Priv protocol (des|aes : default des)>"
+ }
+ "-X" = {
+ set_if = "$snmp_v3_use_privpass$"
+ value = "$snmp_privpass$"
+ description = "Priv password for snmpv3 (AuthPriv protocol)"
+ }
+ "-w" = {
+ value = "$snmp_warn$"
+ }
+ "-c" = {
+ value = "$snmp_crit$"
+ }
+ "-t" = {
+ value = "$snmp_timeout$"
+ description = "Timeout for SNMP in seconds (Default: 5)"
+ }
+ }
+
+ vars.snmp_address = "$check_address$"
+ vars.snmp_nocrypt = true
+ vars.snmp_community = "public"
+ vars.snmp_v2 = false
+ vars.snmp_v3 = false
+ vars.snmp_login = "snmpuser"
+ vars.snmp_v3_use_privpass = false
+ vars.snmp_v3_use_authprotocol = false
+ vars.snmp_authprotocol = "md5,des"
+ vars.snmp_timeout = "5"
+}
+
+
+/**
+ * snmp env
+ * Url reference: http://nagios.manubulon.com/snmp_env.html
+ */
+
+object CheckCommand "snmp-env" {
+ import "snmp-manubulon-command"
+
+ command = [ ManubulonPluginDir + "/check_snmp_env.pl" ]
+
+ arguments += {
+ "-T" = {
+ value = "$snmp_env_type$"
+ description = "Environment Type [cisco|nokia|bc|iron|foundry|linux]"
+ }
+ "-F" = {
+ value = "$snmp_env_fan$"
+ description = "Minimum fan rpm value (only needed for 'iron' & 'linux')"
+ }
+ "-c" = {
+ value = "$snmp_env_celsius$"
+ description = "Maximum temp in degrees celsius (only needed for 'iron' & 'linux')"
+ }
+ "-f" = {
+ set_if = "$snmp_perf$"
+ description = "Perfparse compatible output"
+ }
+ }
+
+ vars.snmp_env_type = "cisco"
+ vars.snmp_perf = true
+}
+
+
+/**
+ * snmp load
+ * Url reference: http://nagios.manubulon.com/snmp_load.html
+ */
+
+object CheckCommand "snmp-load" {
+ import "snmp-manubulon-command"
+
+ command = [ ManubulonPluginDir + "/check_snmp_load.pl" ]
+
+ arguments += {
+ "-T" = {
+ value = "$snmp_load_type$"
+ description = "CPU check"
+ }
+ "-f" = {
+ set_if = "$snmp_perf$"
+ description = "Perfparse compatible output"
+ }
+ }
+
+ vars.snmp_load_type = "stand"
+ vars.snmp_warn = 85
+ vars.snmp_crit = 95
+ vars.snmp_perf = true
+}
+
+
+/**
+ * Memory and swap usage on Linux given by Net-snmp
+ * Memory usage on cisco routers or Pix
+ * For other systems use check_snmp_storage.pl
+ * Url reference: http://nagios.manubulon.com/snmp_mem.html
+ */
+
+object CheckCommand "snmp-memory" {
+ import "snmp-manubulon-command"
+
+ command = [ ManubulonPluginDir + "/check_snmp_mem.pl" ]
+
+ arguments += {
+ "-f" = {
+ set_if = "$snmp_perf$"
+ description = "Performance data output"
+ }
+ "-I" = {
+ set_if = "$snmp_is_cisco$"
+ description = "check cisco memory (sum of all memory pools)"
+ }
+ "-E" = {
+ set_if = "$snmp_is_hp$"
+ description = "check HP / Procurve memory"
+ }
+ "-m" = {
+ set_if = "$snmp_memcached$"
+ description = "Include cached memory in used memory"
+ }
+ "-b" = {
+ set_if = "$snmp_membuffer$"
+ description = "Exclude buffered memory in used memory"
+ }
+ }
+
+ vars.snmp_warn = "94,50"
+ vars.snmp_crit = "98,80"
+ vars.snmp_perf = true
+ vars.snmp_is_cisco = false
+ vars.snmp_memcached = false
+ vars.snmp_membuffer = false
+}
+
+
+/**
+ * snmp storage - Disk/Memory
+ * Url reference: http://nagios.manubulon.com/snmp_storage.html
+ */
+object CheckCommand "snmp-storage" {
+ import "snmp-manubulon-command"
+
+ command = [ ManubulonPluginDir + "/check_snmp_storage.pl" ]
+
+ arguments += {
+ "-m" = {
+ value = "$snmp_storage_name$"
+			description = "Name in description OID (can be mountpoints '/home' or 'Swap Space'...)"
+ }
+ "-f" = {
+ set_if = "$snmp_perf$"
+ description = "Perfparse compatible output"
+ }
+ "-e" = {
+ set_if = "$snmp_exclude$"
+ description = "Select all storages except the one(s) selected by -m. No action on storage type selection."
+ }
+ "-o" = {
+ value = "$snmp_storage_olength$"
+			description = "Max-size of the SNMP message, useful in case of Too Long responses."
+ }
+ "-q" = {
+ value = "$snmp_storage_type$"
+ description = "Storage type: Other, Ram, VirtualMemory, FixedDisk, RemovableDisk, FloppyDisk, CompactDisk, RamDisk, FlashMemory, or NetworkDisk"
+ }
+ }
+
+ vars.snmp_storage_name = "^/$$"
+ vars.snmp_warn = 80
+ vars.snmp_crit = 90
+ vars.snmp_perf = true
+ vars.snmp_exclude = false
+}
+
+
+/**
+ * snmp network interfaces
+ * Url reference: http://nagios.manubulon.com/snmp_int.html
+ */
+
+object CheckCommand "snmp-interface" {
+ import "snmp-manubulon-command"
+
+ command = [ ManubulonPluginDir + "/check_snmp_int.pl" ]
+
+ arguments += {
+ "-n" = {
+ value = "$snmp_interface$"
+ description = "Name in description OID (eth0, ppp0 ...). This is treated as a regexp : -n eth will match eth0,eth1,..."
+ }
+ "-k" = {
+ set_if = "$snmp_interface_perf$"
+			description = "Check the input/output bandwidth of the interface"
+ }
+ "--label" = {
+ value = "$snmp_interface_label$"
+ description = "Add label before speed in output : in=, out=, errors-out=, etc..."
+ }
+ "-Y" = {
+ set_if = "$snmp_interface_bits_bytes$"
+ description = "Output performance data in bits/s or Bytes/s"
+ }
+ "-y" = {
+ set_if = "$snmp_interface_percent$"
+ description = "Output performance data in % of max speed"
+ }
+ "-B" = {
+ set_if = "$snmp_interface_kbits$"
+ description = "Make the warning and critical levels in K|M|G Bits/s instead of K|M|G Bytes/s"
+ }
+ "-M" = {
+ set_if = "$snmp_interface_megabytes$"
+ description = "Make the warning and critical levels in Mbps"
+ }
+ "--64bits" = {
+ set_if = "$snmp_interface_64bit$"
+ description = "Use 64 bits counters instead of the standard counters when checking bandwidth & performance data for interface >= 1Gbps"
+ }
+ "-e" = {
+ set_if = "$snmp_interface_errors$"
+ description = "Add error & discard to Perfparse output"
+ }
+ "-q" = {
+ set_if = "$snmp_interface_extended_checks$"
+ description = "Also check the error and discard input/output. When enabled format of snmp_warn and snmp_crit changes to <In bytes>,<Out bytes>,<In error>,<Out error>,<In disc>,<Out disc>"
+ }
+ "-i" = {
+ set_if = "$snmp_interface_inverse$"
+ description = "Make critical when up"
+ }
+ "-r" = {
+ set_if = "$snmp_interface_noregexp$"
+ description = "Do not use regexp to match NAME in description OID"
+ }
+ "-d" = {
+ value = "$snmp_interface_delta$"
+ description = "Make an average of <delta> seconds (default 300=5min)"
+ }
+ "-u" = {
+ set_if = "$snmp_interface_warncrit_percent$"
+ description = "Make the warning and critical levels in % of reported interface speed"
+ }
+ "-N" = {
+ set_if = "$snmp_interface_ifname$"
+ }
+ "-A" = {
+ set_if = "$snmp_interface_ifalias$"
+ }
+ "-f" = {
+ set_if = "$snmp_perf$"
+ description = "Perfparse compatible output (no output when interface is down)"
+ }
+ "-W" = {
+ set_if = "$snmp_interface_weathermap$"
+ description = "Include 'weathermap' data for NagVis in performance data"
+ }
+ "-a" = {
+ set_if = "$snmp_interface_admin$"
+ description = "Use administrative status instead of operational"
+ }
+ }
+
+ vars.snmp_interface = "eth0"
+ vars.snmp_interface_perf = true
+ vars.snmp_interface_bits_bytes = true
+ vars.snmp_interface_percent = false
+ vars.snmp_interface_kbits = true
+ vars.snmp_interface_megabytes = true
+ vars.snmp_interface_64bit = false
+ vars.snmp_interface_errors = true
+ vars.snmp_interface_extended_checks = false
+ vars.snmp_interface_noregexp = false
+ vars.snmp_interface_delta = 300
+ vars.snmp_interface_warncrit_percent = false
+ vars.snmp_interface_ifname = false
+ vars.snmp_interface_ifalias = false
+ vars.snmp_warn = "300,400"
+ vars.snmp_crit = "0,600"
+ vars.snmp_perf = true
+}
+
+
+/**
+ * snmp process
+ * Url reference: http://nagios.manubulon.com/snmp_process.html
+ */
+
+object CheckCommand "snmp-process" {
+ import "snmp-manubulon-command"
+
+ command = [ ManubulonPluginDir + "/check_snmp_process.pl" ]
+
+ arguments += {
+ "-n" = {
+ value = "$snmp_process_name$"
+ description = "Regex service name eg. ^apache2$"
+ }
+ "-F" = {
+ set_if = "$snmp_perf$"
+ description = "Add performance output (outputs : memory_usage, num_process, cpu_usage)"
+ }
+ "-A" = {
+ set_if = "$snmp_process_use_params$"
+ description = "Add parameters to select processes (ex : 'named.*-t /var/named/chroot' will only select named process with this parameter)"
+ }
+ "-f" = {
+ set_if = "$snmp_process_use_fullpath$"
+ description = "Use full path name instead of process name to select processes (ex : '/opt/app1/app1bin' will only select named process with this full path)"
+ }
+ "-m" = {
+ set_if = "$snmp_process_mem_usage$"
+ value = "$snmp_process_mem_threshold$"
+ description = "Checks memory usage. Values warning and critical in Mb eg. 512,1024"
+ }
+ "-u" = {
+ set_if = "$snmp_process_cpu_usage$"
+ value = "$snmp_process_cpu_threshold$"
+ description = "Checks CPU usage. Values warning and critical in % (value can be > 100% : 100%=1 CPU) eg. 15,50"
+ }
+ }
+
+ vars.snmp_process_name = ".*"
+ vars.snmp_warn = 0
+ vars.snmp_crit = 0
+ vars.snmp_perf = true
+ vars.snmp_process_use_params = false
+ vars.snmp_process_use_fullpath = false
+ vars.snmp_process_mem_usage = false
+ vars.snmp_process_mem_threshold = "0,0"
+ vars.snmp_process_cpu_usage = false
+ vars.snmp_process_cpu_threshold = "0,0"
+}
+
+/**
+ * snmp service
+ * Url reference: http://nagios.manubulon.com/snmp_windows.html
+ */
+
+object CheckCommand "snmp-service" {
+ import "snmp-manubulon-command"
+
+ command = [ ManubulonPluginDir + "/check_snmp_win.pl" ]
+
+ arguments += {
+ "-n" = {
+ value = "$snmp_service_name$"
+ description = "Comma separated names of services (perl regular expressions can be used for every one). By default, it is not case sensitive. eg. ^dns$"
+ }
+ "-N" = {
+ value = "$snmp_service_count$"
+ description = "Compare matching services with <n> instead of the number of names provided."
+ }
+ "-s" = {
+ set_if = "$snmp_service_showall$"
+ description = "Show all services in the output, instead of only the non-active ones."
+ }
+ "-r" = {
+ set_if = "$snmp_service_noregexp$"
+ description = "Do not use regexp to match NAME in service description."
+ }
+ }
+
+ vars.snmp_service_name = ".*"
+}
diff --git a/itl/command-plugins-windows.conf b/itl/command-plugins-windows.conf
new file mode 100644
index 0000000..22ab623
--- /dev/null
+++ b/itl/command-plugins-windows.conf
@@ -0,0 +1,319 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+object CheckCommand "disk-windows" {
+ command = [ PluginDir + "/check_disk.exe" ]
+
+ arguments = {
+ "-w" = {
+ value = "$disk_win_warn$"
+ description = "Warning threshold"
+ }
+ "-c" = {
+ value = "$disk_win_crit$"
+ description = "Critical threshold"
+ }
+ "-p" = {
+ value = "$disk_win_path$"
+ description = "Optional paths to check"
+ repeat_key = true
+ }
+ "-u" = {
+ value = "$disk_win_unit$"
+ description = "Use this unit to display disk space"
+ }
+ "-x" = {
+ value = "$disk_win_exclude$"
+ description = "Exclude these drives from check"
+ }
+ "-U" = {
+ set_if = "$disk_win_show_used$"
+ description = "Work with used instead of free space"
+ }
+ }
+
+ //The default
+ vars.disk_win_unit = "mb"
+ vars.disk_win_warn = "20%"
+ vars.disk_win_crit = "10%"
+}
+
+object CheckCommand "load-windows" {
+ command = [ PluginDir + "/check_load.exe" ]
+
+ arguments = {
+ "-w" = {
+ value = "$load_win_warn$"
+ description = "Warning threshold"
+ }
+ "-c" = {
+ value = "$load_win_crit$"
+ description = "Critical threshold"
+ }
+ }
+}
+
+object CheckCommand "memory-windows" {
+ command = [ PluginDir + "/check_memory.exe" ]
+
+ arguments = {
+ "-w" = {
+ value = "$memory_win_warn$"
+ description = "Warning Threshold"
+ }
+ "-c" = {
+ value = "$memory_win_crit$"
+ description = "Critical Threshold"
+ }
+ "-u" = {
+ value = "$memory_win_unit$"
+ description = "Use this unit to display memory"
+ }
+ "-U" = {
+ set_if = "$memory_win_show_used$"
+ description = "Show used memory instead of the free memory"
+ }
+ }
+
+ //The default
+ vars.memory_win_unit = "mb"
+ vars.memory_win_warn = "10%"
+ vars.memory_win_crit = "5%"
+}
+
+object CheckCommand "network-windows" {
+ command = [ PluginDir + "/check_network.exe" ]
+
+ arguments = {
+ "-w" = {
+ value = "$network_win_warn$"
+ description = "Warning threshold"
+ }
+ "-c" = {
+ value = "$network_win_crit$"
+ description = "Critical threshold"
+ }
+ "-n" = {
+ set_if = "$network_no_isatap$"
+ description = "Don't show ISATAP interfaces in output"
+ }
+ }
+ vars.network_no_isatap = true
+}
+
+object CheckCommand "perfmon-windows" {
+ command = [ PluginDir + "/check_perfmon.exe" ]
+
+ arguments = {
+ "-w" = {
+ value = "$perfmon_win_warn$"
+ description = "Warning threshold"
+ }
+ "-c" = {
+ value = "$perfmon_win_crit$"
+ description = "Critical threshold"
+ }
+ "-P" = {
+ value = "$perfmon_win_counter$"
+ description = "The Performance Counter string"
+ required = true
+ }
+ "--performance-wait" = {
+ value = "$perfmon_win_wait$"
+ description = "Wait time between two counter collections in ms"
+ }
+ "--fmt-countertype" = {
+ value = "$perfmon_win_type$"
+ description = "Performance counter type"
+ }
+ "--perf-syntax" = {
+ value = "$perfmon_win_syntax$"
+			description = "Use this instead of the counter name in the performance data"
+ }
+ }
+
+	vars.perfmon_win_wait = 1000
+ vars.perfmon_win_type = "double"
+}
+
+
+template CheckCommand "ping-common-windows" {
+ command = [ PluginDir + "/check_ping.exe" ]
+
+ arguments = {
+ "-H" = {
+ value = "$ping_win_address$"
+ required = true
+ description = "Address to ping"
+ }
+ "-w" = {
+ value = "$ping_win_wrta$,$ping_win_wpl$%"
+			description = "Warning threshold: RTA and packet loss separated by comma"
+ }
+ "-c" = {
+ value = "$ping_win_crta$,$ping_win_cpl$%"
+			description = "Critical threshold: RTA and packet loss separated by comma"
+ }
+ "-p" = {
+ value = "$ping_win_packets$"
+			description = "Number of packets to send"
+ }
+ "-t" = {
+ value = "$ping_win_timeout$"
+ description = "Timeout in ms"
+ }
+ }
+
+ vars.ping_win_packets = "5"
+ vars.ping_win_timeout = "1000"
+}
+
+object CheckCommand "ping-windows" {
+ import "ping-common-windows"
+ import "ipv4-or-ipv6"
+
+ vars.ping_win_address = "$check_address$"
+}
+
+object CheckCommand "ping4-windows" {
+ import "ping-common-windows"
+
+ command += [ "-4" ]
+
+ vars.ping_win_address = "$address$"
+}
+
+object CheckCommand "ping6-windows" {
+ import "ping-common-windows"
+
+ command += [ "-6" ]
+
+ vars.ping_win_address = "$address6$"
+}
+
+object CheckCommand "procs-windows" {
+ command = [ PluginDir + "/check_procs.exe" ]
+
+ arguments = {
+ "-w" = {
+ value = "$procs_win_warn$"
+ description = "Warning threshold"
+ }
+ "-c" = {
+ value = "$procs_win_crit$"
+ description = "Critical threshold"
+ }
+ "-u" = {
+ value = "$procs_win_user$"
+ description = "Count only procs of this user"
+ }
+ }
+}
+
+object CheckCommand "service-windows" {
+ command = [ PluginDir + "/check_service.exe" ]
+
+ arguments = {
+ "-w" = {
+ set_if = "$service_win_warn$"
+ description = "Warn instead of critical when service is not running"
+ }
+ "-s" = {
+ value = "$service_win_service$"
+ required = true
+ description = "Service to check"
+ }
+ "--description" = {
+ set_if = "$service_win_description$"
+ description = "Use service description instead of name"
+ }
+ }
+}
+
+object CheckCommand "swap-windows" {
+ command = [ PluginDir + "/check_swap.exe" ]
+
+ arguments = {
+ "-w" = {
+ value = "$swap_win_warn$"
+ description = "Warning threshold"
+ }
+ "-c" = {
+ value = "$swap_win_crit$"
+ description = "Critical threshold"
+ }
+ "-u" = {
+ value = "$swap_win_unit$"
+ description = "Unit to display swap in"
+ }
+ "-U" = {
+ set_if = "$swap_win_show_used$"
+ description = "Show used swap instead of the free swap"
+ }
+ }
+
+ // Default
+ vars.swap_win_unit = "mb"
+ vars.swap_win_warn = "10%"
+ vars.swap_win_crit = "5%"
+}
+
+object CheckCommand "update-windows" {
+ command = [ PluginDir + "/check_update.exe" ]
+
+ arguments = {
+ "-w" = {
+ value = "$update_win_warn$"
+ description = "Number of updates to trigger a warning"
+ }
+ "-c" = {
+ value = "$update_win_crit$"
+ description = "Number of updates to trigger a critical"
+ }
+ "--possible-reboot" = {
+ set_if = "$update_win_reboot$"
+ description = "Treat 'may need update' as 'definitely needs update'"
+ }
+ "--no-reboot-critical" = {
+ set_if = "$ignore_reboot$"
+ description = "Do not automatically return critical if an update requiring reboot is present."
+ }
+ }
+
+ timeout = 5m
+}
+
+object CheckCommand "uptime-windows" {
+ command = [ PluginDir + "/check_uptime.exe" ]
+
+ arguments = {
+ "-w" = {
+ value = "$uptime_win_warn$"
+ description = "Warning threshold"
+ }
+ "-c" = {
+ value = "$uptime_win_crit$"
+ description = "Critical threshold"
+ }
+ "-u" = {
+ value = "$uptime_win_unit$"
+ description = "Time unit to use"
+ }
+ }
+
+ vars.uptime_win_unit = "s"
+}
+
+object CheckCommand "users-windows" {
+ command = [ PluginDir + "/check_users.exe" ]
+
+ arguments = {
+ "-w" = {
+ value = "$users_win_warn$"
+ description = "Warning threshold"
+ }
+ "-c" = {
+ value = "$users_win_crit$"
+ description = "Critical threshold"
+ }
+ }
+}
diff --git a/itl/command-plugins.conf b/itl/command-plugins.conf
new file mode 100644
index 0000000..4d9ae06
--- /dev/null
+++ b/itl/command-plugins.conf
@@ -0,0 +1,3258 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+template CheckCommand "ipv4-or-ipv6" {
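+	/* Use $address$ when it is set (or $check_ipv4$ forces it), otherwise fall back to $address6$; $check_ipv6$ prefers the IPv6 address. */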
+ vars.check_address = {{
+ var addr_v4 = macro("$address$")
+ var addr_v6 = macro("$address6$")
+
+ if (addr_v4 && !macro("$check_ipv6$") || macro("$check_ipv4$")) {
+ return addr_v4
+ } else {
+ return addr_v6
+ }
+ }}
+
+ vars.check_ipv4 = false
+ vars.check_ipv6 = false
+}
+
+template CheckCommand "ping-common" {
+ command = [ PluginDir + "/check_ping" ]
+
+ arguments = {
+ "-H" = {
+ value = "$ping_address$"
+ description = "host to ping"
+ }
+ "-w" = {
+ value = "$ping_wrta$,$ping_wpl$%"
+ description = "warning threshold pair"
+ }
+ "-c" = {
+ value = "$ping_crta$,$ping_cpl$%"
+ description = "critical threshold pair"
+ }
+ "-p" = {
+ value = "$ping_packets$"
+ description = "number of ICMP ECHO packets to send (Default: 5)"
+ }
+ "-t" = {
+ value = "$ping_timeout$"
+ description = "Seconds before connection times out (default: 10)"
+ }
+ }
+
+ vars.ping_wrta = 100
+ vars.ping_wpl = 5
+ vars.ping_crta = 200
+ vars.ping_cpl = 15
+}
+
+object CheckCommand "ping" {
+ import "ping-common"
+ import "ipv4-or-ipv6"
+
+ vars.ping_address = "$check_address$"
+}
+
+object CheckCommand "ping4" {
+ import "ping-common"
+
+ command += [ "-4" ]
+
+ vars.ping_address = "$address$"
+}
+
+object CheckCommand "ping6" {
+ import "ping-common"
+
+ command += [ "-6" ]
+
+ vars.ping_address = "$address6$"
+}
+
+template CheckCommand "hostalive-common" {
+ vars.ping_wrta = 3000.0
+ vars.ping_wpl = 80
+
+ vars.ping_crta = 5000.0
+ vars.ping_cpl = 100
+}
+
+object CheckCommand "hostalive" {
+ import "ping"
+ import "hostalive-common"
+}
+
+object CheckCommand "hostalive4" {
+ import "ping4"
+ import "hostalive-common"
+}
+
+object CheckCommand "hostalive6" {
+ import "ping6"
+ import "hostalive-common"
+}
+
+template CheckCommand "fping-common" {
+ command = [
+ PluginDir + "/check_fping",
+ "$fping_address$"
+ ]
+
+ arguments = {
+ "-w" = {
+ value = "$fping_wrta$,$fping_wpl$%"
+ description = "warning threshold pair"
+ }
+ "-c" = {
+ value = "$fping_crta$,$fping_cpl$%"
+ description = "critical threshold pair"
+ }
+ "-n" = {
+ value = "$fping_number$"
+ description = "number of ICMP packets to send (default: 1)"
+ }
+ "-i" = {
+ value = "$fping_interval$"
+ description = "Interval (ms) between sending packets (default: fping's default for -p)"
+ }
+ "-b" = {
+ value = "$fping_bytes$"
+ description = "size of ICMP packet (default: 56)"
+ }
+ "-T" = {
+ value = "$fping_target_timeout$"
+ description = "Target timeout (ms) (default: fping's default for -t)"
+ }
+ "-S" = {
+ value = "$fping_source_ip$"
+			description = "Name or IP address of the source IP to use"
+ }
+ "-I" = {
+ value = "$fping_source_interface$"
+ description = "source interface name"
+ }
+ }
+
+ vars.fping_wrta = 100
+ vars.fping_wpl = 5
+ vars.fping_crta = 200
+ vars.fping_cpl = 15
+ vars.fping_number = 5
+ vars.fping_interval = 500
+}
+
+object CheckCommand "fping4" {
+ import "fping-common"
+
+ command += [ "-4" ]
+
+ vars.fping_address = "$address$"
+}
+
+object CheckCommand "fping6" {
+ import "fping-common"
+
+ command += [ "-6" ]
+
+ vars.fping_address = "$address6$"
+}
+
+object CheckCommand "tcp" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_tcp" ]
+
+ arguments = {
+ "-H" = {
+ value = "$tcp_address$"
+ description = "Host name, IP Address, or unix socket (must be an absolute path)."
+ }
+ "-p" = {
+ value = "$tcp_port$"
+ description = "The TCP port number."
+ }
+ "-e" = {
+ value = "$tcp_expect$"
+ description = "String to expect in server response (may be repeated)."
+ }
+ "-A" = {
+ set_if = "$tcp_all$"
+ description = "All expect strings need to occur in server response. Defaults to false."
+ }
+ "-E_send" = {
+ key = "-E"
+ order = 1
+ set_if = "$tcp_escape_send$"
+ description = "Enable usage of \n, \r, \t or \\ in send string."
+ }
+ "-s" = {
+ order = 2
+ value = "$tcp_send$"
+ description = "String to send to the server."
+ }
+ "-E_quit" = {
+ key = "-E"
+ order = 3
+ set_if = "$tcp_escape_quit$"
+ description = "Enable usage of \n, \r, \t or \\ in quit string."
+ }
+ "-q" = {
+ order = 4
+ value = "$tcp_quit$"
+ description = "String to send server to initiate a clean close of the connection."
+ }
+ "-r" = {
+ value = "$tcp_refuse$"
+ description = "Accept TCP refusals with states ok, warn, crit. Defaults to crit."
+ }
+ "-M" = {
+ value = "$tcp_mismatch$"
+ description = "Accept expected string mismatches with states ok, warn, crit. Defaults to warn."
+ }
+ "-j" = {
+ set_if = "$tcp_jail$"
+ description = "Hide output from TCP socket."
+ }
+ "-m" = {
+ value = "$tcp_maxbytes$"
+ description = "Close connection once more than this number of bytes are received."
+ }
+ "-d" = {
+ value = "$tcp_delay$"
+ description = "Seconds to wait between sending string and polling for response."
+ }
+ "-D" = {
+ value = "$tcp_certificate$"
+			description = "Minimum number of days a certificate has to be valid. 1st value is number of days for warning, 2nd is critical (if not specified: 0) - separated by comma."
+ }
+ "-S" = {
+ set_if = "$tcp_ssl$"
+ description = "Use SSL for the connection."
+ }
+ "--sni" = {
+ value = "$tcp_sni$"
+ description = "Hostname to send in the server_name (SNI) SSL/TLS extension."
+ }
+ "-w" = {
+ value = "$tcp_wtime$"
+ description = "Response time to result in warning status (seconds)."
+ }
+ "-c" = {
+ value = "$tcp_ctime$"
+ description = "Response time to result in critical status (seconds)."
+ }
+ "-t" = {
+ value = "$tcp_timeout$"
+ description = "Seconds before connection times out. Defaults to 10."
+ }
+ "-4" = {
+ set_if = "$tcp_ipv4$"
+ description = "Use IPv4 connection"
+ }
+ "-6" = {
+ set_if = "$tcp_ipv6$"
+ description = "Use IPv6 connection"
+ }
+ }
+
+ vars.tcp_address = "$check_address$"
+ vars.tcp_all = false
+ vars.tcp_refuse = "crit"
+ vars.tcp_mismatch = "warn"
+ vars.tcp_timeout = 10
+ vars.check_ipv4 = "$tcp_ipv4$"
+ vars.check_ipv6 = "$tcp_ipv6$"
+}
+
+object CheckCommand "ssl" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_tcp" ]
+
+ arguments = {
+ "-H" = {
+ value = "$ssl_address$"
+ description = "Host address"
+ }
+ "-p" = {
+ value = "$ssl_port$"
+			description = "TCP port (default: 443)"
+ }
+ "--ssl" = {
+ description = "Use SSL for the connection"
+ }
+ "-t" = {
+ value = "$ssl_timeout$"
+ description = "Seconds before connection times out (default: 10)"
+ }
+ "-D" = {{
+ var days_warn = macro("$ssl_cert_valid_days_warn$")
+ var days_critical = macro("$ssl_cert_valid_days_critical$")
+ if (days_warn) {
+ if (days_critical) {
+ return days_warn + "," + days_critical
+ } else {
+ return days_warn
+ }
+ }
+ }}
+ "--sni" = {
+ value = "$ssl_sni$"
+ description = "Enable SSL/TLS hostname extension support (SNI)"
+ }
+ }
+
+ vars.ssl_address = "$check_address$"
+ vars.ssl_port = 443
+ vars.ssl_cert_valid_days_warn = false
+ vars.ssl_cert_valid_days_critical = false
+}
+
+
+object CheckCommand "udp" {
+ import "ipv4-or-ipv6"
+
+ command = [
+ PluginDir + "/check_udp",
+ "-H", "$udp_address$",
+ "-p", "$udp_port$"
+ ]
+
+ arguments = {
+ "-s" = {
+ value = "$udp_send$"
+ required = true
+ description = "String to send to the server"
+ }
+ "-e" = {
+ value = "$udp_expect$"
+ required = true
+			description = "String to expect in server response"
+ }
+ "-q" = {
+ value = "$udp_quit$"
+ description = "String to send server to initiate a clean close of the connection"
+ }
+ "-4" = {
+ set_if = "$udp_ipv4$"
+ description = "Use IPv4 connection"
+ }
+ "-6" = {
+ set_if = "$udp_ipv6$"
+ description = "Use IPv6 connection"
+ }
+ }
+
+ vars.udp_address = "$check_address$"
+ vars.check_ipv4 = "$udp_ipv4$"
+ vars.check_ipv6 = "$udp_ipv6$"
+}
+
+object CheckCommand "http" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_http" ]
+
+ arguments = {
+ "-H" = {
+ value = "$http_vhost$"
+ description = "Host name argument for servers using host headers (virtual host)"
+ }
+ "-I" = {
+ set_if = {{ string(macro("$http_address$")) != "" }}
+ value = "$http_address$"
+ description = "IP address or name (use numeric address if possible to bypass DNS lookup)"
+ }
+ "-u" = {
+ value = "$http_uri$"
+ description = "URL to GET or POST (default: /)"
+ }
+ "-p" = {
+ value = "$http_port$"
+ description = "Port number (default: 80)"
+ }
+ "-S" = {
+ set_if = "$http_ssl$"
+ description = "Connect via SSL"
+ }
+ "-S1" = {
+ set_if = "$http_ssl_force_tlsv1$"
+ description = "Connect via SSL version TLSv1"
+ }
+ "-S1.1" = {
+ set_if = "$http_ssl_force_tlsv1_1$"
+ description = "Connect via SSL version TLSv1.1"
+ }
+ "-S1.2" = {
+ set_if = "$http_ssl_force_tlsv1_2$"
+ description = "Connect via SSL version TLSv1.2"
+ }
+ "-S2" = {
+ set_if = "$http_ssl_force_sslv2$"
+ description = "Connect via SSL version SSLv2"
+ }
+ "-S3" = {
+ set_if = "$http_ssl_force_sslv3$"
+ description = "Connect via SSL version SSLv3"
+ }
+ "-S1+" = {
+ set_if = "$http_ssl_force_tlsv1_or_higher$"
+ description = "Connect via SSL version TLSv1 and newer"
+ }
+ "-S1.1+" = {
+ set_if = "$http_ssl_force_tlsv1_1_or_higher$"
+ description = "Connect via SSL version TLSv1.1 and newer"
+ }
+ "-S1.2+" = {
+ set_if = "$http_ssl_force_tlsv1_2_or_higher$"
+ description = "Connect via SSL version TLSv1.2 and newer"
+ }
+ "-S2+" = {
+ set_if = "$http_ssl_force_sslv2_or_higher$"
+ description = "Connect via SSL version SSLv2 and newer"
+ }
+ "-S3+" = {
+ set_if = "$http_ssl_force_sslv3_or_higher$"
+ description = "Connect via SSL version SSLv3 and newer"
+ }
+ "--sni" = {
+ set_if = "$http_sni$"
+ description = "Enable SSL/TLS hostname extension support (SNI)"
+ }
+ "-C" = {
+ value = "$http_certificate$"
+			description = "Minimum number of days a certificate has to be valid. This parameter explicitly sets the port to 443 and ignores the URL if passed."
+ }
+ "-J" = {
+ value = "$http_clientcert$"
+			description = "Name of the file that contains the client certificate (PEM format)"
+ }
+ "-K" = {
+ value = "$http_privatekey$"
+			description = "Name of the file that contains the private key (PEM format)"
+ }
+ "-a" = {
+ value = "$http_auth_pair$"
+ description = "Username:password on sites with basic authentication"
+ }
+ "--no-body" = {
+ set_if = "$http_ignore_body$"
+ description = "Don't wait for document body: stop reading after headers"
+ }
+ "-w" = {
+ value = "$http_warn_time$"
+ description = "Response time to result in warning status (seconds)"
+ }
+ "-c" = {
+ value = "$http_critical_time$"
+ description = "Response time to result in critical status (seconds)"
+ }
+ "-e" = {
+ value = "$http_expect$"
+ description = "Comma-delimited list of strings, at least one of them is expected in the first (status) line of the server response (default: HTTP/1.)"
+ }
+ "-d" = {
+ value = "$http_headerstring$"
+ description = "String to expect in the response headers"
+ }
+ "-s" = {
+ value = "$http_string$"
+ description = "String to expect in the content"
+ }
+ "-P" = {
+ value = "$http_post$"
+ description = "URL encoded http POST data"
+ }
+ "-j" = {
+ value = "$http_method$"
+ description = "Set http method (for example: HEAD, OPTIONS, TRACE, PUT, DELETE)"
+ }
+ "-M" = {
+ value = "$http_maxage$"
+			description = "Warn if the document is more than this many seconds old"
+ }
+ "-T" = {
+ value = "$http_contenttype$"
+ description = "Specify Content-Type header when POSTing"
+ }
+ "-l" = {
+ set_if = "$http_linespan$"
+ description = "Allow regex to span newline"
+ order = 1
+ }
+ "-r" = {
+ value = "$http_expect_body_regex$"
+ description = "Search page for regex"
+ order = 2
+ }
+ "-R" = {
+ value = "$http_expect_body_eregi$"
+ description = "Search page for case-insensitive regex"
+ order = 2
+ }
+ "--invert-regex" = {
+ set_if = "$http_invertregex$"
+ description = "Return CRITICAL if found, OK if not"
+ }
+ "-b" = {
+ value = "$http_proxy_auth_pair$"
+ description = "Username:password on proxy-servers with basic authentication"
+ }
+ "-A" = {
+ value = "$http_useragent$"
+ description = "String to be sent in http header as User Agent"
+ }
+ "-k" = {
+ value = "$http_header$"
+ description = "Any other tags to be sent in http header (may be repeated)"
+ }
+ "-E" = {
+ set_if = "$http_extendedperfdata$"
+ description = "Print additional perfdata"
+ }
+ "-f" = {
+ value = "$http_onredirect$"
+ description = "How to handle redirect pages"
+ }
+ "-m" = {
+ value = "$http_pagesize$"
+ description = "Minim page size required:Maximum page size required"
+ }
+ "-t" = {
+ value = "$http_timeout$"
+ description = "Seconds before connection times out"
+ }
+ "-4" = {
+ set_if = "$http_ipv4$"
+ description = "Use IPv4 connection"
+ }
+ "-6" = {
+ set_if = "$http_ipv6$"
+ description = "Use IPv6 connection"
+ }
+ "-L" = {
+ set_if = "$http_link$"
+ description = "Wrap output in HTML link"
+ }
+ "-v" = {
+ set_if = "$http_verbose$"
+ description = "Show details for command-line debugging"
+ }
+ "--verify-host" = {
+ set_if = "$http_verify_host$"
+ description = "Verify SSL certificate is for the -H hostname (with --sni and -S)"
+ }
+ }
+
+ vars.http_address = "$check_address$"
+ vars.http_ssl = false
+ vars.http_sni = false
+ vars.http_linespan = false
+ vars.http_invertregex = false
+ vars.check_ipv4 = "$http_ipv4$"
+ vars.check_ipv6 = "$http_ipv6$"
+ vars.http_link = false
+ vars.http_verbose = false
+}
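
A minimal, illustrative Service applying the "http" CheckCommand could look like the sketch below (the host name, virtual host and thresholds are assumptions for illustration only; this is not part of the upstream diff):

object Service "https-www" {
  host_name = "web01"                      // hypothetical Host object
  check_command = "http"
  vars.http_vhost = "www.example.com"      // passed via -H
  vars.http_ssl = true                     // adds -S because set_if evaluates to true
  vars.http_sni = true                     // adds --sni
  vars.http_warn_time = 3                  // -w 3 (seconds)
  vars.http_critical_time = 6              // -c 6 (seconds)
}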
+
+object CheckCommand "ftp" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_ftp" ]
+
+ arguments = {
+ "-H" = {
+ value = "$ftp_address$"
+ description = "The host's address. Defaults to $address$ or $address6$ if the address attribute is not set."
+ }
+ "-p" = {
+ value = "$ftp_port$"
+ description = "The FTP port number. Defaults to none"
+ }
+ "-e" = {
+ value = "$ftp_expect$"
+ description = "String to expect in server response (may be repeated)."
+ }
+ "-A" = {
+ set_if = "$ftp_all$"
+ description = "All expect strings need to occur in server response. Default is any."
+ }
+ "-E_send" = {
+ key = "-E"
+ order = 1
+ set_if = "$ftp_escape_send$"
+ description = "Enable usage of \n, \r, \t or \\ in send string. Default is nothing."
+ }
+ "-s" = {
+ order = 2
+ value = "$ftp_send$"
+ description = "String to send to the server."
+ }
+ "-E_quit" = {
+ key = "-E"
+ order = 3
+ set_if = "$ftp_escape_quit$"
+ description = "Can use \n, \r, \t or \\ in quit string. Default is \r\n added to end of quit."
+ }
+ "-q" = {
+ order = 4
+ value = "$ftp_quit$"
+ description = "String to send server to initiate a clean close of the connection."
+ }
+ "-r" = {
+ value = "$ftp_refuse$"
+ description = "Accept TCP refusals with states ok, warn, crit. Defaults to crit."
+ }
+ "-M" = {
+ value = "$ftp_mismatch$"
+ description = "Accept expected string mismatches with states ok, warn, crit. Defaults to warn."
+ }
+ "-j" = {
+ set_if = "$ftp_jail$"
+ description = "Hide output from TCP socket."
+ }
+ "-m" = {
+ value = "$ftp_maxbytes$"
+ description = "Close connection once more than this number of bytes are received."
+ }
+ "-d" = {
+ value = "$ftp_delay$"
+ description = "Seconds to wait between sending string and polling for response."
+ }
+ "-D" = {
+ value = "$ftp_certificate$"
+ description = "Minimum number of days a certificate has to be valid. 1st value is number of days for warning, 2nd is critical (if not specified: 0) - seperated by comma."
+ }
+ "-S" = {
+ set_if = "$ftp_ssl$"
+ description = "Use SSL for the connection."
+ }
+ "-w" = {
+ value = "$ftp_wtime$"
+ description = "Response time to result in warning status (seconds)."
+ }
+ "-c" = {
+ value = "$ftp_ctime$"
+ description = "Response time to result in critical status (seconds)."
+ }
+ "-t" = {
+ value = "$ftp_timeout$"
+ description = "Seconds before connection times out. Defaults to 10."
+ }
+ "-4" = {
+ set_if = "$ftp_ipv4$"
+ description = "Use IPv4 connection"
+ }
+ "-6" = {
+ set_if = "$ftp_ipv6$"
+ description = "Use IPv6 connection"
+ }
+ }
+
+ vars.ftp_address = "$check_address$"
+ vars.ftp_ssl = false
+ vars.ftp_refuse = "crit"
+ vars.ftp_mismatch = "warn"
+ vars.ftp_timeout = 10
+ vars.check_ipv4 = "$ftp_ipv4$"
+ vars.check_ipv6 = "$ftp_ipv6$"
+}
+
+object CheckCommand "smtp" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_smtp" ]
+
+ arguments = {
+ "-H" = {
+ value = "$smtp_address$"
+ description = "Host name, IP Address, or unix socket (must be an absolute path)"
+ }
+ "-p" = {
+ value = "$smtp_port$"
+ description = "Port number (default: 25)"
+ }
+ "-f" = {
+ value = "$smtp_mail_from$"
+ description = "FROM-address to include in MAIL command, required by Exchange 2000"
+ }
+ "-e" = {
+ value = "$smtp_expect$"
+ description = "String to expect in first line of server response (default: '220')"
+ }
+ "-C" = {
+ value = "$smtp_command$"
+ description = "SMTP command"
+ }
+ "-R" = {
+ value = "$smtp_response$"
+ description = "Expected response to command (may be used repeatedly)"
+ }
+ "-F" = {
+ value = "$smtp_helo_fqdn$"
+ description = "FQDN used for HELO"
+ }
+ "-D" = {
+ value = "$smtp_certificate_age$"
+ description = "Minimum number of days a certificate has to be valid"
+ }
+ "-S" = {
+ set_if = "$smtp_starttls$"
+ description = "Use STARTTLS for the connection."
+ }
+ "-A" = {
+ value = "$smtp_authtype$"
+ description = "SMTP AUTH type to check (default none, only LOGIN supported)"
+ }
+ "-U" = {
+ value = "$smtp_authuser$"
+ description = "SMTP AUTH username"
+ }
+ "-P" = {
+ value = "$smtp_authpass$"
+ description = "SMTP AUTH password"
+ }
+ "-q" = {
+ value = "$smtp_ignore_quit$"
+ description = "Ignore failure when sending QUIT command to server"
+ }
+ "-w" = {
+ value = "$smtp_warning$"
+ description = "Response time to result in warning status (seconds)"
+ }
+ "-c" = {
+ value = "$smtp_critical$"
+ description = "Response time to result in critical status (seconds)"
+ }
+ "-t" = {
+ value = "$smtp_timeout$"
+ description = "Seconds before connection times out (default: 10)"
+ }
+ "-4" = {
+ set_if = "$smtp_ipv4$"
+ description = "Use IPv4 connection"
+ }
+ "-6" = {
+ set_if = "$smtp_ipv6$"
+ description = "Use IPv6 connection"
+ }
+ }
+
+ vars.smtp_address = "$check_address$"
+ vars.check_ipv4 = "$smtp_ipv4$"
+ vars.check_ipv6 = "$smtp_ipv6$"
+}
+
+object CheckCommand "ssmtp" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_ssmtp" ]
+
+ arguments = {
+ "-H" = {
+ value = "$ssmtp_address$"
+ description = "Host name, IP Address, or unix socket (must be an absolute path)"
+ }
+ "-p" = {
+ value = "$ssmtp_port$"
+ description = "Port number (default: none)"
+ }
+ "-E" = {
+ value = "$ssmtp_escape$"
+ description = "Can use \n, \r, \t or \\ in send or quit string. Must come before send or quit option. Default: nothing added to send, \r\n added to end of quit"
+ }
+ "-s" = {
+ value = "$ssmtp_send$"
+ description = "String to send to the server"
+ }
+ "-e" = {
+ value = "$ssmtp_expect$"
+ description = "String to expect in server response (may be repeated)"
+ }
+ "-A" = {
+ set_if = "$ssmtp_all$"
+ description = "All expect strings need to occur in server response. Default is any."
+ }
+ "-q" = {
+ value = "$ssmtp_quit$"
+ description = "String to send server to initiate a clean close of the connection"
+ }
+ "-r" = {
+ value = "$ssmtp_refuse$"
+ description = "Accept TCP refusals with states ok, warn, crit (default: crit)"
+ }
+ "-M" = {
+ value = "$ssmtp_mismatch$"
+ description = "Accept expected string mismatches with states ok, warn, crit (default: warn)"
+ }
+ "-j" = {
+ set_if = "$ssmtp_jail$"
+ description = "Hide output from TCP socket."
+ }
+ "-m" = {
+ value = "$ssmtp_maxbytes$"
+ description = "Close connection once more than this number of bytes are received"
+ }
+ "-d" = {
+ value = "$ssmtp_delay$"
+ description = "Seconds to wait between sending string and polling for response"
+ }
+ "-D" = {
+ value = "$ssmtp_certificate_age$"
+ description = "Minimum number of days a certificate has to be valid"
+ }
+ "-S" = {
+ set_if = "$ssmtp_ssl$"
+ description = "Use SSL for the connection."
+ }
+ "-w" = {
+ value = "$ssmtp_warning$"
+ description = "Response time to result in warning status (seconds)"
+ }
+ "-c" = {
+ value = "$ssmtp_critical$"
+ description = "Response time to result in critical status (seconds)"
+ }
+ "-t" = {
+ value = "$ssmtp_timeout$"
+ description = "Seconds before connection times out (default: 10)"
+ }
+ "-4" = {
+ set_if = "$ssmtp_ipv4$"
+ description = "Use IPv4 connection"
+ }
+ "-6" = {
+ set_if = "$ssmtp_ipv6$"
+ description = "Use IPv6 connection"
+ }
+ }
+
+ vars.ssmtp_address = "$check_address$"
+ vars.check_ipv4 = "$ssmtp_ipv4$"
+ vars.check_ipv6 = "$ssmtp_ipv6$"
+}
+
+object CheckCommand "imap" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_imap" ]
+
+ arguments = {
+ "-H" = {
+ value = "$imap_address$"
+ description = "Host name, IP Address, or unix socket (must be an absolute path)"
+ }
+ "-p" = {
+ value = "$imap_port$"
+ description = "Port number (default: none)"
+ }
+ "-E" = {
+ value = "$imap_escape$"
+ description = "Can use \n, \r, \t or \\ in send or quit string. Must come before send or quit option. Default: nothing added to send, \r\n added to end of quit"
+ }
+ "-s" = {
+ value = "$imap_send$"
+ description = "String to send to the server"
+ }
+ "-e" = {
+ value = "$imap_expect$"
+ description = "String to expect in server response (may be repeated)"
+ }
+ "-A" = {
+ set_if = "$imap_all$"
+ description = "All expect strings need to occur in server response. Default is any."
+ }
+ "-q" = {
+ value = "$imap_quit$"
+ description = "String to send server to initiate a clean close of the connection"
+ }
+ "-r" = {
+ value = "$imap_refuse$"
+ description = "Accept TCP refusals with states ok, warn, crit (default: crit)"
+ }
+ "-M" = {
+ value = "$imap_mismatch$"
+ description = "Accept expected string mismatches with states ok, warn, crit (default: warn)"
+ }
+ "-j" = {
+ set_if = "$imap_jail$"
+ description = "Hide output from TCP socket."
+ }
+ "-m" = {
+ value = "$imap_maxbytes$"
+ description = "Close connection once more than this number of bytes are received"
+ }
+ "-d" = {
+ value = "$imap_delay$"
+ description = "Seconds to wait between sending string and polling for response"
+ }
+ "-D" = {
+ value = "$imap_certificate_age$"
+ description = "Minimum number of days a certificate has to be valid"
+ }
+ "-S" = {
+ set_if = "$imap_ssl$"
+ description = "Use SSL for the connection."
+ }
+ "-w" = {
+ value = "$imap_warning$"
+ description = "Response time to result in warning status (seconds)"
+ }
+ "-c" = {
+ value = "$imap_critical$"
+ description = "Response time to result in critical status (seconds)"
+ }
+ "-t" = {
+ value = "$imap_timeout$"
+ description = "Seconds before connection times out (default: 10)"
+ }
+ "-4" = {
+ set_if = "$imap_ipv4$"
+ description = "Use IPv4 connection"
+ }
+ "-6" = {
+ set_if = "$imap_ipv6$"
+ description = "Use IPv6 connection"
+ }
+ }
+
+ vars.imap_address = "$check_address$"
+ vars.check_ipv4 = "$imap_ipv4$"
+ vars.check_ipv6 = "$imap_ipv6$"
+}
+
+object CheckCommand "simap" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_simap" ]
+
+ arguments = {
+ "-H" = {
+ value = "$simap_address$"
+ description = "Host name, IP Address, or unix socket (must be an absolute path)"
+ }
+ "-p" = {
+ value = "$simap_port$"
+ description = "Port number (default: none)"
+ }
+ "-E" = {
+ value = "$simap_escape$"
+ description = "Can use \n, \r, \t or \\ in send or quit string. Must come before send or quit option. Default: nothing added to send, \r\n added to end of quit"
+ }
+ "-s" = {
+ value = "$simap_send$"
+ description = "String to send to the server"
+ }
+ "-e" = {
+ value = "$simap_expect$"
+ description = "String to expect in server response (may be repeated)"
+ }
+ "-A" = {
+ set_if = "$simap_all$"
+ description = "All expect strings need to occur in server response. Default is any."
+ }
+ "-q" = {
+ value = "$simap_quit$"
+ description = "String to send server to initiate a clean close of the connection"
+ }
+ "-r" = {
+ value = "$simap_refuse$"
+ description = "Accept TCP refusals with states ok, warn, crit (default: crit)"
+ }
+ "-M" = {
+ value = "$simap_mismatch$"
+ description = "Accept expected string mismatches with states ok, warn, crit (default: warn)"
+ }
+ "-j" = {
+ set_if = "$simap_jail$"
+ description = "Hide output from TCP socket."
+ }
+ "-m" = {
+ value = "$simap_maxbytes$"
+ description = "Close connection once more than this number of bytes are received"
+ }
+ "-d" = {
+ value = "$simap_delay$"
+ description = "Seconds to wait between sending string and polling for response"
+ }
+ "-D" = {
+ value = "$simap_certificate_age$"
+ description = "Minimum number of days a certificate has to be valid"
+ }
+ "-S" = {
+ set_if = "$simap_ssl$"
+ description = "Use SSL for the connection."
+ }
+ "-w" = {
+ value = "$simap_warning$"
+ description = "Response time to result in warning status (seconds)"
+ }
+ "-c" = {
+ value = "$simap_critical$"
+ description = "Response time to result in critical status (seconds)"
+ }
+ "-t" = {
+ value = "$simap_timeout$"
+ description = "Seconds before connection times out (default: 10)"
+ }
+ "-4" = {
+ set_if = "$simap_ipv4$"
+ description = "Use IPv4 connection"
+ }
+ "-6" = {
+ set_if = "$simap_ipv6$"
+ description = "Use IPv6 connection"
+ }
+ }
+
+ vars.simap_address = "$check_address$"
+ vars.check_ipv4 = "$simap_ipv4$"
+ vars.check_ipv6 = "$simap_ipv6$"
+}
+
+object CheckCommand "pop" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_pop" ]
+
+ arguments = {
+ "-H" = {
+ value = "$pop_address$"
+ description = "Host name, IP Address, or unix socket (must be an absolute path)"
+ }
+ "-p" = {
+ value = "$pop_port$"
+ description = "Port number (default: none)"
+ }
+ "-E" = {
+ value = "$pop_escape$"
+ description = "Can use \n, \r, \t or \\ in send or quit string. Must come before send or quit option. Default: nothing added to send, \r\n added to end of quit"
+ }
+ "-s" = {
+ value = "$pop_send$"
+ description = "String to send to the server"
+ }
+ "-e" = {
+ value = "$pop_expect$"
+ description = "String to expect in server response (may be repeated)"
+ }
+ "-A" = {
+ set_if = "$pop_all$"
+ description = "All expect strings need to occur in server response. Default is any."
+ }
+ "-q" = {
+ value = "$pop_quit$"
+ description = "String to send server to initiate a clean close of the connection"
+ }
+ "-r" = {
+ value = "$pop_refuse$"
+ description = "Accept TCP refusals with states ok, warn, crit (default: crit)"
+ }
+ "-M" = {
+ value = "$pop_mismatch$"
+ description = "Accept expected string mismatches with states ok, warn, crit (default: warn)"
+ }
+ "-j" = {
+ set_if = "$pop_jail$"
+ description = "Hide output from TCP socket."
+ }
+ "-m" = {
+ value = "$pop_maxbytes$"
+ description = "Close connection once more than this number of bytes are received"
+ }
+ "-d" = {
+ value = "$pop_delay$"
+ description = "Seconds to wait between sending string and polling for response"
+ }
+ "-D" = {
+ value = "$pop_certificate_age$"
+ description = "Minimum number of days a certificate has to be valid"
+ }
+ "-S" = {
+ set_if = "$pop_ssl$"
+ description = "Use SSL for the connection."
+ }
+ "-w" = {
+ value = "$pop_warning$"
+ description = "Response time to result in warning status (seconds)"
+ }
+ "-c" = {
+ value = "$pop_critical$"
+ description = "Response time to result in critical status (seconds)"
+ }
+ "-t" = {
+ value = "$pop_timeout$"
+ description = "Seconds before connection times out (default: 10)"
+ }
+ "-4" = {
+ set_if = "$pop_ipv4$"
+ description = "Use IPv4 connection"
+ }
+ "-6" = {
+ set_if = "$pop_ipv6$"
+ description = "Use IPv6 connection"
+ }
+ }
+
+ vars.pop_address = "$check_address$"
+ vars.check_ipv4 = "$pop_ipv4$"
+ vars.check_ipv6 = "$pop_ipv6$"
+}
+
+object CheckCommand "spop" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_spop" ]
+
+ arguments = {
+ "-H" = {
+ value = "$spop_address$"
+ description = "Host name, IP Address, or unix socket (must be an absolute path)"
+ }
+ "-p" = {
+ value = "$spop_port$"
+ description = "Port number (default: none)"
+ }
+ "-E" = {
+ value = "$spop_escape$"
+ description = "Can use \n, \r, \t or \\ in send or quit string. Must come before send or quit option. Default: nothing added to send, \r\n added to end of quit"
+ }
+ "-s" = {
+ value = "$spop_send$"
+ description = "String to send to the server"
+ }
+ "-e" = {
+ value = "$spop_expect$"
+ description = "String to expect in server response (may be repeated)"
+ }
+ "-A" = {
+ set_if = "$spop_all$"
+ description = "All expect strings need to occur in server response. Default is any."
+ }
+ "-q" = {
+ value = "$spop_quit$"
+ description = "String to send server to initiate a clean close of the connection"
+ }
+ "-r" = {
+ value = "$spop_refuse$"
+ description = "Accept TCP refusals with states ok, warn, crit (default: crit)"
+ }
+ "-M" = {
+ value = "$spop_mismatch$"
+ description = "Accept expected string mismatches with states ok, warn, crit (default: warn)"
+ }
+ "-j" = {
+ set_if = "$spop_jail$"
+ description = "Hide output from TCP socket."
+ }
+ "-m" = {
+ value = "$spop_maxbytes$"
+ description = "Close connection once more than this number of bytes are received"
+ }
+ "-d" = {
+ value = "$spop_delay$"
+ description = "Seconds to wait between sending string and polling for response"
+ }
+ "-D" = {
+ value = "$spop_certificate_age$"
+ description = "Minimum number of days a certificate has to be valid"
+ }
+ "-S" = {
+ set_if = "$spop_ssl$"
+ description = "Use SSL for the connection."
+ }
+ "-w" = {
+ value = "$spop_warning$"
+ description = "Response time to result in warning status (seconds)"
+ }
+ "-c" = {
+ value = "$spop_critical$"
+ description = "Response time to result in critical status (seconds)"
+ }
+ "-t" = {
+ value = "$spop_timeout$"
+ description = "Seconds before connection times out (default: 10)"
+ }
+ "-4" = {
+ set_if = "$spop_ipv4$"
+ description = "Use IPv4 connection"
+ }
+ "-6" = {
+ set_if = "$spop_ipv6$"
+ description = "Use IPv6 connection"
+ }
+ }
+
+ vars.spop_address = "$check_address$"
+ vars.check_ipv4 = "$spop_ipv4$"
+ vars.check_ipv6 = "$spop_ipv6$"
+}
+
+object CheckCommand "ntp_time" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_ntp_time" ]
+
+ arguments = {
+ "-H" = {
+ value = "$ntp_address$"
+ description = "Host name, IP Address, or unix socket (must be an absolute path)"
+ }
+ "-p" = {
+ value = "$ntp_port$"
+ description = "Port number (default: 123)"
+ }
+ "-q" = {
+ set_if = "$ntp_quiet$"
+ description = "Returns UNKNOWN instead of CRITICAL if offset cannot be found"
+ }
+ "-w" = {
+ value = "$ntp_warning$"
+ description = "Offset to result in warning status (seconds)"
+ }
+ "-c" = {
+ value = "$ntp_critical$"
+ description = "Offset to result in critical status (seconds)"
+ }
+ "-o" = {
+ value = "$ntp_timeoffset$"
+ description = "Expected offset of the ntp server relative to local server (seconds)"
+ }
+ "-t" = {
+ value = "$ntp_timeout$"
+ description = "Seconds before connection times out (default: 10)"
+ }
+ "-4" = {
+ set_if = "$ntp_ipv4$"
+ description = "Use IPv4 connection"
+ }
+ "-6" = {
+ set_if = "$ntp_ipv6$"
+ description = "Use IPv6 connection"
+ }
+ }
+
+ vars.ntp_address = "$check_address$"
+ vars.check_ipv4 = "$ntp_ipv4$"
+ vars.check_ipv6 = "$ntp_ipv6$"
+}
+
+object CheckCommand "ntp_peer" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_ntp_peer" ]
+
+ arguments = {
+ "-H" = {
+ value = "$ntp_address$"
+ description = "Host name, IP Address, or unix socket (must be an absolute path)"
+ }
+ "-p" = {
+ value = "$ntp_port$"
+ description = "Port number (default: 123)"
+ }
+ "-q" = {
+ set_if = "$ntp_quiet$"
+ description = "Returns UNKNOWN instead of CRITICAL or WARNING if server isn't synchronized"
+ }
+ "-w" = {
+ value = "$ntp_warning$"
+ description = "Offset to result in warning status (seconds)"
+ }
+ "-c" = {
+ value = "$ntp_critical$"
+ description = "Offset to result in critical status (seconds)"
+ }
+ "-W" = {
+ value = "$ntp_wstratum$"
+ description = "Warning threshold for stratum of server's synchronization peer"
+ }
+ "-C" = {
+ value = "$ntp_cstratum$"
+ description = "Critical threshold for stratum of server's synchronization peer"
+ }
+ "-j" = {
+ value = "$ntp_wjitter$"
+ description = "Warning threshold for jitter"
+ }
+ "-k" = {
+ value = "$ntp_cjitter$"
+ description = "Critical threshold for jitter"
+ }
+ "-m" = {
+ value = "$ntp_wsource$"
+ description = "Warning threshold for number of usable time sources (truechimers)"
+ }
+ "-n" = {
+ value = "$ntp_csource$"
+ description = "Critical threshold for number of usable time sources (truechimers)"
+ }
+ "-t" = {
+ value = "$ntp_timeout$"
+ description = "Seconds before connection times out (default: 10)"
+ }
+ "-4" = {
+ set_if = "$ntp_ipv4$"
+ description = "Use IPv4 connection"
+ }
+ "-6" = {
+ set_if = "$ntp_ipv6$"
+ description = "Use IPv6 connection"
+ }
+ }
+
+ vars.ntp_address = "$check_address$"
+ vars.check_ipv4 = "$ntp_ipv4$"
+ vars.check_ipv6 = "$ntp_ipv6$"
+}
+
+object CheckCommand "ssh" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_ssh" ]
+
+ arguments = {
+ "-p" = {
+ value = "$ssh_port$"
+ description = "Port number (default: 22)"
+ }
+ "-t" = {
+ value = "$ssh_timeout$"
+ description = "Seconds before connection times out (default: 10)"
+ }
+ "host" = {
+ value = "$ssh_address$"
+ skip_key = true
+ order = 1
+ }
+ "-4" = {
+ set_if = "$ssh_ipv4$"
+ description = "Use IPv4 connection"
+ }
+ "-6" = {
+ set_if = "$ssh_ipv6$"
+ description = "Use IPv6 connection"
+ }
+ }
+
+ vars.ssh_address = "$check_address$"
+ vars.check_ipv4 = "$ssh_ipv4$"
+ vars.check_ipv6 = "$ssh_ipv6$"
+}
+
+object CheckCommand "disk" {
+ command = [ PluginDir + "/check_disk" ]
+
+ arguments = {
+ "-w" = {
+ value = "$disk_wfree$"
+ description = "Exit with WARNING status if less than INTEGER units of disk are free or Exit with WARNING status if less than PERCENT of disk space is free"
+ required = true
+ order = -3
+ }
+ "-c" = {
+ value = "$disk_cfree$"
+ description = "Exit with CRITICAL status if less than INTEGER units of disk are free or Exit with CRITCAL status if less than PERCENT of disk space is free"
+ required = true
+ order = -3
+ }
+ "-W" = {
+ value = "$disk_inode_wfree$"
+ description = "Exit with WARNING status if less than PERCENT of inode space is free"
+ order = -3
+ }
+ "-K" = {
+ value = "$disk_inode_cfree$"
+ description = "Exit with CRITICAL status if less than PERCENT of inode space is free"
+ order = -3
+ }
+ "-P" = {
+ description = "Display inode usage in perfdata"
+ set_if = "$disk_inode_perfdata$"
+ }
+ "-p" = {
+ value = "$disk_partitions$"
+ description = "Path or partition (may be repeated)"
+ repeat_key = true
+ order = 1
+ }
+ "-p_old" = {
+ key = "-p"
+ value = "$disk_partition$"
+ order = 1
+ }
+ "-x" = {
+ value = "$disk_partitions_excluded$"
+ description = "Ignore device (only works if -p unspecified)"
+ }
+ "-x_old" = {
+ key = "-x"
+ value = "$disk_partition_excluded$"
+ }
+ "-C" = {
+ set_if = "$disk_clear$"
+ description = "Clear thresholds"
+ }
+ "-E" = {
+ set_if = "$disk_exact_match$"
+ description = "For paths or partitions specified with -p, only check for exact paths"
+ }
+ "-e" = {
+ set_if = "$disk_errors_only$"
+ description = "Display only devices/mountpoints with errors"
+ }
+ "-f" = {
+ set_if = "$disk_ignore_reserved$"
+ description = "Don't account root-reserved blocks into freespace in perfdata"
+ }
+ "-g" = {
+ value = "$disk_group$"
+ description = "Group paths. Thresholds apply to (free-)space of all partitions together"
+ }
+ "-k" = {
+ set_if = "$disk_kilobytes$"
+ description = "Same as --units kB"
+ }
+ "-l" = {
+ set_if = "$disk_local$"
+ description = " Only check local filesystems"
+ }
+ "-L" = {
+ set_if = "$disk_stat_remote_fs$"
+ description = "Only check local filesystems against thresholds. Yet call stat on remote filesystems to test if they are accessible (e.g. to detect Stale NFS Handles)"
+ }
+ "-M" = {
+ set_if = "$disk_mountpoint$"
+ description = "Display the mountpoint instead of the partition"
+ }
+ "-m" = {
+ set_if = "$disk_megabytes$"
+ description = "Same as --units MB"
+ }
+ "-A" = {
+ set_if = "$disk_all$"
+ description = "Explicitly select all paths. This is equivalent to -R .*"
+ order = 1
+ }
+ "-R" = {
+ value = "$disk_eregi_path$"
+ description = "Case insensitive regular expression for path/partition (may be repeated)"
+ repeat_key = true
+ }
+ "-r" = {
+ value = "$disk_ereg_path$"
+ description = "Regular expression for path or partition (may be repeated)"
+ repeat_key = true
+ }
+ "-I" = {
+ value = "$disk_ignore_eregi_path$"
+ description = "Regular expression to ignore selected path/partition (case insensitive) (may be repeated)"
+ repeat_key = true
+ order = 2
+ }
+ "-i" = {
+ value = "$disk_ignore_ereg_path$"
+ description = "Regular expression to ignore selected path or partition (may be repeated)"
+ repeat_key = true
+ order = 2
+ }
+ "-t" = {
+ value = "$disk_timeout$"
+ description = "Seconds before connection times out (default: 10)"
+ }
+ "-u" = {
+ value = "$disk_units$"
+ description = "Choose bytes, kB, MB, GB, TB"
+ }
+ "-X" = {
+ value = "$disk_exclude_type$"
+ description = "Ignore all filesystems of indicated type (may be repeated)"
+ repeat_key = true
+ }
+ "-N" = {
+ value = "$disk_include_type$"
+ description = "Check only filesystems of indicated type (may be repeated)"
+ repeat_key = true
+ }
+ }
+
+ vars.disk_wfree = "20%"
+ vars.disk_cfree = "10%"
+ vars.disk_exclude_type = [
+ "none",
+ "tmpfs",
+ "sysfs",
+ "proc",
+ "configfs",
+ "devtmpfs",
+ "devfs",
+ "mtmfs",
+ "tracefs",
+ "cgroup",
+ "fuse.gvfsd-fuse",
+ "fuse.gvfs-fuse-daemon",
+ "fuse.portal",
+ "fdescfs",
+ "overlay",
+ "nsfs",
+ "squashfs"
+ ]
+}
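
As a hedged usage sketch (host name and mount points are assumptions, not taken from the diff), the repeat_key arguments such as -p and -X accept arrays and are emitted once per element:

object Service "disk-data" {
  host_name = "fileserver01"                   // hypothetical Host object
  check_command = "disk"
  vars.disk_wfree = "10%"                      // overrides the 20% default above
  vars.disk_cfree = "5%"
  vars.disk_partitions = [ "/", "/srv/data" ]  // rendered as: -p / -p /srv/data
}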
+
+object CheckCommand "disk_smb" {
+ command = [ PluginDir + "/check_disk_smb" ]
+
+ arguments = {
+ "-H" = {
+ value = "$disk_smb_hostname$"
+ description = "NetBIOS name of the server."
+ }
+ "-s" = {
+ value = "$disk_smb_share$"
+ description = "Share name to be tested."
+ }
+ "-W" = {
+ value = "$disk_smb_workgroup$"
+ description = "Workgroup or Domain used (Defaults to 'WORKGROUP' if omitted)."
+ }
+ "-a" = {
+ value = "$disk_smb_address$"
+ description = "IP-address of HOST (only necessary if HOST is in another network)."
+ }
+ "-u" = {
+ value = "$disk_smb_username$"
+ description = "Username to log in to server. (Defaults to 'guest' if omitted)."
+ }
+ "-p" = {
+ value = "$disk_smb_password$"
+ description = "Password to log in to server. (Defaults to an empty password if omitted)."
+ }
+ "-w" = {
+ value = "$disk_smb_wused$"
+ description = "Percent of used space at which a warning will be generated (Default: 85%)."
+ }
+ "-c" = {
+ value = "$disk_smb_cused$"
+ description = "Percent of used space at which a critical will be generated (Defaults: 95%)"
+ }
+ "-P" = {
+ value = "$disk_smb_port$"
+ description = "Port to be used to connect to. Some Windows boxes use 139, others 445 (Defaults to smbclient default if omitted)."
+ }
+ }
+
+ vars.disk_smb_wused = "85%"
+ vars.disk_smb_cused = "95%"
+}
+
+object CheckCommand "users" {
+ command = [ PluginDir + "/check_users" ]
+
+ arguments = {
+ "-w" = {
+ value = "$users_wgreater$"
+ description = "Set WARNING status if more than INTEGER users are logged in"
+ }
+ "-c" = {
+ value = "$users_cgreater$"
+ description = "Set CRITICAL status if more than INTEGER users are logged in"
+ }
+ }
+
+ vars.users_wgreater = 20
+ vars.users_cgreater = 50
+}
+
+object CheckCommand "procs" {
+ command = [ PluginDir + "/check_procs" ]
+
+ arguments = {
+ "-w" = {
+ value = "$procs_warning$"
+ description = "Generate warning state if metric is outside this range"
+ }
+ "-c" = {
+ value = "$procs_critical$"
+ description = "Generate critical state if metric is outside this range"
+ }
+ "-m" = {
+ value = "$procs_metric$"
+ description = "Check thresholds against metric"
+ }
+ "-t" = {
+ value = "$procs_timeout$"
+ description = "Seconds before plugin times out"
+ }
+ "-T" = {
+ set_if = "$procs_traditional$"
+ description = "Filter own process the traditional way by PID instead of /proc/pid/exe"
+ }
+ "-s" = {
+ value = "$procs_state$"
+ description = "Only scan for processes that have one or more of the status flags you specify"
+ }
+ "-p" = {
+ value = "$procs_ppid$"
+ description = "Only scan for children of the parent process ID indicated"
+ }
+ "-z" = {
+ value = "$procs_vsz$"
+ description = "Only scan for processes with VSZ higher than indicated"
+ }
+ "-r" = {
+ value = "$procs_rss$"
+ description = "Only scan for processes with RSS higher than indicated"
+ }
+ "-P" = {
+ value = "$procs_pcpu$"
+ description = "Only scan for processes with PCPU higher than indicated"
+ }
+ "-u" = {
+ value = "$procs_user$"
+ description = "Only scan for processes with user name or ID indicated"
+ }
+ "-a" = {
+ value = "$procs_argument$"
+ description = "Only scan for processes with args that contain STRING"
+ }
+ "--ereg-argument-array" = {
+ value = "$procs_argument_regex$"
+ description = "Only scan for processes with args that contain the regex STRING"
+ }
+ "-C" = {
+ value = "$procs_command$"
+ description = "Only scan for exact matches of COMMAND (without path)"
+ }
+ "-k" = {
+ set_if = "$procs_nokthreads$"
+ description = "Only scan for non kernel threads"
+ }
+ }
+
+ vars.procs_traditional = false
+ vars.procs_nokthreads = false
+ vars.procs_warning = 250
+ vars.procs_critical = 400
+}
+
+object CheckCommand "swap" {
+ command = [ PluginDir + "/check_swap" ]
+
+ arguments = {
+ "-w" = {{
+ if (macro("$swap_integer$")) {
+ return macro("$swap_wfree$")
+ } else {
+ return macro("$swap_wfree$%")
+ }
+ }}
+ "-c" = {{
+ if (macro("$swap_integer$")) {
+ return macro("$swap_cfree$")
+ } else {
+ return macro("$swap_cfree$%")
+ }
+ }}
+ "-a" = {
+ set_if = "$swap_allswaps$"
+ description = "Conduct comparisons for all swap partitions, one by one"
+ }
+ "-n" = {
+ value = "$swap_noswap$"
+ description = "Resulting state when there is no swap regardless of thresholds. Possible values are \"ok\", \"warning\", \"critical\", \"unknown\". Defaults to \"critical\""
+ }
+ }
+
+ vars.swap_wfree = 50
+ vars.swap_cfree = 25
+ vars.swap_integer = false
+ vars.swap_allswaps = false
+}
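
The -w/-c lambdas above append a "%" unit unless swap_integer is set. A small illustrative sketch (host name and thresholds are assumed):

object Service "swap" {
  host_name = "app01"                 // hypothetical Host object
  check_command = "swap"
  vars.swap_wfree = 30                // swap_integer defaults to false, so this becomes "-w 30%"
  vars.swap_cfree = 15                // becomes "-c 15%"
  //vars.swap_integer = true          // would pass absolute values instead: "-w 30" / "-c 15"
}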
+
+object CheckCommand "load" {
+ command = [ PluginDir + "/check_load" ]
+
+ arguments = {
+ "-w" = {
+ value = "$load_wload1$,$load_wload5$,$load_wload15$"
+ description = "Exit with WARNING status if load average exceeds WLOADn"
+ }
+ "-c" = {
+ value = "$load_cload1$,$load_cload5$,$load_cload15$"
+ description = "Exit with CRITICAL status if load average exceed CLOADn; the load average format is the same used by 'uptime' and 'w'"
+ }
+ "-r" = {
+ set_if = "$load_percpu$"
+ description = "Divide the load averages by the number of CPUs (when possible)"
+ }
+ }
+
+ vars.load_wload1 = 5.0
+ vars.load_wload5 = 4.0
+ vars.load_wload15 = 3.0
+
+ vars.load_cload1 = 10.0
+ vars.load_cload5 = 6.0
+ vars.load_cload15 = 4.0
+
+ vars.load_percpu = false
+}
+
+object CheckCommand "snmp" {
+ command = [ PluginDir + "/check_snmp" ]
+
+ arguments = {
+ "-H" = {
+ value = "$snmp_address$"
+ description = "Host name, IP Address, or unix socket (must be an absolute path)"
+ }
+ "-o" = {
+ value = "$snmp_oid$"
+ description = "Object identifier(s) or SNMP variables whose value you wish to query"
+ }
+ "-C" = {
+ value = "$snmp_community$"
+ description = "Optional community string for SNMP communication (default is 'public')"
+ }
+ "-c" = {
+ value = "$snmp_crit$"
+ description = "Critical threshold range(s)"
+ }
+ "-w" = {
+ value = "$snmp_warn$"
+ description = "Warning threshold range(s)"
+ }
+ "-s" = {
+ value = "$snmp_string$"
+ description = "Return OK state (for that OID) if STRING is an exact match"
+ }
+ "-r" = {
+ value = "$snmp_ereg$"
+ description = "Return OK state (for that OID) if extended regular expression REGEX matches"
+ }
+ "-R" = {
+ value = "$snmp_eregi$"
+ description = "Return OK state (for that OID) if case-insensitive extended REGEX matches"
+ }
+ "-l" = {
+ value = "$snmp_label$"
+ description = "Prefix label for output from plugin"
+ }
+ "-u" = {
+ value = "$snmp_units$"
+ description = "Units label(s) for output data (e.g., 'sec.')"
+ }
+ "-t" = {
+ value = "$snmp_timeout$"
+ description = "Seconds before connection times out (default: 10)"
+ }
+ "-p" = {
+ value = "$snmp_port$"
+ description = "Port number (default: 161)"
+ }
+ "-e" = {
+ value = "$snmp_retries$"
+ description = "Number of retries to be used in the requests"
+ }
+ "--invert-search" = {
+ set_if = "$snmp_invert_search$"
+ description = "Invert search result and return CRITICAL if found"
+ }
+ "-P" = {
+ value = "$snmp_version$"
+ description = "SNMP protocol version"
+ }
+ "-m" = {
+ value = "$snmp_miblist$"
+ description = "List of MIBS to be loaded (default = none if using numeric OIDs or 'ALL' for symbolic OIDs.)"
+ }
+ "--rate-multiplier" = {
+ value = "$snmp_rate_multiplier$"
+ description = "Converts rate per second. For example, set to 60 to convert to per minute"
+ }
+ "--rate" = {
+ set_if = "$snmp_rate$"
+ description = "Enable rate calculation"
+ }
+ "-n" = {
+ set_if = "$snmp_getnext$"
+ description = "Use SNMP GETNEXT instead of SNMP GET"
+ }
+ "--offset" = {
+ value = "$snmp_offset$"
+ description = "Add/substract the specified OFFSET to numeric sensor data"
+ }
+ "-D" = {
+ value = "$snmp_output_delimiter$"
+ description = "Separates output on multiple OID requests"
+ }
+ "-O" = {
+ set_if = "$snmp_perf_oids$"
+ description = "Label performance data with OIDs instead of --label's"
+ }
+ }
+
+ vars.snmp_address = {{
+ var addr_v4 = macro("$address$")
+ var addr_v6 = macro("$address6$")
+
+ if (addr_v4) {
+ return addr_v4
+ } else {
+ return "udp6:[" + addr_v6 + "]"
+ }
+ }}
+
+ vars.snmp_community = "public"
+ vars.snmp_invert_search = false
+ vars.snmp_timeout = "10"
+}
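
The snmp_address lambda above falls back to "udp6:[<address6>]" when the host has no IPv4 address. A minimal sketch of how this command might be applied (host name and community string are assumptions):

object Service "snmp-sysdescr" {
  host_name = "switch01"                  // hypothetical Host; if only address6 is set, -H becomes udp6:[...]
  check_command = "snmp"
  vars.snmp_oid = "1.3.6.1.2.1.1.1.0"     // sysDescr.0
  vars.snmp_community = "monitoring"      // overrides the "public" default
}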
+
+object CheckCommand "snmpv3" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_snmp" ]
+
+ arguments = {
+ "-H" = {
+ value = "$snmpv3_address$"
+ description = "Host name, IP Address, or unix socket (must be an absolute path)"
+ }
+ "-p" = {
+ value = "$snmpv3_port$"
+ description = "Port number"
+ }
+ "-n" = {
+ set_if = "$snmpv3_getnext$"
+ description = "Use SNMP GETNEXT instead of SNMP GET"
+ }
+ "-P" = {
+ value = 3
+ description = "SNMP protocol version"
+ }
+ "-L" = {
+ value = "$snmpv3_seclevel$"
+ description = "SNMPv3 securityLevel"
+ }
+ "-a" = {
+ value = "$snmpv3_auth_alg$"
+ description = "SNMPv3 auth proto"
+ }
+ "-U" = {
+ value = "$snmpv3_user$"
+ description = "SNMPv3 username"
+ }
+ "-N" = {
+ value = "$snmpv3_context$"
+ description = "SNMPv3 context"
+ }
+ "-A" = {
+ value = "$snmpv3_auth_key$"
+ description = "SNMPv3 authentication password"
+ }
+ "-X" = {
+ value = "$snmpv3_priv_key$"
+ description = "SNMPv3 privacy password"
+ }
+ "-o" = {
+ value = "$snmpv3_oid$"
+ description = "Object identifier(s) or SNMP variables whose value you wish to query"
+ }
+ "-x" = {
+ value = "$snmpv3_priv_alg$"
+ description = "SNMPv3 priv proto (default DES)"
+ }
+ "-w" = {
+ value = "$snmpv3_warn$"
+ description = "Warning threshold range(s)"
+ }
+ "-c" = {
+ value = "$snmpv3_crit$"
+ description = "Critical threshold range(s)"
+ }
+ "-s" = {
+ value = "$snmpv3_string$"
+ description = "Return OK state (for that OID) if STRING is an exact match"
+ }
+ "-r" = {
+ value = "$snmpv3_ereg$"
+ description = "Return OK state (for that OID) if extended regular expression REGEX matches"
+ }
+ "-R" = {
+ value = "$snmpv3_eregi$"
+ description = "Return OK state (for that OID) if case-insensitive extended REGEX matches"
+ }
+ "--invert-search" = {
+ set_if = "$snmpv3_invert_search$"
+ description = "Invert search result and return CRITICAL if found"
+ }
+ "-l" = {
+ value = "$snmpv3_label$"
+ description = "Prefix label for output from plugin"
+ }
+ "-m" = {
+ value = "$snmpv3_miblist$"
+ description = "List of SNMP MIBs for translating OIDs between numeric and textual representation"
+ }
+ "-u" = {
+ value = "$snmpv3_units$"
+ description = "Units label(s) for output data (e.g., 'sec.')"
+ }
+ "--rate-multiplier" = {
+ value = "$snmpv3_rate_multiplier$"
+ description = "Converts rate per second. For example, set to 60 to convert to per minute"
+ }
+ "--rate" = {
+ set_if = "$snmpv3_rate$"
+ description = "Enable rate calculation"
+ }
+ "-t" = {
+ value = "$snmpv3_timeout$"
+ description = "Seconds before connection times out (default: 10)"
+ }
+ }
+
+ vars.snmpv3_address = "$check_address$"
+ vars.snmpv3_auth_alg = "SHA"
+ vars.snmpv3_priv_alg = "AES"
+ vars.snmpv3_seclevel = "authPriv"
+ vars.snmpv3_timeout = "10"
+}
+
+object CheckCommand "snmp-uptime" {
+ import "snmp"
+
+ vars.snmp_oid = "1.3.6.1.2.1.1.3.0"
+}
+
+object CheckCommand "apt" {
+ command = [ PluginDir + "/check_apt" ]
+
+ arguments = {
+ // apt-get takes only options starting with "-" (like "-sqq") before
+ // the upgrade command, so passing e.g. "foobar" as "--upgrade=foobar"
+ // makes no sense. This allows to easily decide between "-x=-y"
+ // (strings starting with "-") and "-x" (everything else).
+ "--upgrade" = {
+ set_if = {{
+ var v = macro("$apt_upgrade$")
+ return match("-*", string(v).trim()) ? false : v
+ }}
+ description = "[Default] Perform an upgrade. If an optional OPTS argument is provided, apt-get will be run with these command line options instead of the default."
+ }
+ "--dist-upgrade" = {
+ set_if = {{
+ var v = macro("$apt_dist_upgrade$")
+ return match("-*", string(v).trim()) ? false : v
+ }}
+ description = "Perform a dist-upgrade instead of normal upgrade. Like with -U OPTS can be provided to override the default options."
+ }
+ "--upgrade=OPTS" = {
+ set_if = {{ match("-*", string(macro("$apt_upgrade$")).trim()) }}
+ key = "--upgrade"
+ separator = "="
+ value = "$apt_upgrade$"
+ description = "[Default] Perform an upgrade. If an optional OPTS argument is provided, apt-get will be run with these command line options instead of the default."
+ }
+ "--dist-upgrade=OPTS" = {
+ set_if = {{ match("-*", string(macro("$apt_dist_upgrade$")).trim()) }}
+ key = "--dist-upgrade"
+ separator = "="
+ value = "$apt_dist_upgrade$"
+ description = "Perform a dist-upgrade instead of normal upgrade. Like with -U OPTS can be provided to override the default options."
+ }
+
+ "--extra-opts" = {
+ value = "$apt_extra_opts$"
+ description = "Read options from an ini file."
+ }
+ "--include" = {
+ value = "$apt_include$"
+ description = "Include only packages matching REGEXP. Can be specified multiple times the values will be combined together."
+ }
+ "--exclude" = {
+ value = "$apt_exclude$"
+ description = "Exclude packages matching REGEXP from the list of packages that would otherwise be included. Can be specified multiple times."
+ }
+ "--critical" = {
+ value = "$apt_critical$"
+ description = "If the full package information of any of the upgradable packages match this REGEXP, the plugin will return CRITICAL status. Can be specified multiple times."
+ }
+ "--timeout" = {
+ value = "$apt_timeout$"
+ description = "Seconds before plugin times out (default: 10)."
+ }
+ "--only-critical" = {
+ set_if = "$apt_only_critical$"
+ description = "Only warn about critical upgrades."
+ }
+ "--list" = {
+ set_if = "$apt_list$"
+ description = "List packages available for upgrade."
+ }
+ }
+
+ timeout = 5m
+}
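
The paired "--upgrade"/"--upgrade=OPTS" arguments above distinguish a plain boolean from an options string starting with "-". An illustrative sketch (host name and option string are assumptions):

object Service "apt" {
  host_name = "debian01"                                // hypothetical Host object
  check_command = "apt"
  vars.apt_only_critical = true                         // adds --only-critical
  vars.apt_upgrade = true                               // true-ish and not starting with "-": plain --upgrade
  //vars.apt_upgrade = "-sqq -o Debug::NoLocking=true"  // starts with "-": rendered as --upgrade=<OPTS>
}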
+
+object CheckCommand "dhcp" {
+ command = [ PluginDir + "/check_dhcp" ]
+
+ arguments = {
+ "-s" = {
+ value = "$dhcp_serverip$"
+ description = "IP address of DHCP server that we must hear from"
+ }
+ "-r" = {
+ value = "$dhcp_requestedip$"
+ description = "IP address that should be offered by at least one DHCP server"
+ }
+ "-t" = {
+ value = "$dhcp_timeout$"
+ description = "Seconds to wait for DHCPOFFER before timeout occurs"
+ }
+ "-i" = {
+ value = "$dhcp_interface$"
+ description = "Interface to to use for listening (i.e. eth0)"
+ }
+ "-m" = {
+ value = "$dhcp_mac$"
+ description = "MAC address to use in the DHCP request"
+ }
+ "-u" = {
+ set_if = "$dhcp_unicast$"
+ description = "Unicast testing: mimic a DHCP relay"
+ }
+ }
+
+ vars.dhcp_unicast = false
+}
+
+object CheckCommand "dns" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_dns" ]
+
+ arguments = {
+ "-H" = {
+ value = "$dns_lookup$"
+ description = "The name or address you want to query."
+ }
+ "-s" = {
+ value = "$dns_server$"
+ description = "Optional DNS server you want to use for the lookup."
+ }
+ "-q" = {
+ value = "$dns_query_type$"
+ description = "Optional DNS record query type where TYPE =(A, AAAA, SRV, TXT, MX, ANY). The default query type is 'A' (IPv4 host entry)"
+ }
+ "-a" = {
+ value = "$dns_expected_answers$"
+ description = "Optional ip address or host you expect the DNS server to return. Host must end with a dot (.). This option can be repeated multiple times (Returns OK if any value match). If multiple addresses are returned at once: for version 2.2 and earlier of the monitoring-plugins, you have to match the whole string of addresses separated with commas (sorted alphabetically). For version 2.3 and later, you need to pass a list of strings, one for each address you want to match."
+ }
+ "-L" = {
+ set_if = "$dns_all_expected$"
+ description = "Return critical if the list of expected addresses does not match all addresses returned. Only supported in newer versions of monitoring-plugins (2.3 and later), and is needed in such versions to replicate behaviour of previous versions of the plugins. When not passed, one address is enough for success, instead of all."
+ }
+ "-A" = {
+ set_if = "$dns_authoritative$"
+ description = "Optionally expect the DNS server to be authoritative for the lookup"
+ }
+ "-n" = {
+ set_if = "$dns_accept_cname$"
+ description = "Optionally accept cname responses as a valid result to a query. The default is to ignore cname responses as part of the result"
+ }
+ "-w" = {
+ value = "$dns_wtime$"
+ description = "Return warning if elapsed time exceeds value."
+ }
+ "-c" = {
+ value = "$dns_ctime$"
+ description = "Return critical if elapsed time exceeds value."
+ }
+ "-t" = {
+ value = "$dns_timeout$"
+ description = "Seconds before connection times out. Defaults to 10."
+ }
+ }
+
+ vars.dns_lookup = "$host.name$"
+ vars.dns_timeout = 10
+}
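
Because -a repeats for array values and -L requires all listed answers, a usage sketch could look like this (names and addresses are placeholders, not from the diff):

object Service "dns-www" {
  host_name = "ns01"                                            // hypothetical Host object
  check_command = "dns"
  vars.dns_lookup = "www.example.com"
  vars.dns_server = "192.0.2.53"
  vars.dns_expected_answers = [ "192.0.2.10", "192.0.2.11" ]    // monitoring-plugins >= 2.3: one -a per entry
  vars.dns_all_expected = true                                  // -L: every expected address must be returned
}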
+
+object CheckCommand "dig" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_dig" ]
+
+ arguments = {
+ "-H" = {
+ value = "$dig_server$"
+ description = "Host name, IP Address, or unix socket (must be an absolute path)"
+ }
+ "-p" = {
+ value = "$dig_port$"
+ description = "Port number (default: 53)"
+ }
+ "-l" = {
+ value = "$dig_lookup$"
+ required = true
+ description = "Machine name to lookup"
+ }
+ "-T" = {
+ value = "$dig_record_type$"
+ description = "Record type to lookup (default: A)"
+ }
+ "-a" = {
+ value = "$dig_expected_address$"
+ description = "An address expected to be in the answer section"
+ }
+ "-A" = {
+ value = "$dig_arguments$"
+ description = "Pass STRING as argument(s) to dig"
+ }
+ "-w" = {
+ value = "$dig_warning$"
+ description = "Response time to result in warning status (seconds)"
+ }
+ "-c" = {
+ value = "$dig_critical$"
+ description = "Response time to result in critical status (seconds)"
+ }
+ "-t" = {
+ value = "$dig_timeout$"
+ description = "Seconds before connection times out (default: 10)"
+ }
+ "-4" = {
+ set_if = "$dig_ipv4$"
+ description = "Force dig to only use IPv4 query transport"
+ }
+ "-6" = {
+ set_if = "$dig_ipv6$"
+ description = "Force dig to only use IPv6 query transport"
+ }
+ }
+
+ vars.dig_server = "$check_address$"
+ vars.check_ipv4 = "$dig_ipv4$"
+ vars.check_ipv6 = "$dig_ipv6$"
+}
+
+object CheckCommand "nscp" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_nt" ]
+
+ arguments = {
+ "-H" = {
+ value = "$nscp_address$"
+ description = "Name of the host to check"
+ }
+ "-p" = {
+ value = "$nscp_port$"
+ description = "Optional port number (default: 1248)"
+ }
+ "-s" = {
+ value = "$nscp_password$"
+ description = "Password needed for the request"
+ }
+ "-v" = {
+ value = "$nscp_variable$"
+ required = true
+ description = "Variable to check"
+ }
+ "-l" = {
+ value = "$nscp_params$"
+ repeat_key = false
+ }
+ "-w" = {
+ value = "$nscp_warn$"
+ description = "Threshold which will result in a warning status"
+ }
+ "-c" = {
+ value = "$nscp_crit$"
+ description = "Threshold which will result in a critical status"
+ }
+ "-t" = {
+ value = "$nscp_timeout$"
+ description = "Seconds before connection attempt times out"
+ }
+ "-d" = {
+ value = "SHOWALL"
+ set_if = "$nscp_showall$"
+ description = "Use with SERVICESTATE to see working services or PROCSTATE for running processes"
+ }
+ }
+
+ vars.nscp_address = "$check_address$"
+ vars.nscp_port = 12489
+ vars.nscp_showall = false
+}
+
+object CheckCommand "by_ssh" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_by_ssh" ]
+
+ arguments = {
+ "-H" = {
+ value = "$by_ssh_address$"
+ description = "Host name, IP Address, or unix socket (must be an absolute path)"
+ }
+ "-p" = {
+ value = "$by_ssh_port$"
+ description = "Port number (default: none)"
+ }
+ "-C" = {{
+ var command = macro("$by_ssh_command$")
+ var arguments = macro("$by_ssh_arguments$")
+
+ if (typeof(command) == String && !arguments) {
+ return command
+ }
+
+ var escaped_args = []
+ for (arg in resolve_arguments(command, arguments)) {
+ escaped_args.add(escape_shell_arg(arg))
+ }
+ return escaped_args.join(" ")
+ }}
+ "-l" = {
+ value = "$by_ssh_logname$"
+ description = "SSH user name on remote host [optional]"
+ }
+ "-i" = {
+ value = "$by_ssh_identity$"
+ description = "identity of an authorized key [optional]"
+ }
+ "-q" = {
+ set_if = "$by_ssh_quiet$"
+ description = "Tell ssh to suppress warning and diagnostic messages [optional]"
+ }
+ "-w" = {
+ value = "$by_ssh_warn$"
+ description = "Response time to result in warning status (seconds)"
+ }
+ "-c" = {
+ value = "$by_ssh_crit$"
+ description = "Response time to result in critical status (seconds)"
+ }
+ "-t" = {
+ value = "$by_ssh_timeout$"
+ description = "Seconds before connection times out (default: 10)"
+ }
+ "-o" = {
+ value = "$by_ssh_options$"
+ description = "Provide ssh options (may be repeated)"
+ }
+ "-4" = {
+ set_if = "$by_ssh_ipv4$"
+ description = "Use IPv4 only"
+ }
+ "-6" = {
+ set_if = "$by_ssh_ipv6$"
+ description = "Use IPv6 only"
+ }
+ "-E" = {
+ value = "$by_ssh_skip_stderr$"
+ description = "Ignore all or (if specified) first n lines on STDERR [optional]"
+ }
+ }
+
+ vars.by_ssh_address = "$check_address$"
+ vars.by_ssh_quiet = false
+ vars.check_ipv4 = "$by_ssh_ipv4$"
+ vars.check_ipv6 = "$by_ssh_ipv6$"
+}
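
The -C lambda above resolves by_ssh_command plus by_ssh_arguments and shell-escapes each argument. A hedged sketch of invoking a remote plugin through it (user, plugin path and thresholds are assumptions):

object Service "load-by-ssh" {
  host_name = "remote01"                                        // hypothetical Host object
  check_command = "by_ssh"
  vars.by_ssh_logname = "icinga"
  vars.by_ssh_command = [ "/usr/lib/nagios/plugins/check_load" ]
  vars.by_ssh_arguments = {
    "-w" = { value = "5,4,3" }                                  // escaped and joined into the -C string
    "-c" = { value = "10,6,4" }
  }
}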
+
+object CheckCommand "ups" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_ups" ]
+
+ arguments = {
+ "-H" = {
+ value = "$ups_address$"
+ description = "Address of the upsd server"
+ required = true
+ }
+ "-u" = {
+ value = "$ups_name$"
+ description = "Name of the UPS to monitor"
+ required = true
+ }
+ "-p" = {
+ value = "$ups_port$"
+ description = "Port number (default: 3493)"
+ }
+ "-v" = {
+ value = "$ups_variable$"
+ description = "Variable to monitor, valid strings are LINE, TEMP, BATTPCT or LOADPCT"
+ }
+ "-w" = {
+ value = "$ups_warning$"
+ description = "Warning threshold for the selected variable"
+ }
+ "-c" = {
+ value = "$ups_critical$"
+ description = "Critical threshold for the selected variable"
+ }
+ "-T" = {
+ set_if = "$ups_celsius$"
+ description = "Display temperature in degrees Celsius instead of Fahrenheit"
+ }
+ "-t" = {
+ value = "$ups_timeout$"
+ description = "Seconds before the connection times out (default: 10)"
+ }
+ }
+
+ vars.ups_address = "$check_address$"
+ vars.ups_name = "ups"
+}
+
+object CheckCommand "nrpe" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_nrpe" ]
+
+ arguments = {
+ "-H" = {
+ value = "$nrpe_address$"
+ description = "The address of the host running the NRPE daemon"
+ }
+ "-p" = {
+ value = "$nrpe_port$"
+ }
+ "-c" = {
+ value = "$nrpe_command$"
+ }
+ "-n" = {
+ set_if = "$nrpe_no_ssl$"
+ description = "Do not use SSL"
+ }
+ "-u" = {
+ set_if = "$nrpe_timeout_unknown$"
+ description = "Make socket timeouts return an UNKNOWN state instead of CRITICAL"
+ }
+ "-t" = {
+ value = "$nrpe_timeout$"
+ description = "<interval>:<state> = <Number of seconds before connection times out>:<Check state to exit with in the event of a timeout (default=CRITICAL)>"
+ }
+ "-a" = {
+ value = "$nrpe_arguments$"
+ repeat_key = false
+ order = 1
+ }
+ "-4" = {
+ set_if = "$nrpe_ipv4$"
+ description = "Use IPv4 connection"
+ }
+ "-6" = {
+ set_if = "$nrpe_ipv6$"
+ description = "Use IPv6 connection"
+ }
+ "-2" = {
+ set_if = "$nrpe_version_2$"
+ description = "Use this if you want to connect to NRPE v2"
+ }
+ "-3" = {
+ set_if = "$nrpe_version_3$"
+ description = "Use this if you want to connect to NRPE v3"
+ }
+ "-P" = {
+ value = "$nrpe_payload_size$"
+ description = "Specify non-default payload size for NSClient++"
+ }
+ "-A" = {
+ value = "$nrpe_ca$"
+ description = "The CA file to use for PKI"
+ }
+ "-C" = {
+ value = "$nrpe_cert$"
+ description = "The cert file to use for PKI"
+ }
+ "-K" = {
+ value = "$nrpe_key$"
+ description = "The key file to use for PKI"
+ }
+ "-S" = {
+ value = "$nrpe_ssl_version$"
+ description = "The SSL/TLS version to use"
+ }
+ "-L" = {
+ value = "$nrpe_cipher_list$"
+ description = "The list of SSL ciphers to use"
+ }
+ "-d" = {
+ value = "$nrpe_dh_opt$"
+ description = "Anonymous Diffie Hellman use: 0 = deny, 1 = allow, 2 = force"
+ }
+ "-D" = {
+ set_if = "$nrpe_no_logging$"
+ description = "Disable check_nrpe plugin from logging to syslog (requires check_nrpe >= 4.0)"
+ }
+ }
+
+ vars.nrpe_address = "$check_address$"
+ vars.nrpe_no_ssl = false
+ vars.nrpe_timeout_unknown = false
+ vars.check_ipv4 = "$nrpe_ipv4$"
+ vars.check_ipv6 = "$nrpe_ipv6$"
+ vars.nrpe_version_2 = false
+ timeout = 5m
+}
+
+object CheckCommand "hpjd" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_hpjd" ]
+
+ arguments = {
+ "-H" = {
+ value = "$hpjd_address$"
+ description = "Host address"
+ }
+ "-C" = {
+ value = "$hpjd_community$"
+ description = "The SNMP community name (default=public)"
+ }
+ "-p" = {
+ value = "$hpjd_port$"
+ description = "Specify the port to check (default=161)"
+ }
+ }
+
+ vars.hpjd_address = "$check_address$"
+}
+
+object CheckCommand "icmp" {
+ command = [ PluginDir + "/check_icmp" ]
+
+ arguments = {
+ "-H" = {
+ value = "$icmp_address$"
+ repeat_key = false
+ order = 1
+ description = "Host address"
+ }
+ "-w" = {
+ value = "$icmp_wrta$,$icmp_wpl$%"
+ description = "warning threshold (currently 200.000ms,10%)"
+ }
+ "-c" = {
+ value = "$icmp_crta$,$icmp_cpl$%"
+ description = "critical threshold (currently 500.000ms,30%)"
+ }
+ "-s" = {
+ value = "$icmp_source$"
+ description = "specify a source IP address or device name"
+ }
+ "-n" = {
+ value = "$icmp_packets$"
+ description = "number of packets to send (currently 5)"
+ }
+ "-i" = {
+ value = "$icmp_packet_interval$"
+ description = "max packet interval (currently 80.000ms)"
+ }
+ "-I" = {
+ value = "$icmp_target_interval$"
+ description = "max target interval (currently 0.000ms)"
+ }
+ "-m" = {
+ value = "$icmp_hosts_alive$"
+ description = "number of alive hosts required for success"
+ }
+ "-b" = {
+ value = "$icmp_data_bytes$"
+ description = "Number of icmp data bytes to send. Packet size will be data bytes + icmp header (currently 68 + 8)"
+ }
+ "-t" = {
+ value = "$icmp_timeout$"
+ description = "timeout value (seconds, currently 10)"
+ }
+ "-l" = {
+ value = "$icmp_ttl$"
+ description = "TTL on outgoing packets (currently 0)"
+ }
+ }
+
+ vars.icmp_address = "$address$"
+ vars.icmp_wrta = 100
+ vars.icmp_wpl = 10
+ vars.icmp_crta = 200
+ vars.icmp_cpl = 30
+}
+
+object CheckCommand "ldap" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_ldap" ]
+
+ arguments = {
+ "-H" = {
+ value = "$ldap_address$"
+ description = "Host name, IP Address, or unix socket (must be an absolute path)"
+ }
+ "-p" = {
+ value = "$ldap_port$"
+ description = "Port number (default: 389)"
+ }
+ "-a" = {
+ value = "$ldap_attr$"
+ description = "ldap attribute to search (default: \"(objectclass=*)\""
+ }
+ "-b" = {
+ value = "$ldap_base$"
+ required = true
+ description = "ldap base (eg. ou=my unit, o=my org, c=at"
+ }
+ "-D" = {
+ value = "$ldap_bind$"
+ description = "ldap bind DN (if required)"
+ }
+ "-P" = {
+ value = "$ldap_pass$"
+ description = "ldap password (if required)"
+ }
+ "-T" = {
+ set_if = "$ldap_starttls$"
+ description = "use starttls mechanism introduced in protocol version 3"
+ }
+ "-S" = {
+ set_if = "$ldap_ssl$"
+ description = "use ldaps (ldap v2 ssl method). this also sets the default port to 636"
+ }
+ "-2" = {
+ set_if = "$ldap_v2$"
+ description = "Use LDAP protocol version 2"
+ }
+ "-3" = {
+ set_if = "$ldap_v3$"
+ description = "Use LDAP protocol version 3"
+ }
+ "-w" = {
+ value = "$ldap_warning$"
+ description = "Response time to result in warning status (seconds)"
+ }
+ "-c" = {
+ value = "$ldap_critical$"
+ description = "Response time to result in critical status (seconds)"
+ }
+ "-W" = {
+ value = "$ldap_warning_entries$"
+ description = "Number of found entries to result in warning status (optional)"
+ }
+ "-C" = {
+ value = "$ldap_critical_entries$"
+ description = "Number of found entries to result in critical status (optional)"
+ }
+ "-t" = {
+ value = "$ldap_timeout$"
+ description = "Seconds before connection times out (default: 10)"
+ }
+ "-v" = {
+ set_if = "$ldap_verbose$"
+ description = "Show details for command-line debugging"
+ }
+ }
+
+ vars.ldap_address = "$check_address$"
+ vars.ldap_v2 = true
+ vars.ldap_v3 = false
+ vars.ldap_timeout = 10s
+ vars.ldap_verbose = false
+}
+
+object CheckCommand "clamd" {
+ command = [ PluginDir + "/check_clamd" ]
+
+ arguments = {
+ "-H" = {
+ value = "$clamd_address$"
+ description = "The host's address or unix socket (must be an absolute path)."
+ required = true
+ }
+ "-p" = {
+ value = "$clamd_port$"
+ description = "Port number (default: none)."
+ }
+ "-e" = {
+ value = "$clamd_expect$"
+ description = "String to expect in server response (may be repeated)."
+ repeat_key = true
+ }
+ "-A" = {
+ set_if = "$clamd_all$"
+ description = "All expect strings need to occur in server response. Default is any."
+ }
+ "-E_send" = {
+ key = "-E"
+ order = 1
+ set_if = "$clamd_escape_send$"
+ description = "Enable usage of \n, \r, \t or \\ in send string. Default is nothing."
+ }
+ "-s" = {
+ order = 2
+ value = "$clamd_send$"
+ description = "String to send to the server."
+ }
+ "-E_quit" = {
+ key = "-E"
+ order = 3
+ set_if = "$clamd_escape_quit$"
+ description = "Can use \n, \r, \t or \\ in quit string. Default is \r\n added to end of quit."
+ }
+ "-q" = {
+ order = 4
+ value = "$clamd_quit$"
+ description = "String to send server to initiate a clean close of the connection."
+ }
+ "-r" = {
+ value = "$clamd_refuse$"
+ description = "Accept TCP refusals with states ok, warn, crit. Defaults to crit."
+ }
+ "-M" = {
+ value = "$clamd_mismatch$"
+ description = "Accept expected string mismatches with states ok, warn, crit. Defaults to warn."
+ }
+ "-j" = {
+ set_if = "$clamd_jail$"
+ description = "Hide output from TCP socket."
+ }
+ "-m" = {
+ value = "$clamd_maxbytes$"
+ description = "Close connection once more than this number of bytes are received."
+ }
+ "-d" = {
+ value = "$clamd_delay$"
+ description = "Seconds to wait between sending string and polling for response."
+ }
+ "-D" = {
+ value = "$clamd_certificate$"
+ description = "Minimum number of days a certificate has to be valid. 1st value is number of days for warning, 2nd is critical (if not specified: 0) - seperated by comma."
+ }
+ "-S" = {
+ set_if = "$clamd_ssl$"
+ description = "Use SSL for the connection."
+ }
+ "-w" = {
+ value = "$clamd_wtime$"
+ description = "Response time to result in warning status (seconds)."
+ }
+ "-c" = {
+ value = "$clamd_ctime$"
+ description = "Response time to result in critical status (seconds)."
+ }
+ "-t" = {
+ value = "$clamd_timeout$"
+ description = "Seconds before connection times out. Defaults to 10."
+ }
+ "-4" = {
+ set_if = "$clamd_ipv4$"
+ description = "Use IPv4 only"
+ }
+ "-6" = {
+ set_if = "$clamd_ipv6$"
+ description = "Use IPv6 only"
+ }
+ }
+
+ vars.clamd_ssl = false
+ vars.clamd_refuse = "crit"
+ vars.clamd_mismatch = "warn"
+ vars.clamd_timeout = 10
+ vars.check_ipv4 = "$clamd_ipv4$"
+ vars.check_ipv6 = "$clamd_ipv6$"
+}
+
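+/* Editor's note: a minimal usage sketch, not part of the upstream file. The apply
+ * rule, host filter and socket path below are illustrative assumptions. */
+apply Service "clamd" {
+  check_command = "clamd"
+  // check a local clamd unix socket (path is an assumption)
+  vars.clamd_address = "/var/run/clamav/clamd.ctl"
+  vars.clamd_timeout = 5
+  assign where host.vars.clamav == true
+}
+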
+object CheckCommand "mailq" {
+ command = [ PluginDir + "/check_mailq" ]
+
+ arguments = {
+ "-w" = {
+ value = "$mailq_warning$"
+ description = "Min. number of messages in queue to generate warning"
+ required = true
+ }
+ "-c" = {
+ value = "$mailq_critical$"
+ description = "Min. number of messages in queue to generate critical alert ( w < c )"
+ required = true
+ }
+ "-W" = {
+ value = "$mailq_domain_warning$"
+ description = "Min. number of messages for same domain in queue to generate warning"
+ }
+ "-C" = {
+ value = "$mailq_domain_critical$"
+ description = "Min. number of messages for same domain in queue to generate critical alert ( W < C )"
+ }
+ "-t" = {
+ value = "$mailq_timeout$"
+ description = "Plugin timeout in seconds (default = 15)"
+ }
+ "-M" = {
+ value = "$mailq_servertype$"
+ description = "[ sendmail | qmail | postfix | exim | nullmailer ] (default = autodetect)"
+ }
+ "-s" = {
+ set_if = "$mailq_sudo$"
+ description = "Use sudo for mailq command"
+ }
+ }
+}
+
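+/* Editor's note: a minimal usage sketch, not part of the upstream file. Thresholds,
+ * server type and the host filter are illustrative assumptions. */
+apply Service "mailq" {
+  check_command = "mailq"
+  vars.mailq_warning = 10      // WARNING once 10 or more messages are queued
+  vars.mailq_critical = 50     // CRITICAL once 50 or more messages are queued
+  vars.mailq_servertype = "postfix"
+  assign where host.vars.mailserver == true
+}
+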
+object CheckCommand "pgsql" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_pgsql" ]
+
+ arguments = {
+ "--extra-opts" = {
+ value = "$pgsql_extra_opts$"
+ description = "Read options from an ini file"
+ }
+ "-H" = {
+ value = "$pgsql_hostname$"
+ description = "Host name, IP Address, or unix socket (must be an absolute path)"
+ }
+ "-P" = {
+ value = "$pgsql_port$"
+ description = "Port number (default: 5432)"
+ }
+ "-d" = {
+ value = "$pgsql_database$"
+ description = "Database to check (default: template1)"
+ }
+ "-l" = {
+ value = "$pgsql_username$"
+ description = "Login name of user"
+ }
+ "-p" = {
+ value = "$pgsql_password$"
+ description = "Password (BIG SECURITY ISSUE)"
+ }
+ "-o" = {
+ value = "$pgsql_options$"
+ description = "Connection parameters (keyword = value), see below"
+ }
+ "-w" = {
+ value = "$pgsql_warning$"
+ description = "Response time to result in warning status (seconds)"
+ }
+ "-c" = {
+ value = "$pgsql_critical$"
+ description = "Response time to result in critical status (seconds)"
+ }
+ "-t" = {
+ value = "$pgsql_timeout$"
+ description = "Seconds before connection times out (default: 10)"
+ }
+ "-q" = {
+ value = "$pgsql_query$"
+ description = "SQL query to run. Only first column in first row will be read"
+ }
+ "-W" = {
+ value = "$pgsql_query_warning$"
+ description = "SQL query value to result in warning status (double)"
+ }
+ "-C" = {
+ value = "$pgsql_query_critical$"
+ description = "SQL query value to result in critical status (double)"
+ }
+ }
+
+ vars.pgsql_hostname = "$check_address$"
+}
+
+object CheckCommand "mysql" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_mysql" ]
+
+ arguments = {
+ "--extra-opts" = {
+ value = "$mysql_extra_opts$"
+ description = "Read options from an ini file"
+ }
+ "-H" = {
+ set_if = {{ !macro("$mysql_socket$") }}
+ value = "$mysql_hostname$"
+ description = "Host name, IP Address, or unix socket (must be an absolute path)"
+ }
+ "-P" = {
+ value = "$mysql_port$"
+ description = "Port number (default: 3306)"
+ }
+ "-n" = {
+ set_if = "$mysql_ignore_auth$"
+ description = "Ignore authentication failure and check for mysql connectivity only"
+ }
+ "-s" = {
+ value = "$mysql_socket$"
+ description = "Use the specified socket"
+ }
+ "-d" = {
+ value = "$mysql_database$"
+ description = "Check database with indicated name"
+ }
+ "-f" = {
+ value = "$mysql_file$"
+ description = "Read from the specified client options file"
+ }
+ "-g" = {
+ value = "$mysql_group$"
+ description = "Use a client options group"
+ }
+ "-u" = {
+ value = "$mysql_username$"
+ description = "Connect using the indicated username"
+ }
+ "-p" = {
+ value = "$mysql_password$"
+ description = "Use the indicated password to authenticate the connection"
+ }
+ "-S" = {
+ set_if = "$mysql_check_slave$"
+ description = "Check if the slave thread is running properly"
+ }
+ "-w" = {
+ value = "$mysql_warning$"
+ description = "Exit with WARNING status if slave server is more than INTEGER seconds behind master"
+ }
+ "-c" = {
+ value = "$mysql_critical$"
+ description = "Exit with CRITICAL status if slave server is more than INTEGER seconds behind master"
+ }
+ "-l" = {
+ set_if = "$mysql_ssl$"
+ description = "Use SSL encryption"
+ }
+ "-C" = {
+ value = "$mysql_cacert$"
+ description = "Path to CA signing the cert"
+ }
+ "-a" = {
+ value = "$mysql_cert$"
+ description = "Path to SSL certificate"
+ }
+ "-k" = {
+ value = "$mysql_key$"
+ description = "Path to private SSL key"
+ }
+ "-D" = {
+ value = "$mysql_cadir$"
+ description = "Path to CA directory"
+ }
+ "-L" = {
+ value = "$mysql_ciphers$"
+ description = "List of valid SSL ciphers"
+ }
+ }
+
+ vars.mysql_hostname = "$check_address$"
+}
+
+object CheckCommand "negate" {
+ command = [ PluginDir + "/negate" ]
+
+ arguments = {
+ "-t" = {
+ value = "$negate_timeout$"
+ description = "Seconds before plugin times out (default: 11)"
+ }
+ "-T" = {
+ value = "$negate_timeout_result$"
+ description = "Custom result on Negate timeouts"
+ }
+ "-o" = {
+ value = "$negate_ok$"
+ }
+ "-w" = {
+ value = "$negate_warning$"
+ }
+ "-c" = {
+ value = "$negate_critical$"
+ }
+ "-u" = {
+ value = "$negate_unknown$"
+ }
+ "-s" = {
+ set_if = "$negate_substitute$"
+ description = "Substitute output text as well. Will only substitute text in CAPITALS"
+ }
+ "--wrapped-plugin" = {
+ value = {{
+ var command = macro("$negate_command$")
+ var arguments = macro("$negate_arguments$")
+
+ if (typeof(command) == String && !arguments) {
+ return command
+ }
+
+ var escaped_args = []
+ for (arg in resolve_arguments(command, arguments)) {
+ escaped_args.add(arg)
+ }
+ return escaped_args.join(" ")
+ }}
+ skip_key = true
+ order = 1
+ }
+ }
+
+ vars.negate_timeout_result = "UNKNOWN"
+}
+
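+/* Editor's note: a minimal usage sketch, not part of the upstream file. It wraps
+ * check_dummy (from the standard monitoring plugins) and swaps OK and CRITICAL; the
+ * apply rule and host filter are illustrative assumptions. */
+apply Service "negate-demo" {
+  check_command = "negate"
+  // a plain string is passed through to --wrapped-plugin unchanged
+  vars.negate_command = PluginDir + "/check_dummy 2 'simulated failure'"
+  vars.negate_critical = 0   // report OK when the wrapped plugin is CRITICAL
+  vars.negate_ok = 2         // report CRITICAL when the wrapped plugin is OK
+  assign where host.vars.negate_demo == true
+}
+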
+object CheckCommand "file_age" {
+ command = [ PluginDir + "/check_file_age" ]
+
+ arguments = {
+ "-w" = {
+ value = "$file_age_warning_time$"
+ description = "File must be no more than this many seconds old (default: 240s)"
+ }
+ "-c" = {
+ value = "$file_age_critical_time$"
+ description = "File must be no more than this many seconds old (default: 600s)"
+ }
+ "-W" = {
+ value = "$file_age_warning_size$"
+ description = "File must be at least this many bytes long"
+ }
+ "-C" = {
+ value = "$file_age_critical_size$"
+ description = "File must be at least this many bytes long (default: 0B)"
+ }
+ "-i" = {
+ set_if = "$file_age_ignoremissing$"
+ description = "return OK if the file does not exist"
+ }
+ "-f" = {
+ value = "$file_age_file$"
+ description = "File to monitor"
+ }
+ }
+
+ vars.file_age_ignoremissing = false
+}
+
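+/* Editor's note: a minimal usage sketch, not part of the upstream file. The path,
+ * thresholds and host filter are illustrative assumptions. */
+apply Service "backup-freshness" {
+  check_command = "file_age"
+  vars.file_age_file = "/var/backups/latest.tar.gz"
+  vars.file_age_warning_time = 86400     // warn if the file is older than one day
+  vars.file_age_critical_time = 172800   // critical if older than two days
+  assign where host.vars.backups == true
+}
+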
+object CheckCommand "smart" {
+ command = [ PluginDir + "/check_ide_smart" ]
+
+ arguments = {
+ "-d" = {
+ value = "$smart_device$"
+ description = "Name of a local hard drive to monitor"
+ required = true
+ }
+ }
+}
+
+object CheckCommand "breeze" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_breeze" ]
+
+ arguments = {
+ "-H" = {
+ value = "$breeze_hostname$"
+ description = "Name or IP address of host to check"
+ required = true
+ }
+ "-C" = {
+ value = "$breeze_community$"
+ description = "SNMPv1 community (default public)"
+ }
+ "-w" = {
+ value = "$breeze_warning$"
+ description = "Percentage strength below which a WARNING status will result"
+ required = true
+ }
+ "-c" = {
+ value = "$breeze_critical$"
+ description = "Percentage strength below which a CRITICAL status will result"
+ required = true
+ }
+ }
+
+ vars.breeze_hostname = "$check_address$"
+ vars.breeze_warning = "50"
+ vars.breeze_critical = "20"
+}
+
+object CheckCommand "flexlm" {
+ command = [ PluginDir + "/check_flexlm" ]
+
+ arguments = {
+ "-F" = {
+ value = "$flexlm_licensefile$"
+ description = "Name of license file (usually license.dat)"
+ required = true
+ }
+ "-t" = {
+ value = "$flexlm_timeout$"
+ description = "Plugin time out in seconds (default = 15)"
+ }
+ }
+}
+
+object CheckCommand "game" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_game" ]
+
+ arguments = {
+ "-P" = {
+ value = "$game_port$"
+ description = "Port to connect to"
+ }
+ "-t" = {
+ value = "$game_timeout$"
+ description = "Seconds before connection times out (default: 10)"
+ }
+ "-g" = {
+ value = "$game_gamefield$"
+ description = "Field number in raw qstat output that contains game name"
+ }
+ "-m" = {
+ value = "$game_mapfield$"
+ description = "Field number in raw qstat output that contains map name"
+ }
+ "-p" = {
+ value = "$game_pingfield$"
+ description = "Field number in raw qstat output that contains ping time"
+ }
+ "-G" = {
+ value = "$game_gametime$"
+ description = "Field number in raw qstat output that contains game time"
+ }
+ "-H" = {
+ value = "$game_hostname$"
+ description = "Name of the host running the game"
+ }
+ "game" = {
+ value = "$game_game$"
+ description = "Name of the game"
+ order = 1
+ skip_key = true
+ }
+ "ipaddress" = {
+ value = "$game_ipaddress$"
+ description = "IP address of the game server to query"
+ order = 2
+ skip_key = true
+ }
+ }
+}
+
+object CheckCommand "mysql_query" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_mysql_query" ]
+
+ arguments = {
+ "-H" = {
+ value = "$mysql_query_hostname$"
+ description = "Host name, IP Address, or unix socket (must be an absolute path)"
+ }
+ "-P" = {
+ value = "$mysql_query_port$"
+ description = "Port number (default: 3306)"
+ }
+ "-u" = {
+ value = "$mysql_query_username$"
+ description = "Username to login with"
+ }
+ "-p" = {
+ value = "$mysql_query_password$"
+ description = "Password to login with"
+ }
+ "-d" = {
+ value = "$mysql_query_database$"
+ description = "Database to check"
+ }
+ "-f" = {
+ value = "$mysql_query_file$"
+ description = "Read from the specified client options file"
+ }
+ "-g" = {
+ value = "$mysql_query_group$"
+ description = "Use a client options group"
+ }
+ "-q" = {
+ value = "$mysql_query_execute$"
+ description = "SQL query to run. Only first column in first row will be read"
+ }
+ "-w" = {
+ value = "$mysql_query_warning$"
+ description = "Warning range (format: start:end). Alert if outside this range"
+ }
+ "-c" = {
+ value = "$mysql_query_critical$"
+ description = "Critical range"
+ }
+ }
+
+ vars.mysql_query_hostname = "$check_address$"
+}
+
+object CheckCommand "radius" {
+ import "ipv4-or-ipv6"
+
+ command = [
+ PluginDir + "/check_radius",
+ ]
+
+ arguments = {
+ "-H" = {
+ value = "$radius_address$",
+ description = "Host name, IP Address, or unix socket (must be an absolute path)"
+ }
+ "-F" = {
+ value = "$radius_config_file$",
+ description = "Configuration file"
+ }
+ "-u" = {
+ value = "$radius_username$",
+ description = "The user to authenticate"
+ }
+ "-p" = {
+ value = "$radius_password$",
+ description = "Password for authentication"
+ }
+ "-P" = {
+ value = "$radius_port$",
+ description = "Port number (default: 1645)"
+ },
+ "-n" = {
+ value = "$radius_nas_id$",
+ description = "NAS identifier"
+ }
+ "-N" = {
+ value = "$radius_nas_address$",
+ description = "NAS IP Address"
+ },
+ "-e" = {
+ value = "$radius_expect$",
+ description = "Response string to expect from the server"
+ },
+ "-r" = {
+ value = "$radius_retries$",
+ description = "Number of times to retry a failed connection"
+ },
+ "-t" = {
+ value = "$radius_timeout$",
+ description = "Seconds before connection times out (default: 10). Optional ':<timeout state>' can be a state integer (0,1,2,3) or a state STRING"
+ },
+ }
+
+ vars.radius_address = "$check_address$"
+}
+
+object CheckCommand "nscp_api" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_nscp_api" ]
+
+ arguments = {
+ "-H" = {
+ value = "$nscp_api_host$"
+ description = "NSCP API host address"
+ required = true
+ }
+ "-P" = {
+ value = "$nscp_api_port$"
+ description = "NSCP API host port. Defaults to 8443."
+ }
+ "--password" = {
+ value = "$nscp_api_password$"
+ description = "NSCP API password"
+ }
+ "-q" = {
+ value = "$nscp_api_query$"
+ description = "NSCP API query endpoint to use"
+ }
+ "-a" = {
+ value = "$nscp_api_arguments$"
+ description = "NSCP API Query arguments"
+ repeat_key = true
+ }
+ }
+
+ vars.nscp_api_host = "$check_address$"
+}
+
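+/* Editor's note: a minimal usage sketch, not part of the upstream file. The query,
+ * its thresholds, the password and the host filter are illustrative assumptions. */
+apply Service "nscp-api-cpu" {
+  check_command = "nscp_api"
+  vars.nscp_api_password = "secret"      // NSClient++ web API password (assumption)
+  vars.nscp_api_query = "check_cpu"
+  // repeat_key = true: each array element becomes its own -a argument
+  vars.nscp_api_arguments = [ "time=5m", "warn=load > 80", "crit=load > 90" ]
+  assign where host.vars.os == "Windows"
+}
+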
+object CheckCommand "rpc" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginDir + "/check_rpc" ]
+
+ arguments = {
+ "-H" = {
+ value = "$rpc_address$"
+ description = "RPC host address"
+ required = true
+ }
+ "-C" = {
+ value = "$rpc_command$"
+ description = "Program name (or number)"
+ required = true
+ }
+ "-p" = {
+ value = "$rpc_port$"
+ description = "RPC port"
+ }
+ "-c" = {
+ value = "$rpc_version$"
+ description = "The version to check"
+ }
+ "-u" = {
+ set_if = "$rpc_udp$"
+ description = "Test UDP"
+ }
+ "-t" = {
+ set_if = "$rpc_tcp$"
+ description = "Test TCP"
+ }
+ "-v" = {
+ set_if = "$rpc_verbose$"
+ description = "Show verbose details"
+ }
+ }
+
+ vars.rpc_address = "$check_address$"
+}
+
+object CheckCommand "uptime" {
+ command = [ PluginDir + "/check_uptime" ]
+
+ arguments = {
+ "--warning" = {
+ value = "$uptime_warning$"
+ description = "Min. uptime below which a warning is generated"
+ required = true
+ }
+ "--critical" = {
+ value = "$uptime_critical$"
+ description = "Min. uptime below which a critical alert is generated ( c < w )"
+ required = true
+ }
+ "--for" = {
+ set_if = "$uptime_for$"
+ description = "Show uptime in a pretty format (Running for x weeks, x days, ...)"
+ }
+ "--since" = {
+ set_if = "$uptime_since$"
+ description = "Show last boot in yyyy-mm-dd HH:MM:SS format (output from 'uptime -s')"
+ }
+ }
+
+ vars.uptime_warning = "30m"
+ vars.uptime_critical = "15m"
+}
+
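+/* Editor's note: a minimal usage sketch, not part of the upstream file. The host
+ * filter is an illustrative assumption; thresholds override the defaults above. */
+apply Service "uptime" {
+  check_command = "uptime"
+  vars.uptime_warning = "1h"    // warn if the system has been up for less than an hour
+  vars.uptime_critical = "30m"
+  vars.uptime_for = true        // pretty "Running for ..." output
+  assign where host.vars.os == "Linux"
+}
+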
diff --git a/itl/hangman b/itl/hangman
new file mode 100644
index 0000000..b2feb3a
--- /dev/null
+++ b/itl/hangman
@@ -0,0 +1,165 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+if (!globals.contains("irc")) {
+ globals.irc = log
+}
+
+hm = {
+ max_errors = 6
+
+ h_word = null
+ h_arr = null
+ guesses = 0
+ errors = 0
+ a_arr = null
+ misses = ""
+
+ if (irc) {
+ hangman_output = irc
+ } else {
+ hangman_output = log
+ }
+
+ function str2arr(str) {
+ var arr = []
+
+ for (i in range(str.len())) {
+ arr.add(str.substr(i, 1))
+ }
+
+ return arr
+ }
+
+ function init(s) {
+ s = s.upper()
+ h_word = s
+ h_arr = str2arr(h_word)
+ guesses = 0
+ errors = 0
+ a_arr = h_arr.clone()
+ misses = ""
+
+ for (x in range(a_arr.len())) {
+ a_arr[x] = (h_arr[x] == " ")
+ }
+ }
+
+ function print_progress() {
+ var ir
+
+ for (x in range(a_arr.len())) {
+ if (a_arr[x]) {
+ ir += h_arr[x]
+ } else {
+ ir += "_"
+ }
+
+ ir += " "
+ }
+
+ hangman_output(ir)
+ hangman_output(errors + "/" + (max_errors + 1) + " errors made: " + misses)
+ }
+
+ function create_hint() {
+ var r = Math.floor(Math.random() * a_arr.len())
+ if (!a_arr[r]) {
+ a_arr[r] = true
+ for (x in range(h_arr.len())) {
+ if (h_arr[x] == h_arr[r]) {
+ a_arr[x] = true
+ }
+ }
+ } else {
+ if (a_arr.contains(false)) {
+ create_hint()
+ } else {
+ winner()
+ }
+ }
+
+ if (!a_arr.contains(false)) {
+ winner()
+ }
+ }
+
+ function hint(i) {
+ for (x in range(i)) {
+ create_hint()
+ }
+
+ print_progress()
+ }
+
+ function winner() {
+ if (h_word) {
+ hangman_output("Congratulations, you are a winner in " + guesses + " guesses.")
+ h_word = null
+ }
+ }
+
+ function guess(s) {
+ if (!h_word) {
+ hangman_output("Please set a word with hm.init(\"word\")")
+ return
+ }
+
+ s = s.upper()
+
+ if (s.len() != 1) {
+ hangman_output("NEIN!")
+ return
+ }
+
+ var correct = false
+ for (x in range(h_arr.len())) {
+ if (h_arr[x] == s) {
+ a_arr[x] = true
+ correct = true
+ }
+ }
+
+ if (!correct) {
+ misses += s + " "
+ errors += 1
+ }
+
+ print_progress()
+
+ guesses += 1
+
+ if (!a_arr.contains(false)) {
+ winner()
+ return
+ }
+
+ if (errors > max_errors) {
+ hangman_output("You died...")
+ hangman_output(" ________")
+ hangman_output(" |/ |")
+ hangman_output(" | (_)")
+ hangman_output(" | \\|/")
+ hangman_output(" | |")
+ hangman_output(" | / \\")
+ hangman_output(" |")
+ hangman_output("_|___")
+ remove("h_word")
+ return
+ }
+ }
+
+ function clone() {
+ var n = Dictionary.prototype.clone.call(this)
+
+ if (h_arr) {
+ n.h_arr = h_arr.clone()
+ }
+
+ if (a_arr) {
+ n.a_arr = a_arr.clone()
+ }
+
+ return n
+ }
+}
+
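+/* Editor's note: usage sketch, not part of the upstream file. This easter egg is
+ * meant to be played interactively, e.g. from "icinga2 console" after including
+ * this file:
+ *
+ *   hm.init("monitoring")   // choose the word to guess
+ *   hm.guess("o")           // guess one letter at a time
+ *   hm.hint(1)              // reveal a random letter
+ */
+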
diff --git a/itl/itl b/itl/itl
new file mode 100644
index 0000000..8f98fe6
--- /dev/null
+++ b/itl/itl
@@ -0,0 +1,8 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+/**
+ * This is the Icinga Template Library, a collection of general purpose Icinga
+ * configuration templates.
+ */
+
+include "command-icinga.conf"
diff --git a/itl/manubulon b/itl/manubulon
new file mode 100644
index 0000000..6b6855d
--- /dev/null
+++ b/itl/manubulon
@@ -0,0 +1,7 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+/**
+ * This is the SNMP Manubulon Template Library.
+ */
+
+include "command-plugins-manubulon.conf"
diff --git a/itl/nscp b/itl/nscp
new file mode 100644
index 0000000..c62513e
--- /dev/null
+++ b/itl/nscp
@@ -0,0 +1,3 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+include "command-nscp-local.conf"
diff --git a/itl/plugins b/itl/plugins
new file mode 100644
index 0000000..edfe4ce
--- /dev/null
+++ b/itl/plugins
@@ -0,0 +1,8 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+/**
+ * This is the Icinga Template Library, a collection of general purpose Icinga
+ * configuration templates. This part includes the plugin commands.
+ */
+
+include "command-plugins.conf"
diff --git a/itl/plugins-contrib b/itl/plugins-contrib
new file mode 100644
index 0000000..f2f56e0
--- /dev/null
+++ b/itl/plugins-contrib
@@ -0,0 +1,9 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+/**
+ * This is a directory for various contributed command definitions.
+ * Plugins should be organized by a category, e.g. database plugins are in
+ * plugins-contrib.d/databases.conf
+ */
+
+include_recursive "plugins-contrib.d"
diff --git a/itl/plugins-contrib.d/CMakeLists.txt b/itl/plugins-contrib.d/CMakeLists.txt
new file mode 100644
index 0000000..b6386b8
--- /dev/null
+++ b/itl/plugins-contrib.d/CMakeLists.txt
@@ -0,0 +1,6 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+install(
+ FILES big-data.conf databases.conf hardware.conf icingacli.conf ipmi.conf logmanagement.conf metrics.conf network-components.conf network-services.conf operating-system.conf raid-controller.conf smart-attributes.conf storage.conf systemd.conf virtualization.conf vmware.conf web.conf
+ DESTINATION ${ICINGA2_INCLUDEDIR}/plugins-contrib.d
+)
diff --git a/itl/plugins-contrib.d/big-data.conf b/itl/plugins-contrib.d/big-data.conf
new file mode 100644
index 0000000..7d3d6f1
--- /dev/null
+++ b/itl/plugins-contrib.d/big-data.conf
@@ -0,0 +1,112 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+object CheckCommand "cloudera_service_status" {
+ command = [ PluginContribDir + "/check_cloudera_service_status.py" ]
+
+ arguments = {
+ "-H" = {
+ description = "host"
+ value = "$cloudera_host$"
+ required = true
+ }
+ "-P" = {
+ description = "port"
+ value = "$cloudera_port$"
+ required = false
+ }
+ "-u" = {
+ description = "user"
+ value = "$cloudera_user$"
+ required = true
+ }
+ "-p" = {
+ description = "pass"
+ value = "$cloudera_pass$"
+ required = true
+ }
+ "-v" = {
+ description = "api_version"
+ value = "$cloudera_api_version$"
+ required = true
+ }
+ "-c" = {
+ description = "cluster"
+ value = "$cloudera_cluster$"
+ required = true
+ }
+ "-s" = {
+ description = "service"
+ value = "$cloudera_service$"
+ required = true
+ }
+ "-k" = {
+ description = "verify_ssl"
+ value = "$cloudera_verify_ssl$"
+ required = false
+ }
+ }
+}
+
+object CheckCommand "cloudera_hdfs_space" {
+ command = [ PluginContribDir + "/check_cloudera_hdfs_space.py" ]
+
+ arguments = {
+ "-H" = {
+ description = "Namenode host"
+ value = "$cloudera_hdfs_space_host$"
+ required = true
+ }
+ "-P" = {
+ description = "Namenode port (default 50070)"
+ value = "$cloudera_hdfs_space_port$"
+ required = false
+ }
+ "-d" = {
+ description = "HDFS disk to check"
+ value = "$cloudera_hdfs_space_disk$"
+ required = true
+ }
+ "-w" = {
+ description = "Warning threshold in percent"
+ value = "$cloudera_hdfs_space_warn$"
+ required = true
+ }
+ "-c" = {
+ description = "Critical threshold in percent"
+ value = "$cloudera_hdfs_space_crit$"
+ required = true
+ }
+ }
+}
+
+object CheckCommand "cloudera_hdfs_files" {
+ command = [ PluginContribDir + "/check_cloudera_hdfs_files.py" ]
+
+ arguments = {
+ "-H" = {
+ description = "Namenode host"
+ value = "$cloudera_hdfs_files_host$"
+ required = true
+ }
+ "-P" = {
+ description = "Namenode port (default 50070)"
+ value = "$cloudera_hdfs_files_port$"
+ required = false
+ }
+ "-w" = {
+ description = "Warning threshold"
+ value = "$cloudera_hdfs_files_warn$"
+ required = true
+ }
+ "-c" = {
+ description = "Critical threshold"
+ value = "$cloudera_hdfs_files_crit$"
+ required = true
+ }
+ "-m" = {
+ description = "Max files count that causes problems (default 140000000)"
+ value = "$cloudera_hdfs_files_max$"
+ required = false
+ }
+ }
+}
diff --git a/itl/plugins-contrib.d/databases.conf b/itl/plugins-contrib.d/databases.conf
new file mode 100644
index 0000000..29b1246
--- /dev/null
+++ b/itl/plugins-contrib.d/databases.conf
@@ -0,0 +1,973 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+object CheckCommand "mssql_health" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_mssql_health" ]
+
+ arguments = {
+ "--hostname" = {
+ value = "$mssql_health_hostname$"
+ description = "the database server's hostname"
+ }
+ "--username" = {
+ value = "$mssql_health_username$"
+ description = "the mssql db user"
+ }
+ "--password" = {
+ value = "$mssql_health_password$"
+ description = "the mssql db user's password"
+ }
+ "--port" = {
+ value = "$mssql_health_port$"
+ description = "the database's port"
+ }
+ "--server" = {
+ value = "$mssql_health_server$"
+ description = "the name of a predefined connection"
+ }
+ "--currentdb" = {
+ value = "$mssql_health_currentdb$"
+ description = "the name of a database which is used as the current database for the connection"
+ }
+ "--offlineok" = {
+ set_if = "$mssql_health_offlineok$"
+ description = "if offline databases are perfectly ok for you"
+ }
+ "--nooffline" = {
+ set_if = "$mssql_health_nooffline$"
+ description = "Skip the offline databases"
+ }
+ "--dbthresholds" = {
+ value = "$mssql_health_dbthresholds$"
+ description = "Read thresholds from a database table"
+ }
+ "--notemp" = {
+ set_if = "$mssql_health_notemp$"
+ description = "Ignore temporary databases/tablespaces"
+ }
+ "--commit" = {
+ set_if = "$mssql_health_commit$"
+ description = "turns on autocommit for the dbd::sybase module"
+ }
+ "--method" = {
+ value = "$mssql_health_method$"
+ description = "how the plugin should connect to the database (dbi for using DBD::Sybase (default), sqlrelay for the SQLRelay proxy)"
+ }
+ "--mode" = {
+ value = "$mssql_health_mode$"
+ description = "the mode of the plugin"
+ }
+ "--regexp" = {
+ set_if = "$mssql_health_regexp$"
+ description = "name will be interpreted as a regular expression"
+ }
+ "--warning" = {
+ value = "$mssql_health_warning$"
+ description = "the warning range"
+ }
+ "--critical" = {
+ value = "$mssql_health_critical$"
+ description = "the critical range"
+ }
+ "--warningx" = {
+ value = "$mssql_health_warningx$"
+ description = "The extended warning thresholds"
+ }
+ "--criticalx" = {
+ value = "$mssql_health_criticalx$"
+ description = "The extended critical thresholds"
+ }
+ "--units" = {
+ value = "$mssql_health_units$"
+ description = "This is used for a better output of mode=sql and for specifying thresholds for mode=tablespace-free"
+ }
+ "--name" = {
+ value = "$mssql_health_name$"
+ description = "the name of the database etc depending on the mode"
+ }
+ "--name2" = {
+ value = "$mssql_health_name2$"
+ description = "if name is a sql statement, name2 can be used to appear in the output and the performance data"
+ }
+ "--name3" = {
+ value = "$mssql_health_name3$"
+ description = "The tertiary name of a component"
+ }
+ "--extra-opts" = {
+ value = "$mssql_health_extraopts$"
+ description = "read command line arguments from an external file"
+ }
+ "--blacklist" = {
+ value = "$mssql_health_blacklist$"
+ description = "Blacklist some (missing/failed) components"
+ }
+ "--mitigation" = {
+ value = "$mssql_health_mitigation$"
+ description = "The parameter allows you to change a critical error to a warning."
+ }
+ "--lookback" = {
+ value = "$mssql_health_lookback$"
+ description = "The amount of time you want to look back when calculating average rates"
+ }
+ "--environment" = {
+ value = "$mssql_health_environment$"
+ description = "Add a variable to the plugin's environment."
+ }
+ "--negate" = {
+ value = "$mssql_health_negate$"
+ description = "Emulate the negate plugin. --negate warning=critical --negate unknown=critical."
+ }
+ "--morphmessage" = {
+ value = "$mssql_health_morphmessage$"
+ description = "Modify the final output message."
+ }
+ "--morphperfdata" = {
+ value = "$mssql_health_morphperfdata$"
+ description = "The parameter allows you to change performance data labels."
+ }
+ "--selectedperfdata" = {
+ value = "$mssql_health_selectedperfdata$"
+ description = "The parameter allows you to limit the list of performance data."
+ }
+ "--report" = {
+ value = "$mssql_health_report$"
+ description = "Report can be used to output only the bad news (short,long,html)"
+ }
+ "--multiline" = {
+ value = "$mssql_health_multiline$"
+ description = "Multiline output."
+ }
+ "--with-mymodules-dyn-dir" = {
+ value = "$mssql_health_withmymodulesdyndir$"
+ description = "Add-on modules for the my-modes will be searched in this directory."
+ }
+ "--statefilesdir" = {
+ value = "$mssql_health_statefilesdir$"
+ description = "An alternate directory where the plugin can save files."
+ }
+ "--isvalidtime" = {
+ value = "$mssql_health_isvalidtime$"
+ description = "Signals the plugin to return OK if now is not a valid check time."
+ }
+ "--timeout" = {
+ value = "$mssql_health_timeout$"
+ description = "Seconds before plugin times out (default: 15)"
+ }
+ }
+
+ vars.mssql_health_regexp = false
+ vars.mssql_health_offlineok = false
+ vars.mssql_health_commit = false
+ vars.mssql_health_notemp = false
+ vars.mssql_health_nooffline = false
+ vars.mssql_health_report = "short"
+}
+
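+/* Editor's note: a minimal usage sketch, not part of the upstream file. Mode,
+ * thresholds, credentials and host filter are illustrative assumptions. */
+apply Service "mssql-connection-time" {
+  check_command = "mssql_health"
+  vars.mssql_health_hostname = host.address
+  vars.mssql_health_username = "monitoring"    // hypothetical monitoring login
+  vars.mssql_health_password = "secret"
+  vars.mssql_health_mode = "connection-time"   // seconds needed to connect
+  vars.mssql_health_warning = 1
+  vars.mssql_health_critical = 5
+  assign where host.vars.mssql == true
+}
+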
+object CheckCommand "mysql_health" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_mysql_health" ]
+
+ arguments = {
+ "--hostname" = {
+ value = "$mysql_health_hostname$"
+ description = "the database server's hostname"
+ }
+ "--port" = {
+ value = "$mysql_health_port$"
+ description = "the database's port"
+ }
+ "--socket" = {
+ value = "$mysql_health_socket$"
+ description = "the database's unix socket"
+ }
+ "--username" = {
+ value = "$mysql_health_username$"
+ description = "the mysql db user"
+ }
+ "--password" = {
+ value = "$mysql_health_password$"
+ description = "the mysql db user's password"
+ }
+ "--database" = {
+ value = "$mysql_health_database$"
+ description = "the database's name"
+ }
+ "--warning" = {
+ value = "$mysql_health_warning$"
+ description = "the warning range"
+ }
+ "--critical" = {
+ value = "$mysql_health_critical$"
+ description = "the critical range"
+ }
+ "--warningx" = {
+ value = "$mysql_health_warningx$"
+ description = "The extended warning thresholds"
+ }
+ "--criticalx" = {
+ value = "$mysql_health_criticalx$"
+ description = "The extended critical thresholds"
+ }
+ "--mode" = {
+ value = "$mysql_health_mode$"
+ description = "the mode of the plugin"
+ }
+ "--method" = {
+ value = "$mysql_health_method$"
+ description = "how the plugin should connect to the database (dbi for using DBD::mysql (default), mysql for using the mysql-Tool)"
+ }
+ "--commit" = {
+ value = "$mysql_health_commit$"
+ description = "turns on autocommit for the dbd::* module"
+ }
+ "--notemp" = {
+ value = "$mysql_health_notemp$"
+ description = "Ignore temporary databases/tablespaces"
+ }
+ "--nooffline" = {
+ value = "$mysql_health_nooffline$"
+ description = "skip the offline databases"
+ }
+ "--regexp" = {
+ value = "$mysql_health_regexp$"
+ description = "Parameter name/name2/name3 will be interpreted as a (Perl) regular expression."
+ }
+ "--name" = {
+ value = "$mysql_health_name$"
+ description = "The name of a specific component to check"
+ }
+ "--name2" = {
+ value = "$mysql_health_name2$"
+ description = "The secondary name of a component"
+ }
+ "--name3" = {
+ value = "$mysql_health_name3$"
+ description = "The tertiary name of a component"
+ }
+ "--units" = {
+ value = "$mysql_health_units$"
+ description = "This is used for a better output of mode=sql and for specifying thresholds for mode=tablespace-free"
+ }
+ "--labelformat" = {
+ value = "$mysql_health_labelformat$"
+ description = "one of pnp4nagios (which is the default) or groundwork"
+ }
+ "--extra-opts" = {
+ value = "$mysql_health_extraopts$"
+ description = "Read command line arguments from an external file."
+ }
+ "--blacklist" = {
+ value = "$mysql_health_blacklist$"
+ description = "Blacklist some (missing/failed) components"
+ }
+ "--mitigation" = {
+ value = "$mysql_health_mitigation$"
+ description = "The parameter allows you to change a critical error to a warning."
+ }
+ "--lookback" = {
+ value = "$mysql_health_lookback$"
+ description = "The amount of time you want to look back when calculating average rates."
+ }
+ "--environment" = {
+ value = "$mysql_health_environment$"
+ description = "Add a variable to the plugin's environment."
+ }
+ "--morphmessage" = {
+ value = "$mysql_health_morphmessage$"
+ description = "Modify the final output message."
+ }
+ "--morphperfdata" = {
+ value = "$mysql_health_morphperfdata$"
+ description = "The parameter allows you to change performance data labels."
+ }
+ "--selectedperfdata" = {
+ value = "$mysql_health_selectedperfdata$"
+ description = "The parameter allows you to limit the list of performance data."
+ }
+ "--report" = {
+ value = "$mysql_health_report$"
+ description = "Can be used to shorten the output."
+ }
+ "--multiline" = {
+ value = "$mysql_health_multiline$"
+ description = "Multiline output."
+ }
+ "--negate" = {
+ value = "$mysql_health_negate$"
+ description = "Emulate the negate plugin. --negate warning=critical --negate unknown=critical."
+ }
+ "--with-mymodules-dyn-dir" = {
+ value = "$mysql_health_withmymodulesdyndir$"
+ description = "Add-on modules for the my-modes will be searched in this directory."
+ }
+ "--statefilesdir" = {
+ value = "$mysql_health_statefilesdir$"
+ description = "An alternate directory where the plugin can save files."
+ }
+ "--isvalidtime" = {
+ value = "$mysql_health_isvalidtime$"
+ description = "Signals the plugin to return OK if now is not a valid check time."
+ }
+ "--timeout" = {
+ value = "$mysql_health_timeout$"
+ description = "plugin timeout. Default is 60 seconds"
+ }
+ }
+
+ vars.mysql_health_hostname = "$check_address$"
+}
+
+object CheckCommand "db2_health" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_db2_health" ]
+
+ arguments = {
+ "--hostname" = {
+ value = "$db2_health_hostname$"
+ set_if = "$db2_health_not_catalogued$"
+ description = "the host to connect"
+ order = -2
+ }
+ "--database" = {
+ value = "$db2_health_database$"
+ description = "the database to connect"
+ order = 0
+ }
+ "--username" = {
+ value = "$db2_health_username$"
+ description = "the db2 user"
+ order = 1
+ }
+ "--password" = {
+ value = "$db2_health_password$"
+ description = "the db2 user's password"
+ order = 2
+ }
+ "--port" = {
+ value = "$db2_health_port$"
+ description = "the db2 port for connection"
+ order = -1
+ }
+ "--warning" = {
+ value = "$db2_health_warning$"
+ description = "the warning range"
+ order = 5
+ }
+ "--critical" = {
+ value = "$db2_health_critical$"
+ description = "the critical range"
+ order = 6
+ }
+ "--mode" = {
+ value = "$db2_health_mode$"
+ description = "the mode of the plugin"
+ order = 3
+ }
+ "--name" = {
+ value = "$db2_health_name$"
+ description = "the name of the tablespace, datafile, wait event, latch, enqueue, or sql statement depending on the mode"
+ order = 4
+ }
+ "--name2" = {
+ value = "$db2_health_name2$"
+ description = "if name is a sql statement, name2 can be used to appear in the output and the performance data"
+ order = 7
+ }
+ "--regexp" = {
+ set_if = "$db2_health_regexp$"
+ description = "name will be interpreted as a regular expression"
+ }
+ "--units" = {
+ value = "$db2_health_units$"
+ description = "This is used for a better output of mode=sql and for specifying thresholds for mode=tablespace-free"
+ }
+ "--maxinactivity" = {
+ value = "$db2_health_maxinactivity$"
+ description = "used for the maximum amount of time a certain event has not happened."
+ }
+ "--mitigation" = {
+ value = "$db2_health_mitigation$"
+ description = "lets you classify the severity of an offline tablespace."
+ }
+ "--lookback" = {
+ value = "$db2_health_lookback$"
+ description = "How many days in the past the db2_health check should look back to calculate the exit code."
+ }
+ "--report" = {
+ value = "$db2_health_report$"
+ description = "Report can be used to output only the bad news (short,long,html)"
+ }
+ }
+
+ env = {
+ "DB2_HOME" = "$db2_health_env_db2_home$"
+ "DB2_VERSION" = "$db2_health_env_db2_version$"
+ }
+
+ vars.db2_health_regexp = false
+ vars.db2_health_not_catalogued = true
+ vars.db2_health_hostname = "$check_address$"
+ vars.db2_health_report = "short"
+
+ vars.db2_health_env_db2_home = "/opt/ibm/db2/V10.5"
+ vars.db2_health_env_db2_version = "10.5"
+}
+
+object CheckCommand "oracle_health" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_oracle_health" ]
+
+ arguments = {
+ "--connect" = {
+ value = "$oracle_health_connect$"
+ description = "the connect string"
+ }
+ "--username" = {
+ value = "$oracle_health_username$"
+ description = "the oracle user"
+ }
+ "--password" = {
+ value = "$oracle_health_password$"
+ description = "the oracle user's password"
+ }
+ "--warning" = {
+ value = "$oracle_health_warning$"
+ description = "the warning range"
+ }
+ "--critical" = {
+ value = "$oracle_health_critical$"
+ description = "the critical range"
+ }
+ "--mode" = {
+ value = "$oracle_health_mode$"
+ description = "the mode of the plugin"
+ }
+ "--method" = {
+ value = "$oracle_health_method$"
+ description = "how the plugin should connect to the database (dbi for using DBD::Oracle (default), sqlplus for using the sqlplus-Tool)"
+ }
+ "--name" = {
+ value = "$oracle_health_name$"
+ description = "the name of the tablespace, datafile, wait event, latch, enqueue, or sql statement depending on the mode"
+ }
+ "--name2" = {
+ value = "$oracle_health_name2$"
+ description = "if name is a sql statement, name2 can be used to appear in the output and the performance data"
+ }
+ "--regexp" = {
+ set_if = "$oracle_health_regexp$"
+ description = "name will be interpreted as a regular expression"
+ }
+ "--units" = {
+ value = "$oracle_health_units$"
+ description = "This is used for a better output of mode=sql and for specifying thresholds for mode=tablespace-free"
+ }
+ "--ident" = {
+ set_if = "$oracle_health_ident$"
+ description = "outputs instance and database names"
+ }
+ "--commit" = {
+ set_if = "$oracle_health_commit$"
+ description = "turns on autocommit for the dbd::oracle module"
+ }
+ "--noperfdata" = {
+ set_if = "$oracle_health_noperfdata$"
+ description = "do not output performance data"
+ }
+ "--timeout" = {
+ value = "$oracle_health_timeout$"
+ description = "plugin timeout. Default is 60 seconds"
+ }
+ "--report" = {
+ value = "$oracle_health_report$"
+ description = "select the plugin output format. Can be short, long or html. Default is long"
+ }
+ "--notemp" = {
+ set_if = "$oracle_health_notemp$"
+ description = "exclude temporary and system tables"
+ }
+ }
+
+ env = {
+ "ORACLE_HOME" = "$oracle_home$"
+ "LD_LIBRARY_PATH" = "$oracle_ld_library_path$"
+ "TNS_ADMIN" = "$oracle_tns_admin$"
+ }
+
+ vars.oracle_health_regexp = false
+ vars.oracle_health_ident = false
+ vars.oracle_health_commit = false
+ vars.oracle_health_noperfdata = false
+ vars.oracle_health_report = "long"
+ vars.oracle_health_notemp = false
+
+ vars.oracle_home = "/usr/lib/oracle/11.2/client64/lib"
+ vars.oracle_ld_library_path = "/usr/lib/oracle/11.2/client64/lib"
+ vars.oracle_tns_admin = ConfigDir + "/plugin-configs"
+}
+
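+/* Editor's note: a minimal usage sketch, not part of the upstream file. The TNS alias,
+ * credentials, mode, thresholds and host filter are illustrative assumptions; the
+ * ORACLE_HOME/TNS_ADMIN environment comes from the command definition above. */
+apply Service "oracle-tablespace-usage" {
+  check_command = "oracle_health"
+  vars.oracle_health_connect = "ORCL"          // TNS alias from tnsnames.ora (assumption)
+  vars.oracle_health_username = "monitoring"
+  vars.oracle_health_password = "secret"
+  vars.oracle_health_mode = "tablespace-usage"
+  vars.oracle_health_warning = 80
+  vars.oracle_health_critical = 90
+  assign where host.vars.oracle == true
+}
+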
+object CheckCommand "postgres" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_postgres.pl" ]
+
+ arguments = {
+ "-H" = {
+ value = "$postgres_host$"
+ set_if = {{ macro("$postgres_unixsocket$") == false }}
+ description = "hostname(s) to connect to; defaults to none (Unix socket)"
+ }
+ "-p" = {
+ value = "$postgres_port$"
+ description = "port(s) to connect to; defaults to 5432"
+ }
+ "-db" = {
+ value = "$postgres_dbname$"
+ description = "database name(s) to connect to; defaults to 'postgres' or 'template1'"
+ }
+ "-u" = {
+ value = "$postgres_dbuser$"
+ description = "database user(s) to connect as; defaults to 'postgres'"
+ }
+ "--dbpass" = {
+ value = "$postgres_dbpass$"
+ description = "database password(s); use a .pgpass file instead when possible"
+ }
+ "--dbservice" = {
+ value = "$postgres_dbservice$"
+ description = "service name to use inside of pg_service.conf"
+ }
+ "-w" = {
+ value = "$postgres_warning$"
+ description = "the warning threshold, range depends on the action"
+ }
+ "-c" = {
+ value = "$postgres_critical$"
+ description = "the critical threshold, range depends on the action"
+ }
+ "--include" = {
+ value = "$postgres_include$"
+ description = "name(s) items to specifically include (e.g. tables), depends on the action"
+ }
+ "--exclude" = {
+ value = "$postgres_exclude$"
+ description = "name(s) items to specifically exclude (e.g. tables), depends on the action"
+ }
+ "--includeuser" = {
+ value = "$postgres_includeuser$"
+ description = "include objects owned by certain users"
+ }
+ "--excludeuser" = {
+ value = "$postgres_excludeuser$"
+ description = "exclude objects owned by certain users"
+ }
+ "--assume-standby-mode" = {
+ set_if = "$postgres_standby$"
+ description = "assume that the server is in continuous WAL recovery mode"
+ }
+ "--assume-prod" = {
+ set_if = "$postgres_production$"
+ description = "assume that the server is in production mode"
+ }
+ "--action" = {
+ value = "$postgres_action$"
+ description = "determines the test executed"
+ }
+ "--query" = {
+ value = "$postgres_query$"
+ description = "query for custom_query action"
+ }
+ "--valtype" = {
+ value = "$postgres_valtype$"
+ description = "determines the result type for custom_query action"
+ }
+ "--reverse" = {
+ set_if = "$postgres_reverse$"
+ description = "reverses warning and critical for custom_query action"
+ }
+ "--tempdir" = {
+ value = "$postgres_tempdir$"
+ description = "specify directory for temporary files. default depends on the OS"
+ }
+ "--datadir" = {
+ value = "$postgres_datadir$"
+ description = "location of the PostgreSQL data directory"
+ }
+ "--language" = {
+ value = "$postgres_language$"
+ description = "language to use for messages"
+ }
+ "--perflimit" = {
+ value = "$postgres_perflimit$"
+ description = "limit the number of performance data to report"
+ }
+ }
+
+ env = {
+ "PGCONTROLDATA" = "$postgres_pgcontroldata$"
+ }
+
+ vars.postgres_host = "$check_address$"
+ vars.postgres_standby = false
+ vars.postgres_production = false
+ vars.postgres_unixsocket = false
+}
+
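+/* Editor's note: a minimal usage sketch, not part of the upstream file. The action,
+ * thresholds, role name and host filter are illustrative assumptions; postgres_host
+ * already defaults to the check address above. */
+apply Service "postgres-backends" {
+  check_command = "postgres"
+  vars.postgres_dbuser = "monitoring"    // hypothetical monitoring role
+  vars.postgres_action = "backends"      // number of client connections
+  vars.postgres_warning = "80%"
+  vars.postgres_critical = "90%"
+  assign where host.vars.postgres == true
+}
+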
+object CheckCommand "mongodb" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_mongodb.py" ]
+
+ arguments = {
+ "-H" = {
+ value = "$mongodb_host$"
+ description = "The hostname you want to connect to"
+ }
+ "-P" = {
+ value = "$mongodb_port$"
+ description = "The port mongodb is running on"
+ }
+ "-u" = {
+ value = "$mongodb_user$"
+ description = "The username you want to login as"
+ }
+ "-p" = {
+ value = "$mongodb_passwd$"
+ description = "The password you want to use for that user"
+ }
+ "-a" = {
+ value = "$mongodb_authdb$"
+ description = "The database you want to authenticate against"
+ }
+ "-A" = {
+ value = "$mongodb_action$"
+ description = "The action you want to take"
+ }
+ "-c" = {
+ value = "$mongodb_collection$"
+ description = "Specify the collection to check"
+ }
+ "-T" = {
+ value = "$mongodb_sampletime$"
+ description = "Time used to sample number of pages faults"
+ }
+ "-q" = {
+ value = "$mongodb_querytype$"
+ description = "The query type to check [query|insert|update|delete|getmore|command] from queries_per_second"
+ }
+ "--database" = {
+ value = "$mongodb_database$"
+ description = "Specify the database to check"
+ }
+ "-D" = {
+ set_if = "$mongodb_perfdata$"
+ description = "Enable output of Nagios performance data"
+ }
+ "--max-lag" = {
+ set_if = "$mongodb_maxlag$"
+ description = "Get max replication lag (for replication_lag action only)"
+ }
+ "--mapped-memory" = {
+ set_if = "$mongodb_mappedmemory$"
+ description = "Get mapped memory instead of resident (if resident memory can not be read)"
+ }
+ "--ssl" = {
+ set_if = "$mongodb_ssl$"
+ description = "Connect using SSL"
+ }
+ "--ssl-ca-cert-file" = {
+ value = "$mongodb_ssl_ca_cert_file$"
+ description = "Path to certificate authority file for SSL"
+ }
+ "--replicaset" = {
+ value = "$mongodb_replicaset$"
+ set_if = "$mongodb_replcheck$"
+ description = "Connect to replicaset"
+ }
+ "--all-databases" = {
+ set_if = "$mongodb_alldatabases$"
+ description = "Check all databases (action database_size)"
+ }
+ "-C" = {
+ value = "$mongodb_critical$"
+ description = "The critical threshold we want to set"
+ }
+ "-W" = {
+ value = "$mongodb_warning$"
+ description = "The warning threshold we want to set"
+ }
+ "--disable_retry_writes" = {
+ set_if = "$mongodb_disableretrywrites$"
+ description = "Disable Retry Writes"
+ }
+ }
+
+ vars.mongodb_host = {{
+ var mongodbAddress = macro("$mongodb_address$")
+ var checkAddress = macro("$check_address$")
+
+ if (mongodbAddress) {
+ log(LogWarning, "CheckerComponent", "The attribute 'mongodb_address' is deprecated, use 'mongodb_host' instead.")
+ return mongodbAddress
+ } else {
+ return checkAddress
+ }
+ }}
+
+ vars.mongodb_perfdata = true
+ vars.mongodb_action = "connections"
+}
+
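+/* Editor's note: a minimal usage sketch, not part of the upstream file. Thresholds
+ * and the host filter are illustrative assumptions; "connections" is already the
+ * default action set above and mongodb_host falls back to the check address. */
+apply Service "mongodb-connections" {
+  check_command = "mongodb"
+  vars.mongodb_port = 27017
+  vars.mongodb_warning = 70     // percent of available connections in use
+  vars.mongodb_critical = 80
+  assign where host.vars.mongodb == true
+}
+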
+object CheckCommand "elasticsearch" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_elasticsearch" ]
+
+ arguments = {
+ "--failure-domain" = {
+ value = "$elasticsearch_failuredomain$"
+ description = "A comma-separated list of ElasticSearch attributes that make up your cluster's failure domain"
+ }
+ "--host" = {
+ value = "$elasticsearch_host$"
+ description = "Hostname or network address to probe, defaults to 'localhost'"
+ }
+ "--master-nodes" = {
+ value = "$elasticsearch_masternodes$"
+ description = "Issue a warning if the number of master-eligible nodes in the cluster drops below this number. By default, do not monitor the number of nodes in the cluster"
+ }
+ "--port" = {
+ value = "$elasticsearch_port$"
+ description = "TCP port to probe, defaults to 9200"
+ }
+ "--prefix" = {
+ value = "$elasticsearch_prefix$"
+ description = "Optional prefix for the ElasticSearch API, defaults to ''"
+ }
+ "--yellow-critical" = {
+ value = "TRUE"
+ set_if = "$elasticsearch_yellowcritical$"
+ description = "Instead of issuing a 'warning' for a yellow cluster state, issue a 'critical' alert"
+ }
+ }
+
+ vars.elasticsearch_host = "$check_address$"
+ vars.elasticsearch_yellowcritical = false
+}
+
+object CheckCommand "redis" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_redis.pl" ]
+
+ arguments = {
+ "--hostname" = {
+ value = "$redis_hostname$"
+ description = "Hostname or IP Address to check."
+ }
+ "--port" = {
+ value = "$redis_port$"
+ description = "Port number (default: 6379)."
+ }
+ "--database" = {
+ value = "$redis_database$"
+ description = "Optional database name (usually a number), needed for redis_query."
+ }
+ "--password" = {
+ value = "$redis_password$"
+ description = "Password for Redis authentication. Safer alternative is to put them in a file and use redis_credentials."
+ }
+ "--credentials" = {
+ value = "$redis_credentials$"
+ description = "Credentials file to read for Redis authentication."
+ }
+ "--timeout" = {
+ value = "$redis_timeout$"
+ description = "Allows setting a timeout for the execution of this plugin."
+ }
+ "--variables" = {
+ value = "$redis_variables$"
+ description = "List of variables from info data to do threshold checks on."
+ }
+ "--warn" = {
+ value = "$redis_warn$"
+ description = "This option can only be used if redis_variables is used and number of values listed here must exactly match number of variables specified."
+ }
+ "--crit" = {
+ value = "$redis_crit$"
+ description = "This option can only be used if redis_variables is used and number of values listed here must exactly match number of variables specified."
+ }
+ "--perfparse" = {
+ set_if = "$redis_perfparse$"
+ description = "This should only be used with variables and causes variable data not only to be printed as part of main status line but also as perfparse compatible output."
+ }
+ "--perfvars" = {
+ value = "$redis_perfvars$"
+ description = "This allows listing variables whose values will go only into perfparse output (and not threshold checking)."
+ }
+ "--prev_perfdata" = {
+ value = "$service.perfdata$"
+ set_if = "$redis_prev_perfdata$"
+ description = "Previous performance data used to calculate rate of change for counter statistics variables and for proper calculation of hitrate."
+ }
+ "--rate_label" = {
+ value = "$redis_rate_label$"
+ description = "Prefix or Suffix label used to create a new variable which has rate of change of another base variable."
+ }
+ "--query" = {
+ value = "$redis_query$"
+ repeat_key = true
+ description = "Option specifies key to query and optional variable name to assign the results to after. See the help output of the plugin for the detailed format."
+ }
+ "--option" = {
+ value = "$redis_option$"
+ repeat_key = true
+ description = "Specifiers are separated by , and must include NAME or PATTERN. See the help output of the plugin for the detailed format."
+ }
+ "--response_time" = {
+ value = "$redis_response_time$"
+ description = "If this is used plugin will measure and output connection response time in seconds. With perfparse this would also be provided on perf variables."
+ }
+ "--hitrate" = {
+ value = "$redis_hitrate$"
+ description = "Calculates Hitrate."
+ }
+ "--memory_utilization" = {
+ value = "$redis_memory_utilization$"
+ description = "This calculates percent of total memory on system used by redis."
+ }
+ "--total_memory" = {
+ value = "$redis_total_memory$"
+ description = "Amount of memory on a system for memory utilization calculations above."
+ }
+ "--replication_delay" = {
+ value = "$redis_replication_delay$"
+ description = "Allows to set threshold on replication delay info."
+ }
+ }
+
+ vars.redis_hostname = "$check_address$"
+ vars.redis_perfparse = false
+ vars.redis_prev_perfdata = false
+}
+
+object CheckCommand "proxysql" {
+ import "plugin-check-command"
+ command = [ PluginContribDir + "/check_proxysql" ]
+
+ arguments = {
+ "--user" = {
+ value = "$proxysql_user$"
+ description = "ProxySQL admin username (default=admin)"
+ }
+ "--password" = {
+ value = "$proxysql_password$"
+ description = "ProxySQL admin password (default=admin)"
+ }
+ "--host" = {
+ value = "$proxysql_host$"
+ description = "ProxySQL hostname / IP (default=127.0.0.1)"
+ }
+ "--port" = {
+ value = "$proxysql_port$"
+ description = "ProxySQL admin port (default=6032)"
+ }
+ "--defaults-file" = {
+ value = "$proxysql_defaultfile$"
+ description = "ProxySQL defaults file"
+ }
+ "--type" = {
+ value = "$proxysql_type$"
+ description = "ProxySQL check type (one of conns,hg,rules,status,var)"
+ required = true
+ }
+ "--name" = {
+ value = "$proxysql_name$"
+ description = "ProxySQL variable name to check"
+ }
+ "--lower" = {
+ value = "$proxysql_lower$"
+ description = "Alert if the ProxySQL value is LOWER than the defined WARN / CRIT thresholds (only applies to 'var' check type)"
+ }
+ "--runtime" = {
+ value = "$proxysql_runtime$"
+ description = "Force ProxySQL Nagios check to query the runtime_mysql_XXX tables rather than the mysql_XXX tables"
+ }
+ "--warning" = {
+ value = "$proxysql_warning$"
+ description = "Warning threshold"
+ }
+ "--critical" = {
+ value = "$proxysql_critical$"
+ description = "Critical threshold"
+ }
+ "--include-hostgroup" = {
+ value = "$proxysql_include_hostgroup$"
+ description = "ProxySQL hostgroup(s) to include (only applies to '--type hg' checks, accepts comma-separated list)"
+ }
+ "--ignore-hostgroup" = {
+ value = "$proxysql_ignore_hostgroup$"
+ description = "ProxySQL hostgroup(s) to ignore (only applies to '--type hg' checks, accepts comma-separated list)"
+ }
+ }
+}
+
+object CheckCommand "memcached" {
+ command = [ PluginContribDir + "/check_memcached" ]
+
+ arguments = {
+ "-H" = {
+ value = "$memcached_hostname$"
+ required = true
+ description = "Hostname or IP address (required); an optional ':port' suffix overrides -p"
+ }
+ "-p" = {
+ value = "$memcached_port$"
+ description = "Port number (default: 11211)"
+ }
+ "-v" = {
+ set_if = "$memcached_verbose$"
+ description = "verbose messages"
+ }
+ "-n" = {
+ value = "$memcached_keep$"
+ description = "Keep up to this many items in the history object in memcached (default: 30)"
+ }
+ "-T" = {
+ value = "$memcached_minimum_stat_interval$"
+ description = "Minimum time interval (in minutes) to use to analyse stats. (default: 30)"
+ }
+ "-w" = {
+ value = "$memcached_warning_hits_misses$"
+ description = "Generate warning if quotient of hits/misses falls below this value (default: 2.0)"
+ }
+ "-E" = {
+ value = "$memcached_warning_evictions$"
+ description = "Generate warning if number of evictions exceeds this threshold. 0=disable. (default: 10)"
+ }
+ "-t" = {
+ value = "$memcached_timeout$"
+ description = "timeout in seconds (default: 1.0)"
+ }
+ "-k" = {
+ value = "$memcached_key$"
+ description = "key name for history object (default: check_memcached)"
+ }
+ "-K" = {
+ value = "$memcached_expiry$"
+ description = "expiry time in seconds for history object (default: 7200)"
+ }
+ "-r" = {
+ set_if = "$memcached_performance_output$"
+ description = "output performance statistics as rate-per-minute figures (better suited to pnp4nagios)"
+ }
+ }
+
+ vars.memcached_hostname = "127.0.0.1"
+ vars.memcached_minimum_stat_interval = "10"
+ vars.memcached_performance_output = true
+}
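+
+/* Editor's note: a minimal usage sketch, not part of the upstream file. Thresholds
+ * and the host filter are illustrative assumptions. */
+apply Service "memcached" {
+  check_command = "memcached"
+  vars.memcached_hostname = host.address
+  vars.memcached_port = 11211
+  vars.memcached_timeout = 2
+  assign where host.vars.memcached == true
+}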
diff --git a/itl/plugins-contrib.d/hardware.conf b/itl/plugins-contrib.d/hardware.conf
new file mode 100644
index 0000000..c412cb7
--- /dev/null
+++ b/itl/plugins-contrib.d/hardware.conf
@@ -0,0 +1,267 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+object CheckCommand "hpasm" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_hpasm" ]
+
+ arguments = {
+ "--hostname" = {
+ value = "$hpasm_hostname$"
+ set_if = "$hpasm_remote$"
+ description = "Hostname or IP-address of the server (SNMP mode only)"
+ }
+ "--community" = {
+ value = "$hpasm_community$"
+ description = "SNMP community of the server (SNMP v1/2 only)"
+ }
+ "--protocol" = {
+ value = "$hpasm_protocol$"
+ description = "The SNMP protocol to use (default: 2c, other possibilities: 1,3)"
+ }
+ "--port" = {
+ value = "$hpasm_port$"
+ description = "The SNMP port to use (default: 161)"
+ }
+ "--blacklist" = {
+ value = "$hpasm_blacklist$"
+ description = "Blacklist some (missing/failed) components"
+ }
+ "--ignore-dimms" = {
+ set_if = "$hpasm_ignore-dimms$"
+ description = "Ignore \"N/A\"-DIMM status on misc. servers (e.g. older DL320)"
+ }
+ "--ignore-fan-redundancy" = {
+ set_if = "$hpasm_ignore-fan-redundancy$"
+ description = "Ignore missing redundancy partners"
+ }
+ "--customthresholds" = {
+ value = "$hpasm_customthresholds$"
+ description = "Use custom thresholds for certain temperatures"
+ }
+ "--eventrange" = {
+ value = "$hpasm_eventrange$"
+ description = "Period of time before critical IML events respectively become warnings or vanish. A range is described as a number and a unit (s, m, h, d), e.g. --eventrange 1h/20m."
+ }
+ "--perfdata" = {
+ value = "$hpasm_perfdata$"
+ description = "Output performance data. If your performance data string becomes too long and is truncated by Nagios, then you can use --perfdata=short instead. This will output temperature tags without location information"
+ }
+ "--username" = {
+ value = "$hpasm_username$"
+ description = "The securityName for the USM security model (SNMPv3 only)"
+ }
+ "--authpassword" = {
+ value = "$hpasm_authpassword$"
+ description = "The authentication password for SNMPv3"
+ }
+ "--authprotocol" = {
+ value = "$hpasm_authprotocol$"
+ description = "The authentication protocol for SNMPv3 (md5|sha)"
+ }
+ "--privpassword" = {
+ value = "$hpasm_privpassword$"
+ description = "The password for authPriv security level"
+ }
+ "--privprotocol" = {
+ value = "$hpasm_privprotocol$"
+ description = "The private protocol for SNMPv3 (des|aes|aes128|3des|3desde)"
+ }
+ "--servertype" = {
+ value = "$hpasm_servertype$"
+ description = "The type of the server: proliant (default) or bladesystem"
+ }
+ "--eval-nics" = {
+ set_if = "$hpasm_eval-nics$"
+ description = "Check network interfaces (and groups). Try it and report back what you think about it. I need to build up some know-how on this subject. If you get an error and you think it is not justified for your configuration, please tell me about it. (Always send the output of \"snmpwalk -On .... 1.3.6.1.4.1.232\" and a description of how you set up your NICs and why it is correct as opposed to the plugin's error message.)"
+ }
+ }
+ vars.hpasm_remote = true
+ vars.hpasm_hostname = "$check_address$"
+}
+
+object CheckCommand "openmanage" {
+ import "plugin-check-command"
+ command = [ PluginContribDir + "/check_openmanage" ]
+ arguments += {
+ "--all" = {
+ set_if = "$openmanage_all$"
+ description = "Check everything, even log content"
+ }
+ "--blacklist" = {
+ value = "$openmanage_blacklist$"
+ repeat_key = true
+ description = "Blacklist missing and/or failed components"
+ }
+ "--check" = {
+ value = "$openmanage_check$"
+ description = "Fine-tune which components are checked"
+ }
+ "--community" = {
+ value = "$openmanage_community$"
+ description = "SNMP community string [default=public]"
+ }
+ "--config" = {
+ value = "$openmanage_config$"
+ description = "Specify configuration file"
+ }
+ "--critical" = {
+ value = "$openmanage_critical$"
+ description = "Custom temperature critical limits"
+ }
+ "--extinfo" = {
+ set_if = "$openmanage_extinfo$"
+ description = "Append system info to alerts"
+ }
+ "--fahrenheit" = {
+ set_if = "$openmanage_fahrenheit$"
+ description = "Use Fahrenheit as temperature unit"
+ }
+ "--hostname" = {
+ value = "$openmanage_hostname$"
+ description = "Hostname or IP (required for SNMP)"
+ }
+ "--htmlinfo" = {
+ set_if = "$openmanage_htmlinfo$"
+ description = "HTML output with clickable links"
+ }
+ "--info" = {
+ set_if = "$openmanage_info$"
+ description = "Prefix any alerts with the service tag"
+ }
+ "--ipv6" = {
+ set_if = "$openmanage_ipv6$"
+ description = "Use IPv6 instead of IPv4 [default=no]"
+ }
+ "--legacy-perfdata" = {
+ set_if = "$openmanage_legacy_perfdata$"
+ description = "legacy performance data output"
+ }
+ "--no-storage" = {
+ set_if = "$openmanage_no_storage$"
+ description = "Don't check storage"
+ }
+ "--only" = {
+ value = "$openmanage_only$"
+ description = "Only check a certain component or alert type"
+ }
+ "--perfdata" = {
+ set_if = "$openmanage_perfdata$"
+ description = "Output performance data [default=no]"
+ }
+ "--port" = {
+ value = "$openmanage_port$"
+ description = "SNMP port number [default=161]"
+ }
+ "--protocol" = {
+ value = "$openmanage_protocol$"
+ description = "SNMP protocol version [default=2c]"
+ }
+ "--short-state" = {
+ set_if = "$openmanage_short_state$"
+ description = "Prefix alerts with alert state abbreviated"
+ }
+ "--show-blacklist" = {
+ set_if = "$openmanage_show_blacklist$"
+ description = "Show blacklistings in OK output"
+ }
+ "--state" = {
+ set_if = "$openmanage_state$"
+ description = "Prefix alerts with alert state"
+ }
+ "--tcp" = {
+ set_if = "$openmanage_tcp$"
+ description = "Use TCP instead of UDP [default=no]"
+ }
+ "--timeout" = {
+ value = "$openmanage_timeout$"
+ description = "Plugin timeout in seconds [default=30]"
+ }
+ "--vdisk-critical" = {
+ set_if = "$openmanage_vdisk_critical$"
+ description = "Make any alerts on virtual disks critical"
+ }
+ "--warning" = {
+ value = "$openmanage_warning$"
+ description = "Custom temperature warning limits"
+ }
+ }
+}
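+
+/*
+ * Illustrative usage sketch (not part of the upstream definition): a Service
+ * apply rule for the "openmanage" CheckCommand. The "generic-service" template
+ * and the host custom variable in the assign rule are assumptions.
+ *
+ *   apply Service "openmanage" {
+ *     import "generic-service"
+ *     check_command = "openmanage"
+ *     vars.openmanage_hostname = "$address$"
+ *     vars.openmanage_perfdata = true
+ *     assign where host.vars.hardware_vendor == "dell"
+ *   }
+ */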
+
+object CheckCommand "lmsensors" {
+ command = [ PluginContribDir + "/check_lmsensors" ]
+
+ arguments = {
+ "-w" = {
+ value = "$lmsensors_warning$"
+ description = "Exit with WARNING status if above INTEGER degrees"
+ required = true
+ }
+ "-c" = {
+ value = "$lmsensors_critical$"
+ description = "Exit with CRITICAL status if above INTEGER degrees"
+ required = true
+ }
+ "--sensor" = {
+ value = "$lmsensors_sensor$"
+ description = "Set what to monitor, for example CPU or MB (or M/B). Check sensors for the correct word. Default is CPU."
+ }
+ }
+
+ vars.lmsensors_warning = "75"
+ vars.lmsensors_critical = "80"
+ vars.lmsensors_sensor = "Core"
+}
+
+object CheckCommand "hddtemp" {
+ import "plugin-check-command"
+ command = [ PluginContribDir + "/check_hddtemp" ]
+
+ arguments = {
+ "--server" = {
+ value = "$hddtemp_server$"
+ description = "server name or address"
+ required = true
+ }
+ "--port" = {
+ value = "$hddtemp_port$"
+ description = "port number"
+ }
+ "--devices" = {
+ value = "$hddtemp_devices$"
+ description = "comma separated devices list, or empty for all devices in hddtemp response"
+ }
+ "--separator" = {
+ value = "$hddtemp_separator$"
+ description = "hddtemp separator"
+ }
+ "--warning" = {
+ value = "$hddtemp_warning$"
+ description = "warning temperature"
+ required = true
+ }
+ "--critical" = {
+ value = "$hddtemp_critical$"
+ description = "critical temperature"
+ required = true
+ }
+ "--timeout" = {
+ value = "$hddtemp_timeout$"
+ description = "receiving data from hddtemp operation network timeout"
+ }
+ "--performance-data" = {
+ set_if = "$hddtemp_performance$"
+ description = "return performance data"
+ }
+ "--quiet" = {
+ set_if = "$hddtemp_quiet$"
+ description = "be quiet"
+ }
+ }
+
+ vars.hddtemp_server = "127.0.0.1"
+ vars.hddtemp_warning = 55
+ vars.hddtemp_critical = 60
+ vars.hddtemp_performance = true
+ vars.hddtemp_timeout = 5
+}
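+
+/*
+ * Illustrative usage sketch (not part of the upstream definition), overriding
+ * the hddtemp defaults above. Host name and device list are placeholders.
+ *
+ *   object Service "hddtemp" {
+ *     host_name = NodeName
+ *     import "generic-service"
+ *     check_command = "hddtemp"
+ *     vars.hddtemp_devices = "/dev/sda,/dev/sdb"
+ *     vars.hddtemp_warning = 50
+ *     vars.hddtemp_critical = 58
+ *   }
+ */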
diff --git a/itl/plugins-contrib.d/icingacli.conf b/itl/plugins-contrib.d/icingacli.conf
new file mode 100644
index 0000000..6914f68
--- /dev/null
+++ b/itl/plugins-contrib.d/icingacli.conf
@@ -0,0 +1,145 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+template CheckCommand "icingacli" {
+ command = [ PrefixDir + "/bin/icingacli" ]
+}
+
+object CheckCommand "icingacli-businessprocess" {
+ import "icingacli"
+
+ command += [ "businessprocess", "process", "check" ]
+
+ arguments = {
+ "--ack-is-ok" = {
+ set_if = "$icingacli_businessprocess_ackisok$"
+ description = "Treat acknowledged hosts/services always as UP/OK"
+ }
+ "--blame" = {
+ set_if = "$icingacli_businessprocess_blame$"
+ description = "Show problem details as a tree reduced to the nodes which have the same state as the business process"
+ }
+ "--colors" = {
+ set_if = "$icingacli_businessprocess_colors$"
+ description = "Show colored output"
+ }
+ "--config" = {
+ value = "$icingacli_businessprocess_config$"
+ description = "Configuration file containing your business process without file extension"
+ }
+ "--details" = {
+ set_if = "$icingacli_businessprocess_details$"
+ description = "Get details for root cause analysis"
+ }
+ "--state-type" = {
+ value = "$icingacli_businessprocess_statetype$"
+ description = "Define which state type to look at. Could be either soft or hard, overrides an eventually configured default"
+ }
+ "--downtime-is-ok" = {
+ set_if = "$icingacli_businessprocess_downtimeisok$"
+ description = "Treat hosts/services in downtime always as UP/OK"
+ }
+ "--process" = {
+ value = "$icingacli_businessprocess_process$"
+ description = "Business process to monitor"
+ skip_key = true
+ required = true
+ order = -1
+ }
+ "--root-cause" = {
+ set_if = "$icingacli_businessprocess_rootcause$"
+ description = "Used in combination with --blame. Only shows the paths of the nodes which are responsible for the state of the business process"
+ }
+ }
+
+ vars.icingacli_businessprocess_ackisok = false
+ vars.icingacli_businessprocess_blame = false
+ vars.icingacli_businessprocess_colors = false
+ vars.icingacli_businessprocess_details = false
+ vars.icingacli_businessprocess_downtimeisok = false
+ vars.icingacli_businessprocess_rootcause = false
+}
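+
+/*
+ * Illustrative usage sketch (not part of the upstream definition): checking a
+ * single business process. The config/process names are placeholders; the
+ * check runs on the host providing the Icinga Web 2 businessprocess module.
+ *
+ *   apply Service "bp-web-shop" {
+ *     import "generic-service"
+ *     check_command = "icingacli-businessprocess"
+ *     vars.icingacli_businessprocess_config = "shop"
+ *     vars.icingacli_businessprocess_process = "web-shop"
+ *     assign where host.name == NodeName
+ *   }
+ */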
+
+object CheckCommand "icingacli-director" {
+ import "icingacli"
+
+ command += [ "director", "health", "check" ]
+
+ arguments = {
+ "--check" = {
+ value = "$icingacli_director_check$"
+ description = "Run only a specific test suite"
+ }
+ "--db" = {
+ value = "$icingacli_director_db$"
+ description = "Use a specific Icinga Web DB resource"
+ }
+ }
+}
+
+object CheckCommand "icingacli-elasticsearch" {
+ import "icingacli"
+
+ command += [ "elasticsearch", "check" ]
+
+ arguments = {
+ "--instance" = {
+ value = "$icingacli_elasticsearch_instance$"
+ description = "Elasticsearch instance to connect to"
+ }
+ "--crit" = {
+ value = "$icingacli_elasticsearch_critical$"
+ description = "Critical threshold"
+ }
+ "--warn" = {
+ value = "$icingacli_elasticsearch_warning$"
+ description = "Warning threshold"
+ }
+ "--index" = {
+ value = "$icingacli_elasticsearch_index$"
+ description = "Index pattern to use when searching"
+ }
+ "--filter" = {
+ value = "$icingacli_elasticsearch_filter$"
+ description = "Filter for events"
+ }
+ "--from" = {
+ value = "$icingacli_elasticsearch_from$"
+ description = "Negative value of time to search from now"
+ }
+ }
+
+}
+
+object CheckCommand "icingacli-x509" {
+ import "icingacli"
+
+ command += [ "x509", "check", "host" ]
+
+ arguments = {
+ "--ip" = {
+ value = "$icingacli_x509_ip$"
+ description = "A hosts IP address"
+ }
+ "--host" = {
+ value = "$icingacli_x509_host$"
+ description = "A hosts name"
+ }
+ "--port" = {
+ value = "$icingacli_x509_port$"
+ description = "The port to check in particular"
+ }
+ "--warning" = {
+ value = "$icingacli_x509_warning$"
+ description = "Less remaining time results in state WARNING"
+ }
+ "--critical" = {
+ value = "$icingacli_x509_critical$"
+ description = "Less remaining time results in state CRITICAL"
+ }
+ "--allow-self-signed" = {
+ set_if = "$icingacli_x509_allow_self_signed$"
+ description = "Ignore if a certificate or its issuer has been self-signed"
+ }
+ }
+}
+
diff --git a/itl/plugins-contrib.d/ipmi.conf b/itl/plugins-contrib.d/ipmi.conf
new file mode 100644
index 0000000..6b72ae6
--- /dev/null
+++ b/itl/plugins-contrib.d/ipmi.conf
@@ -0,0 +1,123 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+object CheckCommand "ipmi-sensor" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_ipmi_sensor" ]
+
+ arguments = {
+ "-H" = {
+ value = "$ipmi_address$"
+ description = "Hostname or ip address of the IPMI interface (default: host.address or host.address6)"
+ }
+ "-f" = {
+ value = "$ipmi_config_file$"
+ description = "Path to the FreeIPMI configuration file"
+ }
+ "-U" = {
+ value = "$ipmi_username$"
+ description = "Username to connect with"
+ }
+ "-P" = {
+ value = "$ipmi_password$"
+ description = "Password to use"
+ }
+ "-L" = {
+ value = "$ipmi_privilege_level$"
+ description = "Privilege level of the user"
+ }
+ "-b" = {
+ set_if = "$ipmi_backward_compatibility_mode$"
+ description = "Enable backward compatibility mode (for FreeIPMI 0.5.*)"
+ }
+ "-T" = {
+ value = "$ipmi_sensor_type$"
+ description = "Limit sensors to query based on IPMI sensor type (seperated by comma)"
+ }
+ "-ST" = {
+ value = "$ipmi_sel_type$"
+ description = "Limit SEL entries to specific types. (seperated by comma)"
+ }
+ "-x" = {
+ value = "$ipmi_exclude_sensor_id$"
+ description = "Exclude sensor matching ipmi_sensor_id"
+ }
+ "-xT" = {
+ value = "$ipmi_exclude_sensor$"
+ description = "Exclude sensors based on IPMI sensor type. (seperated by comma)"
+ }
+ "-xST" = {
+ value = "$ipmi_exclude_sel$"
+ description = "Exclude SEL entries of specific sensor types. (seperated by comma)"
+ }
+ "-i" = {
+ value = "$ipmi_sensor_id$"
+ description = "Include sensor matching ipmi_sensor_id"
+ }
+ "--nosel" = {
+ set_if = "$ipmi_no_sel_checking$"
+ description = "Turn off system event log checking via ipmi-sel"
+ }
+ "--nothresholds" = {
+ set_if = "$ipmi_no_thresholds$"
+ description = "Turn off performance data thresholds from output-sensor-thresholds"
+ }
+ "-D" = {
+ value = "$ipmi_protocol_lan_version$"
+ description = "Change the protocol LAN version (default: LAN_2_0)"
+ }
+ "-fc" = {
+ value = "$ipmi_number_of_active_fans$"
+ description = "Number of fans that should be active"
+ }
+ "--fru" = {
+ set_if = "$ipmi_show_fru$"
+ description = "Print the product serial number got by ipmi-fru"
+ }
+ "--assettag" = {
+ set_if = "$ipmi_show_assettag$"
+ description = "Print the assettag if it is available in the IPMI FRU data. (--fru is mandatory)"
+ }
+ "--board" = {
+ set_if = "$ipmi_show_board$"
+ description = "Print additional motherboard information if it is available in the IPMI FRU data. (--fru is mandatory)"
+ }
+ "--noentityabsent" = {
+ set_if = "$ipmi_noentityabsent$"
+ description = "Skip sensor checks for sensors that have 'noentityabsent' as event state"
+ }
+ "-vv" = {
+ set_if = "$ipmi_verbose$"
+ description = "Be Verbose multi line output, also with additional details for warnings"
+ }
+ "-vvv" = {
+ set_if = "$ipmi_debug$"
+ description = "Be Verbose debugging output, followed by normal multi line output"
+ }
+ "-us" = {
+ value = "$ipmi_unify_file$"
+ description = "Path to the unify file to unify sensor names."
+ }
+ }
+
+ vars.ipmi_address = "$check_address$"
+ vars.ipmi_protocol_lan_version = "LAN_2_0"
+}
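+
+/*
+ * Illustrative usage sketch (not part of the upstream definition): querying a
+ * dedicated IPMI interface. The host custom variable and credentials are
+ * placeholders.
+ *
+ *   apply Service "ipmi-sensors" {
+ *     import "generic-service"
+ *     check_command = "ipmi-sensor"
+ *     vars.ipmi_address = host.vars.ipmi_address
+ *     vars.ipmi_username = "monitor"
+ *     vars.ipmi_password = "secret"
+ *     assign where host.vars.ipmi_address
+ *   }
+ */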
+
+/*
+ * Icinga2 CheckCommand definition for an IPMI interface ping check
+*/
+
+template CheckCommand "ipmi-alive-common" {
+ vars.ping_wrta = 5000.0
+ vars.ping_wpl = 100
+
+ vars.ping_crta = 5000.0
+ vars.ping_cpl = 100
+
+ vars.ping_packets = 1
+}
+object CheckCommand "ipmi-alive" {
+ import "ping"
+ import "ipmi-alive-common"
+}
diff --git a/itl/plugins-contrib.d/logmanagement.conf b/itl/plugins-contrib.d/logmanagement.conf
new file mode 100644
index 0000000..627dead
--- /dev/null
+++ b/itl/plugins-contrib.d/logmanagement.conf
@@ -0,0 +1,160 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+object CheckCommand "logstash" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_logstash" ]
+
+ arguments = {
+ "-H" = {
+ value = "$logstash_hostname$"
+ description = "Hostname where Logstash is running"
+ }
+ "-p" = {
+ value = "$logstash_port$"
+ description = "Port where Logstash is listening for API requests"
+ }
+ "--file-descriptor-threshold-warn" = {
+ value = "$logstash_filedesc_warn$"
+ description = "Warning threshold of file descriptor usage in percent"
+ }
+ "--file-descriptor-threshold-crit" = {
+ value = "$logstash_filedesc_crit$"
+ description = "Critical threshold of file descriptor usage in percent"
+ }
+ "--heap-usage-threshold-warn" = {
+ value = "$logstash_heap_warn$"
+ description = "Warning threshold of heap usage in percent"
+ }
+ "--heap-usage-threshold-crit" = {
+ value = "$logstash_heap_crit$"
+ description = "Critical threshold of heap usage in percent"
+ }
+ "--inflight-events-warn" = {
+ value = "$logstash_inflight_warn$"
+ description = "Warning threshold of inflight events"
+ }
+ "--inflight-events-crit" = {
+ value = "$logstash_inflight_crit$"
+ description = "Critical threshold of inflight events"
+ }
+ "--cpu-usage-threshold-warn" = {
+ value = "$logstash_cpu_warn$"
+ description = "Warning threshold for cpu usage in percent"
+ }
+ "--cpu-usage-threshold-crit" = {
+ value = "$logstash_cpu_crit$"
+ description = "Critical threshold for cpu usage in percent"
+ }
+ }
+
+ vars.logstash_hostname = "$check_address$"
+ vars.logstash_port = 9600
+ vars.logstash_filedesc_warn = 85
+ vars.logstash_filedesc_crit = 95
+ vars.logstash_heap_warn = 70
+ vars.logstash_heap_crit = 80
+}
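+
+/*
+ * Illustrative usage sketch (not part of the upstream definition), tightening
+ * the inflight-events thresholds. The assign rule custom variable is a
+ * placeholder.
+ *
+ *   apply Service "logstash" {
+ *     import "generic-service"
+ *     check_command = "logstash"
+ *     vars.logstash_inflight_warn = 5000
+ *     vars.logstash_inflight_crit = 10000
+ *     assign where host.vars.logstash
+ *   }
+ */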
+
+object CheckCommand "logfiles" {
+ command = [ PluginContribDir + "/check_logfiles" ]
+
+ arguments = {
+ "--tag" = {
+ value = "$logfiles_tag$"
+ description = "A short unique descriptor for this search. It will appear in the output of the plugin and is used to separare the different services."
+ }
+ "--logfile" = {
+ value = "$logfiles_logfile$"
+ description = "This is the name of the log file you want to scan."
+ }
+ "--rotation" = {
+ value = "$logfiles_rotation$"
+ description = "This is the method how log files are rotated. One of the predefined methods or a regular expression, which helps identify the rotated archives. If this key is missing, check_logfiles assumes that the log file will be simply overwritten instead of rotated."
+ }
+ "--criticalpattern" = {
+ value = "$logfiles_critical_pattern$"
+ description = "A regular expression which will trigger a critical error."
+ }
+ "--warningpattern" = {
+ value = "$logfiles_warning_pattern$"
+ description = "A regular expression which will trigger a warning error."
+ }
+ "--criticalexception" = {
+ value = "$logfiles_critical_exception$"
+ description = "A regular expression, the exceptions which are not counted as critical errors."
+ }
+ "--warningexception" = {
+ value = "$logfiles_warning_exception$"
+ description = "A regular expression, the exceptions which are not counted as warning errors."
+ }
+ "--okpattern" = {
+ value = "$logfiles_ok_pattern$"
+ description = "A regular expression which resets the error counters."
+ }
+ "--noprotocol" = {
+ set_if = "$logfiles_no_protocol$"
+ description = "Normally all the matched lines are written into a protocol file with this file’s name appearing in the plugin’s output. This option switches this off."
+ }
+ "--syslogserver" = {
+ set_if = "$logfiles_syslog_server$"
+ description = "With this option you limit the pattern matching to lines originating from the host check_logfiles is running on."
+ }
+ "--syslogclient" = {
+ value = "$logfiles_syslog_client$"
+ description = "With this option you limit the pattern matching to lines originating from the host named in this option."
+ }
+ "--sticky" = {
+ value = "$logfiles_sticky$"
+ description = "Errors are propagated through successive runs."
+ }
+ "--unstick" = {
+ set_if = "$logfiles_unstick$"
+ description = "Resets sticky errors."
+ }
+ "--config" = {
+ value = "$logfiles_config$"
+ description = "The name of a configuration file."
+ }
+ "--configdir" = {
+ value = "$logfiles_configdir$"
+ description = "The name of a configuration directory. Configfiles ending in .cfg or .conf are (recursively) imported."
+ }
+ "--searches" = {
+ value = "$logfiles_searches$"
+ description = "A list of tags of those searches which are to be run. Using this parameter, not all searches listed in the config file are run, but only those selected."
+ }
+ "--selectedsearches" = {
+ value = "$logfiles_selectedsearches$"
+ description = "A list of tags of those searches which are to be run. Using this parameter, not all searches listed in the config file are run, but only those selected."
+ }
+ "--report" = {
+ value = "$logfiles_report$"
+ description = "This option turns on multiline output (Default: off). The setting html generates a table which display the last hits in the service details view. Possible values are: short, long, html or off"
+ }
+ "--maxlength" = {
+ value = "$logfiles_max_length$"
+ description = "With this parameter long lines are truncated (Default: off). Some programs (e.g. TrueScan) generate entries in the eventlog of such a length, that the output of the plugin becomes longer than 1024 characters. NSClient++ discards these."
+ }
+ "--winwarncrit" = {
+ set_if = "$logfiles_winwarncrit$"
+ description = "With this parameter messages in the eventlog are classified by the type WARNING/ERROR (Default: off). Replaces or complements warning/criticalpattern."
+ }
+ "--rununique" = {
+ set_if = "$logfiles_run_unique$"
+ description = "This parameter prevents check_logfiles from starting when there’s already another instance using the same config file. (exits with UNKNOWN)"
+ }
+ "--timeout" = {
+ value = "$logfiles_timeout$"
+ description = "This parameter causes an abort of a running search after a defined number of seconds. It is an aborted in a controlled manner, so that the lines which have been read so far, are used for the computation of the final result."
+ }
+ "--warning" = {
+ value = "$logfiles_warning$"
+ description = "Complex handler-scripts can be provided with a warning-parameter this way. Inside the scripts the value is accessible as the macro CL_WARNING."
+ }
+ "--critical" = {
+ value = "$logfiles_critical$"
+ description = "Complex handler-scripts can be provided with a critical-parameter this way. Inside the scripts the value is accessible as the macro CL_CRITICAL."
+ }
+ }
+}
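+
+/*
+ * Illustrative usage sketch (not part of the upstream definition): scanning a
+ * single log file for a critical pattern. Tag, path and pattern are
+ * placeholders.
+ *
+ *   object Service "app-errors" {
+ *     host_name = NodeName
+ *     import "generic-service"
+ *     check_command = "logfiles"
+ *     vars.logfiles_tag = "app"
+ *     vars.logfiles_logfile = "/var/log/app/app.log"
+ *     vars.logfiles_critical_pattern = "ERROR|FATAL"
+ *   }
+ */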
diff --git a/itl/plugins-contrib.d/metrics.conf b/itl/plugins-contrib.d/metrics.conf
new file mode 100644
index 0000000..856ba75
--- /dev/null
+++ b/itl/plugins-contrib.d/metrics.conf
@@ -0,0 +1,62 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+object CheckCommand "graphite" {
+ command = [ PluginContribDir + "/check_graphite" ]
+
+ arguments = {
+ "-u" = {
+ value = "$graphite_url$"
+ description = "Target url"
+ required = true
+ }
+ "-m" = {
+ value = "$graphite_metric$"
+ description = "Metric path string"
+ required = true
+ }
+ "-s" = {
+ value = "$graphite_shortname$"
+ description = "Metric short name (used for performance data)"
+ }
+ "-d" = {
+ value = "$graphite_duration$"
+ description = "Length, in minute of data to parse (default: 5)"
+ }
+ "-f" = {
+ value = "$graphite_function$"
+ description = "Function applied to metrics for thresholds (default: average)"
+ }
+ "-w" = {
+ value = "$graphite_warning$"
+ description = "Warning threshold"
+ required = true
+ }
+ "-c" = {
+ value = "$graphite_critical$"
+ description = "Critical threshold"
+ required = true
+ }
+ "-U" = {
+ value = "$graphite_units$"
+ description = "Adds a text tag to the metric count in the plugin output. Useful to identify the metric units. Doesn't affect data queries."
+ }
+ "-M" = {
+ value = "$graphite_message$"
+ description = "Text message to output (default: 'metric count:')"
+ }
+ "-z" = {
+ set_if = "$graphite_zero_on_error$"
+ description = "Return 0 on a graphite 500 error"
+ }
+ "-l" = {
+ set_if = "$graphite_link_graph$"
+ description = "Add a link in the plugin output, showing a 24h graph for this metric in graphite."
+ }
+ }
+
+ vars.graphite_duration = "5"
+ vars.graphite_function = "average"
+ vars.graphite_message = "metric count:"
+ vars.graphite_zero_on_error = false
+ vars.graphite_link_graph = false
+}
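+
+/*
+ * Illustrative usage sketch (not part of the upstream definition): alerting on
+ * a Graphite metric. URL and metric path are placeholders.
+ *
+ *   apply Service "graphite-load" {
+ *     import "generic-service"
+ *     check_command = "graphite"
+ *     vars.graphite_url = "https://graphite.example.com"
+ *     vars.graphite_metric = "collectd.$host.name$.load.load.shortterm"
+ *     vars.graphite_warning = 4
+ *     vars.graphite_critical = 8
+ *     assign where host.vars.graphite
+ *   }
+ */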
diff --git a/itl/plugins-contrib.d/network-components.conf b/itl/plugins-contrib.d/network-components.conf
new file mode 100644
index 0000000..c7bcacc
--- /dev/null
+++ b/itl/plugins-contrib.d/network-components.conf
@@ -0,0 +1,1089 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+object CheckCommand "interfacetable" {
+ command = [ PluginContribDir + "/check_interface_table_v3t" ]
+
+ arguments = {
+ "-H" = {
+ value = "$interfacetable_hostquery$"
+ description = "Specifies the remote host to poll"
+ }
+ "-h" = {
+ value = "$interfacetable_hostdisplay$"
+ description = "Specifies the hostname to display in the HTML link"
+ }
+ "-r" = {
+ set_if = "$interfacetable_regex$"
+ description = "Interface names and property names for some other options will be interpreted as regular expressions"
+ }
+ "--outputshort" = {
+ set_if = "$interfacetable_outputshort$"
+ description = "Reduce the verbosity of the plugin output"
+ }
+ "-e" = {
+ value = "$interfacetable_exclude$"
+ description = "Comma separated list of interfaces globally excluded from the monitoring"
+ }
+ "-i" = {
+ value = "$interfacetable_include$"
+ description = "Comma separated list of interfaces globally included in the monitoring"
+ }
+ "--alias-matching" = {
+ set_if = "$interfacetable_aliasmatching$"
+ description = "Allow you to specify alias in addition to interface names"
+ }
+ "--et" = {
+ value = "$interfacetable_excludetraffic$"
+ description = "Comma separated list of interfaces excluded from traffic checks"
+ }
+ "--it" = {
+ value = "$interfacetable_includetraffic$"
+ description = "Comma separated list of interfaces included for traffic checks"
+ }
+ "--wt" = {
+ value = "$interfacetable_warningtraffic$"
+ description = "Interface traffic load percentage leading to a warning alert"
+ }
+ "--ct" = {
+ value = "$interfacetable_criticaltraffic$"
+ description = "Interface traffic load percentage leading to a critical alert"
+ }
+ "--pkt" = {
+ set_if = "$interfacetable_pkt$"
+ description = "Add unicast/non-unicast pkt stats for each interface"
+ }
+ "--trafficwithpkt" = {
+ set_if = "$interfacetable_trafficwithpkt$"
+ description = "Enable traffic calculation using pkt counters instead of octet counters. Useful when using 32-bit counters to track the load on > 1GbE interfaces."
+ }
+ "--tp" = {
+ value = "$interfacetable_trackproperty$"
+ description = "List of tracked properties"
+ }
+ "--ep" = {
+ value = "$interfacetable_excludeproperty$"
+ description = "Comma separated list of interfaces excluded from the property tracking"
+ }
+ "--ip" = {
+ value = "$interfacetable_includeproperty$"
+ description = "Comma separated list of interfaces included in the property tracking"
+ }
+ "--wp" = {
+ value = "$interfacetable_warningproperty$"
+ description = "Number of property changes before leading to a warning alert"
+ }
+ "--cp" = {
+ value = "$interfacetable_criticalproperty$"
+ description = "Number of property changes before leading to a critical alert"
+ }
+ "-C" = {
+ value = "$interfacetable_community$"
+ description = "Specifies the snmp v1/v2c community string"
+ }
+ "-2" = {
+ set_if = "$interfacetable_snmpv2$"
+ description = "Use snmp v2c"
+ }
+ "-l" = {
+ value = "$interfacetable_login$"
+ description = "Login for snmpv3 authentication"
+ }
+ "-x" = {
+ value = "$interfacetable_passwd$"
+ description = "Auth password for snmpv3 authentication"
+ }
+ "-X" = {
+ value = "$interfacetable_privpass$"
+ description = "Priv password for snmpv3"
+ }
+ "-L" = {
+ value = "$interfacetable_protocols$"
+ description = "Authentication protocol,Priv protocol"
+ }
+ "--domain" = {
+ value = "$interfacetable_domain$"
+ description = "SNMP transport domain"
+ }
+ "--contextname" = {
+ value = "$interfacetable_contextname$"
+ description = "Context name for the snmp requests"
+ }
+ "-P" = {
+ value = "$interfacetable_port$"
+ description = "SNMP port"
+ }
+ "--64bits" = {
+ set_if = "$interfacetable_64bits$"
+ description = "Use SNMP 64-bits counters"
+ }
+ "--max-repetitions" = {
+ value = "$interfacetable_maxrepetitions$"
+ description = "Increasing this value may enhance snmp query performances by gathering more results at one time"
+ }
+ "--snmp-timeout" = {
+ value = "$interfacetable_snmptimeout$"
+ description = "Define the Transport Layer timeout for the snmp queries"
+ }
+ "--snmp-retries" = {
+ value = "$interfacetable_snmpretries$"
+ description = "Define the number of times to retry sending a SNMP message"
+ }
+ "--snmp-maxmsgsize" = {
+ value = "$interfacetable_snmpmaxmsgsize$"
+ description = "Size of the SNMP message in octets, usefull in case of too long responses. Be carefull with network filters. Range 484 - 65535. Apply only to netsnmp perl bindings. The default is 1472 octets for UDP/IPv4, 1452 octets for UDP/IPv6, 1460 octets for TCP/IPv4, and 1440 octets for TCP/IPv6."
+ }
+ "--unixsnmp" = {
+ set_if = "$interfacetable_unixsnmp$"
+ description = "Use unix snmp utilities for snmp requests"
+ }
+ "-f" = {
+ set_if = "$interfacetable_enableperfdata$"
+ description = "Enable port performance data"
+ }
+ "--perfdataformat" = {
+ value = "$interfacetable_perfdataformat$"
+ description = "Define which performance data will be generated"
+ }
+ "--perfdatathreshold" = {
+ value = "$interfacetable_perfdatathreshold$"
+ description = "Define which thresholds are printed in the generated performance data"
+ }
+ "--perfdatadir" = {
+ value = "$interfacetable_perfdatadir$"
+ description = "When specified, the performance data are also written directly to a file, in the specified location"
+ }
+ "--perfdataservicedesc" = {
+ value = "$interfacetable_perfdataservicedesc$"
+ description = "Specify additional parameters for output performance data to PNP"
+ }
+ "-g" = {
+ value = "$interfacetable_grapher$"
+ description = "Specify the used graphing solution"
+ }
+ "--grapherurl" = {
+ value = "$interfacetable_grapherurl$"
+ description = "Graphing system url"
+ }
+ "--portperfunit" = {
+ value = "$interfacetable_portperfunit$"
+ description = "Traffic could be reported in bits (counters) or in bps (calculated value)"
+ }
+ "--nodetype" = {
+ value = "$interfacetable_nodetype$"
+ description = "Specify the node type, for specific information to be printed / specific oids to be used"
+ }
+ "--duplex" = {
+ set_if = "$interfacetable_duplex$"
+ description = "Add the duplex mode property for each interface in the interface table"
+ }
+ "--stp" = {
+ set_if = "$interfacetable_stp$"
+ description = "Add the stp state property for each interface in the interface table"
+ }
+ "--vlan" = {
+ set_if = "$interfacetable_vlan$"
+ description = "Add the vlan attribution property for each interface in the interface table"
+ }
+ "--noipinfo" = {
+ set_if = "$interfacetable_noipinfo$"
+ description = "Remove the ip information for each interface from the interface table"
+ }
+ "--alias" = {
+ set_if = "$interfacetable_alias$"
+ description = "Add the alias information for each interface in the interface table"
+ }
+ "--accessmethod" = {
+ value = "$interfacetable_accessmethod$"
+ description = "Access method for a shortcut to the host in the HTML page"
+ }
+ "--htmltablelinktarget" = {
+ value = "$interfacetable_htmltablelinktarget$"
+ description = "Specifies the windows or the frame where the [details] link will load the generated html page"
+ }
+ "--delta" = {
+ value = "$interfacetable_delta$"
+ description = "Set the delta used for interface throuput calculation"
+ }
+ "--ifs" = {
+ value = "$interfacetable_ifs$"
+ description = "Input field separator"
+ }
+ "--cache" = {
+ value = "$interfacetable_cache$"
+ description = "Define the retention time of the cached data"
+ }
+ "--noifloadgradient" = {
+ set_if = "$interfacetable_noifloadgradient$"
+ description = "Disable color gradient from green over yellow to red for the load percentage"
+ }
+ "--nohuman" = {
+ set_if = "$interfacetable_nohuman$"
+ description = "Do not translate bandwidth usage in human readable format"
+ }
+ "--snapshot" = {
+ set_if = "$interfacetable_snapshot$"
+ description = "Force the plugin to run like if it was the first launch"
+ }
+ "--timeout" = {
+ value = "$interfacetable_timeout$"
+ description = "Define the global timeout limit of the plugin"
+ }
+ "--css" = {
+ value = "$interfacetable_css$"
+ description = "Define the css stylesheet used by the generated html files"
+ }
+ "--config" = {
+ value = "$interfacetable_config$"
+ description = "Specify a config file to load"
+ }
+ "--noconfigtable" = {
+ set_if = "$interfacetable_noconfigtable$"
+ description = "Disable configuration table on the generated HTML page"
+ }
+ "--notips" = {
+ set_if = "$interfacetable_notips$"
+ description = "Disable the tips in the generated html tables"
+ }
+ "--default-table-sorting" = {
+ value = "$interfacetable_defaulttablesorting$"
+ description = "Default table sorting"
+ }
+ "--table-split" = {
+ set_if = "$interfacetable_tablesplit$"
+ description = "Generate multiple interface tables, one per interface type"
+ }
+ "--notype" = {
+ set_if = "$interfacetable_notype$"
+ description = "Remove the interface type for each interface"
+ }
+ }
+
+ vars.interfacetable_hostquery = "$address$"
+ vars.interfacetable_hostdisplay = "$host.display_name$"
+ vars.interfacetable_perfdataservicedesc = "$service.name$"
+ vars.interfacetable_regex = false
+ vars.interfacetable_outputshort = false
+ vars.interfacetable_aliasmatching = false
+ vars.interfacetable_pkt = false
+ vars.interfacetable_trafficwithpkt = false
+ vars.interfacetable_snmpv2 = false
+ vars.interfacetable_64bits = false
+ vars.interfacetable_unixsnmp = false
+ vars.interfacetable_enableperfdata = false
+ vars.interfacetable_duplex = false
+ vars.interfacetable_stp = false
+ vars.interfacetable_vlan = false
+ vars.interfacetable_noipinfo = false
+ vars.interfacetable_noifloadgradient = false
+ vars.interfacetable_nohuman = false
+ vars.interfacetable_snapshot = false
+ vars.interfacetable_noconfigtable = false
+ vars.interfacetable_notips = false
+ vars.interfacetable_notype = false
+}
+
+object CheckCommand "iftraffic" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_iftraffic.pl"]
+
+ arguments = {
+ "-H" = {
+ value = "$iftraffic_address$"
+ description = "Check interface on the indicated host."
+ required = true
+ }
+ "-C" = {
+ value = "$iftraffic_community$"
+ description = "SNMP community. Defaults to 'public' if omitted."
+ }
+ "-V" = {
+ value = "$iftraffic_version$"
+ description = "SNMP version. Defaults to '1' if omitted."
+ }
+ "-i" = {
+ value = "$iftraffic_interface$"
+ description = "Interface name."
+ required = true
+ }
+ "-b" = {
+ value = "$iftraffic_bandwidth$"
+ description = "Interface maximum speed in kilo/mega/giga/bits per second."
+ required = true
+ }
+ "-u" = {
+ value = "$iftraffic_units$"
+ description = "g=gigabits/s,m=megabits/s,k=kilobits/s,b=bits/s."
+ }
+ "-w" = {
+ value = "$iftraffic_warn$"
+ description = "% of bandwidth usage necessary to result in warning status (default: 85)"
+ }
+ "-c" = {
+ value = "$iftraffic_crit$"
+ description = "% of bandwidth usage necessary to result in critical status (default: 98)"
+ }
+ "-M" = {
+ value = "$iftraffic_max_counter$"
+ description = "Max counter value of net devices in kilo/mega/giga/bytes."
+ }
+ }
+
+ vars.iftraffic_address = "$check_address$"
+ vars.iftraffic_warn = "85"
+ vars.iftraffic_crit = "98"
+}
+
+object CheckCommand "iftraffic64" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_iftraffic64.pl"]
+
+ arguments = {
+ "-H" = {
+ value = "$iftraffic64_address$"
+ description = "Check interface on the indicated host."
+ required = true
+ }
+ "-C" = {
+ value = "$iftraffic64_community$"
+ description = "SNMP community. Defaults to 'public' if omitted."
+ }
+ "-i" = {
+ value = "$iftraffic64_interface$"
+ description = "Interface name."
+ required = true
+ }
+ "-b" = {
+ value = "$iftraffic64_bandwidth$"
+ description = "Interface maximum speed in kilo/mega/giga/bits per second."
+ required = true
+ }
+ "-u" = {
+ value = "$iftraffic64_units$"
+ description = "g=gigabits/s,m=megabits/s,k=kilobits/s,b=bits/s."
+ }
+ "-w" = {
+ value = "$iftraffic64_warn$"
+ description = "% of bandwidth usage necessary to result in warning status (default: 85)"
+ }
+ "-c" = {
+ value = "$iftraffic64_crit$"
+ description = "% of bandwidth usage necessary to result in critical status (default: 98)"
+ }
+ "-M" = {
+ value = "$iftraffic64_max_counter$"
+ description = "Max counter value of net devices in kilo/mega/giga/bytes."
+ }
+ }
+
+ vars.iftraffic64_address = "$check_address$"
+ vars.iftraffic64_warn = "85"
+ vars.iftraffic64_crit = "98"
+}
+
+object CheckCommand "interfaces" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_interfaces" ]
+
+ arguments = {
+ "--hostname" = "$interfaces_address$"
+ "--community" = {
+ value = "$interfaces_community$"
+ description = "The community string (default public)."
+ }
+ "--regex" = {
+ value = "$interfaces_regex$"
+ description = "Interface list regexp."
+ }
+ "--exclude-regex" = {
+ value = "$interfaces_exclude_regex$"
+ description = "Interface list negative regexp."
+ }
+ "--errors" = {
+ value = "$interfaces_errors$"
+ description = "Number of in errors (CRC errors for cisco) to consider a warning (default 50)."
+ }
+ "--out-errors" = {
+ value = "$interface_out_errors$"
+ description = "Number of out errors (collisions for cisco) to consider a warning (default same as in errors)."
+ }
+ "--perfdata" = {
+ value = "$interfaces_perfdata$"
+ }
+ "--prefix" = {
+ value = "$interfaces_prefix$"
+ description = "Prefix interface names with this label."
+ }
+ "--lastcheck" = {
+ value = "$interfaces_lastcheck$"
+ description = "Last checktime (unixtime)."
+ }
+ "--bandwidth" = {
+ value = "$interfaces_bandwidth$"
+ description = "Bandwidth warn level in percent."
+ }
+ "--speed" = {
+ value = "$interfaces_speed$"
+ description = "Override speed detection with this value (bits per sec)."
+ }
+ "--trim" = {
+ value = "$interfaces_trim$"
+ description = "Cut this number of characters from the start of interface descriptions."
+ }
+ "--mode" = {
+ value = "$interfaces_mode$"
+ description = "Special operating mode (default,cisco,nonbulk,bintec)."
+ }
+ "--auth-proto" = {
+ value = "$interfaces_auth_proto$"
+ description = "SNMPv3 Auth Protocol (SHA|MD5)"
+ }
+ "--auth-phrase" = {
+ value = "$interfaces_auth_phrase$"
+ description = "SNMPv3 Auth Phrase"
+ }
+ "--priv-proto" = {
+ value = "$interfaces_priv_proto$"
+ description = "SNMPv3 Privacy Protocol (AES|DES)"
+ }
+ "--priv-phrase" = {
+ value = "$interfaces_priv_phrase$"
+ description = "SNMPv3 Privacy Phrase"
+ }
+ "--user" = {
+ value = "$interfaces_user$"
+ description = "SNMPv3 User"
+ }
+ "--down-is-ok" = {
+ set_if = "$interfaces_down_is_ok$"
+ description = "Disables critical alerts for down interfaces."
+ }
+ "--aliases" = {
+ set_if = "$interfaces_aliases$"
+ description = "Retrieves the interface description."
+ }
+ "--match-aliases" = {
+ set_if = "$interfaces_match_aliases$"
+ description = "Also match against aliases (Option --aliases automatically enabled)."
+ }
+ "--timeout" = {
+ value = "$interfaces_timeout$"
+ description = "Sets the SNMP timeout (in ms)."
+ }
+ "--sleep" = {
+ value = "$interfaces_sleep$"
+ description = "Sleep between every SNMP query (in ms)."
+ }
+ "--if-names" = {
+ set_if = "$interfaces_names$"
+ description = "Use ifName instead of ifDescr."
+ }
+ }
+
+ vars.interfaces_address = "$check_address$"
+ vars.interfaces_down_is_ok = false
+ vars.interfaces_aliases = false
+ vars.interfaces_match_aliases = false
+}
+
+object CheckCommand "linux_netdev" {
+ command = [ PluginContribDir + "/check_linux_netdev" ]
+
+ arguments = {
+ "-d" = {
+ value = "$linux_netdev_duration$"
+ description = "For how long to run. E.g. '10s' or '2m'. Default: '1m'"
+ }
+ "-e" = {
+ value = "$linux_netdev_exclude$"
+ description = "Which NICs to exclude. E.g. 'eth0' or 'eth?*', may be an array. Default: none"
+ }
+ "INTERFACE:METRIC:THRESHOLD=RANGE" = {
+ order = 1
+ skip_key = true
+ value = "$linux_netdev_thresholds$"
+ description = "Warning and critical thresholds. E.g. 'eth?*:tx:bytes:persec:w=1000000000' (see https://github.com/Al2Klimov/check_linux_netdev#usage), may be an array. Default: none"
+ }
+ }
+}
+
+object CheckCommand "nwc_health" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_nwc_health" ]
+
+ arguments = {
+ "--timeout" = {
+ value = "$nwc_health_timeout$"
+ description = "Seconds before plugin times out (default: 15)"
+ }
+ "--blacklist" = {
+ value = "$nwc_health_blacklist$"
+ description = "Blacklist some (missing/failed) components"
+ }
+ "--hostname" = {
+ value = "$nwc_health_hostname$"
+ description = "Hostname or IP-address of the switch or router"
+ }
+ "--port" = {
+ value = "$nwc_health_port$"
+ description = "The SNMP port to use (default: 161)"
+ }
+ "--domain" = {
+ value = "$nwc_health_domain$"
+ description = "The transport domain to use (default: udp/ipv4, other possible values: udp6, udp/ipv6, tcp, tcp4, tcp/ipv4, tcp6, tcp/ipv6)"
+ }
+ "--protocol" = {
+ value = "$nwc_health_protocol$"
+ description = "The SNMP protocol to use (default: 2c, other possibilities: 1,3)"
+ }
+ "--community" = {
+ value = "$nwc_health_community$"
+ description = "SNMP community of the server (SNMP v1/2 only)"
+ }
+ "--username" = {
+ value = "$nwc_health_username$"
+ description = "The securityName for the USM security model (SNMPv3 only)"
+ set_if = {{ string(macro("$nwc_health_protocol$")) == "3" }}
+ }
+ "--authpassword" = {
+ value = "$nwc_health_authpassword$"
+ description = "The authentication password for SNMPv3"
+ set_if = {{ string(macro("$nwc_health_protocol$")) == "3" }}
+ }
+ "--authprotocol" = {
+ value = "$nwc_health_authprotocol$"
+ description = "The authentication protocol for SNMPv3 (md5|sha)"
+ set_if = {{ string(macro("$nwc_health_protocol$")) == "3" }}
+ }
+ "--privpassword" = {
+ value = "$nwc_health_privpassword$"
+ description = "The password for authPriv security level"
+ set_if = {{ string(macro("$nwc_health_protocol$")) == "3" }}
+ }
+ "--privprotocol" = {
+ value = "$nwc_health_privprotocol$"
+ description = "The private protocol for SNMPv3 (des|aes|aes128|3des|3desde)"
+ set_if = {{ string(macro("$nwc_health_protocol$")) == "3" }}
+ }
+ "--contextengineid" = {
+ value = "$nwc_health_contextengineid$"
+ description = "The context engine id for SNMPv3 (10 to 64 hex characters)"
+ }
+ "--contextname" = {
+ value = "$nwc_health_contextname$"
+ description = "The context name for SNMPv3 (empty represents the default context)"
+ }
+ "--community2" = {
+ value = "$nwc_health_community2$"
+ description = "SNMP community which can be used to switch the context during runtime"
+ }
+ "--mode" = {
+ value = "$nwc_health_mode$"
+ description = "Which mode should be executed. A list of all available modes can be found in the plugin documentation"
+ }
+ "--name" = {
+ value = "$nwc_health_name$"
+ description = "The name of an interface (ifDescr)"
+ }
+ "--drecksptkdb" = {
+ value = "$nwc_health_drecksptkdb$"
+ description = "This parameter must be used instead of --name, because Devel::ptkdb is stealing the latter from the command line"
+ }
+ "--alias" = {
+ value = "$nwc_health_alias$"
+ description = "The alias name of a 64bit-interface (ifAlias)"
+ }
+ "--regexp" = {
+ set_if = "$nwc_health_regexp$"
+ description = "A flag indicating that --name is a regular expression"
+ }
+ "--ifspeedin" = {
+ value = "$nwc_health_ifspeedin$"
+ description = "Override the ifspeed oid of an interface (only inbound)"
+ }
+ "--ifspeedout" = {
+ value = "$nwc_health_ifspeedout$"
+ description = "Override the ifspeed oid of an interface (only outbound)"
+ }
+ "--ifspeed" = {
+ value = "$nwc_health_ifspeed$"
+ description = "Override the ifspeed oid of an interface"
+ }
+ "--units" = {
+ value = "$nwc_health_units$"
+ description = "One of %, B, KB, MB, GB, Bit, KBi, MBi, GBi. (used for e.g. mode interface-usage)"
+ }
+ "--name2" = {
+ value = "$nwc_health_name2$"
+ description = "The secondary name of a component"
+ }
+ "--name3" = {
+ value = "$nwc_health_name3$"
+ description = "The tertiary name of a component"
+ }
+ "--role" = {
+ value = "$nwc_health_role$"
+ description = "The role of this device in a hsrp group (active/standby/listen)"
+ }
+ "--report" = {
+ value = "$nwc_health_report$"
+ description = "Can be used to shorten the output. Possible values are: 'long' (default), 'short' (to shorten if available), or 'html' (to produce some html outputs if available)"
+ }
+ "--lookback" = {
+ value = "$nwc_health_lookback$"
+ description = "The amount of time you want to look back when calculating average rates. Use it for mode interface-errors or interface-usage. Without --lookback the time between two runs of check_nwc_health is the base for calculations. If you want your checkresult to be based for example on the past hour, use --lookback 3600."
+ }
+ "--critical" = {
+ value = "$nwc_health_critical$"
+ description = "The critical threshold"
+ }
+ "--warning" = {
+ value = "$nwc_health_warning$"
+ description = "The warning threshold"
+ }
+ "--warningx" = {
+ value = "$nwc_health_warningx$"
+ repeat_key = true
+ description = "The extended warning thresholds"
+ }
+ "--criticalx" = {
+ value = "$nwc_health_criticalx$"
+ repeat_key = true
+ description = "The extended critical thresholds"
+ }
+ "--mitigation" = {
+ value = "$nwc_health_mitigation$"
+ description = "The parameter allows you to change a critical error to a warning."
+ }
+ "--selectedperfdata" = {
+ value = "$nwc_health_selectedperfdata$"
+ description = "The parameter allows you to limit the list of performance data. It's a perl regexp. Only matching perfdata show up in the output."
+ }
+ "--morphperfdata" = {
+ value = "$nwc_health_morphperfdata$"
+ description = "The parameter allows you to change performance data labels. It's a perl regexp and a substitution. --morphperfdata '(.*)ISATAP(.*)'='$1patasi$2'"
+ }
+ "--negate" = {
+ value = "$nwc_health_negate$"
+ description = "The parameter allows you to map exit levels, such as warning=critical"
+ }
+ "--with-mymodules-dyn-dir" = {
+ value = "$nwc_health_mymodules-dyn-dir$"
+ description = "A directory where own extensions can be found"
+ }
+ "--servertype" = {
+ value = "$nwc_health_servertype$"
+ description = "The type of the network device: cisco (default). Use it if auto-detection is not possible"
+ }
+ "--statefilesdir" = {
+ value = "$nwc_health_statefilesdir$"
+ description = "An alternate directory where the plugin can save files"
+ }
+ "--oids" = {
+ value = "$nwc_health_oids$"
+ description = "A list of oids which are downloaded and written to a cache file. Use it together with --mode oidcache"
+ }
+ "--offline" = {
+ value = "$nwc_health_offline$"
+ description = "The maximum number of seconds since the last update of cache file before it is considered too old"
+ }
+ "--multiline" = {
+ set_if = "$nwc_health_multiline$"
+ description = "Multiline output"
+ }
+ }
+
+ vars.nwc_health_hostname = "$check_address$"
+ vars.nwc_health_mode = "hardware-health"
+}
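+
+/*
+ * Illustrative usage sketch (not part of the upstream definition): checking
+ * the usage of a single interface instead of the default hardware-health
+ * mode. Note that the --username/--auth*/--priv* arguments above are only
+ * rendered when vars.nwc_health_protocol is set to "3". Interface name and
+ * assign rule variable are placeholders.
+ *
+ *   apply Service "uplink-usage" {
+ *     import "generic-service"
+ *     check_command = "nwc_health"
+ *     vars.nwc_health_mode = "interface-usage"
+ *     vars.nwc_health_name = "GigabitEthernet0/1"
+ *     vars.nwc_health_warning = 80
+ *     vars.nwc_health_critical = 90
+ *     assign where host.vars.network_device
+ *   }
+ */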
+
+object CheckCommand "printer_health" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_printer_health" ]
+
+ arguments = {
+ "--timeout" = {
+ value = "$printer_health_timeout$"
+ description = "Seconds before plugin times out (default: 15)"
+ }
+ "--blacklist" = {
+ value = "$printer_health_blacklist$"
+ description = "Blacklist some (missing/failed) components"
+ }
+ "--hostname" = {
+ value = "$printer_health_hostname$"
+ description = "Hostname or IP-address of the switch or router"
+ }
+ "--port" = {
+ value = "$printer_health_port$"
+ description = "The SNMP port to use (default: 161)"
+ }
+ "--domain" = {
+ value = "$printer_health_domain$"
+ description = "The transport domain to use (default: udp/ipv4, other possible values: udp6, udp/ipv6, tcp, tcp4, tcp/ipv4, tcp6, tcp/ipv6)"
+ }
+ "--protocol" = {
+ value = "$printer_health_protocol$"
+ description = "The SNMP protocol to use (default: 2c, other possibilities: 1,3)"
+ }
+ "--community" = {
+ value = "$printer_health_community$"
+ description = "SNMP community of the server (SNMP v1/2 only)"
+ }
+ "--username" = {
+ value = "$printer_health_username$"
+ description = "The securityName for the USM security model (SNMPv3 only)"
+ }
+ "--authpassword" = {
+ value = "$printer_health_authpassword$"
+ description = "The authentication password for SNMPv3"
+ }
+ "--authprotocol" = {
+ value = "$printer_health_authprotocol$"
+ description = "The authentication protocol for SNMPv3 (md5|sha)"
+ }
+ "--privpassword" = {
+ value = "$printer_health_privpassword$"
+ description = "The password for authPriv security level"
+ }
+ "--privprotocol" = {
+ value = "$printer_health_privprotocol$"
+ description = "The private protocol for SNMPv3 (des|aes|aes128|3des|3desde)"
+ }
+ "--contextengineid" = {
+ value = "$printer_health_contextengineid$"
+ description = "The context engine id for SNMPv3 (10 to 64 hex characters)"
+ }
+ "--contextname" = {
+ value = "$printer_health_contextname$"
+ description = "The context name for SNMPv3 (empty represents the default context)"
+ }
+ "--community2" = {
+ value = "$printer_health_community2$"
+ description = "SNMP community which can be used to switch the context during runtime"
+ }
+ "--mode" = {
+ value = "$printer_health_mode$"
+ description = "Which mode should be executed. Available modes: hardware-health, supplies-status and uptime."
+ }
+ "--name" = {
+ value = "$printer_health_name$"
+ description = "The name of an interface (ifDescr)"
+ }
+ "--regexp" = {
+ set_if = "$printer_health_regexp$"
+ description = "A flag indicating that --name is a regular expression"
+ }
+ "--units" = {
+ value = "$printer_health_units$"
+ description = "One of %, B, KB, MB, GB, Bit, KBi, MBi, GBi. (used for e.g. mode interface-usage)"
+ }
+ "--name2" = {
+ value = "$printer_health_name2$"
+ description = "The secondary name of a component"
+ }
+ "--name3" = {
+ value = "$printer_health_name3$"
+ description = "The teritary name of a component"
+ }
+ "--report" = {
+ value = "$printer_health_report$"
+ description = "Can be used to shorten the output."
+ }
+ "--lookback" = {
+ value = "$printer_health_lookback$"
+ description = "The amount of time you want to look back when calculating average rates. Use it for mode interface-errors or interface-usage. Without --lookback the time between two runs of check_printer_health is the base for calculations. If you want your checkresult to be based for example on the past hour, use --lookback 3600."
+ }
+ "--critical" = {
+ value = "$printer_health_critical$"
+ description = "The critical threshold"
+ }
+ "--warning" = {
+ value = "$printer_health_warning$"
+ description = "The warning threshold"
+ }
+ "--warningx" = {
+ value = "$printer_health_warningx$"
+ description = "The extended warning thresholds"
+ }
+ "--criticalx" = {
+ value = "$printer_health_criticalx$"
+ description = "The extended critical thresholds"
+ }
+ "--mitigation" = {
+ value = "$printer_health_mitigation$"
+ description = "The parameter allows you to change a critical error to a warning."
+ }
+ "--selectedperfdata" = {
+ value = "$printer_health_selectedperfdata$"
+ description = "The parameter allows you to limit the list of performance data. It's a perl regexp. Only matching perfdata show up in the output."
+ }
+ "--morphperfdata" = {
+ value = "$printer_health_morphperfdata$"
+ description = "The parameter allows you to change performance data labels. It's a perl regexp and a substitution. --morphperfdata '(.*)ISATAP(.*)'='$1patasi$2'"
+ }
+ "--negate" = {
+ value = "$printer_health_negate$"
+ description = "The parameter allows you to map exit levels, such as warning=critical"
+ }
+ "--with-mymodules-dyn-dir" = {
+ value = "$printer_health_mymodules-dyn-dir$"
+ description = "A directory where own extensions can be found"
+ }
+ "--servertype" = {
+ value = "$printer_health_servertype$"
+ description = "The type of the network device: cisco (default). Use it if auto-detection is not possible"
+ }
+ "--statefilesdir" = {
+ value = "$printer_health_statefilesdir$"
+ description = "An alternate directory where the plugin can save files"
+ }
+ "--oids" = {
+ value = "$printer_health_oids$"
+ description = "A list of oids which are downloaded and written to a cache file. Use it together with --mode oidcache"
+ }
+ "--offline" = {
+ value = "$printer_health_offline$"
+ description = "The maximum number of seconds since the last update of cache file before it is considered too old"
+ }
+ "--multiline" = {
+ set_if = "$printer_health_multiline$"
+ description = "Multiline output"
+ }
+ }
+
+ vars.printer_health_hostname = "$check_address$"
+ vars.printer_health_mode = "supplies-status"
+}
+
+template CheckCommand "generic-thola-check-command" {
+ command = [ PluginContribDir + "/thola-client", "check" ]
+
+ arguments = {
+ "--target-api" = {
+ required = true
+ value = "$thola_api_address$"
+ description = "Address of the thola API"
+ }
+ }
+}
+
+template CheckCommand "generic-thola-device-check-command" {
+ import "generic-thola-check-command"
+ import "ipv4-or-ipv6"
+
+ arguments += {
+ "thola_device_address" = {
+ order = 0
+ required = true
+ skip_key = true
+ value = "$thola_device_address$"
+ description = "IP address of target device"
+ }
+ "--snmp-community" = {
+ value = "$thola_device_snmp_community$"
+ description = "SNMP Community of target device"
+ }
+ "--snmp-version" = {
+ value = "$thola_device_snmp_protocol$"
+ description = "SNMP Version of target device"
+ }
+ }
+
+ vars.thola_device_address = "$check_address$"
+}
+
+object CheckCommand "thola-cpu-load" {
+ import "generic-thola-device-check-command"
+
+ command += [ "cpu-load" ]
+
+ arguments += {
+ "--critical" = {
+ value = "$thola_cpu_load_critical$"
+ description = "Critical threshold for the CPU load in %"
+ }
+ "--warning" = {
+ value = "$thola_cpu_load_warning$"
+ description = "Warning threshold for the CPU load in %"
+ }
+ }
+}
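+
+/*
+ * Illustrative usage sketch (not part of the upstream definition): the device
+ * address defaults to $check_address$ via "generic-thola-device-check-command",
+ * so only the API endpoint and thresholds need to be set. The API URL and the
+ * assign rule variable are placeholders.
+ *
+ *   apply Service "thola-cpu-load" {
+ *     import "generic-service"
+ *     check_command = "thola-cpu-load"
+ *     vars.thola_api_address = "http://127.0.0.1:8237"
+ *     vars.thola_device_snmp_community = host.vars.snmp_community
+ *     vars.thola_cpu_load_warning = 80
+ *     vars.thola_cpu_load_critical = 95
+ *     assign where host.vars.network_device
+ *   }
+ */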
+
+object CheckCommand "thola-interface-metrics" {
+ import "generic-thola-device-check-command"
+
+ command += [ "interface-metrics" ]
+}
+
+object CheckCommand "thola-hardware-health" {
+ import "generic-thola-device-check-command"
+
+ command += [ "hardware-health" ]
+}
+
+object CheckCommand "thola-identify" {
+ import "generic-thola-device-check-command"
+
+ command += [ "identify" ]
+
+ arguments += {
+ "--model" = {
+ value = "$thola_identify_model$"
+ description = "Model that is compared to the actual model of the device"
+ }
+ "--os-version" = {
+ value = "$thola_identify_os_version$"
+ description = "OS-version that is compared to the actual OS-version of the device"
+ }
+ "--vendor" = {
+ value = "$thola_identify_vendor$"
+ description = "Vendor that is compared to the actual vendor of the device"
+ }
+ "--serial-number" = {
+ value = "$thola_identify_serial_number$"
+ description = "Serial number that is compared to the actual serial number of the device"
+ }
+ "--snmp-discover-retries" = {
+ value = "$thola_identify_discover_retries$"
+ description = "Number of discover retries"
+ }
+ "--snmp-discover-timeout" = {
+ value = "$thola_identify_discover_timeouts$"
+ description = "Number of discover timeouts"
+ }
+ }
+}
+
+object CheckCommand "thola-memory-usage" {
+ import "generic-thola-device-check-command"
+
+ command += [ "memory-usage" ]
+
+ arguments += {
+ "--critical" = {
+ value = "$thola_memory_usage_critical$"
+ description = "Critical threshold for the memory usage in %"
+ }
+ "--warning" = {
+ value = "$thola_memory_usage_warning$"
+ description = "Warning threshold for the memory usage in %"
+ }
+ }
+}
+
+object CheckCommand "thola-sbc" {
+ import "generic-thola-device-check-command"
+
+ command += [ "sbc" ]
+
+ arguments += {
+ "--system-health-score-critical" = {
+ value = "$thola_sbc_system_health_score_critical$"
+ description = "Critical threshold for the health score in %"
+ }
+ "--system-health-score-warning" = {
+ value = "$thola_sbc_system_health_score_warning$"
+ description = "Warning threshold for the health score in %"
+ }
+ }
+}
+
+object CheckCommand "thola-thola-server" {
+ import "generic-thola-check-command"
+
+ command += [ "thola-server" ]
+}
+
+object CheckCommand "thola-ups" {
+ import "generic-thola-device-check-command"
+
+ command += [ "ups" ]
+
+ arguments += {
+ "--batt-current-critical-max" = {
+ value = "$thola_ups_batt_current_critical_max$"
+ description = "High critical threshold for the battery current in Volt"
+ }
+ "--batt-current-critical-min" = {
+ value = "$thola_ups_batt_current_critical_min$"
+ description = "Low critical threshold for the battery current in Volt"
+ }
+ "--batt-current-warning-max" = {
+ value = "$thola_ups_batt_current_warning_max$"
+ description = "High warning threshold for the battery current in Volt"
+ }
+ "--batt-current-warning-min" = {
+ value = "$thola_ups_batt_current_warning_min$"
+ description = "Low warning threshold for the battery current in Volt"
+ }
+ "--batt-temperature-critical-max" = {
+ value = "$thola_ups_batt_temperature_critical_max$"
+ description = "High critical threshold for the battery temperature in degree celsius"
+ }
+ "--batt-temperature-critical-min" = {
+ value = "$thola_ups_batt_temperature_critical_min$"
+ description = "Low critical threshold for the battery temperature in degree celsius"
+ }
+ "--batt-temperature-warning-max" = {
+ value = "$thola_ups_batt_temperature_warning_max$"
+ description = "High warning threshold for the battery temperature in degree celsius"
+ }
+ "--batt-temperature-warning-min" = {
+ value = "$thola_ups_batt_temperature_warning_min$"
+ description = "Low warning threshold for the battery temperature in degree celsius"
+ }
+ "--current-load-critical-max" = {
+ value = "$thola_ups_current_load_critical_max$"
+ description = "High critical threshold for the current load in %"
+ }
+ "--current-load-critical-min" = {
+ value = "$thola_ups_current_load_critical_min$"
+ description = "Low critical threshold for the current load in %"
+ }
+ "--current-load-warning-max" = {
+ value = "$thola_ups_current_load_warning_max$"
+ description = "High warning threshold for the current load in %"
+ }
+ "--current-load-warning-min" = {
+ value = "$thola_ups_current_load_warning_min$"
+ description = "Low warning threshold for the current load in %"
+ }
+ "--rectifier-current-critical-max" = {
+ value = "$thola_ups_rectifier_current_critical_max$"
+ description = "High critical threshold for the current rectifier in Volt"
+ }
+ "--rectifier-current-critical-min" = {
+ value = "$thola_ups_rectifier_current_critical_min$"
+ description = "Low critical threshold for the current rectifier in Volt"
+ }
+ "--rectifier-current-warning-max" = {
+ value = "$thola_ups_rectifier_current_warning_max$"
+ description = "High warning threshold for the current rectifier in Volt"
+ }
+ "--rectifier-current-warning-min" = {
+ value = "$thola_ups_rectifier_current_warning_min$"
+ description = "Low warning threshold for the current rectifier in Volt"
+ }
+ "--system-voltage-critical-max" = {
+ value = "$thola_ups_system_voltage_critical_max$"
+ description = "High critical threshold for the system voltage in Volt"
+ }
+ "--system-voltage-critical-min" = {
+ value = "$thola_ups_system_voltage_critical_min$"
+ description = "Low critical threshold for the system voltage in Volt"
+ }
+ "--system-voltage-warning-max" = {
+ value = "$thola_ups_system_voltage_warning_max$"
+ description = "High warning threshold for the system voltage in Volt"
+ }
+ "--system-voltage-warning-min" = {
+ value = "$thola_ups_system_voltage_warning_min$"
+ description = "Low warning threshold for the system voltage in Volt"
+ }
+ }
+}
diff --git a/itl/plugins-contrib.d/network-services.conf b/itl/plugins-contrib.d/network-services.conf
new file mode 100644
index 0000000..28eb42c
--- /dev/null
+++ b/itl/plugins-contrib.d/network-services.conf
@@ -0,0 +1,123 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+object CheckCommand "kdc" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_kdc" ]
+
+ arguments = {
+ "-H" = {
+ value = "$kdc_address$"
+ description = "Name or IP address of KDC to check."
+ }
+ "-P" = {
+ value = "$kdc_port$"
+ description = "Port on which KDC runs (default 88)."
+ }
+ "-p" = {
+ value = "$kdc_principal$"
+ description = "Principal name to authenticate as (including realm)."
+ required = true
+ }
+ "-k" = {
+ value = "$kdc_keytab$"
+ description = "Keytab file containing principal's key."
+ required = true
+ }
+ }
+
+ vars.kdc_address = "$check_address$"
+}
+
+object CheckCommand "rbl" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_rbl" ]
+
+ arguments = {
+ "-H" = {
+ value = "$rbl_hostname$"
+ description = "Name or IP address of SMTP server to check."
+ }
+ "-s" = {
+ value = "$rbl_server$"
+ description = "List of RBL servers as an array."
+ required = true
+ repeat_key = true
+ }
+ "-w" = {
+ value = "$rbl_warning$"
+ description = "Number of blacklisting servers for a warning."
+ }
+ "-c" = {
+ value = "$rbl_critical$"
+ description = "Number of blacklisting servers for a critical."
+ }
+ "-t" = {
+ value = "$rbl_timeout$"
+ description = "Seconds before plugin times out (default: 15)."
+ }
+ }
+
+ vars.rbl_hostname = "$check_address$"
+ vars.rbl_timeout = 15
+ vars.rbl_warning = 1
+ vars.rbl_critical = 1
+}
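+
+/*
+ * Illustrative usage sketch (not part of the upstream definition): rbl_server
+ * is a required array and is repeated once per DNSBL. The listed DNSBLs and
+ * the assign rule variable are placeholders.
+ *
+ *   apply Service "rbl" {
+ *     import "generic-service"
+ *     check_command = "rbl"
+ *     vars.rbl_server = [ "zen.spamhaus.org", "bl.spamcop.net" ]
+ *     assign where host.vars.mailserver
+ *   }
+ */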
+
+object CheckCommand "lsyncd" {
+ import "plugin-check-command"
+ command = [ PluginContribDir + "/check_lsyncd" ]
+
+ arguments = {
+ "-s" = {
+ value = "$lsyncd_statfile$"
+ description = "Set status file path (default: /var/run/lsyncd.status)."
+ }
+ "-w" = {
+ value = "$lsyncd_warning$"
+ description = "Warning if more than N delays (default: 10)."
+ }
+ "-c" = {
+ value = "$lsyncd_critical$"
+ description = "Critical if more then N delays (default: 100)."
+ }
+ }
+}
+
+object CheckCommand "fail2ban" {
+ command = [ "sudo", PluginContribDir + "/check_fail2ban" ]
+
+ arguments = {
+ "-D" = {
+ value = "$fail2ban_display$"
+ description = "To modify the output display, default is 'CHECK FAIL2BAN ACTIVITY'"
+ }
+ "-P" = {
+ value = "$fail2ban_path$"
+ description = "Specify the path to the tw_cli binary, default value is /usr/bin/fail2ban-client"
+ }
+ "-w" = {
+ value = "$fail2ban_warning$"
+ description = "Specify a warning threshold, default is 1"
+ }
+ "-c" = {
+ value = "$fail2ban_critical$"
+ description = "Specify a critical threshold, default is 2"
+ }
+ "-s" = {
+ value = "$fail2ban_socket$"
+ description = "Specify a socket path, default is unset"
+ }
+ "-p" = {
+ set_if = "$fail2ban_perfdata$"
+ description = "If set to true, activate the perfdata output"
+ }
+ "-j" = {
+ value = "$fail2ban_jail$"
+ description = "Specify the name of the specific jail to monitor; omitted by default, i.e. all jails are being monitored"
+ }
+ }
+
+ vars.fail2ban_perfdata = true
+}
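
Because the rbl command declares repeat_key = true for -s, the blacklist servers are best passed as an array: each element is rendered as its own -s argument. A minimal usage sketch (host name and server list are placeholders):

  object Service "rbl" {
    import "generic-service"       // stock template from the default conf.d
    host_name = "mail01"           // placeholder host
    check_command = "rbl"
    vars.rbl_server = [ "zen.spamhaus.org", "bl.spamcop.net" ]   // one -s per element
    vars.rbl_warning = 1
    vars.rbl_critical = 2
  }
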
diff --git a/itl/plugins-contrib.d/operating-system.conf b/itl/plugins-contrib.d/operating-system.conf
new file mode 100644
index 0000000..41a46e8
--- /dev/null
+++ b/itl/plugins-contrib.d/operating-system.conf
@@ -0,0 +1,195 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+object CheckCommand "mem" {
+ command = [ PluginContribDir + "/check_mem.pl" ]
+
+ arguments = {
+ "-u" = {
+ set_if = "$mem_used$"
+ description = "Check USED memory"
+ }
+ "-a" = {
+ set_if = "$mem_available$"
+ description = "Check AVAILABLE memory"
+ }
+ "-f" = {
+ set_if = "$mem_free$"
+ description = "Check FREE memory"
+ }
+ "-C" = {
+ set_if = "$mem_cache$"
+ description = "Count OS caches as FREE memory"
+ }
+ "-w" = {
+ value = "$mem_warning$"
+ description = "Percent free/used when to warn"
+ }
+ "-c" = {
+ value = "$mem_critical$"
+ description = "Percent free/used when critical"
+ }
+ }
+
+ vars.mem_used = false
+ vars.mem_free = false
+ vars.mem_available = false
+ vars.mem_cache = false
+}
+
+object CheckCommand "sar-perf" {
+ command = [ PluginContribDir + "/check_sar_perf.py" ]
+
+ arguments = {
+ "sar_perf_profile" = {
+ value = "$sar_perf_profile$"
+ description = "Define the run profile: pagestat, cpu, memory_util, memory_stat, io_transfer, queueln_load, swap_util, swap_stat, task, kernel, disk <disk>. Can be a string or an array of multiple profiles."
+ skip_key = true
+ repeat_key = false
+ required = true
+ }
+ "sar_perf_disk" = {
+ value = "$sar_perf_disk$"
+ set_if = {{ macro("$sar_perf_profile$") == "disk" }}
+ description = "Disk name for the 'disk' profile"
+ skip_key = true
+ }
+ }
+}
+
+object CheckCommand "running_kernel" {
+ command = {{
+ var use_sudo = macro("$running_kernel_use_sudo$")
+
+ if (use_sudo == null && PlatformName == "Ubuntu") {
+ use_sudo = true
+ }
+
+ var args
+
+ if (use_sudo) {
+ args = [ "sudo" ]
+ } else {
+ args = []
+ }
+
+ args += [ PluginContribDir + "/check_running_kernel" ]
+
+ return args
+ }}
+}
+
+object CheckCommand "yum" {
+ command = [ PluginContribDir + "/check_yum" ]
+
+ arguments = {
+ "--all-updates" = {
+ set_if = "$yum_all_updates$"
+ description = "Do not distinguish between security and non-security updates. This may be used if the YUM security plugin is absent."
+ }
+ "--warn-on-any-update" = {
+ set_if = "$yum_warn_on_any_update$"
+ description = "Warns if there are any (non-security) package updates available."
+ }
+ "--cache-only" = {
+ set_if = "$yum_cache_only$"
+ description = "Run entirely from cache and do not update the cache."
+ }
+ "--no-warn-on-lock" = {
+ set_if = "$yum_no_warn_on_lock$"
+ description = "Return OK instead of WARNING when YUM is locked."
+ }
+ "--no-warn-on-updates" = {
+ set_if = "$yum_no_warn_on_updates$"
+ description = "Return OK instead of WARNING even when updates are available."
+ }
+ "--enablerepo" = {
+ value = "$yum_enablerepo$"
+ description = "Explicitly enables a reposity when calling YUM. Can take a comma separated list of repositories."
+ }
+ "--disablerepo" = {
+ value = "$yum_disablerepo$"
+ description = "Explicitly disables a reposity when calling YUM. Can take a comma separated list of repositories."
+ }
+ "--installroot" = {
+ value = "$yum_installroot$"
+ description = "Specifies another installation root directory"
+ }
+ "--timeout" = {
+ value = "$yum_timeout$"
+ description = "Sets a timeout in seconds after which the plugin will exit (defaults to 55 seconds)."
+ }
+ }
+
+ vars.yum_all_updates = false
+ vars.yum_warn_on_any_update = false
+ vars.yum_cache_only = false
+ vars.yum_no_warn_on_lock = false
+ vars.yum_no_warn_on_updates = false
+
+ timeout = 5m
+}
+
+object CheckCommand "iostat" {
+ command = [ PluginContribDir + "/check_iostat" ]
+
+ arguments = {
+ "-d" = {
+ value = "$iostat_disk$"
+ description = "Device to check without path. e.g. sda"
+ }
+ "-w" = {
+ value ="$iostat_wtps$,$iostat_wread$,$iostat_wwrite$"
+ description = "warning threshold for tps, KB_read/s and KB_written/s"
+ }
+ "-c" = {
+ value ="$iostat_ctps$,$iostat_cread$,$iostat_cwrite$"
+ description = "warning threshold for tps, KB_read/s and KB_written/s"
+ }
+ }
+
+ vars.iostat_disk = "sda"
+ vars.iostat_wtps = 100
+ vars.iostat_wread = 100
+ vars.iostat_wwrite = 100
+ vars.iostat_ctps = 200
+ vars.iostat_cread = 200
+ vars.iostat_cwrite = 200
+}
+
+object CheckCommand "iostats" {
+ command = [ PluginContribDir + "/check_iostats" ]
+
+ arguments = {
+ "-d" = {
+ value = "$iostats_disk$"
+ description = "Device to check without path. e.g. sda"
+ }
+ "-w" = {
+ value ="$iostats_warning_tps$,$iostats_warning_read$,$iostats_warning_write$"
+ description = "Sets the WARNING level for tps, KB_read/s and KB_written/s, respectively"
+ }
+ "-c" = {
+ value ="$iostats_critical_tps$,$iostats_critical_read$,$iostats_critical_write$"
+ description = "Sets the CRITICAL level for tps, KB_read/s and KB_written/s, respectively"
+ }
+ "-W" = {
+ value ="$iostats_warning_wait$"
+ description = "Sets the WARNING level for iowait"
+ }
+
+ "-C" = {
+ value ="$iostats_critical_wait$"
+ description = "Sets the CRITICAL level for iowait"
+ }
+ }
+
+ vars.iostats_disk = "sda"
+ vars.iostats_warning_tps = 3000
+ vars.iostats_warning_read = 50000
+ vars.iostats_warning_write = 10000
+ vars.iostats_warning_wait = 50
+ vars.iostats_critical_tps = 5000
+ vars.iostats_critical_read = 80000
+ vars.iostats_critical_write = 25000
+ vars.iostats_critical_wait = 80
+}
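
The mem command only emits -u/-a/-f/-C when the matching boolean variable is true, so a Service picks one mode and adds percentage thresholds. A minimal usage sketch (host name and values are placeholders; thresholds are percentages as interpreted by check_mem.pl):

  object Service "memory" {
    import "generic-service"     // stock template from the default conf.d
    host_name = "app01"          // placeholder host
    check_command = "mem"
    vars.mem_available = true    // check AVAILABLE memory
    vars.mem_cache = true        // count OS caches as free
    vars.mem_warning = 10        // percent
    vars.mem_critical = 5        // percent
  }
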
diff --git a/itl/plugins-contrib.d/raid-controller.conf b/itl/plugins-contrib.d/raid-controller.conf
new file mode 100644
index 0000000..17fb388
--- /dev/null
+++ b/itl/plugins-contrib.d/raid-controller.conf
@@ -0,0 +1,122 @@
+/*
+ * Icinga2 CheckCommand definitions to monitor RAID controller from Adaptec and Broadcom using
+ * the Adaptec RAID Monitoring Plugin and the LSI RAID Monitoring Plugin
+ */
+
+object CheckCommand "adaptec-raid" {
+ import "plugin-check-command"
+
+ command = [ PluginContribDir + "/check_adaptec_raid" ]
+
+ arguments = {
+ "-C" = {
+ required = true
+ value = "$adaptec_controller_number$"
+ description = "Insert the controller number to be checked."
+ }
+ "-p" = {
+ required = true
+ value = "$arcconf_path$"
+ description = "Insert the path to arcconf (e.g. /sbin/arcconf)."
+ }
+ }
+
+ vars.arcconf_path = "/sbin/arcconf"
+}
+
+object CheckCommand "lsi-raid" {
+ import "plugin-check-command"
+
+ command = [ PluginContribDir + "/check_lsi_raid" ]
+
+ arguments = {
+ "-C" = {
+ value = "$lsi_controller_number$"
+ description = "Insert the controller number to be checked."
+ }
+ "-p" = {
+ value = "$storcli_path$"
+ description = "Insert the path to storcli (e.g. /usr/sbin/storcli)."
+ }
+ "-EID" = {
+ value = "$lsi_enclosure_id$"
+ description = "Enclosure numbers to be checked, comma-separated."
+ }
+ "-LD" = {
+ value = "$lsi_ld_id$"
+ description = "Logical devices to be checked, comma-separated."
+ }
+ "-PD" = {
+ value = "$lsi_pd_id$"
+ description = "Physical devices to be checked, comma-separated."
+ }
+ "-Tw" = {
+ value = "$lsi_temp_warning$"
+ description = "RAID controller warning temperature."
+ }
+ "-Tc" = {
+ value = "$lsi_temp_critical$"
+ description = "RAID controller critical temperature."
+ }
+ "-PDTw" = {
+ value = "$lsi_pd_temp_warning$"
+ description = "Disk warning temperature."
+ }
+ "-PDTc" = {
+ value = "$lsi_pd_temp_critical$"
+ description = "Disk critical temperature."
+ }
+ "-BBUTw" = {
+ value = "$lsi_bbu_temp_warning$"
+ description = "Battery warning temperature."
+ }
+ "-BBUTc" = {
+ value = "$lsi_bbu_temp_critical$"
+ description = "Battery critical temperature."
+ }
+ "-CVTw" = {
+ value = "$lsi_cv_temp_warning$"
+ description = "CacheVault warning temperature."
+ }
+ "-CVTc" = {
+ value = "$lsi_cv_temp_critical$"
+ description = "CacheVault critical temperature."
+ }
+ "-Im" = {
+ value = "$lsi_ignored_media_errors$"
+ description = "Warning threshold for media errors."
+ }
+ "-Io" = {
+ value = "$lsi_ignored_other_errors$"
+ description = "Warning threshold for other errors."
+ }
+ "-Ip" = {
+ value = "$lsi_ignored_predictive_fails$"
+ description = "Warning threshold for predictive failures."
+ }
+ "-Is" = {
+ value = "$lsi_ignored_shield_counters$"
+ description = "Warning threshold for shield counter."
+ }
+ "-Ib" = {
+ value = "$lsi_ignored_bbm_counters$"
+ description = "Warning threshold for BBM counter."
+ }
+ "-b" = {
+ value = "$lsi_bbu$"
+ description = "Define if BBU is present and it's state should be checked."
+ }
+ "--noenclosures" = {
+ set_if = "$lsi_noenclosures$"
+ description = "Define if enclosures are present."
+ }
+ "--nosudo" = {
+ set_if = "$lsi_nosudo$"
+ description = "Do not use sudo when running storcli."
+ }
+ "--nocleanlogs" = {
+ set_if = "$lsi_nocleanlogs$"
+ description = "Do not clean up the log files after executing storcli checks."
+ }
+ }
+}
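
Both RAID commands only need the controller number; for lsi-raid the storcli path and temperature thresholds can be overridden per host. A minimal usage sketch (host name, path and thresholds are placeholders):

  object Service "raid" {
    import "generic-service"                                 // stock template from the default conf.d
    host_name = "db01"                                       // placeholder host
    check_command = "lsi-raid"
    vars.lsi_controller_number = 0
    vars.storcli_path = "/opt/MegaRAID/storcli/storcli64"    // example storcli location
    vars.lsi_temp_warning = 70                               // controller temperature thresholds
    vars.lsi_temp_critical = 80
  }
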
diff --git a/itl/plugins-contrib.d/smart-attributes.conf b/itl/plugins-contrib.d/smart-attributes.conf
new file mode 100644
index 0000000..20eb345
--- /dev/null
+++ b/itl/plugins-contrib.d/smart-attributes.conf
@@ -0,0 +1,24 @@
+/*
+ * Icinga2 CheckCommand definition for the SMART Attributes Monitoring Plugin
+ */
+
+object CheckCommand "smart-attributes" {
+ import "plugin-check-command"
+
+ command = [ PluginContribDir + "/check_smart_attributes" ]
+
+ arguments = {
+ "-dbj" = {
+ required = true
+ value = "$smart_attributes_config_path$"
+ description = "Path to the smart attributes config file (e.g. check_smartdb.json)"
+ }
+ "-d" = {
+ required = true
+ value = "$smart_attributes_device$"
+ description = "Insert the device name (e.g. /dev/sda) to monitor"
+ }
+ }
+
+ vars.smart_attributes_config_path = ConfigDir + "/plugins-config/check_smartdb.json"
+}
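
Since -d accepts a single device, the command is typically applied once per disk. A usage sketch assuming a host custom variable such as vars.smart_devices = [ "sda", "sdb" ] (the variable name is an example, not part of the CheckCommand):

  apply Service "smart-" for (disk in host.vars.smart_devices) {
    import "generic-service"                 // stock template from the default conf.d
    check_command = "smart-attributes"
    vars.smart_attributes_device = "/dev/" + disk
  }
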
diff --git a/itl/plugins-contrib.d/storage.conf b/itl/plugins-contrib.d/storage.conf
new file mode 100644
index 0000000..dca080e
--- /dev/null
+++ b/itl/plugins-contrib.d/storage.conf
@@ -0,0 +1,119 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+object CheckCommand "glusterfs" {
+ command = [ "sudo", PluginContribDir + "/check_glusterfs" ]
+
+ arguments = {
+ "--perfdata" = {
+ set_if = "$glusterfs_perfdata$"
+ description = "Optional. Print perfdata of all or the specified volume."
+ }
+ "--warnonfailedheal" = {
+ set_if = "$glusterfs_warnonfailedheal$"
+ description = "Optional. Warn if the *heal-failed* log contains entries. The log can be cleared by restarting glusterd."
+ }
+ "--volume" = {
+ value = "$glusterfs_volume$"
+ description = "Optional. Only check the specified *VOLUME*. If --volume is not set, all volumes are checked."
+ }
+ "-w" = {
+ value = "$glusterfs_disk_warning$"
+ description = "Optional. Warn if disk usage is above *DISKWARN*. Defaults to 90 (percent)."
+ }
+ "-c" = {
+ value = "$glusterfs_disk_critical$"
+ description = "Optional. Return a critical error if disk usage is above *DISKCRIT*. Defaults to 95 (percent)."
+ }
+ "-W" = {
+ value = "$glusterfs_inode_warning$"
+ description = "Optional. Warn if inode usage is above *DISKWARN*. Defaults to 90 (percent)."
+ }
+ "-C" = {
+ value = "$glusterfs_inode_critical$"
+ description = "Optional. Return a critical error if inode usage is above *DISKCRIT*. Defaults to 95 (percent)."
+ }
+ }
+
+ vars.glusterfs_disk_warning = 90
+ vars.glusterfs_disk_critical = 95
+ vars.glusterfs_inode_warning = 90
+ vars.glusterfs_inode_critical = 95
+}
+
+object CheckCommand "ceph" {
+ command = [ PluginContribDir + "/check_ceph.py" ]
+
+ arguments = {
+ "-e" = {
+ value = "$ceph_exec_dir$"
+ required = false
+ description = "ceph executable [/usr/bin/ceph]"
+ }
+ "-c" = {
+ value = "$ceph_conf_file$"
+ required = false
+ description = "alternative ceph conf file"
+ }
+ "-m" = {
+ value = "$ceph_mon_address$"
+ required = false
+ description = "ceph monitor address[:port]"
+ }
+ "-i" = {
+ value = "$ceph_client_id$"
+ required = false
+ description = "ceph client id"
+ }
+ "-n" = {
+ value = "$ceph_client_name$"
+ required = false
+ description = "ceph client name"
+ }
+ "-k" = {
+ value = "$ceph_client_key$"
+ required = false
+ description = "ceph client keyring file"
+ }
+ "-w" = {
+ value = "$ceph_whitelist$"
+ required = false
+ description = "whitelist regexp for ceph health warnings"
+ }
+ "-d" = {
+ set_if = "$ceph_details$"
+ description = "exec 'ceph health detail'"
+ }
+ }
+}
+
+object CheckCommand "btrfs" {
+ import "plugin-check-command"
+ command = [ "sudo", PluginContribDir + "/check_btrfs" ]
+
+ arguments = {
+ "--allocated-warning-gib" = {
+ value = "$btrfs_awg$"
+ description = "Exit with WARNING status if less than the specified amount of disk space (in GiB) is unallocated"
+ }
+ "--allocated-critical-gib" = {
+ value = "$btrfs_acg$"
+ description = "Exit with CRITICAL status if less than the specified amount of disk space (in GiB) is unallocated"
+ }
+ "--allocated-warning-percent" = {
+ value = "$btrfs_awp$"
+ description = "Exit with WARNING status if more than the specified percent of disk space is allocated"
+ }
+ "--allocated-critical-percent" = {
+ value = "$btrfs_acp$"
+ description = "Exit with CRITICAL status if more than the specified percent of disk space is allocated"
+ }
+ "--mountpoint" = {
+ value = "$btrfs_mountpoint$"
+ description = "Path to the BTRFS mountpoint"
+ required = true
+ }
+ }
+ vars.btrfs_awp = 80
+ vars.btrfs_acp = 90
+}
+
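
The btrfs command marks --mountpoint as required and ships with 80%/90% allocation defaults, so a Service only has to name the mountpoint and may tighten the thresholds. A minimal usage sketch (host name and mountpoint are placeholders):

  object Service "btrfs-data" {
    import "generic-service"           // stock template from the default conf.d
    host_name = "storage01"            // placeholder host
    check_command = "btrfs"
    vars.btrfs_mountpoint = "/data"    // required
    vars.btrfs_awp = 70                // warn earlier than the 80% default
  }
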
diff --git a/itl/plugins-contrib.d/systemd.conf b/itl/plugins-contrib.d/systemd.conf
new file mode 100644
index 0000000..4c0bbca
--- /dev/null
+++ b/itl/plugins-contrib.d/systemd.conf
@@ -0,0 +1,51 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+object CheckCommand "systemd" {
+ command = [ PluginContribDir + "/check_systemd.py" ]
+
+ arguments = {
+ "--unit" = {
+ value = "$systemd_unit$"
+ description = "Name of the systemd unit that is being tested."
+ }
+ "--exclude" = {
+ value = "$systemd_exclude_unit$"
+ description = "Exclude a systemd unit from the checks. This option can be applied multiple times. Also supports regular expressions."
+ repeat_key = true
+ }
+ "--no-startup-time" = {
+ set_if = "$systemd_no_startup_time$"
+ description = "Don’t check the startup time. Using this option the options `systemd_warning` and `systemd_critical` have no effect. (Default: `false`)"
+ }
+ "--warning" = {
+ value = "$systemd_warning$"
+ description = "Startup time in seconds to result in a warning status. (Default: `60s`)"
+ }
+ "--critical" = {
+ value = "$systemd_critical$"
+ description = "Startup time in seconds to result in a critical status. (Default: `120s`)"
+ }
+ "--dead-timers" = {
+ set_if = "$systemd_dead_timers$"
+ description = "Detect dead / inactive timers. (Default: `false`)"
+ }
+ "--dead-timers-warning" = {
+ value = "$systemd_dead_timers_warning$"
+ description = "Time ago in seconds for dead / inactive timers to trigger a warning state (by default 6 days)."
+ }
+ "--dead-timers-critical" = {
+ value = "$systemd_dead_timers_critical$"
+ description = "Time ago in seconds for dead / inactive timers to trigger a critical state (by default 7 days)."
+ }
+ "-v" = {
+ set_if = {{ macro("$systemd_verbose_level$") == 1 }}
+ description = "Increase verbosity level (Accepted values: `1`, `2` or `3`). Defaults to none."
+ }
+ "-vv" = {
+ set_if = {{ macro("$systemd_verbose_level$") == 2 }}
+ }
+ "-vvv" = {
+ set_if = {{ macro("$systemd_verbose_level$") == 3 }}
+ }
+ }
+}
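
Because --exclude also uses repeat_key = true, several units or regular expressions can be excluded via one array. A minimal usage sketch (host name and unit names are placeholders):

  object Service "systemd" {
    import "generic-service"       // stock template from the default conf.d
    host_name = "web01"            // placeholder host
    check_command = "systemd"
    vars.systemd_exclude_unit = [ "user@.*", "tmp.mount" ]    // one --exclude per element
    vars.systemd_dead_timers = true                           // also report dead/inactive timers
  }
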
diff --git a/itl/plugins-contrib.d/virtualization.conf b/itl/plugins-contrib.d/virtualization.conf
new file mode 100644
index 0000000..530a875
--- /dev/null
+++ b/itl/plugins-contrib.d/virtualization.conf
@@ -0,0 +1,92 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+object CheckCommand "esxi_hardware" {
+ command = [ PluginContribDir + "/check_esxi_hardware.py" ]
+
+ arguments = {
+ "-H" = {
+ value = "$esxi_hardware_host$"
+ description = "report on HOST"
+ }
+ "-U" = {
+ value = "$esxi_hardware_user$"
+ description = "user to connect as"
+ }
+ "-P" = {
+ value = "$esxi_hardware_pass$"
+ description = "password"
+ }
+ "-C" = {
+ value = "$esxi_hardware_port$"
+ description = "cim port"
+ }
+ "-S" = {
+ value = "$esxi_hardware_sslproto$"
+ description = "Overwrite system default of SSL/TLS protocol to use. Must be one of: SSLv2, SSLv3, TLSv1, TLSv1.1, TLSv1.2, TLSv1.3"
+ }
+ "-V" = {
+ value = "$esxi_hardware_vendor$"
+ description = "Vendor code: auto, dell, hp, ibm, intel, or unknown"
+ }
+ "-I" = {
+ value = "$esxi_hardware_html$"
+ description = "generate html links for country XX"
+ }
+ "-i" = {
+ value = "$esxi_hardware_ignore$"
+ description = "comma-separated list of elements to ignore"
+ }
+ "-r" = {
+ set_if = "$esxi_hardware_regex$"
+ description = "Allow regular expression lookups of elements in ignore list"
+ }
+ "-p" = {
+ set_if = "$esxi_hardware_perfdata$"
+ description = "collect performance data for pnp4nagios"
+ }
+ "--format" = {
+ value = "$esxi_hardware_format$"
+ description = "Set output format to string or json (defaults to string)"
+ }
+ "--pretty" = {
+ set_if = "$esxi_hardware_pretty$"
+ description = "Show plugin output in a human readable format (in combination with --format json)"
+ }
+ "--no-power" = {
+ set_if = "$esxi_hardware_nopower$"
+ description = "don't collect power performance data"
+ }
+ "--no-volts" = {
+ set_if = "$esxi_hardware_novolts$"
+ description = "don't collect voltage performance data"
+ }
+ "--no-current" = {
+ set_if = "$esxi_hardware_nocurrent$"
+ description = "don't collect current performance data"
+ }
+ "--no-temp" = {
+ set_if = "$esxi_hardware_notemp$"
+ description = "don't collect temperature performance data"
+ }
+ "--no-fan" = {
+ set_if = "$esxi_hardware_nofan$"
+ description = "don't collect fan performance data"
+ }
+ "--no-lcd" = {
+ set_if = "$esxi_hardware_nolcd$"
+ description = "don't collect lcd/display status data"
+ }
+ }
+
+ vars.esxi_hardware_host = "$address$"
+ vars.esxi_hardware_port = 5989
+ vars.esxi_hardware_regex = false
+ vars.esxi_hardware_perfdata = false
+ vars.esxi_hardware_nopower = false
+ vars.esxi_hardware_novolts = false
+ vars.esxi_hardware_nocurrent = false
+ vars.esxi_hardware_notemp = false
+ vars.esxi_hardware_nofan = false
+ vars.esxi_hardware_nolcd = false
+}
+
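
esxi_hardware already defaults the target to $address$ and the CIM port to 5989, so a typical Service only adds credentials and, optionally, a vendor hint. A minimal usage sketch (host name and credentials are placeholders):

  object Service "esxi-hardware" {
    import "generic-service"                  // stock template from the default conf.d
    host_name = "esx01"                       // placeholder host
    check_command = "esxi_hardware"
    vars.esxi_hardware_user = "monitoring"    // placeholder CIM user
    vars.esxi_hardware_pass = "secret"        // placeholder password
    vars.esxi_hardware_vendor = "dell"        // auto, dell, hp, ibm, intel or unknown
    vars.esxi_hardware_perfdata = true
  }
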
diff --git a/itl/plugins-contrib.d/vmware.conf b/itl/plugins-contrib.d/vmware.conf
new file mode 100644
index 0000000..7017c83
--- /dev/null
+++ b/itl/plugins-contrib.d/vmware.conf
@@ -0,0 +1,1167 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+/**
+ * main vmware-esx template
+ */
+
+template CheckCommand "vmware-esx-command" {
+ command = [ PluginContribDir + "/check_vmware_esx" ]
+
+ arguments = {
+ "--ignore_unknown" = {
+ set_if = "$vmware_ignoreunknown$"
+ description = "Sometimes 3 (unknown) is returned from a component. But the check itself is ok. With this option the plugin will return OK (0) instead of UNKNOWN (3)."
+ }
+ "--ignore_warning" = {
+ set_if = "$vmware_ignorewarning$"
+ description = "Sometimes 2 (warning) is returned from a component. But the check itself is ok (from an operator view)."
+ }
+ "--timeout" = {
+ value = "$vmware_timeout$"
+ description = "Seconds before plugin times out (default: 90)."
+ }
+ "--trace" = {
+ set_if = "$vmware_trace$"
+ description = "Set verbosity level of vSphere API request/respond trace."
+ }
+ "--sessionfile" = {
+ value = "$vmware_sessionfile$"
+ description = "Vmware auth session file - no efect if $vmware_nosession$ var is true."
+ }
+ "--sessionfiledir" = {
+ value = "$vmware_sessionfiledir$"
+ description = "Vmware auth session file directory - no efect if $vmware_nosession$ var is true."
+ }
+ "--nosession" = {
+ set_if = "$vmware_nosession$"
+ description = "No auth session - IT SHOULD BE USED FOR TESTING PURPOSES ONLY!."
+ }
+ "--username" = {
+ value = "$vmware_username$"
+ description = "Username to connect with."
+ }
+ "--password" = {
+ value = "$vmware_password$"
+ description = "Password to use with the username."
+ }
+ "--authfile" = {
+ value = "$vmware_authfile$"
+ description = "Autentication file content: \
+ username=<username> \
+ password=<password>"
+ }
+ }
+
+ vars.vmware_timeout = "90"
+ vars.vmware_ignorewarning = false
+ vars.vmware_auth_nosession = false
+ vars.vmware_sessionfiledir = "/var/spool/icinga2/tmp"
+}
+
+
+/**
+ * VMware DC
+ */
+
+template CheckCommand "vmware-esx-dc" {
+ import "vmware-esx-command"
+
+ arguments += {
+ "--datacenter" = {
+ value = "$vmware_datacenter$"
+ required = true
+ description = "Datacenter/Vcenter hostname."
+ }
+ "--cluster" = {
+ value = "$vmware_cluster$"
+ required = false
+ description = "ESX or ESXi clustername."
+ }
+ "--sslport" = {
+ value = "$vmware_sslport$"
+ description = "If a SSL port different from 443 is used."
+ }
+ }
+
+ vars.vmware_sslport = "443"
+}
+
+object CheckCommand "vmware-esx-dc-volumes" {
+ import "vmware-esx-dc"
+
+ arguments += {
+ "--select" = "volumes"
+ "--subselect" = "$vmware_subselect$"
+ "--gigabyte" = {
+ set_if = "$vmware_gigabyte$"
+ description = "Output in GB instead of MB."
+ }
+ "--usedspace" = {
+ set_if = "$vmware_usedspace$"
+ description = "Output used space instead of free."
+ }
+ "--alertonly" = {
+ set_if = "$vmware_alertonly$"
+ description = "List only alerting volumes."
+ }
+ "--exclude" = "$vmware_exclude$"
+ "--include" = "$vmware_include$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+
+ vars.vmware_usedspace = true
+ vars.vmware_alertonly = false
+ vars.vmware_warn = "80%"
+ vars.vmware_crit = "90%"
+}
+
+object CheckCommand "vmware-esx-dc-runtime-info" {
+ import "vmware-esx-dc"
+
+ arguments += {
+ "--select" = {
+ value = "runtime"
+ description = "Shows all runtime info for the datacenter/Vcenter."
+ }
+ }
+}
+
+object CheckCommand "vmware-esx-dc-runtime-listvms" {
+ import "vmware-esx-dc"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "listvms"
+ "--exclude" = "$vmware_exclude$"
+ "--include" = "$vmware_include$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ "--multiline" = {
+ set_if = "$vmware_multiline$"
+ }
+ "--alertonly" = {
+ set_if = "$vmware_alertonly$"
+
+ }
+ }
+}
+
+object CheckCommand "vmware-esx-dc-runtime-listhost" {
+ import "vmware-esx-dc"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "listhost"
+ "--exclude" = "$vmware_exclude$"
+ "--include" = "$vmware_include$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ "--multiline" = {
+ set_if = "$vmware_multiline$"
+ }
+ "--alertonly" = {
+ set_if = "$vmware_alertonly$"
+ }
+ }
+}
+
+object CheckCommand "vmware-esx-dc-runtime-listcluster" {
+ import "vmware-esx-dc"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "listcluster"
+ "--exclude" = "$vmware_exclude$"
+ "--include" = "$vmware_include$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ "--multiline" = {
+ set_if = "$vmware_multiline$"
+ }
+ "--alertonly" = {
+ set_if = "$vmware_alertonly$"
+ }
+ }
+}
+
+object CheckCommand "vmware-esx-dc-runtime-issues" {
+ import "vmware-esx-dc"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "issues"
+ "--exclude" = "$vmware_exclude$"
+ "--include" = "$vmware_include$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ "--multiline" = {
+ set_if = "$vmware_multiline$"
+ }
+ }
+}
+
+object CheckCommand "vmware-esx-dc-runtime-status" {
+ import "vmware-esx-dc"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "status"
+ }
+}
+
+object CheckCommand "vmware-esx-dc-runtime-tools" {
+ import "vmware-esx-dc"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "tools"
+ "--poweredonly" = {
+ set_if = "$vmware_poweredonly$"
+ }
+ "--exclude" = "$vmware_exclude$"
+ "--include" = "$vmware_include$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ "--multiline" = {
+ set_if = "$vmware_multiline$"
+ }
+ "--alertonly" = {
+ set_if = "$vmware_alertonly$"
+ }
+ "--open_vm_tools_ok" = {
+ set_if = "$vmware_openvmtools$"
+ }
+ "--no_vm_tools_ok" = {
+ set_if = "$vmware_novmtools$"
+ }
+ }
+}
+
+
+/**
+ * VMware HOST
+ */
+
+template CheckCommand "vmware-esx-soap-host" {
+ import "vmware-esx-command"
+
+ arguments += {
+ "--host" = {
+ value = "$vmware_host$"
+ required = true
+ description = "ESX or ESXi hostname."
+ }
+ "--datacenter" = {
+ value = "$vmware_datacenter$"
+ required = false
+ description = "Datacenter/Vcenter hostname."
+ }
+ "--sslport" = {
+ value = "$vmware_sslport$"
+ description = "If a SSL port different from 443 is used."
+ }
+ }
+
+ vars.vmware_host = "$address$"
+ vars.vmware_sslport = "443"
+}
+
+object CheckCommand "vmware-esx-soap-host-check" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = {
+ value = "soap"
+ description = "Simple check to verify a successfull connection to VMWare SOAP API."
+ }
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-uptime" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "uptime"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-cpu" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "cpu"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+
+ vars.vmware_warn = "80%"
+ vars.vmware_crit = "90%"
+}
+
+
+object CheckCommand "vmware-esx-soap-host-cpu-ready" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "cpu"
+ "--subselect" = "ready"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-cpu-wait" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "cpu"
+ "--subselect" = "wait"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-cpu-usage" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "cpu"
+ "--subselect" = "usage"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+
+ vars.vmware_warn = "80%"
+ vars.vmware_crit = "90%"
+}
+
+object CheckCommand "vmware-esx-soap-host-mem" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "mem"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-mem-usage" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "mem"
+ "--subselect" = "usage"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+
+ vars.vmware_warn = "80%"
+ vars.vmware_crit = "90%"
+}
+
+object CheckCommand "vmware-esx-soap-host-mem-consumed" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "mem"
+ "--subselect" = "consumed"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-mem-swapused" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "mem"
+ "--subselect" = "swapused"
+ "--multiline" = {
+ set_if = "$vmware_multiline$"
+ }
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-mem-overhead" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "mem"
+ "--subselect" = "overhead"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-mem-memctl" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "mem"
+ "--subselect" = "memctl"
+ "--multiline" = {
+ set_if = "$vmware_multiline$"
+ }
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-net" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "net"
+ "--exclude" = "$vmware_exclude$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-net-usage" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "net"
+ "--subselect" = "usage"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-net-receive" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "net"
+ "--subselect" = "receive"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-net-send" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "net"
+ "--subselect" = "send"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-net-nic" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "net"
+ "--subselect" = "nic"
+ "--exclude" = "$vmware_exclude$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-volumes" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "volumes"
+ "--subselect" = "$vmware_subselect$"
+ "--gigabyte" = {
+ set_if = "$vmware_gigabyte$"
+ description = "Output in GB instead of MB."
+ }
+ "--usedspace" = {
+ set_if = "$vmware_usedspace$"
+ description = "Output used space instead of free."
+ }
+ "--alertonly" = {
+ set_if = "$vmware_alertonly$"
+ description = "List only alerting volumes."
+ }
+ "--exclude" = "$vmware_exclude$"
+ "--include" = "$vmware_include$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ "--spaceleft" = {
+ set_if = "$vmware_spaceleft$"
+ }
+ }
+
+ vars.vmware_usedspace = true
+ vars.vmware_alertonly = false
+ vars.vmware_warn = "80%"
+ vars.vmware_crit = "90%"
+}
+
+object CheckCommand "vmware-esx-soap-host-io" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "io"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-io-aborted" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "io"
+ "--subselect" = "aborted"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-io-resets" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "io"
+ "--subselect" = "resets"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-io-read" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "io"
+ "--subselect" = "read"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-io-read-latency" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "io"
+ "--subselect" = "read_latency"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-io-write" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "io"
+ "--subselect" = "write"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-io-write-latency" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "io"
+ "--subselect" = "write_latency"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-io-usage" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "io"
+ "--subselect" = "usage"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-io-kernel-latency" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "io"
+ "--subselect" = "kernel_latency"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-io-device-latency" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "io"
+ "--subselect" = "device_latency"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-io-queue-latency" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "io"
+ "--subselect" = "queue_latency"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-io-total-latency" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "io"
+ "--subselect" = "total_latency"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-media" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "hostmedia"
+ "--exclude" = "$vmware_exclude$"
+ "--include" = "$vmware_include$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ "--multiline" = {
+ set_if = "$vmware_multiline$"
+ }
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-service" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "service"
+ "--exclude" = "$vmware_exclude$"
+ "--include" = "$vmware_include$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ "--multiline" = {
+ set_if = "$vmware_multiline$"
+ }
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-runtime" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "runtime"
+ "--exclude" = "$vmware_exclude$"
+ "--include" = "$vmware_include$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-runtime-con" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "con"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-runtime-listvms" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "listvms"
+ "--exclude" = "$vmware_exclude$"
+ "--include" = "$vmware_include$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ "--multiline" = {
+ set_if = "$vmware_multiline$"
+ }
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-runtime-status" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "status"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-runtime-health" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "health"
+ "--exclude" = "$vmware_exclude$"
+ "--include" = "$vmware_include$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-runtime-health-listsensors" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "health"
+ "--exclude" = "$vmware_exclude$"
+ "--include" = "$vmware_include$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ "--listsensors" = {}
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-runtime-health-nostoragestatus" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "health"
+ "--exclude" = "$vmware_exclude$"
+ "--include" = "$vmware_include$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ "--nostoragestatus" = {}
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-runtime-storagehealth" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "storagehealth"
+ "--exclude" = "$vmware_exclude$"
+ "--include" = "$vmware_include$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ "--multiline" = {
+ set_if = "$vmware_multiline$"
+ }
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-runtime-temp" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "temp"
+ "--exclude" = "$vmware_exclude$"
+ "--include" = "$vmware_include$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ "--multiline" = {
+ set_if = "$vmware_multiline$"
+ }
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-runtime-issues" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "issues"
+ "--exclude" = "$vmware_exclude$"
+ "--include" = "$vmware_include$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ "--multiline" = {
+ set_if = "$vmware_multiline$"
+ }
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-storage" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "storage"
+ "--exclude" = "$vmware_exclude$"
+ "--include" = "$vmware_include$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-storage-adapter" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "storage"
+ "--subselect" = "adapter"
+ "--exclude" = "$vmware_exclude$"
+ "--include" = "$vmware_include$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ "--multiline" = {
+ set_if = "$vmware_multiline$"
+ }
+ }
+}
+
+object CheckCommand "vmware-esx-soap-host-storage-lun" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "storage"
+ "--subselect" = "lun"
+ "--exclude" = "$vmware_exclude$"
+ "--include" = "$vmware_include$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ "--alertonly" = {
+ set_if = "$vmware_alertonly$"
+ }
+ "--multiline" = {
+ set_if = "$vmware_multiline$"
+ }
+ }
+}
+
+
+object CheckCommand "vmware-esx-soap-host-storage-path" {
+ import "vmware-esx-soap-host"
+
+ arguments += {
+ "--select" = "storage"
+ "--subselect" = "path"
+ "--exclude" = "$vmware_exclude$"
+ "--include" = "$vmware_include$"
+ "--isregexp" = {
+ set_if = "$vmware_isregexp$"
+ }
+ "--alertonly" = {
+ set_if = "$vmware_alertonly$"
+ }
+ "--multiline" = {
+ set_if = "$vmware_multiline$"
+ }
+ "--standbyok" = {
+ set_if = "$vmware_standbyok$"
+ }
+ }
+}
+
+/**
+ * VMware VM
+ */
+template CheckCommand "vmware-esx-soap-vm" {
+ import "vmware-esx-command"
+
+ arguments += {
+ "--datacenter" = {
+ value = "$vmware_datacenter$"
+ description = "Datacenter/Vcenter hostname."
+ }
+ "--host" = {
+ value = "$vmware_host$"
+ description = "ESX or ESXi hostname."
+ }
+ "--name" = {
+ value = "$vmware_vmname$"
+ required = true
+ description = "Virtual machine name."
+ }
+ "--sslport" = {
+ value = "$vmware_sslport$"
+ description = "If a SSL port different from 443 is used."
+ }
+ }
+
+ vars.vmware_host = "$address$"
+ vars.vmware_sslport = "443"
+}
+
+object CheckCommand "vmware-esx-soap-vm-cpu" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "cpu"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-vm-cpu-ready" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "cpu"
+ "--subselect" = "ready"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-vm-cpu-wait" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "cpu"
+ "--subselect" = "wait"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-vm-cpu-usage" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "cpu"
+ "--subselect" = "usage"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+
+ vars.vmware_warn = "80%"
+ vars.vmware_crit = "90%"
+}
+
+object CheckCommand "vmware-esx-soap-vm-mem" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "mem"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-vm-mem-usage" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "mem"
+ "--subselect" = "usage"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+
+ vars.vmware_warn = "80%"
+ vars.vmware_crit = "90%"
+}
+
+object CheckCommand "vmware-esx-soap-vm-mem-consumed" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "mem"
+ "--subselect" = "consumed"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-vm-mem-memctl" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "mem"
+ "--subselect" = "memctl"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-vm-net" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "net"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-vm-net-usage" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "net"
+ "--subselect" = "usage"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-vm-net-receive" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "net"
+ "--subselect" = "receive"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-vm-net-send" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "net"
+ "--subselect" = "send"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-vm-io" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "io"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-vm-io-read" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "io"
+ "--subselect" = "read"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-vm-io-write" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "io"
+ "--subselect" = "write"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-vm-io-usage" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "io"
+ "--subselect" = "usage"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-vm-runtime" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "runtime"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-vm-runtime-con" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "con"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-vm-runtime-powerstate" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "powerstate"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-vm-runtime-status" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "status"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-vm-runtime-consoleconnections" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "consoleconnections"
+ "--warning" = "$vmware_warn$"
+ "--critical" = "$vmware_crit$"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-vm-runtime-gueststate" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "gueststate"
+ }
+}
+
+object CheckCommand "vmware-esx-soap-vm-runtime-tools" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "tools"
+ "--open_vm_tools_ok" = {
+ set_if = "$vmware_openvmtools$"
+ }
+ "--no_vm_tools_ok" = {
+ set_if = "$vmware_novmtools$"
+ }
+ }
+}
+
+object CheckCommand "vmware-esx-soap-vm-runtime-issues" {
+ import "vmware-esx-soap-vm"
+
+ arguments += {
+ "--select" = "runtime"
+ "--subselect" = "issues"
+ "--multiline" = {
+ set_if = "$vmware_multiline$"
+ }
+ }
+}
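
All vmware-esx-* commands inherit connection and authentication handling from the vmware-esx-command template, so a Service normally just names the target and supplies either --authfile or username/password, plus thresholds where the sub-check accepts them. A minimal usage sketch (host, vCenter name, file path and thresholds are placeholders):

  object Service "esx-cpu-usage" {
    import "generic-service"                            // stock template from the default conf.d
    host_name = "esx01"                                 // placeholder host
    check_command = "vmware-esx-soap-host-cpu-usage"
    vars.vmware_datacenter = "vcenter.example.com"      // placeholder vCenter
    vars.vmware_authfile = "/etc/icinga2/vmware.auth"   // placeholder file with username=/password= lines
    vars.vmware_warn = "70%"                            // override the 80%/90% defaults
    vars.vmware_crit = "85%"
  }
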
diff --git a/itl/plugins-contrib.d/web.conf b/itl/plugins-contrib.d/web.conf
new file mode 100644
index 0000000..f5e65d5
--- /dev/null
+++ b/itl/plugins-contrib.d/web.conf
@@ -0,0 +1,759 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+object CheckCommand "webinject" {
+ command = [ PluginContribDir + "/check_webinject" ]
+
+ arguments = {
+ "-c" = {
+ value = "$webinject_config_file$"
+ description = "There is a configuration file named 'config.xml' that is used to store configuration settings for your project. You can use this to specify which test case files to run and to set some constants and settings to be used by WebInject."
+ }
+
+ "-o" = {
+ value = "$webinject_output$"
+ description = "This option is followed by a directory name or a prefix to prepended to the output files. This is used to specify the location for writing output files (http.log, results.html, and results.xml). If a directory name is supplied (use either an absolute or relative path and make sure to add the trailing slash), all output files are written to this directory. If the trailing slash is ommitted, it is assumed to a prefix and this will be prepended to the output files. You may also use a combination of a directory and prefix."
+ }
+
+ "-n" = {
+ set_if = "$webinject_no_output$"
+ description = "Suppresses all output to STDOUT except the results summary."
+ }
+
+ "-t" = {
+ value = "$webinject_timeout$"
+ description = "The value [given in seconds] will be compared to the global time elapsed to run all the tests. If the tests have all been successful, but have taken more time than the 'globaltimeout' value, a warning message is sent back to Icinga."
+ }
+
+ "-r" = {
+ value = "$webinject_report_type$"
+ description = "This setting is used to enable output formatting that is compatible for use with specific external programs. The available values you can set this to are: nagios, mrtg, external and standard"
+ }
+
+ "-s" = {
+ value = "$webinject_key_value$"
+ description = "key=value"
+ }
+
+ "testcase_file" = {
+ value = "$webinject_testcase_file$"
+ description = "When you launch WebInject in console mode, you can optionally supply an argument for a testcase file to run. It will look for this file in the directory that webinject.pl resides in. If no filename is passed from the command line, it will look in config.xml for testcasefile declarations. If no files are specified, it will look for a default file named 'testcases.xml' in the current [webinject] directory. If none of these are found, the engine will stop and give you an error."
+ skip_key = true
+ order = 1
+ }
+ }
+}
+
+object CheckCommand "jmx4perl" {
+ command = [ PluginContribDir + "/check_jmx4perl" ]
+
+ arguments = {
+ "-u" = {
+ value = "$jmx4perl_url$"
+ description = "URL to agent web application (e.g. http://server:8080/jolokia/)"
+ }
+ "--product" = {
+ value = "$jmx4perl_product$"
+ description = "Name of app server product (e.g. jboss)"
+ }
+ "--alias" = {
+ value = "$jmx4perl_alias$"
+ description = "Alias name for attribute (e.g. MEMORY_HEAP_USED)"
+ }
+ "-m" = {
+ value = "$jmx4perl_mbean$"
+ description = "MBean name (e.g. java.lang:type=Memory)"
+ }
+ "-a" = {
+ value = "$jmx4perl_attribute$"
+ description = "Attribute name (e.g. HeapMemoryUsage)"
+ }
+ "-o" = {
+ value = "$jmx4perl_operation$"
+ description = "Operation to execute"
+ }
+ "--value" = {
+ value = "$jmx4perl_value$"
+ description = "Shortcut for specifying mbean/attribute/path. Slashes within names must be escaped with backslash"
+ }
+ "--delta" = {
+ value = "$jmx4perl_delta$"
+ description = "Switches on incremental mode. Optional argument are seconds used for normalizing."
+ }
+ "-p" = {
+ value = "$jmx4perl_path$"
+ description = "Inner path for extracting a single value from a complex attribute or return value (e.g. used)"
+ }
+ "--target" = {
+ value = "$jmx4perl_target$"
+ description = "JSR-160 Service URL specifing the target server"
+ }
+ "--target-user" = {
+ value = "$jmx4perl_target_user$"
+ description = "Username to use for JSR-160 connection"
+ }
+ "--target-password" = {
+ value = "$jmx4perl_target_password$"
+ description = "Password to use for JSR-160 connection"
+ }
+ "--proxy" = {
+ value = "$jmx4perl_proxy$"
+ description = "Proxy to use"
+ }
+ "--user" = {
+ value = "$jmx4perl_user$"
+ description = "User for HTTP authentication"
+ }
+ "--password" = {
+ value = "$jmx4perl_password$"
+ description = "Password for HTTP authentication"
+ }
+ "-n" = {
+ value = "$jmx4perl_name$"
+ description = "Name to use for output. Optional, by default a standard value based on the MBean and attribute will be used"
+ }
+ "--method" = {
+ value = "$jmx4perl_method$"
+ description = "HTTP method to use. Either get or post"
+ }
+ "-b" = {
+ value = "$jmx4perl_base$"
+ description = "Base name, which when given, interprets critical and warning values as relative in the range 0 .. 100%. Must be given in the form mbean/attribute/path"
+ }
+ "--base-mbean" = {
+ value = "$jmx4perl_base_mbean$"
+ description = "Base MBean name, interprets critical and warning values as relative in the range 0 .. 100%. Requires a base-attribute, too"
+ }
+ "--base-attribute" = {
+ value = "$jmx4perl_base_attribute$"
+ description = "Base attribute for a relative check. Used together with base-mbean"
+ }
+ "--base-path" = {
+ value = "$jmx4perl_base_path$"
+ description = "Base path for relative checks, where this path is used on the base attribute's value"
+ }
+ "--unit" = {
+ value = "$jmx4perl_unit$"
+ description = "Unit of measurement of the data retreived. Recognized values are [B|KB|MN|GB|TB] for memory values and [us|ms|s|m|h|d] for time values"
+ }
+ "--null" = {
+ value = "$jmx4perl_null$"
+ description = "Value which should be used in case of a null return value of an operation or attribute. Is null by default"
+ }
+ "--string" = {
+ set_if = "$jmx4perl_string$"
+ description = "Force string comparison for critical and warning checks"
+ }
+ "--numeric" = {
+ set_if = "$jmx4perl_numeric$"
+ description = "Force numeric comparison for critical and warning checks"
+ }
+ "-c" = {
+ value = "$jmx4perl_critical$"
+ description = "Critical threshold for value"
+ }
+ "-w" = {
+ value = "$jmx4perl_warning$"
+ description = "Warning threshold for value"
+ }
+ "-l" = {
+ value = "$jmx4perl_label$"
+ description = "Label to be used for printing out the result of the check. Placeholders can be used."
+ }
+ "--perfdata" = {
+ value = "$jmx4perl_perfdata$"
+ description = "Whether performance data should be omitted, which are included by default."
+ }
+ "--unknown-is-critical" = {
+ set_if = "$jmx4perl_unknown_is_critical$"
+ description = "Map UNKNOWN errors to errors with a CRITICAL status"
+ }
+ "-t" = {
+ value = "$jmx4perl_timeout$"
+ description = "Seconds before plugin times out (default: 15)"
+ }
+ "--config" = {
+ value = "$jmx4perl_config$"
+ description = "Path to configuration file."
+ }
+ "--server" = {
+ value = "$jmx4perl_server$"
+ description = "Symbolic name of server url to use, which needs to be configured in the configuration file."
+ }
+ "--check" = {
+ value = "$jmx4perl_check$"
+ description = "Name of a check configuration as defined in the configuration file, use array if you need arguments."
+ order = 1
+ repeat_key = false
+ }
+ }
+
+ vars.jmx4perl_url = "http://$address$:8080/jolokia"
+ vars.jmx4perl_string = false
+ vars.jmx4perl_numeric = false
+ vars.jmx4perl_unknown_is_critical = false
+}
+
+object CheckCommand "squid" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_squid" ]
+
+ arguments = {
+ "--host" = {
+ value = "$squid_hostname$"
+ description = "Name of the proxy to check (default: localhost)"
+ }
+ "--data" = {
+ value = "$squid_data$"
+ description = "Optional data to fetch (default: Connections) available data : Connections Cache Resources Memory FileDescriptors"
+ }
+ "--port" = {
+ value = "$squid_port$"
+ description = "Optional port number (default: 3128)"
+ }
+ "--user" = {
+ value = "$squid_user$"
+ description = "WWW user"
+ }
+ "--password" = {
+ value = "$squid_password$"
+ description = "WWW password"
+ }
+ "--warning" = {
+ value = "$squid_warning$"
+ description = "Warning threshold. See http://nagiosplug.sourceforge.net/developer-guidelines.html#THRESHOLDFORMAT for the threshold format."
+ }
+ "--critical" = {
+ value = "$squid_critical$"
+ description = "Critical threshold. See http://nagiosplug.sourceforge.net/developer-guidelines.html#THRESHOLDFORMAT for the threshold format."
+ }
+ "--squidclient" = {
+ value = "$squid_client$"
+ description = "Path of squidclient (default: /usr/bin/squidclient)"
+ }
+ "--timeout" = {
+ value = "$squid_timeout$"
+ description = "Seconds before plugin times out (default: 15)"
+ }
+ }
+
+ vars.squid_hostname = "$check_address$"
+ vars.squid_client = "/usr/bin/squidclient"
+}
+
+object CheckCommand "nginx_status" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_nginx_status.pl" ]
+
+ arguments = {
+ "--hostname" = {
+ value = "$nginx_status_host_address$"
+ description = "name or IP address of host to check"
+ }
+ "--port" = {
+ value = "$nginx_status_port$"
+ description = "the http port"
+ }
+ "--url" = {
+ value = "$nginx_status_url$"
+ description = "Specific URL to use, instead of the default 'http://<nginx_status_hostname>/nginx_status'"
+ }
+ "--servername" = {
+ value = "$nginx_status_servername$"
+ description = "ServerName, use it if you specified an IP in -H to match the good Virtualhost in your target"
+ }
+ "--ssl" = {
+ set_if = "$nginx_status_ssl$"
+ description = "Wether we should use HTTPS instead of HTTP"
+ }
+ "--disable-sslverifyhostname" = {
+ set_if = "$nginx_status_disable_sslverify$"
+ description = "Disable SSL hostname verification"
+ }
+ "--user" = {
+ value = "$nginx_status_user$"
+ description = "Username for basic auth"
+ }
+ "--pass" = {
+ value = "$nginx_status_pass$"
+ description = "Password for basic auth"
+ }
+ "--realm" = {
+ value = "$nginx_status_realm$"
+ description = "Realm for basic auth"
+ }
+ "--maxreach" = {
+ value = "$nginx_status_maxreach$"
+ description = "Number of max processes reached (since last check) that should trigger an alert"
+ }
+ "--timeout" = {
+ value = "$nginx_status_timeout$"
+ description = "timeout in seconds"
+ }
+ "--warn" = {
+ value = "$nginx_status_warn$"
+ description = "number of active connections, ReqPerSec or ConnPerSec that will cause a WARNING"
+ }
+ "--critical" = {
+ value = "$nginx_status_critical$"
+ description = "number of active connections, ReqPerSec or ConnPerSec that will cause a CRITICAL"
+ }
+ }
+
+ vars.nginx_status_host_address = "$check_address$"
+ vars.nginx_status_ssl = false
+ vars.nginx_status_disable_sslverify = false
+ vars.nginx_status_warn = "10000,100,200"
+ vars.nginx_status_critical = "20000,200,300"
+ vars.nginx_status_timeout = 15
+}
+
+object CheckCommand "apache-status" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_apache_status.pl" ]
+
+ arguments = {
+ "-H" = {
+ value = "$apache_status_address$"
+ description = "name or IP address of host to check"
+ }
+ "-p" = {
+ value = "$apache_status_port$"
+ description = "the http port"
+ }
+ "-s" = {
+ set_if = "$apache_status_ssl$"
+ description = "Whether we should use HTTPS instead of HTTP"
+ }
+ "-u" = {
+ value = "$apache_status_uri$"
+ description = "Specific URL to use, instead of the default 'http://<apache_status_address>/server-status'"
+ }
+ "-U" = {
+ value = "$apache_status_username$"
+ description = "username for basic auth"
+ }
+ "-P" = {
+ value = "$apache_status_password$"
+ description = "password for basic auth"
+ }
+ "-w" = {
+ value = "$apache_status_warning$"
+ description = "number of open slots, busy workers and idle workers that will cause a WARNING"
+ }
+ "-c" = {
+ value = "$apache_status_critical$"
+ description = "number of open slots, busy workers and idle workers that will cause a CRITICAL"
+ }
+ "-t" = {
+ value = "$apache_status_timeout$"
+ description = "timeout in seconds"
+ }
+ "-N" = {
+ set_if = "$apache_status_no_validate$"
+ description = "do not validate the SSL certificate chain"
+ }
+ "-R" = {
+ set_if = "$apache_status_unreachable$"
+ description = "CRITICAL if socket timed out or http code >= 500"
+ }
+ }
+
+ vars.apache_status_address = "$check_address$"
+ vars.apache_status_ssl = false
+}
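
apache-status defaults -H to $check_address$, so a Service mostly supplies the warning/critical triples described above (open slots, busy workers, idle workers). A minimal usage sketch (host name and thresholds are placeholders):

  object Service "apache-status" {
    import "generic-service"                    // stock template from the default conf.d
    host_name = "web01"                         // placeholder host
    check_command = "apache-status"
    vars.apache_status_warning = "20,100,100"   // open slots, busy workers, idle workers
    vars.apache_status_critical = "10,200,200"
  }
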
+
+object CheckCommand "ssl_cert" {
+ import "ipv4-or-ipv6"
+
+ command = [ PluginContribDir + "/check_ssl_cert" ]
+
+ arguments = {
+ "-H" = {
+ value = "$ssl_cert_address$"
+ description = "The host's address"
+ required = true
+ }
+ "-p" = {
+ value = "$ssl_cert_port$"
+ description = "TCP port number (default: 443)"
+ }
+ "--proxy" = {
+ value = "$ssl_cert_proxy$"
+ description = "Sets http_proxy and the s_client -proxy option"
+ }
+ "-f" = {
+ value = "$ssl_cert_file$"
+ description = "Local file path (works with -H localhost only)"
+ }
+ "-w" = {
+ value = "$ssl_cert_warn$"
+ description = "Minimum number of days a certificate has to be valid"
+ }
+ "-c" = {
+ value = "$ssl_cert_critical$"
+ description = "Minimum number of days a certificate has to be valid to issue a critical status"
+ }
+ "-n" = {
+ value = "$ssl_cert_cn$"
+ description = "Pattern to match the CN of the certificate"
+ }
+ "--altnames" = {
+ set_if = "$ssl_cert_altnames$"
+ description = "Matches the pattern specified in -n with alternate"
+ }
+ "-i" = {
+ value = "$ssl_cert_issuer$"
+ description = "Pattern to match the issuer of the certificate"
+ }
+ "-o" = {
+ value = "$ssl_cert_org$"
+ description = "Pattern to match the organization of the certificate"
+ }
+ "-e" = {
+ value = "$ssl_cert_email$"
+ description = "Pattern to match the email address contained in the certificate"
+ }
+ "-N" = {
+ set_if = "$ssl_cert_match_host$"
+ description = "Match CN with the host name"
+ }
+ "--serial" = {
+ value = "$ssl_cert_serial$"
+ description = "Pattern to match the serial number"
+ }
+ "-A" = {
+ set_if = "$ssl_cert_noauth$"
+ description = "Ignore authority warnings (expiration only)"
+ }
+ "-s" = {
+ set_if = "$ssl_cert_selfsigned$"
+ description = "Allow self-signed certificate"
+ }
+ "--sni" = {
+ value = "$ssl_cert_sni$"
+ description = "Sets the TLS SNI (Server Name Indication) extension"
+ }
+ "-t" = {
+ value = "$ssl_cert_timeout$"
+ description = "Seconds before connection times out (default: 15)"
+ }
+ "-P" = {
+ value = "$ssl_cert_protocol$"
+ description = "Use the specific protocol {http|smtp|pop3|imap|ftp|xmpp|irc|ldap} (default: http)"
+ }
+ "-C" = {
+ value = "$ssl_cert_clientssl_cert$"
+ description = "Use client certificate to authenticate"
+ }
+ "--clientpass" = {
+ value = "$ssl_cert_clientpass$"
+ description = "Set passphrase for client certificate"
+ }
+ "-L" = {
+ value = "$ssl_cert_ssllabs$"
+ description = "SSL Labs assessment"
+ }
+ "--ignore-ssl-labs-cache" = {
+ set_if = "$ssl_cert_ssllabs_nocache$"
+ description = "Forces a new check by SSL Labs"
+ }
+ "-r" = {
+ value = "$ssl_cert_rootssl_cert$"
+ description = "Root certificate or directory to be used for certificate validation"
+ }
+ "--ssl2" = {
+ set_if = {{
+ return macro("$ssl_cert_ssl_version$") == "ssl2"
+ }}
+ }
+ "--ssl3" = {
+ set_if = {{
+ return macro("$ssl_cert_ssl_version$") == "ssl3"
+ }}
+ }
+ "--tls1" = {
+ set_if = {{
+ return macro("$ssl_cert_ssl_version$") == "tls1"
+ }}
+ }
+ "--tls1_1" = {
+ set_if = {{
+ return macro("$ssl_cert_ssl_version$") == "tls1_1"
+ }}
+ }
+ "--tls1_2" = {
+ set_if = {{
+ return macro("$ssl_cert_ssl_version$") == "tls1_2"
+ }}
+ }
+ "--no_ssl2" = {
+ set_if = {{
+ var disable_versions = macro("$ssl_cert_disable_ssl_versions$")
+ if (typeof(disable_versions) == String) {
+ disable_versions = [ disable_versions ]
+ }
+ return "ssl2" in disable_versions
+ }}
+ }
+ "--no_ssl3" = {
+ set_if = {{
+ var disable_versions = macro("$ssl_cert_disable_ssl_versions$")
+ if (typeof(disable_versions) == String) {
+ disable_versions = [ disable_versions ]
+ }
+ return "ssl3" in disable_versions
+ }}
+ }
+ "--no_tls1" = {
+ set_if = {{
+ var disable_versions = macro("$ssl_cert_disable_ssl_versions$")
+ if (typeof(disable_versions) == String) {
+ disable_versions = [ disable_versions ]
+ }
+ return "tls1" in disable_versions
+ }}
+ }
+ "--no_tls1_1" = {
+ set_if = {{
+ var disable_versions = macro("$ssl_cert_disable_ssl_versions$")
+ if (typeof(disable_versions) == String) {
+ disable_versions = [ disable_versions ]
+ }
+ return "tls1_1" in disable_versions
+ }}
+ }
+ "--no_tls1_2" = {
+ set_if = {{
+ var disable_versions = macro("$ssl_cert_disable_ssl_versions$")
+ if (typeof(disable_versions) == String) {
+ disable_versions = [ disable_versions ]
+ }
+ return "tls1_2" in disable_versions
+ }}
+ }
+ "--ecdsa" = {
+ set_if = {{
+ return macro("$ssl_cert_cipher$") == "ecdsa"
+ }}
+ description = "Cipher selection: force ECDSA authentication"
+ }
+ "--rsa" = {
+ set_if = {{
+ return macro("$ssl_cert_cipher$") == "rsa"
+ }}
+ description = "Cipher selection: force RSA authentication"
+ }
+ "--ignore-sig-alg" = {
+ set_if = "$ssl_cert_ignore_signature$"
+ description = "Do not check if the certificate was signed with SHA1 or MD5"
+ }
+ "--ignore-exp" = {
+ set_if = "$ssl_cert_ignore_expiration$"
+ description = "Ignore expiration date"
+ }
+ "--ignore-host-cn" = {
+ set_if = "$ssl_cert_ignore_host_cn$"
+ description = "Do not complain if the CN does not match"
+ }
+ "--ignore-ocsp" = {
+ set_if = "$ssl_cert_ignore_ocsp$"
+ description = "Do not check revocation with OCSP"
+ }
+ "--ignore-ocsp-errors" = {
+ set_if = "$ssl_cert_ignore_ocsp_errors$"
+ description = "Continue if the OCSP status cannot be checked"
+ }
+ "--ignore-ocsp-timeout" = {
+ set_if = "$ssl_cert_ignore_ocsp_timeout$"
+ description = "Ignore OCSP result when timeout occurs while checking"
+ }
+ "--ignore-sct" = {
+ set_if = "$ssl_cert_ignore_sct$"
+ description = "Do not check for signed certificate timestamps"
+ }
+ "--ignore-tls-renegotiation" = {
+ set_if = "$ssl_cert_ignore_tls_renegotiation$"
+ description = "Do not check for renegotiation"
+ }
+
+ }
+
+ vars.ssl_cert_address = "$check_address$"
+ vars.ssl_cert_port = 443
+}
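+
+/* Illustrative usage sketch (not part of the upstream ITL): the
+ * ssl_cert_ssl_version and ssl_cert_disable_ssl_versions custom variables
+ * drive the set_if lambdas above. The host name and thresholds are
+ * assumptions for demonstration only.
+ *
+ * object Service "ssl_cert" {
+ *   host_name = "web01"
+ *   check_command = "ssl_cert"
+ *   vars.ssl_cert_warn = 30
+ *   vars.ssl_cert_critical = 14
+ *   vars.ssl_cert_ssl_version = "tls1_2"
+ *   vars.ssl_cert_disable_ssl_versions = [ "ssl2", "ssl3" ]
+ * }
+ */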
+
+object CheckCommand "varnish" {
+ import "plugin-check-command"
+ command = [ PluginContribDir + "/check_varnish" ]
+
+ arguments = {
+ "-n" = {
+ value = "$varnish_name$"
+ description = "Specify the Varnish instance name"
+ }
+ "-p" = {
+ value = "$varnish_param$"
+ description = "Specify the parameter to check (see below). The default is 'ratio'."
+ }
+ "-c" = {
+ value = "$varnish_critical$"
+ description = "Set critical threshold: [@][lo:]hi"
+ }
+ "-w" = {
+ value = "$varnish_warning$"
+ description = "Set warning threshold: [@][lo:]hi"
+ }
+ }
+}
+
+object CheckCommand "haproxy" {
+ import "plugin-check-command"
+ command = [ PluginContribDir + "/check_haproxy" ]
+
+ arguments = {
+ "--username" = {
+ value = "$haproxy_username$"
+ description = "Username for HTTP Auth"
+ }
+ "--password" = {
+ value = "$haproxy_password$"
+ description = "Password for HTTP Auth"
+ }
+ "--url" = {
+ value = "$haproxy_url$"
+ description = "URL of the HAProxy csv statistics page"
+ required = true
+ }
+ "--timeout" = {
+ value = "$haproxy_timeout$"
+ description = "Seconds before plugin times out (default: 10)"
+ }
+ "-w" = {
+ value = "$haproxy_warning$"
+ description = "Warning request time threshold (in seconds)"
+ }
+ "-c" = {
+ value = "$haproxy_critical$"
+ description = "Critical request time threshold (in seconds)"
+ }
+ }
+}
+
+object CheckCommand "haproxy_status" {
+ import "plugin-check-command"
+ command = [ PluginContribDir + "/check_haproxy_status" ]
+
+ arguments = {
+ "--defaults" = {
+ value = "$haproxy_status_default$"
+ description = "Set/Override the defaults which will be applied to all checks (unless specifically set by --overrides)."
+ }
+ "--frontends" = {
+ set_if = "$haproxy_status_frontends$"
+ description = "Enable checks for the frontends in HAProxy (that they're marked as OPEN and the session limits haven't been reached)."
+ }
+ "--nofrontends" = {
+ set_if = "$haproxy_status_nofrontends$"
+ description = "Disable checks for the frontends in HAProxy (that they're marked as OPEN and the session limits haven't been reached)."
+ }
+ "--backends" = {
+ set_if = "$haproxy_status_backends$"
+ description = "Enable checks for the backends in HAProxy (that they have the required quorum of servers, and that the session limits haven't been reached)."
+ }
+ "--nobackends" = {
+ set_if = "$haproxy_status_nobackends$"
+ description = "Disable checks for the backends in HAProxy (that they have the required quorum of servers, and that the session limits haven't been reached)."
+ }
+ "--servers" = {
+ set_if = "$haproxy_status_servers$"
+ description = "Enable checks for the servers in HAProxy (that they haven't reached the limits for the sessions or for queues)."
+ }
+ "--noservers" = {
+ set_if = "$haproxy_status_noservers$"
+ description = "Disable checks for the servers in HAProxy (that they haven't reached the limits for the sessions or for queues)."
+ }
+ "--overrides" = {
+ value = "$haproxy_status_overrides$"
+ description = "Override the defaults for a particular frontend or backend, in the form {name}:{override}, where {override} is the same format as --defaults above."
+ }
+ "--socket" = {
+ value = "$haproxy_status_socket$"
+ description = "Path to the socket check_haproxy_status should connect to"
+ required = true
+ }
+ }
+}
+
+object CheckCommand "phpfpm_status" {
+ import "plugin-check-command"
+ command = [ PluginContribDir + "/check_phpfpm_status" ]
+
+ arguments = {
+ "-H" = {
+ value = "$phpfpm_status_hostname$"
+ description = "name or IP address of host to check"
+ required = true
+ }
+ "-p" = {
+ value = "$phpfpm_status_port$"
+ description = "HTTP port, or FastCGI port when using --fastcgi"
+ }
+ "-u" = {
+ value = "$phpfpm_status_url$"
+ description = "Specific URL (only the path part of it in fact) to use, instead of the default /fpm-status"
+ }
+ "-s" = {
+ value = "$phpfpm_status_servername$"
+ description = "ServerName (Host header of the HTTP request); use it if you specified an IP address in -H and need to match the correct virtual host on the target"
+ }
+ "-f" = {
+ set_if = "$phpfpm_status_fastcgi$"
+ description = "Connect directly to php-fpm via network or local socket, using fastcgi protocol instead of HTTP."
+ }
+ "-U" = {
+ value = "$phpfpm_status_user$"
+ description = "Username for basic auth"
+ }
+ "-P" = {
+ value = "$phpfpm_status_pass$"
+ description = "Password for basic auth"
+ }
+ "-r" = {
+ value = "$phpfpm_status_realm$"
+ description = "Realm for basic auth"
+ }
+ "-d" = {
+ set_if = "$phpfpm_status_debug$"
+ description = "Debug mode (show http request response)"
+ }
+ "-t" = {
+ value = "$phpfpm_status_timeout$"
+ description = "timeout in seconds (Default: 15)"
+ }
+ "-S" = {
+ set_if = "$phpfpm_status_ssl$"
+ description = "Whether we should use HTTPS instead of HTTP. Note that you can give some extra parameters to this setting. Default value is 'TLSv1' but you could use things like 'TLSv1_1' or 'TLSV1_2' (or even 'SSLv23:!SSLv2:!SSLv3' for old stuff)."
+ }
+ "-x" = {
+ set_if = "$phpfpm_status_verifyssl$"
+ description = "Verify the certificate and hostname from the SSL cert; default is 0 (no verification), set it to 1 to really perform SSL peer name and certificate checks."
+ }
+ "-X" = {
+ value = "$phpfpm_status_cacert$"
+ description = "Full path to the cacert.pem certificate authority used to verify SSL certificates (use with --verifyssl). If not given, the cacert from the Mozilla::CA CPAN module will be used."
+ }
+ "-w" = {
+ value = "$phpfpm_status_warn$"
+ description = "MIN_AVAILABLE_PROCESSES,PROC_MAX_REACHED,QUEUE_MAX_REACHED number of available workers, or max states reached that will cause a warning. -1 for no warning"
+ }
+ "-c" = {
+ value = "$phpfpm_status_critical$"
+ description = "MIN_AVAILABLE_PROCESSES,PROC_MAX_REACHED,QUEUE_MAX_REACHED number of available workers, or max states reached that will cause an error, -1 for no CRITICAL"
+ }
+ }
+
+ vars.phpfpm_status_hostname = "$address$"
+}
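+
+/* Illustrative usage sketch (not part of the upstream ITL): checking php-fpm
+ * directly over FastCGI. The host name, port and thresholds are assumptions
+ * for demonstration only; the warn/critical triples follow the format
+ * documented for -w and -c above.
+ *
+ * object Service "phpfpm_status" {
+ *   host_name = "app01"
+ *   check_command = "phpfpm_status"
+ *   vars.phpfpm_status_fastcgi = true
+ *   vars.phpfpm_status_port = 9000
+ *   vars.phpfpm_status_warn = "1,-1,-1"
+ *   vars.phpfpm_status_critical = "0,-1,-1"
+ * }
+ */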
diff --git a/itl/plugins-contrib.d/windows.conf b/itl/plugins-contrib.d/windows.conf
new file mode 100644
index 0000000..a52eadc
--- /dev/null
+++ b/itl/plugins-contrib.d/windows.conf
@@ -0,0 +1,28 @@
+
+object CheckCommand "file-age-windows" {
+ command = [ PluginDir + "/check_file_age.cmd" ]
+
+ arguments = {
+ "file" = {
+ skip_key = true
+ order = 0
+ value = "$file_age_win_file$"
+ description = "File name and location"
+ required = true
+ }
+ "warning" = {
+ skip_key = true
+ order = 1
+ value = "$file_age_win_warning$"
+ description = "Warning threshold of file age in seconds"
+ required = true
+ }
+ "critical" = {
+ skip_key = true
+ order = 2
+ value = "$file_age_win_critical$"
+ description = "Critical threshold of file age in seconds"
+ required = true
+ }
+ }
+}
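+
+/* Illustrative usage sketch (not part of the upstream ITL): all three
+ * arguments above are positional (skip_key) and required. The file path and
+ * thresholds are assumptions for demonstration only.
+ *
+ * object Service "backup-file-age" {
+ *   host_name = "winsrv01"
+ *   check_command = "file-age-windows"
+ *   vars.file_age_win_file = "C:\\backup\\daily.zip"
+ *   vars.file_age_win_warning = 86400
+ *   vars.file_age_win_critical = 172800
+ * }
+ */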
diff --git a/itl/windows-plugins b/itl/windows-plugins
new file mode 100644
index 0000000..a6e00db
--- /dev/null
+++ b/itl/windows-plugins
@@ -0,0 +1,3 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+include "command-plugins-windows.conf"
diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt
new file mode 100644
index 0000000..aadbb39
--- /dev/null
+++ b/lib/CMakeLists.txt
@@ -0,0 +1,60 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+add_subdirectory(base)
+add_subdirectory(cli)
+add_subdirectory(config)
+add_subdirectory(remote)
+add_subdirectory(icinga)
+add_subdirectory(methods)
+
+if(ICINGA2_WITH_CHECKER)
+ add_subdirectory(checker)
+endif()
+
+if(ICINGA2_WITH_COMPAT)
+ add_subdirectory(compat)
+endif()
+
+if(ICINGA2_WITH_MYSQL OR ICINGA2_WITH_PGSQL)
+ add_subdirectory(db_ido)
+endif()
+
+if(ICINGA2_WITH_MYSQL)
+ find_package(MySQL)
+
+ if(MYSQL_FOUND)
+ add_subdirectory(db_ido_mysql)
+ add_subdirectory(mysql_shim)
+ else()
+ message(FATAL_ERROR "You have selected MySQL support, but MySQL could not be found. You can disable the MySQL IDO module using -DICINGA2_WITH_MYSQL=OFF.")
+ endif()
+endif()
+
+if(ICINGA2_WITH_PGSQL)
+ find_package(PostgreSQL)
+
+ if(PostgreSQL_FOUND)
+ add_subdirectory(db_ido_pgsql)
+ add_subdirectory(pgsql_shim)
+ else()
+ message(FATAL_ERROR "You have selected PostgreSQL support, but PostgreSQL could not be found. You can disable the PostgreSQL IDO module using -DICINGA2_WITH_PGSQL=OFF.")
+ endif()
+endif()
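+
+# Example (illustrative, not part of the upstream build system): the IDO
+# modules above can be toggled at configure time, e.g. from an out-of-tree
+# build directory:
+#   cmake .. -DICINGA2_WITH_MYSQL=ON -DICINGA2_WITH_PGSQL=OFF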
+
+if(ICINGA2_WITH_LIVESTATUS)
+ add_subdirectory(livestatus)
+endif()
+
+if(ICINGA2_WITH_NOTIFICATION)
+ add_subdirectory(notification)
+endif()
+
+if(ICINGA2_WITH_PERFDATA)
+ add_subdirectory(perfdata)
+endif()
+
+if(ICINGA2_WITH_ICINGADB)
+ add_subdirectory(icingadb)
+endif()
+
+set(CPACK_NSIS_EXTRA_INSTALL_COMMANDS "${CPACK_NSIS_EXTRA_INSTALL_COMMANDS}" PARENT_SCOPE)
diff --git a/lib/base/CMakeLists.txt b/lib/base/CMakeLists.txt
new file mode 100644
index 0000000..e50e330
--- /dev/null
+++ b/lib/base/CMakeLists.txt
@@ -0,0 +1,160 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+mkclass_target(application.ti application-ti.cpp application-ti.hpp)
+mkclass_target(configobject.ti configobject-ti.cpp configobject-ti.hpp)
+mkclass_target(configuration.ti configuration-ti.cpp configuration-ti.hpp)
+mkclass_target(datetime.ti datetime-ti.cpp datetime-ti.hpp)
+mkclass_target(filelogger.ti filelogger-ti.cpp filelogger-ti.hpp)
+mkclass_target(function.ti function-ti.cpp function-ti.hpp)
+mkclass_target(journaldlogger.ti journaldlogger-ti.cpp journaldlogger-ti.hpp)
+mkclass_target(logger.ti logger-ti.cpp logger-ti.hpp)
+mkclass_target(perfdatavalue.ti perfdatavalue-ti.cpp perfdatavalue-ti.hpp)
+mkclass_target(streamlogger.ti streamlogger-ti.cpp streamlogger-ti.hpp)
+mkclass_target(sysloglogger.ti sysloglogger-ti.cpp sysloglogger-ti.hpp)
+
+set(base_SOURCES
+ i2-base.hpp
+ application.cpp application.hpp application-ti.hpp application-version.cpp application-environment.cpp
+ array.cpp array.hpp array-script.cpp
+ atomic.hpp
+ atomic-file.cpp atomic-file.hpp
+ base64.cpp base64.hpp
+ boolean.cpp boolean.hpp boolean-script.cpp
+ bulker.hpp
+ configobject.cpp configobject.hpp configobject-ti.hpp configobject-script.cpp
+ configtype.cpp configtype.hpp
+ configuration.cpp configuration.hpp configuration-ti.hpp
+ configwriter.cpp configwriter.hpp
+ console.cpp console.hpp
+ context.cpp context.hpp
+ convert.cpp convert.hpp
+ datetime.cpp datetime.hpp datetime-ti.hpp datetime-script.cpp
+ debug.hpp
+ debuginfo.cpp debuginfo.hpp
+ dependencygraph.cpp dependencygraph.hpp
+ dictionary.cpp dictionary.hpp dictionary-script.cpp
+ exception.cpp exception.hpp
+ fifo.cpp fifo.hpp
+ filelogger.cpp filelogger.hpp filelogger-ti.hpp
+ function.cpp function.hpp function-ti.hpp function-script.cpp functionwrapper.hpp
+ initialize.cpp initialize.hpp
+ io-engine.cpp io-engine.hpp
+ journaldlogger.cpp journaldlogger.hpp journaldlogger-ti.hpp
+ json.cpp json.hpp json-script.cpp
+ lazy-init.hpp
+ library.cpp library.hpp
+ loader.cpp loader.hpp
+ logger.cpp logger.hpp logger-ti.hpp
+ math-script.cpp
+ netstring.cpp netstring.hpp
+ networkstream.cpp networkstream.hpp
+ namespace.cpp namespace.hpp namespace-script.cpp
+ number.cpp number.hpp number-script.cpp
+ object.cpp object.hpp object-script.cpp
+ objectlock.cpp objectlock.hpp
+ object-packer.cpp object-packer.hpp
+ objecttype.cpp objecttype.hpp
+ perfdatavalue.cpp perfdatavalue.hpp perfdatavalue-ti.hpp
+ primitivetype.cpp primitivetype.hpp
+ process.cpp process.hpp
+ reference.cpp reference.hpp reference-script.cpp
+ registry.hpp
+ ringbuffer.cpp ringbuffer.hpp
+ scriptframe.cpp scriptframe.hpp
+ scriptglobal.cpp scriptglobal.hpp
+ scriptutils.cpp scriptutils.hpp
+ serializer.cpp serializer.hpp
+ shared.hpp
+ shared-memory.hpp
+ shared-object.hpp
+ singleton.hpp
+ socket.cpp socket.hpp
+ stacktrace.cpp stacktrace.hpp
+ statsfunction.hpp
+ stdiostream.cpp stdiostream.hpp
+ stream.cpp stream.hpp
+ streamlogger.cpp streamlogger.hpp streamlogger-ti.hpp
+ string.cpp string.hpp string-script.cpp
+ sysloglogger.cpp sysloglogger.hpp sysloglogger-ti.hpp
+ tcpsocket.cpp tcpsocket.hpp
+ threadpool.cpp threadpool.hpp
+ timer.cpp timer.hpp
+ tlsstream.cpp tlsstream.hpp
+ tlsutility.cpp tlsutility.hpp
+ type.cpp type.hpp typetype-script.cpp
+ unix.hpp
+ unixsocket.cpp unixsocket.hpp
+ utility.cpp utility.hpp
+ value.cpp value.hpp value-operators.cpp
+ win32.hpp
+ workqueue.cpp workqueue.hpp
+)
+
+if(WIN32)
+ mkclass_target(windowseventloglogger.ti windowseventloglogger-ti.cpp windowseventloglogger-ti.hpp)
+ list(APPEND base_SOURCES windowseventloglogger.cpp windowseventloglogger.hpp windowseventloglogger-ti.hpp)
+
+ # Generate a DLL containing message definitions for the Windows Event Viewer.
+ # See also: https://docs.microsoft.com/en-us/windows/win32/eventlog/reporting-an-event
+ add_custom_command(
+ OUTPUT windowseventloglogger-provider.rc windowseventloglogger-provider.h
+ COMMAND mc ARGS -U ${CMAKE_CURRENT_SOURCE_DIR}/windowseventloglogger-provider.mc
+ DEPENDS windowseventloglogger-provider.mc
+ )
+
+ list(APPEND base_SOURCES windowseventloglogger-provider.h)
+
+ add_custom_command(
+ OUTPUT windowseventloglogger-provider.res
+ COMMAND rc ARGS windowseventloglogger-provider.rc
+ DEPENDS windowseventloglogger-provider.rc
+ )
+
+ add_library(eventprovider MODULE windowseventloglogger-provider.res windowseventloglogger-provider.rc)
+ set_target_properties(eventprovider PROPERTIES LINKER_LANGUAGE CXX)
+ target_link_libraries(eventprovider PRIVATE -noentry)
+
+ install(TARGETS eventprovider LIBRARY DESTINATION ${CMAKE_INSTALL_SBINDIR})
+endif()
+
+set_property(
+ SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/application-version.cpp ${CMAKE_CURRENT_SOURCE_DIR}/journaldlogger.cpp
+ PROPERTY EXCLUDE_UNITY_BUILD TRUE
+)
+
+if(ICINGA2_UNITY_BUILD)
+ mkunity_target(base base base_SOURCES)
+endif()
+
+if(HAVE_SYSTEMD)
+ find_path(SYSTEMD_INCLUDE_DIR
+ NAMES systemd/sd-daemon.h
+ HINTS ${SYSTEMD_ROOT_DIR})
+ include_directories(${SYSTEMD_INCLUDE_DIR})
+ set_property(
+ SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/journaldlogger.cpp
+ APPEND PROPERTY COMPILE_DEFINITIONS
+ SD_JOURNAL_SUPPRESS_LOCATION
+ )
+endif()
+
+add_library(base OBJECT ${base_SOURCES})
+
+include_directories(${icinga2_SOURCE_DIR}/third-party/execvpe)
+link_directories(${icinga2_BINARY_DIR}/third-party/execvpe)
+
+include_directories(${icinga2_SOURCE_DIR}/third-party/mmatch)
+link_directories(${icinga2_BINARY_DIR}/third-party/mmatch)
+
+include_directories(${icinga2_SOURCE_DIR}/third-party/socketpair)
+link_directories(${icinga2_BINARY_DIR}/third-party/socketpair)
+
+set_target_properties (
+ base PROPERTIES
+ FOLDER Lib
+)
+
+install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_CACHEDIR}\")")
+install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_LOGDIR}/crash\")")
+
+set(CPACK_NSIS_EXTRA_INSTALL_COMMANDS "${CPACK_NSIS_EXTRA_INSTALL_COMMANDS}" PARENT_SCOPE)
diff --git a/lib/base/application-environment.cpp b/lib/base/application-environment.cpp
new file mode 100644
index 0000000..819783f
--- /dev/null
+++ b/lib/base/application-environment.cpp
@@ -0,0 +1,17 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/application.hpp"
+#include "base/scriptglobal.hpp"
+
+using namespace icinga;
+
+String Application::GetAppEnvironment()
+{
+ Value defaultValue = Empty;
+ return ScriptGlobal::Get("Environment", &defaultValue);
+}
+
+void Application::SetAppEnvironment(const String& name)
+{
+ ScriptGlobal::Set("Environment", name);
+}
diff --git a/lib/base/application-version.cpp b/lib/base/application-version.cpp
new file mode 100644
index 0000000..d17775b
--- /dev/null
+++ b/lib/base/application-version.cpp
@@ -0,0 +1,17 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/application.hpp"
+#include "icinga-version.h"
+#include "icinga-spec-version.h"
+
+using namespace icinga;
+
+String Application::GetAppVersion()
+{
+ return VERSION;
+}
+
+String Application::GetAppSpecVersion()
+{
+ return SPEC_VERSION;
+}
diff --git a/lib/base/application.cpp b/lib/base/application.cpp
new file mode 100644
index 0000000..89a0f55
--- /dev/null
+++ b/lib/base/application.cpp
@@ -0,0 +1,1238 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/application.hpp"
+#include "base/application-ti.cpp"
+#include "base/stacktrace.hpp"
+#include "base/timer.hpp"
+#include "base/logger.hpp"
+#include "base/exception.hpp"
+#include "base/objectlock.hpp"
+#include "base/utility.hpp"
+#include "base/loader.hpp"
+#include "base/debug.hpp"
+#include "base/type.hpp"
+#include "base/convert.hpp"
+#include "base/scriptglobal.hpp"
+#include "base/process.hpp"
+#include "base/tlsutility.hpp"
+#include <boost/algorithm/string/trim.hpp>
+#include <boost/exception/errinfo_api_function.hpp>
+#include <boost/exception/errinfo_errno.hpp>
+#include <boost/exception/errinfo_file_name.hpp>
+#include <boost/stacktrace.hpp>
+#include <sstream>
+#include <iostream>
+#include <fstream>
+#include <thread>
+#ifdef __linux__
+#include <sys/prctl.h>
+#endif /* __linux__ */
+#ifdef _WIN32
+#include <windows.h>
+#else /* _WIN32 */
+#include <signal.h>
+#endif /* _WIN32 */
+
+using namespace icinga;
+
+#ifdef _WIN32
+/* MSVC throws unhandled C++ exceptions as SEH exceptions with this specific error code.
+ * There seems to be no system header that actually defines this constant.
+ * See also https://devblogs.microsoft.com/oldnewthing/20160915-00/?p=94316
+ */
+#define EXCEPTION_CODE_CXX_EXCEPTION 0xe06d7363
+#endif /* _WIN32 */
+
+REGISTER_TYPE(Application);
+
+boost::signals2::signal<void ()> Application::OnReopenLogs;
+Application::Ptr Application::m_Instance = nullptr;
+bool Application::m_ShuttingDown = false;
+bool Application::m_RequestRestart = false;
+bool Application::m_RequestReopenLogs = false;
+pid_t Application::m_ReloadProcess = 0;
+
+#ifndef _WIN32
+pid_t Application::m_UmbrellaProcess = 0;
+#endif /* _WIN32 */
+
+static bool l_Restarting = false;
+static bool l_InExceptionHandler = false;
+int Application::m_ArgC;
+char **Application::m_ArgV;
+double Application::m_StartTime;
+bool Application::m_ScriptDebuggerEnabled = false;
+
+#ifdef _WIN32
+double Application::m_LastReloadFailed = 0;
+#else /* _WIN32 */
+SharedMemory<Application::AtomicTs> Application::m_LastReloadFailed (0);
+#endif /* _WIN32 */
+
+#ifdef _WIN32
+static LPTOP_LEVEL_EXCEPTION_FILTER l_DefaultUnhandledExceptionFilter = nullptr;
+#endif /* _WIN32 */
+
+/**
+ * Called once the configuration for this Application object has been loaded.
+ * Registers this object as the global application instance.
+ */
+void Application::OnConfigLoaded()
+{
+ m_PidFile = nullptr;
+
+ ASSERT(m_Instance == nullptr);
+ m_Instance = this;
+}
+
+/**
+ * Stops the application.
+ */
+void Application::Stop(bool runtimeRemoved)
+{
+ m_ShuttingDown = true;
+
+#ifdef _WIN32
+ WSACleanup();
+#endif /* _WIN32 */
+
+#ifdef _WIN32
+ ClosePidFile(true);
+#endif /* _WIN32 */
+
+ ObjectImpl<Application>::Stop(runtimeRemoved);
+}
+
+Application::~Application()
+{
+ m_Instance = nullptr;
+}
+
+void Application::Exit(int rc)
+{
+ std::cout.flush();
+ std::cerr.flush();
+
+ for (const Logger::Ptr& logger : Logger::GetLoggers()) {
+ logger->Flush();
+ }
+
+ UninitializeBase();
+ _exit(rc); // Yay, our static destructors are pretty much beyond repair at this point.
+}
+
+void Application::InitializeBase()
+{
+#ifdef _WIN32
+ /* disable GUI-based error messages for LoadLibrary() */
+ SetErrorMode(SEM_FAILCRITICALERRORS);
+
+ WSADATA wsaData;
+ if (WSAStartup(MAKEWORD(1, 1), &wsaData) != 0) {
+ BOOST_THROW_EXCEPTION(win32_error()
+ << boost::errinfo_api_function("WSAStartup")
+ << errinfo_win32_error(WSAGetLastError()));
+ }
+#else /* _WIN32 */
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = SIG_IGN;
+ sigaction(SIGPIPE, &sa, nullptr);
+#endif /* _WIN32 */
+
+ Loader::ExecuteDeferredInitializers();
+
+ /* Make sure the thread pool gets initialized. */
+ GetTP().Start();
+
+ /* Make sure the timer thread gets initialized. */
+ Timer::Initialize();
+}
+
+void Application::UninitializeBase()
+{
+ Timer::Uninitialize();
+
+ GetTP().Stop();
+}
+
+/**
+ * Retrieves a pointer to the application singleton object.
+ *
+ * @returns The application object.
+ */
+Application::Ptr Application::GetInstance()
+{
+ return m_Instance;
+}
+
+void Application::SetResourceLimits()
+{
+#ifdef __linux__
+ rlimit rl;
+
+# ifdef RLIMIT_NOFILE
+ rlim_t fileLimit = Configuration::RLimitFiles;
+
+ if (fileLimit != 0) {
+ if (fileLimit < (rlim_t)GetDefaultRLimitFiles()) {
+ Log(LogWarning, "Application")
+ << "The user-specified value for RLimitFiles cannot be smaller than the default value (" << GetDefaultRLimitFiles() << "). Using the default value instead.";
+ fileLimit = GetDefaultRLimitFiles();
+ }
+
+ rl.rlim_cur = fileLimit;
+ rl.rlim_max = rl.rlim_cur;
+
+ if (setrlimit(RLIMIT_NOFILE, &rl) < 0)
+ Log(LogWarning, "Application")
+ << "Failed to adjust resource limit for open file handles (RLIMIT_NOFILE) with error \"" << strerror(errno) << "\"";
+# else /* RLIMIT_NOFILE */
+ Log(LogNotice, "Application", "System does not support adjusting the resource limit for open file handles (RLIMIT_NOFILE)");
+# endif /* RLIMIT_NOFILE */
+ }
+
+# ifdef RLIMIT_NPROC
+ rlim_t processLimit = Configuration::RLimitProcesses;
+
+ if (processLimit != 0) {
+ if (processLimit < (rlim_t)GetDefaultRLimitProcesses()) {
+ Log(LogWarning, "Application")
+ << "The user-specified value for RLimitProcesses cannot be smaller than the default value (" << GetDefaultRLimitProcesses() << "). Using the default value instead.";
+ processLimit = GetDefaultRLimitProcesses();
+ }
+
+ rl.rlim_cur = processLimit;
+ rl.rlim_max = rl.rlim_cur;
+
+ if (setrlimit(RLIMIT_NPROC, &rl) < 0)
+ Log(LogWarning, "Application")
+ << "Failed to adjust resource limit for number of processes (RLIMIT_NPROC) with error \"" << strerror(errno) << "\"";
+# else /* RLIMIT_NPROC */
+ Log(LogNotice, "Application", "System does not support adjusting the resource limit for number of processes (RLIMIT_NPROC)");
+# endif /* RLIMIT_NPROC */
+ }
+
+# ifdef RLIMIT_STACK
+ int argc = Application::GetArgC();
+ char **argv = Application::GetArgV();
+ bool set_stack_rlimit = true;
+
+ for (int i = 0; i < argc; i++) {
+ if (strcmp(argv[i], "--no-stack-rlimit") == 0) {
+ set_stack_rlimit = false;
+ break;
+ }
+ }
+
+ if (getrlimit(RLIMIT_STACK, &rl) < 0) {
+ Log(LogWarning, "Application", "Could not determine resource limit for stack size (RLIMIT_STACK)");
+ rl.rlim_max = RLIM_INFINITY;
+ }
+
+ rlim_t stackLimit;
+
+ stackLimit = Configuration::RLimitStack;
+
+ if (stackLimit != 0) {
+ if (stackLimit < (rlim_t)GetDefaultRLimitStack()) {
+ Log(LogWarning, "Application")
+ << "The user-specified value for RLimitStack cannot be smaller than the default value (" << GetDefaultRLimitStack() << "). Using the default value instead.";
+ stackLimit = GetDefaultRLimitStack();
+ }
+
+ if (set_stack_rlimit)
+ rl.rlim_cur = stackLimit;
+ else
+ rl.rlim_cur = rl.rlim_max;
+
+ if (setrlimit(RLIMIT_STACK, &rl) < 0)
+ Log(LogWarning, "Application")
+ << "Failed to adjust resource limit for stack size (RLIMIT_STACK) with error \"" << strerror(errno) << "\"";
+ else if (set_stack_rlimit) {
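+ /* A changed RLIMIT_STACK only affects threads and processes created
+ * afterwards, not the already running main thread. Re-execute ourselves
+ * with "--no-stack-rlimit" appended so the new limit takes effect and the
+ * re-executed process does not try to raise the limit again. */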
+ char **new_argv = static_cast<char **>(malloc(sizeof(char *) * (argc + 2)));
+
+ if (!new_argv) {
+ perror("malloc");
+ Exit(EXIT_FAILURE);
+ }
+
+ new_argv[0] = argv[0];
+ new_argv[1] = strdup("--no-stack-rlimit");
+
+ if (!new_argv[1]) {
+ perror("strdup");
+ exit(1);
+ }
+
+ for (int i = 1; i < argc; i++)
+ new_argv[i + 1] = argv[i];
+
+ new_argv[argc + 1] = nullptr;
+
+ (void) execvp(new_argv[0], new_argv);
+ perror("execvp");
+ _exit(EXIT_FAILURE);
+ }
+# else /* RLIMIT_STACK */
+ Log(LogNotice, "Application", "System does not support adjusting the resource limit for stack size (RLIMIT_STACK)");
+# endif /* RLIMIT_STACK */
+ }
+#endif /* __linux__ */
+}
+
+int Application::GetArgC()
+{
+ return m_ArgC;
+}
+
+void Application::SetArgC(int argc)
+{
+ m_ArgC = argc;
+}
+
+char **Application::GetArgV()
+{
+ return m_ArgV;
+}
+
+void Application::SetArgV(char **argv)
+{
+ m_ArgV = argv;
+}
+
+/**
+ * Processes events for registered sockets and timers and calls whatever
+ * handlers have been set up for these events.
+ */
+void Application::RunEventLoop()
+{
+ double lastLoop = Utility::GetTime();
+
+ while (!m_ShuttingDown) {
+ if (m_RequestRestart) {
+ m_RequestRestart = false; // we are now handling the request, once is enough
+
+#ifdef _WIN32
+ // are we already restarting? ignore request if we already are
+ if (!l_Restarting) {
+ l_Restarting = true;
+ m_ReloadProcess = StartReloadProcess();
+ }
+#else /* _WIN32 */
+ Log(LogNotice, "Application")
+ << "Got reload command, forwarding to umbrella process (PID " << m_UmbrellaProcess << ")";
+
+ (void)kill(m_UmbrellaProcess, SIGHUP);
+#endif /* _WIN32 */
+ } else {
+ /* Watches for changes to the system time. Adjusts timers if necessary. */
+ Utility::Sleep(2.5);
+
+ if (m_RequestReopenLogs) {
+ Log(LogNotice, "Application", "Reopening log files");
+ m_RequestReopenLogs = false;
+ OnReopenLogs();
+ }
+
+ double now = Utility::GetTime();
+ double timeDiff = lastLoop - now;
+
+ if (std::fabs(timeDiff) > 15) {
+ /* We made a significant jump in time. */
+ Log(LogInformation, "Application")
+ << "We jumped "
+ << (timeDiff < 0 ? "forward" : "backward")
+ << " in time: " << std::fabs(timeDiff) << " seconds";
+
+ Timer::AdjustTimers(-timeDiff);
+ }
+
+ lastLoop = now;
+ }
+ }
+
+ Log(LogInformation, "Application", "Shutting down...");
+
+ ConfigObject::StopObjects();
+ Application::GetInstance()->OnShutdown();
+
+#ifdef I2_DEBUG
+ UninitializeBase(); // Inspired from Exit()
+#endif /* I2_DEBUG */
+}
+
+bool Application::IsShuttingDown()
+{
+ return m_ShuttingDown;
+}
+
+bool Application::IsRestarting()
+{
+ return l_Restarting;
+}
+
+void Application::OnShutdown()
+{
+ /* Nothing to do here. */
+}
+
+static void ReloadProcessCallbackInternal(const ProcessResult& pr)
+{
+ if (pr.ExitStatus != 0) {
+ Application::SetLastReloadFailed(Utility::GetTime());
+ Log(LogCritical, "Application", "Found error in config: reloading aborted");
+ }
+#ifdef _WIN32
+ else
+ Application::Exit(7); /* keep this exit code in sync with icinga-app */
+#endif /* _WIN32 */
+}
+
+static void ReloadProcessCallback(const ProcessResult& pr)
+{
+ l_Restarting = false;
+
+ std::thread t([pr]() { ReloadProcessCallbackInternal(pr); });
+ t.detach();
+}
+
+pid_t Application::StartReloadProcess()
+{
+ // prepare arguments
+ ArrayData args;
+ args.push_back(GetExePath(m_ArgV[0]));
+
+ for (int i=1; i < Application::GetArgC(); i++) {
+ if (std::string(Application::GetArgV()[i]) != "--reload-internal")
+ args.push_back(Application::GetArgV()[i]);
+ else
+ i++; // the next parameter after --reload-internal is the pid, remove that too
+ }
+
+#ifndef _WIN32
+ args.push_back("--reload-internal");
+ args.push_back(Convert::ToString(Utility::GetPid()));
+#else /* _WIN32 */
+ args.push_back("--validate");
+#endif /* _WIN32 */
+
+ double reloadTimeout = Application::GetReloadTimeout();
+
+ Process::Ptr process = new Process(Process::PrepareCommand(new Array(std::move(args))));
+ process->SetTimeout(reloadTimeout);
+ process->Run(&ReloadProcessCallback);
+
+ Log(LogInformation, "Application")
+ << "Got reload command: Started new instance with PID '"
+ << (unsigned long)(process->GetPID()) << "' (timeout is "
+ << reloadTimeout << "s).";
+
+ return process->GetPID();
+}
+
+/**
+ * Signals the application to shut down during the next
+ * execution of the event loop.
+ */
+void Application::RequestShutdown()
+{
+ Log(LogInformation, "Application", "Received request to shut down.");
+
+ m_ShuttingDown = true;
+}
+
+/**
+ * Signals the application to restart during the next
+ * execution of the event loop.
+ */
+void Application::RequestRestart()
+{
+ m_RequestRestart = true;
+}
+
+/**
+ * Signals the application to reopen log files during the
+ * next execution of the event loop.
+ */
+void Application::RequestReopenLogs()
+{
+ m_RequestReopenLogs = true;
+}
+
+#ifndef _WIN32
+/**
+ * Sets the PID of the Icinga umbrella process.
+ *
+ * @param pid The PID of the Icinga umbrella process.
+ */
+void Application::SetUmbrellaProcess(pid_t pid)
+{
+ m_UmbrellaProcess = pid;
+}
+#endif /* _WIN32 */
+
+/**
+ * Retrieves the full path of the executable.
+ *
+ * @param argv0 The first command-line argument.
+ * @returns The path.
+ */
+String Application::GetExePath(const String& argv0)
+{
+ String executablePath;
+
+#ifndef _WIN32
+ char buffer[MAXPATHLEN];
+ if (!getcwd(buffer, sizeof(buffer))) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("getcwd")
+ << boost::errinfo_errno(errno));
+ }
+
+ String workingDirectory = buffer;
+
+ if (argv0[0] != '/')
+ executablePath = workingDirectory + "/" + argv0;
+ else
+ executablePath = argv0;
+
+ bool foundSlash = false;
+ for (size_t i = 0; i < argv0.GetLength(); i++) {
+ if (argv0[i] == '/') {
+ foundSlash = true;
+ break;
+ }
+ }
+
+ if (!foundSlash) {
+ String pathEnv = Utility::GetFromEnvironment("PATH");
+ if (!pathEnv.IsEmpty()) {
+ std::vector<String> paths = String(pathEnv).Split(":");
+
+ bool foundPath = false;
+ for (const String& path : paths) {
+ String pathTest = path + "/" + argv0;
+
+ if (access(pathTest.CStr(), X_OK) == 0) {
+ executablePath = pathTest;
+ foundPath = true;
+ break;
+ }
+ }
+
+ if (!foundPath) {
+ executablePath.Clear();
+ BOOST_THROW_EXCEPTION(std::runtime_error("Could not determine executable path."));
+ }
+ }
+ }
+
+ if (!realpath(executablePath.CStr(), buffer)) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("realpath")
+ << boost::errinfo_errno(errno)
+ << boost::errinfo_file_name(executablePath));
+ }
+
+ return buffer;
+#else /* _WIN32 */
+ char FullExePath[MAXPATHLEN];
+
+ if (!GetModuleFileName(nullptr, FullExePath, sizeof(FullExePath)))
+ BOOST_THROW_EXCEPTION(win32_error()
+ << boost::errinfo_api_function("GetModuleFileName")
+ << errinfo_win32_error(GetLastError()));
+
+ return FullExePath;
+#endif /* _WIN32 */
+}
+
+/**
+ * Display version and path information.
+ */
+void Application::DisplayInfoMessage(std::ostream& os, bool skipVersion)
+{
+ /* icinga-app prints its own version information, stack traces need it here. */
+ if (!skipVersion)
+ os << " Application version: " << GetAppVersion() << "\n\n";
+
+ os << "System information:\n"
+ << " Platform: " << Utility::GetPlatformName() << "\n"
+ << " Platform version: " << Utility::GetPlatformVersion() << "\n"
+ << " Kernel: " << Utility::GetPlatformKernel() << "\n"
+ << " Kernel version: " << Utility::GetPlatformKernelVersion() << "\n"
+ << " Architecture: " << Utility::GetPlatformArchitecture() << "\n";
+
+ Namespace::Ptr systemNS = ScriptGlobal::Get("System");
+
+ os << "\nBuild information:\n"
+ << " Compiler: " << systemNS->Get("BuildCompilerName") << " " << systemNS->Get("BuildCompilerVersion") << "\n"
+ << " Build host: " << systemNS->Get("BuildHostName") << "\n"
+ << " OpenSSL version: " << GetOpenSSLVersion() << "\n";
+
+ os << "\nApplication information:\n"
+ << "\nGeneral paths:\n"
+ << " Config directory: " << Configuration::ConfigDir << "\n"
+ << " Data directory: " << Configuration::DataDir << "\n"
+ << " Log directory: " << Configuration::LogDir << "\n"
+ << " Cache directory: " << Configuration::CacheDir << "\n"
+ << " Spool directory: " << Configuration::SpoolDir << "\n"
+ << " Run directory: " << Configuration::InitRunDir << "\n"
+ << "\nOld paths (deprecated):\n"
+ << " Installation root: " << Configuration::PrefixDir << "\n"
+ << " Sysconf directory: " << Configuration::SysconfDir << "\n"
+ << " Run directory (base): " << Configuration::RunDir << "\n"
+ << " Local state directory: " << Configuration::LocalStateDir << "\n"
+ << "\nInternal paths:\n"
+ << " Package data directory: " << Configuration::PkgDataDir << "\n"
+ << " State path: " << Configuration::StatePath << "\n"
+ << " Modified attributes path: " << Configuration::ModAttrPath << "\n"
+ << " Objects path: " << Configuration::ObjectsPath << "\n"
+ << " Vars path: " << Configuration::VarsPath << "\n"
+ << " PID path: " << Configuration::PidPath << "\n";
+
+}
+
+/**
+ * Displays a message that tells users what to do when they encounter a bug.
+ */
+void Application::DisplayBugMessage(std::ostream& os)
+{
+ os << "***" << "\n"
+ << "* This would indicate a runtime problem or configuration error. If you believe this is a bug in Icinga 2" << "\n"
+ << "* please submit a bug report at https://github.com/Icinga/icinga2 and include this stack trace as well as any other" << "\n"
+ << "* information that might be useful in order to reproduce this problem." << "\n"
+ << "***" << "\n";
+}
+
+String Application::GetCrashReportFilename()
+{
+ return Configuration::LogDir + "/crash/report." + Convert::ToString(Utility::GetTime());
+}
+
+
+void Application::AttachDebugger(const String& filename, bool interactive)
+{
+#ifndef _WIN32
+#ifdef __linux__
+ prctl(PR_SET_DUMPABLE, 1);
+#endif /* __linux __ */
+
+ String my_pid = Convert::ToString(Utility::GetPid());
+
+ pid_t pid = fork();
+
+ if (pid < 0) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("fork")
+ << boost::errinfo_errno(errno));
+ }
+
+ if (pid == 0) {
+ if (!interactive) {
+ int fd = open(filename.CStr(), O_CREAT | O_RDWR | O_APPEND, 0600);
+
+ if (fd < 0) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("open")
+ << boost::errinfo_errno(errno)
+ << boost::errinfo_file_name(filename));
+ }
+
+ if (fd != 1) {
+ /* redirect stdout to the file */
+ dup2(fd, 1);
+ close(fd);
+ }
+
+ /* redirect stderr to stdout */
+ if (fd != 2)
+ close(2);
+
+ dup2(1, 2);
+ }
+
+ char **argv;
+ char *my_pid_str = strdup(my_pid.CStr());
+
+ if (interactive) {
+ const char *uargv[] = {
+ "gdb",
+ "-p",
+ my_pid_str,
+ nullptr
+ };
+
+ argv = const_cast<char **>(uargv);
+
+ (void) execvp(argv[0], argv);
+ } else {
+ const char *uargv[] = {
+ "gdb",
+ "--batch",
+ "-p",
+ my_pid_str,
+ "-ex",
+ "thread apply all bt full",
+ "-ex",
+ "detach",
+ "-ex",
+ "quit",
+ nullptr
+ };
+
+ argv = const_cast<char **>(uargv);
+
+ (void) execvp(argv[0], argv);
+ }
+
+ perror("Failed to launch GDB");
+ free(my_pid_str);
+ _exit(0);
+ }
+
+ int status;
+ if (waitpid(pid, &status, 0) < 0) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("waitpid")
+ << boost::errinfo_errno(errno));
+ }
+
+#ifdef __linux__
+ prctl(PR_SET_DUMPABLE, 0);
+#endif /* __linux __ */
+#else /* _WIN32 */
+ DebugBreak();
+#endif /* _WIN32 */
+}
+
+/**
+ * Signal handler for SIGUSR1. This signal causes Icinga to re-open
+ * its log files and is mainly for use by logrotate.
+ *
+ * @param - The signal number.
+ */
+void Application::SigUsr1Handler(int)
+{
+ Log(LogInformation, "Application")
+ << "Received USR1 signal, reopening application logs.";
+
+ RequestReopenLogs();
+}
+
+/**
+ * Signal handler for SIGABRT. Helps with debugging ASSERT()s.
+ *
+ * @param - The signal number.
+ */
+void Application::SigAbrtHandler(int)
+{
+#ifndef _WIN32
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = SIG_DFL;
+ sigaction(SIGABRT, &sa, nullptr);
+#endif /* _WIN32 */
+
+ std::cerr << "Caught SIGABRT." << std::endl
+ << "Current time: " << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", Utility::GetTime()) << std::endl
+ << std::endl;
+
+ String fname = GetCrashReportFilename();
+ String dirName = Utility::DirName(fname);
+
+ if (!Utility::PathExists(dirName)) {
+#ifndef _WIN32
+ if (mkdir(dirName.CStr(), 0700) < 0 && errno != EEXIST) {
+#else /*_ WIN32 */
+ if (mkdir(dirName.CStr()) < 0 && errno != EEXIST) {
+#endif /* _WIN32 */
+ std::cerr << "Could not create directory '" << dirName << "': Error " << errno << ", " << strerror(errno) << "\n";
+ }
+ }
+
+ bool interactive_debugger = Configuration::AttachDebugger;
+
+ if (!interactive_debugger) {
+ std::ofstream ofs;
+ ofs.open(fname.CStr());
+
+ Log(LogCritical, "Application")
+ << "Icinga 2 has terminated unexpectedly. Additional information can be found in '" << fname << "'" << "\n";
+
+ ofs << "Caught SIGABRT.\n"
+ << "Current time: " << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", Utility::GetTime()) << "\n\n";
+
+ DisplayInfoMessage(ofs);
+
+ ofs << "\nStacktrace:\n" << StackTraceFormatter(boost::stacktrace::stacktrace()) << "\n";
+
+ DisplayBugMessage(ofs);
+
+ ofs << "\n";
+ ofs.close();
+ } else {
+ Log(LogCritical, "Application", "Icinga 2 has terminated unexpectedly. Attaching debugger...");
+ }
+
+ AttachDebugger(fname, interactive_debugger);
+}
+
+#ifdef _WIN32
+/**
+ * Console control handler. Prepares the application for cleanly
+ * shutting down during the next execution of the event loop.
+ */
+BOOL WINAPI Application::CtrlHandler(DWORD type)
+{
+ Application::Ptr instance = Application::GetInstance();
+
+ if (!instance)
+ return TRUE;
+
+ instance->RequestShutdown();
+
+ SetConsoleCtrlHandler(nullptr, FALSE);
+ return TRUE;
+}
+
+bool Application::IsProcessElevated() {
+ BOOL fIsElevated = FALSE;
+ DWORD dwError = ERROR_SUCCESS;
+ HANDLE hToken = nullptr;
+
+ if (!OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &hToken))
+ dwError = GetLastError();
+ else {
+ TOKEN_ELEVATION elevation;
+ DWORD dwSize;
+
+ if (!GetTokenInformation(hToken, TokenElevation, &elevation, sizeof(elevation), &dwSize))
+ dwError = GetLastError();
+ else
+ fIsElevated = elevation.TokenIsElevated;
+ }
+
+ if (hToken) {
+ CloseHandle(hToken);
+ hToken = nullptr;
+ }
+
+ if (ERROR_SUCCESS != dwError) {
+ LPSTR mBuf = nullptr;
+ if (!FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
+ nullptr, dwError, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), mBuf, 0, nullptr))
+ BOOST_THROW_EXCEPTION(std::runtime_error("Failed to format error message, last error was: " + dwError));
+ else
+ BOOST_THROW_EXCEPTION(std::runtime_error(mBuf));
+ }
+
+ return fIsElevated;
+}
+#endif /* _WIN32 */
+
+/**
+ * Handler for unhandled exceptions.
+ */
+void Application::ExceptionHandler()
+{
+ if (l_InExceptionHandler)
+ for (;;)
+ Utility::Sleep(5);
+
+ l_InExceptionHandler = true;
+
+#ifndef _WIN32
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = SIG_DFL;
+ sigaction(SIGABRT, &sa, nullptr);
+#endif /* _WIN32 */
+
+ String fname = GetCrashReportFilename();
+ String dirName = Utility::DirName(fname);
+
+ if (!Utility::PathExists(dirName)) {
+#ifndef _WIN32
+ if (mkdir(dirName.CStr(), 0700) < 0 && errno != EEXIST) {
+#else /*_ WIN32 */
+ if (mkdir(dirName.CStr()) < 0 && errno != EEXIST) {
+#endif /* _WIN32 */
+ std::cerr << "Could not create directory '" << dirName << "': Error " << errno << ", " << strerror(errno) << "\n";
+ }
+ }
+
+ bool interactive_debugger = Configuration::AttachDebugger;
+
+ if (!interactive_debugger) {
+ std::ofstream ofs;
+ ofs.open(fname.CStr());
+
+ ofs << "Caught unhandled exception.\n"
+ << "Current time: " << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", Utility::GetTime()) << "\n\n";
+
+ DisplayInfoMessage(ofs);
+
+ try {
+ RethrowUncaughtException();
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "Application")
+ << DiagnosticInformation(ex, false) << "\n"
+ << "\n"
+ << "Additional information is available in '" << fname << "'" << "\n";
+
+ /* On platforms where HAVE_CXXABI_H is defined, we prefer to print the stack trace that was saved
+ * when the last exception was thrown. Everywhere else, we do not have this information so we
+ * collect a stack trace here, which might lack some information, for example when an exception
+ * is rethrown, but this is still better than nothing.
+ */
+ boost::stacktrace::stacktrace *stack = nullptr;
+#ifndef HAVE_CXXABI_H
+ boost::stacktrace::stacktrace local_stack;
+ stack = &local_stack;
+#endif /* HAVE_CXXABI_H */
+
+ ofs << "\n"
+ << DiagnosticInformation(ex, true, stack)
+ << "\n";
+ }
+
+ DisplayBugMessage(ofs);
+
+ ofs.close();
+ }
+
+ AttachDebugger(fname, interactive_debugger);
+
+ abort();
+}
+
+#ifdef _WIN32
+LONG CALLBACK Application::SEHUnhandledExceptionFilter(PEXCEPTION_POINTERS exi)
+{
+ /* If an unhandled C++ exception occurs with both a termination handler (std::set_terminate()) and an unhandled
+ * SEH filter (SetUnhandledExceptionFilter()) set, the latter one is called. However, our termination handler is
+ * better suited for dealing with C++ exceptions. In this case, the SEH exception will have a specific code and
+ * we can just call the default filter function which will take care of calling the termination handler.
+ */
+ if (exi->ExceptionRecord->ExceptionCode == EXCEPTION_CODE_CXX_EXCEPTION) {
+ return l_DefaultUnhandledExceptionFilter(exi);
+ }
+
+ if (l_InExceptionHandler)
+ return EXCEPTION_CONTINUE_SEARCH;
+
+ l_InExceptionHandler = true;
+
+ String fname = GetCrashReportFilename();
+ String dirName = Utility::DirName(fname);
+
+ if (!Utility::PathExists(dirName)) {
+#ifndef _WIN32
+ if (mkdir(dirName.CStr(), 0700) < 0 && errno != EEXIST) {
+#else /*_ WIN32 */
+ if (mkdir(dirName.CStr()) < 0 && errno != EEXIST) {
+#endif /* _WIN32 */
+ std::cerr << "Could not create directory '" << dirName << "': Error " << errno << ", " << strerror(errno) << "\n";
+ }
+ }
+
+ std::ofstream ofs;
+ ofs.open(fname.CStr());
+
+ Log(LogCritical, "Application")
+ << "Icinga 2 has terminated unexpectedly. Additional information can be found in '" << fname << "'";
+
+ ofs << "Caught unhandled SEH exception.\n"
+ << "Current time: " << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", Utility::GetTime()) << "\n\n";
+
+ DisplayInfoMessage(ofs);
+
+ std::ios::fmtflags savedflags(ofs.flags());
+ ofs << std::showbase << std::hex
+ << "\nSEH exception:\n"
+ << " Code: " << exi->ExceptionRecord->ExceptionCode << "\n"
+ << " Address: " << exi->ExceptionRecord->ExceptionAddress << "\n"
+ << " Flags: " << exi->ExceptionRecord->ExceptionFlags << "\n";
+ ofs.flags(savedflags);
+
+ ofs << "\nStacktrace:\n" << StackTraceFormatter(boost::stacktrace::stacktrace()) << "\n";
+
+ DisplayBugMessage(ofs);
+
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+#endif /* _WIN32 */
+
+/**
+ * Installs the exception handlers.
+ */
+void Application::InstallExceptionHandlers()
+{
+ std::set_terminate(&Application::ExceptionHandler);
+
+#ifndef _WIN32
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = &Application::SigAbrtHandler;
+ sa.sa_flags = SA_RESTART;
+ sigaction(SIGABRT, &sa, nullptr);
+#else /* _WIN32 */
+ l_DefaultUnhandledExceptionFilter = SetUnhandledExceptionFilter(&Application::SEHUnhandledExceptionFilter);
+#endif /* _WIN32 */
+}
+
+/**
+ * Runs the application.
+ *
+ * @returns The application's exit code.
+ */
+int Application::Run()
+{
+#ifndef _WIN32
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = &Application::SigUsr1Handler;
+ sa.sa_flags = SA_RESTART;
+ sigaction(SIGUSR1, &sa, nullptr);
+#else /* _WIN32 */
+ SetConsoleCtrlHandler(&Application::CtrlHandler, TRUE);
+#endif /* _WIN32 */
+
+#ifdef _WIN32
+ try {
+ UpdatePidFile(Configuration::PidPath);
+ } catch (const std::exception&) {
+ Log(LogCritical, "Application")
+ << "Cannot update PID file '" << Configuration::PidPath << "'. Aborting.";
+ return EXIT_FAILURE;
+ }
+#endif /* _WIN32 */
+
+ SetStartTime(Utility::GetTime());
+
+ return Main();
+}
+
+void Application::UpdatePidFile(const String& filename)
+{
+ UpdatePidFile(filename, Utility::GetPid());
+}
+
+/**
+ * Grabs the PID file lock and updates the PID. Terminates the application
+ * if the PID file is already locked by another instance of the application.
+ *
+ * @param filename The name of the PID file.
+ * @param pid The PID to write; default is the current PID
+ */
+void Application::UpdatePidFile(const String& filename, pid_t pid)
+{
+ ObjectLock olock(this);
+
+ if (m_PidFile)
+ fclose(m_PidFile);
+
+ /* There's just no sane way of getting a file descriptor for a
+ * C++ ofstream which is why we're using FILEs here. */
+ m_PidFile = fopen(filename.CStr(), "r+");
+
+ if (!m_PidFile)
+ m_PidFile = fopen(filename.CStr(), "w");
+
+ if (!m_PidFile) {
+ Log(LogCritical, "Application")
+ << "Could not open PID file '" << filename << "'.";
+ BOOST_THROW_EXCEPTION(std::runtime_error("Could not open PID file '" + filename + "'"));
+ }
+
+#ifndef _WIN32
+ int fd = fileno(m_PidFile);
+
+ Utility::SetCloExec(fd);
+
+ struct flock lock;
+
+ lock.l_start = 0;
+ lock.l_len = 0;
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+
+ if (fcntl(fd, F_SETLK, &lock) < 0) {
+ Log(LogCritical, "Application", "Could not lock PID file. Make sure that only one instance of the application is running.");
+
+ Application::Exit(EXIT_FAILURE);
+ }
+
+ if (ftruncate(fd, 0) < 0) {
+ Log(LogCritical, "Application")
+ << "ftruncate() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("ftruncate")
+ << boost::errinfo_errno(errno));
+ }
+#endif /* _WIN32 */
+
+ fprintf(m_PidFile, "%lu\n", (unsigned long)pid);
+ fflush(m_PidFile);
+}
+
+/**
+ * Closes the PID file. Does nothing if the PID file is not currently open.
+ */
+void Application::ClosePidFile(bool unlink)
+{
+ ObjectLock olock(this);
+
+ if (m_PidFile) {
+ if (unlink) {
+ String pidpath = Configuration::PidPath;
+ ::unlink(pidpath.CStr());
+ }
+
+ fclose(m_PidFile);
+ }
+
+ m_PidFile = nullptr;
+}
+
+/**
+ * Checks if another process currently owns the PID file and reads it.
+ *
+ * @param filename The name of the PID file.
+ * @returns The PID of the owning process, 0 if the PID file cannot be read or
+ *          contains no valid PID, or -1 if the file exists but is not locked
+ *          by any process (no other instance running; non-Windows only).
+ */
+pid_t Application::ReadPidFile(const String& filename)
+{
+ FILE *pidfile = fopen(filename.CStr(), "r");
+
+ if (!pidfile)
+ return 0;
+
+#ifndef _WIN32
+ int fd = fileno(pidfile);
+
+ struct flock lock;
+
+ lock.l_start = 0;
+ lock.l_len = 0;
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+
+ if (fcntl(fd, F_GETLK, &lock) < 0) {
+ int error = errno;
+ fclose(pidfile);
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("fcntl")
+ << boost::errinfo_errno(error));
+ }
+
+ if (lock.l_type == F_UNLCK) {
+ // nobody has locked the file: no icinga running
+ fclose(pidfile);
+ return -1;
+ }
+#endif /* _WIN32 */
+
+ pid_t runningpid;
+ int res = fscanf(pidfile, "%d", &runningpid);
+ fclose(pidfile);
+
+ // bogus result?
+ if (res != 1)
+ return 0;
+
+#ifdef _WIN32
+ HANDLE hProcess = OpenProcess(0, FALSE, runningpid);
+
+ if (!hProcess)
+ return 0;
+
+ CloseHandle(hProcess);
+#endif /* _WIN32 */
+
+ return runningpid;
+}
+
+int Application::GetDefaultRLimitFiles()
+{
+ return 16 * 1024;
+}
+
+int Application::GetDefaultRLimitProcesses()
+{
+ return 16 * 1024;
+}
+
+int Application::GetDefaultRLimitStack()
+{
+ return 256 * 1024;
+}
+
+double Application::GetReloadTimeout()
+{
+ return ScriptGlobal::Get("ReloadTimeout");
+}
+
+/**
+ * Returns the global thread pool.
+ *
+ * @returns The global thread pool.
+ */
+ThreadPool& Application::GetTP()
+{
+ static ThreadPool tp;
+ return tp;
+}
+
+double Application::GetStartTime()
+{
+ return m_StartTime;
+}
+
+void Application::SetStartTime(double ts)
+{
+ m_StartTime = ts;
+}
+
+double Application::GetUptime()
+{
+ return Utility::GetTime() - m_StartTime;
+}
+
+bool Application::GetScriptDebuggerEnabled()
+{
+ return m_ScriptDebuggerEnabled;
+}
+
+void Application::SetScriptDebuggerEnabled(bool enabled)
+{
+ m_ScriptDebuggerEnabled = enabled;
+}
+
+double Application::GetLastReloadFailed()
+{
+#ifdef _WIN32
+ return m_LastReloadFailed;
+#else /* _WIN32 */
+ return m_LastReloadFailed.Get().load();
+#endif /* _WIN32 */
+}
+
+void Application::SetLastReloadFailed(double ts)
+{
+#ifdef _WIN32
+ m_LastReloadFailed = ts;
+#else /* _WIN32 */
+ m_LastReloadFailed.Get().store(ts);
+#endif /* _WIN32 */
+}
+
+void Application::ValidateName(const Lazy<String>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<Application>::ValidateName(lvalue, utils);
+
+ if (lvalue() != "app")
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "name" }, "Application object must be named 'app'."));
+}
diff --git a/lib/base/application.hpp b/lib/base/application.hpp
new file mode 100644
index 0000000..f45c8bd
--- /dev/null
+++ b/lib/base/application.hpp
@@ -0,0 +1,170 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef APPLICATION_H
+#define APPLICATION_H
+
+#include "base/i2-base.hpp"
+#include "base/atomic.hpp"
+#include "base/application-ti.hpp"
+#include "base/logger.hpp"
+#include "base/configuration.hpp"
+#include "base/shared-memory.hpp"
+#include <cstdint>
+#include <iosfwd>
+#include <type_traits>
+
+namespace icinga
+{
+
+class ThreadPool;
+
+/**
+ * Abstract base class for applications.
+ *
+ * @ingroup base
+ */
+class Application : public ObjectImpl<Application> {
+public:
+ DECLARE_OBJECT(Application);
+
+ static boost::signals2::signal<void ()> OnReopenLogs;
+
+ ~Application() override;
+
+ static void InitializeBase();
+ static void UninitializeBase();
+
+ static Application::Ptr GetInstance();
+
+ static void Exit(int rc);
+
+ int Run();
+
+ /**
+ * Starts the application.
+ *
+ * @returns The exit code of the application.
+ */
+ virtual int Main() = 0;
+
+ static void SetResourceLimits();
+
+ static int GetArgC();
+ static void SetArgC(int argc);
+
+ static char **GetArgV();
+ static void SetArgV(char **argv);
+
+ static void InstallExceptionHandlers();
+
+ static void RequestShutdown();
+ static void RequestRestart();
+ static void RequestReopenLogs();
+
+#ifndef _WIN32
+ static void SetUmbrellaProcess(pid_t pid);
+#endif /* _WIN32 */
+
+ static bool IsShuttingDown();
+ static bool IsRestarting();
+
+ static void SetDebuggingSeverity(LogSeverity severity);
+ static LogSeverity GetDebuggingSeverity();
+
+ void UpdatePidFile(const String& filename);
+ void UpdatePidFile(const String& filename, pid_t pid);
+ void ClosePidFile(bool unlink);
+ static pid_t ReadPidFile(const String& filename);
+
+ static String GetExePath(const String& argv0);
+
+#ifdef _WIN32
+ static bool IsProcessElevated();
+#endif /* _WIN32 */
+
+ static int GetDefaultRLimitFiles();
+ static int GetDefaultRLimitProcesses();
+ static int GetDefaultRLimitStack();
+
+ static double GetReloadTimeout();
+
+ static ThreadPool& GetTP();
+
+ static String GetAppVersion();
+ static String GetAppSpecVersion();
+
+ static String GetAppEnvironment();
+ static void SetAppEnvironment(const String& name);
+
+ static double GetStartTime();
+ static void SetStartTime(double ts);
+
+ static double GetUptime();
+
+ static bool GetScriptDebuggerEnabled();
+ static void SetScriptDebuggerEnabled(bool enabled);
+
+ static double GetLastReloadFailed();
+ static void SetLastReloadFailed(double ts);
+
+ static void DisplayInfoMessage(std::ostream& os, bool skipVersion = false);
+
+protected:
+ void OnConfigLoaded() override;
+ void Stop(bool runtimeRemoved) override;
+
+ void RunEventLoop();
+
+ pid_t StartReloadProcess();
+
+ virtual void OnShutdown();
+
+ void ValidateName(const Lazy<String>& lvalue, const ValidationUtils& utils) final;
+
+private:
+ static Application::Ptr m_Instance; /**< The application instance. */
+
+ static bool m_ShuttingDown; /**< Whether the application is in the process of shutting down. */
+ static bool m_RequestRestart; /**< A restart was requested through SIGHUP */
+ static pid_t m_ReloadProcess; /**< The PID of a subprocess doing a reload, only valid when l_Restarting==true */
+ static bool m_RequestReopenLogs; /**< Whether we should re-open log files. */
+
+#ifndef _WIN32
+ static pid_t m_UmbrellaProcess; /**< The PID of the Icinga umbrella process */
+#endif /* _WIN32 */
+
+ static int m_ArgC; /**< The number of command-line arguments. */
+ static char **m_ArgV; /**< Command-line arguments. */
+ FILE *m_PidFile = nullptr; /**< The PID file */
+ static bool m_Debugging; /**< Whether debugging is enabled. */
+ static LogSeverity m_DebuggingSeverity; /**< Whether debugging severity is set. */
+ static double m_StartTime;
+ static double m_MainTime;
+ static bool m_ScriptDebuggerEnabled;
+#ifdef _WIN32
+ static double m_LastReloadFailed;
+#else /* _WIN32 */
+ typedef Atomic<std::conditional_t<Atomic<double>::is_always_lock_free, double, uint32_t>> AtomicTs;
+ static_assert(AtomicTs::is_always_lock_free);
+ static SharedMemory<AtomicTs> m_LastReloadFailed;
+#endif /* _WIN32 */
+
+#ifdef _WIN32
+ static BOOL WINAPI CtrlHandler(DWORD type);
+ static LONG WINAPI SEHUnhandledExceptionFilter(PEXCEPTION_POINTERS exi);
+#endif /* _WIN32 */
+
+ static void DisplayBugMessage(std::ostream& os);
+
+ static void SigAbrtHandler(int signum);
+ static void SigUsr1Handler(int signum);
+ static void ExceptionHandler();
+
+ static String GetCrashReportFilename();
+
+ static void AttachDebugger(const String& filename, bool interactive);
+};
+
+}
+
+#endif /* APPLICATION_H */
diff --git a/lib/base/application.ti b/lib/base/application.ti
new file mode 100644
index 0000000..3d5908a
--- /dev/null
+++ b/lib/base/application.ti
@@ -0,0 +1,14 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+
+library base;
+
+namespace icinga
+{
+
+abstract class Application : ConfigObject
+{
+};
+
+}
diff --git a/lib/base/array-script.cpp b/lib/base/array-script.cpp
new file mode 100644
index 0000000..a976683
--- /dev/null
+++ b/lib/base/array-script.cpp
@@ -0,0 +1,260 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/array.hpp"
+#include "base/function.hpp"
+#include "base/functionwrapper.hpp"
+#include "base/scriptframe.hpp"
+#include "base/objectlock.hpp"
+#include "base/exception.hpp"
+
+using namespace icinga;
+
+static double ArrayLen()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ return self->GetLength();
+}
+
+static void ArraySet(int index, const Value& value)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ self->Set(index, value);
+}
+
+static Value ArrayGet(int index)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ return self->Get(index);
+}
+
+static void ArrayAdd(const Value& value)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ self->Add(value);
+}
+
+static void ArrayRemove(int index)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ self->Remove(index);
+}
+
+static bool ArrayContains(const Value& value)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ return self->Contains(value);
+}
+
+static void ArrayClear()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ self->Clear();
+}
+
+static Array::Ptr ArraySort(const std::vector<Value>& args)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+
+ Array::Ptr arr = self->ShallowClone();
+
+ if (args.empty()) {
+ ObjectLock olock(arr);
+ std::sort(arr->Begin(), arr->End());
+ } else {
+ Function::Ptr function = args[0];
+ REQUIRE_NOT_NULL(function);
+
+ if (vframe->Sandboxed && !function->IsSideEffectFree())
+ BOOST_THROW_EXCEPTION(ScriptError("Sort function must be side-effect free."));
+
+ ObjectLock olock(arr);
+ std::sort(arr->Begin(), arr->End(), [&args](const Value& a, const Value& b) -> bool {
+ Function::Ptr cmp = args[0];
+ return cmp->Invoke({ a, b });
+ });
+ }
+
+ return arr;
+}
+
+static Array::Ptr ArrayShallowClone()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ return self->ShallowClone();
+}
+
+static Value ArrayJoin(const Value& separator)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ return self->Join(separator);
+}
+
+static Array::Ptr ArrayReverse()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ return self->Reverse();
+}
+
+static Array::Ptr ArrayMap(const Function::Ptr& function)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ REQUIRE_NOT_NULL(function);
+
+ if (vframe->Sandboxed && !function->IsSideEffectFree())
+ BOOST_THROW_EXCEPTION(ScriptError("Map function must be side-effect free."));
+
+ ArrayData result;
+
+ ObjectLock olock(self);
+ for (const Value& item : self) {
+ result.push_back(function->Invoke({ item }));
+ }
+
+ return new Array(std::move(result));
+}
+
+static Value ArrayReduce(const Function::Ptr& function)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ REQUIRE_NOT_NULL(function);
+
+ if (vframe->Sandboxed && !function->IsSideEffectFree())
+ BOOST_THROW_EXCEPTION(ScriptError("Reduce function must be side-effect free."));
+
+ if (self->GetLength() == 0)
+ return Empty;
+
+ Value result = self->Get(0);
+
+ ObjectLock olock(self);
+ for (size_t i = 1; i < self->GetLength(); i++) {
+ result = function->Invoke({ result, self->Get(i) });
+ }
+
+ return result;
+}
+
+static Array::Ptr ArrayFilter(const Function::Ptr& function)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ REQUIRE_NOT_NULL(function);
+
+ if (vframe->Sandboxed && !function->IsSideEffectFree())
+ BOOST_THROW_EXCEPTION(ScriptError("Filter function must be side-effect free."));
+
+ ArrayData result;
+
+ ObjectLock olock(self);
+ for (const Value& item : self) {
+ if (function->Invoke({ item }))
+ result.push_back(item);
+ }
+
+ return new Array(std::move(result));
+}
+
+static bool ArrayAny(const Function::Ptr& function)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ REQUIRE_NOT_NULL(function);
+
+ if (vframe->Sandboxed && !function->IsSideEffectFree())
+ BOOST_THROW_EXCEPTION(ScriptError("Filter function must be side-effect free."));
+
+ ObjectLock olock(self);
+ for (const Value& item : self) {
+ if (function->Invoke({ item }))
+ return true;
+ }
+
+ return false;
+}
+
+static bool ArrayAll(const Function::Ptr& function)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ REQUIRE_NOT_NULL(function);
+
+ if (vframe->Sandboxed && !function->IsSideEffectFree())
+ BOOST_THROW_EXCEPTION(ScriptError("Filter function must be side-effect free."));
+
+ ObjectLock olock(self);
+ for (const Value& item : self) {
+ if (!function->Invoke({ item }))
+ return false;
+ }
+
+ return true;
+}
+
+static Array::Ptr ArrayUnique()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ return self->Unique();
+}
+
+static void ArrayFreeze()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Array::Ptr self = static_cast<Array::Ptr>(vframe->Self);
+ self->Freeze();
+}
+
+Object::Ptr Array::GetPrototype()
+{
+ static Dictionary::Ptr prototype = new Dictionary({
+ { "len", new Function("Array#len", ArrayLen, {}, true) },
+ { "set", new Function("Array#set", ArraySet, { "index", "value" }) },
+ { "get", new Function("Array#get", ArrayGet, { "index" }) },
+ { "add", new Function("Array#add", ArrayAdd, { "value" }) },
+ { "remove", new Function("Array#remove", ArrayRemove, { "index" }) },
+ { "contains", new Function("Array#contains", ArrayContains, { "value" }, true) },
+ { "clear", new Function("Array#clear", ArrayClear) },
+ { "sort", new Function("Array#sort", ArraySort, { "less_cmp" }, true) },
+ { "shallow_clone", new Function("Array#shallow_clone", ArrayShallowClone, {}, true) },
+ { "join", new Function("Array#join", ArrayJoin, { "separator" }, true) },
+ { "reverse", new Function("Array#reverse", ArrayReverse, {}, true) },
+ { "map", new Function("Array#map", ArrayMap, { "func" }, true) },
+ { "reduce", new Function("Array#reduce", ArrayReduce, { "reduce" }, true) },
+ { "filter", new Function("Array#filter", ArrayFilter, { "func" }, true) },
+ { "any", new Function("Array#any", ArrayAny, { "func" }, true) },
+ { "all", new Function("Array#all", ArrayAll, { "func" }, true) },
+ { "unique", new Function("Array#unique", ArrayUnique, {}, true) },
+ { "freeze", new Function("Array#freeze", ArrayFreeze, {}) }
+ });
+
+ return prototype;
+}
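+
+/* Illustrative usage sketch: each prototype entry above becomes a method on
+ * Array values in the DSL, and the wrapper functions fetch the receiver from
+ * ScriptFrame::Self. A DSL expression along the lines of
+ *
+ *   [ 3, 1, 2 ].sort()
+ *
+ * therefore ends up in ArraySort() with vframe->Self holding that array. */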
diff --git a/lib/base/array.cpp b/lib/base/array.cpp
new file mode 100644
index 0000000..08e06fa
--- /dev/null
+++ b/lib/base/array.cpp
@@ -0,0 +1,380 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/array.hpp"
+#include "base/objectlock.hpp"
+#include "base/debug.hpp"
+#include "base/primitivetype.hpp"
+#include "base/dictionary.hpp"
+#include "base/configwriter.hpp"
+#include "base/convert.hpp"
+#include "base/exception.hpp"
+
+using namespace icinga;
+
+template class std::vector<Value>;
+
+REGISTER_PRIMITIVE_TYPE(Array, Object, Array::GetPrototype());
+
+Array::Array(const ArrayData& other)
+ : m_Data(other)
+{ }
+
+Array::Array(ArrayData&& other)
+ : m_Data(std::move(other))
+{ }
+
+Array::Array(std::initializer_list<Value> init)
+ : m_Data(init)
+{ }
+
+/**
+ * Retrieves a value from an array.
+ *
+ * @param index The index.
+ * @returns The value.
+ */
+Value Array::Get(SizeType index) const
+{
+ ObjectLock olock(this);
+
+ return m_Data.at(index);
+}
+
+/**
+ * Sets a value in the array.
+ *
+ * @param index The index.
+ * @param value The value.
+ * @param overrideFrozen Whether to allow modifying frozen arrays.
+ */
+void Array::Set(SizeType index, const Value& value, bool overrideFrozen)
+{
+ ObjectLock olock(this);
+
+ if (m_Frozen && !overrideFrozen)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Value in array must not be modified."));
+
+ m_Data.at(index) = value;
+}
+
+/**
+ * Sets a value in the array.
+ *
+ * @param index The index.
+ * @param value The value.
+ * @param overrideFrozen Whether to allow modifying frozen arrays.
+ */
+void Array::Set(SizeType index, Value&& value, bool overrideFrozen)
+{
+ ObjectLock olock(this);
+
+ if (m_Frozen && !overrideFrozen)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Array must not be modified."));
+
+ m_Data.at(index).Swap(value);
+}
+
+/**
+ * Adds a value to the array.
+ *
+ * @param value The value.
+ * @param overrideFrozen Whether to allow modifying frozen arrays.
+ */
+void Array::Add(Value value, bool overrideFrozen)
+{
+ ObjectLock olock(this);
+
+ if (m_Frozen && !overrideFrozen)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Array must not be modified."));
+
+ m_Data.push_back(std::move(value));
+}
+
+/**
+ * Returns an iterator to the beginning of the array.
+ *
+ * Note: Caller must hold the object lock while using the iterator.
+ *
+ * @returns An iterator.
+ */
+Array::Iterator Array::Begin()
+{
+ ASSERT(OwnsLock());
+
+ return m_Data.begin();
+}
+
+/**
+ * Returns an iterator to the end of the array.
+ *
+ * Note: Caller must hold the object lock while using the iterator.
+ *
+ * @returns An iterator.
+ */
+Array::Iterator Array::End()
+{
+ ASSERT(OwnsLock());
+
+ return m_Data.end();
+}
+
+/**
+ * Returns the number of elements in the array.
+ *
+ * @returns Number of elements.
+ */
+size_t Array::GetLength() const
+{
+ ObjectLock olock(this);
+
+ return m_Data.size();
+}
+
+/**
+ * Checks whether the array contains the specified value.
+ *
+ * @param value The value.
+ * @returns true if the array contains the value, false otherwise.
+ */
+bool Array::Contains(const Value& value) const
+{
+ ObjectLock olock(this);
+
+ return (std::find(m_Data.begin(), m_Data.end(), value) != m_Data.end());
+}
+
+/**
+ * Insert the given value at the specified index
+ *
+ * @param index The index
+ * @param value The value to add
+ * @param overrideFrozen Whether to allow modifying frozen arrays.
+ */
+void Array::Insert(SizeType index, Value value, bool overrideFrozen)
+{
+ ObjectLock olock(this);
+
+ ASSERT(index <= m_Data.size());
+
+ if (m_Frozen && !overrideFrozen)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Array must not be modified."));
+
+ m_Data.insert(m_Data.begin() + index, std::move(value));
+}
+
+/**
+ * Removes the specified index from the array.
+ *
+ * @param index The index.
+ * @param overrideFrozen Whether to allow modifying frozen arrays.
+ */
+void Array::Remove(SizeType index, bool overrideFrozen)
+{
+ ObjectLock olock(this);
+
+ if (m_Frozen && !overrideFrozen)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Array must not be modified."));
+
+ if (index >= m_Data.size())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Index to remove must be within bounds."));
+
+ m_Data.erase(m_Data.begin() + index);
+}
+
+/**
+ * Removes the item specified by the iterator from the array.
+ *
+ * @param it The iterator.
+ * @param overrideFrozen Whether to allow modifying frozen arrays.
+ */
+void Array::Remove(Array::Iterator it, bool overrideFrozen)
+{
+ ASSERT(OwnsLock());
+
+ if (m_Frozen && !overrideFrozen)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Array must not be modified."));
+
+ m_Data.erase(it);
+}
+
+void Array::Resize(SizeType newSize, bool overrideFrozen)
+{
+ ObjectLock olock(this);
+
+ if (m_Frozen && !overrideFrozen)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Array must not be modified."));
+
+ m_Data.resize(newSize);
+}
+
+void Array::Clear(bool overrideFrozen)
+{
+ ObjectLock olock(this);
+
+ if (m_Frozen && !overrideFrozen)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Array must not be modified."));
+
+ m_Data.clear();
+}
+
+void Array::Reserve(SizeType newSize, bool overrideFrozen)
+{
+ ObjectLock olock(this);
+
+ if (m_Frozen && !overrideFrozen)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Array must not be modified."));
+
+ m_Data.reserve(newSize);
+}
+
+void Array::CopyTo(const Array::Ptr& dest) const
+{
+ ObjectLock olock(this);
+ ObjectLock xlock(dest);
+
+ if (dest->m_Frozen)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Array must not be modified."));
+
+ std::copy(m_Data.begin(), m_Data.end(), std::back_inserter(dest->m_Data));
+}
+
+/**
+ * Makes a shallow copy of an array.
+ *
+ * @returns a copy of the array.
+ */
+Array::Ptr Array::ShallowClone() const
+{
+ Array::Ptr clone = new Array();
+ CopyTo(clone);
+ return clone;
+}
+
+/**
+ * Makes a deep clone of an array
+ * and its elements.
+ *
+ * @returns a copy of the array.
+ */
+Object::Ptr Array::Clone() const
+{
+ ArrayData arr;
+
+ ObjectLock olock(this);
+ for (const Value& val : m_Data) {
+ arr.push_back(val.Clone());
+ }
+
+ return new Array(std::move(arr));
+}
+
+Array::Ptr Array::Reverse() const
+{
+ Array::Ptr result = new Array();
+
+ ObjectLock olock(this);
+ ObjectLock xlock(result);
+
+ std::copy(m_Data.rbegin(), m_Data.rend(), std::back_inserter(result->m_Data));
+
+ return result;
+}
+
+void Array::Sort(bool overrideFrozen)
+{
+ ObjectLock olock(this);
+
+ if (m_Frozen && !overrideFrozen)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Array must not be modified."));
+
+ std::sort(m_Data.begin(), m_Data.end());
+}
+
+String Array::ToString() const
+{
+ std::ostringstream msgbuf;
+ ConfigWriter::EmitArray(msgbuf, 1, const_cast<Array *>(this));
+ return msgbuf.str();
+}
+
+Value Array::Join(const Value& separator) const
+{
+ Value result;
+ bool first = true;
+
+ ObjectLock olock(this);
+
+ for (const Value& item : m_Data) {
+ if (first) {
+ first = false;
+ } else {
+ result = result + separator;
+ }
+
+ result = result + item;
+ }
+
+ return result;
+}
+
+Array::Ptr Array::Unique() const
+{
+ std::set<Value> result;
+
+ ObjectLock olock(this);
+
+ for (const Value& item : m_Data) {
+ result.insert(item);
+ }
+
+ return Array::FromSet(result);
+}
+
+void Array::Freeze()
+{
+ ObjectLock olock(this);
+ m_Frozen = true;
+}
+
+Value Array::GetFieldByName(const String& field, bool sandboxed, const DebugInfo& debugInfo) const
+{
+ int index;
+
+ try {
+ index = Convert::ToLong(field);
+ } catch (...) {
+ return Object::GetFieldByName(field, sandboxed, debugInfo);
+ }
+
+ ObjectLock olock(this);
+
+ if (index < 0 || static_cast<size_t>(index) >= GetLength())
+ BOOST_THROW_EXCEPTION(ScriptError("Array index '" + Convert::ToString(index) + "' is out of bounds.", debugInfo));
+
+ return Get(index);
+}
+
+void Array::SetFieldByName(const String& field, const Value& value, bool overrideFrozen, const DebugInfo& debugInfo)
+{
+ ObjectLock olock(this);
+
+ int index = Convert::ToLong(field);
+
+ if (index < 0)
+ BOOST_THROW_EXCEPTION(ScriptError("Array index '" + Convert::ToString(index) + "' is out of bounds.", debugInfo));
+
+ if (static_cast<size_t>(index) >= GetLength())
+ Resize(index + 1, overrideFrozen);
+
+ Set(index, value, overrideFrozen);
+}
+
+Array::Iterator icinga::begin(const Array::Ptr& x)
+{
+ return x->Begin();
+}
+
+Array::Iterator icinga::end(const Array::Ptr& x)
+{
+ return x->End();
+}
diff --git a/lib/base/array.hpp b/lib/base/array.hpp
new file mode 100644
index 0000000..2c9a9dd
--- /dev/null
+++ b/lib/base/array.hpp
@@ -0,0 +1,117 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef ARRAY_H
+#define ARRAY_H
+
+#include "base/i2-base.hpp"
+#include "base/objectlock.hpp"
+#include "base/value.hpp"
+#include <boost/range/iterator.hpp>
+#include <vector>
+#include <set>
+
+namespace icinga
+{
+
+typedef std::vector<Value> ArrayData;
+
+/**
+ * An array of Value items.
+ *
+ * @ingroup base
+ */
+class Array final : public Object
+{
+public:
+ DECLARE_OBJECT(Array);
+
+ /**
+ * An iterator that can be used to iterate over array elements.
+ */
+ typedef std::vector<Value>::iterator Iterator;
+
+ typedef std::vector<Value>::size_type SizeType;
+
+ Array() = default;
+ Array(const ArrayData& other);
+ Array(ArrayData&& other);
+ Array(std::initializer_list<Value> init);
+
+ Value Get(SizeType index) const;
+ void Set(SizeType index, const Value& value, bool overrideFrozen = false);
+ void Set(SizeType index, Value&& value, bool overrideFrozen = false);
+ void Add(Value value, bool overrideFrozen = false);
+
+ Iterator Begin();
+ Iterator End();
+
+ size_t GetLength() const;
+ bool Contains(const Value& value) const;
+
+ void Insert(SizeType index, Value value, bool overrideFrozen = false);
+ void Remove(SizeType index, bool overrideFrozen = false);
+ void Remove(Iterator it, bool overrideFrozen = false);
+
+ void Resize(SizeType newSize, bool overrideFrozen = false);
+ void Clear(bool overrideFrozen = false);
+
+ void Reserve(SizeType newSize, bool overrideFrozen = false);
+
+ void CopyTo(const Array::Ptr& dest) const;
+ Array::Ptr ShallowClone() const;
+
+ static Object::Ptr GetPrototype();
+
+ template<typename T>
+ static Array::Ptr FromVector(const std::vector<T>& v)
+ {
+ Array::Ptr result = new Array();
+ ObjectLock olock(result);
+ std::copy(v.begin(), v.end(), std::back_inserter(result->m_Data));
+ return result;
+ }
+
+ template<typename T>
+ std::set<T> ToSet()
+ {
+ ObjectLock olock(this);
+ return std::set<T>(Begin(), End());
+ }
+
+ template<typename T>
+ static Array::Ptr FromSet(const std::set<T>& v)
+ {
+ Array::Ptr result = new Array();
+ ObjectLock olock(result);
+ std::copy(v.begin(), v.end(), std::back_inserter(result->m_Data));
+ return result;
+ }
+
+ Object::Ptr Clone() const override;
+
+ Array::Ptr Reverse() const;
+
+ void Sort(bool overrideFrozen = false);
+
+ String ToString() const override;
+ Value Join(const Value& separator) const;
+
+ Array::Ptr Unique() const;
+ void Freeze();
+
+ Value GetFieldByName(const String& field, bool sandboxed, const DebugInfo& debugInfo) const override;
+ void SetFieldByName(const String& field, const Value& value, bool overrideFrozen, const DebugInfo& debugInfo) override;
+
+private:
+ std::vector<Value> m_Data; /**< The data for the array. */
+ bool m_Frozen{false};
+};
+
+Array::Iterator begin(const Array::Ptr& x);
+Array::Iterator end(const Array::Ptr& x);
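+
+/* Illustrative usage sketch (the values are made up):
+ *
+ *   Array::Ptr arr = new Array({ 1, 2, 3 });
+ *   arr->Add(4);
+ *
+ *   ObjectLock olock(arr);
+ *   for (const Value& item : arr) {
+ *       // iterate while holding the object lock, as required by Begin()/End()
+ *   }
+ */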
+
+}
+
+extern template class std::vector<icinga::Value>;
+
+#endif /* ARRAY_H */
diff --git a/lib/base/atomic-file.cpp b/lib/base/atomic-file.cpp
new file mode 100644
index 0000000..762f384
--- /dev/null
+++ b/lib/base/atomic-file.cpp
@@ -0,0 +1,123 @@
+/* Icinga 2 | (c) 2022 Icinga GmbH | GPLv2+ */
+
+#include "base/atomic-file.hpp"
+#include "base/exception.hpp"
+#include "base/utility.hpp"
+#include <utility>
+
+#ifdef _WIN32
+# include <io.h>
+# include <windows.h>
+#else /* _WIN32 */
+# include <errno.h>
+# include <unistd.h>
+#endif /* _WIN32 */
+
+using namespace icinga;
+
+void AtomicFile::Write(String path, int mode, const String& content)
+{
+ AtomicFile af (path, mode);
+ af << content;
+ af.Commit();
+}
+
+AtomicFile::AtomicFile(String path, int mode) : m_Path(std::move(path))
+{
+ m_TempFilename = m_Path + ".tmp.XXXXXX";
+
+#ifdef _WIN32
+ m_Fd = Utility::MksTemp(&m_TempFilename[0]);
+#else /* _WIN32 */
+ m_Fd = mkstemp(&m_TempFilename[0]);
+#endif /* _WIN32 */
+
+ if (m_Fd < 0) {
+ auto error (errno);
+
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("mkstemp")
+ << boost::errinfo_errno(error)
+ << boost::errinfo_file_name(m_TempFilename));
+ }
+
+ try {
+ exceptions(failbit | badbit);
+
+ open(boost::iostreams::file_descriptor(
+ m_Fd,
+ // Rationale: https://github.com/boostorg/iostreams/issues/152
+ boost::iostreams::never_close_handle
+ ));
+
+ if (chmod(m_TempFilename.CStr(), mode) < 0) {
+ auto error (errno);
+
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("chmod")
+ << boost::errinfo_errno(error)
+ << boost::errinfo_file_name(m_TempFilename));
+ }
+ } catch (...) {
+ if (is_open()) {
+ close();
+ }
+
+ (void)::close(m_Fd);
+ (void)unlink(m_TempFilename.CStr());
+ throw;
+ }
+}
+
+AtomicFile::~AtomicFile()
+{
+ if (is_open()) {
+ try {
+ close();
+ } catch (...) {
+ // Destructor must not throw
+ }
+ }
+
+ if (m_Fd >= 0) {
+ (void)::close(m_Fd);
+ }
+
+ if (!m_TempFilename.IsEmpty()) {
+ (void)unlink(m_TempFilename.CStr());
+ }
+}
+
+void AtomicFile::Commit()
+{
+ flush();
+
+ auto h ((*this)->handle());
+
+#ifdef _WIN32
+ if (!FlushFileBuffers(h)) {
+ auto err (GetLastError());
+
+ BOOST_THROW_EXCEPTION(win32_error()
+ << boost::errinfo_api_function("FlushFileBuffers")
+ << errinfo_win32_error(err)
+ << boost::errinfo_file_name(m_TempFilename));
+ }
+#else /* _WIN32 */
+ if (fsync(h)) {
+ auto err (errno);
+
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("fsync")
+ << boost::errinfo_errno(err)
+ << boost::errinfo_file_name(m_TempFilename));
+ }
+#endif /* _WIN32 */
+
+ close();
+ (void)::close(m_Fd);
+ m_Fd = -1;
+
+ Utility::RenameFile(m_TempFilename, m_Path);
+ m_TempFilename = "";
+}
diff --git a/lib/base/atomic-file.hpp b/lib/base/atomic-file.hpp
new file mode 100644
index 0000000..c5c7897
--- /dev/null
+++ b/lib/base/atomic-file.hpp
@@ -0,0 +1,41 @@
+/* Icinga 2 | (c) 2022 Icinga GmbH | GPLv2+ */
+
+#ifndef ATOMIC_FILE_H
+#define ATOMIC_FILE_H
+
+#include "base/string.hpp"
+#include <boost/iostreams/device/file_descriptor.hpp>
+#include <boost/iostreams/stream.hpp>
+
+namespace icinga
+{
+
+/**
+ * Atomically replaces a file's content.
+ *
+ * @ingroup base
+ */
+class AtomicFile : public boost::iostreams::stream<boost::iostreams::file_descriptor>
+{
+public:
+ static void Write(String path, int mode, const String& content);
+
+ AtomicFile(String path, int mode);
+ ~AtomicFile();
+
+ inline const String& GetTempFilename() const noexcept
+ {
+ return m_TempFilename;
+ }
+
+ void Commit();
+
+private:
+ String m_Path;
+ String m_TempFilename;
+ int m_Fd;
+};
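+
+/* Illustrative usage sketch (file name and mode are examples only):
+ *
+ *   AtomicFile::Write("/var/lib/icinga2/example.state", 0644, "content\n");
+ *
+ * or, equivalently, stream into the temporary file and rename it into place:
+ *
+ *   AtomicFile file ("/var/lib/icinga2/example.state", 0644);
+ *   file << "content\n";
+ *   file.Commit();
+ */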
+
+}
+
+#endif /* ATOMIC_FILE_H */
diff --git a/lib/base/atomic.hpp b/lib/base/atomic.hpp
new file mode 100644
index 0000000..c8f169c
--- /dev/null
+++ b/lib/base/atomic.hpp
@@ -0,0 +1,91 @@
+/* Icinga 2 | (c) 2019 Icinga GmbH | GPLv2+ */
+
+#ifndef ATOMIC_H
+#define ATOMIC_H
+
+#include <atomic>
+#include <mutex>
+#include <type_traits>
+#include <utility>
+
+namespace icinga
+{
+
+/**
+ * Extends std::atomic with an atomic constructor.
+ *
+ * @ingroup base
+ */
+template<class T>
+class Atomic : public std::atomic<T> {
+public:
+ /**
+ * Like std::atomic#atomic, but operates atomically
+ *
+ * @param desired Initial value
+ */
+ inline Atomic(T desired)
+ {
+ this->store(desired);
+ }
+
+ /**
+ * Like std::atomic#atomic, but operates atomically
+ *
+ * @param desired Initial value
+ * @param order Initial store operation's memory order
+ */
+ inline Atomic(T desired, std::memory_order order)
+ {
+ this->store(desired, order);
+ }
+};
+
+/**
+ * Wraps any T into a std::atomic<T>-like interface that locks using a mutex.
+ *
+ * In contrast to std::atomic<T>, Locked<T> is also valid for types that are not trivially copyable.
+ * In case T is trivially copyable, std::atomic<T> is almost certainly the better choice.
+ *
+ * @ingroup base
+ */
+template<typename T>
+class Locked
+{
+public:
+ inline T load() const
+ {
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ return m_Value;
+ }
+
+ inline void store(T desired)
+ {
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ m_Value = std::move(desired);
+ }
+
+private:
+ mutable std::mutex m_Mutex;
+ T m_Value;
+};
+
+/**
+ * Type alias for std::atomic<T> if possible, otherwise Locked<T> is used as a fallback.
+ *
+ * @ingroup base
+ */
+template <typename T>
+using AtomicOrLocked =
+#if defined(__GNUC__) && __GNUC__ < 5
+ // GCC does not implement std::is_trivially_copyable until version 5.
+ typename std::conditional<std::is_fundamental<T>::value || std::is_pointer<T>::value, std::atomic<T>, Locked<T>>::type;
+#else /* defined(__GNUC__) && __GNUC__ < 5 */
+ typename std::conditional<std::is_trivially_copyable<T>::value, std::atomic<T>, Locked<T>>::type;
+#endif /* defined(__GNUC__) && __GNUC__ < 5 */
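+
+/* Illustrative usage sketch:
+ *
+ *   AtomicOrLocked<int> counter (0);     // trivially copyable -> std::atomic<int>
+ *   AtomicOrLocked<std::string> note;    // not trivially copyable -> Locked<std::string>
+ *
+ *   counter.fetch_add(1);
+ *   note.store("hello");
+ */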
+
+}
+
+#endif /* ATOMIC_H */
diff --git a/lib/base/base64.cpp b/lib/base/base64.cpp
new file mode 100644
index 0000000..42999c3
--- /dev/null
+++ b/lib/base/base64.cpp
@@ -0,0 +1,53 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/base64.hpp"
+#include <openssl/bio.h>
+#include <openssl/evp.h>
+#include <openssl/buffer.h>
+#include <sstream>
+
+using namespace icinga;
+
+String Base64::Encode(const String& input)
+{
+ BIO *biomem = BIO_new(BIO_s_mem());
+ BIO *bio64 = BIO_new(BIO_f_base64());
+ BIO_push(bio64, biomem);
+ BIO_set_flags(bio64, BIO_FLAGS_BASE64_NO_NL);
+ BIO_write(bio64, input.CStr(), input.GetLength());
+ (void) BIO_flush(bio64);
+
+ char *outbuf;
+ long len = BIO_get_mem_data(biomem, &outbuf);
+
+ String ret = String(outbuf, outbuf + len);
+ BIO_free_all(bio64);
+
+ return ret;
+}
+
+String Base64::Decode(const String& input)
+{
+ BIO *biomem = BIO_new_mem_buf(
+ const_cast<char*>(input.CStr()), input.GetLength());
+ BIO *bio64 = BIO_new(BIO_f_base64());
+ BIO_push(bio64, biomem);
+ BIO_set_flags(bio64, BIO_FLAGS_BASE64_NO_NL);
+
+ auto *outbuf = new char[input.GetLength()];
+
+ size_t len = 0;
+ int rc;
+
+ while ((rc = BIO_read(bio64, outbuf + len, input.GetLength() - len)) > 0)
+ len += rc;
+
+ String ret = String(outbuf, outbuf + len);
+ BIO_free_all(bio64);
+ delete [] outbuf;
+
+ if (ret.IsEmpty() && !input.IsEmpty())
+ throw std::invalid_argument("Not a valid base64 string");
+
+ return ret;
+}
diff --git a/lib/base/base64.hpp b/lib/base/base64.hpp
new file mode 100644
index 0000000..8abbdbf
--- /dev/null
+++ b/lib/base/base64.hpp
@@ -0,0 +1,25 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef BASE64_H
+#define BASE64_H
+
+#include "remote/i2-remote.hpp"
+#include "base/string.hpp"
+
+namespace icinga
+{
+
+/**
+ * Base64
+ *
+ * @ingroup remote
+ */
+struct Base64
+{
+ static String Decode(const String& data);
+ static String Encode(const String& data);
+};
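+
+/* Illustrative usage sketch (the input string is an example):
+ *
+ *   String encoded = Base64::Encode("user:password");
+ *   String decoded = Base64::Decode(encoded);  // "user:password" again
+ */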
+
+}
+
+#endif /* BASE64_H */
diff --git a/lib/base/boolean-script.cpp b/lib/base/boolean-script.cpp
new file mode 100644
index 0000000..a9167ca
--- /dev/null
+++ b/lib/base/boolean-script.cpp
@@ -0,0 +1,26 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/boolean.hpp"
+#include "base/convert.hpp"
+#include "base/function.hpp"
+#include "base/functionwrapper.hpp"
+#include "base/scriptframe.hpp"
+
+using namespace icinga;
+
+static String BooleanToString()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ bool self = vframe->Self;
+ return self ? "true" : "false";
+}
+
+Object::Ptr Boolean::GetPrototype()
+{
+ static Dictionary::Ptr prototype = new Dictionary({
+ { "to_string", new Function("Boolean#to_string", BooleanToString, {}, true) }
+ });
+
+ return prototype;
+}
+
diff --git a/lib/base/boolean.cpp b/lib/base/boolean.cpp
new file mode 100644
index 0000000..683a727
--- /dev/null
+++ b/lib/base/boolean.cpp
@@ -0,0 +1,9 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/boolean.hpp"
+#include "base/primitivetype.hpp"
+
+using namespace icinga;
+
+REGISTER_BUILTIN_TYPE(Boolean, Boolean::GetPrototype());
+
diff --git a/lib/base/boolean.hpp b/lib/base/boolean.hpp
new file mode 100644
index 0000000..6533cb4
--- /dev/null
+++ b/lib/base/boolean.hpp
@@ -0,0 +1,27 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef BOOLEAN_H
+#define BOOLEAN_H
+
+#include "base/i2-base.hpp"
+#include "base/object.hpp"
+
+namespace icinga {
+
+class Value;
+
+/**
+ * Boolean class.
+ */
+class Boolean
+{
+public:
+ static Object::Ptr GetPrototype();
+
+private:
+ Boolean();
+};
+
+}
+
+#endif /* BOOLEAN_H */
diff --git a/lib/base/bulker.hpp b/lib/base/bulker.hpp
new file mode 100644
index 0000000..2c30dc3
--- /dev/null
+++ b/lib/base/bulker.hpp
@@ -0,0 +1,119 @@
+/* Icinga 2 | (c) 2022 Icinga GmbH | GPLv2+ */
+
+#ifndef BULKER_H
+#define BULKER_H
+
+#include <boost/config.hpp>
+#include <chrono>
+#include <condition_variable>
+#include <mutex>
+#include <queue>
+#include <utility>
+#include <vector>
+
+namespace icinga
+{
+
+/**
+ * A queue which outputs the input as bulks of a defined size
+ * or after a defined time, whichever is reached first
+ *
+ * @ingroup base
+ */
+template<class T>
+class Bulker
+{
+private:
+ typedef std::chrono::steady_clock Clock;
+
+public:
+ typedef std::vector<T> Container;
+ typedef typename Container::size_type SizeType;
+ typedef typename Clock::duration Duration;
+
+ Bulker(SizeType bulkSize, Duration threshold)
+ : m_BulkSize(bulkSize), m_Threshold(threshold), m_NextConsumption(NullTimePoint()) { }
+
+ void ProduceOne(T needle);
+ Container ConsumeMany();
+ SizeType Size();
+
+ inline SizeType GetBulkSize() const noexcept
+ {
+ return m_BulkSize;
+ }
+
+private:
+ typedef std::chrono::time_point<Clock> TimePoint;
+
+ static inline
+ TimePoint NullTimePoint()
+ {
+ return TimePoint::min();
+ }
+
+ inline void UpdateNextConsumption()
+ {
+ m_NextConsumption = Clock::now() + m_Threshold;
+ }
+
+ const SizeType m_BulkSize;
+ const Duration m_Threshold;
+
+ std::mutex m_Mutex;
+ std::condition_variable m_CV;
+ std::queue<Container> m_Bulks;
+ TimePoint m_NextConsumption;
+};
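+
+/* Illustrative usage sketch (bulk size and threshold are examples):
+ *
+ *   Bulker<String> bulker (500, std::chrono::seconds(5));
+ *
+ *   // producer threads:
+ *   bulker.ProduceOne("item");
+ *
+ *   // consumer thread: gets up to 500 items, or whatever accumulated within
+ *   // the five second threshold, whichever is reached first:
+ *   auto bulk (bulker.ConsumeMany());
+ */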
+
+template<class T>
+void Bulker<T>::ProduceOne(T needle)
+{
+ std::unique_lock<std::mutex> lock (m_Mutex);
+
+ if (m_Bulks.empty() || m_Bulks.back().size() == m_BulkSize) {
+ m_Bulks.emplace();
+ }
+
+ m_Bulks.back().emplace_back(std::move(needle));
+
+ if (m_Bulks.size() == 1u && m_Bulks.back().size() == m_BulkSize) {
+ m_CV.notify_one();
+ }
+}
+
+template<class T>
+typename Bulker<T>::Container Bulker<T>::ConsumeMany()
+{
+ std::unique_lock<std::mutex> lock (m_Mutex);
+
+ if (BOOST_UNLIKELY(m_NextConsumption == NullTimePoint())) {
+ UpdateNextConsumption();
+ }
+
+ auto deadline (m_NextConsumption);
+
+ m_CV.wait_until(lock, deadline, [this]() { return !m_Bulks.empty() && m_Bulks.front().size() == m_BulkSize; });
+ UpdateNextConsumption();
+
+ if (m_Bulks.empty()) {
+ return Container();
+ }
+
+ auto haystack (std::move(m_Bulks.front()));
+
+ m_Bulks.pop();
+ return haystack;
+}
+
+template<class T>
+typename Bulker<T>::SizeType Bulker<T>::Size()
+{
+ std::unique_lock<std::mutex> lock (m_Mutex);
+
+ return m_Bulks.empty() ? 0 : (m_Bulks.size() - 1u) * m_BulkSize + m_Bulks.back().size();
+}
+
+}
+
+#endif /* BULKER_H */
diff --git a/lib/base/configobject-script.cpp b/lib/base/configobject-script.cpp
new file mode 100644
index 0000000..46a9ca2
--- /dev/null
+++ b/lib/base/configobject-script.cpp
@@ -0,0 +1,36 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+#include "base/dictionary.hpp"
+#include "base/function.hpp"
+#include "base/functionwrapper.hpp"
+#include "base/scriptframe.hpp"
+
+using namespace icinga;
+
+static void ConfigObjectModifyAttribute(const String& attr, const Value& value)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ ConfigObject::Ptr self = vframe->Self;
+ REQUIRE_NOT_NULL(self);
+ return self->ModifyAttribute(attr, value);
+}
+
+static void ConfigObjectRestoreAttribute(const String& attr)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ ConfigObject::Ptr self = vframe->Self;
+ REQUIRE_NOT_NULL(self);
+ return self->RestoreAttribute(attr);
+}
+
+Object::Ptr ConfigObject::GetPrototype()
+{
+ static Dictionary::Ptr prototype = new Dictionary({
+ { "modify_attribute", new Function("ConfigObject#modify_attribute", ConfigObjectModifyAttribute, { "attr", "value" }, false) },
+ { "restore_attribute", new Function("ConfigObject#restore_attribute", ConfigObjectRestoreAttribute, { "attr", "value" }, false) }
+ });
+
+ return prototype;
+}
+
diff --git a/lib/base/configobject.cpp b/lib/base/configobject.cpp
new file mode 100644
index 0000000..4317771
--- /dev/null
+++ b/lib/base/configobject.cpp
@@ -0,0 +1,701 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/atomic-file.hpp"
+#include "base/configobject.hpp"
+#include "base/configobject-ti.cpp"
+#include "base/configtype.hpp"
+#include "base/serializer.hpp"
+#include "base/netstring.hpp"
+#include "base/json.hpp"
+#include "base/stdiostream.hpp"
+#include "base/debug.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+#include "base/exception.hpp"
+#include "base/function.hpp"
+#include "base/initialize.hpp"
+#include "base/workqueue.hpp"
+#include "base/context.hpp"
+#include "base/application.hpp"
+#include <fstream>
+#include <boost/exception/errinfo_api_function.hpp>
+#include <boost/exception/errinfo_errno.hpp>
+#include <boost/exception/errinfo_file_name.hpp>
+
+using namespace icinga;
+
+REGISTER_TYPE_WITH_PROTOTYPE(ConfigObject, ConfigObject::GetPrototype());
+
+boost::signals2::signal<void (const ConfigObject::Ptr&)> ConfigObject::OnStateChanged;
+
+bool ConfigObject::IsActive() const
+{
+ return GetActive();
+}
+
+bool ConfigObject::IsPaused() const
+{
+ return GetPaused();
+}
+
+void ConfigObject::SetExtension(const String& key, const Value& value)
+{
+ Dictionary::Ptr extensions = GetExtensions();
+
+ if (!extensions) {
+ extensions = new Dictionary();
+ SetExtensions(extensions);
+ }
+
+ extensions->Set(key, value);
+}
+
+Value ConfigObject::GetExtension(const String& key)
+{
+ Dictionary::Ptr extensions = GetExtensions();
+
+ if (!extensions)
+ return Empty;
+
+ return extensions->Get(key);
+}
+
+void ConfigObject::ClearExtension(const String& key)
+{
+ Dictionary::Ptr extensions = GetExtensions();
+
+ if (!extensions)
+ return;
+
+ extensions->Remove(key);
+}
+
+class ModAttrValidationUtils final : public ValidationUtils
+{
+public:
+ bool ValidateName(const String& type, const String& name) const override
+ {
+ Type::Ptr ptype = Type::GetByName(type);
+ auto *dtype = dynamic_cast<ConfigType *>(ptype.get());
+
+ if (!dtype)
+ return false;
+
+ if (!dtype->GetObject(name))
+ return false;
+
+ return true;
+ }
+};
+
+void ConfigObject::ModifyAttribute(const String& attr, const Value& value, bool updateVersion)
+{
+ Dictionary::Ptr original_attributes = GetOriginalAttributes();
+ bool updated_original_attributes = false;
+
+ Type::Ptr type = GetReflectionType();
+
+ std::vector<String> tokens = attr.Split(".");
+
+ String fieldName = tokens[0];
+
+ int fid = type->GetFieldId(fieldName);
+ Field field = type->GetFieldInfo(fid);
+
+ if (field.Attributes & FANoUserModify)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Attribute cannot be modified."));
+
+ if (field.Attributes & FAConfig) {
+ if (!original_attributes) {
+ original_attributes = new Dictionary();
+ SetOriginalAttributes(original_attributes, true);
+ }
+ }
+
+ Value oldValue = GetField(fid);
+ Value newValue;
+
+ if (tokens.size() > 1) {
+ newValue = oldValue.Clone();
+ Value current = newValue;
+
+ if (current.IsEmpty()) {
+ current = new Dictionary();
+ newValue = current;
+ }
+
+ String prefix = tokens[0];
+
+ for (std::vector<String>::size_type i = 1; i < tokens.size() - 1; i++) {
+ if (!current.IsObjectType<Dictionary>())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Value must be a dictionary."));
+
+ Dictionary::Ptr dict = current;
+
+ const String& key = tokens[i];
+ prefix += "." + key;
+
+ if (!dict->Get(key, &current)) {
+ current = new Dictionary();
+ dict->Set(key, current);
+ }
+ }
+
+ if (!current.IsObjectType<Dictionary>())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Value must be a dictionary."));
+
+ Dictionary::Ptr dict = current;
+
+ const String& key = tokens[tokens.size() - 1];
+ prefix += "." + key;
+
+ /* clone it for original attributes */
+ oldValue = dict->Get(key).Clone();
+
+ if (field.Attributes & FAConfig) {
+ updated_original_attributes = true;
+
+ if (oldValue.IsObjectType<Dictionary>()) {
+ Dictionary::Ptr oldDict = oldValue;
+ ObjectLock olock(oldDict);
+ for (const auto& kv : oldDict) {
+ String key = prefix + "." + kv.first;
+ if (!original_attributes->Contains(key))
+ original_attributes->Set(key, kv.second);
+ }
+
+ /* store the new value as null */
+ if (value.IsObjectType<Dictionary>()) {
+ Dictionary::Ptr valueDict = value;
+ ObjectLock olock(valueDict);
+ for (const auto& kv : valueDict) {
+ String key = attr + "." + kv.first;
+ if (!original_attributes->Contains(key))
+ original_attributes->Set(key, Empty);
+ }
+ }
+ } else if (!original_attributes->Contains(attr))
+ original_attributes->Set(attr, oldValue);
+ }
+
+ dict->Set(key, value);
+ } else {
+ newValue = value;
+
+ if (field.Attributes & FAConfig) {
+ if (!original_attributes->Contains(attr)) {
+ updated_original_attributes = true;
+ original_attributes->Set(attr, oldValue);
+ }
+ }
+ }
+
+ ModAttrValidationUtils utils;
+ ValidateField(fid, Lazy<Value>{newValue}, utils);
+
+ SetField(fid, newValue);
+
+ if (updateVersion && (field.Attributes & FAConfig))
+ SetVersion(Utility::GetTime());
+
+ if (updated_original_attributes)
+ NotifyOriginalAttributes();
+}
+
+void ConfigObject::RestoreAttribute(const String& attr, bool updateVersion)
+{
+ Type::Ptr type = GetReflectionType();
+
+ std::vector<String> tokens = attr.Split(".");
+
+ String fieldName = tokens[0];
+
+ int fid = type->GetFieldId(fieldName);
+
+ Value currentValue = GetField(fid);
+
+ Dictionary::Ptr original_attributes = GetOriginalAttributes();
+
+ if (!original_attributes)
+ return;
+
+ Value oldValue = original_attributes->Get(attr);
+ Value newValue;
+
+ if (tokens.size() > 1) {
+ newValue = currentValue.Clone();
+ Value current = newValue;
+
+ if (current.IsEmpty())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot restore non-existent object attribute"));
+
+ String prefix = tokens[0];
+
+ for (std::vector<String>::size_type i = 1; i < tokens.size() - 1; i++) {
+ if (!current.IsObjectType<Dictionary>())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Value must be a dictionary."));
+
+ Dictionary::Ptr dict = current;
+
+ const String& key = tokens[i];
+ prefix += "." + key;
+
+ if (!dict->Contains(key))
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot restore non-existent object attribute"));
+
+ current = dict->Get(key);
+ }
+
+ if (!current.IsObjectType<Dictionary>())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Value must be a dictionary."));
+
+ Dictionary::Ptr dict = current;
+
+ const String& key = tokens[tokens.size() - 1];
+ prefix += "." + key;
+
+ std::vector<String> restoredAttrs;
+
+ {
+ ObjectLock olock(original_attributes);
+ for (const auto& kv : original_attributes) {
+ std::vector<String> originalTokens = String(kv.first).Split(".");
+
+ if (tokens.size() > originalTokens.size())
+ continue;
+
+ bool match = true;
+ for (std::vector<String>::size_type i = 0; i < tokens.size(); i++) {
+ if (tokens[i] != originalTokens[i]) {
+ match = false;
+ break;
+ }
+ }
+
+ if (!match)
+ continue;
+
+ Dictionary::Ptr dict;
+
+ if (tokens.size() == originalTokens.size())
+ dict = current;
+ else {
+ Value currentSub = current;
+
+ for (std::vector<String>::size_type i = tokens.size() - 1; i < originalTokens.size() - 1; i++) {
+ dict = currentSub;
+ currentSub = dict->Get(originalTokens[i]);
+
+ if (!currentSub.IsObjectType<Dictionary>()) {
+ currentSub = new Dictionary();
+ dict->Set(originalTokens[i], currentSub);
+ }
+ }
+
+ dict = currentSub;
+ }
+
+ dict->Set(originalTokens[originalTokens.size() - 1], kv.second);
+ restoredAttrs.push_back(kv.first);
+ }
+ }
+
+ for (const String& attr : restoredAttrs)
+ original_attributes->Remove(attr);
+
+ } else {
+ newValue = oldValue;
+ }
+
+ original_attributes->Remove(attr);
+ SetField(fid, newValue);
+
+ if (updateVersion)
+ SetVersion(Utility::GetTime());
+}
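+
+/* Illustrative usage sketch (the attribute path is an example and assumes a
+ * type that actually exposes a "vars" dictionary field):
+ *
+ *   object->ModifyAttribute("vars.http_port", 8080);  // remembers the old value
+ *   object->RestoreAttribute("vars.http_port");       // puts it back and drops the override
+ */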
+
+bool ConfigObject::IsAttributeModified(const String& attr) const
+{
+ Dictionary::Ptr original_attributes = GetOriginalAttributes();
+
+ if (!original_attributes)
+ return false;
+
+ return original_attributes->Contains(attr);
+}
+
+void ConfigObject::Register()
+{
+ ASSERT(!OwnsLock());
+
+ TypeImpl<ConfigObject>::Ptr type = static_pointer_cast<TypeImpl<ConfigObject> >(GetReflectionType());
+ type->RegisterObject(this);
+}
+
+void ConfigObject::Unregister()
+{
+ ASSERT(!OwnsLock());
+
+ TypeImpl<ConfigObject>::Ptr type = static_pointer_cast<TypeImpl<ConfigObject> >(GetReflectionType());
+ type->UnregisterObject(this);
+}
+
+void ConfigObject::Start(bool runtimeCreated)
+{
+ ObjectImpl<ConfigObject>::Start(runtimeCreated);
+
+ ObjectLock olock(this);
+
+ SetStartCalled(true);
+}
+
+void ConfigObject::PreActivate()
+{
+ CONTEXT("Setting 'active' to true for object '" << GetName() << "' of type '" << GetReflectionType()->GetName() << "'");
+
+ ASSERT(!IsActive());
+ SetActive(true, true);
+}
+
+void ConfigObject::Activate(bool runtimeCreated, const Value& cookie)
+{
+ CONTEXT("Activating object '" << GetName() << "' of type '" << GetReflectionType()->GetName() << "'");
+
+ {
+ ObjectLock olock(this);
+
+ Start(runtimeCreated);
+
+ ASSERT(GetStartCalled());
+
+ if (GetHAMode() == HARunEverywhere)
+ SetAuthority(true);
+ }
+
+ NotifyActive(cookie);
+}
+
+void ConfigObject::Stop(bool runtimeRemoved)
+{
+ ObjectImpl<ConfigObject>::Stop(runtimeRemoved);
+
+ ObjectLock olock(this);
+
+ SetStopCalled(true);
+}
+
+void ConfigObject::Deactivate(bool runtimeRemoved, const Value& cookie)
+{
+ CONTEXT("Deactivating object '" << GetName() << "' of type '" << GetReflectionType()->GetName() << "'");
+
+ {
+ ObjectLock olock(this);
+
+ if (!IsActive())
+ return;
+
+ SetActive(false, true);
+
+ SetAuthority(false);
+
+ Stop(runtimeRemoved);
+ }
+
+ ASSERT(GetStopCalled());
+
+ NotifyActive(cookie);
+}
+
+void ConfigObject::OnConfigLoaded()
+{
+ /* Nothing to do here. */
+}
+
+void ConfigObject::OnAllConfigLoaded()
+{
+ static ConfigType *ctype = dynamic_cast<ConfigType *>(Type::GetByName("Zone").get());
+ String zoneName = GetZoneName();
+
+ if (!zoneName.IsEmpty())
+ m_Zone = ctype->GetObject(zoneName);
+}
+
+void ConfigObject::CreateChildObjects(const Type::Ptr& childType)
+{
+ /* Nothing to do here. */
+}
+
+void ConfigObject::OnStateLoaded()
+{
+ /* Nothing to do here. */
+}
+
+void ConfigObject::Pause()
+{
+ SetPauseCalled(true);
+}
+
+void ConfigObject::Resume()
+{
+ SetResumeCalled(true);
+}
+
+void ConfigObject::SetAuthority(bool authority)
+{
+ ObjectLock olock(this);
+
+ if (authority && GetPaused()) {
+ SetResumeCalled(false);
+ Resume();
+ ASSERT(GetResumeCalled());
+ SetPaused(false);
+ } else if (!authority && !GetPaused()) {
+ SetPaused(true);
+ SetPauseCalled(false);
+ Pause();
+ ASSERT(GetPauseCalled());
+ }
+}
+
+void ConfigObject::DumpObjects(const String& filename, int attributeTypes)
+{
+ Log(LogInformation, "ConfigObject")
+ << "Dumping program state to file '" << filename << "'";
+
+ try {
+ Utility::Glob(filename + ".tmp.*", &Utility::Remove, GlobFile);
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "ConfigObject") << DiagnosticInformation(ex);
+ }
+
+ AtomicFile fp (filename, 0600);
+ StdioStream::Ptr sfp = new StdioStream(&fp, false);
+
+ for (const Type::Ptr& type : Type::GetAllTypes()) {
+ auto *dtype = dynamic_cast<ConfigType *>(type.get());
+
+ if (!dtype)
+ continue;
+
+ for (const ConfigObject::Ptr& object : dtype->GetObjects()) {
+ Dictionary::Ptr update = Serialize(object, attributeTypes);
+
+ if (!update)
+ continue;
+
+ Dictionary::Ptr persistentObject = new Dictionary({
+ { "type", type->GetName() },
+ { "name", object->GetName() },
+ { "update", update }
+ });
+
+ String json = JsonEncode(persistentObject);
+
+ NetString::WriteStringToStream(sfp, json);
+ }
+ }
+
+ sfp->Close();
+ fp.Commit();
+}
+
+void ConfigObject::RestoreObject(const String& message, int attributeTypes)
+{
+ Dictionary::Ptr persistentObject = JsonDecode(message);
+
+ String type = persistentObject->Get("type");
+ String name = persistentObject->Get("name");
+
+ ConfigObject::Ptr object = GetObject(type, name);
+
+ if (!object)
+ return;
+
+#ifdef I2_DEBUG
+ Log(LogDebug, "ConfigObject")
+ << "Restoring object '" << name << "' of type '" << type << "'.";
+#endif /* I2_DEBUG */
+ Dictionary::Ptr update = persistentObject->Get("update");
+ Deserialize(object, update, false, attributeTypes);
+ object->OnStateLoaded();
+ object->SetStateLoaded(true);
+}
+
+void ConfigObject::RestoreObjects(const String& filename, int attributeTypes)
+{
+ if (!Utility::PathExists(filename))
+ return;
+
+ Log(LogInformation, "ConfigObject")
+ << "Restoring program state from file '" << filename << "'";
+
+ std::fstream fp;
+ fp.open(filename.CStr(), std::ios_base::in);
+
+ StdioStream::Ptr sfp = new StdioStream (&fp, false);
+
+ unsigned long restored = 0;
+
+ WorkQueue upq(25000, Configuration::Concurrency);
+ upq.SetName("ConfigObject::RestoreObjects");
+
+ String message;
+ StreamReadContext src;
+ for (;;) {
+ StreamReadStatus srs = NetString::ReadStringFromStream(sfp, &message, src);
+
+ if (srs == StatusEof)
+ break;
+
+ if (srs != StatusNewItem)
+ continue;
+
+ upq.Enqueue([message, attributeTypes]() { RestoreObject(message, attributeTypes); });
+ restored++;
+ }
+
+ sfp->Close();
+
+ upq.Join();
+
+ unsigned long no_state = 0;
+
+ for (const Type::Ptr& type : Type::GetAllTypes()) {
+ auto *dtype = dynamic_cast<ConfigType *>(type.get());
+
+ if (!dtype)
+ continue;
+
+ for (const ConfigObject::Ptr& object : dtype->GetObjects()) {
+ if (!object->GetStateLoaded()) {
+ object->OnStateLoaded();
+ object->SetStateLoaded(true);
+
+ no_state++;
+ }
+ }
+ }
+
+ Log(LogInformation, "ConfigObject")
+ << "Restored " << restored << " objects. Loaded " << no_state << " new objects without state.";
+}
+
+void ConfigObject::StopObjects()
+{
+ std::vector<Type::Ptr> types = Type::GetAllTypes();
+
+ std::sort(types.begin(), types.end(), [](const Type::Ptr& a, const Type::Ptr& b) {
+ if (a->GetActivationPriority() > b->GetActivationPriority())
+ return true;
+ return false;
+ });
+
+ for (const Type::Ptr& type : types) {
+ auto *dtype = dynamic_cast<ConfigType *>(type.get());
+
+ if (!dtype)
+ continue;
+
+ for (const ConfigObject::Ptr& object : dtype->GetObjects()) {
+#ifdef I2_DEBUG
+ Log(LogDebug, "ConfigObject")
+ << "Deactivate() called for config object '" << object->GetName() << "' with type '" << type->GetName() << "'.";
+#endif /* I2_DEBUG */
+ object->Deactivate();
+ }
+ }
+}
+
+void ConfigObject::DumpModifiedAttributes(const std::function<void(const ConfigObject::Ptr&, const String&, const Value&)>& callback)
+{
+ for (const Type::Ptr& type : Type::GetAllTypes()) {
+ auto *dtype = dynamic_cast<ConfigType *>(type.get());
+
+ if (!dtype)
+ continue;
+
+ for (const ConfigObject::Ptr& object : dtype->GetObjects()) {
+ Dictionary::Ptr originalAttributes = object->GetOriginalAttributes();
+
+ if (!originalAttributes)
+ continue;
+
+ ObjectLock olock(originalAttributes);
+ for (const Dictionary::Pair& kv : originalAttributes) {
+ String key = kv.first;
+
+ Type::Ptr type = object->GetReflectionType();
+
+ std::vector<String> tokens = key.Split(".");
+
+ String fieldName = tokens[0];
+ int fid = type->GetFieldId(fieldName);
+
+ Value currentValue = object->GetField(fid);
+ Value modifiedValue;
+
+ if (tokens.size() > 1) {
+ Value current = currentValue;
+
+ for (std::vector<String>::size_type i = 1; i < tokens.size() - 1; i++) {
+ if (!current.IsObjectType<Dictionary>())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Value must be a dictionary."));
+
+ Dictionary::Ptr dict = current;
+ const String& key = tokens[i];
+
+ if (!dict->Contains(key))
+ break;
+
+ current = dict->Get(key);
+ }
+
+ if (!current.IsObjectType<Dictionary>())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Value must be a dictionary."));
+
+ Dictionary::Ptr dict = current;
+ const String& key = tokens[tokens.size() - 1];
+
+ modifiedValue = dict->Get(key);
+ } else
+ modifiedValue = currentValue;
+
+ callback(object, key, modifiedValue);
+ }
+ }
+ }
+
+}
+
+ConfigObject::Ptr ConfigObject::GetObject(const String& type, const String& name)
+{
+ Type::Ptr ptype = Type::GetByName(type);
+ auto *ctype = dynamic_cast<ConfigType *>(ptype.get());
+
+ if (!ctype)
+ return nullptr;
+
+ return ctype->GetObject(name);
+}
+
+ConfigObject::Ptr ConfigObject::GetZone() const
+{
+ return m_Zone;
+}
+
+Dictionary::Ptr ConfigObject::GetSourceLocation() const
+{
+ DebugInfo di = GetDebugInfo();
+
+ return new Dictionary({
+ { "path", di.Path },
+ { "first_line", di.FirstLine },
+ { "first_column", di.FirstColumn },
+ { "last_line", di.LastLine },
+ { "last_column", di.LastColumn }
+ });
+}
+
+NameComposer::~NameComposer()
+{ }
diff --git a/lib/base/configobject.hpp b/lib/base/configobject.hpp
new file mode 100644
index 0000000..5596363
--- /dev/null
+++ b/lib/base/configobject.hpp
@@ -0,0 +1,101 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CONFIGOBJECT_H
+#define CONFIGOBJECT_H
+
+#include "base/i2-base.hpp"
+#include "base/configobject-ti.hpp"
+#include "base/object.hpp"
+#include "base/type.hpp"
+#include "base/dictionary.hpp"
+#include <boost/signals2.hpp>
+
+namespace icinga
+{
+
+class ConfigType;
+
+/**
+ * A dynamic object that can be instantiated from the configuration file.
+ *
+ * @ingroup base
+ */
+class ConfigObject : public ObjectImpl<ConfigObject>
+{
+public:
+ DECLARE_OBJECT(ConfigObject);
+
+ static boost::signals2::signal<void (const ConfigObject::Ptr&)> OnStateChanged;
+
+ bool IsActive() const;
+ bool IsPaused() const;
+
+ void SetExtension(const String& key, const Value& value);
+ Value GetExtension(const String& key);
+ void ClearExtension(const String& key);
+
+ ConfigObject::Ptr GetZone() const;
+
+ void ModifyAttribute(const String& attr, const Value& value, bool updateVersion = true);
+ void RestoreAttribute(const String& attr, bool updateVersion = true);
+ bool IsAttributeModified(const String& attr) const;
+
+ void Register();
+ void Unregister();
+
+ void PreActivate();
+ void Activate(bool runtimeCreated = false, const Value& cookie = Empty);
+ void Deactivate(bool runtimeRemoved = false, const Value& cookie = Empty);
+ void SetAuthority(bool authority);
+
+ void Start(bool runtimeCreated = false) override;
+ void Stop(bool runtimeRemoved = false) override;
+
+ virtual void Pause();
+ virtual void Resume();
+
+ virtual void OnConfigLoaded();
+ virtual void CreateChildObjects(const Type::Ptr& childType);
+ virtual void OnAllConfigLoaded();
+ virtual void OnStateLoaded();
+
+ Dictionary::Ptr GetSourceLocation() const override;
+
+ template<typename T>
+ static intrusive_ptr<T> GetObject(const String& name)
+ {
+ typedef TypeImpl<T> ObjType;
+ auto *ptype = static_cast<ObjType *>(T::TypeInstance.get());
+ return static_pointer_cast<T>(ptype->GetObject(name));
+ }
+
+ static ConfigObject::Ptr GetObject(const String& type, const String& name);
+
+ static void DumpObjects(const String& filename, int attributeTypes = FAState);
+ static void RestoreObjects(const String& filename, int attributeTypes = FAState);
+ static void StopObjects();
+
+ static void DumpModifiedAttributes(const std::function<void(const ConfigObject::Ptr&, const String&, const Value&)>& callback);
+
+ static Object::Ptr GetPrototype();
+
+private:
+ ConfigObject::Ptr m_Zone;
+
+ static void RestoreObject(const String& message, int attributeTypes);
+};
+
+#define DECLARE_OBJECTNAME(klass) \
+ inline static String GetTypeName() \
+ { \
+ return #klass; \
+ } \
+ \
+ inline static intrusive_ptr<klass> GetByName(const String& name) \
+ { \
+ return ConfigObject::GetObject<klass>(name); \
+ }
+
+}
+
+#endif /* CONFIGOBJECT_H */
diff --git a/lib/base/configobject.ti b/lib/base/configobject.ti
new file mode 100644
index 0000000..ea67dfa
--- /dev/null
+++ b/lib/base/configobject.ti
@@ -0,0 +1,94 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/debuginfo.hpp"
+#include "base/configtype.hpp"
+
+library base;
+
+namespace icinga
+{
+
+code {{{
+enum HAMode
+{
+ HARunOnce,
+ HARunEverywhere
+};
+
+class NameComposer
+{
+public:
+ virtual ~NameComposer();
+
+ virtual String MakeName(const String& shortName, const Object::Ptr& context) const = 0;
+ virtual Dictionary::Ptr ParseName(const String& name) const = 0;
+};
+}}}
+
+abstract class ConfigObjectBase
+{ };
+
+code {{{
+class ConfigObjectBase : public ObjectImpl<ConfigObjectBase>
+{
+public:
+ inline DebugInfo GetDebugInfo() const
+ {
+ return m_DebugInfo;
+ }
+
+ void SetDebugInfo(const DebugInfo& di)
+ {
+ m_DebugInfo = di;
+ }
+
+ inline virtual void Start(bool /* runtimeCreated */)
+ { }
+
+ inline virtual void Stop(bool /* runtimeRemoved */)
+ { }
+
+private:
+ DebugInfo m_DebugInfo;
+};
+
+}}}
+
+abstract class ConfigObject : ConfigObjectBase < ConfigType
+{
+ [config, no_user_modify] String __name (Name);
+ [config, no_user_modify, required] String "name" (ShortName) {
+ get {{{
+ String shortName = m_ShortName.load();
+ if (shortName.IsEmpty())
+ return GetName();
+ else
+ return shortName;
+ }}}
+ };
+ [config, no_user_modify] name(Zone) zone (ZoneName);
+ [config, no_user_modify] String package;
+ [config, get_protected, no_user_modify] Array::Ptr templates;
+ [config, no_storage, no_user_modify] Dictionary::Ptr source_location {
+ get;
+ };
+ [get_protected, no_user_modify] bool active;
+ [get_protected, no_user_modify] bool paused {
+ default {{{ return true; }}}
+ };
+ [get_protected, no_user_view, no_user_modify] bool start_called;
+ [get_protected, no_user_view, no_user_modify] bool stop_called;
+ [get_protected, no_user_view, no_user_modify] bool pause_called;
+ [get_protected, no_user_view, no_user_modify] bool resume_called;
+ [enum] HAMode ha_mode (HAMode);
+ [protected, no_user_view, no_user_modify] Dictionary::Ptr extensions;
+
+ [protected, no_user_view, no_user_modify] bool state_loaded;
+ [no_user_modify] Dictionary::Ptr original_attributes;
+ [state, no_user_modify] double version {
+ default {{{ return 0; }}}
+ };
+ [no_user_view, no_user_modify] String icingadb_identifier;
+};
+
+}
diff --git a/lib/base/configtype.cpp b/lib/base/configtype.cpp
new file mode 100644
index 0000000..b266cd2
--- /dev/null
+++ b/lib/base/configtype.cpp
@@ -0,0 +1,76 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+#include "base/convert.hpp"
+#include "base/exception.hpp"
+
+using namespace icinga;
+
+ConfigType::~ConfigType()
+{ }
+
+ConfigObject::Ptr ConfigType::GetObject(const String& name) const
+{
+ std::shared_lock<decltype(m_Mutex)> lock (m_Mutex);
+
+ auto nt = m_ObjectMap.find(name);
+
+ if (nt == m_ObjectMap.end())
+ return nullptr;
+
+ return nt->second;
+}
+
+void ConfigType::RegisterObject(const ConfigObject::Ptr& object)
+{
+ String name = object->GetName();
+
+ {
+ std::unique_lock<decltype(m_Mutex)> lock (m_Mutex);
+
+ auto it = m_ObjectMap.find(name);
+
+ if (it != m_ObjectMap.end()) {
+ if (it->second == object)
+ return;
+
+ auto *type = dynamic_cast<Type *>(this);
+
+ BOOST_THROW_EXCEPTION(ScriptError("An object with type '" + type->GetName() + "' and name '" + name + "' already exists (" +
+ Convert::ToString(it->second->GetDebugInfo()) + "), new declaration: " + Convert::ToString(object->GetDebugInfo()),
+ object->GetDebugInfo()));
+ }
+
+ m_ObjectMap[name] = object;
+ m_ObjectVector.push_back(object);
+ }
+}
+
+void ConfigType::UnregisterObject(const ConfigObject::Ptr& object)
+{
+ String name = object->GetName();
+
+ {
+ std::unique_lock<decltype(m_Mutex)> lock (m_Mutex);
+
+ m_ObjectMap.erase(name);
+ m_ObjectVector.erase(std::remove(m_ObjectVector.begin(), m_ObjectVector.end(), object), m_ObjectVector.end());
+ }
+}
+
+std::vector<ConfigObject::Ptr> ConfigType::GetObjects() const
+{
+ std::shared_lock<decltype(m_Mutex)> lock (m_Mutex);
+ return m_ObjectVector;
+}
+
+std::vector<ConfigObject::Ptr> ConfigType::GetObjectsHelper(Type *type)
+{
+ return static_cast<TypeImpl<ConfigObject> *>(type)->GetObjects();
+}
+
+int ConfigType::GetObjectCount() const
+{
+ std::shared_lock<decltype(m_Mutex)> lock (m_Mutex);
+ return m_ObjectVector.size();
+}
diff --git a/lib/base/configtype.hpp b/lib/base/configtype.hpp
new file mode 100644
index 0000000..c77fc5e
--- /dev/null
+++ b/lib/base/configtype.hpp
@@ -0,0 +1,64 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CONFIGTYPE_H
+#define CONFIGTYPE_H
+
+#include "base/i2-base.hpp"
+#include "base/object.hpp"
+#include "base/type.hpp"
+#include "base/dictionary.hpp"
+#include <shared_mutex>
+#include <unordered_map>
+
+namespace icinga
+{
+
+class ConfigObject;
+
+class ConfigType
+{
+public:
+ virtual ~ConfigType();
+
+ intrusive_ptr<ConfigObject> GetObject(const String& name) const;
+
+ void RegisterObject(const intrusive_ptr<ConfigObject>& object);
+ void UnregisterObject(const intrusive_ptr<ConfigObject>& object);
+
+ std::vector<intrusive_ptr<ConfigObject> > GetObjects() const;
+
+ template<typename T>
+ static TypeImpl<T> *Get()
+ {
+ typedef TypeImpl<T> ObjType;
+ return static_cast<ObjType *>(T::TypeInstance.get());
+ }
+
+ template<typename T>
+ static std::vector<intrusive_ptr<T> > GetObjectsByType()
+ {
+ std::vector<intrusive_ptr<ConfigObject> > objects = GetObjectsHelper(T::TypeInstance.get());
+ std::vector<intrusive_ptr<T> > result;
+ result.reserve(objects.size());
+ for (const auto& object : objects) {
+ result.push_back(static_pointer_cast<T>(object));
+ }
+ return result;
+ }
+
+ int GetObjectCount() const;
+
+private:
+ typedef std::unordered_map<String, intrusive_ptr<ConfigObject> > ObjectMap;
+ typedef std::vector<intrusive_ptr<ConfigObject> > ObjectVector;
+
+ mutable std::shared_timed_mutex m_Mutex;
+ ObjectMap m_ObjectMap;
+ ObjectVector m_ObjectVector;
+
+ static std::vector<intrusive_ptr<ConfigObject> > GetObjectsHelper(Type *type);
+};
+
+}
+
+#endif /* CONFIGTYPE_H */
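For illustration, a minimal usage sketch of the registry API declared above; the Host type and its header path are assumptions standing in for any ConfigObject subclass defined elsewhere in the tree:

    #include "base/configtype.hpp"
    #include "base/configobject.hpp"
    #include "icinga/host.hpp" // assumed: any ConfigObject subclass works the same way

    using namespace icinga;

    void ListHosts()
    {
        // Iterate every registered object of the (assumed) Host type.
        for (const auto& host : ConfigType::GetObjectsByType<Host>()) {
            (void)host; // placeholder for real work on an intrusive_ptr<Host>
        }

        // Look up a single object by name via the per-type registry.
        auto *type = ConfigType::Get<Host>();
        ConfigObject::Ptr obj = type->GetObject("web01.example.com");
        if (!obj) {
            // no object with that name has been registered
        }
    }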
diff --git a/lib/base/configuration.cpp b/lib/base/configuration.cpp
new file mode 100644
index 0000000..908c161
--- /dev/null
+++ b/lib/base/configuration.cpp
@@ -0,0 +1,379 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configuration.hpp"
+#include "base/configuration-ti.cpp"
+#include "base/exception.hpp"
+
+using namespace icinga;
+
+REGISTER_TYPE(Configuration);
+
+String Configuration::ApiBindHost = []() {
+#ifndef _WIN32
+ // Automatically fall back to an IPv4 default if socket() tells us that IPv6 is not supported.
+ int fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (fd < 0 && errno == EAFNOSUPPORT) {
+ return "0.0.0.0";
+ } else if (fd >= 0) {
+ close(fd);
+ }
+#endif /* _WIN32 */
+
+ return "::";
+}();
+
+String Configuration::ApiBindPort{"5665"};
+bool Configuration::AttachDebugger{false};
+String Configuration::CacheDir;
+int Configuration::Concurrency{1};
+bool Configuration::ConcurrencyWasModified{false};
+String Configuration::ConfigDir;
+String Configuration::DataDir;
+String Configuration::EventEngine;
+String Configuration::IncludeConfDir;
+String Configuration::InitRunDir;
+String Configuration::LogDir;
+String Configuration::ModAttrPath;
+String Configuration::ObjectsPath;
+String Configuration::PidPath;
+String Configuration::PkgDataDir;
+String Configuration::PrefixDir;
+String Configuration::ProgramData;
+int Configuration::RLimitFiles;
+int Configuration::RLimitProcesses;
+int Configuration::RLimitStack;
+String Configuration::RunAsGroup;
+String Configuration::RunAsUser;
+String Configuration::SpoolDir;
+String Configuration::StatePath;
+double Configuration::TlsHandshakeTimeout{10};
+String Configuration::VarsPath;
+String Configuration::ZonesDir;
+
+/* deprecated */
+String Configuration::LocalStateDir;
+String Configuration::RunDir;
+String Configuration::SysconfDir;
+
+/* internal */
+bool Configuration::m_ReadOnly{false};
+
+template<typename T>
+void HandleUserWrite(const String& name, T *target, const T& value, bool readOnly)
+{
+ if (readOnly)
+ BOOST_THROW_EXCEPTION(ScriptError("Configuration attribute '" + name + "' is read-only."));
+
+ *target = value;
+}
+
+String Configuration::GetApiBindHost() const
+{
+ return Configuration::ApiBindHost;
+}
+
+void Configuration::SetApiBindHost(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("ApiBindHost", &Configuration::ApiBindHost, val, m_ReadOnly);
+}
+
+String Configuration::GetApiBindPort() const
+{
+ return Configuration::ApiBindPort;
+}
+
+void Configuration::SetApiBindPort(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("ApiBindPort", &Configuration::ApiBindPort, val, m_ReadOnly);
+}
+
+bool Configuration::GetAttachDebugger() const
+{
+ return Configuration::AttachDebugger;
+}
+
+void Configuration::SetAttachDebugger(bool val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("AttachDebugger", &Configuration::AttachDebugger, val, m_ReadOnly);
+}
+
+String Configuration::GetCacheDir() const
+{
+ return Configuration::CacheDir;
+}
+
+void Configuration::SetCacheDir(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("CacheDir", &Configuration::CacheDir, val, m_ReadOnly);
+}
+
+int Configuration::GetConcurrency() const
+{
+ return Configuration::Concurrency;
+}
+
+void Configuration::SetConcurrency(int val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("Concurrency", &Configuration::Concurrency, val, m_ReadOnly);
+ Configuration::ConcurrencyWasModified = true;
+}
+
+String Configuration::GetConfigDir() const
+{
+ return Configuration::ConfigDir;
+}
+
+void Configuration::SetConfigDir(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("ConfigDir", &Configuration::ConfigDir, val, m_ReadOnly);
+}
+
+String Configuration::GetDataDir() const
+{
+ return Configuration::DataDir;
+}
+
+void Configuration::SetDataDir(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("DataDir", &Configuration::DataDir, val, m_ReadOnly);
+}
+
+String Configuration::GetEventEngine() const
+{
+ return Configuration::EventEngine;
+}
+
+void Configuration::SetEventEngine(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("EventEngine", &Configuration::EventEngine, val, m_ReadOnly);
+}
+
+String Configuration::GetIncludeConfDir() const
+{
+ return Configuration::IncludeConfDir;
+}
+
+void Configuration::SetIncludeConfDir(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("IncludeConfDir", &Configuration::IncludeConfDir, val, m_ReadOnly);
+}
+
+String Configuration::GetInitRunDir() const
+{
+ return Configuration::InitRunDir;
+}
+
+void Configuration::SetInitRunDir(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("InitRunDir", &Configuration::InitRunDir, val, m_ReadOnly);
+}
+
+String Configuration::GetLogDir() const
+{
+ return Configuration::LogDir;
+}
+
+void Configuration::SetLogDir(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("LogDir", &Configuration::LogDir, val, m_ReadOnly);
+}
+
+String Configuration::GetModAttrPath() const
+{
+ return Configuration::ModAttrPath;
+}
+
+void Configuration::SetModAttrPath(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("ModAttrPath", &Configuration::ModAttrPath, val, m_ReadOnly);
+}
+
+String Configuration::GetObjectsPath() const
+{
+ return Configuration::ObjectsPath;
+}
+
+void Configuration::SetObjectsPath(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("ObjectsPath", &Configuration::ObjectsPath, val, m_ReadOnly);
+}
+
+String Configuration::GetPidPath() const
+{
+ return Configuration::PidPath;
+}
+
+void Configuration::SetPidPath(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("PidPath", &Configuration::PidPath, val, m_ReadOnly);
+}
+
+String Configuration::GetPkgDataDir() const
+{
+ return Configuration::PkgDataDir;
+}
+
+void Configuration::SetPkgDataDir(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("PkgDataDir", &Configuration::PkgDataDir, val, m_ReadOnly);
+}
+
+String Configuration::GetPrefixDir() const
+{
+ return Configuration::PrefixDir;
+}
+
+void Configuration::SetPrefixDir(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("PrefixDir", &Configuration::PrefixDir, val, m_ReadOnly);
+}
+
+String Configuration::GetProgramData() const
+{
+ return Configuration::ProgramData;
+}
+
+void Configuration::SetProgramData(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("ProgramData", &Configuration::ProgramData, val, m_ReadOnly);
+}
+
+int Configuration::GetRLimitFiles() const
+{
+ return Configuration::RLimitFiles;
+}
+
+void Configuration::SetRLimitFiles(int val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("RLimitFiles", &Configuration::RLimitFiles, val, m_ReadOnly);
+}
+
+int Configuration::GetRLimitProcesses() const
+{
+ return Configuration::RLimitProcesses;
+}
+
+void Configuration::SetRLimitProcesses(int val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("RLimitProcesses", &Configuration::RLimitProcesses, val, m_ReadOnly);
+}
+
+int Configuration::GetRLimitStack() const
+{
+ return Configuration::RLimitStack;
+}
+
+void Configuration::SetRLimitStack(int val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("RLimitStack", &Configuration::RLimitStack, val, m_ReadOnly);
+}
+
+String Configuration::GetRunAsGroup() const
+{
+ return Configuration::RunAsGroup;
+}
+
+void Configuration::SetRunAsGroup(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("RunAsGroup", &Configuration::RunAsGroup, val, m_ReadOnly);
+}
+
+String Configuration::GetRunAsUser() const
+{
+ return Configuration::RunAsUser;
+}
+
+void Configuration::SetRunAsUser(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("RunAsUser", &Configuration::RunAsUser, val, m_ReadOnly);
+}
+
+String Configuration::GetSpoolDir() const
+{
+ return Configuration::SpoolDir;
+}
+
+void Configuration::SetSpoolDir(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("SpoolDir", &Configuration::SpoolDir, val, m_ReadOnly);
+}
+
+String Configuration::GetStatePath() const
+{
+ return Configuration::StatePath;
+}
+
+void Configuration::SetStatePath(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("StatePath", &Configuration::StatePath, val, m_ReadOnly);
+}
+
+double Configuration::GetTlsHandshakeTimeout() const
+{
+ return Configuration::TlsHandshakeTimeout;
+}
+
+void Configuration::SetTlsHandshakeTimeout(double val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("TlsHandshakeTimeout", &Configuration::TlsHandshakeTimeout, val, m_ReadOnly);
+}
+
+String Configuration::GetVarsPath() const
+{
+ return Configuration::VarsPath;
+}
+
+void Configuration::SetVarsPath(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("VarsPath", &Configuration::VarsPath, val, m_ReadOnly);
+}
+
+String Configuration::GetZonesDir() const
+{
+ return Configuration::ZonesDir;
+}
+
+void Configuration::SetZonesDir(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("ZonesDir", &Configuration::ZonesDir, val, m_ReadOnly);
+}
+
+String Configuration::GetLocalStateDir() const
+{
+ return Configuration::LocalStateDir;
+}
+
+void Configuration::SetLocalStateDir(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("LocalStateDir", &Configuration::LocalStateDir, val, m_ReadOnly);
+}
+
+String Configuration::GetSysconfDir() const
+{
+ return Configuration::SysconfDir;
+}
+
+void Configuration::SetSysconfDir(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("SysconfDir", &Configuration::SysconfDir, val, m_ReadOnly);
+}
+
+String Configuration::GetRunDir() const
+{
+ return Configuration::RunDir;
+}
+
+void Configuration::SetRunDir(const String& val, bool suppress_events, const Value& cookie)
+{
+ HandleUserWrite("RunDir", &Configuration::RunDir, val, m_ReadOnly);
+}
+
+bool Configuration::GetReadOnly()
+{
+ return m_ReadOnly;
+}
+
+void Configuration::SetReadOnly(bool readOnly)
+{
+ m_ReadOnly = readOnly;
+}
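A short sketch of how the HandleUserWrite() guard above behaves once SetReadOnly(true) has been called; the paths are placeholders:

    #include "base/configuration.hpp"
    #include "base/exception.hpp"

    using namespace icinga;

    void FreezeConstants()
    {
        // Direct access to the static storage is always possible.
        Configuration::ConfigDir = "/etc/icinga2"; // placeholder path

        // After this, the user-facing setters refuse writes.
        Configuration::SetReadOnly(true);

        Configuration::Ptr cfg = new Configuration();

        try {
            cfg->SetConfigDir("/tmp/elsewhere"); // routed through HandleUserWrite()
        } catch (const ScriptError&) {
            // expected: the attribute is read-only now
        }
    }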
diff --git a/lib/base/configuration.hpp b/lib/base/configuration.hpp
new file mode 100644
index 0000000..a5aed01
--- /dev/null
+++ b/lib/base/configuration.hpp
@@ -0,0 +1,157 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CONFIGURATION_H
+#define CONFIGURATION_H
+
+#include "base/i2-base.hpp"
+#include "base/configuration-ti.hpp"
+
+namespace icinga
+{
+
+/**
+ * Global configuration.
+ *
+ * @ingroup base
+ */
+class Configuration : public ObjectImpl<Configuration>
+{
+public:
+ DECLARE_OBJECT(Configuration);
+
+ String GetApiBindHost() const override;
+ void SetApiBindHost(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetApiBindPort() const override;
+ void SetApiBindPort(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ bool GetAttachDebugger() const override;
+ void SetAttachDebugger(bool value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetCacheDir() const override;
+ void SetCacheDir(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ int GetConcurrency() const override;
+ void SetConcurrency(int value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetConfigDir() const override;
+ void SetConfigDir(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetDataDir() const override;
+ void SetDataDir(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetEventEngine() const override;
+ void SetEventEngine(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetIncludeConfDir() const override;
+ void SetIncludeConfDir(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetInitRunDir() const override;
+ void SetInitRunDir(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetLogDir() const override;
+ void SetLogDir(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetModAttrPath() const override;
+ void SetModAttrPath(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetObjectsPath() const override;
+ void SetObjectsPath(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetPidPath() const override;
+ void SetPidPath(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetPkgDataDir() const override;
+ void SetPkgDataDir(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetPrefixDir() const override;
+ void SetPrefixDir(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetProgramData() const override;
+ void SetProgramData(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ int GetRLimitFiles() const override;
+ void SetRLimitFiles(int value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ int GetRLimitProcesses() const override;
+ void SetRLimitProcesses(int value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ int GetRLimitStack() const override;
+ void SetRLimitStack(int value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetRunAsGroup() const override;
+ void SetRunAsGroup(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetRunAsUser() const override;
+ void SetRunAsUser(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetSpoolDir() const override;
+ void SetSpoolDir(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetStatePath() const override;
+ void SetStatePath(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ double GetTlsHandshakeTimeout() const override;
+ void SetTlsHandshakeTimeout(double value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetVarsPath() const override;
+ void SetVarsPath(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetZonesDir() const override;
+ void SetZonesDir(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ /* deprecated */
+ String GetLocalStateDir() const override;
+ void SetLocalStateDir(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetSysconfDir() const override;
+ void SetSysconfDir(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ String GetRunDir() const override;
+ void SetRunDir(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ static bool GetReadOnly();
+ static void SetReadOnly(bool readOnly);
+
+ static String ApiBindHost;
+ static String ApiBindPort;
+ static bool AttachDebugger;
+ static String CacheDir;
+ static int Concurrency;
+ static bool ConcurrencyWasModified;
+ static String ConfigDir;
+ static String DataDir;
+ static String EventEngine;
+ static String IncludeConfDir;
+ static String InitRunDir;
+ static String LogDir;
+ static String ModAttrPath;
+ static String ObjectsPath;
+ static String PidPath;
+ static String PkgDataDir;
+ static String PrefixDir;
+ static String ProgramData;
+ static int RLimitFiles;
+ static int RLimitProcesses;
+ static int RLimitStack;
+ static String RunAsGroup;
+ static String RunAsUser;
+ static String SpoolDir;
+ static String StatePath;
+ static double TlsHandshakeTimeout;
+ static String VarsPath;
+ static String ZonesDir;
+
+ /* deprecated */
+ static String LocalStateDir;
+ static String RunDir;
+ static String SysconfDir;
+
+private:
+ static bool m_ReadOnly;
+
+};
+
+}
+
+#endif /* CONFIGURATION_H */
diff --git a/lib/base/configuration.ti b/lib/base/configuration.ti
new file mode 100644
index 0000000..72fa92d
--- /dev/null
+++ b/lib/base/configuration.ti
@@ -0,0 +1,164 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+
+library base;
+
+namespace icinga
+{
+
+abstract class Configuration
+{
+ [config, no_storage, virtual] String ApiBindHost {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String ApiBindPort {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] bool AttachDebugger {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String CacheDir {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] int Concurrency {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String ConfigDir {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String DataDir {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String EventEngine {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String IncludeConfDir {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String InitRunDir {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String LogDir {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String ModAttrPath {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String ObjectsPath {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String PidPath {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String PkgDataDir {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String PrefixDir {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String ProgramData {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] int RLimitFiles {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] int RLimitProcesses {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] int RLimitStack {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String RunAsGroup {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String RunAsUser {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String SpoolDir {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String StatePath {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] double TlsHandshakeTimeout {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String VarsPath {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String ZonesDir {
+ get;
+ set;
+ };
+
+ /* deprecated */
+ [config, no_storage, virtual] String LocalStateDir {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String RunDir {
+ get;
+ set;
+ };
+
+ [config, no_storage, virtual] String SysconfDir {
+ get;
+ set;
+ };
+};
+
+}
diff --git a/lib/base/configwriter.cpp b/lib/base/configwriter.cpp
new file mode 100644
index 0000000..c9dd582
--- /dev/null
+++ b/lib/base/configwriter.cpp
@@ -0,0 +1,260 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configwriter.hpp"
+#include "base/exception.hpp"
+#include <boost/regex.hpp>
+#include <boost/algorithm/string/replace.hpp>
+#include <set>
+#include <iterator>
+
+using namespace icinga;
+
+void ConfigWriter::EmitBoolean(std::ostream& fp, bool val)
+{
+ fp << (val ? "true" : "false");
+}
+
+void ConfigWriter::EmitNumber(std::ostream& fp, double val)
+{
+ fp << std::fixed << val;
+}
+
+void ConfigWriter::EmitString(std::ostream& fp, const String& val)
+{
+ fp << "\"" << EscapeIcingaString(val) << "\"";
+}
+
+void ConfigWriter::EmitEmpty(std::ostream& fp)
+{
+ fp << "null";
+}
+
+void ConfigWriter::EmitArray(std::ostream& fp, int indentLevel, const Array::Ptr& val)
+{
+ fp << "[ ";
+ EmitArrayItems(fp, indentLevel, val);
+ if (val->GetLength() > 0)
+ fp << " ";
+ fp << "]";
+}
+
+void ConfigWriter::EmitArrayItems(std::ostream& fp, int indentLevel, const Array::Ptr& val)
+{
+ bool first = true;
+
+ ObjectLock olock(val);
+ for (const Value& item : val) {
+ if (first)
+ first = false;
+ else
+ fp << ", ";
+
+ EmitValue(fp, indentLevel, item);
+ }
+}
+
+void ConfigWriter::EmitScope(std::ostream& fp, int indentLevel, const Dictionary::Ptr& val,
+ const Array::Ptr& imports, bool splitDot)
+{
+ fp << "{";
+
+ if (imports && imports->GetLength() > 0) {
+ ObjectLock xlock(imports);
+ for (const Value& import : imports) {
+ fp << "\n";
+ EmitIndent(fp, indentLevel);
+ fp << "import \"" << import << "\"";
+ }
+
+ fp << "\n";
+ }
+
+ if (val) {
+ ObjectLock olock(val);
+ for (const Dictionary::Pair& kv : val) {
+ fp << "\n";
+ EmitIndent(fp, indentLevel);
+
+ if (splitDot) {
+ std::vector<String> tokens = kv.first.Split(".");
+
+ EmitIdentifier(fp, tokens[0], true);
+
+ for (std::vector<String>::size_type i = 1; i < tokens.size(); i++) {
+ fp << "[";
+ EmitString(fp, tokens[i]);
+ fp << "]";
+ }
+ } else
+ EmitIdentifier(fp, kv.first, true);
+
+ fp << " = ";
+ EmitValue(fp, indentLevel + 1, kv.second);
+ }
+ }
+
+ fp << "\n";
+ EmitIndent(fp, indentLevel - 1);
+ fp << "}";
+}
+
+void ConfigWriter::EmitValue(std::ostream& fp, int indentLevel, const Value& val)
+{
+ if (val.IsObjectType<Array>())
+ EmitArray(fp, indentLevel, val);
+ else if (val.IsObjectType<Dictionary>())
+ EmitScope(fp, indentLevel, val);
+ else if (val.IsObjectType<ConfigIdentifier>())
+ EmitIdentifier(fp, static_cast<ConfigIdentifier::Ptr>(val)->GetName(), false);
+ else if (val.IsString())
+ EmitString(fp, val);
+ else if (val.IsNumber())
+ EmitNumber(fp, val);
+ else if (val.IsBoolean())
+ EmitBoolean(fp, val);
+ else if (val.IsEmpty())
+ EmitEmpty(fp);
+}
+
+void ConfigWriter::EmitRaw(std::ostream& fp, const String& val)
+{
+ fp << val;
+}
+
+void ConfigWriter::EmitIndent(std::ostream& fp, int indentLevel)
+{
+ for (int i = 0; i < indentLevel; i++)
+ fp << "\t";
+}
+
+void ConfigWriter::EmitIdentifier(std::ostream& fp, const String& identifier, bool inAssignment)
+{
+ static std::set<String> keywords;
+ static std::mutex mutex;
+
+ {
+ std::unique_lock<std::mutex> lock(mutex);
+ if (keywords.empty()) {
+ const std::vector<String>& vkeywords = GetKeywords();
+ std::copy(vkeywords.begin(), vkeywords.end(), std::inserter(keywords, keywords.begin()));
+ }
+ }
+
+ if (keywords.find(identifier) != keywords.end()) {
+ fp << "@" << identifier;
+ return;
+ }
+
+ boost::regex expr("^[a-zA-Z_][a-zA-Z0-9\\_]*$");
+ boost::smatch what;
+ if (boost::regex_search(identifier.GetData(), what, expr))
+ fp << identifier;
+ else if (inAssignment)
+ EmitString(fp, identifier);
+ else
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid identifier"));
+}
+
+void ConfigWriter::EmitConfigItem(std::ostream& fp, const String& type, const String& name, bool isTemplate,
+ bool ignoreOnError, const Array::Ptr& imports, const Dictionary::Ptr& attrs)
+{
+ if (isTemplate)
+ fp << "template ";
+ else
+ fp << "object ";
+
+ EmitIdentifier(fp, type, false);
+ fp << " ";
+ EmitString(fp, name);
+
+ if (ignoreOnError)
+ fp << " ignore_on_error";
+
+ fp << " ";
+ EmitScope(fp, 1, attrs, imports, true);
+}
+
+void ConfigWriter::EmitComment(std::ostream& fp, const String& text)
+{
+ fp << "/* " << text << " */\n";
+}
+
+void ConfigWriter::EmitFunctionCall(std::ostream& fp, const String& name, const Array::Ptr& arguments)
+{
+ EmitIdentifier(fp, name, false);
+ fp << "(";
+ EmitArrayItems(fp, 0, arguments);
+ fp << ")";
+}
+
+String ConfigWriter::EscapeIcingaString(const String& str)
+{
+ String result = str;
+ boost::algorithm::replace_all(result, "\\", "\\\\");
+ boost::algorithm::replace_all(result, "\n", "\\n");
+ boost::algorithm::replace_all(result, "\t", "\\t");
+ boost::algorithm::replace_all(result, "\r", "\\r");
+ boost::algorithm::replace_all(result, "\b", "\\b");
+ boost::algorithm::replace_all(result, "\f", "\\f");
+ boost::algorithm::replace_all(result, "\"", "\\\"");
+ return result;
+}
+
+const std::vector<String>& ConfigWriter::GetKeywords()
+{
+ static std::vector<String> keywords;
+ static std::mutex mutex;
+ std::unique_lock<std::mutex> lock(mutex);
+
+ if (keywords.empty()) {
+ keywords.emplace_back("object");
+ keywords.emplace_back("template");
+ keywords.emplace_back("include");
+ keywords.emplace_back("include_recursive");
+ keywords.emplace_back("include_zones");
+ keywords.emplace_back("library");
+ keywords.emplace_back("null");
+ keywords.emplace_back("true");
+ keywords.emplace_back("false");
+ keywords.emplace_back("const");
+ keywords.emplace_back("var");
+ keywords.emplace_back("this");
+ keywords.emplace_back("globals");
+ keywords.emplace_back("locals");
+ keywords.emplace_back("use");
+ keywords.emplace_back("using");
+ keywords.emplace_back("namespace");
+ keywords.emplace_back("default");
+ keywords.emplace_back("ignore_on_error");
+ keywords.emplace_back("current_filename");
+ keywords.emplace_back("current_line");
+ keywords.emplace_back("apply");
+ keywords.emplace_back("to");
+ keywords.emplace_back("where");
+ keywords.emplace_back("import");
+ keywords.emplace_back("assign");
+ keywords.emplace_back("ignore");
+ keywords.emplace_back("function");
+ keywords.emplace_back("return");
+ keywords.emplace_back("break");
+ keywords.emplace_back("continue");
+ keywords.emplace_back("for");
+ keywords.emplace_back("if");
+ keywords.emplace_back("else");
+ keywords.emplace_back("while");
+ keywords.emplace_back("throw");
+ keywords.emplace_back("try");
+ keywords.emplace_back("except");
+ }
+
+ return keywords;
+}
+
+ConfigIdentifier::ConfigIdentifier(String identifier)
+ : m_Name(std::move(identifier))
+{ }
+
+String ConfigIdentifier::GetName() const
+{
+ return m_Name;
+}
diff --git a/lib/base/configwriter.hpp b/lib/base/configwriter.hpp
new file mode 100644
index 0000000..a0c70f7
--- /dev/null
+++ b/lib/base/configwriter.hpp
@@ -0,0 +1,67 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CONFIGWRITER_H
+#define CONFIGWRITER_H
+
+#include "base/object.hpp"
+#include "base/array.hpp"
+#include "base/dictionary.hpp"
+#include <iosfwd>
+
+namespace icinga
+{
+
+/**
+ * A config identifier.
+ *
+ * @ingroup base
+ */
+class ConfigIdentifier final : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ConfigIdentifier);
+
+ ConfigIdentifier(String name);
+
+ String GetName() const;
+
+private:
+ String m_Name;
+};
+
+/**
+ * A configuration writer.
+ *
+ * @ingroup base
+ */
+class ConfigWriter
+{
+public:
+ static void EmitBoolean(std::ostream& fp, bool val);
+ static void EmitNumber(std::ostream& fp, double val);
+ static void EmitString(std::ostream& fp, const String& val);
+ static void EmitEmpty(std::ostream& fp);
+ static void EmitArray(std::ostream& fp, int indentLevel, const Array::Ptr& val);
+ static void EmitArrayItems(std::ostream& fp, int indentLevel, const Array::Ptr& val);
+ static void EmitScope(std::ostream& fp, int indentLevel, const Dictionary::Ptr& val,
+ const Array::Ptr& imports = nullptr, bool splitDot = false);
+ static void EmitValue(std::ostream& fp, int indentLevel, const Value& val);
+ static void EmitRaw(std::ostream& fp, const String& val);
+ static void EmitIndent(std::ostream& fp, int indentLevel);
+
+ static void EmitIdentifier(std::ostream& fp, const String& identifier, bool inAssignment);
+ static void EmitConfigItem(std::ostream& fp, const String& type, const String& name, bool isTemplate,
+ bool ignoreOnError, const Array::Ptr& imports, const Dictionary::Ptr& attrs);
+
+ static void EmitComment(std::ostream& fp, const String& text);
+ static void EmitFunctionCall(std::ostream& fp, const String& name, const Array::Ptr& arguments);
+
+ static const std::vector<String>& GetKeywords();
+private:
+ static String EscapeIcingaString(const String& str);
+ ConfigWriter();
+};
+
+}
+
+#endif /* CONFIGWRITER_H */
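A small sketch of the emitter API declared above, producing DSL text for a made-up Host object; the attribute names and values are placeholders, and the Dictionary initializer-list constructor used here is the one added later in this patch (lib/base/dictionary.cpp):

    #include "base/configwriter.hpp"
    #include <iostream>

    using namespace icinga;

    void WriteHostObject()
    {
        Dictionary::Ptr attrs = new Dictionary({
            { "address", "192.0.2.10" },
            { "check_command", "hostalive" }
        });

        // Emits roughly: object Host "web01" { address = "192.0.2.10" ... }
        ConfigWriter::EmitConfigItem(std::cout, "Host", "web01",
            false /* isTemplate */, false /* ignoreOnError */, nullptr /* imports */, attrs);
        std::cout << "\n";
    }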
diff --git a/lib/base/console.cpp b/lib/base/console.cpp
new file mode 100644
index 0000000..99a5fad
--- /dev/null
+++ b/lib/base/console.cpp
@@ -0,0 +1,203 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/console.hpp"
+#include "base/initialize.hpp"
+#include <iostream>
+
+using namespace icinga;
+
+static ConsoleType l_ConsoleType = Console_Dumb;
+
+static void InitializeConsole()
+{
+ l_ConsoleType = Console_Dumb;
+
+#ifndef _WIN32
+ if (isatty(1))
+ l_ConsoleType = Console_VT100;
+#else /* _WIN32 */
+ l_ConsoleType = Console_Windows;
+#endif /* _WIN32 */
+}
+
+INITIALIZE_ONCE(InitializeConsole);
+
+ConsoleColorTag::ConsoleColorTag(int color, ConsoleType consoleType)
+ : m_Color(color), m_ConsoleType(consoleType)
+{ }
+
+std::ostream& icinga::operator<<(std::ostream& fp, const ConsoleColorTag& cct)
+{
+#ifndef _WIN32
+ if (cct.m_ConsoleType == Console_VT100 || Console::GetType(fp) == Console_VT100)
+ Console::PrintVT100ColorCode(fp, cct.m_Color);
+#else /* _WIN32 */
+ if (Console::GetType(fp) == Console_Windows) {
+ fp.flush();
+ Console::SetWindowsConsoleColor(fp, cct.m_Color);
+ }
+#endif /* _WIN32 */
+
+ return fp;
+}
+
+void Console::SetType(std::ostream& fp, ConsoleType type)
+{
+ if (&fp == &std::cout || &fp == &std::cerr)
+ l_ConsoleType = type;
+}
+
+ConsoleType Console::GetType(std::ostream& fp)
+{
+ if (&fp == &std::cout || &fp == &std::cerr)
+ return l_ConsoleType;
+ else
+ return Console_Dumb;
+}
+
+#ifndef _WIN32
+void Console::PrintVT100ColorCode(std::ostream& fp, int color)
+{
+ if (color == Console_Normal) {
+ fp << "\33[0m";
+ return;
+ }
+
+ switch (color & 0xff) {
+ case Console_ForegroundBlack:
+ fp << "\33[30m";
+ break;
+ case Console_ForegroundRed:
+ fp << "\33[31m";
+ break;
+ case Console_ForegroundGreen:
+ fp << "\33[32m";
+ break;
+ case Console_ForegroundYellow:
+ fp << "\33[33m";
+ break;
+ case Console_ForegroundBlue:
+ fp << "\33[34m";
+ break;
+ case Console_ForegroundMagenta:
+ fp << "\33[35m";
+ break;
+ case Console_ForegroundCyan:
+ fp << "\33[36m";
+ break;
+ case Console_ForegroundWhite:
+ fp << "\33[37m";
+ break;
+ }
+
+ switch (color & 0xff00) {
+ case Console_BackgroundBlack:
+ fp << "\33[40m";
+ break;
+ case Console_BackgroundRed:
+ fp << "\33[41m";
+ break;
+ case Console_BackgroundGreen:
+ fp << "\33[42m";
+ break;
+ case Console_BackgroundYellow:
+ fp << "\33[43m";
+ break;
+ case Console_BackgroundBlue:
+ fp << "\33[44m";
+ break;
+ case Console_BackgroundMagenta:
+ fp << "\33[45m";
+ break;
+ case Console_BackgroundCyan:
+ fp << "\33[46m";
+ break;
+ case Console_BackgroundWhite:
+ fp << "\33[47m";
+ break;
+ }
+
+ if (color & Console_Bold)
+ fp << "\33[1m";
+}
+#else /* _WIN32 */
+void Console::SetWindowsConsoleColor(std::ostream& fp, int color)
+{
+ CONSOLE_SCREEN_BUFFER_INFO consoleInfo;
+ HANDLE hConsole;
+
+ if (&fp == &std::cout)
+ hConsole = GetStdHandle(STD_OUTPUT_HANDLE);
+ else if (&fp == &std::cerr)
+ hConsole = GetStdHandle(STD_ERROR_HANDLE);
+ else
+ return;
+
+ if (!GetConsoleScreenBufferInfo(hConsole, &consoleInfo))
+ return;
+
+ WORD attrs = 0;
+
+ if (color == Console_Normal)
+ attrs = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE;
+
+ switch (color & 0xff) {
+ case Console_ForegroundBlack:
+ attrs |= 0;
+ break;
+ case Console_ForegroundRed:
+ attrs |= FOREGROUND_RED;
+ break;
+ case Console_ForegroundGreen:
+ attrs |= FOREGROUND_GREEN;
+ break;
+ case Console_ForegroundYellow:
+ attrs |= FOREGROUND_RED | FOREGROUND_GREEN;
+ break;
+ case Console_ForegroundBlue:
+ attrs |= FOREGROUND_BLUE;
+ break;
+ case Console_ForegroundMagenta:
+ attrs |= FOREGROUND_RED | FOREGROUND_BLUE;
+ break;
+ case Console_ForegroundCyan:
+ attrs |= FOREGROUND_GREEN | FOREGROUND_BLUE;
+ break;
+ case Console_ForegroundWhite:
+ attrs |= FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE;
+ break;
+ }
+
+ switch (color & 0xff00) {
+ case Console_BackgroundBlack:
+ attrs |= 0;
+ break;
+ case Console_BackgroundRed:
+ attrs |= BACKGROUND_RED;
+ break;
+ case Console_BackgroundGreen:
+ attrs |= BACKGROUND_GREEN;
+ break;
+ case Console_BackgroundYellow:
+ attrs |= BACKGROUND_RED | BACKGROUND_GREEN;
+ break;
+ case Console_BackgroundBlue:
+ attrs |= BACKGROUND_BLUE;
+ break;
+ case Console_BackgroundMagenta:
+ attrs |= BACKGROUND_RED | BACKGROUND_BLUE;
+ break;
+ case Console_BackgroundCyan:
+ attrs |= BACKGROUND_GREEN | BACKGROUND_BLUE;
+ break;
+ case Console_BackgroundWhite:
+ attrs |= BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE;
+ break;
+ }
+
+ if (color & Console_Bold)
+ attrs |= FOREGROUND_INTENSITY;
+
+ SetConsoleTextAttribute(hConsole, attrs);
+}
+#endif /* _WIN32 */
diff --git a/lib/base/console.hpp b/lib/base/console.hpp
new file mode 100644
index 0000000..f5b8c9a
--- /dev/null
+++ b/lib/base/console.hpp
@@ -0,0 +1,91 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CONSOLE_H
+#define CONSOLE_H
+
+#include "base/i2-base.hpp"
+#include <iosfwd>
+
+namespace icinga
+{
+
+enum ConsoleColor
+{
+ Console_Normal,
+
+ // bit 0-7: foreground
+ Console_ForegroundBlack = 1,
+ Console_ForegroundRed = 2,
+ Console_ForegroundGreen = 3,
+ Console_ForegroundYellow = 4,
+ Console_ForegroundBlue = 5,
+ Console_ForegroundMagenta = 6,
+ Console_ForegroundCyan = 7,
+ Console_ForegroundWhite = 8,
+
+ // bit 8-15: background
+ Console_BackgroundBlack = 256,
+ Console_BackgroundRed = 512,
+ Console_BackgroundGreen = 768,
+ Console_BackgroundYellow = 1024,
+ Console_BackgroundBlue = 1280,
+ Console_BackgroundMagenta = 1536,
+ Console_BackgroundCyan = 1792,
+ Console_BackgroundWhite = 2048,
+
+ // bit 16-23: flags
+ Console_Bold = 65536
+};
+
+enum ConsoleType
+{
+ Console_Autodetect = -1,
+
+ Console_Dumb,
+#ifndef _WIN32
+ Console_VT100,
+#else /* _WIN32 */
+ Console_Windows,
+#endif /* _WIN32 */
+};
+
+class ConsoleColorTag
+{
+public:
+ ConsoleColorTag(int color, ConsoleType consoleType = Console_Autodetect);
+
+ friend std::ostream& operator<<(std::ostream& fp, const ConsoleColorTag& cct);
+
+private:
+ int m_Color;
+ int m_ConsoleType;
+};
+
+std::ostream& operator<<(std::ostream& fp, const ConsoleColorTag& cct);
+
+/**
+ * Console utilities.
+ *
+ * @ingroup base
+ */
+class Console
+{
+public:
+ static void DetectType();
+
+ static void SetType(std::ostream& fp, ConsoleType type);
+ static ConsoleType GetType(std::ostream& fp);
+
+#ifndef _WIN32
+ static void PrintVT100ColorCode(std::ostream& fp, int color);
+#else /* _WIN32 */
+ static void SetWindowsConsoleColor(std::ostream& fp, int color);
+#endif /* _WIN32 */
+
+private:
+ Console();
+};
+
+}
+
+#endif /* CONSOLE_H */
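A quick sketch of the ConsoleColorTag stream manipulator defined above; on a VT100-capable terminal the text is printed in bold red, on a dumb console the tags are no-ops:

    #include "base/console.hpp"
    #include <iostream>

    using namespace icinga;

    void PrintCritical()
    {
        std::cout << ConsoleColorTag(Console_ForegroundRed | Console_Bold)
            << "CRITICAL"
            << ConsoleColorTag(Console_Normal)
            << ": check timed out" << std::endl;
    }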
diff --git a/lib/base/context.cpp b/lib/base/context.cpp
new file mode 100644
index 0000000..9c0a781
--- /dev/null
+++ b/lib/base/context.cpp
@@ -0,0 +1,64 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/context.hpp"
+#include <boost/thread/tss.hpp>
+#include <iostream>
+#include <sstream>
+#include <utility>
+
+using namespace icinga;
+
+static boost::thread_specific_ptr<std::vector<std::function<void(std::ostream&)>>> l_Frames;
+
+ContextFrame::ContextFrame(std::function<void(std::ostream&)> message)
+{
+ GetFrames().emplace_back(std::move(message));
+}
+
+ContextFrame::~ContextFrame()
+{
+ GetFrames().pop_back();
+}
+
+std::vector<std::function<void(std::ostream&)>>& ContextFrame::GetFrames()
+{
+ if (!l_Frames.get())
+ l_Frames.reset(new std::vector<std::function<void(std::ostream&)>>());
+
+ return *l_Frames;
+}
+
+ContextTrace::ContextTrace()
+{
+ for (auto frame (ContextFrame::GetFrames().rbegin()); frame != ContextFrame::GetFrames().rend(); ++frame) {
+ std::ostringstream oss;
+
+ (*frame)(oss);
+ m_Frames.emplace_back(oss.str());
+ }
+}
+
+void ContextTrace::Print(std::ostream& fp) const
+{
+ if (m_Frames.empty())
+ return;
+
+ fp << "\n";
+
+ int i = 0;
+ for (const String& frame : m_Frames) {
+ fp << "\t(" << i << ") " << frame << "\n";
+ i++;
+ }
+}
+
+size_t ContextTrace::GetLength() const
+{
+ return m_Frames.size();
+}
+
+std::ostream& icinga::operator<<(std::ostream& stream, const ContextTrace& trace)
+{
+ trace.Print(stream);
+ return stream;
+}
diff --git a/lib/base/context.hpp b/lib/base/context.hpp
new file mode 100644
index 0000000..d6fe733
--- /dev/null
+++ b/lib/base/context.hpp
@@ -0,0 +1,54 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CONTEXT_H
+#define CONTEXT_H
+
+#include "base/i2-base.hpp"
+#include "base/string.hpp"
+#include <functional>
+#include <vector>
+
+namespace icinga
+{
+
+class ContextTrace
+{
+public:
+ ContextTrace();
+
+ void Print(std::ostream& fp) const;
+
+ size_t GetLength() const;
+
+private:
+ std::vector<String> m_Frames;
+};
+
+std::ostream& operator<<(std::ostream& stream, const ContextTrace& trace);
+
+/**
+ * A context frame.
+ *
+ * @ingroup base
+ */
+class ContextFrame
+{
+public:
+ ContextFrame(std::function<void(std::ostream&)> message);
+ ~ContextFrame();
+
+private:
+ static std::vector<std::function<void(std::ostream&)>>& GetFrames();
+
+ friend class ContextTrace;
+};
+
+/* The currentContextFrame variable has to be volatile in order to prevent
+ * the compiler from optimizing it away. */
+#define CONTEXT(message) volatile icinga::ContextFrame currentContextFrame ([&](std::ostream& _CONTEXT_stream) { \
+_CONTEXT_stream << message; \
+})
+
+}
+
+#endif /* CONTEXT_H */
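A brief sketch of how the CONTEXT() frames above combine with ContextTrace; the function names and messages are placeholders. While a frame is alive, a ContextTrace constructed further down the call chain lists it, innermost first:

    #include "base/context.hpp"
    #include <iostream>

    using namespace icinga;

    void ParseHost(const String& name)
    {
        CONTEXT("Parsing host '" << name << "'");

        // Both the outer and the inner frame are still on the thread-local
        // stack here, so the trace prints both of them.
        std::cerr << "Current context:" << ContextTrace() << "\n";
    }

    void ParseFile(const String& path)
    {
        CONTEXT("Parsing configuration file '" << path << "'");
        ParseHost("web01");
    }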
diff --git a/lib/base/convert.cpp b/lib/base/convert.cpp
new file mode 100644
index 0000000..19d3e44
--- /dev/null
+++ b/lib/base/convert.cpp
@@ -0,0 +1,46 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/convert.hpp"
+#include "base/datetime.hpp"
+#include <boost/lexical_cast.hpp>
+#include <iomanip>
+
+using namespace icinga;
+
+String Convert::ToString(const String& val)
+{
+ return val;
+}
+
+String Convert::ToString(const Value& val)
+{
+ return val;
+}
+
+String Convert::ToString(double val)
+{
+ double integral;
+ double fractional = std::modf(val, &integral);
+
+ std::ostringstream msgbuf;
+ if (fractional == 0) {
+ msgbuf << std::setprecision(0);
+ }
+ msgbuf << std::fixed << val;
+ return msgbuf.str();
+}
+
+double Convert::ToDateTimeValue(double val)
+{
+ return val;
+}
+
+double Convert::ToDateTimeValue(const Value& val)
+{
+ if (val.IsNumber())
+ return val;
+ else if (val.IsObjectType<DateTime>())
+ return static_cast<DateTime::Ptr>(val)->GetValue();
+ else
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Not a DateTime value."));
+}
diff --git a/lib/base/convert.hpp b/lib/base/convert.hpp
new file mode 100644
index 0000000..e0754b3
--- /dev/null
+++ b/lib/base/convert.hpp
@@ -0,0 +1,84 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CONVERT_H
+#define CONVERT_H
+
+#include "base/i2-base.hpp"
+#include "base/value.hpp"
+#include <boost/lexical_cast.hpp>
+
+namespace icinga
+{
+
+/**
+ * Utility class for converting types.
+ *
+ * @ingroup base
+ */
+class Convert
+{
+public:
+ template<typename T>
+ static long ToLong(const T& val)
+ {
+ try {
+ return boost::lexical_cast<long>(val);
+ } catch (const std::exception&) {
+ std::ostringstream msgbuf;
+ msgbuf << "Can't convert '" << val << "' to an integer.";
+ BOOST_THROW_EXCEPTION(std::invalid_argument(msgbuf.str()));
+ }
+ }
+
+ template<typename T>
+ static double ToDouble(const T& val)
+ {
+ try {
+ return boost::lexical_cast<double>(val);
+ } catch (const std::exception&) {
+ std::ostringstream msgbuf;
+ msgbuf << "Can't convert '" << val << "' to a floating point number.";
+ BOOST_THROW_EXCEPTION(std::invalid_argument(msgbuf.str()));
+ }
+ }
+
+ static long ToLong(const Value& val)
+ {
+ return val;
+ }
+
+ static long ToLong(double val)
+ {
+ return static_cast<long>(val);
+ }
+
+ static double ToDouble(const Value& val)
+ {
+ return val;
+ }
+
+ static bool ToBool(const Value& val)
+ {
+ return val.ToBool();
+ }
+
+ template<typename T>
+ static String ToString(const T& val)
+ {
+ return boost::lexical_cast<std::string>(val);
+ }
+
+ static String ToString(const String& val);
+ static String ToString(const Value& val);
+ static String ToString(double val);
+
+ static double ToDateTimeValue(double val);
+ static double ToDateTimeValue(const Value& val);
+
+private:
+ Convert();
+};
+
+}
+
+#endif /* CONVERT_H */
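To illustrate the helpers above, a couple of conversions; the failing case throws std::invalid_argument as shown in the template bodies:

    #include "base/convert.hpp"
    #include <iostream>

    using namespace icinga;

    void ConvertExamples()
    {
        long port = Convert::ToLong("5665");   // lexical_cast under the hood
        double timeout = Convert::ToDouble("10.5");
        String three = Convert::ToString(3.0); // integral doubles drop the fraction: "3"

        std::cout << port << " " << timeout << " " << three << "\n";
    }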
diff --git a/lib/base/datetime-script.cpp b/lib/base/datetime-script.cpp
new file mode 100644
index 0000000..6c18381
--- /dev/null
+++ b/lib/base/datetime-script.cpp
@@ -0,0 +1,28 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/datetime.hpp"
+#include "base/function.hpp"
+#include "base/functionwrapper.hpp"
+#include "base/scriptframe.hpp"
+#include "base/objectlock.hpp"
+
+using namespace icinga;
+
+static String DateTimeFormat(const String& format)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ DateTime::Ptr self = static_cast<DateTime::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+
+ return self->Format(format);
+}
+
+Object::Ptr DateTime::GetPrototype()
+{
+ static Dictionary::Ptr prototype = new Dictionary({
+ { "format", new Function("DateTime#format", DateTimeFormat, { "format" }) }
+ });
+
+ return prototype;
+}
+
diff --git a/lib/base/datetime.cpp b/lib/base/datetime.cpp
new file mode 100644
index 0000000..aa7b5e5
--- /dev/null
+++ b/lib/base/datetime.cpp
@@ -0,0 +1,58 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/datetime.hpp"
+#include "base/datetime-ti.cpp"
+#include "base/utility.hpp"
+#include "base/primitivetype.hpp"
+
+using namespace icinga;
+
+REGISTER_TYPE_WITH_PROTOTYPE(DateTime, DateTime::GetPrototype());
+
+DateTime::DateTime(double value)
+ : m_Value(value)
+{ }
+
+DateTime::DateTime(const std::vector<Value>& args)
+{
+ if (args.empty())
+ m_Value = Utility::GetTime();
+ else if (args.size() == 3 || args.size() == 6) {
+ struct tm tms;
+ tms.tm_year = args[0] - 1900;
+ tms.tm_mon = args[1] - 1;
+ tms.tm_mday = args[2];
+
+ if (args.size() == 6) {
+ tms.tm_hour = args[3];
+ tms.tm_min = args[4];
+ tms.tm_sec = args[5];
+ } else {
+ tms.tm_hour = 0;
+ tms.tm_min = 0;
+ tms.tm_sec = 0;
+ }
+
+ tms.tm_isdst = -1;
+
+ m_Value = mktime(&tms);
+ } else if (args.size() == 1)
+ m_Value = args[0];
+ else
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid number of arguments for the DateTime constructor."));
+}
+
+double DateTime::GetValue() const
+{
+ return m_Value;
+}
+
+String DateTime::Format(const String& format) const
+{
+ return Utility::FormatDateTime(format.CStr(), m_Value);
+}
+
+String DateTime::ToString() const
+{
+ return Format("%Y-%m-%d %H:%M:%S %z");
+}
diff --git a/lib/base/datetime.hpp b/lib/base/datetime.hpp
new file mode 100644
index 0000000..e7b8a1f
--- /dev/null
+++ b/lib/base/datetime.hpp
@@ -0,0 +1,40 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef DATETIME_H
+#define DATETIME_H
+
+#include "base/i2-base.hpp"
+#include "base/datetime-ti.hpp"
+#include "base/value.hpp"
+#include <vector>
+
+namespace icinga
+{
+
+/**
+ * A date/time value.
+ *
+ * @ingroup base
+ */
+class DateTime final : public ObjectImpl<DateTime>
+{
+public:
+ DECLARE_OBJECT(DateTime);
+
+ DateTime(double value);
+ DateTime(const std::vector<Value>& args);
+
+ String Format(const String& format) const;
+
+ double GetValue() const override;
+ String ToString() const override;
+
+ static Object::Ptr GetPrototype();
+
+private:
+ double m_Value;
+};
+
+}
+
+#endif /* DATETIME_H */
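A minimal sketch of the vararg constructor and Format() declared above; the date is an arbitrary example and is interpreted as local time via mktime():

    #include "base/datetime.hpp"
    #include <iostream>

    using namespace icinga;

    void DateTimeExample()
    {
        // Three arguments select the year/month/day constructor branch.
        DateTime::Ptr dt = new DateTime({ 2024, 4, 13 });

        std::cout << dt->Format("%Y-%m-%d") << "\n";
        std::cout << dt->ToString() << "\n"; // "%Y-%m-%d %H:%M:%S %z"
    }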
diff --git a/lib/base/datetime.ti b/lib/base/datetime.ti
new file mode 100644
index 0000000..b9d7375
--- /dev/null
+++ b/lib/base/datetime.ti
@@ -0,0 +1,15 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+library base;
+
+namespace icinga
+{
+
+vararg_constructor class DateTime
+{
+ [state, no_storage] Timestamp value {
+ get;
+ };
+};
+
+}
diff --git a/lib/base/debug.hpp b/lib/base/debug.hpp
new file mode 100644
index 0000000..54b424c
--- /dev/null
+++ b/lib/base/debug.hpp
@@ -0,0 +1,49 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef DEBUG_H
+#define DEBUG_H
+
+#include "i2-base.hpp"
+
+#ifndef I2_DEBUG
+# define ASSERT(expr) ((void)0)
+#else /* I2_DEBUG */
+# define ASSERT(expr) ((expr) ? 0 : icinga_assert_fail(#expr, __FILE__, __LINE__))
+#endif /* I2_DEBUG */
+
+#define VERIFY(expr) ((expr) ? 0 : icinga_assert_fail(#expr, __FILE__, __LINE__))
+
+#ifdef _MSC_VER
+# define NORETURNPRE __declspec(noreturn)
+#else /* _MSC_VER */
+# define NORETURNPRE
+#endif /* _MSC_VER */
+
+#ifdef __GNUC__
+# define NORETURNPOST __attribute__((noreturn))
+#else /* __GNUC__ */
+# define NORETURNPOST
+#endif /* __GNUC__ */
+
+NORETURNPRE int icinga_assert_fail(const char *expr, const char *file, int line) NORETURNPOST;
+
+#ifdef _MSC_VER
+# pragma warning( push )
+# pragma warning( disable : 4646 ) /* function declared with __declspec(noreturn) has non-void return type */
+#endif /* _MSC_VER */
+
+inline int icinga_assert_fail(const char *expr, const char *file, int line)
+{
+ fprintf(stderr, "%s:%d: assertion failed: %s\n", file, line, expr);
+ std::abort();
+
+#if !defined(__GNUC__) && !defined(_MSC_VER)
+ return 0;
+#endif /* !defined(__GNUC__) && !defined(_MSC_VER) */
+}
+
+#ifdef _MSC_VER
+# pragma warning( pop )
+#endif /* _MSC_VER */
+
+#endif /* DEBUG_H */
diff --git a/lib/base/debuginfo.cpp b/lib/base/debuginfo.cpp
new file mode 100644
index 0000000..99006ac
--- /dev/null
+++ b/lib/base/debuginfo.cpp
@@ -0,0 +1,98 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/debuginfo.hpp"
+#include "base/convert.hpp"
+#include <fstream>
+
+using namespace icinga;
+
+/**
+ * Outputs a DebugInfo struct to a stream.
+ *
+ * @param out The output stream.
+ * @param val The DebugInfo struct.
+ * @returns The output stream.
+ */
+std::ostream& icinga::operator<<(std::ostream& out, const DebugInfo& val)
+{
+ out << "in " << val.Path << ": "
+ << val.FirstLine << ":" << val.FirstColumn
+ << "-"
+ << val.LastLine << ":" << val.LastColumn;
+
+ return out;
+}
+
+DebugInfo icinga::DebugInfoRange(const DebugInfo& start, const DebugInfo& end)
+{
+ DebugInfo result;
+ result.Path = start.Path;
+ result.FirstLine = start.FirstLine;
+ result.FirstColumn = start.FirstColumn;
+ result.LastLine = end.LastLine;
+ result.LastColumn = end.LastColumn;
+ return result;
+}
+
+#define EXTRA_LINES 2
+
+void icinga::ShowCodeLocation(std::ostream& out, const DebugInfo& di, bool verbose)
+{
+ if (di.Path.IsEmpty())
+ return;
+
+ out << "Location: " << di;
+
+ std::ifstream ifs;
+ ifs.open(di.Path.CStr(), std::ifstream::in);
+
+ int lineno = 0;
+ char line[1024];
+
+ while (ifs.good() && lineno <= di.LastLine + EXTRA_LINES) {
+ if (lineno == 0)
+ out << "\n";
+
+ lineno++;
+
+ ifs.getline(line, sizeof(line));
+
+ for (int i = 0; line[i]; i++)
+ if (line[i] == '\t')
+ line[i] = ' ';
+
+ int extra_lines = verbose ? EXTRA_LINES : 0;
+
+ if (lineno < di.FirstLine - extra_lines || lineno > di.LastLine + extra_lines)
+ continue;
+
+ String pathInfo = di.Path + "(" + Convert::ToString(lineno) + "): ";
+ out << pathInfo;
+ out << line << "\n";
+
+ if (lineno >= di.FirstLine && lineno <= di.LastLine) {
+ int start, end;
+
+ start = 0;
+ end = strlen(line);
+
+ if (lineno == di.FirstLine)
+ start = di.FirstColumn - 1;
+
+ if (lineno == di.LastLine)
+ end = di.LastColumn;
+
+ if (start < 0) {
+ end -= start;
+ start = 0;
+ }
+
+ out << String(pathInfo.GetLength(), ' ');
+ out << String(start, ' ');
+ out << String(end - start, '^');
+
+ out << "\n";
+ }
+ }
+}
+
diff --git a/lib/base/debuginfo.hpp b/lib/base/debuginfo.hpp
new file mode 100644
index 0000000..d47db91
--- /dev/null
+++ b/lib/base/debuginfo.hpp
@@ -0,0 +1,36 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef DEBUGINFO_H
+#define DEBUGINFO_H
+
+#include "base/i2-base.hpp"
+#include "base/string.hpp"
+
+namespace icinga
+{
+
+/**
+ * Debug information for a configuration element.
+ *
+ * @ingroup config
+ */
+struct DebugInfo
+{
+ String Path;
+
+ int FirstLine{0};
+ int FirstColumn{0};
+
+ int LastLine{0};
+ int LastColumn{0};
+};
+
+std::ostream& operator<<(std::ostream& out, const DebugInfo& val);
+
+DebugInfo DebugInfoRange(const DebugInfo& start, const DebugInfo& end);
+
+void ShowCodeLocation(std::ostream& out, const DebugInfo& di, bool verbose = true);
+
+}
+
+#endif /* DEBUGINFO_H */
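For illustration, a sketch that fills in a DebugInfo record by hand and renders it; the path is a placeholder, and ShowCodeLocation() only prints the annotated source excerpt when that file actually exists:

    #include "base/debuginfo.hpp"
    #include <iostream>

    using namespace icinga;

    void ReportLocation()
    {
        DebugInfo di;
        di.Path = "/etc/icinga2/conf.d/hosts.conf"; // placeholder
        di.FirstLine = 12;
        di.FirstColumn = 3;
        di.LastLine = 12;
        di.LastColumn = 17;

        // Prints "Location: in <path>: 12:3-12:17" and, if the file can be
        // opened, the offending lines with a ^^^ marker underneath.
        ShowCodeLocation(std::cerr, di);
    }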
diff --git a/lib/base/defer.hpp b/lib/base/defer.hpp
new file mode 100644
index 0000000..2a23261
--- /dev/null
+++ b/lib/base/defer.hpp
@@ -0,0 +1,54 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef DEFER
+#define DEFER
+
+#include <functional>
+#include <utility>
+
+namespace icinga
+{
+
+/**
+ * An action to be executed at end of scope.
+ *
+ * @ingroup base
+ */
+class Defer
+{
+public:
+ inline
+ Defer(std::function<void()> func) : m_Func(std::move(func))
+ {
+ }
+
+ Defer(const Defer&) = delete;
+ Defer(Defer&&) = delete;
+ Defer& operator=(const Defer&) = delete;
+ Defer& operator=(Defer&&) = delete;
+
+ inline
+ ~Defer()
+ {
+ if (m_Func) {
+ try {
+ m_Func();
+ } catch (...) {
+ // https://stackoverflow.com/questions/130117/throwing-exceptions-out-of-a-destructor
+ }
+ }
+ }
+
+ inline
+ void Cancel()
+ {
+ m_Func = nullptr;
+ }
+
+private:
+ std::function<void()> m_Func;
+};
+
+}
+
+#endif /* DEFER */
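A small sketch of the scope guard above, used here to close a C file handle on every return path; the path is a placeholder:

    #include "base/defer.hpp"
    #include <cstdio>

    void CopyExample()
    {
        std::FILE *fp = std::fopen("/tmp/example.txt", "r"); // placeholder path
        if (!fp)
            return;

        icinga::Defer closeFile ([fp]() { std::fclose(fp); });

        // ... work with fp; the Defer destructor closes it on scope exit ...

        // If ownership is handed off elsewhere, the cleanup can be disarmed:
        // closeFile.Cancel();
    }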
diff --git a/lib/base/dependencygraph.cpp b/lib/base/dependencygraph.cpp
new file mode 100644
index 0000000..025eb3e
--- /dev/null
+++ b/lib/base/dependencygraph.cpp
@@ -0,0 +1,50 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/dependencygraph.hpp"
+
+using namespace icinga;
+
+std::mutex DependencyGraph::m_Mutex;
+std::map<Object *, std::map<Object *, int> > DependencyGraph::m_Dependencies;
+
+void DependencyGraph::AddDependency(Object *parent, Object *child)
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+ m_Dependencies[child][parent]++;
+}
+
+void DependencyGraph::RemoveDependency(Object *parent, Object *child)
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ auto& refs = m_Dependencies[child];
+ auto it = refs.find(parent);
+
+ if (it == refs.end())
+ return;
+
+ it->second--;
+
+ if (it->second == 0)
+ refs.erase(it);
+
+ if (refs.empty())
+ m_Dependencies.erase(child);
+}
+
+std::vector<Object::Ptr> DependencyGraph::GetParents(const Object::Ptr& child)
+{
+ std::vector<Object::Ptr> objects;
+
+ std::unique_lock<std::mutex> lock(m_Mutex);
+ auto it = m_Dependencies.find(child.get());
+
+ if (it != m_Dependencies.end()) {
+ typedef std::pair<Object *, int> kv_pair;
+ for (const kv_pair& kv : it->second) {
+ objects.emplace_back(kv.first);
+ }
+ }
+
+ return objects;
+}
diff --git a/lib/base/dependencygraph.hpp b/lib/base/dependencygraph.hpp
new file mode 100644
index 0000000..51aa90e
--- /dev/null
+++ b/lib/base/dependencygraph.hpp
@@ -0,0 +1,34 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef DEPENDENCYGRAPH_H
+#define DEPENDENCYGRAPH_H
+
+#include "base/i2-base.hpp"
+#include "base/object.hpp"
+#include <map>
+#include <mutex>
+
+namespace icinga {
+
+/**
+ * A graph that tracks dependencies between objects.
+ *
+ * @ingroup base
+ */
+class DependencyGraph
+{
+public:
+ static void AddDependency(Object *parent, Object *child);
+ static void RemoveDependency(Object *parent, Object *child);
+ static std::vector<Object::Ptr> GetParents(const Object::Ptr& child);
+
+private:
+ DependencyGraph();
+
+ static std::mutex m_Mutex;
+ static std::map<Object *, std::map<Object *, int> > m_Dependencies;
+};
+
+}
+
+#endif /* DEPENDENCYGRAPH_H */
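A rough sketch of the static API above; the arguments are any reference-counted Objects, and the names zone/endpoint are only placeholders for whatever relation the caller tracks:

    #include "base/dependencygraph.hpp"
    #include <vector>

    using namespace icinga;

    void TrackRelation(const Object::Ptr& zone, const Object::Ptr& endpoint)
    {
        // Record that 'endpoint' (child) references 'zone' (parent).
        DependencyGraph::AddDependency(zone.get(), endpoint.get());

        // Later: which parents does the child still reference?
        std::vector<Object::Ptr> parents = DependencyGraph::GetParents(endpoint);
        (void)parents;

        DependencyGraph::RemoveDependency(zone.get(), endpoint.get());
    }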
diff --git a/lib/base/dictionary-script.cpp b/lib/base/dictionary-script.cpp
new file mode 100644
index 0000000..ad19c5b
--- /dev/null
+++ b/lib/base/dictionary-script.cpp
@@ -0,0 +1,119 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/dictionary.hpp"
+#include "base/function.hpp"
+#include "base/functionwrapper.hpp"
+#include "base/scriptframe.hpp"
+#include "base/array.hpp"
+
+using namespace icinga;
+
+static double DictionaryLen()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Dictionary::Ptr self = static_cast<Dictionary::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ return self->GetLength();
+}
+
+static void DictionarySet(const String& key, const Value& value)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Dictionary::Ptr self = static_cast<Dictionary::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ self->Set(key, value);
+}
+
+static Value DictionaryGet(const String& key)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Dictionary::Ptr self = static_cast<Dictionary::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ return self->Get(key);
+}
+
+static void DictionaryRemove(const String& key)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Dictionary::Ptr self = static_cast<Dictionary::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ self->Remove(key);
+}
+
+static void DictionaryClear()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Dictionary::Ptr self = static_cast<Dictionary::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ self->Clear();
+}
+
+static bool DictionaryContains(const String& key)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Dictionary::Ptr self = static_cast<Dictionary::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ return self->Contains(key);
+}
+
+static Dictionary::Ptr DictionaryShallowClone()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Dictionary::Ptr self = static_cast<Dictionary::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ return self->ShallowClone();
+}
+
+static Array::Ptr DictionaryKeys()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Dictionary::Ptr self = static_cast<Dictionary::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+
+ ArrayData keys;
+ ObjectLock olock(self);
+ for (const Dictionary::Pair& kv : self) {
+ keys.push_back(kv.first);
+ }
+ return new Array(std::move(keys));
+}
+
+static Array::Ptr DictionaryValues()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Dictionary::Ptr self = static_cast<Dictionary::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+
+ ArrayData values;
+ ObjectLock olock(self);
+ for (const Dictionary::Pair& kv : self) {
+ values.push_back(kv.second);
+ }
+ return new Array(std::move(values));
+}
+
+static void DictionaryFreeze()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Dictionary::Ptr self = static_cast<Dictionary::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ self->Freeze();
+}
+
+Object::Ptr Dictionary::GetPrototype()
+{
+ static Dictionary::Ptr prototype = new Dictionary({
+ { "len", new Function("Dictionary#len", DictionaryLen, {}, true) },
+ { "set", new Function("Dictionary#set", DictionarySet, { "key", "value" }) },
+ { "get", new Function("Dictionary#get", DictionaryGet, { "key" }) },
+ { "remove", new Function("Dictionary#remove", DictionaryRemove, { "key" }) },
+ { "clear", new Function("Dictionary#clear", DictionaryClear, {}) },
+ { "contains", new Function("Dictionary#contains", DictionaryContains, { "key" }, true) },
+ { "shallow_clone", new Function("Dictionary#shallow_clone", DictionaryShallowClone, {}, true) },
+ { "keys", new Function("Dictionary#keys", DictionaryKeys, {}, true) },
+ { "values", new Function("Dictionary#values", DictionaryValues, {}, true) },
+ { "freeze", new Function("Dictionary#freeze", DictionaryFreeze, {}) }
+ });
+
+ return prototype;
+}
+
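The prototype above maps the script-level Dictionary methods onto the C++ Dictionary API added in this patch. As a rough, non-authoritative sketch (not part of the patch itself), the same operations look like this on the C++ side:

    #include "base/dictionary.hpp"

    using namespace icinga;

    void DictionaryScriptSketch()
    {
        // Equivalent to `{ address = "192.0.2.10" }` in the DSL.
        Dictionary::Ptr dict = new Dictionary({ { "address", "192.0.2.10" } });

        dict->Set("port", 5665);               // Dictionary#set
        Value address = dict->Get("address");  // Dictionary#get
        bool hasPort = dict->Contains("port"); // Dictionary#contains
        size_t len = dict->GetLength();        // Dictionary#len

        (void)address; (void)hasPort; (void)len;
    }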
diff --git a/lib/base/dictionary.cpp b/lib/base/dictionary.cpp
new file mode 100644
index 0000000..8d3f80d
--- /dev/null
+++ b/lib/base/dictionary.cpp
@@ -0,0 +1,317 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/dictionary.hpp"
+#include "base/objectlock.hpp"
+#include "base/debug.hpp"
+#include "base/primitivetype.hpp"
+#include "base/configwriter.hpp"
+#include <sstream>
+
+using namespace icinga;
+
+template class std::map<String, Value>;
+
+REGISTER_PRIMITIVE_TYPE(Dictionary, Object, Dictionary::GetPrototype());
+
+Dictionary::Dictionary(const DictionaryData& other)
+{
+ for (const auto& kv : other)
+ m_Data.insert(kv);
+}
+
+Dictionary::Dictionary(DictionaryData&& other)
+{
+ for (auto& kv : other)
+ m_Data.insert(std::move(kv));
+}
+
+Dictionary::Dictionary(std::initializer_list<Dictionary::Pair> init)
+ : m_Data(init)
+{ }
+
+/**
+ * Retrieves a value from a dictionary.
+ *
+ * @param key The key whose value should be retrieved.
+ * @returns The value, or an empty value if the key was not found.
+ */
+Value Dictionary::Get(const String& key) const
+{
+ std::shared_lock<std::shared_timed_mutex> lock (m_DataMutex);
+
+ auto it = m_Data.find(key);
+
+ if (it == m_Data.end())
+ return Empty;
+
+ return it->second;
+}
+
+/**
+ * Retrieves a value from a dictionary.
+ *
+ * @param key The key whose value should be retrieved.
+ * @param result The value of the dictionary item (only set when the key exists)
+ * @returns true if the key exists, false otherwise.
+ */
+bool Dictionary::Get(const String& key, Value *result) const
+{
+ std::shared_lock<std::shared_timed_mutex> lock (m_DataMutex);
+
+ auto it = m_Data.find(key);
+
+ if (it == m_Data.end())
+ return false;
+
+ *result = it->second;
+ return true;
+}
+
+/**
+ * Retrieves a value's address from a dictionary.
+ *
+ * @param key The key whose value's address should be retrieved.
+ * @returns nullptr if the key was not found.
+ */
+const Value * Dictionary::GetRef(const String& key) const
+{
+ std::shared_lock<std::shared_timed_mutex> lock (m_DataMutex);
+ auto it (m_Data.find(key));
+
+ return it == m_Data.end() ? nullptr : &it->second;
+}
+
+/**
+ * Sets a value in the dictionary.
+ *
+ * @param key The key.
+ * @param value The value.
+ * @param overrideFrozen Whether to allow modifying frozen dictionaries.
+ */
+void Dictionary::Set(const String& key, Value value, bool overrideFrozen)
+{
+ ObjectLock olock(this);
+ std::unique_lock<std::shared_timed_mutex> lock (m_DataMutex);
+
+ if (m_Frozen && !overrideFrozen)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Value in dictionary must not be modified."));
+
+ m_Data[key] = std::move(value);
+}
+
+/**
+ * Returns the number of elements in the dictionary.
+ *
+ * @returns Number of elements.
+ */
+size_t Dictionary::GetLength() const
+{
+ std::shared_lock<std::shared_timed_mutex> lock (m_DataMutex);
+
+ return m_Data.size();
+}
+
+/**
+ * Checks whether the dictionary contains the specified key.
+ *
+ * @param key The key.
+ * @returns true if the dictionary contains the key, false otherwise.
+ */
+bool Dictionary::Contains(const String& key) const
+{
+ std::shared_lock<std::shared_timed_mutex> lock (m_DataMutex);
+
+ return (m_Data.find(key) != m_Data.end());
+}
+
+/**
+ * Returns an iterator to the beginning of the dictionary.
+ *
+ * Note: Caller must hold the object lock while using the iterator.
+ *
+ * @returns An iterator.
+ */
+Dictionary::Iterator Dictionary::Begin()
+{
+ ASSERT(OwnsLock());
+
+ return m_Data.begin();
+}
+
+/**
+ * Returns an iterator to the end of the dictionary.
+ *
+ * Note: Caller must hold the object lock while using the iterator.
+ *
+ * @returns An iterator.
+ */
+Dictionary::Iterator Dictionary::End()
+{
+ ASSERT(OwnsLock());
+
+ return m_Data.end();
+}
+
+/**
+ * Removes the item specified by the iterator from the dictionary.
+ *
+ * @param it The iterator.
+ */
+void Dictionary::Remove(Dictionary::Iterator it)
+{
+ ASSERT(OwnsLock());
+ std::unique_lock<std::shared_timed_mutex> lock (m_DataMutex);
+
+ if (m_Frozen)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Dictionary must not be modified."));
+
+ m_Data.erase(it);
+}
+
+/**
+ * Removes the specified key from the dictionary.
+ *
+ * @param key The key.
+ */
+void Dictionary::Remove(const String& key)
+{
+ ObjectLock olock(this);
+ std::unique_lock<std::shared_timed_mutex> lock (m_DataMutex);
+
+ if (m_Frozen)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Dictionary must not be modified."));
+
+ Dictionary::Iterator it;
+ it = m_Data.find(key);
+
+ if (it == m_Data.end())
+ return;
+
+ m_Data.erase(it);
+}
+
+/**
+ * Removes all dictionary items.
+ */
+void Dictionary::Clear()
+{
+ ObjectLock olock(this);
+ std::unique_lock<std::shared_timed_mutex> lock (m_DataMutex);
+
+ if (m_Frozen)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Dictionary must not be modified."));
+
+ m_Data.clear();
+}
+
+void Dictionary::CopyTo(const Dictionary::Ptr& dest) const
+{
+ std::shared_lock<std::shared_timed_mutex> lock (m_DataMutex);
+
+ for (const Dictionary::Pair& kv : m_Data) {
+ dest->Set(kv.first, kv.second);
+ }
+}
+
+/**
+ * Makes a shallow copy of a dictionary.
+ *
+ * @returns a copy of the dictionary.
+ */
+Dictionary::Ptr Dictionary::ShallowClone() const
+{
+ Dictionary::Ptr clone = new Dictionary();
+ CopyTo(clone);
+ return clone;
+}
+
+/**
+ * Makes a deep clone of a dictionary
+ * and its elements.
+ *
+ * @returns a copy of the dictionary.
+ */
+Object::Ptr Dictionary::Clone() const
+{
+ DictionaryData dict;
+
+ {
+ std::shared_lock<std::shared_timed_mutex> lock (m_DataMutex);
+
+ dict.reserve(GetLength());
+
+ for (const Dictionary::Pair& kv : m_Data) {
+ dict.emplace_back(kv.first, kv.second.Clone());
+ }
+ }
+
+ return new Dictionary(std::move(dict));
+}
+
+/**
+ * Returns an ordered vector containing all keys
+ * which are currently set in this dictionary.
+ *
+ * @returns an ordered vector of key names
+ */
+std::vector<String> Dictionary::GetKeys() const
+{
+ std::shared_lock<std::shared_timed_mutex> lock (m_DataMutex);
+
+ std::vector<String> keys;
+
+ for (const Dictionary::Pair& kv : m_Data) {
+ keys.push_back(kv.first);
+ }
+
+ return keys;
+}
+
+String Dictionary::ToString() const
+{
+ std::ostringstream msgbuf;
+ ConfigWriter::EmitScope(msgbuf, 1, const_cast<Dictionary *>(this));
+ return msgbuf.str();
+}
+
+void Dictionary::Freeze()
+{
+ ObjectLock olock(this);
+ m_Frozen = true;
+}
+
+Value Dictionary::GetFieldByName(const String& field, bool, const DebugInfo& debugInfo) const
+{
+ Value value;
+
+ if (Get(field, &value))
+ return value;
+ else
+ return GetPrototypeField(const_cast<Dictionary *>(this), field, false, debugInfo);
+}
+
+void Dictionary::SetFieldByName(const String& field, const Value& value, bool overrideFrozen, const DebugInfo&)
+{
+ Set(field, value, overrideFrozen);
+}
+
+bool Dictionary::HasOwnField(const String& field) const
+{
+ return Contains(field);
+}
+
+bool Dictionary::GetOwnField(const String& field, Value *result) const
+{
+ return Get(field, result);
+}
+
+Dictionary::Iterator icinga::begin(const Dictionary::Ptr& x)
+{
+ return x->Begin();
+}
+
+Dictionary::Iterator icinga::end(const Dictionary::Ptr& x)
+{
+ return x->End();
+}
+
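Note that the per-call locking above only protects individual accessors; Begin() and End() assert that the caller owns the object lock. A minimal iteration sketch (illustrative only, using the headers from this commit):

    #include "base/dictionary.hpp"
    #include "base/objectlock.hpp"

    using namespace icinga;

    void IterateDictionarySketch(const Dictionary::Ptr& dict)
    {
        // Hold the ObjectLock for the whole loop, as required by Begin()/End().
        ObjectLock olock(dict);

        for (const Dictionary::Pair& kv : dict) {
            // kv.first is the key (String), kv.second the Value.
        }
    }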
diff --git a/lib/base/dictionary.hpp b/lib/base/dictionary.hpp
new file mode 100644
index 0000000..ffccd63
--- /dev/null
+++ b/lib/base/dictionary.hpp
@@ -0,0 +1,91 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef DICTIONARY_H
+#define DICTIONARY_H
+
+#include "base/i2-base.hpp"
+#include "base/object.hpp"
+#include "base/value.hpp"
+#include <boost/range/iterator.hpp>
+#include <map>
+#include <shared_mutex>
+#include <vector>
+
+namespace icinga
+{
+
+typedef std::vector<std::pair<String, Value> > DictionaryData;
+
+/**
+ * A container that holds key-value pairs.
+ *
+ * @ingroup base
+ */
+class Dictionary final : public Object
+{
+public:
+ DECLARE_OBJECT(Dictionary);
+
+ /**
+ * An iterator that can be used to iterate over dictionary elements.
+ */
+ typedef std::map<String, Value>::iterator Iterator;
+
+ typedef std::map<String, Value>::size_type SizeType;
+
+ typedef std::map<String, Value>::value_type Pair;
+
+ Dictionary() = default;
+ Dictionary(const DictionaryData& other);
+ Dictionary(DictionaryData&& other);
+ Dictionary(std::initializer_list<Pair> init);
+
+ Value Get(const String& key) const;
+ bool Get(const String& key, Value *result) const;
+ const Value * GetRef(const String& key) const;
+ void Set(const String& key, Value value, bool overrideFrozen = false);
+ bool Contains(const String& key) const;
+
+ Iterator Begin();
+ Iterator End();
+
+ size_t GetLength() const;
+
+ void Remove(const String& key);
+
+ void Remove(Iterator it);
+
+ void Clear();
+
+ void CopyTo(const Dictionary::Ptr& dest) const;
+ Dictionary::Ptr ShallowClone() const;
+
+ std::vector<String> GetKeys() const;
+
+ static Object::Ptr GetPrototype();
+
+ Object::Ptr Clone() const override;
+
+ String ToString() const override;
+
+ void Freeze();
+
+ Value GetFieldByName(const String& field, bool sandboxed, const DebugInfo& debugInfo) const override;
+ void SetFieldByName(const String& field, const Value& value, bool overrideFrozen, const DebugInfo& debugInfo) override;
+ bool HasOwnField(const String& field) const override;
+ bool GetOwnField(const String& field, Value *result) const override;
+
+private:
+ std::map<String, Value> m_Data; /**< The data for the dictionary. */
+ mutable std::shared_timed_mutex m_DataMutex;
+ bool m_Frozen{false};
+};
+
+Dictionary::Iterator begin(const Dictionary::Ptr& x);
+Dictionary::Iterator end(const Dictionary::Ptr& x);
+
+}
+
+extern template class std::map<icinga::String, icinga::Value>;
+
+#endif /* DICTIONARY_H */
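For completeness, a hypothetical sketch of building a Dictionary from DictionaryData and freezing it afterwards (the names and values are illustrative, not taken from the patch):

    #include "base/dictionary.hpp"
    #include <utility>

    using namespace icinga;

    Dictionary::Ptr BuildFrozenDictionarySketch()
    {
        DictionaryData data;
        data.reserve(2);
        data.emplace_back("zone", "master");
        data.emplace_back("port", 5665);

        Dictionary::Ptr dict = new Dictionary(std::move(data));

        // After Freeze(), Set()/Remove()/Clear() throw unless overrideFrozen is used.
        dict->Freeze();

        return dict;
    }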
diff --git a/lib/base/exception.cpp b/lib/base/exception.cpp
new file mode 100644
index 0000000..57b324b
--- /dev/null
+++ b/lib/base/exception.cpp
@@ -0,0 +1,507 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/exception.hpp"
+#include "base/stacktrace.hpp"
+#include <boost/thread/tss.hpp>
+#include <utility>
+
+#ifdef _WIN32
+# include "base/utility.hpp"
+#endif /* _WIN32 */
+
+#ifdef HAVE_CXXABI_H
+# include <cxxabi.h>
+#endif /* HAVE_CXXABI_H */
+
+using namespace icinga;
+
+static boost::thread_specific_ptr<boost::stacktrace::stacktrace> l_LastExceptionStack;
+static boost::thread_specific_ptr<ContextTrace> l_LastExceptionContext;
+
+#ifdef HAVE_CXXABI_H
+
+#ifdef _LIBCPPABI_VERSION
+class libcxx_type_info : public std::type_info
+{
+public:
+ ~libcxx_type_info() override;
+
+ virtual void noop1() const;
+ virtual void noop2() const;
+ virtual bool can_catch(const libcxx_type_info *thrown_type, void *&adjustedPtr) const = 0;
+};
+#endif /* _LIBCPPABI_VERSION */
+
+
+#if defined(__GLIBCXX__) || defined(_LIBCPPABI_VERSION)
+/**
+ * Attempts to cast an exception to a destination type
+ *
+ * @param obj Exception to be cast
+ * @param src Type information of obj
+ * @param dst Information of which type to cast to
+ * @return Pointer to the exception if the cast is possible, nullptr otherwise
+ */
+inline void *cast_exception(void *obj, const std::type_info *src, const std::type_info *dst)
+{
+#ifdef __GLIBCXX__
+ void *thrown_ptr = obj;
+
+ /* Check if the exception is a pointer type. */
+ if (src->__is_pointer_p())
+ thrown_ptr = *(void **)thrown_ptr;
+
+ if (dst->__do_catch(src, &thrown_ptr, 1))
+ return thrown_ptr;
+ else
+ return nullptr;
+#else /* __GLIBCXX__ */
+ const auto *srcInfo = static_cast<const libcxx_type_info *>(src);
+ const auto *dstInfo = static_cast<const libcxx_type_info *>(dst);
+
+ void *adj = obj;
+
+ if (dstInfo->can_catch(srcInfo, adj))
+ return adj;
+ else
+ return nullptr;
+#endif /* __GLIBCXX__ */
+
+}
+#else /* defined(__GLIBCXX__) || defined(_LIBCPPABI_VERSION) */
+#define NO_CAST_EXCEPTION
+#endif /* defined(__GLIBCXX__) || defined(_LIBCPPABI_VERSION) */
+
+# if __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ > 3)
+# define TYPEINFO_TYPE std::type_info
+# else
+# define TYPEINFO_TYPE void
+# endif
+
+# if !defined(__GLIBCXX__) && !defined(_WIN32)
+static boost::thread_specific_ptr<void *> l_LastExceptionObj;
+static boost::thread_specific_ptr<TYPEINFO_TYPE *> l_LastExceptionPvtInfo;
+
+typedef void (*DestCallback)(void *);
+static boost::thread_specific_ptr<DestCallback> l_LastExceptionDest;
+# endif /* !__GLIBCXX__ && !_WIN32 */
+
+extern "C" void __cxa_throw(void *obj, TYPEINFO_TYPE *pvtinfo, void (*dest)(void *));
+#endif /* HAVE_CXXABI_H */
+
+void icinga::RethrowUncaughtException()
+{
+#if defined(__GLIBCXX__) || !defined(HAVE_CXXABI_H)
+ throw;
+#else /* __GLIBCXX__ || !HAVE_CXXABI_H */
+ __cxa_throw(*l_LastExceptionObj.get(), *l_LastExceptionPvtInfo.get(), *l_LastExceptionDest.get());
+#endif /* __GLIBCXX__ || !HAVE_CXXABI_H */
+}
+
+#ifdef HAVE_CXXABI_H
+extern "C"
+void __cxa_throw(void *obj, TYPEINFO_TYPE *pvtinfo, void (*dest)(void *))
+{
+ /* This function overrides an internal function of libstdc++ that is called when a C++ exception is thrown in order
+ * to capture as much information as possible at that time and then call the original implementation. This
+ * information includes:
+ * - stack trace (for later use in DiagnosticInformation)
+ * - context trace (for later use in DiagnosticInformation)
+ */
+
+ auto *tinfo = static_cast<std::type_info *>(pvtinfo);
+
+ typedef void (*cxa_throw_fn)(void *, std::type_info *, void (*)(void *)) __attribute__((noreturn));
+ static cxa_throw_fn real_cxa_throw;
+
+#if !defined(__GLIBCXX__) && !defined(_WIN32)
+ l_LastExceptionObj.reset(new void *(obj));
+ l_LastExceptionPvtInfo.reset(new TYPEINFO_TYPE *(pvtinfo));
+ l_LastExceptionDest.reset(new DestCallback(dest));
+#endif /* !defined(__GLIBCXX__) && !defined(_WIN32) */
+
+ // resolve symbol to original implementation of __cxa_throw for the call at the end of this function
+ if (real_cxa_throw == nullptr)
+ real_cxa_throw = (cxa_throw_fn)dlsym(RTLD_NEXT, "__cxa_throw");
+
+#ifndef NO_CAST_EXCEPTION
+ void *uex = cast_exception(obj, tinfo, &typeid(user_error));
+ auto *ex = reinterpret_cast<boost::exception *>(cast_exception(obj, tinfo, &typeid(boost::exception)));
+
+ if (!uex) {
+#endif /* NO_CAST_EXCEPTION */
+ // save the current stack trace in a thread-local variable
+ boost::stacktrace::stacktrace stack;
+ SetLastExceptionStack(stack);
+
+#ifndef NO_CAST_EXCEPTION
+ // save the current stack trace in the boost exception error info if the exception is a boost::exception
+ if (ex && !boost::get_error_info<StackTraceErrorInfo>(*ex))
+ *ex << StackTraceErrorInfo(stack);
+ }
+#endif /* NO_CAST_EXCEPTION */
+
+ ContextTrace context;
+ SetLastExceptionContext(context);
+
+#ifndef NO_CAST_EXCEPTION
+ // save the current context trace in the boost exception error info if the exception is a boost::exception
+ if (ex && !boost::get_error_info<ContextTraceErrorInfo>(*ex))
+ *ex << ContextTraceErrorInfo(context);
+#endif /* NO_CAST_EXCEPTION */
+
+ real_cxa_throw(obj, tinfo, dest);
+}
+#endif /* HAVE_CXXABI_H */
+
+boost::stacktrace::stacktrace *icinga::GetLastExceptionStack()
+{
+ return l_LastExceptionStack.get();
+}
+
+void icinga::SetLastExceptionStack(const boost::stacktrace::stacktrace& trace)
+{
+ l_LastExceptionStack.reset(new boost::stacktrace::stacktrace(trace));
+}
+
+ContextTrace *icinga::GetLastExceptionContext()
+{
+ return l_LastExceptionContext.get();
+}
+
+void icinga::SetLastExceptionContext(const ContextTrace& context)
+{
+ l_LastExceptionContext.reset(new ContextTrace(context));
+}
+
+String icinga::DiagnosticInformation(const std::exception& ex, bool verbose, boost::stacktrace::stacktrace *stack, ContextTrace *context)
+{
+ std::ostringstream result;
+
+ String message = ex.what();
+
+#ifdef _WIN32
+ const auto *win32_err = dynamic_cast<const win32_error *>(&ex);
+ if (win32_err) {
+ message = to_string(*win32_err);
+ }
+#endif /* _WIN32 */
+
+ const auto *vex = dynamic_cast<const ValidationError *>(&ex);
+
+ if (message.IsEmpty())
+ result << boost::diagnostic_information(ex) << "\n";
+ else
+ result << "Error: " << message << "\n";
+
+ const auto *dex = dynamic_cast<const ScriptError *>(&ex);
+
+ if (dex && !dex->GetDebugInfo().Path.IsEmpty())
+ ShowCodeLocation(result, dex->GetDebugInfo());
+
+ if (vex) {
+ DebugInfo di;
+
+ ConfigObject::Ptr dobj = vex->GetObject();
+ if (dobj)
+ di = dobj->GetDebugInfo();
+
+ Dictionary::Ptr currentHint = vex->GetDebugHint();
+ Array::Ptr messages;
+
+ if (currentHint) {
+ for (const String& attr : vex->GetAttributePath()) {
+ Dictionary::Ptr props = currentHint->Get("properties");
+
+ if (!props)
+ break;
+
+ currentHint = props->Get(attr);
+
+ if (!currentHint)
+ break;
+
+ messages = currentHint->Get("messages");
+ }
+ }
+
+ if (messages && messages->GetLength() > 0) {
+ Array::Ptr message = messages->Get(messages->GetLength() - 1);
+
+ di.Path = message->Get(1);
+ di.FirstLine = message->Get(2);
+ di.FirstColumn = message->Get(3);
+ di.LastLine = message->Get(4);
+ di.LastColumn = message->Get(5);
+ }
+
+ if (!di.Path.IsEmpty())
+ ShowCodeLocation(result, di);
+ }
+
+ const auto *uex = dynamic_cast<const user_error *>(&ex);
+ const auto *pex = dynamic_cast<const posix_error *>(&ex);
+
+ if (!uex && !pex && verbose) {
+ // Print the first of the following stack traces (if any exists)
+ // 1. stack trace from boost exception error information
+ const boost::stacktrace::stacktrace *st = boost::get_error_info<StackTraceErrorInfo>(ex);
+ // 2. stack trace explicitly passed as a parameter
+ if (!st) {
+ st = stack;
+ }
+ // 3. stack trace saved when the last exception was thrown
+ if (!st) {
+ st = GetLastExceptionStack();
+ }
+
+ if (st && !st->empty()) {
+ result << "\nStacktrace:\n" << StackTraceFormatter(*st);
+ }
+ }
+
+ // Print the first of the following context traces (if any exists)
+ // 1. context trace from boost exception error information
+ const ContextTrace *ct = boost::get_error_info<ContextTraceErrorInfo>(ex);
+ // 2. context trace explicitly passed as a parameter
+ if (!ct) {
+ ct = context;
+ }
+ // 3. context trace saved when the last exception was thrown
+ if (!ct) {
+ ct = GetLastExceptionContext();
+ }
+
+ if (ct && ct->GetLength() > 0) {
+ result << "\nContext:\n" << *ct;
+ }
+
+ return result.str();
+}
+
+String icinga::DiagnosticInformation(const boost::exception_ptr& eptr, bool verbose)
+{
+ boost::stacktrace::stacktrace *pt = GetLastExceptionStack();
+ boost::stacktrace::stacktrace stack;
+
+ ContextTrace *pc = GetLastExceptionContext();
+ ContextTrace context;
+
+ if (pt)
+ stack = *pt;
+
+ if (pc)
+ context = *pc;
+
+ try {
+ boost::rethrow_exception(eptr);
+ } catch (const std::exception& ex) {
+ return DiagnosticInformation(ex, verbose, pt ? &stack : nullptr, pc ? &context : nullptr);
+ }
+
+ return boost::diagnostic_information(eptr);
+}
+
+ScriptError::ScriptError(String message)
+ : m_Message(std::move(message)), m_IncompleteExpr(false)
+{ }
+
+ScriptError::ScriptError(String message, DebugInfo di, bool incompleteExpr)
+ : m_Message(std::move(message)), m_DebugInfo(std::move(di)), m_IncompleteExpr(incompleteExpr), m_HandledByDebugger(false)
+{ }
+
+const char *ScriptError::what() const throw()
+{
+ return m_Message.CStr();
+}
+
+DebugInfo ScriptError::GetDebugInfo() const
+{
+ return m_DebugInfo;
+}
+
+bool ScriptError::IsIncompleteExpression() const
+{
+ return m_IncompleteExpr;
+}
+
+bool ScriptError::IsHandledByDebugger() const
+{
+ return m_HandledByDebugger;
+}
+
+void ScriptError::SetHandledByDebugger(bool handled)
+{
+ m_HandledByDebugger = handled;
+}
+
+posix_error::~posix_error() throw()
+{
+ free(m_Message);
+}
+
+const char *posix_error::what() const throw()
+{
+ if (!m_Message) {
+ std::ostringstream msgbuf;
+
+ const char * const *func = boost::get_error_info<boost::errinfo_api_function>(*this);
+
+ if (func)
+ msgbuf << "Function call '" << *func << "'";
+ else
+ msgbuf << "Function call";
+
+ const std::string *fname = boost::get_error_info<boost::errinfo_file_name>(*this);
+
+ if (fname)
+ msgbuf << " for file '" << *fname << "'";
+
+ msgbuf << " failed";
+
+ const int *errnum = boost::get_error_info<boost::errinfo_errno>(*this);
+
+ if (errnum)
+ msgbuf << " with error code " << *errnum << ", '" << strerror(*errnum) << "'";
+
+ String str = msgbuf.str();
+ m_Message = strdup(str.CStr());
+ }
+
+ return m_Message;
+}
+
+ValidationError::ValidationError(const ConfigObject::Ptr& object, const std::vector<String>& attributePath, const String& message)
+ : m_Object(object), m_AttributePath(attributePath), m_Message(message)
+{
+ String path;
+
+ for (const String& attribute : attributePath) {
+ if (!path.IsEmpty())
+ path += " -> ";
+
+ path += "'" + attribute + "'";
+ }
+
+ Type::Ptr type = object->GetReflectionType();
+ m_What = "Validation failed for object '" + object->GetName() + "' of type '" + type->GetName() + "'";
+
+ if (!path.IsEmpty())
+ m_What += "; Attribute " + path;
+
+ m_What += ": " + message;
+}
+
+ValidationError::~ValidationError() throw()
+{ }
+
+const char *ValidationError::what() const throw()
+{
+ return m_What.CStr();
+}
+
+ConfigObject::Ptr ValidationError::GetObject() const
+{
+ return m_Object;
+}
+
+std::vector<String> ValidationError::GetAttributePath() const
+{
+ return m_AttributePath;
+}
+
+String ValidationError::GetMessage() const
+{
+ return m_Message;
+}
+
+void ValidationError::SetDebugHint(const Dictionary::Ptr& dhint)
+{
+ m_DebugHint = dhint;
+}
+
+Dictionary::Ptr ValidationError::GetDebugHint() const
+{
+ return m_DebugHint;
+}
+
+std::string icinga::to_string(const StackTraceErrorInfo&)
+{
+ return "";
+}
+
+#ifdef _WIN32
+const char *win32_error::what() const noexcept
+{
+ return "win32_error";
+}
+
+std::string icinga::to_string(const win32_error &e) {
+ std::ostringstream msgbuf;
+
+ const char * const *func = boost::get_error_info<boost::errinfo_api_function>(e);
+
+ if (func) {
+ msgbuf << "Function call '" << *func << "'";
+ } else {
+ msgbuf << "Function call";
+ }
+
+ const std::string *fname = boost::get_error_info<boost::errinfo_file_name>(e);
+
+ if (fname) {
+ msgbuf << " for file '" << *fname << "'";
+ }
+
+ msgbuf << " failed";
+
+ const int *errnum = boost::get_error_info<errinfo_win32_error>(e);
+
+ if (errnum) {
+ msgbuf << " with error code " << Utility::FormatErrorNumber(*errnum);
+ }
+
+ return msgbuf.str();
+}
+
+std::string icinga::to_string(const errinfo_win32_error& e)
+{
+ return "[errinfo_win32_error] = " + Utility::FormatErrorNumber(e.value()) + "\n";
+}
+#endif /* _WIN32 */
+
+std::string icinga::to_string(const errinfo_getaddrinfo_error& e)
+{
+ String msg;
+
+#ifdef _WIN32
+ msg = gai_strerrorA(e.value());
+#else /* _WIN32 */
+ msg = gai_strerror(e.value());
+#endif /* _WIN32 */
+
+ return "[errinfo_getaddrinfo_error] = " + String(msg) + "\n";
+}
+
+std::string icinga::to_string(const ContextTraceErrorInfo& e)
+{
+ std::ostringstream msgbuf;
+ msgbuf << "[Context] = " << e.value();
+ return msgbuf.str();
+}
+
+invalid_downtime_removal_error::invalid_downtime_removal_error(String message)
+ : m_Message(std::move(message))
+{ }
+
+invalid_downtime_removal_error::invalid_downtime_removal_error(const char *message)
+ : m_Message(message)
+{ }
+
+invalid_downtime_removal_error::~invalid_downtime_removal_error() noexcept
+{ }
+
+const char *invalid_downtime_removal_error::what() const noexcept
+{
+ return m_Message.CStr();
+}
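As an informal usage sketch (assuming only the facilities defined in this library), DiagnosticInformation() is typically called from a catch block and combines the message, code location, stack trace and context trace captured by the __cxa_throw hook above:

    #include "base/exception.hpp"
    #include <iostream>

    using namespace icinga;

    void DiagnosticSketch()
    {
        try {
            throw ScriptError("something went wrong");
        } catch (const std::exception& ex) {
            // Prints the error message plus any stack/context trace that was
            // captured when the exception was thrown.
            std::cerr << DiagnosticInformation(ex) << "\n";
        }
    }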
diff --git a/lib/base/exception.hpp b/lib/base/exception.hpp
new file mode 100644
index 0000000..18dab65
--- /dev/null
+++ b/lib/base/exception.hpp
@@ -0,0 +1,166 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef EXCEPTION_H
+#define EXCEPTION_H
+
+#include "base/i2-base.hpp"
+#include "base/string.hpp"
+#include "base/context.hpp"
+#include "base/debuginfo.hpp"
+#include "base/dictionary.hpp"
+#include "base/configobject.hpp"
+#include <boost/exception/errinfo_api_function.hpp>
+#include <boost/exception/errinfo_errno.hpp>
+#include <boost/exception/errinfo_file_name.hpp>
+#include <boost/exception/diagnostic_information.hpp>
+#include <boost/exception_ptr.hpp>
+#include <boost/stacktrace.hpp>
+
+#ifdef _WIN32
+# include <boost/algorithm/string/trim.hpp>
+#endif /* _WIN32 */
+
+namespace icinga
+{
+
+class user_error : virtual public std::exception, virtual public boost::exception
+{ };
+
+/*
+ * @ingroup base
+ */
+class ScriptError : virtual public user_error
+{
+public:
+ ScriptError(String message);
+ ScriptError(String message, DebugInfo di, bool incompleteExpr = false);
+
+ const char *what(void) const throw() final;
+
+ DebugInfo GetDebugInfo() const;
+ bool IsIncompleteExpression() const;
+
+ bool IsHandledByDebugger() const;
+ void SetHandledByDebugger(bool handled);
+
+private:
+ String m_Message;
+ DebugInfo m_DebugInfo;
+ bool m_IncompleteExpr;
+ bool m_HandledByDebugger;
+};
+
+/*
+ * @ingroup base
+ */
+class ValidationError : virtual public user_error
+{
+public:
+ ValidationError(const ConfigObject::Ptr& object, const std::vector<String>& attributePath, const String& message);
+ ~ValidationError() throw() override;
+
+ const char *what() const throw() override;
+
+ ConfigObject::Ptr GetObject() const;
+ std::vector<String> GetAttributePath() const;
+ String GetMessage() const;
+
+ void SetDebugHint(const Dictionary::Ptr& dhint);
+ Dictionary::Ptr GetDebugHint() const;
+
+private:
+ ConfigObject::Ptr m_Object;
+ std::vector<String> m_AttributePath;
+ String m_Message;
+ String m_What;
+ Dictionary::Ptr m_DebugHint;
+};
+
+boost::stacktrace::stacktrace *GetLastExceptionStack();
+void SetLastExceptionStack(const boost::stacktrace::stacktrace& trace);
+
+ContextTrace *GetLastExceptionContext();
+void SetLastExceptionContext(const ContextTrace& context);
+
+void RethrowUncaughtException();
+
+struct errinfo_stacktrace_;
+typedef boost::error_info<struct errinfo_stacktrace_, boost::stacktrace::stacktrace> StackTraceErrorInfo;
+
+std::string to_string(const StackTraceErrorInfo&);
+
+typedef boost::error_info<ContextTrace, ContextTrace> ContextTraceErrorInfo;
+
+std::string to_string(const ContextTraceErrorInfo& e);
+
+/**
+ * Generate diagnostic information about an exception
+ *
+ * The following information is gathered in the result:
+ * - Exception error message
+ * - Debug information about the Icinga config if the exception is a ValidationError
+ * - Stack trace
+ * - Context trace
+ *
+ * The stack trace and the context trace are each printed if they were saved in the boost exception error
+ * information, were explicitly passed as a parameter, or were stored when the last exception was thrown. If
+ * more than one of these sources exists, the first one is used.
+ *
+ * @param ex exception to print diagnostic information about
+ * @param verbose if verbose is set, a stack trace is added
+ * @param stack optionally supply a stack trace
+ * @param context optionally supply a context trace
+ * @return string containing the aforementioned information
+ */
+String DiagnosticInformation(const std::exception& ex, bool verbose = true,
+ boost::stacktrace::stacktrace *stack = nullptr, ContextTrace *context = nullptr);
+String DiagnosticInformation(const boost::exception_ptr& eptr, bool verbose = true);
+
+class posix_error : virtual public std::exception, virtual public boost::exception {
+public:
+ ~posix_error() throw() override;
+
+ const char *what(void) const throw() final;
+
+private:
+ mutable char *m_Message{nullptr};
+};
+
+#ifdef _WIN32
+class win32_error : virtual public std::exception, virtual public boost::exception {
+public:
+ const char *what() const noexcept override;
+};
+
+std::string to_string(const win32_error& e);
+
+struct errinfo_win32_error_;
+typedef boost::error_info<struct errinfo_win32_error_, int> errinfo_win32_error;
+
+std::string to_string(const errinfo_win32_error& e);
+#endif /* _WIN32 */
+
+struct errinfo_getaddrinfo_error_;
+typedef boost::error_info<struct errinfo_getaddrinfo_error_, int> errinfo_getaddrinfo_error;
+
+std::string to_string(const errinfo_getaddrinfo_error& e);
+
+struct errinfo_message_;
+typedef boost::error_info<struct errinfo_message_, std::string> errinfo_message;
+
+class invalid_downtime_removal_error : virtual public std::exception, virtual public boost::exception {
+public:
+ explicit invalid_downtime_removal_error(String message);
+ explicit invalid_downtime_removal_error(const char* message);
+
+ ~invalid_downtime_removal_error() noexcept override;
+
+ const char *what() const noexcept final;
+
+private:
+ String m_Message;
+};
+
+}
+
+#endif /* EXCEPTION_H */
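The error_info typedefs above are meant to be attached to boost exceptions. A hedged sketch of raising a posix_error the way its what() implementation expects (the call site and file name are purely illustrative):

    #include "base/exception.hpp"
    #include <boost/throw_exception.hpp>
    #include <cerrno>
    #include <cstdio>

    using namespace icinga;

    void PosixErrorSketch()
    {
        if (std::rename("/tmp/a", "/tmp/b") < 0) {
            // what() renders: Function call 'rename' for file '/tmp/a' failed with error code ...
            BOOST_THROW_EXCEPTION(posix_error()
                << boost::errinfo_api_function("rename")
                << boost::errinfo_errno(errno)
                << boost::errinfo_file_name("/tmp/a"));
        }
    }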
diff --git a/lib/base/fifo.cpp b/lib/base/fifo.cpp
new file mode 100644
index 0000000..8653f51
--- /dev/null
+++ b/lib/base/fifo.cpp
@@ -0,0 +1,124 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/fifo.hpp"
+
+using namespace icinga;
+
+/**
+ * Destructor for the FIFO class.
+ */
+FIFO::~FIFO()
+{
+ free(m_Buffer);
+}
+
+/**
+ * Resizes the FIFO's buffer so that it is at least newSize bytes long;
+ * if decrease is set, the buffer may also be shrunk to that size.
+ * @param newSize The minimum new size of the FIFO buffer.
+ */
+void FIFO::ResizeBuffer(size_t newSize, bool decrease)
+{
+ if (m_AllocSize >= newSize && !decrease)
+ return;
+
+ newSize = (newSize / FIFO::BlockSize + 1) * FIFO::BlockSize;
+
+ if (newSize == m_AllocSize)
+ return;
+
+ auto *newBuffer = static_cast<char *>(realloc(m_Buffer, newSize));
+
+ if (!newBuffer)
+ BOOST_THROW_EXCEPTION(std::bad_alloc());
+
+ m_Buffer = newBuffer;
+
+ m_AllocSize = newSize;
+}
+
+/**
+ * Optimizes memory usage of the FIFO buffer by reallocating
+ * and moving the buffer.
+ */
+void FIFO::Optimize()
+{
+ if (m_Offset > m_DataSize / 10 && m_Offset - m_DataSize > 1024) {
+ std::memmove(m_Buffer, m_Buffer + m_Offset, m_DataSize);
+ m_Offset = 0;
+
+ if (m_DataSize > 0)
+ ResizeBuffer(m_DataSize, true);
+
+ return;
+ }
+}
+
+size_t FIFO::Peek(void *buffer, size_t count, bool allow_partial)
+{
+ ASSERT(allow_partial);
+
+ if (count > m_DataSize)
+ count = m_DataSize;
+
+ if (buffer)
+ std::memcpy(buffer, m_Buffer + m_Offset, count);
+
+ return count;
+}
+
+/**
+ * Implements IOQueue::Read.
+ */
+size_t FIFO::Read(void *buffer, size_t count, bool allow_partial)
+{
+ ASSERT(allow_partial);
+
+ if (count > m_DataSize)
+ count = m_DataSize;
+
+ if (buffer)
+ std::memcpy(buffer, m_Buffer + m_Offset, count);
+
+ m_DataSize -= count;
+ m_Offset += count;
+
+ Optimize();
+
+ return count;
+}
+
+/**
+ * Implements IOQueue::Write.
+ */
+void FIFO::Write(const void *buffer, size_t count)
+{
+ ResizeBuffer(m_Offset + m_DataSize + count, false);
+ std::memcpy(m_Buffer + m_Offset + m_DataSize, buffer, count);
+ m_DataSize += count;
+
+ SignalDataAvailable();
+}
+
+void FIFO::Close()
+{ }
+
+bool FIFO::IsEof() const
+{
+ return false;
+}
+
+size_t FIFO::GetAvailableBytes() const
+{
+ return m_DataSize;
+}
+
+bool FIFO::SupportsWaiting() const
+{
+ return true;
+}
+
+bool FIFO::IsDataAvailable() const
+{
+ return m_DataSize > 0;
+}
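A short, illustrative sketch of the FIFO in isolation (buffer sizes and contents are made up):

    #include "base/fifo.hpp"

    using namespace icinga;

    void FifoSketch()
    {
        FIFO::Ptr fifo = new FIFO();

        const char data[] = "icinga";
        fifo->Write(data, sizeof(data) - 1);

        char out[16] = {};
        // Read() asserts allow_partial, so explicitly allow a partial read.
        size_t n = fifo->Read(out, sizeof(out), true);

        (void)n; // n == 6 here: only the bytes actually buffered are returned
    }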
diff --git a/lib/base/fifo.hpp b/lib/base/fifo.hpp
new file mode 100644
index 0000000..a8273c1
--- /dev/null
+++ b/lib/base/fifo.hpp
@@ -0,0 +1,48 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef FIFO_H
+#define FIFO_H
+
+#include "base/i2-base.hpp"
+#include "base/stream.hpp"
+
+namespace icinga
+{
+
+/**
+ * A byte-based FIFO buffer.
+ *
+ * @ingroup base
+ */
+class FIFO final : public Stream
+{
+public:
+ DECLARE_PTR_TYPEDEFS(FIFO);
+
+ static const size_t BlockSize = 512;
+
+ ~FIFO() override;
+
+ size_t Peek(void *buffer, size_t count, bool allow_partial = false) override;
+ size_t Read(void *buffer, size_t count, bool allow_partial = false) override;
+ void Write(const void *buffer, size_t count) override;
+ void Close() override;
+ bool IsEof() const override;
+ bool SupportsWaiting() const override;
+ bool IsDataAvailable() const override;
+
+ size_t GetAvailableBytes() const;
+
+private:
+ char *m_Buffer{nullptr};
+ size_t m_DataSize{0};
+ size_t m_AllocSize{0};
+ size_t m_Offset{0};
+
+ void ResizeBuffer(size_t newSize, bool decrease);
+ void Optimize();
+};
+
+}
+
+#endif /* FIFO_H */
diff --git a/lib/base/filelogger.cpp b/lib/base/filelogger.cpp
new file mode 100644
index 0000000..c3da84a
--- /dev/null
+++ b/lib/base/filelogger.cpp
@@ -0,0 +1,59 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/filelogger.hpp"
+#include "base/filelogger-ti.cpp"
+#include "base/configtype.hpp"
+#include "base/statsfunction.hpp"
+#include "base/application.hpp"
+#include <fstream>
+
+using namespace icinga;
+
+REGISTER_TYPE(FileLogger);
+
+REGISTER_STATSFUNCTION(FileLogger, &FileLogger::StatsFunc);
+
+void FileLogger::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr&)
+{
+ DictionaryData nodes;
+
+ for (const FileLogger::Ptr& filelogger : ConfigType::GetObjectsByType<FileLogger>()) {
+ nodes.emplace_back(filelogger->GetName(), 1); //add more stats
+ }
+
+ status->Set("filelogger", new Dictionary(std::move(nodes)));
+}
+
+/**
+ * Starts the FileLogger and opens (or reopens) its log file.
+ */
+void FileLogger::Start(bool runtimeCreated)
+{
+ ReopenLogFile();
+
+ Application::OnReopenLogs.connect([this]() { ReopenLogFile(); });
+
+ ObjectImpl<FileLogger>::Start(runtimeCreated);
+
+ Log(LogInformation, "FileLogger")
+ << "'" << GetName() << "' started.";
+}
+
+void FileLogger::ReopenLogFile()
+{
+ auto *stream = new std::ofstream();
+
+ String path = GetPath();
+
+ try {
+ stream->open(path.CStr(), std::fstream::app | std::fstream::out);
+
+ if (!stream->good())
+ BOOST_THROW_EXCEPTION(std::runtime_error("Could not open logfile '" + path + "'"));
+ } catch (...) {
+ delete stream;
+ throw;
+ }
+
+ BindStream(stream, true);
+}
diff --git a/lib/base/filelogger.hpp b/lib/base/filelogger.hpp
new file mode 100644
index 0000000..420337f
--- /dev/null
+++ b/lib/base/filelogger.hpp
@@ -0,0 +1,33 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef FILELOGGER_H
+#define FILELOGGER_H
+
+#include "base/i2-base.hpp"
+#include "base/filelogger-ti.hpp"
+
+namespace icinga
+{
+
+/**
+ * A logger that logs to a file.
+ *
+ * @ingroup base
+ */
+class FileLogger final : public ObjectImpl<FileLogger>
+{
+public:
+ DECLARE_OBJECT(FileLogger);
+ DECLARE_OBJECTNAME(FileLogger);
+
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+
+ void Start(bool runtimeCreated) override;
+
+private:
+ void ReopenLogFile();
+};
+
+}
+
+#endif /* FILELOGGER_H */
diff --git a/lib/base/filelogger.ti b/lib/base/filelogger.ti
new file mode 100644
index 0000000..8af2498
--- /dev/null
+++ b/lib/base/filelogger.ti
@@ -0,0 +1,17 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/streamlogger.hpp"
+
+library base;
+
+namespace icinga
+{
+
+class FileLogger : StreamLogger
+{
+ activation_priority -100;
+
+ [config, required] String path;
+};
+
+}
diff --git a/lib/base/function-script.cpp b/lib/base/function-script.cpp
new file mode 100644
index 0000000..e59e84d
--- /dev/null
+++ b/lib/base/function-script.cpp
@@ -0,0 +1,50 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/function.hpp"
+#include "base/functionwrapper.hpp"
+#include "base/scriptframe.hpp"
+#include "base/objectlock.hpp"
+#include "base/exception.hpp"
+
+using namespace icinga;
+
+static Value FunctionCall(const std::vector<Value>& args)
+{
+ if (args.size() < 1)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Too few arguments for call()"));
+
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Function::Ptr self = static_cast<Function::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+
+ std::vector<Value> uargs(args.begin() + 1, args.end());
+ return self->InvokeThis(args[0], uargs);
+}
+
+static Value FunctionCallV(const Value& thisArg, const Array::Ptr& args)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Function::Ptr self = static_cast<Function::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+
+ std::vector<Value> uargs;
+
+ {
+ ObjectLock olock(args);
+ uargs = std::vector<Value>(args->Begin(), args->End());
+ }
+
+ return self->InvokeThis(thisArg, uargs);
+}
+
+
+Object::Ptr Function::GetPrototype()
+{
+ static Dictionary::Ptr prototype = new Dictionary({
+ { "call", new Function("Function#call", FunctionCall) },
+ { "callv", new Function("Function#callv", FunctionCallV) }
+ });
+
+ return prototype;
+}
+
diff --git a/lib/base/function.cpp b/lib/base/function.cpp
new file mode 100644
index 0000000..f9a261d
--- /dev/null
+++ b/lib/base/function.cpp
@@ -0,0 +1,37 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/function.hpp"
+#include "base/function-ti.cpp"
+#include "base/array.hpp"
+#include "base/scriptframe.hpp"
+
+using namespace icinga;
+
+REGISTER_TYPE_WITH_PROTOTYPE(Function, Function::GetPrototype());
+
+Function::Function(const String& name, Callback function, const std::vector<String>& args,
+ bool side_effect_free, bool deprecated)
+ : m_Callback(std::move(function))
+{
+ SetName(name, true);
+ SetSideEffectFree(side_effect_free, true);
+ SetDeprecated(deprecated, true);
+ SetArguments(Array::FromVector(args), true);
+}
+
+Value Function::Invoke(const std::vector<Value>& arguments)
+{
+ ScriptFrame frame(false);
+ return m_Callback(arguments);
+}
+
+Value Function::InvokeThis(const Value& otherThis, const std::vector<Value>& arguments)
+{
+ ScriptFrame frame(false, otherThis);
+ return m_Callback(arguments);
+}
+
+Object::Ptr Function::Clone() const
+{
+ return const_cast<Function *>(this);
+}
diff --git a/lib/base/function.hpp b/lib/base/function.hpp
new file mode 100644
index 0000000..d52a230
--- /dev/null
+++ b/lib/base/function.hpp
@@ -0,0 +1,89 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef FUNCTION_H
+#define FUNCTION_H
+
+#include "base/i2-base.hpp"
+#include "base/function-ti.hpp"
+#include "base/value.hpp"
+#include "base/functionwrapper.hpp"
+#include "base/scriptglobal.hpp"
+#include <vector>
+
+namespace icinga
+{
+
+/**
+ * A script function that can be used to execute a script task.
+ *
+ * @ingroup base
+ */
+class Function final : public ObjectImpl<Function>
+{
+public:
+ DECLARE_OBJECT(Function);
+
+ typedef std::function<Value (const std::vector<Value>& arguments)> Callback;
+
+ template<typename F>
+ Function(const String& name, F function, const std::vector<String>& args = std::vector<String>(),
+ bool side_effect_free = false, bool deprecated = false)
+ : Function(name, WrapFunction(function), args, side_effect_free, deprecated)
+ { }
+
+ Value Invoke(const std::vector<Value>& arguments = std::vector<Value>());
+ Value InvokeThis(const Value& otherThis, const std::vector<Value>& arguments = std::vector<Value>());
+
+ bool IsSideEffectFree() const
+ {
+ return GetSideEffectFree();
+ }
+
+ bool IsDeprecated() const
+ {
+ return GetDeprecated();
+ }
+
+ static Object::Ptr GetPrototype();
+
+ Object::Ptr Clone() const override;
+
+private:
+ Callback m_Callback;
+
+ Function(const String& name, Callback function, const std::vector<String>& args,
+ bool side_effect_free, bool deprecated);
+};
+
+/* Ensure that the priority is lower than the basic namespace initialization in scriptframe.cpp. */
+#define REGISTER_FUNCTION(ns, name, callback, args) \
+ INITIALIZE_ONCE_WITH_PRIORITY([]() { \
+ Function::Ptr sf = new icinga::Function(#ns "#" #name, callback, String(args).Split(":"), false); \
+ Namespace::Ptr nsp = ScriptGlobal::Get(#ns); \
+ nsp->Set(#name, sf, true); \
+ }, InitializePriority::RegisterFunctions)
+
+#define REGISTER_SAFE_FUNCTION(ns, name, callback, args) \
+ INITIALIZE_ONCE_WITH_PRIORITY([]() { \
+ Function::Ptr sf = new icinga::Function(#ns "#" #name, callback, String(args).Split(":"), true); \
+ Namespace::Ptr nsp = ScriptGlobal::Get(#ns); \
+ nsp->Set(#name, sf, true); \
+ }, InitializePriority::RegisterFunctions)
+
+#define REGISTER_FUNCTION_NONCONST(ns, name, callback, args) \
+ INITIALIZE_ONCE_WITH_PRIORITY([]() { \
+ Function::Ptr sf = new icinga::Function(#ns "#" #name, callback, String(args).Split(":"), false); \
+ Namespace::Ptr nsp = ScriptGlobal::Get(#ns); \
+ nsp->Set(#name, sf, false); \
+ }, InitializePriority::RegisterFunctions)
+
+#define REGISTER_SAFE_FUNCTION_NONCONST(ns, name, callback, args) \
+ INITIALIZE_ONCE_WITH_PRIORITY([]() { \
+ Function::Ptr sf = new icinga::Function(#ns "#" #name, callback, String(args).Split(":"), true); \
+ Namespace::Ptr nsp = ScriptGlobal::Get(#ns); \
+ nsp->SetAttribute(#name, sf, false); \
+ }, InitializePriority::RegisterFunctions)
+
+}
+
+#endif /* FUNCTION_H */
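Aside from the REGISTER_* macros, a Function can also be constructed and invoked directly. A minimal sketch (the lambda and the names are illustrative, assuming Value's usual numeric conversions):

    #include "base/function.hpp"

    using namespace icinga;

    void FunctionSketch()
    {
        // The template constructor routes the lambda through WrapFunction(),
        // which converts the Value arguments to double before the call.
        Function::Ptr add = new Function("Example#add",
            [](double a, double b) { return a + b; },
            { "a", "b" }, true /* side_effect_free */);

        Value sum = add->Invoke({ 3, 4 }); // sum == 7
        (void)sum;
    }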
diff --git a/lib/base/function.ti b/lib/base/function.ti
new file mode 100644
index 0000000..f2623c1
--- /dev/null
+++ b/lib/base/function.ti
@@ -0,0 +1,18 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+
+library base;
+
+namespace icinga
+{
+
+abstract class Function
+{
+ [config] String "name";
+ [config] bool side_effect_free;
+ [config] bool "deprecated";
+ [config] Array::Ptr arguments;
+};
+
+}
diff --git a/lib/base/functionwrapper.hpp b/lib/base/functionwrapper.hpp
new file mode 100644
index 0000000..57cf1cb
--- /dev/null
+++ b/lib/base/functionwrapper.hpp
@@ -0,0 +1,149 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef FUNCTIONWRAPPER_H
+#define FUNCTIONWRAPPER_H
+
+#include "base/i2-base.hpp"
+#include "base/value.hpp"
+#include <boost/function_types/function_type.hpp>
+#include <boost/function_types/parameter_types.hpp>
+#include <boost/function_types/result_type.hpp>
+#include <boost/function_types/function_arity.hpp>
+#include <vector>
+
+namespace icinga
+{
+
+template<typename FuncType>
+typename std::enable_if<
+ std::is_class<FuncType>::value &&
+ std::is_same<typename boost::function_types::result_type<decltype(&FuncType::operator())>::type, Value>::value &&
+ boost::function_types::function_arity<decltype(&FuncType::operator())>::value == 2,
+ std::function<Value (const std::vector<Value>&)>>::type
+WrapFunction(FuncType function)
+{
+ static_assert(std::is_same<typename boost::mpl::at_c<typename boost::function_types::parameter_types<decltype(&FuncType::operator())>, 1>::type, const std::vector<Value>&>::value, "Argument type must be const std::vector<Value>");
+ return function;
+}
+
+inline std::function<Value (const std::vector<Value>&)> WrapFunction(void (*function)(const std::vector<Value>&))
+{
+ return [function](const std::vector<Value>& arguments) {
+ function(arguments);
+ return Empty;
+ };
+}
+
+template<typename Return>
+std::function<Value (const std::vector<Value>&)> WrapFunction(Return (*function)(const std::vector<Value>&))
+{
+ return [function](const std::vector<Value>& values) -> Value { return function(values); };
+}
+
+template <std::size_t... Indices>
+struct indices {
+ using next = indices<Indices..., sizeof...(Indices)>;
+};
+
+template <std::size_t N>
+struct build_indices {
+ using type = typename build_indices<N-1>::type::next;
+};
+
+template <>
+struct build_indices<0> {
+ using type = indices<>;
+};
+
+template <std::size_t N>
+using BuildIndices = typename build_indices<N>::type;
+
+struct UnpackCaller
+{
+private:
+ template <typename FuncType, size_t... I>
+ auto Invoke(FuncType f, const std::vector<Value>& args, indices<I...>) -> decltype(f(args[I]...))
+ {
+ return f(args[I]...);
+ }
+
+public:
+ template <typename FuncType, int Arity>
+ auto operator() (FuncType f, const std::vector<Value>& args) -> decltype(Invoke(f, args, BuildIndices<Arity>{}))
+ {
+ return Invoke(f, args, BuildIndices<Arity>{});
+ }
+};
+
+template<typename FuncType, int Arity, typename ReturnType>
+struct FunctionWrapper
+{
+ static Value Invoke(FuncType function, const std::vector<Value>& arguments)
+ {
+ return UnpackCaller().operator()<FuncType, Arity>(function, arguments);
+ }
+};
+
+template<typename FuncType, int Arity>
+struct FunctionWrapper<FuncType, Arity, void>
+{
+ static Value Invoke(FuncType function, const std::vector<Value>& arguments)
+ {
+ UnpackCaller().operator()<FuncType, Arity>(function, arguments);
+ return Empty;
+ }
+};
+
+template<typename FuncType>
+typename std::enable_if<
+ std::is_function<typename std::remove_pointer<FuncType>::type>::value && !std::is_same<FuncType, Value(*)(const std::vector<Value>&)>::value,
+ std::function<Value (const std::vector<Value>&)>>::type
+WrapFunction(FuncType function)
+{
+ return [function](const std::vector<Value>& arguments) {
+ constexpr size_t arity = boost::function_types::function_arity<typename std::remove_pointer<FuncType>::type>::value;
+
+ if (arity > 0) {
+ if (arguments.size() < arity)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Too few arguments for function."));
+ else if (arguments.size() > arity)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Too many arguments for function."));
+ }
+
+ using ReturnType = decltype(UnpackCaller().operator()<FuncType, arity>(*static_cast<FuncType *>(nullptr), std::vector<Value>()));
+
+ return FunctionWrapper<FuncType, arity, ReturnType>::Invoke(function, arguments);
+ };
+}
+
+template<typename FuncType>
+typename std::enable_if<
+ std::is_class<FuncType>::value &&
+ !(std::is_same<typename boost::function_types::result_type<decltype(&FuncType::operator())>::type, Value>::value &&
+ boost::function_types::function_arity<decltype(&FuncType::operator())>::value == 2),
+ std::function<Value (const std::vector<Value>&)>>::type
+WrapFunction(FuncType function)
+{
+ static_assert(!std::is_same<typename boost::mpl::at_c<typename boost::function_types::parameter_types<decltype(&FuncType::operator())>, 1>::type, const std::vector<Value>&>::value, "Argument type must be const std::vector<Value>");
+
+ using FuncTypeInvoker = decltype(&FuncType::operator());
+
+ return [function](const std::vector<Value>& arguments) {
+ constexpr size_t arity = boost::function_types::function_arity<FuncTypeInvoker>::value - 1;
+
+ if (arity > 0) {
+ if (arguments.size() < arity)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Too few arguments for function."));
+ else if (arguments.size() > arity)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Too many arguments for function."));
+ }
+
+ using ReturnType = decltype(UnpackCaller().operator()<FuncType, arity>(*static_cast<FuncType *>(nullptr), std::vector<Value>()));
+
+ return FunctionWrapper<FuncType, arity, ReturnType>::Invoke(function, arguments);
+ };
+}
+
+}
+
+#endif /* FUNCTIONWRAPPER_H */
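To illustrate what these overloads do (using a hypothetical free function that is not part of the library), WrapFunction() turns a typed callable into the uniform Value-vector signature used by Function:

    #include "base/functionwrapper.hpp"
    #include <functional>
    #include <vector>

    using namespace icinga;

    static double MultiplySketch(double a, double b)
    {
        return a * b;
    }

    void WrapSketch()
    {
        // The arity (2) is deduced at compile time; the Value arguments are
        // converted to double before MultiplySketch is called.
        std::function<Value (const std::vector<Value>&)> wrapped = WrapFunction(&MultiplySketch);

        Value result = wrapped({ 6, 7 }); // result == 42
        (void)result;
    }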
diff --git a/lib/base/i2-base.hpp b/lib/base/i2-base.hpp
new file mode 100644
index 0000000..a7bfc6a
--- /dev/null
+++ b/lib/base/i2-base.hpp
@@ -0,0 +1,79 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef I2BASE_H
+#define I2BASE_H
+
+/**
+ * @mainpage Icinga Documentation
+ *
+ * Icinga implements a framework for run-time-loadable components which can
+ * pass messages between each other. These components can either be hosted in
+ * the same process or in several host processes (either on the same machine or
+ * on different machines).
+ *
+ * The framework's code critically depends on the following patterns:
+ *
+ * <list type="bullet">
+ * <item>Smart pointers
+ *
+ * The shared_ptr and weak_ptr template classes are used to simplify memory
+ * management and to avoid accidental memory leaks and use-after-free
+ * bugs.</item>
+ *
+ * <item>Observer pattern
+ *
+ * Framework classes expose events which other objects can subscribe to. This
+ * is used to decouple clients of a class from the class' internal
+ * implementation.</item>
+ * </list>
+ */
+
+/**
+ * @defgroup base Base class library
+ *
+ * The base class library implements commonly-used functionality like
+ * event handling for sockets and timers.
+ */
+
+#include <boost/config.hpp>
+
+#if defined(__clang__) && __cplusplus >= 201103L
+# undef BOOST_NO_CXX11_HDR_TUPLE
+#endif
+
+#ifdef _MSC_VER
+# pragma warning(disable:4251)
+# pragma warning(disable:4275)
+# pragma warning(disable:4345)
+#endif /* _MSC_VER */
+
+#include "config.h"
+
+#ifdef _WIN32
+# include "base/win32.hpp"
+#else
+# include "base/unix.hpp"
+#endif
+
+#include <cstdlib>
+#include <cstdarg>
+#include <cstdio>
+#include <cstring>
+#include <cerrno>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <signal.h>
+
+#include <exception>
+#include <stdexcept>
+
+#if defined(__APPLE__) && defined(__MACH__)
+# pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif
+
+#define BOOST_BIND_NO_PLACEHOLDERS
+
+#include <functional>
+
+#endif /* I2BASE_H */
diff --git a/lib/base/initialize.cpp b/lib/base/initialize.cpp
new file mode 100644
index 0000000..49b653f
--- /dev/null
+++ b/lib/base/initialize.cpp
@@ -0,0 +1,13 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/initialize.hpp"
+#include "base/loader.hpp"
+
+using namespace icinga;
+
+bool icinga::InitializeOnceHelper(const std::function<void()>& func, InitializePriority priority)
+{
+ Loader::AddDeferredInitializer(func, priority);
+ return true;
+}
+
diff --git a/lib/base/initialize.hpp b/lib/base/initialize.hpp
new file mode 100644
index 0000000..adc995f
--- /dev/null
+++ b/lib/base/initialize.hpp
@@ -0,0 +1,49 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef INITIALIZE_H
+#define INITIALIZE_H
+
+#include "base/i2-base.hpp"
+#include <functional>
+
+namespace icinga
+{
+
+/**
+ * Priority values for use with the INITIALIZE_ONCE_WITH_PRIORITY macro.
+ *
+ * The values are given in the order of initialization.
+ */
+enum class InitializePriority {
+ CreateNamespaces,
+ InitIcingaApplication,
+ RegisterTypeType,
+ RegisterObjectType,
+ RegisterPrimitiveTypes,
+ RegisterBuiltinTypes,
+ RegisterFunctions,
+ RegisterTypes,
+ EvaluateConfigFragments,
+ Default,
+ FreezeNamespaces,
+};
+
+#define I2_TOKENPASTE(x, y) x ## y
+#define I2_TOKENPASTE2(x, y) I2_TOKENPASTE(x, y)
+
+#define I2_UNIQUE_NAME(prefix) I2_TOKENPASTE2(prefix, __COUNTER__)
+
+bool InitializeOnceHelper(const std::function<void()>& func, InitializePriority priority = InitializePriority::Default);
+
+#define INITIALIZE_ONCE(func) \
+ namespace { namespace I2_UNIQUE_NAME(io) { \
+ bool l_InitializeOnce(icinga::InitializeOnceHelper(func)); \
+ } }
+
+#define INITIALIZE_ONCE_WITH_PRIORITY(func, priority) \
+ namespace { namespace I2_UNIQUE_NAME(io) { \
+ bool l_InitializeOnce(icinga::InitializeOnceHelper(func, priority)); \
+ } }
+}
+
+#endif /* INITIALIZE_H */
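Usage sketch for the macros above (the log facility name and message are made up):

    #include "base/initialize.hpp"
    #include "base/logger.hpp"

    using namespace icinga;

    // Registers a deferred initializer with Default priority; it runs once
    // during start-up when the deferred initializers are executed.
    INITIALIZE_ONCE([]() {
        Log(LogInformation, "ExampleSketch", "deferred initializer executed");
    });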
diff --git a/lib/base/io-engine.cpp b/lib/base/io-engine.cpp
new file mode 100644
index 0000000..26125fe
--- /dev/null
+++ b/lib/base/io-engine.cpp
@@ -0,0 +1,155 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configuration.hpp"
+#include "base/exception.hpp"
+#include "base/io-engine.hpp"
+#include "base/lazy-init.hpp"
+#include "base/logger.hpp"
+#include <exception>
+#include <memory>
+#include <thread>
+#include <boost/asio/io_context.hpp>
+#include <boost/asio/spawn.hpp>
+#include <boost/asio/post.hpp>
+#include <boost/date_time/posix_time/ptime.hpp>
+#include <boost/system/error_code.hpp>
+
+using namespace icinga;
+
+CpuBoundWork::CpuBoundWork(boost::asio::yield_context yc)
+ : m_Done(false)
+{
+ auto& ioEngine (IoEngine::Get());
+
+ for (;;) {
+ auto availableSlots (ioEngine.m_CpuBoundSemaphore.fetch_sub(1));
+
+ if (availableSlots < 1) {
+ ioEngine.m_CpuBoundSemaphore.fetch_add(1);
+ IoEngine::YieldCurrentCoroutine(yc);
+ continue;
+ }
+
+ break;
+ }
+}
+
+CpuBoundWork::~CpuBoundWork()
+{
+ if (!m_Done) {
+ IoEngine::Get().m_CpuBoundSemaphore.fetch_add(1);
+ }
+}
+
+void CpuBoundWork::Done()
+{
+ if (!m_Done) {
+ IoEngine::Get().m_CpuBoundSemaphore.fetch_add(1);
+
+ m_Done = true;
+ }
+}
+
+IoBoundWorkSlot::IoBoundWorkSlot(boost::asio::yield_context yc)
+ : yc(yc)
+{
+ IoEngine::Get().m_CpuBoundSemaphore.fetch_add(1);
+}
+
+IoBoundWorkSlot::~IoBoundWorkSlot()
+{
+ auto& ioEngine (IoEngine::Get());
+
+ for (;;) {
+ auto availableSlots (ioEngine.m_CpuBoundSemaphore.fetch_sub(1));
+
+ if (availableSlots < 1) {
+ ioEngine.m_CpuBoundSemaphore.fetch_add(1);
+ IoEngine::YieldCurrentCoroutine(yc);
+ continue;
+ }
+
+ break;
+ }
+}
+
+LazyInit<std::unique_ptr<IoEngine>> IoEngine::m_Instance ([]() { return std::unique_ptr<IoEngine>(new IoEngine()); });
+
+IoEngine& IoEngine::Get()
+{
+ return *m_Instance.Get();
+}
+
+boost::asio::io_context& IoEngine::GetIoContext()
+{
+ return m_IoContext;
+}
+
+IoEngine::IoEngine() : m_IoContext(), m_KeepAlive(boost::asio::make_work_guard(m_IoContext)), m_Threads(decltype(m_Threads)::size_type(Configuration::Concurrency * 2u)), m_AlreadyExpiredTimer(m_IoContext)
+{
+ m_AlreadyExpiredTimer.expires_at(boost::posix_time::neg_infin);
+ m_CpuBoundSemaphore.store(Configuration::Concurrency * 3u / 2u);
+
+ for (auto& thread : m_Threads) {
+ thread = std::thread(&IoEngine::RunEventLoop, this);
+ }
+}
+
+IoEngine::~IoEngine()
+{
+ for (auto& thread : m_Threads) {
+ boost::asio::post(m_IoContext, []() {
+ throw TerminateIoThread();
+ });
+ }
+
+ for (auto& thread : m_Threads) {
+ thread.join();
+ }
+}
+
+void IoEngine::RunEventLoop()
+{
+ for (;;) {
+ try {
+ m_IoContext.run();
+
+ break;
+ } catch (const TerminateIoThread&) {
+ break;
+ } catch (const std::exception& e) {
+ Log(LogCritical, "IoEngine", "Exception during I/O operation!");
+ Log(LogDebug, "IoEngine") << "Exception during I/O operation: " << DiagnosticInformation(e);
+ }
+ }
+}
+
+AsioConditionVariable::AsioConditionVariable(boost::asio::io_context& io, bool init)
+ : m_Timer(io)
+{
+ m_Timer.expires_at(init ? boost::posix_time::neg_infin : boost::posix_time::pos_infin);
+}
+
+void AsioConditionVariable::Set()
+{
+ m_Timer.expires_at(boost::posix_time::neg_infin);
+}
+
+void AsioConditionVariable::Clear()
+{
+ m_Timer.expires_at(boost::posix_time::pos_infin);
+}
+
+void AsioConditionVariable::Wait(boost::asio::yield_context yc)
+{
+ boost::system::error_code ec;
+ m_Timer.async_wait(yc[ec]);
+}
+
+void Timeout::Cancel()
+{
+ m_Cancelled.store(true);
+
+ boost::system::error_code ec;
+ m_Timer.cancel(ec);
+}
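A rough sketch of how the CpuBoundWork slots above are meant to be used from within a coroutine (the surrounding work is only indicated by comments):

    #include "base/io-engine.hpp"
    #include <boost/asio/spawn.hpp>

    using namespace icinga;

    void CpuBoundSketch(boost::asio::yield_context yc)
    {
        // The constructor yields this coroutine until one of the CPU-bound
        // slots is free; the destructor (or Done()) releases the slot again.
        CpuBoundWork cpuLock (yc);

        // ... CPU-heavy work that should not monopolize the I/O threads ...

        // Release the slot early once the remaining work is I/O-bound again.
        cpuLock.Done();
    }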
diff --git a/lib/base/io-engine.hpp b/lib/base/io-engine.hpp
new file mode 100644
index 0000000..684d3ac
--- /dev/null
+++ b/lib/base/io-engine.hpp
@@ -0,0 +1,216 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef IO_ENGINE_H
+#define IO_ENGINE_H
+
+#include "base/exception.hpp"
+#include "base/lazy-init.hpp"
+#include "base/logger.hpp"
+#include "base/shared-object.hpp"
+#include <atomic>
+#include <exception>
+#include <memory>
+#include <thread>
+#include <utility>
+#include <vector>
+#include <stdexcept>
+#include <boost/exception/all.hpp>
+#include <boost/asio/deadline_timer.hpp>
+#include <boost/asio/io_context.hpp>
+#include <boost/asio/spawn.hpp>
+
+namespace icinga
+{
+
+/**
+ * Scope lock for CPU-bound work done in an I/O thread
+ *
+ * @ingroup base
+ */
+class CpuBoundWork
+{
+public:
+ CpuBoundWork(boost::asio::yield_context yc);
+ CpuBoundWork(const CpuBoundWork&) = delete;
+ CpuBoundWork(CpuBoundWork&&) = delete;
+ CpuBoundWork& operator=(const CpuBoundWork&) = delete;
+ CpuBoundWork& operator=(CpuBoundWork&&) = delete;
+ ~CpuBoundWork();
+
+ void Done();
+
+private:
+ bool m_Done;
+};
+
+/**
+ * Scope break for CPU-bound work done in an I/O thread
+ *
+ * @ingroup base
+ */
+class IoBoundWorkSlot
+{
+public:
+ IoBoundWorkSlot(boost::asio::yield_context yc);
+ IoBoundWorkSlot(const IoBoundWorkSlot&) = delete;
+ IoBoundWorkSlot(IoBoundWorkSlot&&) = delete;
+ IoBoundWorkSlot& operator=(const IoBoundWorkSlot&) = delete;
+ IoBoundWorkSlot& operator=(IoBoundWorkSlot&&) = delete;
+ ~IoBoundWorkSlot();
+
+private:
+ boost::asio::yield_context yc;
+};
+
+/**
+ * Async I/O engine
+ *
+ * @ingroup base
+ */
+class IoEngine
+{
+ friend CpuBoundWork;
+ friend IoBoundWorkSlot;
+
+public:
+ IoEngine(const IoEngine&) = delete;
+ IoEngine(IoEngine&&) = delete;
+ IoEngine& operator=(const IoEngine&) = delete;
+ IoEngine& operator=(IoEngine&&) = delete;
+ ~IoEngine();
+
+ static IoEngine& Get();
+
+ boost::asio::io_context& GetIoContext();
+
+ static inline size_t GetCoroutineStackSize() {
+#ifdef _WIN32
+ // Increase the stack size for Windows coroutines to prevent exception corruption.
+ // Rationale: Low cost Windows agent only & https://github.com/Icinga/icinga2/issues/7431
+ return 8 * 1024 * 1024;
+#else /* _WIN32 */
+ // Increase the stack size for Linux/Unix coroutines for many JSON objects on the stack.
+ // This may help mitigate possible stack overflows. https://github.com/Icinga/icinga2/issues/7532
+ return 256 * 1024;
+ //return boost::coroutines::stack_allocator::traits_type::default_size(); // Default 64 KB
+#endif /* _WIN32 */
+ }
+
+ template <typename Handler, typename Function>
+ static void SpawnCoroutine(Handler& h, Function f) {
+
+ boost::asio::spawn(h,
+ [f](boost::asio::yield_context yc) {
+
+ try {
+ f(yc);
+ } catch (const boost::coroutines::detail::forced_unwind &) {
+ // Required for proper stack unwinding when coroutines are destroyed.
+ // https://github.com/boostorg/coroutine/issues/39
+ throw;
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "IoEngine", "Exception in coroutine!");
+ Log(LogDebug, "IoEngine") << "Exception in coroutine: " << DiagnosticInformation(ex);
+ } catch (...) {
+ Log(LogCritical, "IoEngine", "Exception in coroutine!");
+ }
+ },
+ boost::coroutines::attributes(GetCoroutineStackSize()) // Set a pre-defined stack size.
+ );
+ }
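+
+	// Usage sketch (the lambda body is illustrative): spawn a detached coroutine
+	// on the global I/O context with the pre-defined stack size.
+	//
+	//   IoEngine::SpawnCoroutine(IoEngine::Get().GetIoContext(), [](boost::asio::yield_context yc) {
+	//       /* asynchronous work using yc */
+	//   });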
+
+ static inline
+ void YieldCurrentCoroutine(boost::asio::yield_context yc)
+ {
+ Get().m_AlreadyExpiredTimer.async_wait(yc);
+ }
+
+private:
+ IoEngine();
+
+ void RunEventLoop();
+
+ static LazyInit<std::unique_ptr<IoEngine>> m_Instance;
+
+ boost::asio::io_context m_IoContext;
+ boost::asio::executor_work_guard<boost::asio::io_context::executor_type> m_KeepAlive;
+ std::vector<std::thread> m_Threads;
+ boost::asio::deadline_timer m_AlreadyExpiredTimer;
+ std::atomic_int_fast32_t m_CpuBoundSemaphore;
+};
+
+class TerminateIoThread : public std::exception
+{
+};
+
+/**
+ * Condition variable which doesn't block I/O threads
+ *
+ * @ingroup base
+ */
+class AsioConditionVariable
+{
+public:
+ AsioConditionVariable(boost::asio::io_context& io, bool init = false);
+
+ void Set();
+ void Clear();
+ void Wait(boost::asio::yield_context yc);
+
+private:
+ boost::asio::deadline_timer m_Timer;
+};
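+
+/* Usage sketch (io and yc are illustrative variables from the surrounding code):
+ *
+ *   AsioConditionVariable cv (io);
+ *
+ *   // Waiting coroutine:
+ *   cv.Wait(yc);   // suspends until the variable is set
+ *
+ *   // Signalling coroutine:
+ *   cv.Set();      // wakes current and future waiters until Clear() is called
+ */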
+
+/**
+ * I/O timeout emulator
+ *
+ * @ingroup base
+ */
+class Timeout : public SharedObject
+{
+public:
+ DECLARE_PTR_TYPEDEFS(Timeout);
+
+ template<class Executor, class TimeoutFromNow, class OnTimeout>
+ Timeout(boost::asio::io_context& io, Executor& executor, TimeoutFromNow timeoutFromNow, OnTimeout onTimeout)
+ : m_Timer(io)
+ {
+ Ptr keepAlive (this);
+
+ m_Cancelled.store(false);
+ m_Timer.expires_from_now(std::move(timeoutFromNow));
+
+ IoEngine::SpawnCoroutine(executor, [this, keepAlive, onTimeout](boost::asio::yield_context yc) {
+ if (m_Cancelled.load()) {
+ return;
+ }
+
+ {
+ boost::system::error_code ec;
+
+ m_Timer.async_wait(yc[ec]);
+
+ if (ec) {
+ return;
+ }
+ }
+
+ if (m_Cancelled.load()) {
+ return;
+ }
+
+ auto f (onTimeout);
+ f(std::move(yc));
+ });
+ }
+
+ void Cancel();
+
+private:
+ boost::asio::deadline_timer m_Timer;
+ std::atomic<bool> m_Cancelled;
+};
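+
+/* Usage sketch (io, strand and the callback body are illustrative): guard an operation
+ * with a 10 second timeout and cancel the timeout once the operation has finished.
+ *
+ *   Timeout::Ptr timeout (new Timeout(io, strand, boost::posix_time::seconds(10),
+ *       [](boost::asio::yield_context yc) {
+ *           // handle the timeout, e.g. close the connection
+ *       }
+ *   ));
+ *
+ *   // ... perform the guarded operation ...
+ *
+ *   timeout->Cancel();
+ */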
+
+}
+
+#endif /* IO_ENGINE_H */
diff --git a/lib/base/journaldlogger.cpp b/lib/base/journaldlogger.cpp
new file mode 100644
index 0000000..92d6af7
--- /dev/null
+++ b/lib/base/journaldlogger.cpp
@@ -0,0 +1,87 @@
+/* Icinga 2 | (c) 2021 Icinga GmbH | GPLv2+ */
+
+#include "base/i2-base.hpp"
+#if !defined(_WIN32) && defined(HAVE_SYSTEMD)
+#include "base/journaldlogger.hpp"
+#include "base/journaldlogger-ti.cpp"
+#include "base/configtype.hpp"
+#include "base/statsfunction.hpp"
+#include "base/sysloglogger.hpp"
+#include <systemd/sd-journal.h>
+
+using namespace icinga;
+
+REGISTER_TYPE(JournaldLogger);
+
+REGISTER_STATSFUNCTION(JournaldLogger, &JournaldLogger::StatsFunc);
+
+void JournaldLogger::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr&)
+{
+ DictionaryData nodes;
+
+ for (const JournaldLogger::Ptr& journaldlogger : ConfigType::GetObjectsByType<JournaldLogger>()) {
+ nodes.emplace_back(journaldlogger->GetName(), 1); //add more stats
+ }
+
+ status->Set("journaldlogger", new Dictionary(std::move(nodes)));
+}
+
+void JournaldLogger::OnConfigLoaded()
+{
+ ObjectImpl<JournaldLogger>::OnConfigLoaded();
+ m_ConfiguredJournalFields.clear();
+ m_ConfiguredJournalFields.push_back(
+ String("SYSLOG_FACILITY=") + Value(SyslogHelper::FacilityToNumber(GetFacility())));
+ const String identifier = GetIdentifier();
+ if (!identifier.IsEmpty()) {
+ m_ConfiguredJournalFields.push_back(String("SYSLOG_IDENTIFIER=" + identifier));
+ }
+}
+
+void JournaldLogger::ValidateFacility(const Lazy<String>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<JournaldLogger>::ValidateFacility(lvalue, utils);
+ if (!SyslogHelper::ValidateFacility(lvalue()))
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "facility" }, "Invalid facility specified."));
+}
+
+/**
+ * Processes a log entry and outputs it to journald.
+ *
+ * @param entry The log entry.
+ */
+void JournaldLogger::ProcessLogEntry(const LogEntry& entry)
+{
+ const std::vector<String> sdFields {
+ String("MESSAGE=") + entry.Message.GetData(),
+ String("PRIORITY=") + Value(SyslogHelper::SeverityToNumber(entry.Severity)),
+ String("ICINGA2_FACILITY=") + entry.Facility,
+ };
+ SystemdJournalSend(sdFields);
+}
+
+void JournaldLogger::Flush()
+{
+ /* Nothing to do here. */
+}
+
+void JournaldLogger::SystemdJournalSend(const std::vector<String>& varJournalFields) const
+{
+ struct iovec iovec[m_ConfiguredJournalFields.size() + varJournalFields.size()];
+ int iovecCount = 0;
+
+ for (const String& journalField: m_ConfiguredJournalFields) {
+ iovec[iovecCount] = IovecFromString(journalField);
+ iovecCount++;
+ }
+ for (const String& journalField: varJournalFields) {
+ iovec[iovecCount] = IovecFromString(journalField);
+ iovecCount++;
+ }
+ sd_journal_sendv(iovec, iovecCount);
+}
+
+struct iovec JournaldLogger::IovecFromString(const String& s) {
+ return { const_cast<char *>(s.CStr()), s.GetLength() };
+}
+#endif /* !_WIN32 && HAVE_SYSTEMD */
diff --git a/lib/base/journaldlogger.hpp b/lib/base/journaldlogger.hpp
new file mode 100644
index 0000000..373dd1a
--- /dev/null
+++ b/lib/base/journaldlogger.hpp
@@ -0,0 +1,44 @@
+/* Icinga 2 | (c) 2021 Icinga GmbH | GPLv2+ */
+
+#ifndef JOURNALDLOGGER_H
+#define JOURNALDLOGGER_H
+
+#include "base/i2-base.hpp"
+#if !defined(_WIN32) && defined(HAVE_SYSTEMD)
+#include "base/journaldlogger-ti.hpp"
+#include <sys/uio.h>
+
+namespace icinga
+{
+
+/**
+ * A logger that logs to systemd journald.
+ *
+ * @ingroup base
+ */
+class JournaldLogger final : public ObjectImpl<JournaldLogger>
+{
+public:
+ DECLARE_OBJECT(JournaldLogger);
+ DECLARE_OBJECTNAME(JournaldLogger);
+
+ static void StaticInitialize();
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+
+ void OnConfigLoaded() override;
+ void ValidateFacility(const Lazy<String>& lvalue, const ValidationUtils& utils) override;
+
+protected:
+ void SystemdJournalSend(const std::vector<String>& varJournalFields) const;
+ static struct iovec IovecFromString(const String& s);
+
+ std::vector<String> m_ConfiguredJournalFields;
+
+ void ProcessLogEntry(const LogEntry& entry) override;
+ void Flush() override;
+};
+
+}
+#endif /* !_WIN32 && HAVE_SYSTEMD */
+
+#endif /* JOURNALDLOGGER_H */
diff --git a/lib/base/journaldlogger.ti b/lib/base/journaldlogger.ti
new file mode 100644
index 0000000..88e9ca1
--- /dev/null
+++ b/lib/base/journaldlogger.ti
@@ -0,0 +1,21 @@
+/* Icinga 2 | (c) 2021 Icinga GmbH | GPLv2+ */
+
+#include "base/logger.hpp"
+
+library base;
+
+namespace icinga
+{
+
+class JournaldLogger : Logger
+{
+ activation_priority -100;
+
+ [config] String facility {
+ default {{{ return "LOG_USER"; }}}
+ };
+
+ [config] String identifier;
+};
+
+}
diff --git a/lib/base/json-script.cpp b/lib/base/json-script.cpp
new file mode 100644
index 0000000..90595c8
--- /dev/null
+++ b/lib/base/json-script.cpp
@@ -0,0 +1,28 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/dictionary.hpp"
+#include "base/function.hpp"
+#include "base/functionwrapper.hpp"
+#include "base/scriptframe.hpp"
+#include "base/initialize.hpp"
+#include "base/json.hpp"
+
+using namespace icinga;
+
+static String JsonEncodeShim(const Value& value)
+{
+ return JsonEncode(value);
+}
+
+INITIALIZE_ONCE([]() {
+ Namespace::Ptr jsonNS = new Namespace(true);
+
+ /* Methods */
+ jsonNS->Set("encode", new Function("Json#encode", JsonEncodeShim, { "value" }, true));
+ jsonNS->Set("decode", new Function("Json#decode", JsonDecode, { "value" }, true));
+
+ jsonNS->Freeze();
+
+ Namespace::Ptr systemNS = ScriptGlobal::Get("System");
+ systemNS->Set("Json", jsonNS, true);
+});
diff --git a/lib/base/json.cpp b/lib/base/json.cpp
new file mode 100644
index 0000000..5689330
--- /dev/null
+++ b/lib/base/json.cpp
@@ -0,0 +1,525 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/json.hpp"
+#include "base/debug.hpp"
+#include "base/namespace.hpp"
+#include "base/dictionary.hpp"
+#include "base/array.hpp"
+#include "base/objectlock.hpp"
+#include "base/convert.hpp"
+#include "base/utility.hpp"
+#include <bitset>
+#include <boost/exception_ptr.hpp>
+#include <cstdint>
+#include <json.hpp>
+#include <stack>
+#include <utility>
+#include <vector>
+
+using namespace icinga;
+
+class JsonSax : public nlohmann::json_sax<nlohmann::json>
+{
+public:
+ bool null() override;
+ bool boolean(bool val) override;
+ bool number_integer(number_integer_t val) override;
+ bool number_unsigned(number_unsigned_t val) override;
+ bool number_float(number_float_t val, const string_t& s) override;
+ bool string(string_t& val) override;
+ bool binary(binary_t& val) override;
+ bool start_object(std::size_t elements) override;
+ bool key(string_t& val) override;
+ bool end_object() override;
+ bool start_array(std::size_t elements) override;
+ bool end_array() override;
+ bool parse_error(std::size_t position, const std::string& last_token, const nlohmann::detail::exception& ex) override;
+
+ Value GetResult();
+
+private:
+ Value m_Root;
+ std::stack<std::pair<Dictionary*, Array*>> m_CurrentSubtree;
+ String m_CurrentKey;
+
+ void FillCurrentTarget(Value value);
+};
+
+const char l_Null[] = "null";
+const char l_False[] = "false";
+const char l_True[] = "true";
+const char l_Indent[] = "    ";
+
+// https://github.com/nlohmann/json/issues/1512
+template<bool prettyPrint>
+class JsonEncoder
+{
+public:
+ void Null();
+ void Boolean(bool value);
+ void NumberFloat(double value);
+ void Strng(String value);
+ void StartObject();
+ void Key(String value);
+ void EndObject();
+ void StartArray();
+ void EndArray();
+
+ String GetResult();
+
+private:
+ std::vector<char> m_Result;
+ String m_CurrentKey;
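+
+	// One element per currently open container:
+	// bit 0 = at least one item has been emitted (a ',' separator is needed before the next one),
+	// bit 1 = the container is an object (a key has to be emitted before each value).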
+ std::stack<std::bitset<2>> m_CurrentSubtree;
+
+ void AppendChar(char c);
+
+ template<class Iterator>
+ void AppendChars(Iterator begin, Iterator end);
+
+ void AppendJson(nlohmann::json json);
+
+ void BeforeItem();
+
+ void FinishContainer(char terminator);
+};
+
+template<bool prettyPrint>
+void Encode(JsonEncoder<prettyPrint>& stateMachine, const Value& value);
+
+template<bool prettyPrint>
+inline
+void EncodeNamespace(JsonEncoder<prettyPrint>& stateMachine, const Namespace::Ptr& ns)
+{
+ stateMachine.StartObject();
+
+ ObjectLock olock(ns);
+ for (const Namespace::Pair& kv : ns) {
+ stateMachine.Key(Utility::ValidateUTF8(kv.first));
+ Encode(stateMachine, kv.second.Val);
+ }
+
+ stateMachine.EndObject();
+}
+
+template<bool prettyPrint>
+inline
+void EncodeDictionary(JsonEncoder<prettyPrint>& stateMachine, const Dictionary::Ptr& dict)
+{
+ stateMachine.StartObject();
+
+ ObjectLock olock(dict);
+ for (const Dictionary::Pair& kv : dict) {
+ stateMachine.Key(Utility::ValidateUTF8(kv.first));
+ Encode(stateMachine, kv.second);
+ }
+
+ stateMachine.EndObject();
+}
+
+template<bool prettyPrint>
+inline
+void EncodeArray(JsonEncoder<prettyPrint>& stateMachine, const Array::Ptr& arr)
+{
+ stateMachine.StartArray();
+
+ ObjectLock olock(arr);
+ for (const Value& value : arr) {
+ Encode(stateMachine, value);
+ }
+
+ stateMachine.EndArray();
+}
+
+template<bool prettyPrint>
+void Encode(JsonEncoder<prettyPrint>& stateMachine, const Value& value)
+{
+ switch (value.GetType()) {
+ case ValueNumber:
+ stateMachine.NumberFloat(value.Get<double>());
+ break;
+
+ case ValueBoolean:
+ stateMachine.Boolean(value.ToBool());
+ break;
+
+ case ValueString:
+ stateMachine.Strng(Utility::ValidateUTF8(value.Get<String>()));
+ break;
+
+ case ValueObject:
+ {
+ const Object::Ptr& obj = value.Get<Object::Ptr>();
+
+ {
+ Namespace::Ptr ns = dynamic_pointer_cast<Namespace>(obj);
+ if (ns) {
+ EncodeNamespace(stateMachine, ns);
+ break;
+ }
+ }
+
+ {
+ Dictionary::Ptr dict = dynamic_pointer_cast<Dictionary>(obj);
+ if (dict) {
+ EncodeDictionary(stateMachine, dict);
+ break;
+ }
+ }
+
+ {
+ Array::Ptr arr = dynamic_pointer_cast<Array>(obj);
+ if (arr) {
+ EncodeArray(stateMachine, arr);
+ break;
+ }
+ }
+
+ // obj is most likely a function => "Object of type 'Function'"
+ Encode(stateMachine, obj->ToString());
+ break;
+ }
+
+ case ValueEmpty:
+ stateMachine.Null();
+ break;
+
+ default:
+ VERIFY(!"Invalid variant type.");
+ }
+}
+
+String icinga::JsonEncode(const Value& value, bool pretty_print)
+{
+ if (pretty_print) {
+ JsonEncoder<true> stateMachine;
+
+ Encode(stateMachine, value);
+
+ return stateMachine.GetResult() + "\n";
+ } else {
+ JsonEncoder<false> stateMachine;
+
+ Encode(stateMachine, value);
+
+ return stateMachine.GetResult();
+ }
+}
+
+Value icinga::JsonDecode(const String& data)
+{
+ String sanitized (Utility::ValidateUTF8(data));
+
+ JsonSax stateMachine;
+
+ nlohmann::json::sax_parse(sanitized.Begin(), sanitized.End(), &stateMachine);
+
+ return stateMachine.GetResult();
+}
+
+inline
+bool JsonSax::null()
+{
+ FillCurrentTarget(Value());
+
+ return true;
+}
+
+inline
+bool JsonSax::boolean(bool val)
+{
+ FillCurrentTarget(val);
+
+ return true;
+}
+
+inline
+bool JsonSax::number_integer(JsonSax::number_integer_t val)
+{
+ FillCurrentTarget((double)val);
+
+ return true;
+}
+
+inline
+bool JsonSax::number_unsigned(JsonSax::number_unsigned_t val)
+{
+ FillCurrentTarget((double)val);
+
+ return true;
+}
+
+inline
+bool JsonSax::number_float(JsonSax::number_float_t val, const JsonSax::string_t&)
+{
+ FillCurrentTarget((double)val);
+
+ return true;
+}
+
+inline
+bool JsonSax::string(JsonSax::string_t& val)
+{
+ FillCurrentTarget(String(std::move(val)));
+
+ return true;
+}
+
+inline
+bool JsonSax::binary(JsonSax::binary_t& val)
+{
+ FillCurrentTarget(String(val.begin(), val.end()));
+
+ return true;
+}
+
+inline
+bool JsonSax::start_object(std::size_t)
+{
+ auto object (new Dictionary());
+
+ FillCurrentTarget(object);
+
+ m_CurrentSubtree.push({object, nullptr});
+
+ return true;
+}
+
+inline
+bool JsonSax::key(JsonSax::string_t& val)
+{
+ m_CurrentKey = String(std::move(val));
+
+ return true;
+}
+
+inline
+bool JsonSax::end_object()
+{
+ m_CurrentSubtree.pop();
+ m_CurrentKey = String();
+
+ return true;
+}
+
+inline
+bool JsonSax::start_array(std::size_t)
+{
+ auto array (new Array());
+
+ FillCurrentTarget(array);
+
+ m_CurrentSubtree.push({nullptr, array});
+
+ return true;
+}
+
+inline
+bool JsonSax::end_array()
+{
+ m_CurrentSubtree.pop();
+
+ return true;
+}
+
+inline
+bool JsonSax::parse_error(std::size_t, const std::string&, const nlohmann::detail::exception& ex)
+{
+ throw std::invalid_argument(ex.what());
+}
+
+inline
+Value JsonSax::GetResult()
+{
+ return m_Root;
+}
+
+inline
+void JsonSax::FillCurrentTarget(Value value)
+{
+ if (m_CurrentSubtree.empty()) {
+ m_Root = value;
+ } else {
+ auto& node (m_CurrentSubtree.top());
+
+ if (node.first) {
+ node.first->Set(m_CurrentKey, value);
+ } else {
+ node.second->Add(value);
+ }
+ }
+}
+
+template<bool prettyPrint>
+inline
+void JsonEncoder<prettyPrint>::Null()
+{
+ BeforeItem();
+ AppendChars((const char*)l_Null, (const char*)l_Null + 4);
+}
+
+template<bool prettyPrint>
+inline
+void JsonEncoder<prettyPrint>::Boolean(bool value)
+{
+ BeforeItem();
+
+ if (value) {
+ AppendChars((const char*)l_True, (const char*)l_True + 4);
+ } else {
+ AppendChars((const char*)l_False, (const char*)l_False + 5);
+ }
+}
+
+template<bool prettyPrint>
+inline
+void JsonEncoder<prettyPrint>::NumberFloat(double value)
+{
+ BeforeItem();
+
+	// Serialize whole numbers without a fractional part (e.g. 0.0 as 0), so e.g. Icinga DB can parse them as int.
+ if (value < 0) {
+ long long i = value;
+
+ if (i == value) {
+ AppendJson(i);
+ } else {
+ AppendJson(value);
+ }
+ } else {
+ unsigned long long i = value;
+
+ if (i == value) {
+ AppendJson(i);
+ } else {
+ AppendJson(value);
+ }
+ }
+}
+
+template<bool prettyPrint>
+inline
+void JsonEncoder<prettyPrint>::Strng(String value)
+{
+ BeforeItem();
+ AppendJson(std::move(value));
+}
+
+template<bool prettyPrint>
+inline
+void JsonEncoder<prettyPrint>::StartObject()
+{
+ BeforeItem();
+ AppendChar('{');
+
+ m_CurrentSubtree.push(2);
+}
+
+template<bool prettyPrint>
+inline
+void JsonEncoder<prettyPrint>::Key(String value)
+{
+ m_CurrentKey = std::move(value);
+}
+
+template<bool prettyPrint>
+inline
+void JsonEncoder<prettyPrint>::EndObject()
+{
+ FinishContainer('}');
+}
+
+template<bool prettyPrint>
+inline
+void JsonEncoder<prettyPrint>::StartArray()
+{
+ BeforeItem();
+ AppendChar('[');
+
+ m_CurrentSubtree.push(0);
+}
+
+template<bool prettyPrint>
+inline
+void JsonEncoder<prettyPrint>::EndArray()
+{
+ FinishContainer(']');
+}
+
+template<bool prettyPrint>
+inline
+String JsonEncoder<prettyPrint>::GetResult()
+{
+ return String(m_Result.begin(), m_Result.end());
+}
+
+template<bool prettyPrint>
+inline
+void JsonEncoder<prettyPrint>::AppendChar(char c)
+{
+ m_Result.emplace_back(c);
+}
+
+template<bool prettyPrint>
+template<class Iterator>
+inline
+void JsonEncoder<prettyPrint>::AppendChars(Iterator begin, Iterator end)
+{
+ m_Result.insert(m_Result.end(), begin, end);
+}
+
+template<bool prettyPrint>
+inline
+void JsonEncoder<prettyPrint>::AppendJson(nlohmann::json json)
+{
+ nlohmann::detail::serializer<nlohmann::json>(nlohmann::detail::output_adapter<char>(m_Result), ' ').dump(std::move(json), prettyPrint, true, 0);
+}
+
+template<bool prettyPrint>
+inline
+void JsonEncoder<prettyPrint>::BeforeItem()
+{
+ if (!m_CurrentSubtree.empty()) {
+ auto& node (m_CurrentSubtree.top());
+
+ if (node[0]) {
+ AppendChar(',');
+ } else {
+ node[0] = true;
+ }
+
+ if (prettyPrint) {
+ AppendChar('\n');
+
+ for (auto i (m_CurrentSubtree.size()); i; --i) {
+ AppendChars((const char*)l_Indent, (const char*)l_Indent + 4);
+ }
+ }
+
+ if (node[1]) {
+ AppendJson(std::move(m_CurrentKey));
+ AppendChar(':');
+
+ if (prettyPrint) {
+ AppendChar(' ');
+ }
+ }
+ }
+}
+
+template<bool prettyPrint>
+inline
+void JsonEncoder<prettyPrint>::FinishContainer(char terminator)
+{
+ if (prettyPrint && m_CurrentSubtree.top()[0]) {
+ AppendChar('\n');
+
+ for (auto i (m_CurrentSubtree.size() - 1u); i; --i) {
+ AppendChars((const char*)l_Indent, (const char*)l_Indent + 4);
+ }
+ }
+
+ AppendChar(terminator);
+
+ m_CurrentSubtree.pop();
+}
diff --git a/lib/base/json.hpp b/lib/base/json.hpp
new file mode 100644
index 0000000..df0ea18
--- /dev/null
+++ b/lib/base/json.hpp
@@ -0,0 +1,19 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef JSON_H
+#define JSON_H
+
+#include "base/i2-base.hpp"
+
+namespace icinga
+{
+
+class String;
+class Value;
+
+String JsonEncode(const Value& value, bool pretty_print = false);
+Value JsonDecode(const String& data);
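+
+/* Usage sketch: round-tripping a value through JSON (the literal is illustrative).
+ *
+ *   Value decoded = JsonDecode("{\"answer\": 42}");
+ *   String pretty = JsonEncode(decoded, true);  // pretty-printed, ends with '\n'
+ */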
+
+}
+
+#endif /* JSON_H */
diff --git a/lib/base/lazy-init.hpp b/lib/base/lazy-init.hpp
new file mode 100644
index 0000000..c1da2cd
--- /dev/null
+++ b/lib/base/lazy-init.hpp
@@ -0,0 +1,72 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef LAZY_INIT
+#define LAZY_INIT
+
+#include <atomic>
+#include <functional>
+#include <mutex>
+#include <utility>
+
+namespace icinga
+{
+
+/**
+ * Lazy object initialization abstraction inspired by
+ * <https://docs.microsoft.com/en-us/dotnet/api/system.lazy-1?view=netframework-4.7.2>.
+ *
+ * @ingroup base
+ */
+template<class T>
+class LazyInit
+{
+public:
+ inline
+ LazyInit(std::function<T()> initializer = []() { return T(); }) : m_Initializer(std::move(initializer))
+ {
+ m_Underlying.store(nullptr, std::memory_order_release);
+ }
+
+ LazyInit(const LazyInit&) = delete;
+ LazyInit(LazyInit&&) = delete;
+ LazyInit& operator=(const LazyInit&) = delete;
+ LazyInit& operator=(LazyInit&&) = delete;
+
+ inline
+ ~LazyInit()
+ {
+ auto ptr (m_Underlying.load(std::memory_order_acquire));
+
+ if (ptr != nullptr) {
+ delete ptr;
+ }
+ }
+
+ inline
+ T& Get()
+ {
+ auto ptr (m_Underlying.load(std::memory_order_acquire));
+
+ if (ptr == nullptr) {
+ std::unique_lock<std::mutex> lock (m_Mutex);
+
+ ptr = m_Underlying.load(std::memory_order_acquire);
+
+ if (ptr == nullptr) {
+ ptr = new T(m_Initializer());
+ m_Underlying.store(ptr, std::memory_order_release);
+ }
+ }
+
+ return *ptr;
+ }
+
+private:
+ std::function<T()> m_Initializer;
+ std::mutex m_Mutex;
+ std::atomic<T*> m_Underlying;
+};
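+
+/* Usage sketch (l_Primes is an illustrative variable):
+ *
+ *   static LazyInit<std::vector<int>> l_Primes ([]() { return std::vector<int>{2, 3, 5, 7}; });
+ *
+ *   // The initializer runs at most once, on first access; later calls return the cached object.
+ *   auto& primes (l_Primes.Get());
+ */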
+
+}
+
+#endif /* LAZY_INIT */
diff --git a/lib/base/library.cpp b/lib/base/library.cpp
new file mode 100644
index 0000000..541ed74
--- /dev/null
+++ b/lib/base/library.cpp
@@ -0,0 +1,68 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/library.hpp"
+#include "base/loader.hpp"
+#include "base/logger.hpp"
+#include "base/exception.hpp"
+#include "base/application.hpp"
+
+using namespace icinga;
+
+/**
+ * Loads the specified library.
+ *
+ * @param name The name of the library.
+ */
+Library::Library(const String& name)
+{
+ String path;
+#if defined(_WIN32)
+ path = name + ".dll";
+#elif defined(__APPLE__)
+ path = "lib" + name + "." + Application::GetAppSpecVersion() + ".dylib";
+#else /* __APPLE__ */
+ path = "lib" + name + ".so." + Application::GetAppSpecVersion();
+#endif /* _WIN32 */
+
+ Log(LogNotice, "Library")
+ << "Loading library '" << path << "'";
+
+#ifdef _WIN32
+ HMODULE hModule = LoadLibrary(path.CStr());
+
+ if (!hModule) {
+ BOOST_THROW_EXCEPTION(win32_error()
+ << boost::errinfo_api_function("LoadLibrary")
+ << errinfo_win32_error(GetLastError())
+ << boost::errinfo_file_name(path));
+ }
+#else /* _WIN32 */
+ void *hModule = dlopen(path.CStr(), RTLD_NOW | RTLD_GLOBAL);
+
+ if (!hModule) {
+ BOOST_THROW_EXCEPTION(std::runtime_error("Could not load library '" + path + "': " + dlerror()));
+ }
+#endif /* _WIN32 */
+
+ Loader::ExecuteDeferredInitializers();
+
+ m_Handle.reset(new LibraryHandle(hModule), [](LibraryHandle *handle) {
+#ifdef _WIN32
+ FreeLibrary(*handle);
+#else /* _WIN32 */
+ dlclose(*handle);
+#endif /* _WIN32 */
+ });
+}
+
+void *Library::GetSymbolAddress(const String& name) const
+{
+ if (!m_Handle)
+ BOOST_THROW_EXCEPTION(std::runtime_error("Invalid library handle"));
+
+#ifdef _WIN32
+ return GetProcAddress(*m_Handle.get(), name.CStr());
+#else /* _WIN32 */
+ return dlsym(*m_Handle.get(), name.CStr());
+#endif /* _WIN32 */
+}
diff --git a/lib/base/library.hpp b/lib/base/library.hpp
new file mode 100644
index 0000000..6bd2065
--- /dev/null
+++ b/lib/base/library.hpp
@@ -0,0 +1,41 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef LIBRARY_H
+#define LIBRARY_H
+
+#include "base/i2-base.hpp"
+#include "base/string.hpp"
+#include <memory>
+
+namespace icinga
+{
+
+#ifndef _WIN32
+typedef void *LibraryHandle;
+#else /* _WIN32 */
+typedef HMODULE LibraryHandle;
+#endif /* _WIN32 */
+
+class Library
+{
+public:
+ Library() = default;
+ Library(const String& name);
+
+ void *GetSymbolAddress(const String& name) const;
+
+ template<typename T>
+ T GetSymbolAddress(const String& name) const
+ {
+ static_assert(!std::is_same<T, void *>::value, "T must not be void *");
+
+ return reinterpret_cast<T>(GetSymbolAddress(name));
+ }
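+
+	// Usage sketch (library and symbol names are illustrative):
+	//
+	//   Library lib ("mylibrary");
+	//   auto entryPoint (lib.GetSymbolAddress<void (*)()>("my_entry_point"));
+	//   if (entryPoint)
+	//       entryPoint();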
+
+private:
+ std::shared_ptr<LibraryHandle> m_Handle;
+};
+
+}
+
+#endif /* LIBRARY_H */
diff --git a/lib/base/loader.cpp b/lib/base/loader.cpp
new file mode 100644
index 0000000..a4364de
--- /dev/null
+++ b/lib/base/loader.cpp
@@ -0,0 +1,38 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/loader.hpp"
+#include "base/logger.hpp"
+#include "base/exception.hpp"
+#include "base/application.hpp"
+
+using namespace icinga;
+
+boost::thread_specific_ptr<Loader::DeferredInitializerPriorityQueue>& Loader::GetDeferredInitializers()
+{
+ static boost::thread_specific_ptr<DeferredInitializerPriorityQueue> initializers;
+ return initializers;
+}
+
+void Loader::ExecuteDeferredInitializers()
+{
+ auto& initializers = GetDeferredInitializers();
+ if (!initializers.get())
+ return;
+
+ while (!initializers->empty()) {
+ DeferredInitializer initializer = initializers->top();
+ initializers->pop();
+ initializer();
+ }
+}
+
+void Loader::AddDeferredInitializer(const std::function<void()>& callback, InitializePriority priority)
+{
+ auto& initializers = GetDeferredInitializers();
+ if (!initializers.get()) {
+ initializers.reset(new Loader::DeferredInitializerPriorityQueue());
+ }
+
+ initializers->push(DeferredInitializer(callback, priority));
+}
+
diff --git a/lib/base/loader.hpp b/lib/base/loader.hpp
new file mode 100644
index 0000000..f1c7759
--- /dev/null
+++ b/lib/base/loader.hpp
@@ -0,0 +1,61 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef LOADER_H
+#define LOADER_H
+
+#include "base/i2-base.hpp"
+#include "base/initialize.hpp"
+#include "base/string.hpp"
+#include <boost/thread/tss.hpp>
+#include <queue>
+
+namespace icinga
+{
+
+struct DeferredInitializer
+{
+public:
+ DeferredInitializer(std::function<void ()> callback, InitializePriority priority)
+ : m_Callback(std::move(callback)), m_Priority(priority)
+ { }
+
+ bool operator>(const DeferredInitializer& other) const
+ {
+ return m_Priority > other.m_Priority;
+ }
+
+ void operator()()
+ {
+ m_Callback();
+ }
+
+private:
+ std::function<void ()> m_Callback;
+ InitializePriority m_Priority;
+};
+
+/**
+ * Loader helper functions.
+ *
+ * @ingroup base
+ */
+class Loader
+{
+public:
+ static void AddDeferredInitializer(const std::function<void ()>& callback, InitializePriority priority = InitializePriority::Default);
+ static void ExecuteDeferredInitializers();
+
+private:
+ Loader();
+
+ // Deferred initializers are run in the order of the definition of their enum values.
+ // Therefore, initializers that should be run first have lower enum values and
+ // the order of the std::priority_queue has to be reversed using std::greater.
+ using DeferredInitializerPriorityQueue = std::priority_queue<DeferredInitializer, std::vector<DeferredInitializer>, std::greater<>>;
+
+ static boost::thread_specific_ptr<DeferredInitializerPriorityQueue>& GetDeferredInitializers();
+};
+
+}
+
+#endif /* LOADER_H */
diff --git a/lib/base/logger.cpp b/lib/base/logger.cpp
new file mode 100644
index 0000000..38a2c67
--- /dev/null
+++ b/lib/base/logger.cpp
@@ -0,0 +1,326 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/logger.hpp"
+#include "base/logger-ti.cpp"
+#include "base/application.hpp"
+#include "base/streamlogger.hpp"
+#include "base/configtype.hpp"
+#include "base/utility.hpp"
+#include "base/objectlock.hpp"
+#include "base/context.hpp"
+#include "base/scriptglobal.hpp"
+#ifdef _WIN32
+#include "base/windowseventloglogger.hpp"
+#endif /* _WIN32 */
+#include <algorithm>
+#include <iostream>
+#include <utility>
+
+using namespace icinga;
+
+template Log& Log::operator<<(const Value&);
+template Log& Log::operator<<(const String&);
+template Log& Log::operator<<(const std::string&);
+template Log& Log::operator<<(const bool&);
+template Log& Log::operator<<(const unsigned int&);
+template Log& Log::operator<<(const int&);
+template Log& Log::operator<<(const unsigned long&);
+template Log& Log::operator<<(const long&);
+template Log& Log::operator<<(const double&);
+
+REGISTER_TYPE(Logger);
+
+std::set<Logger::Ptr> Logger::m_Loggers;
+std::mutex Logger::m_Mutex;
+bool Logger::m_ConsoleLogEnabled = true;
+std::atomic<bool> Logger::m_EarlyLoggingEnabled (true);
+bool Logger::m_TimestampEnabled = true;
+LogSeverity Logger::m_ConsoleLogSeverity = LogInformation;
+std::mutex Logger::m_UpdateMinLogSeverityMutex;
+Atomic<LogSeverity> Logger::m_MinLogSeverity (LogDebug);
+
+INITIALIZE_ONCE([]() {
+ ScriptGlobal::Set("System.LogDebug", LogDebug);
+ ScriptGlobal::Set("System.LogNotice", LogNotice);
+ ScriptGlobal::Set("System.LogInformation", LogInformation);
+ ScriptGlobal::Set("System.LogWarning", LogWarning);
+ ScriptGlobal::Set("System.LogCritical", LogCritical);
+});
+
+/**
+ * Registers the logger and recomputes the minimum log severity.
+ */
+void Logger::Start(bool runtimeCreated)
+{
+ ObjectImpl<Logger>::Start(runtimeCreated);
+
+ {
+ std::unique_lock<std::mutex> lock(m_Mutex);
+ m_Loggers.insert(this);
+ }
+
+ UpdateMinLogSeverity();
+}
+
+void Logger::Stop(bool runtimeRemoved)
+{
+ {
+ std::unique_lock<std::mutex> lock(m_Mutex);
+ m_Loggers.erase(this);
+ }
+
+ UpdateMinLogSeverity();
+
+ ObjectImpl<Logger>::Stop(runtimeRemoved);
+}
+
+std::set<Logger::Ptr> Logger::GetLoggers()
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+ return m_Loggers;
+}
+
+/**
+ * Retrieves the minimum severity for this logger.
+ *
+ * @returns The minimum severity.
+ */
+LogSeverity Logger::GetMinSeverity() const
+{
+ String severity = GetSeverity();
+ if (severity.IsEmpty())
+ return LogInformation;
+ else {
+ LogSeverity ls = LogInformation;
+
+ try {
+ ls = Logger::StringToSeverity(severity);
+ } catch (const std::exception&) { /* use the default level */ }
+
+ return ls;
+ }
+}
+
+/**
+ * Converts a severity enum value to a string.
+ *
+ * @param severity The severity value.
+ */
+String Logger::SeverityToString(LogSeverity severity)
+{
+ switch (severity) {
+ case LogDebug:
+ return "debug";
+ case LogNotice:
+ return "notice";
+ case LogInformation:
+ return "information";
+ case LogWarning:
+ return "warning";
+ case LogCritical:
+ return "critical";
+ default:
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid severity."));
+ }
+}
+
+/**
+ * Converts a string to a severity enum value.
+ *
+ * @param severity The severity.
+ */
+LogSeverity Logger::StringToSeverity(const String& severity)
+{
+ if (severity == "debug")
+ return LogDebug;
+ else if (severity == "notice")
+ return LogNotice;
+ else if (severity == "information")
+ return LogInformation;
+ else if (severity == "warning")
+ return LogWarning;
+ else if (severity == "critical")
+ return LogCritical;
+ else
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid severity: " + severity));
+}
+
+void Logger::DisableConsoleLog()
+{
+ m_ConsoleLogEnabled = false;
+
+ UpdateMinLogSeverity();
+}
+
+void Logger::EnableConsoleLog()
+{
+ m_ConsoleLogEnabled = true;
+
+ UpdateMinLogSeverity();
+}
+
+bool Logger::IsConsoleLogEnabled()
+{
+ return m_ConsoleLogEnabled;
+}
+
+void Logger::SetConsoleLogSeverity(LogSeverity logSeverity)
+{
+ m_ConsoleLogSeverity = logSeverity;
+}
+
+LogSeverity Logger::GetConsoleLogSeverity()
+{
+ return m_ConsoleLogSeverity;
+}
+
+void Logger::DisableEarlyLogging() {
+ m_EarlyLoggingEnabled = false;
+
+ UpdateMinLogSeverity();
+}
+
+bool Logger::IsEarlyLoggingEnabled() {
+ return m_EarlyLoggingEnabled;
+}
+
+void Logger::DisableTimestamp()
+{
+ m_TimestampEnabled = false;
+}
+
+void Logger::EnableTimestamp()
+{
+ m_TimestampEnabled = true;
+}
+
+bool Logger::IsTimestampEnabled()
+{
+ return m_TimestampEnabled;
+}
+
+void Logger::SetSeverity(const String& value, bool suppress_events, const Value& cookie)
+{
+ ObjectImpl<Logger>::SetSeverity(value, suppress_events, cookie);
+
+ UpdateMinLogSeverity();
+}
+
+void Logger::ValidateSeverity(const Lazy<String>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<Logger>::ValidateSeverity(lvalue, utils);
+
+ try {
+ StringToSeverity(lvalue());
+ } catch (...) {
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "severity" }, "Invalid severity specified: " + lvalue()));
+ }
+}
+
+void Logger::UpdateMinLogSeverity()
+{
+ std::unique_lock<std::mutex> lock (m_UpdateMinLogSeverityMutex);
+ auto result (LogNothing);
+
+ for (auto& logger : Logger::GetLoggers()) {
+ ObjectLock llock (logger);
+
+ if (logger->IsActive()) {
+ result = std::min(result, logger->GetMinSeverity());
+ }
+ }
+
+ if (Logger::IsConsoleLogEnabled()) {
+ result = std::min(result, Logger::GetConsoleLogSeverity());
+ }
+
+#ifdef _WIN32
+ if (Logger::IsEarlyLoggingEnabled()) {
+ result = std::min(result, LogCritical);
+ }
+#endif /* _WIN32 */
+
+ m_MinLogSeverity.store(result);
+}
+
+Log::Log(LogSeverity severity, String facility, const String& message)
+ : Log(severity, std::move(facility))
+{
+ if (!m_IsNoOp) {
+ m_Buffer << message;
+ }
+}
+
+Log::Log(LogSeverity severity, String facility)
+ : m_Severity(severity), m_Facility(std::move(facility)), m_IsNoOp(severity < Logger::GetMinLogSeverity())
+{ }
+
+/**
+ * Writes the message to the application's log.
+ */
+Log::~Log()
+{
+ if (m_IsNoOp) {
+ return;
+ }
+
+ LogEntry entry;
+ entry.Timestamp = Utility::GetTime();
+ entry.Severity = m_Severity;
+ entry.Facility = m_Facility;
+
+ {
+ auto msg (m_Buffer.str());
+ msg.erase(msg.find_last_not_of("\n") + 1u);
+
+ entry.Message = std::move(msg);
+ }
+
+ if (m_Severity >= LogWarning) {
+ ContextTrace context;
+
+ if (context.GetLength() > 0) {
+ std::ostringstream trace;
+ trace << context;
+ entry.Message += "\nContext:" + trace.str();
+ }
+ }
+
+ for (const Logger::Ptr& logger : Logger::GetLoggers()) {
+ ObjectLock llock(logger);
+
+ if (!logger->IsActive())
+ continue;
+
+ if (entry.Severity >= logger->GetMinSeverity())
+ logger->ProcessLogEntry(entry);
+
+#ifdef I2_DEBUG /* I2_DEBUG */
+		/* Always flush, don't depend on the timer. Enable this during development on Linux/macOS only; it crashes on Windows. */
+ //logger->Flush();
+#endif /* I2_DEBUG */
+ }
+
+ if (Logger::IsConsoleLogEnabled() && entry.Severity >= Logger::GetConsoleLogSeverity()) {
+ StreamLogger::ProcessLogEntry(std::cout, entry);
+
+ /* "Console" might be a pipe/socket (systemd, daemontools, docker, ...),
+ * then cout will not flush lines automatically. */
+ std::cout << std::flush;
+ }
+
+#ifdef _WIN32
+ if (Logger::IsEarlyLoggingEnabled() && entry.Severity >= LogCritical) {
+ WindowsEventLogLogger::WriteToWindowsEventLog(entry);
+ }
+#endif /* _WIN32 */
+}
+
+Log& Log::operator<<(const char *val)
+{
+ if (!m_IsNoOp) {
+ m_Buffer << val;
+ }
+
+ return *this;
+}
diff --git a/lib/base/logger.hpp b/lib/base/logger.hpp
new file mode 100644
index 0000000..10e0872
--- /dev/null
+++ b/lib/base/logger.hpp
@@ -0,0 +1,149 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef LOGGER_H
+#define LOGGER_H
+
+#include "base/atomic.hpp"
+#include "base/i2-base.hpp"
+#include "base/logger-ti.hpp"
+#include <set>
+#include <sstream>
+
+namespace icinga
+{
+
+/**
+ * Log severity.
+ *
+ * @ingroup base
+ */
+enum LogSeverity
+{
+ LogDebug,
+ LogNotice,
+ LogInformation,
+ LogWarning,
+ LogCritical,
+
+	// Just for internal comparison
+ LogNothing,
+};
+
+/**
+ * A log entry.
+ *
+ * @ingroup base
+ */
+struct LogEntry {
+ double Timestamp; /**< The timestamp when this log entry was created. */
+ LogSeverity Severity; /**< The severity of this log entry. */
+ String Facility; /**< The facility this log entry belongs to. */
+ String Message; /**< The log entry's message. */
+};
+
+/**
+ * A log provider.
+ *
+ * @ingroup base
+ */
+class Logger : public ObjectImpl<Logger>
+{
+public:
+ DECLARE_OBJECT(Logger);
+
+ static String SeverityToString(LogSeverity severity);
+ static LogSeverity StringToSeverity(const String& severity);
+
+ LogSeverity GetMinSeverity() const;
+
+ /**
+ * Processes the log entry and writes it to the log that is
+	 * represented by this Logger object.
+ *
+ * @param entry The log entry that is to be processed.
+ */
+ virtual void ProcessLogEntry(const LogEntry& entry) = 0;
+
+ virtual void Flush() = 0;
+
+ static std::set<Logger::Ptr> GetLoggers();
+
+ static void DisableConsoleLog();
+ static void EnableConsoleLog();
+ static bool IsConsoleLogEnabled();
+ static void DisableEarlyLogging();
+ static bool IsEarlyLoggingEnabled();
+ static void DisableTimestamp();
+ static void EnableTimestamp();
+ static bool IsTimestampEnabled();
+
+ static void SetConsoleLogSeverity(LogSeverity logSeverity);
+ static LogSeverity GetConsoleLogSeverity();
+
+ static inline
+ LogSeverity GetMinLogSeverity()
+ {
+ return m_MinLogSeverity.load();
+ }
+
+ void SetSeverity(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+ void ValidateSeverity(const Lazy<String>& lvalue, const ValidationUtils& utils) final;
+
+protected:
+ void Start(bool runtimeCreated) override;
+ void Stop(bool runtimeRemoved) override;
+
+private:
+ static void UpdateMinLogSeverity();
+
+ static std::mutex m_Mutex;
+ static std::set<Logger::Ptr> m_Loggers;
+ static bool m_ConsoleLogEnabled;
+ static std::atomic<bool> m_EarlyLoggingEnabled;
+ static bool m_TimestampEnabled;
+ static LogSeverity m_ConsoleLogSeverity;
+ static std::mutex m_UpdateMinLogSeverityMutex;
+ static Atomic<LogSeverity> m_MinLogSeverity;
+};
+
+class Log
+{
+public:
+ Log() = delete;
+ Log(const Log& other) = delete;
+ Log& operator=(const Log& rhs) = delete;
+
+ Log(LogSeverity severity, String facility, const String& message);
+ Log(LogSeverity severity, String facility);
+
+ ~Log();
+
+ template<typename T>
+ Log& operator<<(const T& val)
+ {
+ m_Buffer << val;
+ return *this;
+ }
+
+ Log& operator<<(const char *val);
+
+private:
+ LogSeverity m_Severity;
+ String m_Facility;
+ std::ostringstream m_Buffer;
+ bool m_IsNoOp;
+};
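+
+/* Usage sketch ("MyFeature" and count are illustrative):
+ *
+ *   Log(LogInformation, "MyFeature") << "Processed " << count << " items.";
+ *
+ *   // The entry is passed to every active logger whose minimum severity it reaches
+ *   // when the temporary Log object is destroyed at the end of the statement.
+ */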
+
+extern template Log& Log::operator<<(const Value&);
+extern template Log& Log::operator<<(const String&);
+extern template Log& Log::operator<<(const std::string&);
+extern template Log& Log::operator<<(const bool&);
+extern template Log& Log::operator<<(const unsigned int&);
+extern template Log& Log::operator<<(const int&);
+extern template Log& Log::operator<<(const unsigned long&);
+extern template Log& Log::operator<<(const long&);
+extern template Log& Log::operator<<(const double&);
+
+}
+
+#endif /* LOGGER_H */
diff --git a/lib/base/logger.ti b/lib/base/logger.ti
new file mode 100644
index 0000000..44226ce
--- /dev/null
+++ b/lib/base/logger.ti
@@ -0,0 +1,17 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+
+library base;
+
+namespace icinga
+{
+
+abstract class Logger : ConfigObject
+{
+ [config, virtual] String severity {
+ default {{{ return "information"; }}}
+ };
+};
+
+}
diff --git a/lib/base/math-script.cpp b/lib/base/math-script.cpp
new file mode 100644
index 0000000..6cd7b0e
--- /dev/null
+++ b/lib/base/math-script.cpp
@@ -0,0 +1,184 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/dictionary.hpp"
+#include "base/function.hpp"
+#include "base/functionwrapper.hpp"
+#include "base/scriptframe.hpp"
+#include "base/initialize.hpp"
+#include "base/namespace.hpp"
+#include <boost/math/special_functions/round.hpp>
+#include <cmath>
+
+using namespace icinga;
+
+static double MathAbs(double x)
+{
+ return std::fabs(x);
+}
+
+static double MathAcos(double x)
+{
+ return std::acos(x);
+}
+
+static double MathAsin(double x)
+{
+ return std::asin(x);
+}
+
+static double MathAtan(double x)
+{
+ return std::atan(x);
+}
+
+static double MathAtan2(double y, double x)
+{
+ return std::atan2(y, x);
+}
+
+static double MathCeil(double x)
+{
+ return std::ceil(x);
+}
+
+static double MathCos(double x)
+{
+ return std::cos(x);
+}
+
+static double MathExp(double x)
+{
+ return std::exp(x);
+}
+
+static double MathFloor(double x)
+{
+ return std::floor(x);
+}
+
+static double MathLog(double x)
+{
+ return std::log(x);
+}
+
+static Value MathMax(const std::vector<Value>& args)
+{
+ bool first = true;
+ Value result = -INFINITY;
+
+ for (const Value& arg : args) {
+ if (first || arg > result) {
+ first = false;
+ result = arg;
+ }
+ }
+
+ return result;
+}
+
+static Value MathMin(const std::vector<Value>& args)
+{
+ bool first = true;
+ Value result = INFINITY;
+
+ for (const Value& arg : args) {
+ if (first || arg < result) {
+ first = false;
+ result = arg;
+ }
+ }
+
+ return result;
+}
+
+static double MathPow(double x, double y)
+{
+ return std::pow(x, y);
+}
+
+static double MathRandom()
+{
+ return (double)std::rand() / RAND_MAX;
+}
+
+static double MathRound(double x)
+{
+ return boost::math::round(x);
+}
+
+static double MathSin(double x)
+{
+ return std::sin(x);
+}
+
+static double MathSqrt(double x)
+{
+ return std::sqrt(x);
+}
+
+static double MathTan(double x)
+{
+ return std::tan(x);
+}
+
+static bool MathIsnan(double x)
+{
+ return boost::math::isnan(x);
+}
+
+static bool MathIsinf(double x)
+{
+ return boost::math::isinf(x);
+}
+
+static double MathSign(double x)
+{
+ if (x > 0)
+ return 1;
+ else if (x < 0)
+ return -1;
+ else
+ return 0;
+}
+
+INITIALIZE_ONCE([]() {
+ Namespace::Ptr mathNS = new Namespace(true);
+
+ /* Constants */
+ mathNS->Set("E", 2.71828182845904523536);
+ mathNS->Set("LN2", 0.693147180559945309417);
+ mathNS->Set("LN10", 2.30258509299404568402);
+ mathNS->Set("LOG2E", 1.44269504088896340736);
+ mathNS->Set("LOG10E", 0.434294481903251827651);
+ mathNS->Set("PI", 3.14159265358979323846);
+ mathNS->Set("SQRT1_2", 0.707106781186547524401);
+ mathNS->Set("SQRT2", 1.41421356237309504880);
+
+ /* Methods */
+ mathNS->Set("abs", new Function("Math#abs", MathAbs, { "x" }, true));
+ mathNS->Set("acos", new Function("Math#acos", MathAcos, { "x" }, true));
+ mathNS->Set("asin", new Function("Math#asin", MathAsin, { "x" }, true));
+ mathNS->Set("atan", new Function("Math#atan", MathAtan, { "x" }, true));
+ mathNS->Set("atan2", new Function("Math#atan2", MathAtan2, { "x", "y" }, true));
+ mathNS->Set("ceil", new Function("Math#ceil", MathCeil, { "x" }, true));
+ mathNS->Set("cos", new Function("Math#cos", MathCos, { "x" }, true));
+ mathNS->Set("exp", new Function("Math#exp", MathExp, { "x" }, true));
+ mathNS->Set("floor", new Function("Math#floor", MathFloor, { "x" }, true));
+ mathNS->Set("log", new Function("Math#log", MathLog, { "x" }, true));
+ mathNS->Set("max", new Function("Math#max", MathMax, {}, true));
+ mathNS->Set("min", new Function("Math#min", MathMin, {}, true));
+ mathNS->Set("pow", new Function("Math#pow", MathPow, { "x", "y" }, true));
+ mathNS->Set("random", new Function("Math#random", MathRandom, {}, true));
+ mathNS->Set("round", new Function("Math#round", MathRound, { "x" }, true));
+ mathNS->Set("sin", new Function("Math#sin", MathSin, { "x" }, true));
+ mathNS->Set("sqrt", new Function("Math#sqrt", MathSqrt, { "x" }, true));
+ mathNS->Set("tan", new Function("Math#tan", MathTan, { "x" }, true));
+ mathNS->Set("isnan", new Function("Math#isnan", MathIsnan, { "x" }, true));
+ mathNS->Set("isinf", new Function("Math#isinf", MathIsinf, { "x" }, true));
+ mathNS->Set("sign", new Function("Math#sign", MathSign, { "x" }, true));
+
+ mathNS->Freeze();
+
+ Namespace::Ptr systemNS = ScriptGlobal::Get("System");
+ systemNS->Set("Math", mathNS, true);
+});
diff --git a/lib/base/namespace-script.cpp b/lib/base/namespace-script.cpp
new file mode 100644
index 0000000..deaae7d
--- /dev/null
+++ b/lib/base/namespace-script.cpp
@@ -0,0 +1,84 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/namespace.hpp"
+#include "base/function.hpp"
+#include "base/functionwrapper.hpp"
+#include "base/scriptframe.hpp"
+#include "base/array.hpp"
+
+using namespace icinga;
+
+static void NamespaceSet(const String& key, const Value& value)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Namespace::Ptr self = static_cast<Namespace::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ self->Set(key, value);
+}
+
+static Value NamespaceGet(const String& key)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Namespace::Ptr self = static_cast<Namespace::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ return self->Get(key);
+}
+
+static void NamespaceRemove(const String& key)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Namespace::Ptr self = static_cast<Namespace::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ self->Remove(key);
+}
+
+static bool NamespaceContains(const String& key)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Namespace::Ptr self = static_cast<Namespace::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ return self->Contains(key);
+}
+
+static Array::Ptr NamespaceKeys()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Namespace::Ptr self = static_cast<Namespace::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+
+ ArrayData keys;
+ ObjectLock olock(self);
+ for (const Namespace::Pair& kv : self) {
+ keys.push_back(kv.first);
+ }
+ return new Array(std::move(keys));
+}
+
+static Array::Ptr NamespaceValues()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Namespace::Ptr self = static_cast<Namespace::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+
+ ArrayData values;
+ ObjectLock olock(self);
+ for (const Namespace::Pair& kv : self) {
+ values.push_back(kv.second.Val);
+ }
+ return new Array(std::move(values));
+}
+
+Object::Ptr Namespace::GetPrototype()
+{
+ static Dictionary::Ptr prototype = new Dictionary({
+ { "set", new Function("Namespace#set", NamespaceSet, { "key", "value" }) },
+ { "get", new Function("Namespace#get", NamespaceGet, { "key" }) },
+ { "remove", new Function("Namespace#remove", NamespaceRemove, { "key" }) },
+ { "contains", new Function("Namespace#contains", NamespaceContains, { "key" }, true) },
+ { "keys", new Function("Namespace#keys", NamespaceKeys, {}, true) },
+ { "values", new Function("Namespace#values", NamespaceValues, {}, true) },
+ });
+
+ return prototype;
+}
+
diff --git a/lib/base/namespace.cpp b/lib/base/namespace.cpp
new file mode 100644
index 0000000..4c5f4f6
--- /dev/null
+++ b/lib/base/namespace.cpp
@@ -0,0 +1,189 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/namespace.hpp"
+#include "base/objectlock.hpp"
+#include "base/debug.hpp"
+#include "base/primitivetype.hpp"
+#include "base/debuginfo.hpp"
+#include "base/exception.hpp"
+#include <sstream>
+
+using namespace icinga;
+
+template class std::map<icinga::String, std::shared_ptr<icinga::NamespaceValue> >;
+
+REGISTER_PRIMITIVE_TYPE(Namespace, Object, Namespace::GetPrototype());
+
+/**
+ * Creates a new namespace.
+ *
+ * @param constValues If true, all values inserted into the namespace are treated as constants and can't be updated.
+ */
+Namespace::Namespace(bool constValues)
+ : m_ConstValues(constValues), m_Frozen(false)
+{ }
+
+Value Namespace::Get(const String& field) const
+{
+ Value value;
+ if (!Get(field, &value))
+ BOOST_THROW_EXCEPTION(ScriptError("Namespace does not contain field '" + field + "'"));
+ return value;
+}
+
+bool Namespace::Get(const String& field, Value *value) const
+{
+ auto lock(ReadLockUnlessFrozen());
+
+ auto nsVal = m_Data.find(field);
+
+ if (nsVal == m_Data.end()) {
+ return false;
+ }
+
+ *value = nsVal->second.Val;
+ return true;
+}
+
+void Namespace::Set(const String& field, const Value& value, bool isConst, const DebugInfo& debugInfo)
+{
+ ObjectLock olock(this);
+
+ if (m_Frozen) {
+ BOOST_THROW_EXCEPTION(ScriptError("Namespace is read-only and must not be modified.", debugInfo));
+ }
+
+ std::unique_lock<decltype(m_DataMutex)> dlock (m_DataMutex);
+
+ auto nsVal = m_Data.find(field);
+
+ if (nsVal == m_Data.end()) {
+ m_Data[field] = NamespaceValue{value, isConst || m_ConstValues};
+ } else {
+ if (nsVal->second.Const) {
+ BOOST_THROW_EXCEPTION(ScriptError("Constant must not be modified.", debugInfo));
+ }
+
+ nsVal->second.Val = value;
+ }
+}
+
+/**
+ * Returns the number of elements in the namespace.
+ *
+ * @returns Number of elements.
+ */
+size_t Namespace::GetLength() const
+{
+ auto lock(ReadLockUnlessFrozen());
+
+ return m_Data.size();
+}
+
+bool Namespace::Contains(const String& field) const
+{
+ auto lock (ReadLockUnlessFrozen());
+
+ return m_Data.find(field) != m_Data.end();
+}
+
+void Namespace::Remove(const String& field)
+{
+ ObjectLock olock(this);
+
+ if (m_Frozen) {
+ BOOST_THROW_EXCEPTION(ScriptError("Namespace is read-only and must not be modified."));
+ }
+
+ std::unique_lock<decltype(m_DataMutex)> dlock (m_DataMutex);
+
+ auto it = m_Data.find(field);
+
+ if (it == m_Data.end()) {
+ return;
+ }
+
+ if (it->second.Const) {
+ BOOST_THROW_EXCEPTION(ScriptError("Constants must not be removed."));
+ }
+
+ m_Data.erase(it);
+}
+
+/**
+ * Freeze the namespace, preventing further updates.
+ *
+ * This only prevents inserting, replacing or deleting values from the namespace. It has no effect on the
+ * objects referenced by the values; they remain mutable if they were mutable before.
+ */
+void Namespace::Freeze() {
+ ObjectLock olock(this);
+
+ m_Frozen = true;
+}
+
+std::shared_lock<std::shared_timed_mutex> Namespace::ReadLockUnlessFrozen() const
+{
+ if (m_Frozen.load(std::memory_order_relaxed)) {
+ return std::shared_lock<std::shared_timed_mutex>();
+ } else {
+ return std::shared_lock<std::shared_timed_mutex>(m_DataMutex);
+ }
+}
+
+Value Namespace::GetFieldByName(const String& field, bool, const DebugInfo& debugInfo) const
+{
+ auto lock (ReadLockUnlessFrozen());
+
+ auto nsVal = m_Data.find(field);
+
+ if (nsVal != m_Data.end())
+ return nsVal->second.Val;
+ else
+		return GetPrototypeField(const_cast<Namespace *>(this), field, false, debugInfo); /* Ignore "indexer not found" errors, similarly to the Dictionary class. */
+}
+
+void Namespace::SetFieldByName(const String& field, const Value& value, bool overrideFrozen, const DebugInfo& debugInfo)
+{
+	// The overrideFrozen parameter is mandated by the interface but ignored here. Once the namespace is frozen,
+	// read operations no longer acquire a lock, so the namespace must not be modified again in order to keep the
+	// internal data structures consistent.
+ (void) overrideFrozen;
+
+ Set(field, value, false, debugInfo);
+}
+
+bool Namespace::HasOwnField(const String& field) const
+{
+ return Contains(field);
+}
+
+bool Namespace::GetOwnField(const String& field, Value *result) const
+{
+ return Get(field, result);
+}
+
+Namespace::Iterator Namespace::Begin()
+{
+ ASSERT(OwnsLock());
+
+ return m_Data.begin();
+}
+
+Namespace::Iterator Namespace::End()
+{
+ ASSERT(OwnsLock());
+
+ return m_Data.end();
+}
+
+Namespace::Iterator icinga::begin(const Namespace::Ptr& x)
+{
+ return x->Begin();
+}
+
+Namespace::Iterator icinga::end(const Namespace::Ptr& x)
+{
+ return x->End();
+}
+
diff --git a/lib/base/namespace.hpp b/lib/base/namespace.hpp
new file mode 100644
index 0000000..94f2055
--- /dev/null
+++ b/lib/base/namespace.hpp
@@ -0,0 +1,105 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef NAMESPACE_H
+#define NAMESPACE_H
+
+#include "base/i2-base.hpp"
+#include "base/object.hpp"
+#include "base/shared-object.hpp"
+#include "base/value.hpp"
+#include "base/debuginfo.hpp"
+#include <atomic>
+#include <map>
+#include <vector>
+#include <memory>
+#include <shared_mutex>
+
+namespace icinga
+{
+
+struct NamespaceValue
+{
+ Value Val;
+ bool Const;
+};
+
+
+/**
+ * A namespace.
+ *
+ * ## External Locking
+ *
+ * Synchronization is handled internally, so almost all functions are safe for concurrent use without external locking.
+ * The only exceptions to this are the functions returning iterators. To use these, the caller has to acquire an ObjectLock
+ * on the namespace. The iterators only remain valid for as long as that ObjectLock is held. Note that this also
+ * includes range-based for loops.
+ *
+ * If consistency across multiple operations is required, an ObjectLock must also be acquired to prevent concurrent
+ * modifications.
+ *
+ * ## Internal Locking
+ *
+ * Two mutex objects are involved in locking a namespace: the recursive mutex inherited from the Object class that is
+ * acquired and released using the ObjectLock class and the m_DataMutex shared mutex contained directly in the
+ * Namespace class. The ObjectLock is used to synchronize multiple write operations against each other. The shared mutex
+ * is only used to ensure the consistency of the m_Data data structure.
+ *
+ * Read operations must acquire a shared lock on m_DataMutex. This prevents concurrent writes to that data structure
+ * but still allows concurrent reads.
+ *
+ * Write operations must first obtain an ObjectLock and then an exclusive lock on m_DataMutex. This order is important for
+ * preventing deadlocks. The ObjectLock prevents concurrent write operations while the exclusive lock on m_DataMutex
+ * blocks concurrent read operations.
+ *
+ * External read access to iterators is synchronized by the caller holding an ObjectLock. This ensures no concurrent
+ * write operations as these require the ObjectLock but still allows concurrent reads as m_DataMutex is not locked.
+ *
+ * @ingroup base
+ */
+class Namespace final : public Object
+{
+public:
+ DECLARE_OBJECT(Namespace);
+
+ typedef std::map<String, NamespaceValue>::iterator Iterator;
+
+ typedef std::map<String, NamespaceValue>::value_type Pair;
+
+ explicit Namespace(bool constValues = false);
+
+ Value Get(const String& field) const;
+ bool Get(const String& field, Value *value) const;
+ void Set(const String& field, const Value& value, bool isConst = false, const DebugInfo& debugInfo = DebugInfo());
+ bool Contains(const String& field) const;
+ void Remove(const String& field);
+ void Freeze();
+
+ Iterator Begin();
+ Iterator End();
+
+ size_t GetLength() const;
+
+ Value GetFieldByName(const String& field, bool sandboxed, const DebugInfo& debugInfo) const override;
+ void SetFieldByName(const String& field, const Value& value, bool overrideFrozen, const DebugInfo& debugInfo) override;
+ bool HasOwnField(const String& field) const override;
+ bool GetOwnField(const String& field, Value *result) const override;
+
+ static Object::Ptr GetPrototype();
+
+private:
+ std::shared_lock<std::shared_timed_mutex> ReadLockUnlessFrozen() const;
+
+ std::map<String, NamespaceValue> m_Data;
+ mutable std::shared_timed_mutex m_DataMutex;
+ bool m_ConstValues;
+ std::atomic<bool> m_Frozen;
+};
+
+Namespace::Iterator begin(const Namespace::Ptr& x);
+Namespace::Iterator end(const Namespace::Ptr& x);
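+
+/* Iteration sketch: as described above, range-based for loops require an ObjectLock held for the
+ * whole loop (illustrative code; ObjectLock comes from base/objectlock.hpp):
+ *
+ *   Namespace::Ptr systemNS = ScriptGlobal::Get("System");
+ *
+ *   ObjectLock olock (systemNS);
+ *   for (const Namespace::Pair& kv : systemNS) {
+ *       // kv.first is the field name, kv.second.Val the stored value
+ *   }
+ */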
+
+}
+
+extern template class std::map<icinga::String, std::shared_ptr<icinga::NamespaceValue> >;
+
+#endif /* NAMESPACE_H */
diff --git a/lib/base/netstring.cpp b/lib/base/netstring.cpp
new file mode 100644
index 0000000..60f08c2
--- /dev/null
+++ b/lib/base/netstring.cpp
@@ -0,0 +1,334 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/netstring.hpp"
+#include "base/debug.hpp"
+#include "base/tlsstream.hpp"
+#include <cstdint>
+#include <memory>
+#include <sstream>
+#include <utility>
+#include <boost/asio/buffer.hpp>
+#include <boost/asio/read.hpp>
+#include <boost/asio/spawn.hpp>
+#include <boost/asio/write.hpp>
+
+using namespace icinga;
+
+/**
+ * Reads data from a stream in netstring format.
+ *
+ * @param stream The stream to read from.
+ * @param[out] str The string that has been read from the stream.
+ * @returns A StreamReadStatus indicating whether a complete string could be read.
+ * @exception invalid_argument The input stream is invalid.
+ * @see https://github.com/PeterScott/netstring-c/blob/master/netstring.c
+ */
+StreamReadStatus NetString::ReadStringFromStream(const Stream::Ptr& stream, String *str, StreamReadContext& context,
+ bool may_wait, ssize_t maxMessageLength)
+{
+ if (context.Eof)
+ return StatusEof;
+
+ if (context.MustRead) {
+ if (!context.FillFromStream(stream, may_wait)) {
+ context.Eof = true;
+ return StatusEof;
+ }
+
+ context.MustRead = false;
+ }
+
+ size_t header_length = 0;
+
+ for (size_t i = 0; i < context.Size; i++) {
+ if (context.Buffer[i] == ':') {
+ header_length = i;
+
+ /* make sure there's a header */
+ if (header_length == 0)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid NetString (no length specifier)"));
+
+ break;
+ } else if (i > 16)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid NetString (missing :)"));
+ }
+
+ if (header_length == 0) {
+ context.MustRead = true;
+ return StatusNeedData;
+ }
+
+ /* no leading zeros allowed */
+ if (context.Buffer[0] == '0' && isdigit(context.Buffer[1]))
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid NetString (leading zero)"));
+
+ size_t len, i;
+
+ len = 0;
+ for (i = 0; i < header_length && isdigit(context.Buffer[i]); i++) {
+ /* length specifier must have at most 9 characters */
+ if (i >= 9)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Length specifier must not exceed 9 characters"));
+
+ len = len * 10 + (context.Buffer[i] - '0');
+ }
+
+ /* read the whole message */
+ size_t data_length = len + 1;
+
+ if (maxMessageLength >= 0 && data_length > (size_t)maxMessageLength) {
+ std::stringstream errorMessage;
+ errorMessage << "Max data length exceeded: " << (maxMessageLength / 1024) << " KB";
+
+ BOOST_THROW_EXCEPTION(std::invalid_argument(errorMessage.str()));
+ }
+
+ char *data = context.Buffer + header_length + 1;
+
+ if (context.Size < header_length + 1 + data_length) {
+ context.MustRead = true;
+ return StatusNeedData;
+ }
+
+ if (data[len] != ',')
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid NetString (missing ,)"));
+
+ *str = String(&data[0], &data[len]);
+
+ context.DropData(header_length + 1 + len + 1);
+
+ return StatusNewItem;
+}
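+
+/* Wire-format sketch: a netstring is "<decimal length>:<payload>,", so the payload
+ * "hello" is transmitted as "5:hello," and the empty string as "0:,".
+ */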
+
+/**
+ * Writes data into a stream using the netstring format and returns bytes written.
+ *
+ * @param stream The stream.
+ * @param str The String that is to be written.
+ *
+ * @return The amount of bytes written.
+ */
+size_t NetString::WriteStringToStream(const Stream::Ptr& stream, const String& str)
+{
+ std::ostringstream msgbuf;
+ WriteStringToStream(msgbuf, str);
+
+ String msg = msgbuf.str();
+ stream->Write(msg.CStr(), msg.GetLength());
+ return msg.GetLength();
+}
+
+/**
+ * Reads data from a stream in netstring format.
+ *
+ * @param stream The stream to read from.
+ * @returns The String that has been read from the stream.
+ * @exception invalid_argument The input stream is invalid.
+ * @see https://github.com/PeterScott/netstring-c/blob/master/netstring.c
+ */
+String NetString::ReadStringFromStream(const Shared<AsioTlsStream>::Ptr& stream,
+ ssize_t maxMessageLength)
+{
+ namespace asio = boost::asio;
+
+ size_t len = 0;
+ bool leadingZero = false;
+
+ for (uint_fast8_t readBytes = 0;; ++readBytes) {
+ char byte = 0;
+
+ {
+ asio::mutable_buffer byteBuf (&byte, 1);
+ asio::read(*stream, byteBuf);
+ }
+
+ if (isdigit(byte)) {
+ if (readBytes == 9) {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Length specifier must not exceed 9 characters"));
+ }
+
+ if (leadingZero) {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid NetString (leading zero)"));
+ }
+
+ len = len * 10u + size_t(byte - '0');
+
+ if (!readBytes && byte == '0') {
+ leadingZero = true;
+ }
+ } else if (byte == ':') {
+ if (!readBytes) {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid NetString (no length specifier)"));
+ }
+
+ break;
+ } else {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid NetString (missing :)"));
+ }
+ }
+
+ if (maxMessageLength >= 0 && len > maxMessageLength) {
+ std::stringstream errorMessage;
+ errorMessage << "Max data length exceeded: " << (maxMessageLength / 1024) << " KB";
+
+ BOOST_THROW_EXCEPTION(std::invalid_argument(errorMessage.str()));
+ }
+
+ String payload;
+
+ if (len) {
+ payload.Append(len, 0);
+
+ asio::mutable_buffer payloadBuf (&*payload.Begin(), payload.GetLength());
+ asio::read(*stream, payloadBuf);
+ }
+
+ char trailer = 0;
+
+ {
+ asio::mutable_buffer trailerBuf (&trailer, 1);
+ asio::read(*stream, trailerBuf);
+ }
+
+ if (trailer != ',') {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid NetString (missing ,)"));
+ }
+
+ return payload;
+}
+
+/**
+ * Reads data from a stream in netstring format.
+ *
+ * @param stream The stream to read from.
+ * @returns The String that has been read from the stream.
+ * @exception invalid_argument The input stream is invalid.
+ * @see https://github.com/PeterScott/netstring-c/blob/master/netstring.c
+ */
+String NetString::ReadStringFromStream(const Shared<AsioTlsStream>::Ptr& stream,
+ boost::asio::yield_context yc, ssize_t maxMessageLength)
+{
+ namespace asio = boost::asio;
+
+ size_t len = 0;
+ bool leadingZero = false;
+
+ for (uint_fast8_t readBytes = 0;; ++readBytes) {
+ char byte = 0;
+
+ {
+ asio::mutable_buffer byteBuf (&byte, 1);
+ asio::async_read(*stream, byteBuf, yc);
+ }
+
+ if (isdigit(byte)) {
+ if (readBytes == 9) {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Length specifier must not exceed 9 characters"));
+ }
+
+ if (leadingZero) {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid NetString (leading zero)"));
+ }
+
+ len = len * 10u + size_t(byte - '0');
+
+ if (!readBytes && byte == '0') {
+ leadingZero = true;
+ }
+ } else if (byte == ':') {
+ if (!readBytes) {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid NetString (no length specifier)"));
+ }
+
+ break;
+ } else {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid NetString (missing :)"));
+ }
+ }
+
+ if (maxMessageLength >= 0 && len > maxMessageLength) {
+ std::stringstream errorMessage;
+ errorMessage << "Max data length exceeded: " << (maxMessageLength / 1024) << " KB";
+
+ BOOST_THROW_EXCEPTION(std::invalid_argument(errorMessage.str()));
+ }
+
+ String payload;
+
+ if (len) {
+ payload.Append(len, 0);
+
+ asio::mutable_buffer payloadBuf (&*payload.Begin(), payload.GetLength());
+ asio::async_read(*stream, payloadBuf, yc);
+ }
+
+ char trailer = 0;
+
+ {
+ asio::mutable_buffer trailerBuf (&trailer, 1);
+ asio::async_read(*stream, trailerBuf, yc);
+ }
+
+ if (trailer != ',') {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid NetString (missing ,)"));
+ }
+
+ return payload;
+}
+
+/**
+ * Writes data into a stream using the netstring format and returns bytes written.
+ *
+ * @param stream The stream.
+ * @param str The String that is to be written.
+ *
+ * @return The number of bytes written.
+ */
+size_t NetString::WriteStringToStream(const Shared<AsioTlsStream>::Ptr& stream, const String& str)
+{
+ namespace asio = boost::asio;
+
+ std::ostringstream msgbuf;
+ WriteStringToStream(msgbuf, str);
+
+ String msg = msgbuf.str();
+ asio::const_buffer msgBuf (msg.CStr(), msg.GetLength());
+
+ asio::write(*stream, msgBuf);
+
+ return msg.GetLength();
+}
+
+/**
+ * Writes data into a stream using the netstring format and returns bytes written.
+ *
+ * @param stream The stream.
+ * @param str The String that is to be written.
+ *
+ * @return The number of bytes written.
+ */
+size_t NetString::WriteStringToStream(const Shared<AsioTlsStream>::Ptr& stream, const String& str, boost::asio::yield_context yc)
+{
+ namespace asio = boost::asio;
+
+ std::ostringstream msgbuf;
+ WriteStringToStream(msgbuf, str);
+
+ String msg = msgbuf.str();
+ asio::const_buffer msgBuf (msg.CStr(), msg.GetLength());
+
+ asio::async_write(*stream, msgBuf, yc);
+
+ return msg.GetLength();
+}
+
+/**
+ * Writes data into a stream using the netstring format.
+ *
+ * @param stream The stream.
+ * @param str The String that is to be written.
+ */
+void NetString::WriteStringToStream(std::ostream& stream, const String& str)
+{
+ stream << str.GetLength() << ":" << str << ",";
+}
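For reference, the framing implemented above is plain netstring: "<decimal length>:<payload>,". The following standalone sketch (plain C++, independent of the Icinga stream classes; function names are illustrative only) encodes and decodes a single netstring under the same rules the reader above enforces (non-empty length specifier, no leading zeros, at most 9 digits, ':' separator, ',' trailer):

```cpp
#include <cctype>
#include <stdexcept>
#include <string>

// Encode a payload as a netstring: "<decimal length>:<payload>,".
std::string EncodeNetString(const std::string& payload)
{
	return std::to_string(payload.size()) + ":" + payload + ",";
}

// Decode a single netstring, enforcing the same constraints as the reader above.
std::string DecodeNetString(const std::string& data)
{
	size_t colon = data.find(':');

	if (colon == std::string::npos || colon == 0 || colon > 9)
		throw std::invalid_argument("Invalid NetString (bad length specifier)");

	if (data[0] == '0' && colon > 1)
		throw std::invalid_argument("Invalid NetString (leading zero)");

	size_t len = 0;

	for (size_t i = 0; i < colon; i++) {
		if (!isdigit(static_cast<unsigned char>(data[i])))
			throw std::invalid_argument("Invalid NetString (missing :)");

		len = len * 10 + (data[i] - '0');
	}

	if (data.size() < colon + 1 + len + 1 || data[colon + 1 + len] != ',')
		throw std::invalid_argument("Invalid NetString (missing ,)");

	return data.substr(colon + 1, len);
}

// EncodeNetString("hello") == "5:hello,", DecodeNetString("5:hello,") == "hello".
```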
diff --git a/lib/base/netstring.hpp b/lib/base/netstring.hpp
new file mode 100644
index 0000000..e5ec051
--- /dev/null
+++ b/lib/base/netstring.hpp
@@ -0,0 +1,43 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef NETSTRING_H
+#define NETSTRING_H
+
+#include "base/i2-base.hpp"
+#include "base/stream.hpp"
+#include "base/tlsstream.hpp"
+#include <memory>
+#include <boost/asio/spawn.hpp>
+
+namespace icinga
+{
+
+class String;
+
+/**
+ * Helper functions for reading/writing messages in the netstring format.
+ *
+ * @see https://cr.yp.to/proto/netstrings.txt
+ *
+ * @ingroup base
+ */
+class NetString
+{
+public:
+ static StreamReadStatus ReadStringFromStream(const Stream::Ptr& stream, String *message, StreamReadContext& context,
+ bool may_wait = false, ssize_t maxMessageLength = -1);
+ static String ReadStringFromStream(const Shared<AsioTlsStream>::Ptr& stream, ssize_t maxMessageLength = -1);
+ static String ReadStringFromStream(const Shared<AsioTlsStream>::Ptr& stream,
+ boost::asio::yield_context yc, ssize_t maxMessageLength = -1);
+ static size_t WriteStringToStream(const Stream::Ptr& stream, const String& message);
+ static size_t WriteStringToStream(const Shared<AsioTlsStream>::Ptr& stream, const String& message);
+ static size_t WriteStringToStream(const Shared<AsioTlsStream>::Ptr& stream, const String& message, boost::asio::yield_context yc);
+ static void WriteStringToStream(std::ostream& stream, const String& message);
+
+private:
+ NetString();
+};
+
+}
+
+#endif /* NETSTRING_H */
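A short usage sketch of the std::ostream overload declared above, assuming the surrounding Icinga code base (this is not self-contained outside of it):

```cpp
#include "base/netstring.hpp"
#include <iostream>
#include <sstream>

using namespace icinga;

int main()
{
	std::ostringstream buf;

	// Writes "5:hello," into the buffer.
	NetString::WriteStringToStream(buf, "hello");

	std::cout << buf.str() << "\n";
	return 0;
}
```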
diff --git a/lib/base/networkstream.cpp b/lib/base/networkstream.cpp
new file mode 100644
index 0000000..57da507
--- /dev/null
+++ b/lib/base/networkstream.cpp
@@ -0,0 +1,81 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/networkstream.hpp"
+
+using namespace icinga;
+
+NetworkStream::NetworkStream(Socket::Ptr socket)
+ : m_Socket(std::move(socket)), m_Eof(false)
+{ }
+
+void NetworkStream::Close()
+{
+ Stream::Close();
+
+ m_Socket->Close();
+}
+
+/**
+ * Reads data from the stream.
+ *
+ * @param buffer The buffer where data should be stored. May be nullptr if you're
+ * not actually interested in the data.
+ * @param count The maximum number of bytes to read from the stream.
+ * @returns The number of bytes actually read.
+ */
+size_t NetworkStream::Read(void *buffer, size_t count, bool allow_partial)
+{
+ size_t rc;
+
+ ASSERT(allow_partial);
+
+ if (m_Eof)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Tried to read from closed socket."));
+
+ try {
+ rc = m_Socket->Read(buffer, count);
+ } catch (...) {
+ m_Eof = true;
+
+ throw;
+ }
+
+ if (rc == 0)
+ m_Eof = true;
+
+ return rc;
+}
+
+/**
+ * Writes data to the stream.
+ *
+ * @param buffer The data that is to be written.
+ * @param count The number of bytes to write.
+ * @exception runtime_error Fewer than count bytes could be written (short write).
+ */
+void NetworkStream::Write(const void *buffer, size_t count)
+{
+ size_t rc;
+
+ if (m_Eof)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Tried to write to closed socket."));
+
+ try {
+ rc = m_Socket->Write(buffer, count);
+ } catch (...) {
+ m_Eof = true;
+
+ throw;
+ }
+
+ if (rc < count) {
+ m_Eof = true;
+
+ BOOST_THROW_EXCEPTION(std::runtime_error("Short write for socket."));
+ }
+}
+
+bool NetworkStream::IsEof() const
+{
+ return m_Eof;
+}
diff --git a/lib/base/networkstream.hpp b/lib/base/networkstream.hpp
new file mode 100644
index 0000000..453d7ad
--- /dev/null
+++ b/lib/base/networkstream.hpp
@@ -0,0 +1,39 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef NETWORKSTREAM_H
+#define NETWORKSTREAM_H
+
+#include "base/i2-base.hpp"
+#include "base/stream.hpp"
+#include "base/socket.hpp"
+
+namespace icinga
+{
+
+/**
+ * A network stream. DEPRECATED - Use Boost ASIO instead.
+ *
+ * @ingroup base
+ */
+class NetworkStream final : public Stream
+{
+public:
+ DECLARE_PTR_TYPEDEFS(NetworkStream);
+
+ NetworkStream(Socket::Ptr socket);
+
+ size_t Read(void *buffer, size_t count, bool allow_partial = false) override;
+ void Write(const void *buffer, size_t count) override;
+
+ void Close() override;
+
+ bool IsEof() const override;
+
+private:
+ Socket::Ptr m_Socket;
+ bool m_Eof;
+};
+
+}
+
+#endif /* NETWORKSTREAM_H */
diff --git a/lib/base/number-script.cpp b/lib/base/number-script.cpp
new file mode 100644
index 0000000..0dcaca5
--- /dev/null
+++ b/lib/base/number-script.cpp
@@ -0,0 +1,25 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/number.hpp"
+#include "base/convert.hpp"
+#include "base/function.hpp"
+#include "base/functionwrapper.hpp"
+#include "base/scriptframe.hpp"
+
+using namespace icinga;
+
+static String NumberToString()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ return vframe->Self;
+}
+
+Object::Ptr Number::GetPrototype()
+{
+ static Dictionary::Ptr prototype = new Dictionary({
+ { "to_string", new Function("Number#to_string", NumberToString, {}, true) }
+ });
+
+ return prototype;
+}
+
diff --git a/lib/base/number.cpp b/lib/base/number.cpp
new file mode 100644
index 0000000..a336519
--- /dev/null
+++ b/lib/base/number.cpp
@@ -0,0 +1,9 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/number.hpp"
+#include "base/primitivetype.hpp"
+
+using namespace icinga;
+
+REGISTER_BUILTIN_TYPE(Number, Number::GetPrototype());
+
diff --git a/lib/base/number.hpp b/lib/base/number.hpp
new file mode 100644
index 0000000..dd5196f
--- /dev/null
+++ b/lib/base/number.hpp
@@ -0,0 +1,27 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef NUMBER_H
+#define NUMBER_H
+
+#include "base/i2-base.hpp"
+#include "base/object.hpp"
+
+namespace icinga {
+
+class Value;
+
+/**
+ * Number class.
+ */
+class Number
+{
+public:
+ static Object::Ptr GetPrototype();
+
+private:
+ Number();
+};
+
+}
+
+#endif /* NUMBER_H */
diff --git a/lib/base/object-packer.cpp b/lib/base/object-packer.cpp
new file mode 100644
index 0000000..123ddad
--- /dev/null
+++ b/lib/base/object-packer.cpp
@@ -0,0 +1,246 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/object-packer.hpp"
+#include "base/debug.hpp"
+#include "base/dictionary.hpp"
+#include "base/array.hpp"
+#include "base/objectlock.hpp"
+#include <algorithm>
+#include <climits>
+#include <cstdint>
+#include <string>
+#include <utility>
+
+using namespace icinga;
+
+union EndiannessDetector
+{
+ EndiannessDetector()
+ {
+ i = 1;
+ }
+
+ int i;
+ char buf[sizeof(int)];
+};
+
+static const EndiannessDetector l_EndiannessDetector;
+
+// Assumption: The compiler will optimize (away) if/else statements using this.
+#define MACHINE_LITTLE_ENDIAN (l_EndiannessDetector.buf[0])
+
+static void PackAny(const Value& value, std::string& builder);
+
+/**
+ * Swaps two bytes in place (simple helper used instead of std::swap()).
+ */
+static inline void SwapBytes(char& a, char& b)
+{
+ char c = a;
+ a = b;
+ b = c;
+}
+
+#if CHAR_MIN != 0
+union CharU2SConverter
+{
+ CharU2SConverter()
+ {
+ s = 0;
+ }
+
+ unsigned char u;
+ signed char s;
+};
+#endif
+
+/**
+ * Avoid implementation-defined overflows during unsigned to signed casts
+ */
+static inline char UIntToByte(unsigned i)
+{
+#if CHAR_MIN == 0
+ return i;
+#else
+ CharU2SConverter converter;
+
+ converter.u = i;
+ return converter.s;
+#endif
+}
+
+/**
+ * Append the given int as big-endian 64-bit unsigned int
+ */
+static inline void PackUInt64BE(uint_least64_t i, std::string& builder)
+{
+ char buf[8] = {
+ UIntToByte(i >> 56u),
+ UIntToByte((i >> 48u) & 255u),
+ UIntToByte((i >> 40u) & 255u),
+ UIntToByte((i >> 32u) & 255u),
+ UIntToByte((i >> 24u) & 255u),
+ UIntToByte((i >> 16u) & 255u),
+ UIntToByte((i >> 8u) & 255u),
+ UIntToByte(i & 255u)
+ };
+
+ builder.append((char*)buf, 8);
+}
+
+union Double2BytesConverter
+{
+ Double2BytesConverter()
+ {
+ buf[0] = 0;
+ buf[1] = 0;
+ buf[2] = 0;
+ buf[3] = 0;
+ buf[4] = 0;
+ buf[5] = 0;
+ buf[6] = 0;
+ buf[7] = 0;
+ }
+
+ double f;
+ char buf[8];
+};
+
+/**
+ * Append the given double as big-endian IEEE 754 binary64
+ */
+static inline void PackFloat64BE(double f, std::string& builder)
+{
+ Double2BytesConverter converter;
+
+ converter.f = f;
+
+ if (MACHINE_LITTLE_ENDIAN) {
+ SwapBytes(converter.buf[0], converter.buf[7]);
+ SwapBytes(converter.buf[1], converter.buf[6]);
+ SwapBytes(converter.buf[2], converter.buf[5]);
+ SwapBytes(converter.buf[3], converter.buf[4]);
+ }
+
+ builder.append((char*)converter.buf, 8);
+}
+
+/**
+ * Append the given string's length (BE uint64) and the string itself
+ */
+static inline void PackString(const String& string, std::string& builder)
+{
+ PackUInt64BE(string.GetLength(), builder);
+ builder += string.GetData();
+}
+
+/**
+ * Append the given array
+ */
+static inline void PackArray(const Array::Ptr& arr, std::string& builder)
+{
+ ObjectLock olock(arr);
+
+ builder += '\5';
+ PackUInt64BE(arr->GetLength(), builder);
+
+ for (const Value& value : arr) {
+ PackAny(value, builder);
+ }
+}
+
+/**
+ * Append the given dictionary
+ */
+static inline void PackDictionary(const Dictionary::Ptr& dict, std::string& builder)
+{
+ ObjectLock olock(dict);
+
+ builder += '\6';
+ PackUInt64BE(dict->GetLength(), builder);
+
+ for (const Dictionary::Pair& kv : dict) {
+ PackString(kv.first, builder);
+ PackAny(kv.second, builder);
+ }
+}
+
+/**
+ * Append any JSON-encodable value
+ */
+static void PackAny(const Value& value, std::string& builder)
+{
+ switch (value.GetType()) {
+ case ValueString:
+ builder += '\4';
+ PackString(value.Get<String>(), builder);
+ break;
+
+ case ValueNumber:
+ builder += '\3';
+ PackFloat64BE(value.Get<double>(), builder);
+ break;
+
+ case ValueBoolean:
+ builder += (value.ToBool() ? '\2' : '\1');
+ break;
+
+ case ValueEmpty:
+ builder += '\0';
+ break;
+
+ case ValueObject:
+ {
+ const Object::Ptr& obj = value.Get<Object::Ptr>();
+
+ Dictionary::Ptr dict = dynamic_pointer_cast<Dictionary>(obj);
+ if (dict) {
+ PackDictionary(dict, builder);
+ break;
+ }
+
+ Array::Ptr arr = dynamic_pointer_cast<Array>(obj);
+ if (arr) {
+ PackArray(arr, builder);
+ break;
+ }
+ }
+
+ builder += '\0';
+ break;
+
+ default:
+ VERIFY(!"Invalid variant type.");
+ }
+}
+
+/**
+ * Pack any JSON-encodable value to a BSON-similar structure suitable for consistent hashing
+ *
+ * Spec:
+ * null: 0x00
+ * false: 0x01
+ * true: 0x02
+ * number: 0x03 (ieee754_binary64_bigendian)payload
+ * string: 0x04 (uint64_bigendian)payload.length (char[])payload
+ * array: 0x05 (uint64_bigendian)payload.length (any[])payload
+ * object: 0x06 (uint64_bigendian)payload.length (keyvalue[])payload.sort()
+ *
+ * any: null|false|true|number|string|array|object
+ * keyvalue: (uint64_bigendian)key.length (char[])key (any)value
+ *
+ * Assumptions:
+ * - double is IEEE 754 binary64
+ * - all int types (signed and unsigned) and all float types share the same endianness
+ * - char is exactly 8 bits wide and one char is exactly one byte affected by the machine endianness
+ * - all input strings, arrays and dictionaries are at most 2^64-1 long
+ *
+ * If not, this function will silently produce invalid results.
+ */
+String icinga::PackObject(const Value& value)
+{
+ std::string builder;
+ PackAny(value, builder);
+
+ return std::move(builder);
+}
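To make the format description above concrete: packing the string "hi" produces the tag byte 0x04, the length 2 as a big-endian 64-bit integer, and the two raw characters (11 bytes in total). A minimal standalone sketch of that one encoding step, independent of the Icinga Value/String types (names are illustrative only):

```cpp
#include <cstdint>
#include <string>

// Append a 64-bit unsigned integer in big-endian byte order, mirroring PackUInt64BE() above.
static void AppendUInt64BE(uint64_t i, std::string& out)
{
	for (int shift = 56; shift >= 0; shift -= 8)
		out.push_back(static_cast<char>((i >> shift) & 0xffu));
}

// Pack a plain string according to the spec above: 0x04, length (uint64 BE), payload.
static std::string PackPlainString(const std::string& s)
{
	std::string out;

	out.push_back('\x04');
	AppendUInt64BE(s.size(), out);
	out += s;

	return out;
}

// PackPlainString("hi") yields "\x04" "\x00\x00\x00\x00\x00\x00\x00\x02" "hi".
```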
diff --git a/lib/base/object-packer.hpp b/lib/base/object-packer.hpp
new file mode 100644
index 0000000..00f7b99
--- /dev/null
+++ b/lib/base/object-packer.hpp
@@ -0,0 +1,18 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef OBJECT_PACKER
+#define OBJECT_PACKER
+
+#include "base/i2-base.hpp"
+
+namespace icinga
+{
+
+class String;
+class Value;
+
+String PackObject(const Value& value);
+
+}
+
+#endif /* OBJECT_PACKER */
diff --git a/lib/base/object-script.cpp b/lib/base/object-script.cpp
new file mode 100644
index 0000000..fff7df0
--- /dev/null
+++ b/lib/base/object-script.cpp
@@ -0,0 +1,45 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/object.hpp"
+#include "base/dictionary.hpp"
+#include "base/function.hpp"
+#include "base/functionwrapper.hpp"
+#include "base/scriptframe.hpp"
+
+using namespace icinga;
+
+static String ObjectToString()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Object::Ptr self = static_cast<Object::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ return self->ToString();
+}
+
+static void ObjectNotifyAttribute(const String& attribute)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Object::Ptr self = static_cast<Object::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ self->NotifyField(self->GetReflectionType()->GetFieldId(attribute));
+}
+
+static Object::Ptr ObjectClone()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Object::Ptr self = static_cast<Object::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ return self->Clone();
+}
+
+Object::Ptr Object::GetPrototype()
+{
+ static Dictionary::Ptr prototype = new Dictionary({
+ { "to_string", new Function("Object#to_string", ObjectToString, {}, true) },
+ { "notify_attribute", new Function("Object#notify_attribute", ObjectNotifyAttribute, { "attribute" }, false) },
+ { "clone", new Function("Object#clone", ObjectClone, {}, true) }
+ });
+
+ return prototype;
+}
+
diff --git a/lib/base/object.cpp b/lib/base/object.cpp
new file mode 100644
index 0000000..92a43b9
--- /dev/null
+++ b/lib/base/object.cpp
@@ -0,0 +1,275 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/object.hpp"
+#include "base/value.hpp"
+#include "base/dictionary.hpp"
+#include "base/primitivetype.hpp"
+#include "base/utility.hpp"
+#include "base/timer.hpp"
+#include "base/logger.hpp"
+#include "base/exception.hpp"
+#include <boost/lexical_cast.hpp>
+#include <boost/thread/recursive_mutex.hpp>
+#include <thread>
+
+using namespace icinga;
+
+DEFINE_TYPE_INSTANCE(Object);
+
+#ifdef I2_LEAK_DEBUG
+static std::mutex l_ObjectCountLock;
+static std::map<String, int> l_ObjectCounts;
+static Timer::Ptr l_ObjectCountTimer;
+#endif /* I2_LEAK_DEBUG */
+
+/**
+ * Constructor for the Object class.
+ */
+Object::Object()
+{
+ m_References.store(0);
+
+#ifdef I2_DEBUG
+ m_LockOwner.store(decltype(m_LockOwner.load())());
+#endif /* I2_DEBUG */
+}
+
+/**
+ * Destructor for the Object class.
+ */
+Object::~Object()
+{
+}
+
+/**
+ * Returns a string representation for the object.
+ */
+String Object::ToString() const
+{
+ return "Object of type '" + GetReflectionType()->GetName() + "'";
+}
+
+#ifdef I2_DEBUG
+/**
+ * Checks if the calling thread owns the lock on this object.
+ *
+ * @returns True if the calling thread owns the lock, false otherwise.
+ */
+bool Object::OwnsLock() const
+{
+ return m_LockOwner.load() == std::this_thread::get_id();
+}
+#endif /* I2_DEBUG */
+
+void Object::SetField(int id, const Value&, bool, const Value&)
+{
+ if (id == 0)
+ BOOST_THROW_EXCEPTION(std::runtime_error("Type field cannot be set."));
+ else
+ BOOST_THROW_EXCEPTION(std::runtime_error("Invalid field ID."));
+}
+
+Value Object::GetField(int id) const
+{
+ if (id == 0)
+ return GetReflectionType()->GetName();
+ else
+ BOOST_THROW_EXCEPTION(std::runtime_error("Invalid field ID."));
+}
+
+bool Object::HasOwnField(const String& field) const
+{
+ Type::Ptr type = GetReflectionType();
+
+ if (!type)
+ return false;
+
+ return type->GetFieldId(field) != -1;
+}
+
+bool Object::GetOwnField(const String& field, Value *result) const
+{
+ Type::Ptr type = GetReflectionType();
+
+ if (!type)
+ return false;
+
+ int tid = type->GetFieldId(field);
+
+ if (tid == -1)
+ return false;
+
+ *result = GetField(tid);
+ return true;
+}
+
+Value Object::GetFieldByName(const String& field, bool sandboxed, const DebugInfo& debugInfo) const
+{
+ Type::Ptr type = GetReflectionType();
+
+ if (!type)
+ return Empty;
+
+ int fid = type->GetFieldId(field);
+
+ if (fid == -1)
+ return GetPrototypeField(const_cast<Object *>(this), field, true, debugInfo);
+
+ if (sandboxed) {
+ Field fieldInfo = type->GetFieldInfo(fid);
+
+ if (fieldInfo.Attributes & FANoUserView)
+ BOOST_THROW_EXCEPTION(ScriptError("Accessing the field '" + field + "' for type '" + type->GetName() + "' is not allowed in sandbox mode.", debugInfo));
+ }
+
+ return GetField(fid);
+}
+
+void Object::SetFieldByName(const String& field, const Value& value, bool overrideFrozen, const DebugInfo& debugInfo)
+{
+ Type::Ptr type = GetReflectionType();
+
+ if (!type)
+ BOOST_THROW_EXCEPTION(ScriptError("Cannot set field on object.", debugInfo));
+
+ int fid = type->GetFieldId(field);
+
+ if (fid == -1)
+ BOOST_THROW_EXCEPTION(ScriptError("Attribute '" + field + "' does not exist.", debugInfo));
+
+ try {
+ SetField(fid, value);
+ } catch (const boost::bad_lexical_cast&) {
+ Field fieldInfo = type->GetFieldInfo(fid);
+ Type::Ptr ftype = Type::GetByName(fieldInfo.TypeName);
+ BOOST_THROW_EXCEPTION(ScriptError("Attribute '" + field + "' cannot be set to value of type '" + value.GetTypeName() + "', expected '" + ftype->GetName() + "'", debugInfo));
+ } catch (const std::bad_cast&) {
+ Field fieldInfo = type->GetFieldInfo(fid);
+ Type::Ptr ftype = Type::GetByName(fieldInfo.TypeName);
+ BOOST_THROW_EXCEPTION(ScriptError("Attribute '" + field + "' cannot be set to value of type '" + value.GetTypeName() + "', expected '" + ftype->GetName() + "'", debugInfo));
+ }
+}
+
+void Object::Validate(int types, const ValidationUtils& utils)
+{
+ /* Nothing to do here. */
+}
+
+void Object::ValidateField(int id, const Lazy<Value>& lvalue, const ValidationUtils& utils)
+{
+ /* Nothing to do here. */
+}
+
+void Object::NotifyField(int id, const Value& cookie)
+{
+ BOOST_THROW_EXCEPTION(std::runtime_error("Invalid field ID."));
+}
+
+Object::Ptr Object::NavigateField(int id) const
+{
+ BOOST_THROW_EXCEPTION(std::runtime_error("Invalid field ID."));
+}
+
+Object::Ptr Object::Clone() const
+{
+ BOOST_THROW_EXCEPTION(std::runtime_error("Object cannot be cloned."));
+}
+
+Type::Ptr Object::GetReflectionType() const
+{
+ return Object::TypeInstance;
+}
+
+Value icinga::GetPrototypeField(const Value& context, const String& field, bool not_found_error, const DebugInfo& debugInfo)
+{
+ Type::Ptr ctype = context.GetReflectionType();
+ Type::Ptr type = ctype;
+
+ do {
+ Object::Ptr object = type->GetPrototype();
+
+ if (object && object->HasOwnField(field))
+ return object->GetFieldByName(field, false, debugInfo);
+
+ type = type->GetBaseType();
+ } while (type);
+
+ if (not_found_error)
+ BOOST_THROW_EXCEPTION(ScriptError("Invalid field access (for value of type '" + ctype->GetName() + "'): '" + field + "'", debugInfo));
+ else
+ return Empty;
+}
+
+#ifdef I2_LEAK_DEBUG
+void icinga::TypeAddObject(Object *object)
+{
+ std::unique_lock<std::mutex> lock(l_ObjectCountLock);
+ String typeName = Utility::GetTypeName(typeid(*object));
+ l_ObjectCounts[typeName]++;
+}
+
+void icinga::TypeRemoveObject(Object *object)
+{
+ std::unique_lock<std::mutex> lock(l_ObjectCountLock);
+ String typeName = Utility::GetTypeName(typeid(*object));
+ l_ObjectCounts[typeName]--;
+}
+
+static void TypeInfoTimerHandler()
+{
+ std::unique_lock<std::mutex> lock(l_ObjectCountLock);
+
+ typedef std::map<String, int>::value_type kv_pair;
+ for (kv_pair& kv : l_ObjectCounts) {
+ if (kv.second == 0)
+ continue;
+
+ Log(LogInformation, "TypeInfo")
+ << kv.second << " " << kv.first << " objects";
+
+ kv.second = 0;
+ }
+}
+
+INITIALIZE_ONCE([]() {
+ l_ObjectCountTimer = Timer::Create();
+ l_ObjectCountTimer->SetInterval(10);
+ l_ObjectCountTimer->OnTimerExpired.connect([](const Timer * const&) { TypeInfoTimerHandler(); });
+ l_ObjectCountTimer->Start();
+});
+#endif /* I2_LEAK_DEBUG */
+
+void icinga::intrusive_ptr_add_ref(Object *object)
+{
+#ifdef I2_LEAK_DEBUG
+ if (object->m_References.fetch_add(1) == 0u)
+ TypeAddObject(object);
+#else /* I2_LEAK_DEBUG */
+ object->m_References.fetch_add(1);
+#endif /* I2_LEAK_DEBUG */
+}
+
+void icinga::intrusive_ptr_release(Object *object)
+{
+ auto previous (object->m_References.fetch_sub(1));
+
+ if (previous == 1u) {
+#ifdef I2_LEAK_DEBUG
+ TypeRemoveObject(object);
+#endif /* I2_LEAK_DEBUG */
+
+ delete object;
+ }
+}
+
+void icinga::DefaultObjectFactoryCheckArgs(const std::vector<Value>& args)
+{
+ if (!args.empty())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Constructor does not take any arguments."));
+}
+
+void icinga::RequireNotNullInternal(const intrusive_ptr<Object>& object, const char *description)
+{
+ if (!object)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Pointer must not be null: " + String(description)));
+}
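The reference counting above is what makes boost::intrusive_ptr<Object> work: intrusive_ptr_add_ref() and intrusive_ptr_release() are found via argument-dependent lookup, and the object deletes itself once the last reference is released. A minimal standalone sketch of the same pattern, using a hypothetical Counted class that is not part of the code base:

```cpp
#include <atomic>
#include <cstdint>
#include <boost/smart_ptr/intrusive_ptr.hpp>

class Counted
{
public:
	virtual ~Counted() = default;

private:
	std::atomic<uint_fast64_t> m_References{0};

	friend void intrusive_ptr_add_ref(Counted *object)
	{
		object->m_References.fetch_add(1);
	}

	friend void intrusive_ptr_release(Counted *object)
	{
		// fetch_sub() returns the previous value; 1 means this was the last reference.
		if (object->m_References.fetch_sub(1) == 1u)
			delete object;
	}
};

int main()
{
	boost::intrusive_ptr<Counted> a(new Counted());
	boost::intrusive_ptr<Counted> b = a; // bumps the counter to 2

	// Both pointers going out of scope drops the counter to 0 and deletes the object.
	return 0;
}
```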
diff --git a/lib/base/object.hpp b/lib/base/object.hpp
new file mode 100644
index 0000000..5a90cfa
--- /dev/null
+++ b/lib/base/object.hpp
@@ -0,0 +1,225 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef OBJECT_H
+#define OBJECT_H
+
+#include "base/i2-base.hpp"
+#include "base/debug.hpp"
+#include <boost/smart_ptr/intrusive_ptr.hpp>
+#include <atomic>
+#include <cstddef>
+#include <cstdint>
+#include <mutex>
+#include <thread>
+#include <vector>
+
+using boost::intrusive_ptr;
+using boost::dynamic_pointer_cast;
+using boost::static_pointer_cast;
+
+namespace icinga
+{
+
+class Value;
+class Object;
+class Type;
+class String;
+struct DebugInfo;
+class ValidationUtils;
+
+extern Value Empty;
+
+#define DECLARE_PTR_TYPEDEFS(klass) \
+ typedef intrusive_ptr<klass> Ptr
+
+#define IMPL_TYPE_LOOKUP_SUPER() \
+
+#define IMPL_TYPE_LOOKUP() \
+ static intrusive_ptr<Type> TypeInstance; \
+ virtual intrusive_ptr<Type> GetReflectionType() const override \
+ { \
+ return TypeInstance; \
+ }
+
+#define DECLARE_OBJECT(klass) \
+ DECLARE_PTR_TYPEDEFS(klass); \
+ IMPL_TYPE_LOOKUP();
+
+#define REQUIRE_NOT_NULL(ptr) RequireNotNullInternal(ptr, #ptr)
+
+void RequireNotNullInternal(const intrusive_ptr<Object>& object, const char *description);
+
+void DefaultObjectFactoryCheckArgs(const std::vector<Value>& args);
+
+template<typename T>
+intrusive_ptr<Object> DefaultObjectFactory(const std::vector<Value>& args)
+{
+ DefaultObjectFactoryCheckArgs(args);
+
+ return new T();
+}
+
+template<typename T>
+intrusive_ptr<Object> DefaultObjectFactoryVA(const std::vector<Value>& args)
+{
+ return new T(args);
+}
+
+typedef intrusive_ptr<Object> (*ObjectFactory)(const std::vector<Value>&);
+
+template<typename T, bool VA>
+struct TypeHelper
+{
+};
+
+template<typename T>
+struct TypeHelper<T, false>
+{
+ static ObjectFactory GetFactory()
+ {
+ return DefaultObjectFactory<T>;
+ }
+};
+
+template<typename T>
+struct TypeHelper<T, true>
+{
+ static ObjectFactory GetFactory()
+ {
+ return DefaultObjectFactoryVA<T>;
+ }
+};
+
+template<typename T>
+struct Lazy
+{
+ using Accessor = std::function<T ()>;
+
+ explicit Lazy(T value)
+ : m_Cached(true), m_Value(value)
+ { }
+
+ explicit Lazy(Accessor accessor)
+ : m_Accessor(accessor)
+ { }
+
+ template<typename U>
+ explicit Lazy(const Lazy<U>& other)
+ {
+ if (other.m_Cached) {
+ m_Accessor = Accessor();
+ m_Value = static_cast<T>(other.m_Value);
+ m_Cached = true;
+ } else {
+ auto accessor = other.m_Accessor;
+ m_Accessor = [accessor]() { return static_cast<T>(accessor()); };
+ m_Cached = false;
+ }
+ }
+
+ template<typename U>
+ operator Lazy<U>() const
+ {
+ if (m_Cached)
+ return Lazy<U>(static_cast<U>(m_Value));
+ else {
+ Accessor accessor = m_Accessor;
+ return Lazy<U>(static_cast<typename Lazy<U>::Accessor>([accessor]() { return static_cast<U>(accessor()); }));
+ }
+ }
+
+ const T& operator()() const
+ {
+ if (!m_Cached) {
+ m_Value = m_Accessor();
+ m_Cached = true;
+ }
+
+ return m_Value;
+ }
+
+private:
+ Accessor m_Accessor;
+ mutable bool m_Cached{false};
+ mutable T m_Value;
+
+ template<typename U>
+ friend struct Lazy;
+};
+
+/**
+ * Base class for all heap-allocated objects. At least one of its methods
+ * has to be virtual for RTTI to work.
+ *
+ * @ingroup base
+ */
+class Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(Object);
+
+ Object();
+ virtual ~Object();
+
+ virtual String ToString() const;
+
+ virtual intrusive_ptr<Type> GetReflectionType() const;
+
+ virtual void Validate(int types, const ValidationUtils& utils);
+
+ virtual void SetField(int id, const Value& value, bool suppress_events = false, const Value& cookie = Empty);
+ virtual Value GetField(int id) const;
+ virtual Value GetFieldByName(const String& field, bool sandboxed, const DebugInfo& debugInfo) const;
+ virtual void SetFieldByName(const String& field, const Value& value, bool overrideFrozen, const DebugInfo& debugInfo);
+ virtual bool HasOwnField(const String& field) const;
+ virtual bool GetOwnField(const String& field, Value *result) const;
+ virtual void ValidateField(int id, const Lazy<Value>& lvalue, const ValidationUtils& utils);
+ virtual void NotifyField(int id, const Value& cookie = Empty);
+ virtual Object::Ptr NavigateField(int id) const;
+
+#ifdef I2_DEBUG
+ bool OwnsLock() const;
+#endif /* I2_DEBUG */
+
+ static Object::Ptr GetPrototype();
+
+ virtual Object::Ptr Clone() const;
+
+ static intrusive_ptr<Type> TypeInstance;
+
+private:
+ Object(const Object& other) = delete;
+ Object& operator=(const Object& rhs) = delete;
+
+ std::atomic<uint_fast64_t> m_References;
+ mutable std::recursive_mutex m_Mutex;
+
+#ifdef I2_DEBUG
+ mutable std::atomic<std::thread::id> m_LockOwner;
+ mutable size_t m_LockCount = 0;
+#endif /* I2_DEBUG */
+
+ friend struct ObjectLock;
+
+ friend void intrusive_ptr_add_ref(Object *object);
+ friend void intrusive_ptr_release(Object *object);
+};
+
+Value GetPrototypeField(const Value& context, const String& field, bool not_found_error, const DebugInfo& debugInfo);
+
+void TypeAddObject(Object *object);
+void TypeRemoveObject(Object *object);
+
+void intrusive_ptr_add_ref(Object *object);
+void intrusive_ptr_release(Object *object);
+
+template<typename T>
+class ObjectImpl
+{
+};
+
+}
+
+#endif /* OBJECT_H */
+
+#include "base/type.hpp"
diff --git a/lib/base/objectlock.cpp b/lib/base/objectlock.cpp
new file mode 100644
index 0000000..fc0c7c6
--- /dev/null
+++ b/lib/base/objectlock.cpp
@@ -0,0 +1,55 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/objectlock.hpp"
+#include <thread>
+
+using namespace icinga;
+
+#define I2MUTEX_UNLOCKED 0
+#define I2MUTEX_LOCKED 1
+
+ObjectLock::~ObjectLock()
+{
+ Unlock();
+}
+
+ObjectLock::ObjectLock(const Object::Ptr& object)
+ : ObjectLock(object.get())
+{
+}
+
+ObjectLock::ObjectLock(const Object *object)
+ : m_Object(object), m_Locked(false)
+{
+ if (m_Object)
+ Lock();
+}
+
+void ObjectLock::Lock()
+{
+ ASSERT(!m_Locked && m_Object);
+
+ m_Object->m_Mutex.lock();
+
+ m_Locked = true;
+
+#ifdef I2_DEBUG
+ if (++m_Object->m_LockCount == 1u) {
+ m_Object->m_LockOwner.store(std::this_thread::get_id());
+ }
+#endif /* I2_DEBUG */
+}
+
+void ObjectLock::Unlock()
+{
+#ifdef I2_DEBUG
+ if (m_Locked && !--m_Object->m_LockCount) {
+ m_Object->m_LockOwner.store(decltype(m_Object->m_LockOwner.load())());
+ }
+#endif /* I2_DEBUG */
+
+ if (m_Locked) {
+ m_Object->m_Mutex.unlock();
+ m_Locked = false;
+ }
+}
diff --git a/lib/base/objectlock.hpp b/lib/base/objectlock.hpp
new file mode 100644
index 0000000..8e98641
--- /dev/null
+++ b/lib/base/objectlock.hpp
@@ -0,0 +1,35 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef OBJECTLOCK_H
+#define OBJECTLOCK_H
+
+#include "base/object.hpp"
+
+namespace icinga
+{
+
+/**
+ * A scoped lock for Objects.
+ */
+struct ObjectLock
+{
+public:
+ ObjectLock(const Object::Ptr& object);
+ ObjectLock(const Object *object);
+
+ ObjectLock(const ObjectLock&) = delete;
+ ObjectLock& operator=(const ObjectLock&) = delete;
+
+ ~ObjectLock();
+
+ void Lock();
+ void Unlock();
+
+private:
+ const Object *m_Object{nullptr};
+ bool m_Locked{false};
+};
+
+}
+
+#endif /* OBJECTLOCK_H */
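ObjectLock is a scoped (RAII) lock over an Object's recursive mutex: the constructor locks, the destructor unlocks, and in debug builds the owning thread is tracked for OwnsLock(). A short usage sketch, assuming the surrounding Icinga code base (the same pattern is used when iterating shared containers, e.g. in object-packer.cpp above):

```cpp
#include "base/dictionary.hpp"
#include "base/logger.hpp"
#include "base/objectlock.hpp"

using namespace icinga;

void LogKeys(const Dictionary::Ptr& dict)
{
	ObjectLock olock(dict); // held for the rest of the scope

	// Iterating is safe while the lock blocks concurrent modifications.
	for (const Dictionary::Pair& kv : dict) {
		Log(LogInformation, "Example") << kv.first;
	}
}
```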
diff --git a/lib/base/objecttype.cpp b/lib/base/objecttype.cpp
new file mode 100644
index 0000000..b871555
--- /dev/null
+++ b/lib/base/objecttype.cpp
@@ -0,0 +1,57 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/objecttype.hpp"
+#include "base/initialize.hpp"
+#include <boost/throw_exception.hpp>
+
+using namespace icinga;
+
+/* Ensure that the priority is lower than the basic namespace initialization in scriptframe.cpp. */
+INITIALIZE_ONCE_WITH_PRIORITY([]() {
+ Type::Ptr type = new ObjectType();
+ type->SetPrototype(Object::GetPrototype());
+ Type::Register(type);
+ Object::TypeInstance = type;
+}, InitializePriority::RegisterObjectType);
+
+String ObjectType::GetName() const
+{
+ return "Object";
+}
+
+Type::Ptr ObjectType::GetBaseType() const
+{
+ return nullptr;
+}
+
+int ObjectType::GetAttributes() const
+{
+ return 0;
+}
+
+int ObjectType::GetFieldId(const String& name) const
+{
+ if (name == "type")
+ return 0;
+ else
+ return -1;
+}
+
+Field ObjectType::GetFieldInfo(int id) const
+{
+ if (id == 0)
+ return {1, "String", "type", nullptr, nullptr, 0, 0};
+ else
+ BOOST_THROW_EXCEPTION(std::runtime_error("Invalid field ID."));
+}
+
+int ObjectType::GetFieldCount() const
+{
+ return 1;
+}
+
+ObjectFactory ObjectType::GetFactory() const
+{
+ return DefaultObjectFactory<Object>;
+}
+
diff --git a/lib/base/objecttype.hpp b/lib/base/objecttype.hpp
new file mode 100644
index 0000000..0db715e
--- /dev/null
+++ b/lib/base/objecttype.hpp
@@ -0,0 +1,29 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef OBJECTTYPE_H
+#define OBJECTTYPE_H
+
+#include "base/i2-base.hpp"
+#include "base/type.hpp"
+#include "base/initialize.hpp"
+
+namespace icinga
+{
+
+class ObjectType final : public Type
+{
+public:
+ String GetName() const override;
+ Type::Ptr GetBaseType() const override;
+ int GetAttributes() const override;
+ int GetFieldId(const String& name) const override;
+ Field GetFieldInfo(int id) const override;
+ int GetFieldCount() const override;
+
+protected:
+ ObjectFactory GetFactory() const override;
+};
+
+}
+
+#endif /* OBJECTTYPE_H */
diff --git a/lib/base/perfdatavalue.cpp b/lib/base/perfdatavalue.cpp
new file mode 100644
index 0000000..60a39e4
--- /dev/null
+++ b/lib/base/perfdatavalue.cpp
@@ -0,0 +1,395 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/perfdatavalue.hpp"
+#include "base/perfdatavalue-ti.cpp"
+#include "base/convert.hpp"
+#include "base/exception.hpp"
+#include "base/logger.hpp"
+#include "base/function.hpp"
+#include <boost/algorithm/string.hpp>
+#include <cmath>
+#include <stdexcept>
+#include <string>
+#include <unordered_map>
+#include <utility>
+
+using namespace icinga;
+
+REGISTER_TYPE(PerfdataValue);
+REGISTER_FUNCTION(System, parse_performance_data, PerfdataValue::Parse, "perfdata");
+
+struct UoM
+{
+ double Factor;
+ const char* Out;
+};
+
+typedef std::unordered_map<std::string /* in */, UoM> UoMs;
+typedef std::unordered_multimap<std::string /* in */, UoM> DupUoMs;
+
+static const UoMs l_CsUoMs (([]() -> UoMs {
+ DupUoMs uoms ({
+ // Misc:
+ { "", { 1, "" } },
+ { "%", { 1, "percent" } },
+ { "c", { 1, "" } },
+ { "C", { 1, "degrees-celsius" } }
+ });
+
+ {
+ // Data (rate):
+
+ struct { const char* Char; int Power; } prefixes[] = {
+ { "k", 1 }, { "K", 1 },
+ { "m", 2 }, { "M", 2 },
+ { "g", 3 }, { "G", 3 },
+ { "t", 4 }, { "T", 4 },
+ { "p", 5 }, { "P", 5 },
+ { "e", 6 }, { "E", 6 },
+ { "z", 7 }, { "Z", 7 },
+ { "y", 8 }, { "Y", 8 }
+ };
+
+ struct { const char* Char; double Factor; } siIecs[] = {
+ { "", 1000 },
+ { "i", 1024 }, { "I", 1024 }
+ };
+
+ struct { const char *In, *Out; } bases[] = {
+ { "b", "bits" },
+ { "B", "bytes" }
+ };
+
+ for (auto base : bases) {
+ uoms.emplace(base.In, UoM{1, base.Out});
+ }
+
+ for (auto prefix : prefixes) {
+ for (auto siIec : siIecs) {
+ auto factor (pow(siIec.Factor, prefix.Power));
+
+ for (auto base : bases) {
+ uoms.emplace(
+ std::string(prefix.Char) + siIec.Char + base.In,
+ UoM{factor, base.Out}
+ );
+ }
+ }
+ }
+ }
+
+ {
+ // Energy:
+
+ struct { const char* Char; int Power; } prefixes[] = {
+ { "n", -3 }, { "N", -3 },
+ { "u", -2 }, { "U", -2 },
+ { "m", -1 },
+ { "", 0 },
+ { "k", 1 }, { "K", 1 },
+ { "M", 2 },
+ { "g", 3 }, { "G", 3 },
+ { "t", 4 }, { "T", 4 },
+ { "p", 5 }, { "P", 5 },
+ { "e", 6 }, { "E", 6 },
+ { "z", 7 }, { "Z", 7 },
+ { "y", 8 }, { "Y", 8 }
+ };
+
+ {
+ struct { const char* Ins[2]; const char* Out; } bases[] = {
+ { { "a", "A" }, "amperes" },
+ { { "o", "O" }, "ohms" },
+ { { "v", "V" }, "volts" },
+ { { "w", "W" }, "watts" }
+ };
+
+ for (auto prefix : prefixes) {
+ auto factor (pow(1000.0, prefix.Power));
+
+ for (auto base : bases) {
+ for (auto b : base.Ins) {
+ uoms.emplace(std::string(prefix.Char) + b, UoM{factor, base.Out});
+ }
+ }
+ }
+ }
+
+ struct { const char* Char; double Factor; } suffixes[] = {
+ { "s", 1 }, { "S", 1 },
+ { "m", 60 }, { "M", 60 },
+ { "h", 60 * 60 }, { "H", 60 * 60 }
+ };
+
+ struct { const char* Ins[2]; double Factor; const char* Out; } bases[] = {
+ { { "a", "A" }, 1, "ampere-seconds" },
+ { { "w", "W" }, 60 * 60, "watt-hours" }
+ };
+
+ for (auto prefix : prefixes) {
+ auto factor (pow(1000.0, prefix.Power));
+
+ for (auto suffix : suffixes) {
+ auto timeFactor (factor * suffix.Factor);
+
+ for (auto& base : bases) {
+ auto baseFactor (timeFactor / base.Factor);
+
+ for (auto b : base.Ins) {
+ uoms.emplace(
+ std::string(prefix.Char) + b + suffix.Char,
+ UoM{baseFactor, base.Out}
+ );
+ }
+ }
+ }
+ }
+ }
+
+ UoMs uniqUoms;
+
+ for (auto& uom : uoms) {
+ if (!uniqUoms.emplace(uom).second) {
+ throw std::logic_error("Duplicate case-sensitive UoM detected: " + uom.first);
+ }
+ }
+
+ return uniqUoms;
+})());
+
+static const UoMs l_CiUoMs (([]() -> UoMs {
+ DupUoMs uoms ({
+ // Time:
+ { "ns", { 1.0 / 1000 / 1000 / 1000, "seconds" } },
+ { "us", { 1.0 / 1000 / 1000, "seconds" } },
+ { "ms", { 1.0 / 1000, "seconds" } },
+ { "s", { 1, "seconds" } },
+ { "m", { 60, "seconds" } },
+ { "h", { 60 * 60, "seconds" } },
+ { "d", { 60 * 60 * 24, "seconds" } },
+
+ // Mass:
+ { "ng", { 1.0 / 1000 / 1000 / 1000, "grams" } },
+ { "ug", { 1.0 / 1000 / 1000, "grams" } },
+ { "mg", { 1.0 / 1000, "grams" } },
+ { "g", { 1, "grams" } },
+ { "kg", { 1000, "grams" } },
+ { "t", { 1000 * 1000, "grams" } },
+
+ // Volume:
+ { "ml", { 1.0 / 1000, "liters" } },
+ { "l", { 1, "liters" } },
+ { "hl", { 100, "liters" } },
+
+ // Misc:
+ { "packets", { 1, "packets" } },
+ { "lm", { 1, "lumens" } },
+ { "dbm", { 1, "decibel-milliwatts" } },
+ { "f", { 1, "degrees-fahrenheit" } },
+ { "k", { 1, "degrees-kelvin" } }
+ });
+
+ UoMs uniqUoms;
+
+ for (auto& uom : uoms) {
+ if (!uniqUoms.emplace(uom).second) {
+ throw std::logic_error("Duplicate case-insensitive UoM detected: " + uom.first);
+ }
+ }
+
+ for (auto& uom : l_CsUoMs) {
+ auto input (uom.first);
+ boost::algorithm::to_lower(input);
+
+ auto pos (uoms.find(input));
+
+ if (pos != uoms.end()) {
+ throw std::logic_error("Duplicate case-sensitive/case-insensitive UoM detected: " + pos->first);
+ }
+ }
+
+ return uniqUoms;
+})());
+
+PerfdataValue::PerfdataValue(const String& label, double value, bool counter,
+ const String& unit, const Value& warn, const Value& crit, const Value& min,
+ const Value& max)
+{
+ SetLabel(label, true);
+ SetValue(value, true);
+ SetCounter(counter, true);
+ SetUnit(unit, true);
+ SetWarn(warn, true);
+ SetCrit(crit, true);
+ SetMin(min, true);
+ SetMax(max, true);
+}
+
+PerfdataValue::Ptr PerfdataValue::Parse(const String& perfdata)
+{
+ size_t eqp = perfdata.FindLastOf('=');
+
+ if (eqp == String::NPos)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid performance data value: " + perfdata));
+
+ String label = perfdata.SubStr(0, eqp);
+
+ if (label.GetLength() > 2 && label[0] == '\'' && label[label.GetLength() - 1] == '\'')
+ label = label.SubStr(1, label.GetLength() - 2);
+
+ size_t spq = perfdata.FindFirstOf(' ', eqp);
+
+ if (spq == String::NPos)
+ spq = perfdata.GetLength();
+
+ String valueStr = perfdata.SubStr(eqp + 1, spq - eqp - 1);
+ std::vector<String> tokens = valueStr.Split(";");
+
+ if (valueStr.FindFirstOf(',') != String::NPos || tokens.empty()) {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid performance data value: " + perfdata));
+ }
+
+ // Find the position where to split value and unit. Possible values of tokens[0] include:
+ // "1000", "1.0", "1.", "-.1", "+1", "1e10", "1GB", "1e10GB", "1e10EB", "1E10EB", "1.5GB", "1.GB", "+1.E-1EW"
+ // Consider everything up to and including the last digit or decimal point as part of the value.
+ size_t pos = tokens[0].FindLastOf("0123456789.");
+ if (pos != String::NPos) {
+ pos++;
+ }
+
+ double value = Convert::ToDouble(tokens[0].SubStr(0, pos));
+
+ bool counter = false;
+ String unit;
+ Value warn, crit, min, max;
+
+ if (pos != String::NPos)
+ unit = tokens[0].SubStr(pos, String::NPos);
+
+ double base;
+
+ {
+ auto uom (l_CsUoMs.find(unit.GetData()));
+
+ if (uom == l_CsUoMs.end()) {
+ auto ciUnit (unit.ToLower());
+ auto uom (l_CiUoMs.find(ciUnit.GetData()));
+
+ if (uom == l_CiUoMs.end()) {
+ Log(LogDebug, "PerfdataValue")
+ << "Invalid performance data unit: " << unit;
+
+ unit = "";
+ base = 1.0;
+ } else {
+ unit = uom->second.Out;
+ base = uom->second.Factor;
+ }
+ } else {
+ unit = uom->second.Out;
+ base = uom->second.Factor;
+ }
+ }
+
+ if (unit == "c") {
+ counter = true;
+ }
+
+ warn = ParseWarnCritMinMaxToken(tokens, 1, "warning");
+ crit = ParseWarnCritMinMaxToken(tokens, 2, "critical");
+ min = ParseWarnCritMinMaxToken(tokens, 3, "minimum");
+ max = ParseWarnCritMinMaxToken(tokens, 4, "maximum");
+
+ value = value * base;
+
+ if (!warn.IsEmpty())
+ warn = warn * base;
+
+ if (!crit.IsEmpty())
+ crit = crit * base;
+
+ if (!min.IsEmpty())
+ min = min * base;
+
+ if (!max.IsEmpty())
+ max = max * base;
+
+ return new PerfdataValue(label, value, counter, unit, warn, crit, min, max);
+}
+
+static const std::unordered_map<std::string, const char*> l_FormatUoMs ({
+ { "ampere-seconds", "As" },
+ { "amperes", "A" },
+ { "bits", "b" },
+ { "bytes", "B" },
+ { "decibel-milliwatts", "dBm" },
+ { "degrees-celsius", "C" },
+ { "degrees-fahrenheit", "F" },
+ { "degrees-kelvin", "K" },
+ { "grams", "g" },
+ { "liters", "l" },
+ { "lumens", "lm" },
+ { "ohms", "O" },
+ { "percent", "%" },
+ { "seconds", "s" },
+ { "volts", "V" },
+ { "watt-hours", "Wh" },
+ { "watts", "W" }
+});
+
+String PerfdataValue::Format() const
+{
+ std::ostringstream result;
+
+ if (GetLabel().FindFirstOf(" ") != String::NPos)
+ result << "'" << GetLabel() << "'";
+ else
+ result << GetLabel();
+
+ result << "=" << Convert::ToString(GetValue());
+
+ String unit;
+
+ if (GetCounter()) {
+ unit = "c";
+ } else {
+ auto myUnit (GetUnit());
+ auto uom (l_FormatUoMs.find(myUnit.GetData()));
+
+ if (uom != l_FormatUoMs.end()) {
+ unit = uom->second;
+ }
+ }
+
+ result << unit;
+
+ if (!GetWarn().IsEmpty()) {
+ result << ";" << Convert::ToString(GetWarn());
+
+ if (!GetCrit().IsEmpty()) {
+ result << ";" << Convert::ToString(GetCrit());
+
+ if (!GetMin().IsEmpty()) {
+ result << ";" << Convert::ToString(GetMin());
+
+ if (!GetMax().IsEmpty()) {
+ result << ";" << Convert::ToString(GetMax());
+ }
+ }
+ }
+ }
+
+ return result.str();
+}
+
+Value PerfdataValue::ParseWarnCritMinMaxToken(const std::vector<String>& tokens, std::vector<String>::size_type index, const String& description)
+{
+ if (tokens.size() > index && tokens[index] != "U" && tokens[index] != "" && tokens[index].FindFirstNotOf("+-0123456789.eE") == String::NPos)
+ return Convert::ToDouble(tokens[index]);
+ else {
+ if (tokens.size() > index && tokens[index] != "")
+ Log(LogDebug, "PerfdataValue")
+ << "Ignoring unsupported perfdata " << description << " range, value: '" << tokens[index] << "'.";
+ return Empty;
+ }
+}
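The parser above splits a token such as "1e10GB" by treating everything up to and including the last digit or decimal point as the numeric value and the remainder as the unit. A standalone sketch of just that split (plain C++, illustrative only; the real parser additionally normalizes the unit via the UoM tables):

```cpp
#include <iostream>
#include <string>
#include <utility>

// Split a perfdata token into its numeric part and its unit, using the same
// rule as PerfdataValue::Parse(): everything up to and including the last
// digit or decimal point belongs to the value.
std::pair<std::string, std::string> SplitValueAndUnit(const std::string& token)
{
	size_t pos = token.find_last_of("0123456789.");

	if (pos == std::string::npos)
		return { "", token };

	return { token.substr(0, pos + 1), token.substr(pos + 1) };
}

int main()
{
	auto split = SplitValueAndUnit("1e10GB");
	std::cout << split.first << " / " << split.second << "\n"; // prints: 1e10 / GB
	return 0;
}
```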
diff --git a/lib/base/perfdatavalue.hpp b/lib/base/perfdatavalue.hpp
new file mode 100644
index 0000000..05b2c34
--- /dev/null
+++ b/lib/base/perfdatavalue.hpp
@@ -0,0 +1,38 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef PERFDATAVALUE_H
+#define PERFDATAVALUE_H
+
+#include "base/i2-base.hpp"
+#include "base/perfdatavalue-ti.hpp"
+
+namespace icinga
+{
+
+/**
+ * A performance data value.
+ *
+ * @ingroup base
+ */
+class PerfdataValue final : public ObjectImpl<PerfdataValue>
+{
+public:
+ DECLARE_OBJECT(PerfdataValue);
+
+ PerfdataValue() = default;
+
+ PerfdataValue(const String& label, double value, bool counter = false, const String& unit = "",
+ const Value& warn = Empty, const Value& crit = Empty,
+ const Value& min = Empty, const Value& max = Empty);
+
+ static PerfdataValue::Ptr Parse(const String& perfdata);
+ String Format() const;
+
+private:
+ static Value ParseWarnCritMinMaxToken(const std::vector<String>& tokens,
+ std::vector<String>::size_type index, const String& description);
+};
+
+}
+
+#endif /* PERFDATAVALUE_H */
diff --git a/lib/base/perfdatavalue.ti b/lib/base/perfdatavalue.ti
new file mode 100644
index 0000000..b2692e9
--- /dev/null
+++ b/lib/base/perfdatavalue.ti
@@ -0,0 +1,20 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+library base;
+
+namespace icinga
+{
+
+class PerfdataValue
+{
+ [state] String label;
+ [state] double value;
+ [state] bool counter;
+ [state] String unit;
+ [state] Value crit;
+ [state] Value warn;
+ [state] Value min;
+ [state] Value max;
+};
+
+}
diff --git a/lib/base/primitivetype.cpp b/lib/base/primitivetype.cpp
new file mode 100644
index 0000000..10286c7
--- /dev/null
+++ b/lib/base/primitivetype.cpp
@@ -0,0 +1,64 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/primitivetype.hpp"
+#include "base/dictionary.hpp"
+
+using namespace icinga;
+
+PrimitiveType::PrimitiveType(String name, String base, const ObjectFactory& factory)
+ : m_Name(std::move(name)), m_Base(std::move(base)), m_Factory(factory)
+{ }
+
+String PrimitiveType::GetName() const
+{
+ return m_Name;
+}
+
+Type::Ptr PrimitiveType::GetBaseType() const
+{
+ if (m_Base == "None")
+ return nullptr;
+ else
+ return Type::GetByName(m_Base);
+}
+
+int PrimitiveType::GetAttributes() const
+{
+ return 0;
+}
+
+int PrimitiveType::GetFieldId(const String& name) const
+{
+ Type::Ptr base = GetBaseType();
+
+ if (base)
+ return base->GetFieldId(name);
+ else
+ return -1;
+}
+
+Field PrimitiveType::GetFieldInfo(int id) const
+{
+ Type::Ptr base = GetBaseType();
+
+ if (base)
+ return base->GetFieldInfo(id);
+ else
+ throw std::runtime_error("Invalid field ID.");
+}
+
+int PrimitiveType::GetFieldCount() const
+{
+ Type::Ptr base = GetBaseType();
+
+ if (base)
+ return Object::TypeInstance->GetFieldCount();
+ else
+ return 0;
+}
+
+ObjectFactory PrimitiveType::GetFactory() const
+{
+ return m_Factory;
+}
+
diff --git a/lib/base/primitivetype.hpp b/lib/base/primitivetype.hpp
new file mode 100644
index 0000000..439e20f
--- /dev/null
+++ b/lib/base/primitivetype.hpp
@@ -0,0 +1,62 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef PRIMITIVETYPE_H
+#define PRIMITIVETYPE_H
+
+#include "base/i2-base.hpp"
+#include "base/type.hpp"
+#include "base/initialize.hpp"
+
+namespace icinga
+{
+
+class PrimitiveType final : public Type
+{
+public:
+ PrimitiveType(String name, String base, const ObjectFactory& factory = ObjectFactory());
+
+ String GetName() const override;
+ Type::Ptr GetBaseType() const override;
+ int GetAttributes() const override;
+ int GetFieldId(const String& name) const override;
+ Field GetFieldInfo(int id) const override;
+ int GetFieldCount() const override;
+
+protected:
+ ObjectFactory GetFactory() const override;
+
+private:
+ String m_Name;
+ String m_Base;
+ ObjectFactory m_Factory;
+};
+
+/* Ensure that the priority is lower than the basic namespace initialization in scriptframe.cpp. */
+#define REGISTER_BUILTIN_TYPE(type, prototype) \
+ INITIALIZE_ONCE_WITH_PRIORITY([]() { \
+ icinga::Type::Ptr t = new PrimitiveType(#type, "None"); \
+ t->SetPrototype(prototype); \
+ icinga::Type::Register(t); \
+ }, InitializePriority::RegisterBuiltinTypes)
+
+#define REGISTER_PRIMITIVE_TYPE_FACTORY(type, base, prototype, factory) \
+ INITIALIZE_ONCE_WITH_PRIORITY([]() { \
+ icinga::Type::Ptr t = new PrimitiveType(#type, #base, factory); \
+ t->SetPrototype(prototype); \
+ icinga::Type::Register(t); \
+ type::TypeInstance = t; \
+ }, InitializePriority::RegisterPrimitiveTypes); \
+ DEFINE_TYPE_INSTANCE(type)
+
+#define REGISTER_PRIMITIVE_TYPE(type, base, prototype) \
+ REGISTER_PRIMITIVE_TYPE_FACTORY(type, base, prototype, DefaultObjectFactory<type>)
+
+#define REGISTER_PRIMITIVE_TYPE_VA(type, base, prototype) \
+ REGISTER_PRIMITIVE_TYPE_FACTORY(type, base, prototype, DefaultObjectFactoryVA<type>)
+
+#define REGISTER_PRIMITIVE_TYPE_NOINST(type, base, prototype) \
+ REGISTER_PRIMITIVE_TYPE_FACTORY(type, base, prototype, nullptr)
+
+}
+
+#endif /* PRIMITIVETYPE_H */
diff --git a/lib/base/process.cpp b/lib/base/process.cpp
new file mode 100644
index 0000000..d4246a6
--- /dev/null
+++ b/lib/base/process.cpp
@@ -0,0 +1,1207 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/process.hpp"
+#include "base/exception.hpp"
+#include "base/convert.hpp"
+#include "base/array.hpp"
+#include "base/objectlock.hpp"
+#include "base/utility.hpp"
+#include "base/initialize.hpp"
+#include "base/logger.hpp"
+#include "base/utility.hpp"
+#include "base/scriptglobal.hpp"
+#include "base/json.hpp"
+#include <boost/algorithm/string/join.hpp>
+#include <boost/thread/once.hpp>
+#include <thread>
+#include <iostream>
+
+#ifndef _WIN32
+# include <execvpe.h>
+# include <poll.h>
+# include <string.h>
+
+# ifndef __APPLE__
+extern char **environ;
+# else /* __APPLE__ */
+# include <crt_externs.h>
+# define environ (*_NSGetEnviron())
+# endif /* __APPLE__ */
+#endif /* _WIN32 */
+
+using namespace icinga;
+
+#define IOTHREADS 4
+
+static std::mutex l_ProcessMutex[IOTHREADS];
+static std::map<Process::ProcessHandle, Process::Ptr> l_Processes[IOTHREADS];
+#ifdef _WIN32
+static HANDLE l_Events[IOTHREADS];
+#else /* _WIN32 */
+static int l_EventFDs[IOTHREADS][2];
+static std::map<Process::ConsoleHandle, Process::ProcessHandle> l_FDs[IOTHREADS];
+
+static std::mutex l_ProcessControlMutex;
+static int l_ProcessControlFD = -1;
+static pid_t l_ProcessControlPID;
+#endif /* _WIN32 */
+static boost::once_flag l_ProcessOnceFlag = BOOST_ONCE_INIT;
+static boost::once_flag l_SpawnHelperOnceFlag = BOOST_ONCE_INIT;
+
+Process::Process(Process::Arguments arguments, Dictionary::Ptr extraEnvironment)
+ : m_Arguments(std::move(arguments)), m_ExtraEnvironment(std::move(extraEnvironment)),
+ m_Timeout(600)
+#ifdef _WIN32
+ , m_ReadPending(false), m_ReadFailed(false), m_Overlapped()
+#else /* _WIN32 */
+ , m_SentSigterm(false)
+#endif /* _WIN32 */
+ , m_AdjustPriority(false), m_ResultAvailable(false)
+{
+#ifdef _WIN32
+ m_Overlapped.hEvent = CreateEvent(nullptr, TRUE, FALSE, nullptr);
+#endif /* _WIN32 */
+}
+
+Process::~Process()
+{
+#ifdef _WIN32
+ CloseHandle(m_Overlapped.hEvent);
+#endif /* _WIN32 */
+}
+
+#ifndef _WIN32
+static Value ProcessSpawnImpl(struct msghdr *msgh, const Dictionary::Ptr& request)
+{
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
+
+ if (cmsg == nullptr || cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_len != CMSG_LEN(sizeof(int) * 3)) {
+ std::cerr << "Invalid 'spawn' request: FDs missing" << std::endl;
+ return Empty;
+ }
+
+ auto *fds = (int *)CMSG_DATA(cmsg);
+
+ Array::Ptr arguments = request->Get("arguments");
+ Dictionary::Ptr extraEnvironment = request->Get("extraEnvironment");
+ bool adjustPriority = request->Get("adjustPriority");
+
+ // build argv
+ auto **argv = new char *[arguments->GetLength() + 1];
+
+ for (unsigned int i = 0; i < arguments->GetLength(); i++) {
+ String arg = arguments->Get(i);
+ argv[i] = strdup(arg.CStr());
+ }
+
+ argv[arguments->GetLength()] = nullptr;
+
+ // build envp
+ int envc = 0;
+
+ /* count existing environment variables */
+ while (environ[envc])
+ envc++;
+
+ auto **envp = new char *[envc + (extraEnvironment ? extraEnvironment->GetLength() : 0) + 2];
+ const char* lcnumeric = "LC_NUMERIC=";
+ const char* notifySocket = "NOTIFY_SOCKET=";
+ int j = 0;
+
+ for (int i = 0; i < envc; i++) {
+ if (strncmp(environ[i], lcnumeric, strlen(lcnumeric)) == 0) {
+ continue;
+ }
+
+ if (strncmp(environ[i], notifySocket, strlen(notifySocket)) == 0) {
+ continue;
+ }
+
+ envp[j] = strdup(environ[i]);
+ ++j;
+ }
+
+ if (extraEnvironment) {
+ ObjectLock olock(extraEnvironment);
+
+ for (const Dictionary::Pair& kv : extraEnvironment) {
+ String skv = kv.first + "=" + Convert::ToString(kv.second);
+ envp[j] = strdup(skv.CStr());
+ j++;
+ }
+ }
+
+ envp[j] = strdup("LC_NUMERIC=C");
+ envp[j + 1] = nullptr;
+
+ extraEnvironment.reset();
+
+ pid_t pid = fork();
+
+ int errorCode = 0;
+
+ if (pid < 0)
+ errorCode = errno;
+
+ if (pid == 0) {
+ // child process
+
+ (void)close(l_ProcessControlFD);
+
+ if (setsid() < 0) {
+ perror("setsid() failed");
+ _exit(128);
+ }
+
+ if (dup2(fds[0], STDIN_FILENO) < 0 || dup2(fds[1], STDOUT_FILENO) < 0 || dup2(fds[2], STDERR_FILENO) < 0) {
+ perror("dup2() failed");
+ _exit(128);
+ }
+
+ (void)close(fds[0]);
+ (void)close(fds[1]);
+ (void)close(fds[2]);
+
+#ifdef HAVE_NICE
+ if (adjustPriority) {
+ // Cheating the compiler on "warning: ignoring return value of 'int nice(int)', declared with attribute warn_unused_result [-Wunused-result]".
+ auto x (nice(5));
+ (void)x;
+ }
+#endif /* HAVE_NICE */
+
+ sigset_t mask;
+ sigemptyset(&mask);
+ sigprocmask(SIG_SETMASK, &mask, nullptr);
+
+ if (icinga2_execvpe(argv[0], argv, envp) < 0) {
+ char errmsg[512];
+ strcpy(errmsg, "execvpe(");
+ strncat(errmsg, argv[0], sizeof(errmsg) - strlen(errmsg) - 1);
+ strncat(errmsg, ") failed", sizeof(errmsg) - strlen(errmsg) - 1);
+ errmsg[sizeof(errmsg) - 1] = '\0';
+ perror(errmsg);
+ }
+
+ _exit(128);
+ }
+
+ (void)close(fds[0]);
+ (void)close(fds[1]);
+ (void)close(fds[2]);
+
+ // free arguments
+ for (int i = 0; argv[i]; i++)
+ free(argv[i]);
+
+ delete[] argv;
+
+ // free environment
+ for (int i = 0; envp[i]; i++)
+ free(envp[i]);
+
+ delete[] envp;
+
+ Dictionary::Ptr response = new Dictionary({
+ { "rc", pid },
+ { "errno", errorCode }
+ });
+
+ return response;
+}
+
+static Value ProcessKillImpl(struct msghdr *msgh, const Dictionary::Ptr& request)
+{
+ pid_t pid = request->Get("pid");
+ int signum = request->Get("signum");
+
+ errno = 0;
+ kill(pid, signum);
+ int error = errno;
+
+ Dictionary::Ptr response = new Dictionary({
+ { "errno", error }
+ });
+
+ return response;
+}
+
+static Value ProcessWaitPIDImpl(struct msghdr *msgh, const Dictionary::Ptr& request)
+{
+ pid_t pid = request->Get("pid");
+
+ int status;
+ int rc = waitpid(pid, &status, 0);
+
+ Dictionary::Ptr response = new Dictionary({
+ { "status", status },
+ { "rc", rc }
+ });
+
+ return response;
+}
+
+static void ProcessHandler()
+{
+ sigset_t mask;
+ sigfillset(&mask);
+ sigprocmask(SIG_SETMASK, &mask, nullptr);
+
+ Utility::CloseAllFDs({0, 1, 2, l_ProcessControlFD});
+
+ for (;;) {
+ size_t length;
+
+ struct msghdr msg;
+ memset(&msg, 0, sizeof(msg));
+
+ struct iovec io;
+ io.iov_base = &length;
+ io.iov_len = sizeof(length);
+
+ msg.msg_iov = &io;
+ msg.msg_iovlen = 1;
+
+ char cbuf[4096];
+ msg.msg_control = cbuf;
+ msg.msg_controllen = sizeof(cbuf);
+
+ int rc = recvmsg(l_ProcessControlFD, &msg, 0);
+
+ if (rc <= 0) {
+ if (rc < 0 && (errno == EINTR || errno == EAGAIN))
+ continue;
+
+ break;
+ }
+
+ auto *mbuf = new char[length];
+
+ size_t count = 0;
+ while (count < length) {
+ rc = recv(l_ProcessControlFD, mbuf + count, length - count, 0);
+
+ if (rc <= 0) {
+ if (rc < 0 && (errno == EINTR || errno == EAGAIN))
+ continue;
+
+ delete [] mbuf;
+
+ _exit(0);
+ }
+
+ count += rc;
+
+ if (rc == 0)
+ break;
+ }
+
+ String jrequest = String(mbuf, mbuf + count);
+
+ delete [] mbuf;
+
+ Dictionary::Ptr request = JsonDecode(jrequest);
+
+ String command = request->Get("command");
+
+ Value response;
+
+ if (command == "spawn")
+ response = ProcessSpawnImpl(&msg, request);
+ else if (command == "waitpid")
+ response = ProcessWaitPIDImpl(&msg, request);
+ else if (command == "kill")
+ response = ProcessKillImpl(&msg, request);
+ else
+ response = Empty;
+
+ String jresponse = JsonEncode(response);
+
+ if (send(l_ProcessControlFD, jresponse.CStr(), jresponse.GetLength(), 0) < 0) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("send")
+ << boost::errinfo_errno(errno));
+ }
+ }
+
+ _exit(0);
+}
+
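
For readers less familiar with the mechanism above: each request arrives as a length-prefixed JSON payload, and (for spawn requests) the three standard I/O descriptors travel alongside it as SCM_RIGHTS ancillary data on the UNIX socket pair. A standalone sketch of the receiving side, independent of the Icinga classes (the function name and the single-fd simplification are illustrative only):

    // Sketch: receive one file descriptor passed as SCM_RIGHTS ancillary data.
    // Assumes `sock` is one end of socketpair(AF_UNIX, SOCK_STREAM, 0, ...).
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <cstring>

    static int ReceiveFd(int sock)
    {
        char dummy;
        struct iovec io = { &dummy, 1 };

        char cbuf[CMSG_SPACE(sizeof(int))];

        struct msghdr msg;
        memset(&msg, 0, sizeof(msg));
        msg.msg_iov = &io;
        msg.msg_iovlen = 1;
        msg.msg_control = cbuf;
        msg.msg_controllen = sizeof(cbuf);

        if (recvmsg(sock, &msg, 0) <= 0)
            return -1;

        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
        if (!cmsg || cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS)
            return -1;

        int fd;
        memcpy(&fd, CMSG_DATA(cmsg), sizeof(fd));
        return fd; // the caller now owns this descriptor
    }
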
+static void StartSpawnProcessHelper()
+{
+ if (l_ProcessControlFD != -1) {
+ (void)close(l_ProcessControlFD);
+
+ int status;
+ (void)waitpid(l_ProcessControlPID, &status, 0);
+ }
+
+ int controlFDs[2];
+ if (socketpair(AF_UNIX, SOCK_STREAM, 0, controlFDs) < 0) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("socketpair")
+ << boost::errinfo_errno(errno));
+ }
+
+ pid_t pid = fork();
+
+ if (pid < 0) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("fork")
+ << boost::errinfo_errno(errno));
+ }
+
+ if (pid == 0) {
+ (void)close(controlFDs[1]);
+
+ l_ProcessControlFD = controlFDs[0];
+
+ ProcessHandler();
+
+ _exit(1);
+ }
+
+ (void)close(controlFDs[0]);
+
+ l_ProcessControlFD = controlFDs[1];
+ l_ProcessControlPID = pid;
+}
+
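
StartSpawnProcessHelper keeps exactly one long-lived helper child around and re-forks it whenever the control socket turns out to be dead. Reduced to a standalone sketch (the echo loop stands in for the real spawn/kill/waitpid dispatch; all names are invented):

    // Helper-process pattern: fork one child early, talk to it over a socketpair.
    #include <sys/socket.h>
    #include <sys/wait.h>
    #include <unistd.h>
    #include <cstdio>

    int main()
    {
        int sv[2];
        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
            return 1;

        pid_t pid = fork();
        if (pid < 0)
            return 1;

        if (pid == 0) {                            // helper child
            close(sv[1]);
            char buf[64];
            ssize_t n;
            while ((n = recv(sv[0], buf, sizeof(buf), 0)) > 0)
                send(sv[0], buf, n, 0);            // the real helper would fork/exec here
            _exit(0);
        }

        close(sv[0]);
        send(sv[1], "ping", 4, 0);
        char reply[64];
        ssize_t n = recv(sv[1], reply, sizeof(reply), 0);
        printf("helper answered: %.*s\n", (int)n, reply);

        close(sv[1]);                              // EOF lets the helper exit its loop
        waitpid(pid, nullptr, 0);
        return 0;
    }
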
+static pid_t ProcessSpawn(const std::vector<String>& arguments, const Dictionary::Ptr& extraEnvironment, bool adjustPriority, int fds[3])
+{
+ Dictionary::Ptr request = new Dictionary({
+ { "command", "spawn" },
+ { "arguments", Array::FromVector(arguments) },
+ { "extraEnvironment", extraEnvironment },
+ { "adjustPriority", adjustPriority }
+ });
+
+ String jrequest = JsonEncode(request);
+ size_t length = jrequest.GetLength();
+
+ std::unique_lock<std::mutex> lock(l_ProcessControlMutex);
+
+ struct msghdr msg;
+ memset(&msg, 0, sizeof(msg));
+
+ struct iovec io;
+ io.iov_base = &length;
+ io.iov_len = sizeof(length);
+
+ msg.msg_iov = &io;
+ msg.msg_iovlen = 1;
+
+ char cbuf[CMSG_SPACE(sizeof(int) * 3)];
+ msg.msg_control = cbuf;
+ msg.msg_controllen = sizeof(cbuf);
+
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(int) * 3);
+
+ memcpy(CMSG_DATA(cmsg), fds, sizeof(int) * 3);
+
+ msg.msg_controllen = cmsg->cmsg_len;
+
+ do {
+ while (sendmsg(l_ProcessControlFD, &msg, 0) < 0) {
+ StartSpawnProcessHelper();
+ }
+ } while (send(l_ProcessControlFD, jrequest.CStr(), jrequest.GetLength(), 0) < 0);
+
+ char buf[4096];
+
+ ssize_t rc = recv(l_ProcessControlFD, buf, sizeof(buf), 0);
+
+ if (rc <= 0)
+ return -1;
+
+ String jresponse = String(buf, buf + rc);
+
+ Dictionary::Ptr response = JsonDecode(jresponse);
+
+ if (response->Get("rc") == -1)
+ errno = response->Get("errno");
+
+ return response->Get("rc");
+}
+
+static int ProcessKill(pid_t pid, int signum)
+{
+ Dictionary::Ptr request = new Dictionary({
+ { "command", "kill" },
+ { "pid", pid },
+ { "signum", signum }
+ });
+
+ String jrequest = JsonEncode(request);
+ size_t length = jrequest.GetLength();
+
+ std::unique_lock<std::mutex> lock(l_ProcessControlMutex);
+
+ do {
+ while (send(l_ProcessControlFD, &length, sizeof(length), 0) < 0) {
+ StartSpawnProcessHelper();
+ }
+ } while (send(l_ProcessControlFD, jrequest.CStr(), jrequest.GetLength(), 0) < 0);
+
+ char buf[4096];
+
+ ssize_t rc = recv(l_ProcessControlFD, buf, sizeof(buf), 0);
+
+ if (rc <= 0)
+ return -1;
+
+ String jresponse = String(buf, buf + rc);
+
+ Dictionary::Ptr response = JsonDecode(jresponse);
+ return response->Get("errno");
+}
+
+static int ProcessWaitPID(pid_t pid, int *status)
+{
+ Dictionary::Ptr request = new Dictionary({
+ { "command", "waitpid" },
+ { "pid", pid }
+ });
+
+ String jrequest = JsonEncode(request);
+ size_t length = jrequest.GetLength();
+
+ std::unique_lock<std::mutex> lock(l_ProcessControlMutex);
+
+ do {
+ while (send(l_ProcessControlFD, &length, sizeof(length), 0) < 0) {
+ StartSpawnProcessHelper();
+ }
+ } while (send(l_ProcessControlFD, jrequest.CStr(), jrequest.GetLength(), 0) < 0);
+
+ char buf[4096];
+
+ ssize_t rc = recv(l_ProcessControlFD, buf, sizeof(buf), 0);
+
+ if (rc <= 0)
+ return -1;
+
+ String jresponse = String(buf, buf + rc);
+
+ Dictionary::Ptr response = JsonDecode(jresponse);
+ *status = response->Get("status");
+ return response->Get("rc");
+}
+
+void Process::InitializeSpawnHelper()
+{
+ if (l_ProcessControlFD == -1)
+ StartSpawnProcessHelper();
+}
+#endif /* _WIN32 */
+
+static void InitializeProcess()
+{
+#ifdef _WIN32
+ for (auto& event : l_Events) {
+ event = CreateEvent(nullptr, TRUE, FALSE, nullptr);
+ }
+#else /* _WIN32 */
+ for (auto& eventFD : l_EventFDs) {
+# ifdef HAVE_PIPE2
+ if (pipe2(eventFD, O_CLOEXEC) < 0) {
+ if (errno == ENOSYS) {
+# endif /* HAVE_PIPE2 */
+ if (pipe(eventFD) < 0) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("pipe")
+ << boost::errinfo_errno(errno));
+ }
+
+ Utility::SetCloExec(eventFD[0]);
+ Utility::SetCloExec(eventFD[1]);
+# ifdef HAVE_PIPE2
+ } else {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("pipe2")
+ << boost::errinfo_errno(errno));
+ }
+ }
+# endif /* HAVE_PIPE2 */
+ }
+#endif /* _WIN32 */
+}
+
+INITIALIZE_ONCE(InitializeProcess);
+
+void Process::ThreadInitialize()
+{
+ /* Note: this must run _after_ the process has daemonized. */
+ for (int tid = 0; tid < IOTHREADS; tid++) {
+ std::thread t([tid]() { IOThreadProc(tid); });
+ t.detach();
+ }
+}
+
+Process::Arguments Process::PrepareCommand(const Value& command)
+{
+#ifdef _WIN32
+ String args;
+#else /* _WIN32 */
+ std::vector<String> args;
+#endif /* _WIN32 */
+
+ if (command.IsObjectType<Array>()) {
+ Array::Ptr arguments = command;
+
+ ObjectLock olock(arguments);
+ for (const Value& argument : arguments) {
+#ifdef _WIN32
+ if (args != "")
+ args += " ";
+
+ args += Utility::EscapeCreateProcessArg(argument);
+#else /* _WIN32 */
+ args.push_back(argument);
+#endif /* _WIN32 */
+ }
+
+ return args;
+ }
+
+#ifdef _WIN32
+ return command;
+#else /* _WIN32 */
+ return { "sh", "-c", command };
+#endif
+}
+
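
PrepareCommand accepts either an array (used verbatim as argv on POSIX, escaped and joined into one command line on Windows) or a plain string (wrapped in `sh -c` on POSIX). A minimal standalone sketch of the POSIX behaviour, using only the standard library (names are illustrative):

    #include <iostream>
    #include <string>
    #include <vector>

    // Either pass the caller's argv through unchanged, or hand a raw command line to the shell.
    static std::vector<std::string> PrepareArgv(const std::vector<std::string>& argvOrEmpty,
        const std::string& commandLine)
    {
        if (!argvOrEmpty.empty())
            return argvOrEmpty;                   // e.g. {"check_ping", "-H", "localhost"}

        return { "sh", "-c", commandLine };       // e.g. "check_ping -H localhost"
    }

    int main()
    {
        for (const auto& arg : PrepareArgv({}, "echo hello"))
            std::cout << "'" << arg << "' ";
        std::cout << "\n";                        // prints: 'sh' '-c' 'echo hello'
    }
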
+void Process::SetTimeout(double timeout)
+{
+ m_Timeout = timeout;
+}
+
+double Process::GetTimeout() const
+{
+ return m_Timeout;
+}
+
+void Process::SetAdjustPriority(bool adjust)
+{
+ m_AdjustPriority = adjust;
+}
+
+bool Process::GetAdjustPriority() const
+{
+ return m_AdjustPriority;
+}
+
+void Process::IOThreadProc(int tid)
+{
+#ifdef _WIN32
+ HANDLE *handles = nullptr;
+ HANDLE *fhandles = nullptr;
+#else /* _WIN32 */
+ pollfd *pfds = nullptr;
+#endif /* _WIN32 */
+ int count = 0;
+ double now;
+
+ Utility::SetThreadName("ProcessIO");
+
+ for (;;) {
+ double timeout = -1;
+
+ now = Utility::GetTime();
+
+ {
+ std::unique_lock<std::mutex> lock(l_ProcessMutex[tid]);
+
+ count = 1 + l_Processes[tid].size();
+#ifdef _WIN32
+ handles = reinterpret_cast<HANDLE *>(realloc(handles, sizeof(HANDLE) * count));
+ fhandles = reinterpret_cast<HANDLE *>(realloc(fhandles, sizeof(HANDLE) * count));
+
+ fhandles[0] = l_Events[tid];
+
+#else /* _WIN32 */
+ pfds = reinterpret_cast<pollfd *>(realloc(pfds, sizeof(pollfd) * count));
+
+ pfds[0].fd = l_EventFDs[tid][0];
+ pfds[0].events = POLLIN;
+ pfds[0].revents = 0;
+#endif /* _WIN32 */
+
+ int i = 1;
+ typedef std::pair<ProcessHandle, Process::Ptr> kv_pair;
+ for (const kv_pair& kv : l_Processes[tid]) {
+ const Process::Ptr& process = kv.second;
+#ifdef _WIN32
+ handles[i] = kv.first;
+
+ if (!process->m_ReadPending) {
+ process->m_ReadPending = true;
+
+ BOOL res = ReadFile(process->m_FD, process->m_ReadBuffer, sizeof(process->m_ReadBuffer), 0, &process->m_Overlapped);
+ if (res || GetLastError() != ERROR_IO_PENDING) {
+ process->m_ReadFailed = !res;
+ SetEvent(process->m_Overlapped.hEvent);
+ }
+ }
+
+ fhandles[i] = process->m_Overlapped.hEvent;
+#else /* _WIN32 */
+ pfds[i].fd = process->m_FD;
+ pfds[i].events = POLLIN;
+ pfds[i].revents = 0;
+#endif /* _WIN32 */
+
+ if (process->m_Timeout != 0) {
+ double delta = process->GetNextTimeout() - (now - process->m_Result.ExecutionStart);
+
+ if (timeout == -1 || delta < timeout)
+ timeout = delta;
+ }
+
+ i++;
+ }
+ }
+
+ if (timeout < 0.01)
+ timeout = 0.5;
+
+ timeout *= 1000;
+
+#ifdef _WIN32
+ DWORD rc = WaitForMultipleObjects(count, fhandles, FALSE, timeout == -1 ? INFINITE : static_cast<DWORD>(timeout));
+#else /* _WIN32 */
+ int rc = poll(pfds, count, timeout);
+
+ if (rc < 0)
+ continue;
+#endif /* _WIN32 */
+
+ now = Utility::GetTime();
+
+ {
+ std::unique_lock<std::mutex> lock(l_ProcessMutex[tid]);
+
+#ifdef _WIN32
+ if (rc == WAIT_OBJECT_0)
+ ResetEvent(l_Events[tid]);
+#else /* _WIN32 */
+ if (pfds[0].revents & (POLLIN | POLLHUP | POLLERR)) {
+ char buffer[512];
+ if (read(l_EventFDs[tid][0], buffer, sizeof(buffer)) < 0)
+ Log(LogCritical, "base", "Read from event FD failed.");
+ }
+#endif /* _WIN32 */
+
+ for (int i = 1; i < count; i++) {
+#ifdef _WIN32
+ auto it = l_Processes[tid].find(handles[i]);
+#else /* _WIN32 */
+ auto it2 = l_FDs[tid].find(pfds[i].fd);
+
+ if (it2 == l_FDs[tid].end())
+ continue; /* This should never happen. */
+
+ auto it = l_Processes[tid].find(it2->second);
+#endif /* _WIN32 */
+
+ if (it == l_Processes[tid].end())
+ continue; /* This should never happen. */
+
+ bool is_timeout = false;
+
+ if (it->second->m_Timeout != 0) {
+ double timeout = it->second->m_Result.ExecutionStart + it->second->GetNextTimeout();
+
+ if (timeout < now)
+ is_timeout = true;
+ }
+
+#ifdef _WIN32
+ if (rc == WAIT_OBJECT_0 + i || is_timeout) {
+#else /* _WIN32 */
+ if (pfds[i].revents & (POLLIN | POLLHUP | POLLERR) || is_timeout) {
+#endif /* _WIN32 */
+ if (!it->second->DoEvents()) {
+#ifdef _WIN32
+ CloseHandle(it->first);
+ CloseHandle(it->second->m_FD);
+#else /* _WIN32 */
+ l_FDs[tid].erase(it->second->m_FD);
+ (void)close(it->second->m_FD);
+#endif /* _WIN32 */
+ l_Processes[tid].erase(it);
+ }
+ }
+ }
+ }
+ }
+}
+
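
Each I/O thread above multiplexes one wakeup pipe plus one output fd per running process through a single poll() call. The same idiom, condensed into a standalone helper (identifiers are illustrative, not part of the Icinga API):

    #include <poll.h>
    #include <unistd.h>
    #include <cstddef>
    #include <vector>

    // Wait on a wakeup pipe plus a set of data fds; returns the fds that became readable.
    static std::vector<int> WaitReadable(int wakeupFd, const std::vector<int>& fds, int timeoutMs)
    {
        std::vector<pollfd> pfds;
        pfds.push_back({ wakeupFd, POLLIN, 0 });
        for (int fd : fds)
            pfds.push_back({ fd, POLLIN, 0 });

        std::vector<int> ready;

        if (poll(pfds.data(), pfds.size(), timeoutMs) <= 0)
            return ready;                         // timeout or error: the caller simply loops again

        if (pfds[0].revents & POLLIN) {           // drain the wakeup byte(s)
            char buf[64];
            (void)read(wakeupFd, buf, sizeof(buf));
        }

        for (std::size_t i = 1; i < pfds.size(); i++)
            if (pfds[i].revents & (POLLIN | POLLHUP | POLLERR))
                ready.push_back(pfds[i].fd);

        return ready;
    }
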
+String Process::PrettyPrintArguments(const Process::Arguments& arguments)
+{
+#ifdef _WIN32
+ return "'" + arguments + "'";
+#else /* _WIN32 */
+ return "'" + boost::algorithm::join(arguments, "' '") + "'";
+#endif /* _WIN32 */
+}
+
+#ifdef _WIN32
+static BOOL CreatePipeOverlapped(HANDLE *outReadPipe, HANDLE *outWritePipe,
+ SECURITY_ATTRIBUTES *securityAttributes, DWORD size, DWORD readMode, DWORD writeMode)
+{
+ static LONG pipeIndex = 0;
+
+ if (size == 0)
+ size = 8192;
+
+ LONG currentIndex = InterlockedIncrement(&pipeIndex);
+
+ char pipeName[128];
+ sprintf(pipeName, "\\\\.\\Pipe\\OverlappedPipe.%d.%d", (int)GetCurrentProcessId(), (int)currentIndex);
+
+ *outReadPipe = CreateNamedPipe(pipeName, PIPE_ACCESS_INBOUND | readMode,
+ PIPE_TYPE_BYTE | PIPE_WAIT, 1, size, size, 60 * 1000, securityAttributes);
+
+ if (*outReadPipe == INVALID_HANDLE_VALUE)
+ return FALSE;
+
+ *outWritePipe = CreateFile(pipeName, GENERIC_WRITE, 0, securityAttributes, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL | writeMode, nullptr);
+
+ if (*outWritePipe == INVALID_HANDLE_VALUE) {
+ DWORD error = GetLastError();
+ CloseHandle(*outReadPipe);
+ SetLastError(error);
+ return FALSE;
+ }
+
+ return TRUE;
+}
+#endif /* _WIN32 */
+
+void Process::Run(const std::function<void(const ProcessResult&)>& callback)
+{
+#ifndef _WIN32
+ boost::call_once(l_SpawnHelperOnceFlag, &Process::InitializeSpawnHelper);
+#endif /* _WIN32 */
+ boost::call_once(l_ProcessOnceFlag, &Process::ThreadInitialize);
+
+ m_Result.ExecutionStart = Utility::GetTime();
+
+#ifdef _WIN32
+ SECURITY_ATTRIBUTES sa = {};
+ sa.nLength = sizeof(sa);
+ sa.bInheritHandle = TRUE;
+
+ HANDLE outReadPipe, outWritePipe;
+ if (!CreatePipeOverlapped(&outReadPipe, &outWritePipe, &sa, 0, FILE_FLAG_OVERLAPPED, 0))
+ BOOST_THROW_EXCEPTION(win32_error()
+ << boost::errinfo_api_function("CreatePipe")
+ << errinfo_win32_error(GetLastError()));
+
+ if (!SetHandleInformation(outReadPipe, HANDLE_FLAG_INHERIT, 0))
+ BOOST_THROW_EXCEPTION(win32_error()
+ << boost::errinfo_api_function("SetHandleInformation")
+ << errinfo_win32_error(GetLastError()));
+
+ HANDLE outWritePipeDup;
+ if (!DuplicateHandle(GetCurrentProcess(), outWritePipe, GetCurrentProcess(),
+ &outWritePipeDup, 0, TRUE, DUPLICATE_SAME_ACCESS))
+ BOOST_THROW_EXCEPTION(win32_error()
+ << boost::errinfo_api_function("DuplicateHandle")
+ << errinfo_win32_error(GetLastError()));
+
+/* LPPROC_THREAD_ATTRIBUTE_LIST lpAttributeList;
+ SIZE_T cbSize;
+
+ if (!InitializeProcThreadAttributeList(nullptr, 1, 0, &cbSize) && GetLastError() != ERROR_INSUFFICIENT_BUFFER)
+ BOOST_THROW_EXCEPTION(win32_error()
+ << boost::errinfo_api_function("InitializeProcThreadAttributeList")
+ << errinfo_win32_error(GetLastError()));
+
+ lpAttributeList = reinterpret_cast<LPPROC_THREAD_ATTRIBUTE_LIST>(new char[cbSize]);
+
+ if (!InitializeProcThreadAttributeList(lpAttributeList, 1, 0, &cbSize))
+ BOOST_THROW_EXCEPTION(win32_error()
+ << boost::errinfo_api_function("InitializeProcThreadAttributeList")
+ << errinfo_win32_error(GetLastError()));
+
+ HANDLE rgHandles[3];
+ rgHandles[0] = outWritePipe;
+ rgHandles[1] = outWritePipeDup;
+ rgHandles[2] = GetStdHandle(STD_INPUT_HANDLE);
+
+ if (!UpdateProcThreadAttribute(lpAttributeList, 0, PROC_THREAD_ATTRIBUTE_HANDLE_LIST,
+ rgHandles, sizeof(rgHandles), nullptr, nullptr))
+ BOOST_THROW_EXCEPTION(win32_error()
+ << boost::errinfo_api_function("UpdateProcThreadAttribute")
+ << errinfo_win32_error(GetLastError()));
+*/
+
+ STARTUPINFOEX si = {};
+ si.StartupInfo.cb = sizeof(si);
+ si.StartupInfo.hStdError = outWritePipe;
+ si.StartupInfo.hStdOutput = outWritePipeDup;
+ si.StartupInfo.hStdInput = GetStdHandle(STD_INPUT_HANDLE);
+ si.StartupInfo.dwFlags = STARTF_USESTDHANDLES;
+// si.lpAttributeList = lpAttributeList;
+
+ PROCESS_INFORMATION pi;
+
+ char *args = new char[m_Arguments.GetLength() + 1];
+ strncpy(args, m_Arguments.CStr(), m_Arguments.GetLength() + 1);
+ args[m_Arguments.GetLength()] = '\0';
+
+ LPCH pEnvironment = GetEnvironmentStrings();
+ size_t ioffset = 0, offset = 0;
+
+ char *envp = nullptr;
+
+ for (;;) {
+ size_t len = strlen(pEnvironment + ioffset);
+
+ if (len == 0)
+ break;
+
+ char *eqp = strchr(pEnvironment + ioffset, '=');
+ if (eqp && m_ExtraEnvironment && m_ExtraEnvironment->Contains(String(pEnvironment + ioffset, eqp))) {
+ ioffset += len + 1;
+ continue;
+ }
+
+ envp = static_cast<char *>(realloc(envp, offset + len + 1));
+
+ if (!envp)
+ BOOST_THROW_EXCEPTION(std::bad_alloc());
+
+ strcpy(envp + offset, pEnvironment + ioffset);
+ offset += len + 1;
+ ioffset += len + 1;
+ }
+
+ FreeEnvironmentStrings(pEnvironment);
+
+ if (m_ExtraEnvironment) {
+ ObjectLock olock(m_ExtraEnvironment);
+
+ for (const Dictionary::Pair& kv : m_ExtraEnvironment) {
+ String skv = kv.first + "=" + Convert::ToString(kv.second);
+
+ envp = static_cast<char *>(realloc(envp, offset + skv.GetLength() + 1));
+
+ if (!envp)
+ BOOST_THROW_EXCEPTION(std::bad_alloc());
+
+ strcpy(envp + offset, skv.CStr());
+ offset += skv.GetLength() + 1;
+ }
+ }
+
+ envp = static_cast<char *>(realloc(envp, offset + 1));
+
+ if (!envp)
+ BOOST_THROW_EXCEPTION(std::bad_alloc());
+
+ envp[offset] = '\0';
+
+ if (!CreateProcess(nullptr, args, nullptr, nullptr, TRUE,
+ 0 /*EXTENDED_STARTUPINFO_PRESENT*/, envp, nullptr, &si.StartupInfo, &pi)) {
+ DWORD error = GetLastError();
+ CloseHandle(outWritePipe);
+ CloseHandle(outWritePipeDup);
+ free(envp);
+/* DeleteProcThreadAttributeList(lpAttributeList);
+ delete [] reinterpret_cast<char *>(lpAttributeList); */
+
+ m_Result.PID = 0;
+ m_Result.ExecutionEnd = Utility::GetTime();
+ m_Result.ExitStatus = 127;
+ m_Result.Output = "Command " + String(args) + " failed to execute: " + Utility::FormatErrorNumber(error);
+
+ delete [] args;
+
+ if (callback) {
+ /*
+ * Explicitly hold a Process::Ptr so the object stays reference-counted while
+ * the callback is pending, making the callback crash-safe.
+ */
+ Process::Ptr process(this);
+ Utility::QueueAsyncCallback([this, process, callback]() { callback(m_Result); });
+ }
+
+ return;
+ }
+
+ delete [] args;
+ free(envp);
+/* DeleteProcThreadAttributeList(lpAttributeList);
+ delete [] reinterpret_cast<char *>(lpAttributeList); */
+
+ CloseHandle(outWritePipe);
+ CloseHandle(outWritePipeDup);
+ CloseHandle(pi.hThread);
+
+ m_Process = pi.hProcess;
+ m_FD = outReadPipe;
+ m_PID = pi.dwProcessId;
+
+ Log(LogNotice, "Process")
+ << "Running command " << PrettyPrintArguments(m_Arguments) << ": PID " << m_PID;
+
+#else /* _WIN32 */
+ int outfds[2];
+
+#ifdef HAVE_PIPE2
+ if (pipe2(outfds, O_CLOEXEC) < 0) {
+ if (errno == ENOSYS) {
+#endif /* HAVE_PIPE2 */
+ if (pipe(outfds) < 0) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("pipe")
+ << boost::errinfo_errno(errno));
+ }
+
+ Utility::SetCloExec(outfds[0]);
+ Utility::SetCloExec(outfds[1]);
+#ifdef HAVE_PIPE2
+ } else {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("pipe2")
+ << boost::errinfo_errno(errno));
+ }
+ }
+#endif /* HAVE_PIPE2 */
+
+ int fds[3];
+ fds[0] = STDIN_FILENO;
+ fds[1] = outfds[1];
+ fds[2] = outfds[1];
+
+ m_Process = ProcessSpawn(m_Arguments, m_ExtraEnvironment, m_AdjustPriority, fds);
+ m_PID = m_Process;
+
+ if (m_PID == -1) {
+ m_OutputStream << "Fork failed with error code " << errno << " (" << Utility::FormatErrorNumber(errno) << ")";
+ Log(LogCritical, "Process", m_OutputStream.str());
+ }
+
+ Log(LogNotice, "Process")
+ << "Running command " << PrettyPrintArguments(m_Arguments) << ": PID " << m_PID;
+
+ (void)close(outfds[1]);
+
+ Utility::SetNonBlocking(outfds[0]);
+
+ m_FD = outfds[0];
+#endif /* _WIN32 */
+
+ m_Callback = callback;
+
+ int tid = GetTID();
+
+ {
+ std::unique_lock<std::mutex> lock(l_ProcessMutex[tid]);
+ l_Processes[tid][m_Process] = this;
+#ifndef _WIN32
+ l_FDs[tid][m_FD] = m_Process;
+#endif /* _WIN32 */
+ }
+
+#ifdef _WIN32
+ SetEvent(l_Events[tid]);
+#else /* _WIN32 */
+ if (write(l_EventFDs[tid][1], "T", 1) < 0 && errno != EINTR && errno != EAGAIN)
+ Log(LogCritical, "base", "Write to event FD failed.");
+#endif /* _WIN32 */
+}
+
+const ProcessResult& Process::WaitForResult()
+{
+ std::unique_lock<std::mutex> lock(m_ResultMutex);
+ m_ResultCondition.wait(lock, [this]{ return m_ResultAvailable; });
+ return m_Result;
+}
+
+bool Process::DoEvents()
+{
+ bool is_timeout = false;
+#ifndef _WIN32
+ bool could_not_kill = false;
+#endif /* _WIN32 */
+
+ if (m_Timeout != 0) {
+ auto now (Utility::GetTime());
+
+#ifndef _WIN32
+ {
+ auto timeout (GetNextTimeout());
+ auto deadline (m_Result.ExecutionStart + timeout);
+
+ if (deadline < now && !m_SentSigterm) {
+ Log(LogWarning, "Process")
+ << "Terminating process " << m_PID << " (" << PrettyPrintArguments(m_Arguments)
+ << ") after timeout of " << timeout << " seconds";
+
+ m_OutputStream << "<Timeout exceeded.>";
+
+ int error = ProcessKill(m_Process, SIGTERM);
+ if (error) {
+ Log(LogWarning, "Process")
+ << "Couldn't terminate the process " << m_PID << " (" << PrettyPrintArguments(m_Arguments)
+ << "): [errno " << error << "] " << strerror(error);
+ }
+
+ m_SentSigterm = true;
+ }
+ }
+#endif /* _WIN32 */
+
+ auto timeout (GetNextTimeout());
+ auto deadline (m_Result.ExecutionStart + timeout);
+
+ if (deadline < now) {
+ Log(LogWarning, "Process")
+ << "Killing process group " << m_PID << " (" << PrettyPrintArguments(m_Arguments)
+ << ") after timeout of " << timeout << " seconds";
+
+#ifdef _WIN32
+ m_OutputStream << "<Timeout exceeded.>";
+ TerminateProcess(m_Process, 3);
+#else /* _WIN32 */
+ int error = ProcessKill(-m_Process, SIGKILL);
+ if (error) {
+ Log(LogWarning, "Process")
+ << "Couldn't kill the process group " << m_PID << " (" << PrettyPrintArguments(m_Arguments)
+ << "): [errno " << error << "] " << strerror(error);
+ could_not_kill = true;
+ }
+#endif /* _WIN32 */
+
+ is_timeout = true;
+ }
+ }
+
+ if (!is_timeout) {
+#ifdef _WIN32
+ m_ReadPending = false;
+
+ DWORD rc;
+ if (!m_ReadFailed && GetOverlappedResult(m_FD, &m_Overlapped, &rc, TRUE) && rc > 0) {
+ m_OutputStream.write(m_ReadBuffer, rc);
+ return true;
+ }
+#else /* _WIN32 */
+ char buffer[512];
+ for (;;) {
+ int rc = read(m_FD, buffer, sizeof(buffer));
+
+ if (rc < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
+ return true;
+
+ if (rc > 0) {
+ m_OutputStream.write(buffer, rc);
+ continue;
+ }
+
+ break;
+ }
+#endif /* _WIN32 */
+ }
+
+ String output = m_OutputStream.str();
+
+#ifdef _WIN32
+ WaitForSingleObject(m_Process, INFINITE);
+
+ DWORD exitcode;
+ GetExitCodeProcess(m_Process, &exitcode);
+
+ Log(LogNotice, "Process")
+ << "PID " << m_PID << " (" << PrettyPrintArguments(m_Arguments) << ") terminated with exit code " << exitcode;
+#else /* _WIN32 */
+ int status, exitcode;
+ if (could_not_kill || m_PID == -1) {
+ exitcode = 128;
+ } else if (ProcessWaitPID(m_Process, &status) != m_Process) {
+ exitcode = 128;
+
+ Log(LogWarning, "Process")
+ << "PID " << m_PID << " (" << PrettyPrintArguments(m_Arguments) << ") died mysteriously: waitpid failed";
+ } else if (WIFEXITED(status)) {
+ exitcode = WEXITSTATUS(status);
+
+ Log msg(LogNotice, "Process");
+ msg << "PID " << m_PID << " (" << PrettyPrintArguments(m_Arguments)
+ << ") terminated with exit code " << exitcode;
+
+ if (m_SentSigterm) {
+ exitcode = 128;
+ msg << " after sending SIGTERM";
+ }
+ } else if (WIFSIGNALED(status)) {
+ int signum = WTERMSIG(status);
+ const char *zsigname = strsignal(signum);
+
+ String signame = Convert::ToString(signum);
+
+ if (zsigname) {
+ signame += " (";
+ signame += zsigname;
+ signame += ")";
+ }
+
+ Log(LogWarning, "Process")
+ << "PID " << m_PID << " was terminated by signal " << signame;
+
+ std::ostringstream outputbuf;
+ outputbuf << "<Terminated by signal " << signame << ".>";
+ output = output + outputbuf.str();
+ exitcode = 128;
+ } else {
+ exitcode = 128;
+ }
+#endif /* _WIN32 */
+
+ {
+ std::lock_guard<std::mutex> lock(m_ResultMutex);
+ m_Result.PID = m_PID;
+ m_Result.ExecutionEnd = Utility::GetTime();
+ m_Result.ExitStatus = exitcode;
+ m_Result.Output = output;
+ m_ResultAvailable = true;
+ }
+ m_ResultCondition.notify_all();
+
+ if (m_Callback) {
+ /*
+ * Explicitly hold a Process::Ptr so the object stays reference-counted while
+ * the callback is pending, making the callback crash-safe.
+ */
+ Process::Ptr process(this);
+ Utility::QueueAsyncCallback([this, process]() { m_Callback(m_Result); });
+ }
+
+ return false;
+}
+
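
The exit handling above distinguishes a normal exit from death by signal via the wait-status macros. A minimal standalone refresher on those macros (the child deliberately killing itself with SIGTERM is only for demonstration):

    #include <sys/wait.h>
    #include <unistd.h>
    #include <csignal>
    #include <cstring>
    #include <cstdio>

    int main()
    {
        pid_t pid = fork();

        if (pid == 0) {
            raise(SIGTERM);                       // child terminates itself via a signal
            _exit(0);                             // not reached
        }

        int status = 0;
        if (waitpid(pid, &status, 0) != pid)
            return 1;

        if (WIFEXITED(status))
            printf("exited with code %d\n", WEXITSTATUS(status));
        else if (WIFSIGNALED(status))
            printf("killed by signal %d (%s)\n", WTERMSIG(status), strsignal(WTERMSIG(status)));

        return 0;
    }
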
+pid_t Process::GetPID() const
+{
+ return m_PID;
+}
+
+int Process::GetTID() const
+{
+ return (reinterpret_cast<uintptr_t>(this) / sizeof(void *)) % IOTHREADS;
+}
+
+double Process::GetNextTimeout() const
+{
+#ifdef _WIN32
+ return m_Timeout;
+#else /* _WIN32 */
+ return m_SentSigterm ? m_Timeout * 1.1 : m_Timeout;
+#endif /* _WIN32 */
+}
diff --git a/lib/base/process.hpp b/lib/base/process.hpp
new file mode 100644
index 0000000..d83ba6e
--- /dev/null
+++ b/lib/base/process.hpp
@@ -0,0 +1,117 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef PROCESS_H
+#define PROCESS_H
+
+#include "base/i2-base.hpp"
+#include "base/dictionary.hpp"
+#include <iosfwd>
+#include <deque>
+#include <vector>
+#include <sstream>
+#include <mutex>
+#include <condition_variable>
+
+namespace icinga
+{
+
+/**
+ * The result of a Process task.
+ *
+ * @ingroup base
+ */
+struct ProcessResult
+{
+ pid_t PID;
+ double ExecutionStart;
+ double ExecutionEnd;
+ long ExitStatus;
+ String Output;
+};
+
+/**
+ * A process task. Executes an external application and returns the exit
+ * code and console output.
+ *
+ * @ingroup base
+ */
+class Process final : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(Process);
+
+#ifdef _WIN32
+ typedef String Arguments;
+ typedef HANDLE ProcessHandle;
+ typedef HANDLE ConsoleHandle;
+#else /* _WIN32 */
+ typedef std::vector<String> Arguments;
+ typedef pid_t ProcessHandle;
+ typedef int ConsoleHandle;
+#endif /* _WIN32 */
+
+ static const std::deque<Process::Ptr>::size_type MaxTasksPerThread = 512;
+
+ Process(Arguments arguments, Dictionary::Ptr extraEnvironment = nullptr);
+ ~Process() override;
+
+ void SetTimeout(double timeout);
+ double GetTimeout() const;
+
+ void SetAdjustPriority(bool adjust);
+ bool GetAdjustPriority() const;
+
+ void Run(const std::function<void (const ProcessResult&)>& callback = std::function<void (const ProcessResult&)>());
+
+ const ProcessResult& WaitForResult();
+
+ pid_t GetPID() const;
+
+ static Arguments PrepareCommand(const Value& command);
+
+ static void ThreadInitialize();
+
+ static String PrettyPrintArguments(const Arguments& arguments);
+
+#ifndef _WIN32
+ static void InitializeSpawnHelper();
+#endif /* _WIN32 */
+
+private:
+ Arguments m_Arguments;
+ Dictionary::Ptr m_ExtraEnvironment;
+
+ double m_Timeout;
+#ifndef _WIN32
+ bool m_SentSigterm;
+#endif /* _WIN32 */
+
+ bool m_AdjustPriority;
+
+ ProcessHandle m_Process;
+ pid_t m_PID;
+ ConsoleHandle m_FD;
+
+#ifdef _WIN32
+ bool m_ReadPending;
+ bool m_ReadFailed;
+ OVERLAPPED m_Overlapped;
+ char m_ReadBuffer[1024];
+#endif /* _WIN32 */
+
+ std::ostringstream m_OutputStream;
+ std::function<void (const ProcessResult&)> m_Callback;
+ ProcessResult m_Result;
+ bool m_ResultAvailable;
+ std::mutex m_ResultMutex;
+ std::condition_variable m_ResultCondition;
+
+ static void IOThreadProc(int tid);
+ bool DoEvents();
+ int GetTID() const;
+ double GetNextTimeout() const;
+};
+
+}
+
+#endif /* PROCESS_H */
diff --git a/lib/base/reference-script.cpp b/lib/base/reference-script.cpp
new file mode 100644
index 0000000..9408245
--- /dev/null
+++ b/lib/base/reference-script.cpp
@@ -0,0 +1,35 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/reference.hpp"
+#include "base/function.hpp"
+#include "base/functionwrapper.hpp"
+#include "base/scriptframe.hpp"
+#include "base/exception.hpp"
+
+using namespace icinga;
+
+static void ReferenceSet(const Value& value)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Reference::Ptr self = static_cast<Reference::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ self->Set(value);
+}
+
+static Value ReferenceGet()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Reference::Ptr self = static_cast<Reference::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+ return self->Get();
+}
+
+Object::Ptr Reference::GetPrototype()
+{
+ static Dictionary::Ptr prototype = new Dictionary({
+ { "set", new Function("Reference#set", ReferenceSet, { "value" }) },
+ { "get", new Function("Reference#get", ReferenceGet, {}, true) },
+ });
+
+ return prototype;
+}
diff --git a/lib/base/reference.cpp b/lib/base/reference.cpp
new file mode 100644
index 0000000..b0104af
--- /dev/null
+++ b/lib/base/reference.cpp
@@ -0,0 +1,38 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/reference.hpp"
+#include "base/debug.hpp"
+#include "base/primitivetype.hpp"
+#include "base/dictionary.hpp"
+#include "base/configwriter.hpp"
+#include "base/convert.hpp"
+#include "base/exception.hpp"
+
+using namespace icinga;
+
+REGISTER_PRIMITIVE_TYPE_NOINST(Reference, Object, Reference::GetPrototype());
+
+Reference::Reference(const Object::Ptr& parent, const String& index)
+ : m_Parent(parent), m_Index(index)
+{
+}
+
+Value Reference::Get() const
+{
+ return m_Parent->GetFieldByName(m_Index, true, DebugInfo());
+}
+
+void Reference::Set(const Value& value)
+{
+ m_Parent->SetFieldByName(m_Index, value, false, DebugInfo());
+}
+
+Object::Ptr Reference::GetParent() const
+{
+ return m_Parent;
+}
+
+String Reference::GetIndex() const
+{
+ return m_Index;
+}
diff --git a/lib/base/reference.hpp b/lib/base/reference.hpp
new file mode 100644
index 0000000..30faabe
--- /dev/null
+++ b/lib/base/reference.hpp
@@ -0,0 +1,40 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef REFERENCE_H
+#define REFERENCE_H
+
+#include "base/i2-base.hpp"
+#include "base/objectlock.hpp"
+#include "base/value.hpp"
+
+namespace icinga
+{
+
+/**
+ * A reference.
+ *
+ * @ingroup base
+ */
+class Reference final : public Object
+{
+public:
+ DECLARE_OBJECT(Reference);
+
+ Reference(const Object::Ptr& parent, const String& index);
+
+ Value Get() const;
+ void Set(const Value& value);
+
+ Object::Ptr GetParent() const;
+ String GetIndex() const;
+
+ static Object::Ptr GetPrototype();
+
+private:
+ Object::Ptr m_Parent;
+ String m_Index;
+};
+
+}
+
+#endif /* REFERENCE_H */
diff --git a/lib/base/registry.hpp b/lib/base/registry.hpp
new file mode 100644
index 0000000..c13f7e1
--- /dev/null
+++ b/lib/base/registry.hpp
@@ -0,0 +1,121 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef REGISTRY_H
+#define REGISTRY_H
+
+#include "base/i2-base.hpp"
+#include "base/string.hpp"
+#include <boost/signals2.hpp>
+#include <map>
+#include <mutex>
+
+namespace icinga
+{
+
+/**
+ * A registry.
+ *
+ * @ingroup base
+ */
+template<typename U, typename T>
+class Registry
+{
+public:
+ typedef std::map<String, T> ItemMap;
+
+ void RegisterIfNew(const String& name, const T& item)
+ {
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ if (m_Items.find(name) != m_Items.end())
+ return;
+
+ RegisterInternal(name, item, lock);
+ }
+
+ void Register(const String& name, const T& item)
+ {
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ RegisterInternal(name, item, lock);
+ }
+
+ void Unregister(const String& name)
+ {
+ size_t erased;
+
+ {
+ std::unique_lock<std::mutex> lock(m_Mutex);
+ erased = m_Items.erase(name);
+ }
+
+ if (erased > 0)
+ OnUnregistered(name);
+ }
+
+ void Clear()
+ {
+ typename Registry<U, T>::ItemMap items;
+
+ {
+ std::unique_lock<std::mutex> lock(m_Mutex);
+ items = m_Items;
+ }
+
+ for (const auto& kv : items) {
+ OnUnregistered(kv.first);
+ }
+
+ {
+ std::unique_lock<std::mutex> lock(m_Mutex);
+ m_Items.clear();
+ }
+ }
+
+ T GetItem(const String& name) const
+ {
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ auto it = m_Items.find(name);
+
+ if (it == m_Items.end())
+ return T();
+
+ return it->second;
+ }
+
+ ItemMap GetItems() const
+ {
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ return m_Items; /* Makes a copy of the map. */
+ }
+
+ boost::signals2::signal<void (const String&, const T&)> OnRegistered;
+ boost::signals2::signal<void (const String&)> OnUnregistered;
+
+private:
+ mutable std::mutex m_Mutex;
+ typename Registry<U, T>::ItemMap m_Items;
+
+ void RegisterInternal(const String& name, const T& item, std::unique_lock<std::mutex>& lock)
+ {
+ bool old_item = false;
+
+ if (m_Items.erase(name) > 0)
+ old_item = true;
+
+ m_Items[name] = item;
+
+ lock.unlock();
+
+ if (old_item)
+ OnUnregistered(name);
+
+ OnRegistered(name, item);
+ }
+};
+
+}
+
+#endif /* REGISTRY_H */
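
A hedged usage sketch for this template (it assumes the surrounding Icinga headers are available; the DemoRegistry tag type and the registered names are invented for illustration). The first template parameter is used only as a tag that keeps otherwise identical registries distinct types:

    #include "base/registry.hpp"
    #include "base/string.hpp"
    #include <iostream>

    using namespace icinga;

    class DemoRegistry : public Registry<DemoRegistry, String> { };

    int main()
    {
        DemoRegistry registry;

        registry.OnRegistered.connect([](const String& name, const String& item) {
            std::cout << "registered " << name.CStr() << " -> " << item.CStr() << "\n";
        });

        registry.Register("ping", "check_ping");
        registry.RegisterIfNew("ping", "ignored");              // already present: no effect, no signal
        std::cout << registry.GetItem("ping").CStr() << "\n";   // prints: check_ping

        registry.Unregister("ping");                            // fires OnUnregistered
        return 0;
    }
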
diff --git a/lib/base/ringbuffer.cpp b/lib/base/ringbuffer.cpp
new file mode 100644
index 0000000..52e2ae5
--- /dev/null
+++ b/lib/base/ringbuffer.cpp
@@ -0,0 +1,91 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/ringbuffer.hpp"
+#include "base/objectlock.hpp"
+#include "base/utility.hpp"
+#include <algorithm>
+
+using namespace icinga;
+
+RingBuffer::RingBuffer(RingBuffer::SizeType slots)
+ : m_Slots(slots, 0), m_TimeValue(0), m_InsertedValues(0)
+{ }
+
+RingBuffer::SizeType RingBuffer::GetLength() const
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+ return m_Slots.size();
+}
+
+void RingBuffer::InsertValue(RingBuffer::SizeType tv, int num)
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ InsertValueUnlocked(tv, num);
+}
+
+void RingBuffer::InsertValueUnlocked(RingBuffer::SizeType tv, int num)
+{
+ RingBuffer::SizeType offsetTarget = tv % m_Slots.size();
+
+ if (m_TimeValue == 0)
+ m_InsertedValues = 1;
+
+ if (tv > m_TimeValue) {
+ RingBuffer::SizeType offset = m_TimeValue % m_Slots.size();
+
+ /* walk towards the target offset, resetting slots to 0 */
+ while (offset != offsetTarget) {
+ offset++;
+
+ if (offset >= m_Slots.size())
+ offset = 0;
+
+ m_Slots[offset] = 0;
+
+ if (m_TimeValue != 0 && m_InsertedValues < m_Slots.size())
+ m_InsertedValues++;
+ }
+
+ m_TimeValue = tv;
+ }
+
+ m_Slots[offsetTarget] += num;
+}
+
+int RingBuffer::UpdateAndGetValues(RingBuffer::SizeType tv, RingBuffer::SizeType span)
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ return UpdateAndGetValuesUnlocked(tv, span);
+}
+
+int RingBuffer::UpdateAndGetValuesUnlocked(RingBuffer::SizeType tv, RingBuffer::SizeType span)
+{
+ InsertValueUnlocked(tv, 0);
+
+ if (span > m_Slots.size())
+ span = m_Slots.size();
+
+ int off = m_TimeValue % m_Slots.size();
+ int sum = 0;
+ while (span > 0) {
+ sum += m_Slots[off];
+
+ if (off == 0)
+ off = m_Slots.size();
+
+ off--;
+ span--;
+ }
+
+ return sum;
+}
+
+double RingBuffer::CalculateRate(RingBuffer::SizeType tv, RingBuffer::SizeType span)
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ int sum = UpdateAndGetValuesUnlocked(tv, span);
+ return sum / static_cast<double>(std::min(span, m_InsertedValues));
+}
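
A small hedged usage sketch of the sliding-window arithmetic above (timestamps and counts are invented; it assumes the accompanying header is on the include path):

    #include "base/ringbuffer.hpp"
    #include <iostream>

    using namespace icinga;

    int main()
    {
        RingBuffer rb(60);          // one slot per second, one minute of history

        rb.InsertValue(100, 2);     // 2 events at t=100
        rb.InsertValue(101, 3);     // 3 events at t=101
        rb.InsertValue(130, 1);     // 1 event at t=130; slots for t=102..129 are zeroed on the way

        // Sum over the last 60 slots at t=130: 2 + 3 + 1
        std::cout << rb.UpdateAndGetValues(130, 60) << "\n";

        // Average per slot, divided by the number of slots actually filled so far
        std::cout << rb.CalculateRate(130, 60) << "\n";
        return 0;
    }
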
diff --git a/lib/base/ringbuffer.hpp b/lib/base/ringbuffer.hpp
new file mode 100644
index 0000000..9fbef53
--- /dev/null
+++ b/lib/base/ringbuffer.hpp
@@ -0,0 +1,45 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef RINGBUFFER_H
+#define RINGBUFFER_H
+
+#include "base/i2-base.hpp"
+#include "base/object.hpp"
+#include <vector>
+#include <mutex>
+
+namespace icinga
+{
+
+/**
+ * A ring buffer that holds a pre-defined number of integers.
+ *
+ * @ingroup base
+ */
+class RingBuffer final
+{
+public:
+ DECLARE_PTR_TYPEDEFS(RingBuffer);
+
+ typedef std::vector<int>::size_type SizeType;
+
+ RingBuffer(SizeType slots);
+
+ SizeType GetLength() const;
+ void InsertValue(SizeType tv, int num);
+ int UpdateAndGetValues(SizeType tv, SizeType span);
+ double CalculateRate(SizeType tv, SizeType span);
+
+private:
+ mutable std::mutex m_Mutex;
+ std::vector<int> m_Slots;
+ SizeType m_TimeValue;
+ SizeType m_InsertedValues;
+
+ void InsertValueUnlocked(SizeType tv, int num);
+ int UpdateAndGetValuesUnlocked(SizeType tv, SizeType span);
+};
+
+}
+
+#endif /* RINGBUFFER_H */
diff --git a/lib/base/scriptframe.cpp b/lib/base/scriptframe.cpp
new file mode 100644
index 0000000..7a7f44c
--- /dev/null
+++ b/lib/base/scriptframe.cpp
@@ -0,0 +1,130 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/scriptframe.hpp"
+#include "base/scriptglobal.hpp"
+#include "base/namespace.hpp"
+#include "base/exception.hpp"
+#include "base/configuration.hpp"
+#include "base/utility.hpp"
+
+using namespace icinga;
+
+boost::thread_specific_ptr<std::stack<ScriptFrame *> > ScriptFrame::m_ScriptFrames;
+
+static Namespace::Ptr l_SystemNS, l_StatsNS;
+
+/* Ensure that this gets called with highest priority
+ * and wins against other static initializers in lib/icinga, etc.
+ * LTO-enabled builds will cause trouble otherwise, see GH #6575.
+ */
+INITIALIZE_ONCE_WITH_PRIORITY([]() {
+ Namespace::Ptr globalNS = ScriptGlobal::GetGlobals();
+
+ l_SystemNS = new Namespace(true);
+ l_SystemNS->Set("PlatformKernel", Utility::GetPlatformKernel());
+ l_SystemNS->Set("PlatformKernelVersion", Utility::GetPlatformKernelVersion());
+ l_SystemNS->Set("PlatformName", Utility::GetPlatformName());
+ l_SystemNS->Set("PlatformVersion", Utility::GetPlatformVersion());
+ l_SystemNS->Set("PlatformArchitecture", Utility::GetPlatformArchitecture());
+ l_SystemNS->Set("BuildHostName", ICINGA_BUILD_HOST_NAME);
+ l_SystemNS->Set("BuildCompilerName", ICINGA_BUILD_COMPILER_NAME);
+ l_SystemNS->Set("BuildCompilerVersion", ICINGA_BUILD_COMPILER_VERSION);
+ globalNS->Set("System", l_SystemNS, true);
+
+ l_SystemNS->Set("Configuration", new Configuration());
+
+ l_StatsNS = new Namespace(true);
+ globalNS->Set("StatsFunctions", l_StatsNS, true);
+
+ globalNS->Set("Internal", new Namespace(), true);
+}, InitializePriority::CreateNamespaces);
+
+INITIALIZE_ONCE_WITH_PRIORITY([]() {
+ l_SystemNS->Freeze();
+ l_StatsNS->Freeze();
+}, InitializePriority::FreezeNamespaces);
+
+ScriptFrame::ScriptFrame(bool allocLocals)
+ : Locals(allocLocals ? new Dictionary() : nullptr), Self(ScriptGlobal::GetGlobals()), Sandboxed(false), Depth(0)
+{
+ InitializeFrame();
+}
+
+ScriptFrame::ScriptFrame(bool allocLocals, Value self)
+ : Locals(allocLocals ? new Dictionary() : nullptr), Self(std::move(self)), Sandboxed(false), Depth(0)
+{
+ InitializeFrame();
+}
+
+void ScriptFrame::InitializeFrame()
+{
+ std::stack<ScriptFrame *> *frames = m_ScriptFrames.get();
+
+ if (frames && !frames->empty()) {
+ ScriptFrame *frame = frames->top();
+
+ Sandboxed = frame->Sandboxed;
+ }
+
+ PushFrame(this);
+}
+
+ScriptFrame::~ScriptFrame()
+{
+ ScriptFrame *frame = PopFrame();
+ ASSERT(frame == this);
+
+#ifndef I2_DEBUG
+ (void)frame;
+#endif /* I2_DEBUG */
+}
+
+void ScriptFrame::IncreaseStackDepth()
+{
+ if (Depth + 1 > 300)
+ BOOST_THROW_EXCEPTION(ScriptError("Stack overflow while evaluating expression: Recursion level too deep."));
+
+ Depth++;
+}
+
+void ScriptFrame::DecreaseStackDepth()
+{
+ Depth--;
+}
+
+ScriptFrame *ScriptFrame::GetCurrentFrame()
+{
+ std::stack<ScriptFrame *> *frames = m_ScriptFrames.get();
+
+ ASSERT(!frames->empty());
+ return frames->top();
+}
+
+ScriptFrame *ScriptFrame::PopFrame()
+{
+ std::stack<ScriptFrame *> *frames = m_ScriptFrames.get();
+
+ ASSERT(!frames->empty());
+
+ ScriptFrame *frame = frames->top();
+ frames->pop();
+
+ return frame;
+}
+
+void ScriptFrame::PushFrame(ScriptFrame *frame)
+{
+ std::stack<ScriptFrame *> *frames = m_ScriptFrames.get();
+
+ if (!frames) {
+ frames = new std::stack<ScriptFrame *>();
+ m_ScriptFrames.reset(frames);
+ }
+
+ if (!frames->empty()) {
+ ScriptFrame *parent = frames->top();
+ frame->Depth += parent->Depth;
+ }
+
+ frames->push(frame);
+}
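
The frame bookkeeping above maintains a per-thread stack so that nested evaluations can find their enclosing frame. The same idea in a standalone sketch using C++11 thread_local instead of boost::thread_specific_ptr (illustrative only, not the Icinga implementation; the depth rule here is simplified to +1 per nesting level):

    #include <stack>
    #include <cassert>

    struct Frame
    {
        int Depth = 0;

        Frame()
        {
            if (!Frames().empty())
                Depth = Frames().top()->Depth + 1;   // inherit depth from the enclosing frame
            Frames().push(this);
        }

        ~Frame()
        {
            assert(Frames().top() == this);
            Frames().pop();
        }

        static Frame *Current() { return Frames().top(); }

    private:
        static std::stack<Frame *>& Frames()
        {
            thread_local std::stack<Frame *> frames;     // one stack per thread
            return frames;
        }
    };

    int main()
    {
        Frame outer;
        {
            Frame inner;
            assert(Frame::Current() == &inner && inner.Depth == 1);
        }
        assert(Frame::Current() == &outer && outer.Depth == 0);
        return 0;
    }
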
diff --git a/lib/base/scriptframe.hpp b/lib/base/scriptframe.hpp
new file mode 100644
index 0000000..18e23ef
--- /dev/null
+++ b/lib/base/scriptframe.hpp
@@ -0,0 +1,42 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef SCRIPTFRAME_H
+#define SCRIPTFRAME_H
+
+#include "base/i2-base.hpp"
+#include "base/dictionary.hpp"
+#include "base/array.hpp"
+#include <boost/thread/tss.hpp>
+#include <stack>
+
+namespace icinga
+{
+
+struct ScriptFrame
+{
+ Dictionary::Ptr Locals;
+ Value Self;
+ bool Sandboxed;
+ int Depth;
+
+ ScriptFrame(bool allocLocals);
+ ScriptFrame(bool allocLocals, Value self);
+ ~ScriptFrame();
+
+ void IncreaseStackDepth();
+ void DecreaseStackDepth();
+
+ static ScriptFrame *GetCurrentFrame();
+
+private:
+ static boost::thread_specific_ptr<std::stack<ScriptFrame *> > m_ScriptFrames;
+
+ static void PushFrame(ScriptFrame *frame);
+ static ScriptFrame *PopFrame();
+
+ void InitializeFrame();
+};
+
+}
+
+#endif /* SCRIPTFRAME_H */
diff --git a/lib/base/scriptglobal.cpp b/lib/base/scriptglobal.cpp
new file mode 100644
index 0000000..e85e9ec
--- /dev/null
+++ b/lib/base/scriptglobal.cpp
@@ -0,0 +1,110 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/atomic-file.hpp"
+#include "base/scriptglobal.hpp"
+#include "base/singleton.hpp"
+#include "base/logger.hpp"
+#include "base/stdiostream.hpp"
+#include "base/netstring.hpp"
+#include "base/json.hpp"
+#include "base/convert.hpp"
+#include "base/objectlock.hpp"
+#include "base/exception.hpp"
+#include "base/namespace.hpp"
+#include "base/utility.hpp"
+#include <fstream>
+
+using namespace icinga;
+
+Namespace::Ptr ScriptGlobal::m_Globals = new Namespace();
+
+Value ScriptGlobal::Get(const String& name, const Value *defaultValue)
+{
+ Value result;
+
+ if (!m_Globals->Get(name, &result)) {
+ if (defaultValue)
+ return *defaultValue;
+
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Tried to access undefined script variable '" + name + "'"));
+ }
+
+ return result;
+}
+
+void ScriptGlobal::Set(const String& name, const Value& value)
+{
+ std::vector<String> tokens = name.Split(".");
+
+ if (tokens.empty())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Name must not be empty"));
+
+ {
+ ObjectLock olock(m_Globals);
+
+ Namespace::Ptr parent = m_Globals;
+
+ for (std::vector<String>::size_type i = 0; i < tokens.size(); i++) {
+ const String& token = tokens[i];
+
+ if (i + 1 != tokens.size()) {
+ Value vparent;
+
+ if (!parent->Get(token, &vparent)) {
+ Namespace::Ptr dict = new Namespace();
+ parent->Set(token, dict);
+ parent = dict;
+ } else {
+ parent = vparent;
+ }
+ }
+ }
+
+ parent->Set(tokens[tokens.size() - 1], value);
+ }
+}
+
+void ScriptGlobal::SetConst(const String& name, const Value& value)
+{
+ GetGlobals()->Set(name, value, true);
+}
+
+bool ScriptGlobal::Exists(const String& name)
+{
+ return m_Globals->Contains(name);
+}
+
+Namespace::Ptr ScriptGlobal::GetGlobals()
+{
+ return m_Globals;
+}
+
+void ScriptGlobal::WriteToFile(const String& filename)
+{
+ Log(LogInformation, "ScriptGlobal")
+ << "Dumping variables to file '" << filename << "'";
+
+ AtomicFile fp (filename, 0600);
+ StdioStream::Ptr sfp = new StdioStream(&fp, false);
+
+ ObjectLock olock(m_Globals);
+ for (const Namespace::Pair& kv : m_Globals) {
+ Value value = kv.second.Val;
+
+ if (value.IsObject())
+ value = Convert::ToString(value);
+
+ Dictionary::Ptr persistentVariable = new Dictionary({
+ { "name", kv.first },
+ { "value", value }
+ });
+
+ String json = JsonEncode(persistentVariable);
+
+ NetString::WriteStringToStream(sfp, json);
+ }
+
+ sfp->Close();
+ fp.Commit();
+}
+
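
WriteToFile above serializes each variable as one JSON document and writes it through NetString, which frames every record with a length prefix so the dump can later be re-read record by record. For comparison, a standalone sketch of the classic netstring encoding ("<decimal length>:<payload>,"); this is not claimed to be byte-for-byte identical to Icinga's NetString output:

    #include <iostream>
    #include <sstream>
    #include <string>

    // Frame a payload as a classic netstring: "<decimal length>:<payload>,"
    static std::string ToNetString(const std::string& payload)
    {
        std::ostringstream out;
        out << payload.size() << ':' << payload << ',';
        return out.str();
    }

    int main()
    {
        std::cout << ToNetString(R"({"name":"NodeName","value":"icinga2-master"})") << "\n";
        // prints: 44:{"name":"NodeName","value":"icinga2-master"},
        return 0;
    }
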
diff --git a/lib/base/scriptglobal.hpp b/lib/base/scriptglobal.hpp
new file mode 100644
index 0000000..f349b7b
--- /dev/null
+++ b/lib/base/scriptglobal.hpp
@@ -0,0 +1,35 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef SCRIPTGLOBAL_H
+#define SCRIPTGLOBAL_H
+
+#include "base/i2-base.hpp"
+#include "base/namespace.hpp"
+
+namespace icinga
+{
+
+/**
+ * Global script variables.
+ *
+ * @ingroup base
+ */
+class ScriptGlobal
+{
+public:
+ static Value Get(const String& name, const Value *defaultValue = nullptr);
+ static void Set(const String& name, const Value& value);
+ static void SetConst(const String& name, const Value& value);
+ static bool Exists(const String& name);
+
+ static void WriteToFile(const String& filename);
+
+ static Namespace::Ptr GetGlobals();
+
+private:
+ static Namespace::Ptr m_Globals;
+};
+
+}
+
+#endif /* SCRIPTGLOBAL_H */
diff --git a/lib/base/scriptutils.cpp b/lib/base/scriptutils.cpp
new file mode 100644
index 0000000..7fe856d
--- /dev/null
+++ b/lib/base/scriptutils.cpp
@@ -0,0 +1,570 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/scriptutils.hpp"
+#include "base/function.hpp"
+#include "base/scriptframe.hpp"
+#include "base/exception.hpp"
+#include "base/utility.hpp"
+#include "base/convert.hpp"
+#include "base/json.hpp"
+#include "base/logger.hpp"
+#include "base/objectlock.hpp"
+#include "base/configtype.hpp"
+#include "base/application.hpp"
+#include "base/dependencygraph.hpp"
+#include "base/initialize.hpp"
+#include "base/namespace.hpp"
+#include "config/configitem.hpp"
+#include <boost/regex.hpp>
+#include <algorithm>
+#include <set>
+#ifdef _WIN32
+#include <msi.h>
+#endif /* _WIN32 */
+
+using namespace icinga;
+
+REGISTER_SAFE_FUNCTION(System, regex, &ScriptUtils::Regex, "pattern:text:mode");
+REGISTER_SAFE_FUNCTION(System, match, &ScriptUtils::Match, "pattern:text:mode");
+REGISTER_SAFE_FUNCTION(System, cidr_match, &ScriptUtils::CidrMatch, "pattern:ip:mode");
+REGISTER_SAFE_FUNCTION(System, len, &ScriptUtils::Len, "value");
+REGISTER_SAFE_FUNCTION(System, union, &ScriptUtils::Union, "");
+REGISTER_SAFE_FUNCTION(System, intersection, &ScriptUtils::Intersection, "");
+REGISTER_FUNCTION(System, log, &ScriptUtils::Log, "severity:facility:value");
+REGISTER_FUNCTION(System, range, &ScriptUtils::Range, "start:end:increment");
+REGISTER_FUNCTION(System, exit, &Application::Exit, "status");
+REGISTER_SAFE_FUNCTION(System, typeof, &ScriptUtils::TypeOf, "value");
+REGISTER_SAFE_FUNCTION(System, keys, &ScriptUtils::Keys, "value");
+REGISTER_SAFE_FUNCTION(System, random, &Utility::Random, "");
+REGISTER_SAFE_FUNCTION(System, get_template, &ScriptUtils::GetTemplate, "type:name");
+REGISTER_SAFE_FUNCTION(System, get_templates, &ScriptUtils::GetTemplates, "type");
+REGISTER_SAFE_FUNCTION(System, get_object, &ScriptUtils::GetObject, "type:name");
+REGISTER_SAFE_FUNCTION(System, get_objects, &ScriptUtils::GetObjects, "type");
+REGISTER_FUNCTION(System, assert, &ScriptUtils::Assert, "value");
+REGISTER_SAFE_FUNCTION(System, string, &ScriptUtils::CastString, "value");
+REGISTER_SAFE_FUNCTION(System, number, &ScriptUtils::CastNumber, "value");
+REGISTER_SAFE_FUNCTION(System, bool, &ScriptUtils::CastBool, "value");
+REGISTER_SAFE_FUNCTION(System, get_time, &Utility::GetTime, "");
+REGISTER_SAFE_FUNCTION(System, basename, &Utility::BaseName, "path");
+REGISTER_SAFE_FUNCTION(System, dirname, &Utility::DirName, "path");
+REGISTER_SAFE_FUNCTION(System, getenv, &ScriptUtils::GetEnv, "value");
+REGISTER_SAFE_FUNCTION(System, msi_get_component_path, &ScriptUtils::MsiGetComponentPathShim, "component");
+REGISTER_SAFE_FUNCTION(System, track_parents, &ScriptUtils::TrackParents, "child");
+REGISTER_SAFE_FUNCTION(System, escape_shell_cmd, &Utility::EscapeShellCmd, "cmd");
+REGISTER_SAFE_FUNCTION(System, escape_shell_arg, &Utility::EscapeShellArg, "arg");
+#ifdef _WIN32
+REGISTER_SAFE_FUNCTION(System, escape_create_process_arg, &Utility::EscapeCreateProcessArg, "arg");
+#endif /* _WIN32 */
+REGISTER_FUNCTION(System, ptr, &ScriptUtils::Ptr, "object");
+REGISTER_FUNCTION(System, sleep, &Utility::Sleep, "interval");
+REGISTER_FUNCTION(System, path_exists, &Utility::PathExists, "path");
+REGISTER_FUNCTION(System, glob, &ScriptUtils::Glob, "pathspec:callback:type");
+REGISTER_FUNCTION(System, glob_recursive, &ScriptUtils::GlobRecursive, "pathspec:callback:type");
+
+INITIALIZE_ONCE(&ScriptUtils::StaticInitialize);
+
+enum MatchType
+{
+ MatchAll,
+ MatchAny
+};
+
+void ScriptUtils::StaticInitialize()
+{
+ ScriptGlobal::Set("System.MatchAll", MatchAll);
+ ScriptGlobal::Set("System.MatchAny", MatchAny);
+
+ ScriptGlobal::Set("System.GlobFile", GlobFile);
+ ScriptGlobal::Set("System.GlobDirectory", GlobDirectory);
+}
+
+String ScriptUtils::CastString(const Value& value)
+{
+ return value;
+}
+
+double ScriptUtils::CastNumber(const Value& value)
+{
+ return value;
+}
+
+bool ScriptUtils::CastBool(const Value& value)
+{
+ return value.ToBool();
+}
+
+bool ScriptUtils::Regex(const std::vector<Value>& args)
+{
+ if (args.size() < 2)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Regular expression and text must be specified for regex()."));
+
+ String pattern = args[0];
+ const Value& argTexts = args[1];
+
+ if (argTexts.IsObjectType<Dictionary>())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Dictionaries are not supported by regex()."));
+
+ MatchType mode;
+
+ if (args.size() > 2)
+ mode = static_cast<MatchType>(static_cast<int>(args[2]));
+ else
+ mode = MatchAll;
+
+ boost::regex expr(pattern.GetData());
+
+ Array::Ptr texts;
+
+ if (argTexts.IsObject())
+ texts = argTexts;
+
+ if (texts) {
+ ObjectLock olock(texts);
+
+ if (texts->GetLength() == 0)
+ return false;
+
+ for (const String& text : texts) {
+ bool res = false;
+ try {
+ boost::smatch what;
+ res = boost::regex_search(text.GetData(), what, expr);
+ } catch (boost::exception&) {
+ res = false; /* exception means something went terribly wrong */
+ }
+
+ if (mode == MatchAny && res)
+ return true;
+ else if (mode == MatchAll && !res)
+ return false;
+ }
+
+ /* MatchAny: Nothing matched. MatchAll: Everything matched. */
+ return mode == MatchAll;
+ } else {
+ String text = argTexts;
+ boost::smatch what;
+ return boost::regex_search(text.GetData(), what, expr);
+ }
+}
+
+bool ScriptUtils::Match(const std::vector<Value>& args)
+{
+ if (args.size() < 2)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Pattern and text must be specified for match()."));
+
+ String pattern = args[0];
+ const Value& argTexts = args[1];
+
+ if (argTexts.IsObjectType<Dictionary>())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Dictionaries are not supported by match()."));
+
+ MatchType mode;
+
+ if (args.size() > 2)
+ mode = static_cast<MatchType>(static_cast<int>(args[2]));
+ else
+ mode = MatchAll;
+
+ Array::Ptr texts;
+
+ if (argTexts.IsObject())
+ texts = argTexts;
+
+ if (texts) {
+ ObjectLock olock(texts);
+
+ if (texts->GetLength() == 0)
+ return false;
+
+ for (const String& text : texts) {
+ bool res = Utility::Match(pattern, text);
+
+ if (mode == MatchAny && res)
+ return true;
+ else if (mode == MatchAll && !res)
+ return false;
+ }
+
+ /* MatchAny: Nothing matched. MatchAll: Everything matched. */
+ return mode == MatchAll;
+ } else {
+ String text = argTexts;
+ return Utility::Match(pattern, text);
+ }
+}
+
+bool ScriptUtils::CidrMatch(const std::vector<Value>& args)
+{
+ if (args.size() < 2)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("CIDR and IP address must be specified for cidr_match()."));
+
+ String pattern = args[0];
+ const Value& argIps = args[1];
+
+ if (argIps.IsObjectType<Dictionary>())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Dictionaries are not supported by cidr_match()."));
+
+ MatchType mode;
+
+ if (args.size() > 2)
+ mode = static_cast<MatchType>(static_cast<int>(args[2]));
+ else
+ mode = MatchAll;
+
+ Array::Ptr ips;
+
+ if (argIps.IsObject())
+ ips = argIps;
+
+ if (ips) {
+ ObjectLock olock(ips);
+
+ if (ips->GetLength() == 0)
+ return false;
+
+ for (const String& ip : ips) {
+ bool res = Utility::CidrMatch(pattern, ip);
+
+ if (mode == MatchAny && res)
+ return true;
+ else if (mode == MatchAll && !res)
+ return false;
+ }
+
+ /* MatchAny: Nothing matched. MatchAll: Everything matched. */
+ return mode == MatchAll;
+ } else {
+ String ip = argIps;
+ return Utility::CidrMatch(pattern, ip);
+ }
+}
+
+double ScriptUtils::Len(const Value& value)
+{
+ if (value.IsObjectType<Dictionary>()) {
+ Dictionary::Ptr dict = value;
+ return dict->GetLength();
+ } else if (value.IsObjectType<Array>()) {
+ Array::Ptr array = value;
+ return array->GetLength();
+ } else if (value.IsString()) {
+ return Convert::ToString(value).GetLength();
+ } else {
+ return 0;
+ }
+}
+
+Array::Ptr ScriptUtils::Union(const std::vector<Value>& arguments)
+{
+ std::set<Value> values;
+
+ for (const Value& varr : arguments) {
+ Array::Ptr arr = varr;
+
+ if (arr) {
+ ObjectLock olock(arr);
+ for (const Value& value : arr) {
+ values.insert(value);
+ }
+ }
+ }
+
+ return Array::FromSet(values);
+}
+
+Array::Ptr ScriptUtils::Intersection(const std::vector<Value>& arguments)
+{
+ if (arguments.size() == 0)
+ return new Array();
+
+ Array::Ptr result = new Array();
+
+ Array::Ptr arg1 = arguments[0];
+
+ if (!arg1)
+ return result;
+
+ Array::Ptr arr1 = arg1->ShallowClone();
+
+ for (std::vector<Value>::size_type i = 1; i < arguments.size(); i++) {
+ {
+ ObjectLock olock(arr1);
+ std::sort(arr1->Begin(), arr1->End());
+ }
+
+ Array::Ptr arg2 = arguments[i];
+
+ if (!arg2)
+ return result;
+
+ Array::Ptr arr2 = arg2->ShallowClone();
+ {
+ ObjectLock olock(arr2);
+ std::sort(arr2->Begin(), arr2->End());
+ }
+
+ result->Resize(std::max(arr1->GetLength(), arr2->GetLength()));
+ Array::SizeType len;
+ {
+ ObjectLock olock(arr1), xlock(arr2), ylock(result);
+ auto it = std::set_intersection(arr1->Begin(), arr1->End(), arr2->Begin(), arr2->End(), result->Begin());
+ len = it - result->Begin();
+ }
+ result->Resize(len);
+ arr1 = result;
+ }
+
+ return result;
+}
+
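
Intersection() above repeatedly sorts two arrays and intersects them into the running result. The core standard-library step, shown standalone on plain vectors:

    #include <algorithm>
    #include <iostream>
    #include <vector>

    int main()
    {
        std::vector<int> a{ 3, 1, 4, 1, 5 };
        std::vector<int> b{ 5, 3, 9 };

        std::sort(a.begin(), a.end());            // set_intersection requires sorted input
        std::sort(b.begin(), b.end());

        std::vector<int> out(std::max(a.size(), b.size()));
        auto end = std::set_intersection(a.begin(), a.end(), b.begin(), b.end(), out.begin());
        out.resize(end - out.begin());            // shrink to the actual number of matches

        for (int v : out)
            std::cout << v << ' ';                // prints: 3 5
        std::cout << "\n";
        return 0;
    }
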
+void ScriptUtils::Log(const std::vector<Value>& arguments)
+{
+ if (arguments.size() != 1 && arguments.size() != 3)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid number of arguments for log()"));
+
+ LogSeverity severity;
+ String facility;
+ Value message;
+
+ if (arguments.size() == 1) {
+ severity = LogInformation;
+ facility = "config";
+ message = arguments[0];
+ } else {
+ auto sval = static_cast<int>(arguments[0]);
+ severity = static_cast<LogSeverity>(sval);
+ facility = arguments[1];
+ message = arguments[2];
+ }
+
+ if (message.IsString() || (!message.IsObjectType<Array>() && !message.IsObjectType<Dictionary>()))
+ ::Log(severity, facility, message);
+ else
+ ::Log(severity, facility, JsonEncode(message));
+}
+
+Array::Ptr ScriptUtils::Range(const std::vector<Value>& arguments)
+{
+ double start, end, increment;
+
+ switch (arguments.size()) {
+ case 1:
+ start = 0;
+ end = arguments[0];
+ increment = 1;
+ break;
+ case 2:
+ start = arguments[0];
+ end = arguments[1];
+ increment = 1;
+ break;
+ case 3:
+ start = arguments[0];
+ end = arguments[1];
+ increment = arguments[2];
+ break;
+ default:
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid number of arguments for range()"));
+ }
+
+ ArrayData result;
+
+ if ((start < end && increment <= 0) ||
+ (start > end && increment >= 0))
+ return new Array();
+
+ for (double i = start; (increment > 0 ? i < end : i > end); i += increment)
+ result.push_back(i);
+
+ return new Array(std::move(result));
+}
+
+Type::Ptr ScriptUtils::TypeOf(const Value& value)
+{
+ return value.GetReflectionType();
+}
+
+Array::Ptr ScriptUtils::Keys(const Object::Ptr& obj)
+{
+ ArrayData result;
+
+ Dictionary::Ptr dict = dynamic_pointer_cast<Dictionary>(obj);
+
+ if (dict) {
+ ObjectLock olock(dict);
+ for (const Dictionary::Pair& kv : dict) {
+ result.push_back(kv.first);
+ }
+ }
+
+ Namespace::Ptr ns = dynamic_pointer_cast<Namespace>(obj);
+
+ if (ns) {
+ ObjectLock olock(ns);
+ for (const Namespace::Pair& kv : ns) {
+ result.push_back(kv.first);
+ }
+ }
+
+ return new Array(std::move(result));
+}
+
+static Dictionary::Ptr GetTargetForTemplate(const ConfigItem::Ptr& item)
+{
+ DebugInfo di = item->GetDebugInfo();
+
+ return new Dictionary({
+ { "name", item->GetName() },
+ { "type", item->GetType()->GetName() },
+ { "location", new Dictionary({
+ { "path", di.Path },
+ { "first_line", di.FirstLine },
+ { "first_column", di.FirstColumn },
+ { "last_line", di.LastLine },
+ { "last_column", di.LastColumn }
+ }) }
+ });
+}
+
+Dictionary::Ptr ScriptUtils::GetTemplate(const Value& vtype, const String& name)
+{
+ Type::Ptr ptype;
+
+ if (vtype.IsObjectType<Type>())
+ ptype = vtype;
+ else
+ ptype = Type::GetByName(vtype);
+
+ ConfigItem::Ptr item = ConfigItem::GetByTypeAndName(ptype, name);
+
+ if (!item || !item->IsAbstract())
+ return nullptr;
+
+ return GetTargetForTemplate(item);
+}
+
+Array::Ptr ScriptUtils::GetTemplates(const Type::Ptr& type)
+{
+ if (!type)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid type: Must not be null"));
+
+ ArrayData result;
+
+ for (const ConfigItem::Ptr& item : ConfigItem::GetItems(type)) {
+ if (item->IsAbstract())
+ result.push_back(GetTargetForTemplate(item));
+ }
+
+ return new Array(std::move(result));
+}
+
+ConfigObject::Ptr ScriptUtils::GetObject(const Value& vtype, const String& name)
+{
+ Type::Ptr ptype;
+
+ if (vtype.IsObjectType<Type>())
+ ptype = vtype;
+ else
+ ptype = Type::GetByName(vtype);
+
+ auto *ctype = dynamic_cast<ConfigType *>(ptype.get());
+
+ if (!ctype)
+ return nullptr;
+
+ return ctype->GetObject(name);
+}
+
+Array::Ptr ScriptUtils::GetObjects(const Type::Ptr& type)
+{
+ if (!type)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid type: Must not be null"));
+
+ auto *ctype = dynamic_cast<ConfigType *>(type.get());
+
+ if (!ctype)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid type: Type must inherit from 'ConfigObject'"));
+
+ ArrayData result;
+
+ for (const ConfigObject::Ptr& object : ctype->GetObjects())
+ result.push_back(object);
+
+ return new Array(std::move(result));
+}
+
+void ScriptUtils::Assert(const Value& arg)
+{
+ if (!arg.ToBool())
+ BOOST_THROW_EXCEPTION(std::runtime_error("Assertion failed"));
+}
+
+String ScriptUtils::MsiGetComponentPathShim(const String& component)
+{
+#ifdef _WIN32
+ TCHAR productCode[39];
+ if (MsiGetProductCode(component.CStr(), productCode) != ERROR_SUCCESS)
+ return "";
+ TCHAR path[2048];
+ DWORD szPath = sizeof(path);
+ path[0] = '\0';
+ MsiGetComponentPath(productCode, component.CStr(), path, &szPath);
+ return path;
+#else /* _WIN32 */
+ return String();
+#endif /* _WIN32 */
+}
+
+Array::Ptr ScriptUtils::TrackParents(const Object::Ptr& child)
+{
+ return Array::FromVector(DependencyGraph::GetParents(child));
+}
+
+double ScriptUtils::Ptr(const Object::Ptr& object)
+{
+ return reinterpret_cast<intptr_t>(object.get());
+}
+
+Value ScriptUtils::Glob(const std::vector<Value>& args)
+{
+ if (args.size() < 1)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Path must be specified."));
+
+ String pathSpec = args[0];
+ int type = GlobFile | GlobDirectory;
+
+ if (args.size() > 1)
+ type = args[1];
+
+ std::vector<String> paths;
+ Utility::Glob(pathSpec, [&paths](const String& path) { paths.push_back(path); }, type);
+
+ return Array::FromVector(paths);
+}
+
+Value ScriptUtils::GlobRecursive(const std::vector<Value>& args)
+{
+ if (args.size() < 2)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Path and pattern must be specified."));
+
+ String path = args[0];
+ String pattern = args[1];
+
+ int type = GlobFile | GlobDirectory;
+
+ if (args.size() > 2)
+ type = args[2];
+
+ std::vector<String> paths;
+ Utility::GlobRecursive(path, pattern, [&paths](const String& newPath) { paths.push_back(newPath); }, type);
+
+ return Array::FromVector(paths);
+}
+
+String ScriptUtils::GetEnv(const String& key)
+{
+ return Utility::GetFromEnvironment(key);
+}
diff --git a/lib/base/scriptutils.hpp b/lib/base/scriptutils.hpp
new file mode 100644
index 0000000..7bd3e8b
--- /dev/null
+++ b/lib/base/scriptutils.hpp
@@ -0,0 +1,54 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef SCRIPTUTILS_H
+#define SCRIPTUTILS_H
+
+#include "base/i2-base.hpp"
+#include "base/string.hpp"
+#include "base/array.hpp"
+#include "base/dictionary.hpp"
+#include "base/type.hpp"
+#include "base/configobject.hpp"
+
+namespace icinga
+{
+
+/**
+ * @ingroup base
+ */
+class ScriptUtils
+{
+public:
+ static void StaticInitialize();
+ static String CastString(const Value& value);
+ static double CastNumber(const Value& value);
+ static bool CastBool(const Value& value);
+ static bool Regex(const std::vector<Value>& args);
+ static bool Match(const std::vector<Value>& args);
+ static bool CidrMatch(const std::vector<Value>& args);
+ static double Len(const Value& value);
+ static Array::Ptr Union(const std::vector<Value>& arguments);
+ static Array::Ptr Intersection(const std::vector<Value>& arguments);
+ static void Log(const std::vector<Value>& arguments);
+ static Array::Ptr Range(const std::vector<Value>& arguments);
+ static Type::Ptr TypeOf(const Value& value);
+ static Array::Ptr Keys(const Object::Ptr& obj);
+ static Dictionary::Ptr GetTemplate(const Value& vtype, const String& name);
+ static Array::Ptr GetTemplates(const Type::Ptr& type);
+ static ConfigObject::Ptr GetObject(const Value& type, const String& name);
+ static Array::Ptr GetObjects(const Type::Ptr& type);
+ static void Assert(const Value& arg);
+ static String MsiGetComponentPathShim(const String& component);
+ static Array::Ptr TrackParents(const Object::Ptr& parent);
+ static double Ptr(const Object::Ptr& object);
+ static Value Glob(const std::vector<Value>& args);
+ static Value GlobRecursive(const std::vector<Value>& args);
+ static String GetEnv(const String& key);
+
+private:
+ ScriptUtils();
+};
+
+}
+
+#endif /* SCRIPTUTILS_H */
diff --git a/lib/base/serializer.cpp b/lib/base/serializer.cpp
new file mode 100644
index 0000000..b8b140a
--- /dev/null
+++ b/lib/base/serializer.cpp
@@ -0,0 +1,331 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/serializer.hpp"
+#include "base/type.hpp"
+#include "base/application.hpp"
+#include "base/objectlock.hpp"
+#include "base/convert.hpp"
+#include "base/exception.hpp"
+#include "base/namespace.hpp"
+#include <boost/algorithm/string/join.hpp>
+#include <deque>
+#include <utility>
+
+using namespace icinga;
+
+struct SerializeStackEntry
+{
+ String Name;
+ Value Val;
+};
+
+CircularReferenceError::CircularReferenceError(String message, std::vector<String> path)
+ : m_Message(message), m_Path(path)
+{ }
+
+const char *CircularReferenceError::what(void) const throw()
+{
+ return m_Message.CStr();
+}
+
+std::vector<String> CircularReferenceError::GetPath() const
+{
+ return m_Path;
+}
+
+struct SerializeStack
+{
+ std::deque<SerializeStackEntry> Entries;
+
+ inline void Push(const String& name, const Value& val)
+ {
+ Object::Ptr obj;
+
+ if (val.IsObject())
+ obj = val;
+
+ if (obj) {
+ for (const auto& entry : Entries) {
+ if (entry.Val == obj) {
+ std::vector<String> path;
+ for (const auto& entry : Entries)
+ path.push_back(entry.Name);
+ path.push_back(name);
+ BOOST_THROW_EXCEPTION(CircularReferenceError("Cannot serialize object which recursively refers to itself. Attribute path which leads to the cycle: " + boost::algorithm::join(path, " -> "), path));
+ }
+ }
+ }
+
+ Entries.push_back({ name, obj });
+ }
+
+ inline void Pop()
+ {
+ Entries.pop_back();
+ }
+};
+
+static Value SerializeInternal(const Value& value, int attributeTypes, SerializeStack& stack, bool dryRun);
+
+static Array::Ptr SerializeArray(const Array::Ptr& input, int attributeTypes, SerializeStack& stack, bool dryRun)
+{
+ ArrayData result;
+
+ if (!dryRun) {
+ result.reserve(input->GetLength());
+ }
+
+ ObjectLock olock(input);
+
+ int index = 0;
+
+ for (const Value& value : input) {
+ stack.Push(Convert::ToString(index), value);
+
+ auto serialized (SerializeInternal(value, attributeTypes, stack, dryRun));
+
+ if (!dryRun) {
+ result.emplace_back(std::move(serialized));
+ }
+
+ stack.Pop();
+ index++;
+ }
+
+ return dryRun ? nullptr : new Array(std::move(result));
+}
+
+static Dictionary::Ptr SerializeDictionary(const Dictionary::Ptr& input, int attributeTypes, SerializeStack& stack, bool dryRun)
+{
+ DictionaryData result;
+
+ if (!dryRun) {
+ result.reserve(input->GetLength());
+ }
+
+ ObjectLock olock(input);
+
+ for (const Dictionary::Pair& kv : input) {
+ stack.Push(kv.first, kv.second);
+
+ auto serialized (SerializeInternal(kv.second, attributeTypes, stack, dryRun));
+
+ if (!dryRun) {
+ result.emplace_back(kv.first, std::move(serialized));
+ }
+
+ stack.Pop();
+ }
+
+ return dryRun ? nullptr : new Dictionary(std::move(result));
+}
+
+static Dictionary::Ptr SerializeNamespace(const Namespace::Ptr& input, int attributeTypes, SerializeStack& stack, bool dryRun)
+{
+ DictionaryData result;
+
+ if (!dryRun) {
+ result.reserve(input->GetLength());
+ }
+
+ ObjectLock olock(input);
+
+ for (const Namespace::Pair& kv : input) {
+ Value val = kv.second.Val;
+ stack.Push(kv.first, val);
+
+ auto serialized (SerializeInternal(val, attributeTypes, stack, dryRun));
+
+ if (!dryRun) {
+ result.emplace_back(kv.first, std::move(serialized));
+ }
+
+ stack.Pop();
+ }
+
+ return dryRun ? nullptr : new Dictionary(std::move(result));
+}
+
+static Object::Ptr SerializeObject(const Object::Ptr& input, int attributeTypes, SerializeStack& stack, bool dryRun)
+{
+ Type::Ptr type = input->GetReflectionType();
+
+ if (!type)
+ return nullptr;
+
+ DictionaryData fields;
+
+ if (!dryRun) {
+ fields.reserve(type->GetFieldCount() + 1);
+ }
+
+ ObjectLock olock(input);
+
+ for (int i = 0; i < type->GetFieldCount(); i++) {
+ Field field = type->GetFieldInfo(i);
+
+ if (attributeTypes != 0 && (field.Attributes & attributeTypes) == 0)
+ continue;
+
+ if (strcmp(field.Name, "type") == 0)
+ continue;
+
+ Value value = input->GetField(i);
+ stack.Push(field.Name, value);
+
+ auto serialized (SerializeInternal(value, attributeTypes, stack, dryRun));
+
+ if (!dryRun) {
+ fields.emplace_back(field.Name, std::move(serialized));
+ }
+
+ stack.Pop();
+ }
+
+ if (!dryRun) {
+ fields.emplace_back("type", type->GetName());
+ }
+
+ return dryRun ? nullptr : new Dictionary(std::move(fields));
+}
+
+static Array::Ptr DeserializeArray(const Array::Ptr& input, bool safe_mode, int attributeTypes)
+{
+ ArrayData result;
+
+ result.reserve(input->GetLength());
+
+ ObjectLock olock(input);
+
+ for (const Value& value : input) {
+ result.emplace_back(Deserialize(value, safe_mode, attributeTypes));
+ }
+
+ return new Array(std::move(result));
+}
+
+static Dictionary::Ptr DeserializeDictionary(const Dictionary::Ptr& input, bool safe_mode, int attributeTypes)
+{
+ DictionaryData result;
+
+ result.reserve(input->GetLength());
+
+ ObjectLock olock(input);
+
+ for (const Dictionary::Pair& kv : input) {
+ result.emplace_back(kv.first, Deserialize(kv.second, safe_mode, attributeTypes));
+ }
+
+ return new Dictionary(std::move(result));
+}
+
+static Object::Ptr DeserializeObject(const Object::Ptr& object, const Dictionary::Ptr& input, bool safe_mode, int attributeTypes)
+{
+ if (!object && safe_mode)
+ BOOST_THROW_EXCEPTION(std::runtime_error("Tried to instantiate object while safe mode is enabled."));
+
+ Type::Ptr type;
+
+ if (object)
+ type = object->GetReflectionType();
+ else
+ type = Type::GetByName(input->Get("type"));
+
+ if (!type)
+ return object;
+
+ Object::Ptr instance;
+
+ if (object)
+ instance = object;
+ else
+ instance = type->Instantiate(std::vector<Value>());
+
+ ObjectLock olock(input);
+ for (const Dictionary::Pair& kv : input) {
+ if (kv.first.IsEmpty())
+ continue;
+
+ int fid = type->GetFieldId(kv.first);
+
+ if (fid < 0)
+ continue;
+
+ Field field = type->GetFieldInfo(fid);
+
+ if ((field.Attributes & attributeTypes) == 0)
+ continue;
+
+ try {
+ instance->SetField(fid, Deserialize(kv.second, safe_mode, attributeTypes), true);
+ } catch (const std::exception&) {
+ instance->SetField(fid, Empty);
+ }
+ }
+
+ return instance;
+}
+
+static Value SerializeInternal(const Value& value, int attributeTypes, SerializeStack& stack, bool dryRun)
+{
+ if (!value.IsObject())
+ return dryRun ? Empty : value;
+
+ Object::Ptr input = value;
+
+ Array::Ptr array = dynamic_pointer_cast<Array>(input);
+
+ if (array)
+ return SerializeArray(array, attributeTypes, stack, dryRun);
+
+ Dictionary::Ptr dict = dynamic_pointer_cast<Dictionary>(input);
+
+ if (dict)
+ return SerializeDictionary(dict, attributeTypes, stack, dryRun);
+
+ Namespace::Ptr ns = dynamic_pointer_cast<Namespace>(input);
+
+ if (ns)
+ return SerializeNamespace(ns, attributeTypes, stack, dryRun);
+
+ return SerializeObject(input, attributeTypes, stack, dryRun);
+}
+
+void icinga::AssertNoCircularReferences(const Value& value)
+{
+ SerializeStack stack;
+ SerializeInternal(value, FAConfig, stack, true);
+}
+
+Value icinga::Serialize(const Value& value, int attributeTypes)
+{
+ SerializeStack stack;
+ return SerializeInternal(value, attributeTypes, stack, false);
+}
+
+Value icinga::Deserialize(const Value& value, bool safe_mode, int attributeTypes)
+{
+ return Deserialize(nullptr, value, safe_mode, attributeTypes);
+}
+
+Value icinga::Deserialize(const Object::Ptr& object, const Value& value, bool safe_mode, int attributeTypes)
+{
+ if (!value.IsObject())
+ return value;
+
+ Object::Ptr input = value;
+
+ Array::Ptr array = dynamic_pointer_cast<Array>(input);
+
+ if (array)
+ return DeserializeArray(array, safe_mode, attributeTypes);
+
+ Dictionary::Ptr dict = dynamic_pointer_cast<Dictionary>(input);
+
+ ASSERT(dict);
+
+ if ((safe_mode && !object) || !dict->Contains("type"))
+ return DeserializeDictionary(dict, safe_mode, attributeTypes);
+ else
+ return DeserializeObject(object, dict, safe_mode, attributeTypes);
+}
diff --git a/lib/base/serializer.hpp b/lib/base/serializer.hpp
new file mode 100644
index 0000000..f055b3b
--- /dev/null
+++ b/lib/base/serializer.hpp
@@ -0,0 +1,34 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef SERIALIZER_H
+#define SERIALIZER_H
+
+#include "base/i2-base.hpp"
+#include "base/type.hpp"
+#include "base/value.hpp"
+#include "base/exception.hpp"
+
+namespace icinga
+{
+
+class CircularReferenceError : virtual public user_error
+{
+public:
+ CircularReferenceError(String message, std::vector<String> path);
+
+ const char *what(void) const throw() final;
+ std::vector<String> GetPath() const;
+
+private:
+ String m_Message;
+ std::vector<String> m_Path;
+};
+
+void AssertNoCircularReferences(const Value& value);
+Value Serialize(const Value& value, int attributeTypes = FAState);
+Value Deserialize(const Value& value, bool safe_mode = false, int attributeTypes = FAState);
+Value Deserialize(const Object::Ptr& object, const Value& value, bool safe_mode = false, int attributeTypes = FAState);
+
+}
+
+#endif /* SERIALIZER_H */
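A short, hypothetical round-trip sketch of the serializer API declared above, assuming the usual base/ headers; the dictionary contents are illustrative only:

// Hypothetical sketch, not part of the patch: round-trip a dictionary and
// guard against accidental self-references first.
Dictionary::Ptr data = new Dictionary({ { "address", "192.0.2.10" } });

AssertNoCircularReferences(data);        // throws CircularReferenceError on cycles
Value flat = Serialize(data, FAConfig);  // plain tree of Array/Dictionary/scalars
Value back = Deserialize(flat, true);    // safe_mode: never instantiates ConfigObjects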
diff --git a/lib/base/shared-memory.hpp b/lib/base/shared-memory.hpp
new file mode 100644
index 0000000..dd350c8
--- /dev/null
+++ b/lib/base/shared-memory.hpp
@@ -0,0 +1,45 @@
+/* Icinga 2 | (c) 2023 Icinga GmbH | GPLv2+ */
+
+#pragma once
+
+#include <boost/interprocess/anonymous_shared_memory.hpp>
+#include <utility>
+
+namespace icinga
+{
+
+/**
+ * Type-safe memory shared across fork(2).
+ *
+ * @ingroup base
+ */
+template<class T>
+class SharedMemory
+{
+public:
+ template<class... Args>
+ SharedMemory(Args&&... args) : m_Memory(boost::interprocess::anonymous_shared_memory(sizeof(T)))
+ {
+ new(GetAddress()) T(std::forward<Args>(args)...);
+ }
+
+ SharedMemory(const SharedMemory&) = delete;
+ SharedMemory(SharedMemory&&) = delete;
+ SharedMemory& operator=(const SharedMemory&) = delete;
+ SharedMemory& operator=(SharedMemory&&) = delete;
+
+ inline T& Get() const
+ {
+ return *GetAddress();
+ }
+
+private:
+ inline T* GetAddress() const
+ {
+ return (T*)m_Memory.get_address();
+ }
+
+ boost::interprocess::mapped_region m_Memory;
+};
+
+}
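A hypothetical sketch of the intended fork(2) use, assuming <unistd.h>, <sys/wait.h> and the Atomic wrapper from base/atomic.hpp are available:

// Hypothetical sketch, not part of the patch: a value written by a forked child
// stays visible to the parent because the region is shared rather than copied.
SharedMemory<Atomic<int>> childResult (0);

pid_t pid = fork();

if (pid == 0) {
	childResult.Get().store(42);  // child process
	_exit(0);
}

waitpid(pid, nullptr, 0);
int result = childResult.Get().load();  // parent reads 42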
diff --git a/lib/base/shared-object.hpp b/lib/base/shared-object.hpp
new file mode 100644
index 0000000..58636dc
--- /dev/null
+++ b/lib/base/shared-object.hpp
@@ -0,0 +1,73 @@
+/* Icinga 2 | (c) 2019 Icinga GmbH | GPLv2+ */
+
+#ifndef SHARED_OBJECT_H
+#define SHARED_OBJECT_H
+
+#include "base/atomic.hpp"
+#include "base/object.hpp"
+#include <cstdint>
+
+namespace icinga
+{
+
+class SharedObject;
+
+inline void intrusive_ptr_add_ref(SharedObject *object);
+inline void intrusive_ptr_release(SharedObject *object);
+
+/**
+ * Seamless and polymorphic base for any class to create shared pointers of.
+ * Saves a memory allocation compared to std::shared_ptr.
+ *
+ * @ingroup base
+ */
+class SharedObject
+{
+ friend void intrusive_ptr_add_ref(SharedObject *object);
+ friend void intrusive_ptr_release(SharedObject *object);
+
+protected:
+ inline SharedObject() : m_References(0)
+ {
+ }
+
+ inline SharedObject(const SharedObject&) : SharedObject()
+ {
+ }
+
+ inline SharedObject(SharedObject&&) : SharedObject()
+ {
+ }
+
+ inline SharedObject& operator=(const SharedObject&)
+ {
+ return *this;
+ }
+
+ inline SharedObject& operator=(SharedObject&&)
+ {
+ return *this;
+ }
+
+ inline virtual
+ ~SharedObject() = default;
+
+private:
+ Atomic<uint_fast64_t> m_References;
+};
+
+inline void intrusive_ptr_add_ref(SharedObject *object)
+{
+ object->m_References.fetch_add(1);
+}
+
+inline void intrusive_ptr_release(SharedObject *object)
+{
+ if (object->m_References.fetch_sub(1) == 1u) {
+ delete object;
+ }
+}
+
+}
+
+#endif /* SHARED_OBJECT_H */
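A hypothetical sketch of deriving from SharedObject; the Job class is made up for illustration and assumes boost::intrusive_ptr is visible via base/object.hpp:

// Hypothetical sketch, not part of the patch: the reference count lives inside
// the object, so copies of the pointer only bump a counter.
class Job final : public SharedObject
{
public:
	typedef boost::intrusive_ptr<Job> Ptr;

	int Attempts = 0;
};

Job::Ptr job = new Job();
Job::Ptr alias = job;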
diff --git a/lib/base/shared.hpp b/lib/base/shared.hpp
new file mode 100644
index 0000000..63b35cb
--- /dev/null
+++ b/lib/base/shared.hpp
@@ -0,0 +1,101 @@
+/* Icinga 2 | (c) 2019 Icinga GmbH | GPLv2+ */
+
+#ifndef SHARED_H
+#define SHARED_H
+
+#include "base/atomic.hpp"
+#include <boost/smart_ptr/intrusive_ptr.hpp>
+#include <cstdint>
+#include <utility>
+
+namespace icinga
+{
+
+template<class T>
+class Shared;
+
+template<class T>
+inline void intrusive_ptr_add_ref(Shared<T> *object)
+{
+ object->m_References.fetch_add(1);
+}
+
+template<class T>
+inline void intrusive_ptr_release(Shared<T> *object)
+{
+ if (object->m_References.fetch_sub(1) == 1u) {
+ delete object;
+ }
+}
+
+/**
+ * Seamless wrapper for any class to create shared pointers of.
+ * Saves a memory allocation compared to std::shared_ptr.
+ *
+ * @ingroup base
+ */
+template<class T>
+class Shared : public T
+{
+ friend void intrusive_ptr_add_ref<>(Shared<T> *object);
+ friend void intrusive_ptr_release<>(Shared<T> *object);
+
+public:
+ typedef boost::intrusive_ptr<Shared> Ptr;
+
+ /**
+ * Like std::make_shared, but for this class.
+ *
+ * @param args Constructor arguments
+ *
+ * @return Ptr
+ */
+ template<class... Args>
+ static inline
+ Ptr Make(Args&&... args)
+ {
+ return new Shared(std::forward<Args>(args)...);
+ }
+
+ inline Shared(const Shared& origin) : Shared((const T&)origin)
+ {
+ }
+
+ inline Shared(Shared&& origin) : Shared((T&&)origin)
+ {
+ }
+
+ template<class... Args>
+ inline Shared(Args&&... args) : T(std::forward<Args>(args)...), m_References(0)
+ {
+ }
+
+ inline Shared& operator=(const Shared& rhs)
+ {
+ return operator=((const T&)rhs);
+ }
+
+ inline Shared& operator=(Shared&& rhs)
+ {
+ return operator=((T&&)rhs);
+ }
+
+ inline Shared& operator=(const T& rhs)
+ {
+ T::operator=(rhs);
+ return *this;
+ }
+
+ inline Shared& operator=(T&& rhs)
+ {
+ T::operator=(std::move(rhs));
+ return *this;
+ }
+
+private:
+ Atomic<uint_fast64_t> m_References;
+};
+
+}
+
+#endif /* SHARED_H */
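A brief, hypothetical sketch of wrapping an existing type in Shared<T>:

// Hypothetical sketch, not part of the patch: reference-count a plain
// std::string without a separate control-block allocation.
auto buffer (Shared<std::string>::Make("ping"));

buffer->append(" pong");                  // Shared<T> publicly derives from T
Shared<std::string>::Ptr alias = buffer;  // copies only bump the embedded counter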
diff --git a/lib/base/singleton.hpp b/lib/base/singleton.hpp
new file mode 100644
index 0000000..77511c0
--- /dev/null
+++ b/lib/base/singleton.hpp
@@ -0,0 +1,29 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef SINGLETON_H
+#define SINGLETON_H
+
+#include "base/i2-base.hpp"
+
+namespace icinga
+{
+
+/**
+ * A singleton.
+ *
+ * @ingroup base
+ */
+template<typename T>
+class Singleton
+{
+public:
+ static T *GetInstance()
+ {
+ static T instance;
+ return &instance;
+ }
+};
+
+}
+
+#endif /* SINGLETON_H */
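A hypothetical one-liner showing the intent; PluginRegistry is a made-up, default-constructible class:

// Hypothetical sketch, not part of the patch: one lazily constructed,
// process-wide instance per type.
PluginRegistry *registry = Singleton<PluginRegistry>::GetInstance();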
diff --git a/lib/base/socket.cpp b/lib/base/socket.cpp
new file mode 100644
index 0000000..4c967de
--- /dev/null
+++ b/lib/base/socket.cpp
@@ -0,0 +1,430 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/socket.hpp"
+#include "base/objectlock.hpp"
+#include "base/utility.hpp"
+#include "base/exception.hpp"
+#include "base/logger.hpp"
+#include <sstream>
+#include <iostream>
+#include <boost/exception/errinfo_api_function.hpp>
+#include <boost/exception/errinfo_errno.hpp>
+#include <socketpair.h>
+
+#ifndef _WIN32
+# include <poll.h>
+#endif /* _WIN32 */
+
+using namespace icinga;
+
+/**
+ * Constructor for the Socket class.
+ */
+Socket::Socket(SOCKET fd)
+{
+ SetFD(fd);
+}
+
+/**
+ * Destructor for the Socket class.
+ */
+Socket::~Socket()
+{
+ Close();
+}
+
+/**
+ * Sets the file descriptor for this socket object.
+ *
+ * @param fd The file descriptor.
+ */
+void Socket::SetFD(SOCKET fd)
+{
+ if (fd != INVALID_SOCKET) {
+#ifndef _WIN32
+ /* mark the socket as close-on-exec */
+ Utility::SetCloExec(fd);
+#endif /* _WIN32 */
+ }
+
+ ObjectLock olock(this);
+ m_FD = fd;
+}
+
+/**
+ * Retrieves the file descriptor for this socket object.
+ *
+ * @returns The file descriptor.
+ */
+SOCKET Socket::GetFD() const
+{
+ ObjectLock olock(this);
+
+ return m_FD;
+}
+
+/**
+ * Closes the socket.
+ */
+void Socket::Close()
+{
+ ObjectLock olock(this);
+
+ if (m_FD != INVALID_SOCKET) {
+ closesocket(m_FD);
+ m_FD = INVALID_SOCKET;
+ }
+}
+
+/**
+ * Retrieves the last error that occurred for the socket.
+ *
+ * @returns An error code.
+ */
+int Socket::GetError() const
+{
+ int opt;
+ socklen_t optlen = sizeof(opt);
+
+ int rc = getsockopt(GetFD(), SOL_SOCKET, SO_ERROR, (char *)&opt, &optlen);
+
+ if (rc >= 0)
+ return opt;
+
+ return 0;
+}
+
+/**
+ * Formats a sockaddr in a human-readable way.
+ *
+ * @returns A pair of host and service.
+ */
+String Socket::GetHumanReadableAddress(const std::pair<String, String>& socketDetails)
+{
+ std::ostringstream s;
+ s << "[" << socketDetails.first << "]:" << socketDetails.second;
+ return s.str();
+}
+
+/**
+ * Returns host and service as pair.
+ *
+ * @returns A pair with host and service.
+ */
+std::pair<String, String> Socket::GetDetailsFromSockaddr(sockaddr *address, socklen_t len)
+{
+ char host[NI_MAXHOST];
+ char service[NI_MAXSERV];
+
+ if (getnameinfo(address, len, host, sizeof(host), service,
+ sizeof(service), NI_NUMERICHOST | NI_NUMERICSERV) < 0) {
+#ifndef _WIN32
+ Log(LogCritical, "Socket")
+ << "getnameinfo() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function("getnameinfo")
+ << boost::errinfo_errno(errno));
+#else /* _WIN32 */
+ Log(LogCritical, "Socket")
+ << "getnameinfo() failed with error code " << WSAGetLastError() << ", \"" << Utility::FormatErrorNumber(WSAGetLastError()) << "\"";
+
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function("getnameinfo")
+ << errinfo_win32_error(WSAGetLastError()));
+#endif /* _WIN32 */
+ }
+
+ return std::make_pair(host, service);
+}
+
+/**
+ * Returns a pair describing the local host and service of the socket.
+ *
+ * @returns A pair describing the local host and service.
+ */
+std::pair<String, String> Socket::GetClientAddressDetails()
+{
+ std::unique_lock<std::mutex> lock(m_SocketMutex);
+
+ sockaddr_storage sin;
+ socklen_t len = sizeof(sin);
+
+ if (getsockname(GetFD(), (sockaddr *)&sin, &len) < 0) {
+#ifndef _WIN32
+ Log(LogCritical, "Socket")
+ << "getsockname() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function("getsockname")
+ << boost::errinfo_errno(errno));
+#else /* _WIN32 */
+ Log(LogCritical, "Socket")
+ << "getsockname() failed with error code " << WSAGetLastError() << ", \"" << Utility::FormatErrorNumber(WSAGetLastError()) << "\"";
+
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function("getsockname")
+ << errinfo_win32_error(WSAGetLastError()));
+#endif /* _WIN32 */
+ }
+
+ std::pair<String, String> details;
+ try {
+ details = GetDetailsFromSockaddr((sockaddr *)&sin, len);
+ } catch (const std::exception&) {
+ /* already logged */
+ }
+
+ return details;
+}
+
+/**
+ * Returns a String describing the local address of the socket.
+ *
+ * @returns A String describing the local address.
+ */
+String Socket::GetClientAddress()
+{
+ return GetHumanReadableAddress(GetClientAddressDetails());
+}
+
+/**
+ * Returns a pair describing the peer host and service of the socket.
+ *
+ * @returns A pair describing the peer host and service.
+ */
+std::pair<String, String> Socket::GetPeerAddressDetails()
+{
+ std::unique_lock<std::mutex> lock(m_SocketMutex);
+
+ sockaddr_storage sin;
+ socklen_t len = sizeof(sin);
+
+ if (getpeername(GetFD(), (sockaddr *)&sin, &len) < 0) {
+#ifndef _WIN32
+ Log(LogCritical, "Socket")
+ << "getpeername() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function("getpeername")
+ << boost::errinfo_errno(errno));
+#else /* _WIN32 */
+ Log(LogCritical, "Socket")
+ << "getpeername() failed with error code " << WSAGetLastError() << ", \"" << Utility::FormatErrorNumber(WSAGetLastError()) << "\"";
+
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function("getpeername")
+ << errinfo_win32_error(WSAGetLastError()));
+#endif /* _WIN32 */
+ }
+
+ std::pair<String, String> details;
+ try {
+ details = GetDetailsFromSockaddr((sockaddr *)&sin, len);
+ } catch (const std::exception&) {
+ /* already logged */
+ }
+
+ return details;
+}
+
+/**
+ * Returns a String describing the peer address of the socket.
+ *
+ * @returns A String describing the peer address.
+ */
+String Socket::GetPeerAddress()
+{
+ return GetHumanReadableAddress(GetPeerAddressDetails());
+}
+
+/**
+ * Starts listening for incoming client connections.
+ */
+void Socket::Listen()
+{
+ if (listen(GetFD(), SOMAXCONN) < 0) {
+#ifndef _WIN32
+ Log(LogCritical, "Socket")
+ << "listen() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function("listen")
+ << boost::errinfo_errno(errno));
+#else /* _WIN32 */
+ Log(LogCritical, "Socket")
+ << "listen() failed with error code " << WSAGetLastError() << ", \"" << Utility::FormatErrorNumber(WSAGetLastError()) << "\"";
+
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function("listen")
+ << errinfo_win32_error(WSAGetLastError()));
+#endif /* _WIN32 */
+ }
+}
+
+/**
+ * Sends data over the socket.
+ */
+size_t Socket::Write(const void *buffer, size_t count)
+{
+ int rc;
+
+#ifndef _WIN32
+ rc = write(GetFD(), (const char *)buffer, count);
+#else /* _WIN32 */
+ rc = send(GetFD(), (const char *)buffer, count, 0);
+#endif /* _WIN32 */
+
+ if (rc < 0) {
+#ifndef _WIN32
+ Log(LogCritical, "Socket")
+ << "send() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function("send")
+ << boost::errinfo_errno(errno));
+#else /* _WIN32 */
+ Log(LogCritical, "Socket")
+ << "send() failed with error code " << WSAGetLastError() << ", \"" << Utility::FormatErrorNumber(WSAGetLastError()) << "\"";
+
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function("send")
+ << errinfo_win32_error(WSAGetLastError()));
+#endif /* _WIN32 */
+ }
+
+ return rc;
+}
+
+/**
+ * Reads data from the socket.
+ */
+size_t Socket::Read(void *buffer, size_t count)
+{
+ int rc;
+
+#ifndef _WIN32
+ rc = read(GetFD(), (char *)buffer, count);
+#else /* _WIN32 */
+ rc = recv(GetFD(), (char *)buffer, count, 0);
+#endif /* _WIN32 */
+
+ if (rc < 0) {
+#ifndef _WIN32
+ Log(LogCritical, "Socket")
+ << "recv() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function("recv")
+ << boost::errinfo_errno(errno));
+#else /* _WIN32 */
+ Log(LogCritical, "Socket")
+ << "recv() failed with error code " << WSAGetLastError() << ", \"" << Utility::FormatErrorNumber(WSAGetLastError()) << "\"";
+
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function("recv")
+ << errinfo_win32_error(WSAGetLastError()));
+#endif /* _WIN32 */
+ }
+
+ return rc;
+}
+
+/**
+ * Accepts a new client and creates a new client object for it.
+ */
+Socket::Ptr Socket::Accept()
+{
+ sockaddr_storage addr;
+ socklen_t addrlen = sizeof(addr);
+
+ SOCKET fd = accept(GetFD(), (sockaddr *)&addr, &addrlen);
+
+#ifndef _WIN32
+ if (fd < 0) {
+ Log(LogCritical, "Socket")
+ << "accept() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function("accept")
+ << boost::errinfo_errno(errno));
+ }
+#else /* _WIN32 */
+ if (fd == INVALID_SOCKET) {
+ Log(LogCritical, "Socket")
+ << "accept() failed with error code " << WSAGetLastError() << ", \"" << Utility::FormatErrorNumber(WSAGetLastError()) << "\"";
+
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function("accept")
+ << errinfo_win32_error(WSAGetLastError()));
+ }
+#endif /* _WIN32 */
+
+ return new Socket(fd);
+}
+
+bool Socket::Poll(bool read, bool write, struct timeval *timeout)
+{
+ int rc;
+
+#ifdef _WIN32
+ fd_set readfds, writefds, exceptfds;
+
+ FD_ZERO(&readfds);
+ if (read)
+ FD_SET(GetFD(), &readfds);
+
+ FD_ZERO(&writefds);
+ if (write)
+ FD_SET(GetFD(), &writefds);
+
+ FD_ZERO(&exceptfds);
+ FD_SET(GetFD(), &exceptfds);
+
+ rc = select(GetFD() + 1, &readfds, &writefds, &exceptfds, timeout);
+
+ if (rc < 0) {
+ Log(LogCritical, "Socket")
+ << "select() failed with error code " << WSAGetLastError() << ", \"" << Utility::FormatErrorNumber(WSAGetLastError()) << "\"";
+
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function("select")
+ << errinfo_win32_error(WSAGetLastError()));
+ }
+#else /* _WIN32 */
+ pollfd pfd;
+ pfd.fd = GetFD();
+ pfd.events = (read ? POLLIN : 0) | (write ? POLLOUT : 0);
+ pfd.revents = 0;
+
+ rc = poll(&pfd, 1, timeout ? (timeout->tv_sec * 1000 + timeout->tv_usec / 1000) : -1);
+
+ if (rc < 0) {
+ Log(LogCritical, "Socket")
+ << "poll() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function("poll")
+ << boost::errinfo_errno(errno));
+ }
+#endif /* _WIN32 */
+
+ return (rc != 0);
+}
+
+void Socket::MakeNonBlocking()
+{
+#ifdef _WIN32
+ Utility::SetNonBlockingSocket(GetFD());
+#else /* _WIN32 */
+ Utility::SetNonBlocking(GetFD());
+#endif /* _WIN32 */
+}
+
+void Socket::SocketPair(SOCKET s[2])
+{
+ if (dumb_socketpair(s, 0) < 0)
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function("socketpair")
+ << boost::errinfo_errno(errno));
+}
diff --git a/lib/base/socket.hpp b/lib/base/socket.hpp
new file mode 100644
index 0000000..f7acf7f
--- /dev/null
+++ b/lib/base/socket.hpp
@@ -0,0 +1,66 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef SOCKET_H
+#define SOCKET_H
+
+#include "base/i2-base.hpp"
+#include "base/object.hpp"
+#include <mutex>
+
+namespace icinga
+{
+
+/**
+ * Base class for connection-oriented sockets.
+ *
+ * @ingroup base
+ */
+class Socket : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(Socket);
+
+ Socket() = default;
+ Socket(SOCKET fd);
+ ~Socket() override;
+
+ SOCKET GetFD() const;
+
+ void Close();
+
+ std::pair<String, String> GetClientAddressDetails();
+ String GetClientAddress();
+ std::pair<String, String> GetPeerAddressDetails();
+ String GetPeerAddress();
+
+ size_t Read(void *buffer, size_t size);
+ size_t Write(const void *buffer, size_t size);
+
+ void Listen();
+ Socket::Ptr Accept();
+
+ bool Poll(bool read, bool write, struct timeval *timeout = nullptr);
+
+ void MakeNonBlocking();
+
+ static void SocketPair(SOCKET s[2]);
+
+protected:
+ void SetFD(SOCKET fd);
+
+ int GetError() const;
+
+ mutable std::mutex m_SocketMutex;
+
+private:
+ SOCKET m_FD{INVALID_SOCKET}; /**< The socket descriptor. */
+
+ static std::pair<String, String> GetDetailsFromSockaddr(sockaddr *address, socklen_t len);
+ static String GetHumanReadableAddress(const std::pair<String, String>& socketDetails);
+};
+
+class socket_error : virtual public std::exception, virtual public boost::exception { };
+
+}
+
+#endif /* SOCKET_H */
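A hypothetical sketch exercising the wrappers declared above through an in-process socket pair; buffer sizes and payload are illustrative only:

// Hypothetical sketch, not part of the patch: an in-process socket pair driven
// through the Socket wrappers.
SOCKET fds[2];
Socket::SocketPair(fds);

Socket::Ptr writer = new Socket(fds[0]);
Socket::Ptr reader = new Socket(fds[1]);

writer->Write("ping", 4);

char buf[4];
if (reader->Poll(true, false))  // block until readable
	reader->Read(buf, sizeof(buf));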
diff --git a/lib/base/stacktrace.cpp b/lib/base/stacktrace.cpp
new file mode 100644
index 0000000..e3f15ce
--- /dev/null
+++ b/lib/base/stacktrace.cpp
@@ -0,0 +1,43 @@
+/* Icinga 2 | (c) 2020 Icinga GmbH | GPLv2+ */
+
+#include <base/i2-base.hpp>
+#include "base/stacktrace.hpp"
+#include <iostream>
+#include <iomanip>
+#include <vector>
+
+#ifdef HAVE_BACKTRACE_SYMBOLS
+# include <execinfo.h>
+#endif /* HAVE_BACKTRACE_SYMBOLS */
+
+using namespace icinga;
+
+std::ostream &icinga::operator<<(std::ostream &os, const StackTraceFormatter &f)
+{
+ /* In most cases, this operator<< just relies on the operator<< for the `boost::stacktrace::stacktrace` wrapped in
+ * the `StackTraceFormatter`. But as this operator turned out to not work properly on some platforms, there is a
+ * fallback implementation that can be enabled using the `-DICINGA2_STACKTRACE_USE_BACKTRACE_SYMBOLS` flag at
+ * compile time. This will then switch to `backtrace_symbols()` from `<execinfo.h>` instead of the implementation
+ * provided by Boost.
+ */
+
+ const boost::stacktrace::stacktrace &stack = f.m_Stack;
+
+#ifdef ICINGA2_STACKTRACE_USE_BACKTRACE_SYMBOLS
+ std::vector<void *> addrs;
+ addrs.reserve(stack.size());
+ std::transform(stack.begin(), stack.end(), std::back_inserter(addrs), [](const boost::stacktrace::frame &f) {
+ return const_cast<void *>(f.address());
+ });
+
+ char **symbols = backtrace_symbols(addrs.data(), addrs.size());
+ for (size_t i = 0; i < addrs.size(); i++) {
+ os << std::setw(2) << i << "# " << symbols[i] << std::endl;
+ }
+ std::free(symbols);
+#else /* ICINGA2_STACKTRACE_USE_BACKTRACE_SYMBOLS */
+ os << stack;
+#endif /* ICINGA2_STACKTRACE_USE_BACKTRACE_SYMBOLS */
+
+ return os;
+}
diff --git a/lib/base/stacktrace.hpp b/lib/base/stacktrace.hpp
new file mode 100644
index 0000000..b4a9765
--- /dev/null
+++ b/lib/base/stacktrace.hpp
@@ -0,0 +1,31 @@
+/* Icinga 2 | (c) 2020 Icinga GmbH | GPLv2+ */
+
+#ifndef STACKTRACE_H
+#define STACKTRACE_H
+
+#include <boost/stacktrace.hpp>
+
+namespace icinga
+{
+
+/**
+ * Formatter for `boost::stacktrace::stacktrace` objects
+ *
+ * This class wraps `boost::stacktrace::stacktrace` objects and provides an operator<<
+ * for printing them to an `std::ostream` in a custom format.
+ */
+class StackTraceFormatter {
+public:
+ StackTraceFormatter(const boost::stacktrace::stacktrace &stack) : m_Stack(stack) {}
+
+private:
+ const boost::stacktrace::stacktrace &m_Stack;
+
+ friend std::ostream &operator<<(std::ostream &os, const StackTraceFormatter &f);
+};
+
+std::ostream& operator<<(std::ostream& os, const StackTraceFormatter &f);
+
+}
+
+#endif /* STACKTRACE_H */
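A minimal, hypothetical sketch of the formatter, assuming <iostream> is included; this mirrors how a crash handler would typically print the current call stack:

// Hypothetical sketch, not part of the patch: print the current call stack,
// using whichever backend was selected at compile time.
std::cerr << StackTraceFormatter(boost::stacktrace::stacktrace()) << std::endl;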
diff --git a/lib/base/statsfunction.hpp b/lib/base/statsfunction.hpp
new file mode 100644
index 0000000..ecac33c
--- /dev/null
+++ b/lib/base/statsfunction.hpp
@@ -0,0 +1,17 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef STATSFUNCTION_H
+#define STATSFUNCTION_H
+
+#include "base/i2-base.hpp"
+#include "base/function.hpp"
+
+namespace icinga
+{
+
+#define REGISTER_STATSFUNCTION(name, callback) \
+ REGISTER_FUNCTION(StatsFunctions, name, callback, "status:perfdata")
+
+}
+
+#endif /* STATSFUNCTION_H */
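A hypothetical sketch of how a component would use the macro above; DemoStats and its contents are made up for illustration:

// Hypothetical sketch, not part of the patch: a made-up stats callback
// registered via REGISTER_STATSFUNCTION.
static void DemoStats(const Dictionary::Ptr& status, const Array::Ptr& perfdata)
{
	(void)perfdata;  // perfdata left untouched in this sketch
	status->Set("demo", new Dictionary({ { "connected", true } }));
}

REGISTER_STATSFUNCTION(Demo, &DemoStats);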
diff --git a/lib/base/stdiostream.cpp b/lib/base/stdiostream.cpp
new file mode 100644
index 0000000..449036f
--- /dev/null
+++ b/lib/base/stdiostream.cpp
@@ -0,0 +1,57 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/stdiostream.hpp"
+#include "base/objectlock.hpp"
+
+using namespace icinga;
+
+/**
+ * Constructor for the StdioStream class.
+ *
+ * @param innerStream The inner stream.
+ * @param ownsStream Whether the new object owns the inner stream. If true
+ * the stream's destructor deletes the inner stream.
+ */
+StdioStream::StdioStream(std::iostream *innerStream, bool ownsStream)
+ : m_InnerStream(innerStream), m_OwnsStream(ownsStream)
+{ }
+
+StdioStream::~StdioStream()
+{
+ Close();
+}
+
+size_t StdioStream::Read(void *buffer, size_t size, bool allow_partial)
+{
+ ObjectLock olock(this);
+
+ m_InnerStream->read(static_cast<char *>(buffer), size);
+ return m_InnerStream->gcount();
+}
+
+void StdioStream::Write(const void *buffer, size_t size)
+{
+ ObjectLock olock(this);
+
+ m_InnerStream->write(static_cast<const char *>(buffer), size);
+}
+
+void StdioStream::Close()
+{
+ Stream::Close();
+
+ if (m_OwnsStream) {
+ delete m_InnerStream;
+ m_OwnsStream = false;
+ }
+}
+
+bool StdioStream::IsDataAvailable() const
+{
+ return !IsEof();
+}
+
+bool StdioStream::IsEof() const
+{
+ return !m_InnerStream->good();
+}
diff --git a/lib/base/stdiostream.hpp b/lib/base/stdiostream.hpp
new file mode 100644
index 0000000..b305c7f
--- /dev/null
+++ b/lib/base/stdiostream.hpp
@@ -0,0 +1,36 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef STDIOSTREAM_H
+#define STDIOSTREAM_H
+
+#include "base/i2-base.hpp"
+#include "base/stream.hpp"
+#include <iosfwd>
+#include <iostream>
+
+namespace icinga {
+
+class StdioStream final : public Stream
+{
+public:
+ DECLARE_PTR_TYPEDEFS(StdioStream);
+
+ StdioStream(std::iostream *innerStream, bool ownsStream);
+ ~StdioStream() override;
+
+ size_t Read(void *buffer, size_t size, bool allow_partial = false) override;
+ void Write(const void *buffer, size_t size) override;
+
+ void Close() override;
+
+ bool IsDataAvailable() const override;
+ bool IsEof() const override;
+
+private:
+ std::iostream *m_InnerStream;
+ bool m_OwnsStream;
+};
+
+}
+
+#endif /* STDIOSTREAM_H */
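A hypothetical sketch of the stream wrapper above, assuming <sstream>; the payload is illustrative only:

// Hypothetical sketch, not part of the patch: wrap a std::stringstream in a
// StdioStream that owns (and later deletes) it.
auto *ss = new std::stringstream("PING icinga\n");
StdioStream::Ptr stream = new StdioStream(ss, true);

char buf[4];
size_t got = stream->Read(buf, sizeof(buf));  // reads up to 4 bytes
stream->Close();                              // deletes the owned stringstream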
diff --git a/lib/base/stream.cpp b/lib/base/stream.cpp
new file mode 100644
index 0000000..e558385
--- /dev/null
+++ b/lib/base/stream.cpp
@@ -0,0 +1,149 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/stream.hpp"
+#include <boost/algorithm/string/trim.hpp>
+#include <chrono>
+
+using namespace icinga;
+
+void Stream::RegisterDataHandler(const std::function<void(const Stream::Ptr&)>& handler)
+{
+ if (SupportsWaiting())
+ OnDataAvailable.connect(handler);
+ else
+ BOOST_THROW_EXCEPTION(std::runtime_error("Stream does not support waiting."));
+}
+
+bool Stream::SupportsWaiting() const
+{
+ return false;
+}
+
+bool Stream::IsDataAvailable() const
+{
+ return false;
+}
+
+void Stream::Shutdown()
+{
+ BOOST_THROW_EXCEPTION(std::runtime_error("Stream does not support Shutdown()."));
+}
+
+size_t Stream::Peek(void *buffer, size_t count, bool allow_partial)
+{
+ BOOST_THROW_EXCEPTION(std::runtime_error("Stream does not support Peek()."));
+}
+
+void Stream::SignalDataAvailable()
+{
+ OnDataAvailable(this);
+
+ {
+ std::unique_lock<std::mutex> lock(m_Mutex);
+ m_CV.notify_all();
+ }
+}
+
+bool Stream::WaitForData()
+{
+ if (!SupportsWaiting())
+ BOOST_THROW_EXCEPTION(std::runtime_error("Stream does not support waiting."));
+
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ while (!IsDataAvailable() && !IsEof())
+ m_CV.wait(lock);
+
+ return IsDataAvailable() || IsEof();
+}
+
+bool Stream::WaitForData(int timeout)
+{
+ namespace ch = std::chrono;
+
+ if (!SupportsWaiting())
+ BOOST_THROW_EXCEPTION(std::runtime_error("Stream does not support waiting."));
+
+ if (timeout < 0)
+ BOOST_THROW_EXCEPTION(std::runtime_error("Timeout can't be negative"));
+
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ return m_CV.wait_for(lock, ch::duration<int>(timeout), [this]() { return IsDataAvailable() || IsEof(); });
+}
+
+void Stream::Close()
+{
+ OnDataAvailable.disconnect_all_slots();
+
+ /* Force signals2 to remove the slots, see https://stackoverflow.com/questions/2049291/force-deletion-of-slot-in-boostsignals2
+ * for details. */
+ OnDataAvailable.connect([](const Stream::Ptr&) { });
+}
+
+StreamReadStatus Stream::ReadLine(String *line, StreamReadContext& context, bool may_wait)
+{
+ if (context.Eof)
+ return StatusEof;
+
+ if (context.MustRead) {
+ if (!context.FillFromStream(this, may_wait)) {
+ context.Eof = true;
+
+ *line = String(context.Buffer, &(context.Buffer[context.Size]));
+ boost::algorithm::trim_right(*line);
+
+ return StatusNewItem;
+ }
+ }
+
+ for (size_t i = 0; i < context.Size; i++) {
+ if (context.Buffer[i] == '\n') {
+ *line = String(context.Buffer, context.Buffer + i);
+ boost::algorithm::trim_right(*line);
+
+ context.DropData(i + 1u);
+
+ context.MustRead = !context.Size;
+ return StatusNewItem;
+ }
+ }
+
+ context.MustRead = true;
+ return StatusNeedData;
+}
+
+bool StreamReadContext::FillFromStream(const Stream::Ptr& stream, bool may_wait)
+{
+ if (may_wait && stream->SupportsWaiting())
+ stream->WaitForData();
+
+ size_t count = 0;
+
+ do {
+ Buffer = (char *)realloc(Buffer, Size + 4096);
+
+ if (!Buffer)
+ throw std::bad_alloc();
+
+ if (stream->IsEof())
+ break;
+
+ size_t rc = stream->Read(Buffer + Size, 4096, true);
+
+ Size += rc;
+ count += rc;
+ } while (count < 64 * 1024 && stream->IsDataAvailable());
+
+ if (count == 0 && stream->IsEof())
+ return false;
+ else
+ return true;
+}
+
+void StreamReadContext::DropData(size_t count)
+{
+ ASSERT(count <= Size);
+ memmove(Buffer, Buffer + count, Size - count);
+ Size -= count;
+}
diff --git a/lib/base/stream.hpp b/lib/base/stream.hpp
new file mode 100644
index 0000000..6bc8fed
--- /dev/null
+++ b/lib/base/stream.hpp
@@ -0,0 +1,133 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef STREAM_H
+#define STREAM_H
+
+#include "base/i2-base.hpp"
+#include "base/object.hpp"
+#include <boost/signals2.hpp>
+#include <condition_variable>
+#include <mutex>
+
+namespace icinga
+{
+
+class String;
+class Stream;
+
+enum ConnectionRole
+{
+ RoleClient,
+ RoleServer
+};
+
+struct StreamReadContext
+{
+ ~StreamReadContext()
+ {
+ free(Buffer);
+ }
+
+ bool FillFromStream(const intrusive_ptr<Stream>& stream, bool may_wait);
+ void DropData(size_t count);
+
+ char *Buffer{nullptr};
+ size_t Size{0};
+ bool MustRead{true};
+ bool Eof{false};
+};
+
+enum StreamReadStatus
+{
+ StatusNewItem,
+ StatusNeedData,
+ StatusEof
+};
+
+/**
+ * A stream.
+ *
+ * @ingroup base
+ */
+class Stream : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(Stream);
+
+ /**
+ * Reads data from the stream without removing it from the stream buffer.
+ *
+ * @param buffer The buffer where data should be stored. May be nullptr if you're
+ * not actually interested in the data.
+ * @param count The number of bytes to read from the queue.
+ * @param allow_partial Whether to allow partial reads.
+ * @returns The number of bytes actually read.
+ */
+ virtual size_t Peek(void *buffer, size_t count, bool allow_partial = false);
+
+ /**
+ * Reads data from the stream.
+ *
+ * @param buffer The buffer where data should be stored. May be nullptr if you're
+ * not actually interested in the data.
+ * @param count The number of bytes to read from the queue.
+ * @param allow_partial Whether to allow partial reads.
+ * @returns The number of bytes actually read.
+ */
+ virtual size_t Read(void *buffer, size_t count, bool allow_partial = false) = 0;
+
+ /**
+ * Writes data to the stream.
+ *
+ * @param buffer The data that is to be written.
+ * @param count The number of bytes to write.
+ */
+ virtual void Write(const void *buffer, size_t count) = 0;
+
+ /**
+ * Causes the stream to be closed (via Close()) once all pending data has been
+ * written.
+ */
+ virtual void Shutdown();
+
+ /**
+ * Closes the stream and releases resources.
+ */
+ virtual void Close();
+
+ /**
+ * Checks whether we've reached the end-of-file condition.
+ *
+ * @returns true if EOF.
+ */
+ virtual bool IsEof() const = 0;
+
+ /**
+ * Waits until data can be read from the stream.
+ * Optionally with a timeout.
+ */
+ bool WaitForData();
+ bool WaitForData(int timeout);
+
+ virtual bool SupportsWaiting() const;
+
+ virtual bool IsDataAvailable() const;
+
+ void RegisterDataHandler(const std::function<void(const Stream::Ptr&)>& handler);
+
+ StreamReadStatus ReadLine(String *line, StreamReadContext& context, bool may_wait = false);
+
+protected:
+ void SignalDataAvailable();
+
+private:
+ boost::signals2::signal<void(const Stream::Ptr&)> OnDataAvailable;
+
+ std::mutex m_Mutex;
+ std::condition_variable m_CV;
+};
+
+}
+
+#endif /* STREAM_H */
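A hypothetical sketch of line-oriented reading on top of any concrete Stream implementation; `stream` is an assumed Stream::Ptr and the log facility name is made up:

// Hypothetical sketch, not part of the patch: ReadLine() drives a
// StreamReadContext until the peer closes the connection.
StreamReadContext context;
String line;

for (;;) {
	StreamReadStatus status = stream->ReadLine(&line, context, true);

	if (status == StatusEof)
		break;

	if (status == StatusNewItem)
		Log(LogInformation, "Demo") << "got line: " << line;
}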
diff --git a/lib/base/streamlogger.cpp b/lib/base/streamlogger.cpp
new file mode 100644
index 0000000..162b9c3
--- /dev/null
+++ b/lib/base/streamlogger.cpp
@@ -0,0 +1,119 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/streamlogger.hpp"
+#include "base/streamlogger-ti.cpp"
+#include "base/utility.hpp"
+#include "base/objectlock.hpp"
+#include "base/console.hpp"
+#include <iostream>
+
+using namespace icinga;
+
+REGISTER_TYPE(StreamLogger);
+
+std::mutex StreamLogger::m_Mutex;
+
+void StreamLogger::Stop(bool runtimeRemoved)
+{
+ ObjectImpl<StreamLogger>::Stop(runtimeRemoved);
+
+ // make sure we flush the log data on shutdown, even if we don't call the destructor
+ if (m_Stream)
+ m_Stream->flush();
+}
+
+/**
+ * Destructor for the StreamLogger class.
+ */
+StreamLogger::~StreamLogger()
+{
+ if (m_FlushLogTimer)
+ m_FlushLogTimer->Stop(true);
+
+ if (m_Stream && m_OwnsStream)
+ delete m_Stream;
+}
+
+void StreamLogger::FlushLogTimerHandler()
+{
+ Flush();
+}
+
+void StreamLogger::Flush()
+{
+ ObjectLock oLock (this);
+
+ if (m_Stream)
+ m_Stream->flush();
+}
+
+void StreamLogger::BindStream(std::ostream *stream, bool ownsStream)
+{
+ ObjectLock olock(this);
+
+ if (m_Stream && m_OwnsStream)
+ delete m_Stream;
+
+ m_Stream = stream;
+ m_OwnsStream = ownsStream;
+
+ if (!m_FlushLogTimer) {
+ m_FlushLogTimer = Timer::Create();
+ m_FlushLogTimer->SetInterval(1);
+ m_FlushLogTimer->OnTimerExpired.connect([this](const Timer * const&) { FlushLogTimerHandler(); });
+ m_FlushLogTimer->Start();
+ }
+}
+
+/**
+ * Processes a log entry and outputs it to a stream.
+ *
+ * @param stream The output stream.
+ * @param entry The log entry.
+ */
+void StreamLogger::ProcessLogEntry(std::ostream& stream, const LogEntry& entry)
+{
+ String timestamp = Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", entry.Timestamp);
+
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ if (Logger::IsTimestampEnabled())
+ stream << "[" << timestamp << "] ";
+
+ int color;
+
+ switch (entry.Severity) {
+ case LogDebug:
+ color = Console_ForegroundCyan;
+ break;
+ case LogNotice:
+ color = Console_ForegroundBlue;
+ break;
+ case LogInformation:
+ color = Console_ForegroundGreen;
+ break;
+ case LogWarning:
+ color = Console_ForegroundYellow | Console_Bold;
+ break;
+ case LogCritical:
+ color = Console_ForegroundRed | Console_Bold;
+ break;
+ default:
+ return;
+ }
+
+ stream << ConsoleColorTag(color);
+ stream << Logger::SeverityToString(entry.Severity);
+ stream << ConsoleColorTag(Console_Normal);
+ stream << "/" << entry.Facility << ": " << entry.Message << "\n";
+}
+
+/**
+ * Processes a log entry and outputs it to a stream.
+ *
+ * @param entry The log entry.
+ */
+void StreamLogger::ProcessLogEntry(const LogEntry& entry)
+{
+ ProcessLogEntry(*m_Stream, entry);
+}
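A hypothetical sketch of the static formatting helper defined above; the LogEntry fields are filled with made-up values and <iostream> is assumed:

// Hypothetical sketch, not part of the patch: format a single LogEntry to
// stderr without going through a registered logger object.
LogEntry entry;
entry.Timestamp = Utility::GetTime();
entry.Severity = LogWarning;
entry.Facility = "Demo";
entry.Message = "disk space is getting low";

StreamLogger::ProcessLogEntry(std::cerr, entry);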
diff --git a/lib/base/streamlogger.hpp b/lib/base/streamlogger.hpp
new file mode 100644
index 0000000..8cbe313
--- /dev/null
+++ b/lib/base/streamlogger.hpp
@@ -0,0 +1,47 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef STREAMLOGGER_H
+#define STREAMLOGGER_H
+
+#include "base/i2-base.hpp"
+#include "base/streamlogger-ti.hpp"
+#include "base/timer.hpp"
+#include <iosfwd>
+
+namespace icinga
+{
+
+/**
+ * A logger that logs to an iostream.
+ *
+ * @ingroup base
+ */
+class StreamLogger : public ObjectImpl<StreamLogger>
+{
+public:
+ DECLARE_OBJECT(StreamLogger);
+
+ void Stop(bool runtimeRemoved) override;
+ ~StreamLogger() override;
+
+ void BindStream(std::ostream *stream, bool ownsStream);
+
+ static void ProcessLogEntry(std::ostream& stream, const LogEntry& entry);
+
+protected:
+ void ProcessLogEntry(const LogEntry& entry) final;
+ void Flush() final;
+
+private:
+ static std::mutex m_Mutex;
+ std::ostream *m_Stream{nullptr};
+ bool m_OwnsStream{false};
+
+ Timer::Ptr m_FlushLogTimer;
+
+ void FlushLogTimerHandler();
+};
+
+}
+
+#endif /* STREAMLOGGER_H */
diff --git a/lib/base/streamlogger.ti b/lib/base/streamlogger.ti
new file mode 100644
index 0000000..6dc36e0
--- /dev/null
+++ b/lib/base/streamlogger.ti
@@ -0,0 +1,14 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/logger.hpp"
+
+library base;
+
+namespace icinga
+{
+
+abstract class StreamLogger : Logger
+{
+};
+
+}
diff --git a/lib/base/string-script.cpp b/lib/base/string-script.cpp
new file mode 100644
index 0000000..323f99c
--- /dev/null
+++ b/lib/base/string-script.cpp
@@ -0,0 +1,138 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/object.hpp"
+#include "base/dictionary.hpp"
+#include "base/function.hpp"
+#include "base/functionwrapper.hpp"
+#include "base/scriptframe.hpp"
+#include "base/exception.hpp"
+#include <boost/algorithm/string.hpp>
+
+using namespace icinga;
+
+static int StringLen()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ String self = vframe->Self;
+ return self.GetLength();
+}
+
+static String StringToString()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ return vframe->Self;
+}
+
+static String StringSubstr(const std::vector<Value>& args)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ String self = vframe->Self;
+
+ if (args.empty())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Too few arguments"));
+
+ if (static_cast<double>(args[0]) < 0 || static_cast<double>(args[0]) >= self.GetLength())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("String index is out of range"));
+
+ if (args.size() > 1)
+ return self.SubStr(args[0], args[1]);
+ else
+ return self.SubStr(args[0]);
+}
+
+static String StringUpper()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ String self = vframe->Self;
+ return boost::to_upper_copy(self);
+}
+
+static String StringLower()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ String self = vframe->Self;
+ return boost::to_lower_copy(self);
+}
+
+static Array::Ptr StringSplit(const String& delims)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ String self = vframe->Self;
+ std::vector<String> tokens = self.Split(delims.CStr());
+
+ return Array::FromVector(tokens);
+}
+
+static int StringFind(const std::vector<Value>& args)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ String self = vframe->Self;
+
+ if (args.empty())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Too few arguments"));
+
+ String::SizeType result;
+
+ if (args.size() > 1) {
+ if (static_cast<double>(args[1]) < 0)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("String index is out of range"));
+
+ result = self.Find(args[0], args[1]);
+ } else
+ result = self.Find(args[0]);
+
+ if (result == String::NPos)
+ return -1;
+ else
+ return result;
+}
+
+static bool StringContains(const String& str)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ String self = vframe->Self;
+ return self.Contains(str);
+}
+
+static Value StringReplace(const String& search, const String& replacement)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ String self = vframe->Self;
+
+ boost::algorithm::replace_all(self, search, replacement);
+ return self;
+}
+
+static String StringReverse()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ String self = vframe->Self;
+ return self.Reverse();
+}
+
+static String StringTrim()
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ String self = vframe->Self;
+ return self.Trim();
+}
+
+Object::Ptr String::GetPrototype()
+{
+ static Dictionary::Ptr prototype = new Dictionary({
+ { "len", new Function("String#len", StringLen, {}, true) },
+ { "to_string", new Function("String#to_string", StringToString, {}, true) },
+ { "substr", new Function("String#substr", StringSubstr, { "start", "len" }, true) },
+ { "upper", new Function("String#upper", StringUpper, {}, true) },
+ { "lower", new Function("String#lower", StringLower, {}, true) },
+ { "split", new Function("String#split", StringSplit, { "delims" }, true) },
+ { "find", new Function("String#find", StringFind, { "str", "start" }, true) },
+ { "contains", new Function("String#contains", StringContains, { "str" }, true) },
+ { "replace", new Function("String#replace", StringReplace, { "search", "replacement" }, true) },
+ { "reverse", new Function("String#reverse", StringReverse, {}, true) },
+ { "trim", new Function("String#trim", StringTrim, {}, true) }
+ });
+
+ return prototype;
+}
+
diff --git a/lib/base/string.cpp b/lib/base/string.cpp
new file mode 100644
index 0000000..3c440cd
--- /dev/null
+++ b/lib/base/string.cpp
@@ -0,0 +1,468 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/string.hpp"
+#include "base/value.hpp"
+#include "base/primitivetype.hpp"
+#include "base/dictionary.hpp"
+#include <boost/algorithm/string/case_conv.hpp>
+#include <boost/algorithm/string/trim.hpp>
+#include <boost/algorithm/string/split.hpp>
+#include <ostream>
+
+using namespace icinga;
+
+template class std::vector<String>;
+
+REGISTER_BUILTIN_TYPE(String, String::GetPrototype());
+
+const String::SizeType String::NPos = std::string::npos;
+
+String::String(const char *data)
+ : m_Data(data)
+{ }
+
+String::String(std::string data)
+ : m_Data(std::move(data))
+{ }
+
+String::String(String::SizeType n, char c)
+ : m_Data(n, c)
+{ }
+
+String::String(const String& other)
+ : m_Data(other)
+{ }
+
+String::String(String&& other)
+ : m_Data(std::move(other.m_Data))
+{ }
+
+#ifndef _MSC_VER
+String::String(Value&& other)
+{
+ *this = std::move(other);
+}
+#endif /* _MSC_VER */
+
+String& String::operator=(Value&& other)
+{
+ if (other.IsString())
+ m_Data = std::move(other.Get<String>());
+ else
+ *this = static_cast<String>(other);
+
+ return *this;
+}
+
+String& String::operator+=(const Value& rhs)
+{
+ m_Data += static_cast<String>(rhs);
+ return *this;
+}
+
+String& String::operator=(const String& rhs)
+{
+ m_Data = rhs.m_Data;
+ return *this;
+}
+
+String& String::operator=(String&& rhs)
+{
+ m_Data = std::move(rhs.m_Data);
+ return *this;
+}
+
+String& String::operator=(const std::string& rhs)
+{
+ m_Data = rhs;
+ return *this;
+}
+
+String& String::operator=(const char *rhs)
+{
+ m_Data = rhs;
+ return *this;
+}
+
+const char& String::operator[](String::SizeType pos) const
+{
+ return m_Data[pos];
+}
+
+char& String::operator[](String::SizeType pos)
+{
+ return m_Data[pos];
+}
+
+String& String::operator+=(const String& rhs)
+{
+ m_Data += rhs.m_Data;
+ return *this;
+}
+
+String& String::operator+=(const char *rhs)
+{
+ m_Data += rhs;
+ return *this;
+}
+
+String& String::operator+=(char rhs)
+{
+ m_Data += rhs;
+ return *this;
+}
+
+bool String::IsEmpty() const
+{
+ return m_Data.empty();
+}
+
+bool String::operator<(const String& rhs) const
+{
+ return m_Data < rhs.m_Data;
+}
+
+String::operator const std::string&() const
+{
+ return m_Data;
+}
+
+/**
+ * Conversion function to boost::beast::string_view.
+ *
+ * This allows using String as the value for HTTP headers in boost::beast::http::basic_fields::set.
+ *
+ * @return A boost::beast::string_view representing this string.
+ */
+String::operator boost::beast::string_view() const
+{
+ return boost::beast::string_view(m_Data);
+}
+
+const char *String::CStr() const
+{
+ return m_Data.c_str();
+}
+
+void String::Clear()
+{
+ m_Data.clear();
+}
+
+String::SizeType String::GetLength() const
+{
+ return m_Data.size();
+}
+
+std::string& String::GetData()
+{
+ return m_Data;
+}
+
+const std::string& String::GetData() const
+{
+ return m_Data;
+}
+
+String::SizeType String::Find(const String& str, String::SizeType pos) const
+{
+ return m_Data.find(str, pos);
+}
+
+String::SizeType String::RFind(const String& str, String::SizeType pos) const
+{
+ return m_Data.rfind(str, pos);
+}
+
+String::SizeType String::FindFirstOf(const char *s, String::SizeType pos) const
+{
+ return m_Data.find_first_of(s, pos);
+}
+
+String::SizeType String::FindFirstOf(char ch, String::SizeType pos) const
+{
+ return m_Data.find_first_of(ch, pos);
+}
+
+String::SizeType String::FindFirstNotOf(const char *s, String::SizeType pos) const
+{
+ return m_Data.find_first_not_of(s, pos);
+}
+
+String::SizeType String::FindFirstNotOf(char ch, String::SizeType pos) const
+{
+ return m_Data.find_first_not_of(ch, pos);
+}
+
+String::SizeType String::FindLastOf(const char *s, String::SizeType pos) const
+{
+ return m_Data.find_last_of(s, pos);
+}
+
+String::SizeType String::FindLastOf(char ch, String::SizeType pos) const
+{
+ return m_Data.find_last_of(ch, pos);
+}
+
+String String::SubStr(String::SizeType first, String::SizeType len) const
+{
+ return m_Data.substr(first, len);
+}
+
+std::vector<String> String::Split(const char *separators) const
+{
+ std::vector<String> result;
+ boost::algorithm::split(result, m_Data, boost::is_any_of(separators));
+ return result;
+}
+
+void String::Replace(String::SizeType first, String::SizeType second, const String& str)
+{
+ m_Data.replace(first, second, str);
+}
+
+String String::Trim() const
+{
+ String t = m_Data;
+ boost::algorithm::trim(t);
+ return t;
+}
+
+String String::ToLower() const
+{
+ String t = m_Data;
+ boost::algorithm::to_lower(t);
+ return t;
+}
+
+String String::ToUpper() const
+{
+ String t = m_Data;
+ boost::algorithm::to_upper(t);
+ return t;
+}
+
+String String::Reverse() const
+{
+ String t = m_Data;
+ std::reverse(t.m_Data.begin(), t.m_Data.end());
+ return t;
+}
+
+void String::Append(int count, char ch)
+{
+ m_Data.append(count, ch);
+}
+
+bool String::Contains(const String& str) const
+{
+ return (m_Data.find(str) != std::string::npos);
+}
+
+void String::swap(String& str)
+{
+ m_Data.swap(str.m_Data);
+}
+
+String::Iterator String::erase(String::Iterator first, String::Iterator last)
+{
+ return m_Data.erase(first, last);
+}
+
+String::Iterator String::Begin()
+{
+ return m_Data.begin();
+}
+
+String::ConstIterator String::Begin() const
+{
+ return m_Data.begin();
+}
+
+String::Iterator String::End()
+{
+ return m_Data.end();
+}
+
+String::ConstIterator String::End() const
+{
+ return m_Data.end();
+}
+
+String::ReverseIterator String::RBegin()
+{
+ return m_Data.rbegin();
+}
+
+String::ConstReverseIterator String::RBegin() const
+{
+ return m_Data.rbegin();
+}
+
+String::ReverseIterator String::REnd()
+{
+ return m_Data.rend();
+}
+
+String::ConstReverseIterator String::REnd() const
+{
+ return m_Data.rend();
+}
+
+std::ostream& icinga::operator<<(std::ostream& stream, const String& str)
+{
+ stream << str.GetData();
+ return stream;
+}
+
+std::istream& icinga::operator>>(std::istream& stream, String& str)
+{
+ std::string tstr;
+ stream >> tstr;
+ str = tstr;
+ return stream;
+}
+
+String icinga::operator+(const String& lhs, const String& rhs)
+{
+ return lhs.GetData() + rhs.GetData();
+}
+
+String icinga::operator+(const String& lhs, const char *rhs)
+{
+ return lhs.GetData() + rhs;
+}
+
+String icinga::operator+(const char *lhs, const String& rhs)
+{
+ return lhs + rhs.GetData();
+}
+
+bool icinga::operator==(const String& lhs, const String& rhs)
+{
+ return lhs.GetData() == rhs.GetData();
+}
+
+bool icinga::operator==(const String& lhs, const char *rhs)
+{
+ return lhs.GetData() == rhs;
+}
+
+bool icinga::operator==(const char *lhs, const String& rhs)
+{
+ return lhs == rhs.GetData();
+}
+
+bool icinga::operator<(const String& lhs, const char *rhs)
+{
+ return lhs.GetData() < rhs;
+}
+
+bool icinga::operator<(const char *lhs, const String& rhs)
+{
+ return lhs < rhs.GetData();
+}
+
+bool icinga::operator>(const String& lhs, const String& rhs)
+{
+ return lhs.GetData() > rhs.GetData();
+}
+
+bool icinga::operator>(const String& lhs, const char *rhs)
+{
+ return lhs.GetData() > rhs;
+}
+
+bool icinga::operator>(const char *lhs, const String& rhs)
+{
+ return lhs > rhs.GetData();
+}
+
+bool icinga::operator<=(const String& lhs, const String& rhs)
+{
+ return lhs.GetData() <= rhs.GetData();
+}
+
+bool icinga::operator<=(const String& lhs, const char *rhs)
+{
+ return lhs.GetData() <= rhs;
+}
+
+bool icinga::operator<=(const char *lhs, const String& rhs)
+{
+ return lhs <= rhs.GetData();
+}
+
+bool icinga::operator>=(const String& lhs, const String& rhs)
+{
+ return lhs.GetData() >= rhs.GetData();
+}
+
+bool icinga::operator>=(const String& lhs, const char *rhs)
+{
+ return lhs.GetData() >= rhs;
+}
+
+bool icinga::operator>=(const char *lhs, const String& rhs)
+{
+ return lhs >= rhs.GetData();
+}
+
+bool icinga::operator!=(const String& lhs, const String& rhs)
+{
+ return lhs.GetData() != rhs.GetData();
+}
+
+bool icinga::operator!=(const String& lhs, const char *rhs)
+{
+ return lhs.GetData() != rhs;
+}
+
+bool icinga::operator!=(const char *lhs, const String& rhs)
+{
+ return lhs != rhs.GetData();
+}
+
+String::Iterator icinga::begin(String& x)
+{
+ return x.Begin();
+}
+
+String::ConstIterator icinga::begin(const String& x)
+{
+ return x.Begin();
+}
+
+String::Iterator icinga::end(String& x)
+{
+ return x.End();
+}
+
+String::ConstIterator icinga::end(const String& x)
+{
+ return x.End();
+}
+String::Iterator icinga::range_begin(String& x)
+{
+ return x.Begin();
+}
+
+String::ConstIterator icinga::range_begin(const String& x)
+{
+ return x.Begin();
+}
+
+String::Iterator icinga::range_end(String& x)
+{
+ return x.End();
+}
+
+String::ConstIterator icinga::range_end(const String& x)
+{
+ return x.End();
+}
+
+std::size_t std::hash<String>::operator()(const String& s) const noexcept
+{
+ return std::hash<std::string>{}(s.GetData());
+}
diff --git a/lib/base/string.hpp b/lib/base/string.hpp
new file mode 100644
index 0000000..0eb08b5
--- /dev/null
+++ b/lib/base/string.hpp
@@ -0,0 +1,208 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef STRING_H
+#define STRING_H
+
+#include "base/i2-base.hpp"
+#include "base/object.hpp"
+#include <boost/beast/core.hpp>
+#include <boost/range/iterator.hpp>
+#include <boost/utility/string_view.hpp>
+#include <functional>
+#include <string>
+#include <iosfwd>
+
+namespace icinga {
+
+class Value;
+
+/**
+ * String class.
+ *
+ * Rationale for having this: The std::string class has an ambiguous assignment
+ * operator when used in conjunction with the Value class.
+ */
+class String
+{
+public:
+ typedef std::string::iterator Iterator;
+ typedef std::string::const_iterator ConstIterator;
+
+ typedef std::string::iterator iterator;
+ typedef std::string::const_iterator const_iterator;
+
+ typedef std::string::reverse_iterator ReverseIterator;
+ typedef std::string::const_reverse_iterator ConstReverseIterator;
+
+ typedef std::string::reverse_iterator reverse_iterator;
+ typedef std::string::const_reverse_iterator const_reverse_iterator;
+
+ typedef std::string::size_type SizeType;
+
+ String() = default;
+ String(const char *data);
+ String(std::string data);
+ String(String::SizeType n, char c);
+ String(const String& other);
+ String(String&& other);
+
+#ifndef _MSC_VER
+ String(Value&& other);
+#endif /* _MSC_VER */
+
+ template<typename InputIterator>
+ String(InputIterator begin, InputIterator end)
+ : m_Data(begin, end)
+ { }
+
+ String& operator=(const String& rhs);
+ String& operator=(String&& rhs);
+ String& operator=(Value&& rhs);
+ String& operator=(const std::string& rhs);
+ String& operator=(const char *rhs);
+
+ const char& operator[](SizeType pos) const;
+ char& operator[](SizeType pos);
+
+ String& operator+=(const String& rhs);
+ String& operator+=(const char *rhs);
+ String& operator+=(const Value& rhs);
+ String& operator+=(char rhs);
+
+ bool IsEmpty() const;
+
+ bool operator<(const String& rhs) const;
+
+ operator const std::string&() const;
+ operator boost::beast::string_view() const;
+
+ const char *CStr() const;
+
+ void Clear();
+
+ SizeType GetLength() const;
+
+ std::string& GetData();
+ const std::string& GetData() const;
+
+ SizeType Find(const String& str, SizeType pos = 0) const;
+ SizeType RFind(const String& str, SizeType pos = NPos) const;
+ SizeType FindFirstOf(const char *s, SizeType pos = 0) const;
+ SizeType FindFirstOf(char ch, SizeType pos = 0) const;
+ SizeType FindFirstNotOf(const char *s, SizeType pos = 0) const;
+ SizeType FindFirstNotOf(char ch, SizeType pos = 0) const;
+ SizeType FindLastOf(const char *s, SizeType pos = NPos) const;
+ SizeType FindLastOf(char ch, SizeType pos = NPos) const;
+
+ String SubStr(SizeType first, SizeType len = NPos) const;
+
+ std::vector<String> Split(const char *separators) const;
+
+ void Replace(SizeType first, SizeType second, const String& str);
+
+ String Trim() const;
+
+ String ToLower() const;
+
+ String ToUpper() const;
+
+ String Reverse() const;
+
+ void Append(int count, char ch);
+
+ bool Contains(const String& str) const;
+
+ void swap(String& str);
+
+ Iterator erase(Iterator first, Iterator last);
+
+ template<typename InputIterator>
+ void insert(Iterator p, InputIterator first, InputIterator last)
+ {
+ m_Data.insert(p, first, last);
+ }
+
+ Iterator Begin();
+ ConstIterator Begin() const;
+ Iterator End();
+ ConstIterator End() const;
+ ReverseIterator RBegin();
+ ConstReverseIterator RBegin() const;
+ ReverseIterator REnd();
+ ConstReverseIterator REnd() const;
+
+ static const SizeType NPos;
+
+ static Object::Ptr GetPrototype();
+
+private:
+ std::string m_Data;
+};
+
+std::ostream& operator<<(std::ostream& stream, const String& str);
+std::istream& operator>>(std::istream& stream, String& str);
+
+String operator+(const String& lhs, const String& rhs);
+String operator+(const String& lhs, const char *rhs);
+String operator+(const char *lhs, const String& rhs);
+
+bool operator==(const String& lhs, const String& rhs);
+bool operator==(const String& lhs, const char *rhs);
+bool operator==(const char *lhs, const String& rhs);
+
+bool operator<(const String& lhs, const char *rhs);
+bool operator<(const char *lhs, const String& rhs);
+
+bool operator>(const String& lhs, const String& rhs);
+bool operator>(const String& lhs, const char *rhs);
+bool operator>(const char *lhs, const String& rhs);
+
+bool operator<=(const String& lhs, const String& rhs);
+bool operator<=(const String& lhs, const char *rhs);
+bool operator<=(const char *lhs, const String& rhs);
+
+bool operator>=(const String& lhs, const String& rhs);
+bool operator>=(const String& lhs, const char *rhs);
+bool operator>=(const char *lhs, const String& rhs);
+
+bool operator!=(const String& lhs, const String& rhs);
+bool operator!=(const String& lhs, const char *rhs);
+bool operator!=(const char *lhs, const String& rhs);
+
+String::Iterator begin(String& x);
+String::ConstIterator begin(const String& x);
+String::Iterator end(String& x);
+String::ConstIterator end(const String& x);
+String::Iterator range_begin(String& x);
+String::ConstIterator range_begin(const String& x);
+String::Iterator range_end(String& x);
+String::ConstIterator range_end(const String& x);
+
+}
+
+template<>
+struct std::hash<icinga::String>
+{
+ std::size_t operator()(const icinga::String& s) const noexcept;
+};
+
+extern template class std::vector<icinga::String>;
+
+namespace boost
+{
+
+template<>
+struct range_mutable_iterator<icinga::String>
+{
+ typedef icinga::String::Iterator type;
+};
+
+template<>
+struct range_const_iterator<icinga::String>
+{
+ typedef icinga::String::ConstIterator type;
+};
+
+}
+
+#endif /* STRING_H */
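
As an aside: a minimal usage sketch of the String wrapper declared above, assuming `base/string.hpp` is reachable on the include path inside the Icinga tree. Only Split(), Trim(), the implicit const char* conversion and the std::hash<String> specialisation from this file are exercised; the sample data is made up.

```cpp
#include "base/string.hpp"
#include <iostream>
#include <unordered_set>

using icinga::String;

int main()
{
	String line = "  ping4, ping6 ,http  ";

	// Trim() and Split() return copies; the original String is unchanged.
	std::unordered_set<String> checks; // works thanks to the std::hash<String> specialisation
	for (const String& item : line.Split(",")) {
		checks.insert(item.Trim());
	}

	std::cout << "have http: " << checks.count("http") << "\n";
	return 0;
}
```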
diff --git a/lib/base/sysloglogger.cpp b/lib/base/sysloglogger.cpp
new file mode 100644
index 0000000..fc2ec09
--- /dev/null
+++ b/lib/base/sysloglogger.cpp
@@ -0,0 +1,144 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef _WIN32
+#include "base/sysloglogger.hpp"
+#include "base/sysloglogger-ti.cpp"
+#include "base/configtype.hpp"
+#include "base/statsfunction.hpp"
+#include <syslog.h>
+
+using namespace icinga;
+
+REGISTER_TYPE(SyslogLogger);
+
+REGISTER_STATSFUNCTION(SyslogLogger, &SyslogLogger::StatsFunc);
+
+INITIALIZE_ONCE(&SyslogHelper::StaticInitialize);
+
+std::map<String, int> SyslogHelper::m_FacilityMap;
+
+void SyslogHelper::StaticInitialize()
+{
+ ScriptGlobal::Set("System.FacilityAuth", "LOG_AUTH");
+ ScriptGlobal::Set("System.FacilityAuthPriv", "LOG_AUTHPRIV");
+ ScriptGlobal::Set("System.FacilityCron", "LOG_CRON");
+ ScriptGlobal::Set("System.FacilityDaemon", "LOG_DAEMON");
+ ScriptGlobal::Set("System.FacilityFtp", "LOG_FTP");
+ ScriptGlobal::Set("System.FacilityKern", "LOG_KERN");
+ ScriptGlobal::Set("System.FacilityLocal0", "LOG_LOCAL0");
+ ScriptGlobal::Set("System.FacilityLocal1", "LOG_LOCAL1");
+ ScriptGlobal::Set("System.FacilityLocal2", "LOG_LOCAL2");
+ ScriptGlobal::Set("System.FacilityLocal3", "LOG_LOCAL3");
+ ScriptGlobal::Set("System.FacilityLocal4", "LOG_LOCAL4");
+ ScriptGlobal::Set("System.FacilityLocal5", "LOG_LOCAL5");
+ ScriptGlobal::Set("System.FacilityLocal6", "LOG_LOCAL6");
+ ScriptGlobal::Set("System.FacilityLocal7", "LOG_LOCAL7");
+ ScriptGlobal::Set("System.FacilityLpr", "LOG_LPR");
+ ScriptGlobal::Set("System.FacilityMail", "LOG_MAIL");
+ ScriptGlobal::Set("System.FacilityNews", "LOG_NEWS");
+ ScriptGlobal::Set("System.FacilitySyslog", "LOG_SYSLOG");
+ ScriptGlobal::Set("System.FacilityUser", "LOG_USER");
+ ScriptGlobal::Set("System.FacilityUucp", "LOG_UUCP");
+
+ m_FacilityMap["LOG_AUTH"] = LOG_AUTH;
+ m_FacilityMap["LOG_AUTHPRIV"] = LOG_AUTHPRIV;
+ m_FacilityMap["LOG_CRON"] = LOG_CRON;
+ m_FacilityMap["LOG_DAEMON"] = LOG_DAEMON;
+#ifdef LOG_FTP
+ m_FacilityMap["LOG_FTP"] = LOG_FTP;
+#endif /* LOG_FTP */
+ m_FacilityMap["LOG_KERN"] = LOG_KERN;
+ m_FacilityMap["LOG_LOCAL0"] = LOG_LOCAL0;
+ m_FacilityMap["LOG_LOCAL1"] = LOG_LOCAL1;
+ m_FacilityMap["LOG_LOCAL2"] = LOG_LOCAL2;
+ m_FacilityMap["LOG_LOCAL3"] = LOG_LOCAL3;
+ m_FacilityMap["LOG_LOCAL4"] = LOG_LOCAL4;
+ m_FacilityMap["LOG_LOCAL5"] = LOG_LOCAL5;
+ m_FacilityMap["LOG_LOCAL6"] = LOG_LOCAL6;
+ m_FacilityMap["LOG_LOCAL7"] = LOG_LOCAL7;
+ m_FacilityMap["LOG_LPR"] = LOG_LPR;
+ m_FacilityMap["LOG_MAIL"] = LOG_MAIL;
+ m_FacilityMap["LOG_NEWS"] = LOG_NEWS;
+ m_FacilityMap["LOG_SYSLOG"] = LOG_SYSLOG;
+ m_FacilityMap["LOG_USER"] = LOG_USER;
+ m_FacilityMap["LOG_UUCP"] = LOG_UUCP;
+}
+
+bool SyslogHelper::ValidateFacility(const String& facility)
+{
+ if (m_FacilityMap.find(facility) == m_FacilityMap.end()) {
+ try {
+ Convert::ToLong(facility);
+ } catch (const std::exception&) {
+ return false;
+ }
+ }
+ return true;
+}
+
+int SyslogHelper::SeverityToNumber(LogSeverity severity)
+{
+ switch (severity) {
+ case LogDebug:
+ return LOG_DEBUG;
+ case LogNotice:
+ return LOG_NOTICE;
+ case LogWarning:
+ return LOG_WARNING;
+ case LogCritical:
+ return LOG_CRIT;
+ case LogInformation:
+ default:
+ return LOG_INFO;
+ }
+}
+
+int SyslogHelper::FacilityToNumber(const String& facility)
+{
+ auto it = m_FacilityMap.find(facility);
+ if (it != m_FacilityMap.end())
+ return it->second;
+ else
+ return Convert::ToLong(facility);
+}
+
+void SyslogLogger::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr&)
+{
+ DictionaryData nodes;
+
+ for (const SyslogLogger::Ptr& sysloglogger : ConfigType::GetObjectsByType<SyslogLogger>()) {
+ nodes.emplace_back(sysloglogger->GetName(), 1); //add more stats
+ }
+
+ status->Set("sysloglogger", new Dictionary(std::move(nodes)));
+}
+
+void SyslogLogger::OnConfigLoaded()
+{
+ ObjectImpl<SyslogLogger>::OnConfigLoaded();
+ m_Facility = SyslogHelper::FacilityToNumber(GetFacility());
+}
+
+void SyslogLogger::ValidateFacility(const Lazy<String>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<SyslogLogger>::ValidateFacility(lvalue, utils);
+ if (!SyslogHelper::ValidateFacility(lvalue()))
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "facility" }, "Invalid facility specified."));
+}
+
+/**
+ * Processes a log entry and outputs it to syslog.
+ *
+ * @param entry The log entry.
+ */
+void SyslogLogger::ProcessLogEntry(const LogEntry& entry)
+{
+ syslog(SyslogHelper::SeverityToNumber(entry.Severity) | m_Facility,
+ "%s", entry.Message.CStr());
+}
+
+void SyslogLogger::Flush()
+{
+ /* Nothing to do here. */
+}
+#endif /* _WIN32 */
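
To make the priority composition in ProcessLogEntry() above concrete, here is a standalone POSIX syslog sketch outside the Icinga code base; the ident string, facility and message are illustrative only.

```cpp
#include <syslog.h>

int main()
{
	// Equivalent of a SyslogLogger configured with facility LOG_LOCAL0:
	// the facility is OR-ed into every priority passed to syslog().
	openlog("icinga2-example", LOG_PID, LOG_LOCAL0);

	// Matches SyslogHelper::SeverityToNumber(LogWarning) | LOG_LOCAL0.
	syslog(LOG_WARNING | LOG_LOCAL0, "%s", "example warning message");

	closelog();
	return 0;
}
```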
diff --git a/lib/base/sysloglogger.hpp b/lib/base/sysloglogger.hpp
new file mode 100644
index 0000000..d1d6859
--- /dev/null
+++ b/lib/base/sysloglogger.hpp
@@ -0,0 +1,56 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef SYSLOGLOGGER_H
+#define SYSLOGLOGGER_H
+
+#ifndef _WIN32
+#include "base/i2-base.hpp"
+#include "base/sysloglogger-ti.hpp"
+
+namespace icinga
+{
+
+/**
+ * Helper class to handle syslog facility strings and numbers.
+ *
+ * @ingroup base
+ */
+class SyslogHelper final
+{
+public:
+ static void StaticInitialize();
+ static bool ValidateFacility(const String& facility);
+ static int SeverityToNumber(LogSeverity severity);
+ static int FacilityToNumber(const String& facility);
+
+private:
+ static std::map<String, int> m_FacilityMap;
+};
+
+/**
+ * A logger that logs to syslog.
+ *
+ * @ingroup base
+ */
+class SyslogLogger final : public ObjectImpl<SyslogLogger>
+{
+public:
+ DECLARE_OBJECT(SyslogLogger);
+ DECLARE_OBJECTNAME(SyslogLogger);
+
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+
+ void OnConfigLoaded() override;
+ void ValidateFacility(const Lazy<String>& lvalue, const ValidationUtils& utils) override;
+
+protected:
+ int m_Facility;
+
+ void ProcessLogEntry(const LogEntry& entry) override;
+ void Flush() override;
+};
+
+}
+#endif /* _WIN32 */
+
+#endif /* SYSLOGLOGGER_H */
diff --git a/lib/base/sysloglogger.ti b/lib/base/sysloglogger.ti
new file mode 100644
index 0000000..8f34359
--- /dev/null
+++ b/lib/base/sysloglogger.ti
@@ -0,0 +1,19 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/logger.hpp"
+
+library base;
+
+namespace icinga
+{
+
+class SyslogLogger : Logger
+{
+ activation_priority -100;
+
+ [config] String facility {
+ default {{{ return "LOG_USER"; }}}
+ };
+};
+
+}
diff --git a/lib/base/tcpsocket.cpp b/lib/base/tcpsocket.cpp
new file mode 100644
index 0000000..a9390e5
--- /dev/null
+++ b/lib/base/tcpsocket.cpp
@@ -0,0 +1,211 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/tcpsocket.hpp"
+#include "base/logger.hpp"
+#include "base/utility.hpp"
+#include "base/exception.hpp"
+#include <boost/exception/errinfo_api_function.hpp>
+#include <boost/exception/errinfo_errno.hpp>
+#include <iostream>
+
+using namespace icinga;
+
+/**
+ * Creates a socket and binds it to the specified service.
+ *
+ * @param service The service.
+ * @param family The address family for the socket.
+ */
+void TcpSocket::Bind(const String& service, int family)
+{
+ Bind(String(), service, family);
+}
+
+/**
+ * Creates a socket and binds it to the specified node and service.
+ *
+ * @param node The node.
+ * @param service The service.
+ * @param family The address family for the socket.
+ */
+void TcpSocket::Bind(const String& node, const String& service, int family)
+{
+ addrinfo hints;
+ addrinfo *result;
+ int error;
+ const char *func;
+
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = family;
+ hints.ai_socktype = SOCK_STREAM;
+ hints.ai_protocol = IPPROTO_TCP;
+ hints.ai_flags = AI_PASSIVE;
+
+ int rc = getaddrinfo(node.IsEmpty() ? nullptr : node.CStr(),
+ service.CStr(), &hints, &result);
+
+ if (rc != 0) {
+ Log(LogCritical, "TcpSocket")
+ << "getaddrinfo() failed with error code " << rc << ", \"" << gai_strerror(rc) << "\"";
+
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function("getaddrinfo")
+ << errinfo_getaddrinfo_error(rc));
+ }
+
+ SOCKET fd = INVALID_SOCKET;
+
+ for (addrinfo *info = result; info != nullptr; info = info->ai_next) {
+ fd = socket(info->ai_family, info->ai_socktype, info->ai_protocol);
+
+ if (fd == INVALID_SOCKET) {
+#ifdef _WIN32
+ error = WSAGetLastError();
+#else /* _WIN32 */
+ error = errno;
+#endif /* _WIN32 */
+ func = "socket";
+
+ continue;
+ }
+
+ const int optFalse = 0;
+ setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, reinterpret_cast<const char *>(&optFalse), sizeof(optFalse));
+
+ const int optTrue = 1;
+ setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, reinterpret_cast<const char *>(&optTrue), sizeof(optTrue));
+#ifdef SO_REUSEPORT
+ setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, reinterpret_cast<const char *>(&optTrue), sizeof(optTrue));
+#endif /* SO_REUSEPORT */
+
+ int rc = bind(fd, info->ai_addr, info->ai_addrlen);
+
+ if (rc < 0) {
+#ifdef _WIN32
+ error = WSAGetLastError();
+#else /* _WIN32 */
+ error = errno;
+#endif /* _WIN32 */
+ func = "bind";
+
+ closesocket(fd);
+
+ continue;
+ }
+
+ SetFD(fd);
+
+ break;
+ }
+
+ freeaddrinfo(result);
+
+ if (GetFD() == INVALID_SOCKET) {
+ Log(LogCritical, "TcpSocket")
+ << "Invalid socket: " << Utility::FormatErrorNumber(error);
+
+#ifndef _WIN32
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function(func)
+ << boost::errinfo_errno(error));
+#else /* _WIN32 */
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function(func)
+ << errinfo_win32_error(error));
+#endif /* _WIN32 */
+ }
+}
+
+/**
+ * Creates a socket and connects to the specified node and service.
+ *
+ * @param node The node.
+ * @param service The service.
+ */
+void TcpSocket::Connect(const String& node, const String& service)
+{
+ addrinfo hints;
+ addrinfo *result;
+ int error;
+ const char *func;
+
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC;
+ hints.ai_socktype = SOCK_STREAM;
+ hints.ai_protocol = IPPROTO_TCP;
+
+ int rc = getaddrinfo(node.CStr(), service.CStr(), &hints, &result);
+
+ if (rc != 0) {
+ Log(LogCritical, "TcpSocket")
+ << "getaddrinfo() failed with error code " << rc << ", \"" << gai_strerror(rc) << "\"";
+
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function("getaddrinfo")
+ << errinfo_getaddrinfo_error(rc));
+ }
+
+ SOCKET fd = INVALID_SOCKET;
+
+ for (addrinfo *info = result; info != nullptr; info = info->ai_next) {
+ fd = socket(info->ai_family, info->ai_socktype, info->ai_protocol);
+
+ if (fd == INVALID_SOCKET) {
+#ifdef _WIN32
+ error = WSAGetLastError();
+#else /* _WIN32 */
+ error = errno;
+#endif /* _WIN32 */
+ func = "socket";
+
+ continue;
+ }
+
+ const int optTrue = 1;
+ if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, reinterpret_cast<const char *>(&optTrue), sizeof(optTrue)) != 0) {
+#ifdef _WIN32
+ error = WSAGetLastError();
+#else /* _WIN32 */
+ error = errno;
+#endif /* _WIN32 */
+ Log(LogWarning, "TcpSocket")
+ << "setsockopt() unable to enable TCP keep-alives with error code " << rc;
+ }
+
+ rc = connect(fd, info->ai_addr, info->ai_addrlen);
+
+ if (rc < 0) {
+#ifdef _WIN32
+ error = WSAGetLastError();
+#else /* _WIN32 */
+ error = errno;
+#endif /* _WIN32 */
+ func = "connect";
+
+ closesocket(fd);
+
+ continue;
+ }
+
+ SetFD(fd);
+
+ break;
+ }
+
+ freeaddrinfo(result);
+
+ if (GetFD() == INVALID_SOCKET) {
+ Log(LogCritical, "TcpSocket")
+ << "Invalid socket: " << Utility::FormatErrorNumber(error);
+
+#ifndef _WIN32
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function(func)
+ << boost::errinfo_errno(error));
+#else /* _WIN32 */
+ BOOST_THROW_EXCEPTION(socket_error()
+ << boost::errinfo_api_function(func)
+ << errinfo_win32_error(error));
+#endif /* _WIN32 */
+ }
+}
diff --git a/lib/base/tcpsocket.hpp b/lib/base/tcpsocket.hpp
new file mode 100644
index 0000000..471ad8d
--- /dev/null
+++ b/lib/base/tcpsocket.hpp
@@ -0,0 +1,102 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef TCPSOCKET_H
+#define TCPSOCKET_H
+
+#include "base/i2-base.hpp"
+#include "base/io-engine.hpp"
+#include "base/socket.hpp"
+#include <boost/asio/error.hpp>
+#include <boost/asio/ip/tcp.hpp>
+#include <boost/asio/spawn.hpp>
+#include <boost/system/system_error.hpp>
+
+namespace icinga
+{
+
+/**
+ * A TCP socket. DEPRECATED - Use Boost ASIO instead.
+ *
+ * @ingroup base
+ */
+class TcpSocket final : public Socket
+{
+public:
+ DECLARE_PTR_TYPEDEFS(TcpSocket);
+
+ void Bind(const String& service, int family);
+ void Bind(const String& node, const String& service, int family);
+
+ void Connect(const String& node, const String& service);
+};
+
+/**
+ * TCP Connect based on Boost ASIO.
+ *
+ * @ingroup base
+ */
+template<class Socket>
+void Connect(Socket& socket, const String& node, const String& service)
+{
+ using boost::asio::ip::tcp;
+
+ tcp::resolver resolver (IoEngine::Get().GetIoContext());
+ tcp::resolver::query query (node, service);
+ auto result (resolver.resolve(query));
+ auto current (result.begin());
+
+ for (;;) {
+ try {
+ socket.open(current->endpoint().protocol());
+ socket.set_option(tcp::socket::keep_alive(true));
+ socket.connect(current->endpoint());
+
+ break;
+ } catch (const std::exception& ex) {
+ auto se (dynamic_cast<const boost::system::system_error*>(&ex));
+
+ if ((se && se->code() == boost::asio::error::operation_aborted) || ++current == result.end()) {
+ throw;
+ }
+
+ if (socket.is_open()) {
+ socket.close();
+ }
+ }
+ }
+}
+
+template<class Socket>
+void Connect(Socket& socket, const String& node, const String& service, boost::asio::yield_context yc)
+{
+ using boost::asio::ip::tcp;
+
+ tcp::resolver resolver (IoEngine::Get().GetIoContext());
+ tcp::resolver::query query (node, service);
+ auto result (resolver.async_resolve(query, yc));
+ auto current (result.begin());
+
+ for (;;) {
+ try {
+ socket.open(current->endpoint().protocol());
+ socket.set_option(tcp::socket::keep_alive(true));
+ socket.async_connect(current->endpoint(), yc);
+
+ break;
+ } catch (const std::exception& ex) {
+ auto se (dynamic_cast<const boost::system::system_error*>(&ex));
+
+ if ((se && se->code() == boost::asio::error::operation_aborted) || ++current == result.end()) {
+ throw;
+ }
+
+ if (socket.is_open()) {
+ socket.close();
+ }
+ }
+ }
+}
+
+}
+
+#endif /* TCPSOCKET_H */
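
A hedged usage sketch for the ASIO-based Connect() template above, roughly as other components in this tree call it; the host, the port and the assumption that IoEngine has already been initialised by the surrounding process are illustrative.

```cpp
#include "base/tcpsocket.hpp"
#include <boost/asio/ip/tcp.hpp>
#include <iostream>

// Illustrative only: connects a plain TCP socket using the blocking overload.
void ExampleConnect()
{
	boost::asio::ip::tcp::socket socket (icinga::IoEngine::Get().GetIoContext());

	try {
		// Resolves the host and tries each endpoint until one connects,
		// enabling TCP keep-alives on the way (see the template above).
		icinga::Connect(socket, "192.0.2.10", "5665");
	} catch (const std::exception& ex) {
		std::cerr << "connect failed: " << ex.what() << "\n";
	}
}
```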
diff --git a/lib/base/threadpool.cpp b/lib/base/threadpool.cpp
new file mode 100644
index 0000000..dc76e7b
--- /dev/null
+++ b/lib/base/threadpool.cpp
@@ -0,0 +1,51 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/threadpool.hpp"
+#include <boost/thread/locks.hpp>
+
+using namespace icinga;
+
+ThreadPool::ThreadPool() : m_Pending(0)
+{
+ Start();
+}
+
+ThreadPool::~ThreadPool()
+{
+ Stop();
+}
+
+void ThreadPool::Start()
+{
+ boost::unique_lock<decltype(m_Mutex)> lock (m_Mutex);
+
+ if (!m_Pool) {
+ InitializePool();
+ }
+}
+
+void ThreadPool::InitializePool()
+{
+ m_Pool = decltype(m_Pool)(new boost::asio::thread_pool(Configuration::Concurrency * 2u));
+}
+
+void ThreadPool::Stop()
+{
+ boost::unique_lock<decltype(m_Mutex)> lock (m_Mutex);
+
+ if (m_Pool) {
+ m_Pool->join();
+ m_Pool = nullptr;
+ }
+}
+
+void ThreadPool::Restart()
+{
+ boost::unique_lock<decltype(m_Mutex)> lock (m_Mutex);
+
+ if (m_Pool) {
+ m_Pool->join();
+ }
+
+ InitializePool();
+}
diff --git a/lib/base/threadpool.hpp b/lib/base/threadpool.hpp
new file mode 100644
index 0000000..d30fa69
--- /dev/null
+++ b/lib/base/threadpool.hpp
@@ -0,0 +1,101 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef THREADPOOL_H
+#define THREADPOOL_H
+
+#include "base/atomic.hpp"
+#include "base/configuration.hpp"
+#include "base/exception.hpp"
+#include "base/logger.hpp"
+#include <cstddef>
+#include <exception>
+#include <functional>
+#include <memory>
+#include <thread>
+#include <boost/asio/post.hpp>
+#include <boost/asio/thread_pool.hpp>
+#include <boost/thread/locks.hpp>
+#include <boost/thread/shared_mutex.hpp>
+#include <cstdint>
+
+namespace icinga
+{
+
+enum SchedulerPolicy
+{
+ DefaultScheduler,
+ LowLatencyScheduler
+};
+
+/**
+ * A thread pool.
+ *
+ * @ingroup base
+ */
+class ThreadPool
+{
+public:
+ typedef std::function<void ()> WorkFunction;
+
+ ThreadPool();
+ ~ThreadPool();
+
+ void Start();
+ void Stop();
+ void Restart();
+
+ /**
+ * Appends a work item to the work queue. Work items will be processed in FIFO order.
+ *
+ * @param callback The callback function for the work item.
+ * @returns true if the item was queued, false otherwise.
+ */
+ template<class T>
+ bool Post(T callback, SchedulerPolicy)
+ {
+ boost::shared_lock<decltype(m_Mutex)> lock (m_Mutex);
+
+ if (m_Pool) {
+ m_Pending.fetch_add(1);
+
+ boost::asio::post(*m_Pool, [this, callback]() {
+ m_Pending.fetch_sub(1);
+
+ try {
+ callback();
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "ThreadPool")
+ << "Exception thrown in event handler:\n"
+ << DiagnosticInformation(ex);
+ } catch (...) {
+ Log(LogCritical, "ThreadPool", "Exception of unknown type thrown in event handler.");
+ }
+ });
+
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ /**
+ * Returns the number of queued tasks that have not been started yet.
+ *
+ * @returns The number of queued tasks.
+ */
+ inline uint_fast64_t GetPending()
+ {
+ return m_Pending.load();
+ }
+
+private:
+ boost::shared_mutex m_Mutex;
+ std::unique_ptr<boost::asio::thread_pool> m_Pool;
+ Atomic<uint_fast64_t> m_Pending;
+
+ void InitializePool();
+};
+
+}
+
+#endif /* THREADPOOL_H */
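
A short usage sketch for Post() and GetPending() as declared above; the work item is a placeholder, and note that the SchedulerPolicy argument is accepted but not evaluated by this implementation.

```cpp
#include "base/threadpool.hpp"
#include <iostream>

void ExamplePost(icinga::ThreadPool& pool)
{
	// Work items are queued FIFO; Post() returns false once Stop() has run.
	bool queued = pool.Post([]() {
		// ... do some background work ...
	}, icinga::DefaultScheduler);

	if (!queued)
		std::cerr << "thread pool already stopped\n";

	std::cout << "tasks still waiting: " << pool.GetPending() << "\n";
}
```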
diff --git a/lib/base/timer.cpp b/lib/base/timer.cpp
new file mode 100644
index 0000000..ffe1c39
--- /dev/null
+++ b/lib/base/timer.cpp
@@ -0,0 +1,354 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/defer.hpp"
+#include "base/timer.hpp"
+#include "base/debug.hpp"
+#include "base/logger.hpp"
+#include "base/utility.hpp"
+#include <boost/multi_index_container.hpp>
+#include <boost/multi_index/ordered_index.hpp>
+#include <boost/multi_index/key_extractors.hpp>
+#include <chrono>
+#include <condition_variable>
+#include <mutex>
+#include <thread>
+#include <utility>
+
+using namespace icinga;
+
+namespace icinga {
+
+class TimerHolder {
+public:
+ TimerHolder(Timer *timer)
+ : m_Timer(timer)
+ { }
+
+ inline Timer *GetObject() const
+ {
+ return m_Timer;
+ }
+
+ inline double GetNextUnlocked() const
+ {
+ return m_Timer->m_Next;
+ }
+
+ operator Timer *() const
+ {
+ return m_Timer;
+ }
+
+private:
+ Timer *m_Timer;
+};
+
+}
+
+typedef boost::multi_index_container<
+ TimerHolder,
+ boost::multi_index::indexed_by<
+ boost::multi_index::ordered_unique<boost::multi_index::const_mem_fun<TimerHolder, Timer *, &TimerHolder::GetObject> >,
+ boost::multi_index::ordered_non_unique<boost::multi_index::const_mem_fun<TimerHolder, double, &TimerHolder::GetNextUnlocked> >
+ >
+> TimerSet;
+
+static std::mutex l_TimerMutex;
+static std::condition_variable l_TimerCV;
+static std::thread l_TimerThread;
+static bool l_StopTimerThread;
+static TimerSet l_Timers;
+static int l_AliveTimers = 0;
+
+static Defer l_ShutdownTimersCleanlyOnExit (&Timer::Uninitialize);
+
+Timer::Ptr Timer::Create()
+{
+ Ptr t (new Timer());
+
+ t->m_Self = t;
+
+ return t;
+}
+
+/**
+ * Destructor for the Timer class.
+ */
+Timer::~Timer()
+{
+ Stop(true);
+}
+
+void Timer::Initialize()
+{
+ std::unique_lock<std::mutex> lock(l_TimerMutex);
+
+ if (l_AliveTimers > 0) {
+ InitializeThread();
+ }
+}
+
+void Timer::Uninitialize()
+{
+ std::unique_lock<std::mutex> lock(l_TimerMutex);
+
+ if (l_AliveTimers > 0) {
+ UninitializeThread();
+ }
+}
+
+void Timer::InitializeThread()
+{
+ l_StopTimerThread = false;
+ l_TimerThread = std::thread(&Timer::TimerThreadProc);
+}
+
+void Timer::UninitializeThread()
+{
+ {
+ l_StopTimerThread = true;
+ l_TimerCV.notify_all();
+ }
+
+ l_TimerMutex.unlock();
+
+ if (l_TimerThread.joinable())
+ l_TimerThread.join();
+
+ l_TimerMutex.lock();
+}
+
+/**
+ * Calls this timer.
+ */
+void Timer::Call()
+{
+ try {
+ OnTimerExpired(this);
+ } catch (...) {
+ InternalReschedule(true);
+
+ throw;
+ }
+
+ InternalReschedule(true);
+}
+
+/**
+ * Sets the interval for this timer.
+ *
+ * @param interval The new interval.
+ */
+void Timer::SetInterval(double interval)
+{
+ std::unique_lock<std::mutex> lock(l_TimerMutex);
+ m_Interval = interval;
+}
+
+/**
+ * Retrieves the interval for this timer.
+ *
+ * @returns The interval.
+ */
+double Timer::GetInterval() const
+{
+ std::unique_lock<std::mutex> lock(l_TimerMutex);
+ return m_Interval;
+}
+
+/**
+ * Registers the timer and starts processing events for it.
+ */
+void Timer::Start()
+{
+ std::unique_lock<std::mutex> lock(l_TimerMutex);
+
+ if (!m_Started && ++l_AliveTimers == 1) {
+ InitializeThread();
+ }
+
+ m_Started = true;
+
+ InternalRescheduleUnlocked(false, m_Interval > 0 ? -1 : m_Next);
+}
+
+/**
+ * Unregisters the timer and stops processing events for it.
+ */
+void Timer::Stop(bool wait)
+{
+ if (l_StopTimerThread)
+ return;
+
+ std::unique_lock<std::mutex> lock(l_TimerMutex);
+
+ if (m_Started && --l_AliveTimers == 0) {
+ UninitializeThread();
+ }
+
+ m_Started = false;
+ l_Timers.erase(this);
+
+ /* Notify the worker thread that we've disabled a timer. */
+ l_TimerCV.notify_all();
+
+ while (wait && m_Running)
+ l_TimerCV.wait(lock);
+}
+
+void Timer::Reschedule(double next)
+{
+ InternalReschedule(false, next);
+}
+
+void Timer::InternalReschedule(bool completed, double next)
+{
+ std::unique_lock<std::mutex> lock (l_TimerMutex);
+
+ InternalRescheduleUnlocked(completed, next);
+}
+
+/**
+ * Reschedules this timer.
+ *
+ * @param completed Whether the timer has just completed its callback.
+ * @param next The time when this timer should be called again. Use -1 to let
+ * the timer figure out a suitable time based on the interval.
+ */
+void Timer::InternalRescheduleUnlocked(bool completed, double next)
+{
+ if (completed)
+ m_Running = false;
+
+ if (next < 0) {
+ /* Don't schedule the next call if this is not a periodic timer. */
+ if (m_Interval <= 0)
+ return;
+
+ next = Utility::GetTime() + m_Interval;
+ }
+
+ m_Next = next;
+
+ if (m_Started && !m_Running) {
+ /* Remove and re-add the timer to update the index. */
+ l_Timers.erase(this);
+ l_Timers.insert(this);
+
+ /* Notify the worker that we've rescheduled a timer. */
+ l_TimerCV.notify_all();
+ }
+}
+
+/**
+ * Retrieves when the timer is next due.
+ *
+ * @returns The timestamp.
+ */
+double Timer::GetNext() const
+{
+ std::unique_lock<std::mutex> lock(l_TimerMutex);
+ return m_Next;
+}
+
+/**
+ * Adjusts all periodic timers by adding the specified amount of time to their
+ * next scheduled timestamp.
+ *
+ * @param adjustment The adjustment.
+ */
+void Timer::AdjustTimers(double adjustment)
+{
+ std::unique_lock<std::mutex> lock(l_TimerMutex);
+
+ double now = Utility::GetTime();
+
+ typedef boost::multi_index::nth_index<TimerSet, 1>::type TimerView;
+ TimerView& idx = boost::get<1>(l_Timers);
+
+ std::vector<Timer *> timers;
+
+ for (Timer *timer : idx) {
+ /* Don't schedule the next call if this is not a periodic timer. */
+ if (timer->m_Interval <= 0) {
+ continue;
+ }
+
+ if (std::fabs(now - (timer->m_Next + adjustment)) <
+ std::fabs(now - timer->m_Next)) {
+ timer->m_Next += adjustment;
+ timers.push_back(timer);
+ }
+ }
+
+ for (Timer *timer : timers) {
+ l_Timers.erase(timer);
+ l_Timers.insert(timer);
+ }
+
+ /* Notify the worker that we've rescheduled some timers. */
+ l_TimerCV.notify_all();
+}
+
+/**
+ * Worker thread proc for Timer objects.
+ */
+void Timer::TimerThreadProc()
+{
+ namespace ch = std::chrono;
+
+ Log(LogDebug, "Timer", "TimerThreadProc started.");
+
+ Utility::SetThreadName("Timer Thread");
+
+ std::unique_lock<std::mutex> lock (l_TimerMutex);
+
+ for (;;) {
+ typedef boost::multi_index::nth_index<TimerSet, 1>::type NextTimerView;
+ NextTimerView& idx = boost::get<1>(l_Timers);
+
+ /* Wait until there is at least one timer. */
+ while (idx.empty() && !l_StopTimerThread)
+ l_TimerCV.wait(lock);
+
+ if (l_StopTimerThread)
+ break;
+
+ auto it = idx.begin();
+
+ // timer->~Timer() may be called at any moment (if the last
+ // smart pointer gets destroyed) or may already be waiting for
+ // l_TimerMutex (before doing anything else), which we currently
+ // hold. Until we unlock it, using *timer is safe.
+ Timer *timer = *it;
+
+ ch::time_point<ch::system_clock, ch::duration<double>> next (ch::duration<double>(timer->m_Next));
+
+ if (next - ch::system_clock::now() > ch::duration<double>(0.01)) {
+ /* Wait for the next timer. */
+ l_TimerCV.wait_until(lock, next);
+
+ continue;
+ }
+
+ /* Remove the timer from the list so it doesn't get called again
+ * until the current call is completed. */
+ l_Timers.erase(timer);
+
+ auto keepAlive (timer->m_Self.lock());
+
+ if (!keepAlive) {
+ // The last std::shared_ptr is gone, let ~Timer() proceed
+ continue;
+ }
+
+ timer->m_Running = true;
+
+ lock.unlock();
+
+ /* Asynchronously call the timer. */
+ Utility::QueueAsyncCallback([timer=std::move(keepAlive)]() { timer->Call(); });
+
+ lock.lock();
+ }
+}
diff --git a/lib/base/timer.hpp b/lib/base/timer.hpp
new file mode 100644
index 0000000..db0f0b7
--- /dev/null
+++ b/lib/base/timer.hpp
@@ -0,0 +1,65 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef TIMER_H
+#define TIMER_H
+
+#include "base/i2-base.hpp"
+#include <boost/signals2.hpp>
+#include <memory>
+
+namespace icinga {
+
+class TimerHolder;
+
+/**
+ * A timer that periodically triggers an event.
+ *
+ * @ingroup base
+ */
+class Timer final
+{
+public:
+ typedef std::shared_ptr<Timer> Ptr;
+
+ static Ptr Create();
+
+ ~Timer();
+
+ static void Initialize();
+ static void Uninitialize();
+ static void InitializeThread();
+ static void UninitializeThread();
+
+ void SetInterval(double interval);
+ double GetInterval() const;
+
+ static void AdjustTimers(double adjustment);
+
+ void Start();
+ void Stop(bool wait = false);
+
+ void Reschedule(double next = -1);
+ double GetNext() const;
+
+ boost::signals2::signal<void(const Timer * const&)> OnTimerExpired;
+
+private:
+ double m_Interval{0}; /**< The interval of the timer. */
+ double m_Next{0}; /**< When the next event should happen. */
+ bool m_Started{false}; /**< Whether the timer is enabled. */
+ bool m_Running{false}; /**< Whether the timer proc is currently running. */
+ std::weak_ptr<Timer> m_Self;
+
+ Timer() = default;
+ void Call();
+ void InternalReschedule(bool completed, double next = -1);
+ void InternalRescheduleUnlocked(bool completed, double next = -1);
+
+ static void TimerThreadProc();
+
+ friend class TimerHolder;
+};
+
+}
+
+#endif /* TIMER_H */
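
A usage sketch for the Timer API above; the interval, log facility name and callback body are illustrative, while the calls themselves (Create(), SetInterval(), OnTimerExpired, Start(), Stop()) are the ones declared in this header.

```cpp
#include "base/timer.hpp"
#include "base/logger.hpp"

void ExampleTimer()
{
	// Timers are only ever held through the shared_ptr returned by Create().
	icinga::Timer::Ptr timer = icinga::Timer::Create();

	timer->SetInterval(10); /* seconds */
	timer->OnTimerExpired.connect([](const icinga::Timer * const&) {
		icinga::Log(icinga::LogInformation, "Example", "timer fired");
	});

	timer->Start();

	/* ... later, wait for a running callback to finish before tearing down: */
	timer->Stop(true);
}
```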
diff --git a/lib/base/tlsstream.cpp b/lib/base/tlsstream.cpp
new file mode 100644
index 0000000..db54c91
--- /dev/null
+++ b/lib/base/tlsstream.cpp
@@ -0,0 +1,71 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/tlsstream.hpp"
+#include "base/application.hpp"
+#include "base/utility.hpp"
+#include "base/exception.hpp"
+#include "base/logger.hpp"
+#include "base/configuration.hpp"
+#include "base/convert.hpp"
+#include <boost/asio/ssl/context.hpp>
+#include <boost/asio/ssl/verify_context.hpp>
+#include <boost/asio/ssl/verify_mode.hpp>
+#include <iostream>
+#include <openssl/ssl.h>
+#include <openssl/tls1.h>
+#include <openssl/x509.h>
+#include <sstream>
+
+using namespace icinga;
+
+bool UnbufferedAsioTlsStream::IsVerifyOK() const
+{
+ return m_VerifyOK;
+}
+
+String UnbufferedAsioTlsStream::GetVerifyError() const
+{
+ return m_VerifyError;
+}
+
+std::shared_ptr<X509> UnbufferedAsioTlsStream::GetPeerCertificate()
+{
+ return std::shared_ptr<X509>(SSL_get_peer_certificate(native_handle()), X509_free);
+}
+
+void UnbufferedAsioTlsStream::BeforeHandshake(handshake_type type)
+{
+ namespace ssl = boost::asio::ssl;
+
+ if (!m_Hostname.IsEmpty()) {
+ X509_VERIFY_PARAM_set1_host(SSL_get0_param(native_handle()), m_Hostname.CStr(), m_Hostname.GetLength());
+ }
+
+ set_verify_mode(ssl::verify_peer | ssl::verify_client_once);
+
+ set_verify_callback([this](bool preverified, ssl::verify_context& ctx) {
+ if (!preverified) {
+ m_VerifyOK = false;
+
+ std::ostringstream msgbuf;
+ int err = X509_STORE_CTX_get_error(ctx.native_handle());
+
+ msgbuf << "code " << err << ": " << X509_verify_cert_error_string(err);
+ m_VerifyError = msgbuf.str();
+ }
+
+ return true;
+ });
+
+#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
+ if (type == client && !m_Hostname.IsEmpty()) {
+ String environmentName = Application::GetAppEnvironment();
+ String serverName = m_Hostname;
+
+ if (!environmentName.IsEmpty())
+ serverName += ":" + environmentName;
+
+ SSL_set_tlsext_host_name(native_handle(), serverName.CStr());
+ }
+#endif /* SSL_CTRL_SET_TLSEXT_HOSTNAME */
+}
diff --git a/lib/base/tlsstream.hpp b/lib/base/tlsstream.hpp
new file mode 100644
index 0000000..f6e5209
--- /dev/null
+++ b/lib/base/tlsstream.hpp
@@ -0,0 +1,129 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef TLSSTREAM_H
+#define TLSSTREAM_H
+
+#include "base/i2-base.hpp"
+#include "base/shared.hpp"
+#include "base/socket.hpp"
+#include "base/stream.hpp"
+#include "base/tlsutility.hpp"
+#include "base/fifo.hpp"
+#include "base/utility.hpp"
+#include <atomic>
+#include <memory>
+#include <utility>
+#include <boost/asio/buffered_stream.hpp>
+#include <boost/asio/io_context.hpp>
+#include <boost/asio/ip/tcp.hpp>
+#include <boost/asio/spawn.hpp>
+#include <boost/asio/ssl/context.hpp>
+#include <boost/asio/ssl/stream.hpp>
+
+namespace icinga
+{
+
+template<class ARS>
+class SeenStream : public ARS
+{
+public:
+ template<class... Args>
+ SeenStream(Args&&... args) : ARS(std::forward<Args>(args)...)
+ {
+ m_Seen.store(nullptr);
+ }
+
+ template<class... Args>
+ auto async_read_some(Args&&... args) -> decltype(((ARS*)nullptr)->async_read_some(std::forward<Args>(args)...))
+ {
+ {
+ auto seen (m_Seen.load());
+
+ if (seen) {
+ *seen = Utility::GetTime();
+ }
+ }
+
+ return ((ARS*)this)->async_read_some(std::forward<Args>(args)...);
+ }
+
+ inline void SetSeen(double* seen)
+ {
+ m_Seen.store(seen);
+ }
+
+private:
+ std::atomic<double*> m_Seen;
+};
+
+struct UnbufferedAsioTlsStreamParams
+{
+ boost::asio::io_context& IoContext;
+ boost::asio::ssl::context& SslContext;
+ const String& Hostname;
+};
+
+typedef SeenStream<boost::asio::ssl::stream<boost::asio::ip::tcp::socket>> AsioTcpTlsStream;
+
+class UnbufferedAsioTlsStream : public AsioTcpTlsStream
+{
+public:
+ inline
+ UnbufferedAsioTlsStream(UnbufferedAsioTlsStreamParams& init)
+ : AsioTcpTlsStream(init.IoContext, init.SslContext), m_VerifyOK(true), m_Hostname(init.Hostname)
+ {
+ }
+
+ bool IsVerifyOK() const;
+ String GetVerifyError() const;
+ std::shared_ptr<X509> GetPeerCertificate();
+
+ template<class... Args>
+ inline
+ auto async_handshake(handshake_type type, Args&&... args) -> decltype(((AsioTcpTlsStream*)nullptr)->async_handshake(type, std::forward<Args>(args)...))
+ {
+ BeforeHandshake(type);
+
+ return AsioTcpTlsStream::async_handshake(type, std::forward<Args>(args)...);
+ }
+
+ template<class... Args>
+ inline
+ auto handshake(handshake_type type, Args&&... args) -> decltype(((AsioTcpTlsStream*)nullptr)->handshake(type, std::forward<Args>(args)...))
+ {
+ BeforeHandshake(type);
+
+ return AsioTcpTlsStream::handshake(type, std::forward<Args>(args)...);
+ }
+
+private:
+ bool m_VerifyOK;
+ String m_VerifyError;
+ String m_Hostname;
+
+ void BeforeHandshake(handshake_type type);
+};
+
+class AsioTlsStream : public boost::asio::buffered_stream<UnbufferedAsioTlsStream>
+{
+public:
+ inline
+ AsioTlsStream(boost::asio::io_context& ioContext, boost::asio::ssl::context& sslContext, const String& hostname = String())
+ : AsioTlsStream(UnbufferedAsioTlsStreamParams{ioContext, sslContext, hostname})
+ {
+ }
+
+private:
+ inline
+ AsioTlsStream(UnbufferedAsioTlsStreamParams init)
+ : buffered_stream(init)
+ {
+ }
+};
+
+typedef boost::asio::buffered_stream<boost::asio::ip::tcp::socket> AsioTcpStream;
+typedef std::pair<Shared<AsioTlsStream>::Ptr, Shared<AsioTcpStream>::Ptr> OptionalTlsStream;
+
+}
+
+#endif /* TLSSTREAM_H */
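
For orientation, a hedged sketch of how these types are meant to be combined with the helpers from tcpsocket.hpp and tlsutility.cpp elsewhere in this patch: build an SSL context, construct an AsioTlsStream, connect its lowest layer and drive the handshake. The certificate paths and host name are placeholders, and MakeAsioSslContext() is assumed to be declared in base/tlsutility.hpp.

```cpp
#include "base/tlsstream.hpp"
#include "base/tlsutility.hpp"
#include "base/tcpsocket.hpp"

void ExampleTlsClient()
{
	// Certificate, key and CA paths are placeholders.
	auto sslContext (icinga::MakeAsioSslContext("/var/lib/icinga2/certs/example.crt",
		"/var/lib/icinga2/certs/example.key", "/var/lib/icinga2/certs/ca.crt"));

	// The hostname is used both for SNI and for certificate verification
	// (see UnbufferedAsioTlsStream::BeforeHandshake() in tlsstream.cpp above).
	icinga::AsioTlsStream stream (icinga::IoEngine::Get().GetIoContext(), *sslContext, "icinga-master.example.com");

	icinga::Connect(stream.lowest_layer(), "icinga-master.example.com", "5665");
	stream.next_layer().handshake(boost::asio::ssl::stream_base::client);
}
```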
diff --git a/lib/base/tlsutility.cpp b/lib/base/tlsutility.cpp
new file mode 100644
index 0000000..2e1b90a
--- /dev/null
+++ b/lib/base/tlsutility.cpp
@@ -0,0 +1,1086 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/tlsutility.hpp"
+#include "base/convert.hpp"
+#include "base/logger.hpp"
+#include "base/context.hpp"
+#include "base/convert.hpp"
+#include "base/utility.hpp"
+#include "base/application.hpp"
+#include "base/exception.hpp"
+#include <boost/asio/ssl/context.hpp>
+#include <openssl/opensslv.h>
+#include <openssl/crypto.h>
+#include <openssl/ssl.h>
+#include <openssl/ssl3.h>
+#include <fstream>
+
+namespace icinga
+{
+
+static bool l_SSLInitialized = false;
+static std::mutex *l_Mutexes;
+static std::mutex l_RandomMutex;
+
+String GetOpenSSLVersion()
+{
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+ return OpenSSL_version(OPENSSL_VERSION);
+#else /* OPENSSL_VERSION_NUMBER >= 0x10100000L */
+ return SSLeay_version(SSLEAY_VERSION);
+#endif /* OPENSSL_VERSION_NUMBER >= 0x10100000L */
+}
+
+#ifdef CRYPTO_LOCK
+static void OpenSSLLockingCallback(int mode, int type, const char *, int)
+{
+ if (mode & CRYPTO_LOCK)
+ l_Mutexes[type].lock();
+ else
+ l_Mutexes[type].unlock();
+}
+
+static unsigned long OpenSSLIDCallback()
+{
+#ifdef _WIN32
+ return (unsigned long)GetCurrentThreadId();
+#else /* _WIN32 */
+ return (unsigned long)pthread_self();
+#endif /* _WIN32 */
+}
+#endif /* CRYPTO_LOCK */
+
+/**
+ * Initializes the OpenSSL library.
+ */
+void InitializeOpenSSL()
+{
+ if (l_SSLInitialized)
+ return;
+
+ SSL_library_init();
+ SSL_load_error_strings();
+
+ SSL_COMP_get_compression_methods();
+
+#ifdef CRYPTO_LOCK
+ l_Mutexes = new std::mutex[CRYPTO_num_locks()];
+ CRYPTO_set_locking_callback(&OpenSSLLockingCallback);
+ CRYPTO_set_id_callback(&OpenSSLIDCallback);
+#endif /* CRYPTO_LOCK */
+
+ l_SSLInitialized = true;
+}
+
+static void InitSslContext(const Shared<boost::asio::ssl::context>::Ptr& context, const String& pubkey, const String& privkey, const String& cakey)
+{
+ char errbuf[256];
+
+ // Enforce TLS v1.2 as minimum
+ context->set_options(
+ boost::asio::ssl::context::default_workarounds |
+ boost::asio::ssl::context::no_compression |
+ boost::asio::ssl::context::no_sslv2 |
+ boost::asio::ssl::context::no_sslv3 |
+ boost::asio::ssl::context::no_tlsv1 |
+ boost::asio::ssl::context::no_tlsv1_1
+ );
+
+ // Custom TLS flags
+ SSL_CTX *sslContext = context->native_handle();
+
+ long flags = SSL_CTX_get_options(sslContext);
+
+ flags |= SSL_OP_CIPHER_SERVER_PREFERENCE;
+
+#if OPENSSL_VERSION_NUMBER < 0x10100000L
+ SSL_CTX_set_info_callback(sslContext, [](const SSL* ssl, int where, int) {
+ if (where & SSL_CB_HANDSHAKE_DONE) {
+ ssl->s3->flags |= SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS;
+ }
+ });
+#else /* OPENSSL_VERSION_NUMBER < 0x10100000L */
+ flags |= SSL_OP_NO_RENEGOTIATION;
+#endif /* OPENSSL_VERSION_NUMBER < 0x10100000L */
+
+ SSL_CTX_set_options(sslContext, flags);
+
+ SSL_CTX_set_mode(sslContext, SSL_MODE_ENABLE_PARTIAL_WRITE | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER);
+ SSL_CTX_set_session_id_context(sslContext, (const unsigned char *)"Icinga 2", 8);
+
+ // Explicitly load ECC ciphers, required on el7 - https://github.com/Icinga/icinga2/issues/7247
+ // SSL_CTX_set_ecdh_auto is deprecated and removed in OpenSSL 1.1.x - https://github.com/openssl/openssl/issues/1437
+#if OPENSSL_VERSION_NUMBER < 0x10100000L
+# ifdef SSL_CTX_set_ecdh_auto
+ SSL_CTX_set_ecdh_auto(sslContext, 1);
+# endif /* SSL_CTX_set_ecdh_auto */
+#endif /* OPENSSL_VERSION_NUMBER < 0x10100000L */
+
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+ // The built-in DH parameters have to be enabled explicitly to allow the use of ciphers that use a DHE key exchange.
+ // SSL_CTX_set_dh_auto is only documented in OpenSSL starting from version 3.0.0 but was already added in 1.1.0.
+ // https://github.com/openssl/openssl/commit/09599b52d4e295c380512ba39958a11994d63401
+ // https://github.com/openssl/openssl/commit/0437309fdf544492e272943e892523653df2f189
+ SSL_CTX_set_dh_auto(sslContext, 1);
+#endif /* OPENSSL_VERSION_NUMBER >= 0x10100000L */
+
+ if (!pubkey.IsEmpty()) {
+ if (!SSL_CTX_use_certificate_chain_file(sslContext, pubkey.CStr())) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error with public key file '" << pubkey << "': " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("SSL_CTX_use_certificate_chain_file")
+ << errinfo_openssl_error(ERR_peek_error())
+ << boost::errinfo_file_name(pubkey));
+ }
+ }
+
+ if (!privkey.IsEmpty()) {
+ if (!SSL_CTX_use_PrivateKey_file(sslContext, privkey.CStr(), SSL_FILETYPE_PEM)) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error with private key file '" << privkey << "': " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("SSL_CTX_use_PrivateKey_file")
+ << errinfo_openssl_error(ERR_peek_error())
+ << boost::errinfo_file_name(privkey));
+ }
+
+ if (!SSL_CTX_check_private_key(sslContext)) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error checking private key '" << privkey << "': " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("SSL_CTX_check_private_key")
+ << errinfo_openssl_error(ERR_peek_error()));
+ }
+ }
+
+ if (cakey.IsEmpty()) {
+ if (!SSL_CTX_set_default_verify_paths(sslContext)) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error loading system's root CAs: " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("SSL_CTX_set_default_verify_paths")
+ << errinfo_openssl_error(ERR_peek_error()));
+ }
+ } else {
+ if (!SSL_CTX_load_verify_locations(sslContext, cakey.CStr(), nullptr)) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error loading and verifying locations in ca key file '" << cakey << "': " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("SSL_CTX_load_verify_locations")
+ << errinfo_openssl_error(ERR_peek_error())
+ << boost::errinfo_file_name(cakey));
+ }
+
+ STACK_OF(X509_NAME) *cert_names;
+
+ cert_names = SSL_load_client_CA_file(cakey.CStr());
+ if (!cert_names) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error loading client ca key file '" << cakey << "': " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("SSL_load_client_CA_file")
+ << errinfo_openssl_error(ERR_peek_error())
+ << boost::errinfo_file_name(cakey));
+ }
+
+ SSL_CTX_set_client_CA_list(sslContext, cert_names);
+ }
+}
+
+/**
+ * Initializes an SSL context using the specified certificates.
+ *
+ * @param pubkey The public key.
+ * @param privkey The matching private key.
+ * @param cakey CA certificate chain file.
+ * @returns An SSL context.
+ */
+Shared<boost::asio::ssl::context>::Ptr MakeAsioSslContext(const String& pubkey, const String& privkey, const String& cakey)
+{
+ namespace ssl = boost::asio::ssl;
+
+ InitializeOpenSSL();
+
+ auto context (Shared<ssl::context>::Make(ssl::context::tls));
+
+ InitSslContext(context, pubkey, privkey, cakey);
+
+ return context;
+}
+
+/**
+ * Sets the cipher list on the specified SSL context.
+ * @param context The SSL context.
+ * @param cipherList The cipher list.
+ **/
+void SetCipherListToSSLContext(const Shared<boost::asio::ssl::context>::Ptr& context, const String& cipherList)
+{
+ char errbuf[256];
+
+ if (SSL_CTX_set_cipher_list(context->native_handle(), cipherList.CStr()) == 0) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Cipher list '"
+ << cipherList
+ << "' does not specify any usable ciphers: "
+ << ERR_peek_error() << ", \""
+ << errbuf << "\"";
+
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("SSL_CTX_set_cipher_list")
+ << errinfo_openssl_error(ERR_peek_error()));
+ }
+
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+ // With OpenSSL 1.1.0 and later, the call above may not return 0 here; log which ciphers actually ended up enabled.
+ STACK_OF(SSL_CIPHER) *ciphers;
+ Array::Ptr cipherNames = new Array();
+
+ ciphers = SSL_CTX_get_ciphers(context->native_handle());
+ for (int i = 0; i < sk_SSL_CIPHER_num(ciphers); i++) {
+ const SSL_CIPHER *cipher = sk_SSL_CIPHER_value(ciphers, i);
+ String cipher_name = SSL_CIPHER_get_name(cipher);
+
+ cipherNames->Add(cipher_name);
+ }
+
+ Log(LogNotice, "TlsUtility")
+ << "Available TLS cipher list: " << cipherNames->Join(" ");
+#endif /* OPENSSL_VERSION_NUMBER >= 0x10100000L */
+}
+
+/**
+ * Resolves a string describing a TLS protocol version to the value of a TLS*_VERSION macro of OpenSSL.
+ *
+ * Throws an exception if the version is unknown or not supported.
+ *
+ * @param version String of a TLS version, for example "TLSv1.2".
+ * @return The value of the corresponding TLS*_VERSION macro.
+ */
+int ResolveTlsProtocolVersion(const std::string& version) {
+ if (version == "TLSv1.2") {
+ return TLS1_2_VERSION;
+ } else if (version == "TLSv1.3") {
+#if OPENSSL_VERSION_NUMBER >= 0x10101000L
+ return TLS1_3_VERSION;
+#else /* OPENSSL_VERSION_NUMBER >= 0x10101000L */
+ throw std::runtime_error("'" + version + "' is only supported with OpenSSL 1.1.1 or newer");
+#endif /* OPENSSL_VERSION_NUMBER >= 0x10101000L */
+ } else {
+ throw std::runtime_error("Unknown TLS protocol version '" + version + "'");
+ }
+}
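
As a quick illustration of the mapping above (assuming ResolveTlsProtocolVersion() is declared in base/tlsutility.hpp like the rest of this file's API):

```cpp
#include "base/tlsutility.hpp"
#include <iostream>
#include <stdexcept>

// Illustrative only: exercises the version mapping implemented directly above.
void ExampleResolveVersion()
{
	int v = icinga::ResolveTlsProtocolVersion("TLSv1.2"); // yields TLS1_2_VERSION
	std::cout << "TLSv1.2 -> " << v << "\n";

	try {
		icinga::ResolveTlsProtocolVersion("SSLv3"); // unknown version, throws
	} catch (const std::runtime_error& ex) {
		std::cout << ex.what() << "\n";
	}
}
```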
+
+Shared<boost::asio::ssl::context>::Ptr SetupSslContext(String certPath, String keyPath,
+ String caPath, String crlPath, String cipherList, String protocolmin, DebugInfo di)
+{
+ namespace ssl = boost::asio::ssl;
+
+ Shared<ssl::context>::Ptr context;
+
+ try {
+ context = MakeAsioSslContext(certPath, keyPath, caPath);
+ } catch (const std::exception&) {
+ BOOST_THROW_EXCEPTION(ScriptError("Cannot make SSL context for cert path: '"
+ + certPath + "' key path: '" + keyPath + "' ca path: '" + caPath + "'.", di));
+ }
+
+ if (!crlPath.IsEmpty()) {
+ try {
+ AddCRLToSSLContext(context, crlPath);
+ } catch (const std::exception&) {
+ BOOST_THROW_EXCEPTION(ScriptError("Cannot add certificate revocation list to SSL context for crl path: '"
+ + crlPath + "'.", di));
+ }
+ }
+
+ if (!cipherList.IsEmpty()) {
+ try {
+ SetCipherListToSSLContext(context, cipherList);
+ } catch (const std::exception&) {
+ BOOST_THROW_EXCEPTION(ScriptError("Cannot set cipher list to SSL context for cipher list: '"
+ + cipherList + "'.", di));
+ }
+ }
+
+ if (!protocolmin.IsEmpty()){
+ try {
+ SetTlsProtocolminToSSLContext(context, protocolmin);
+ } catch (const std::exception&) {
+ BOOST_THROW_EXCEPTION(ScriptError("Cannot set minimum TLS protocol version to SSL context with tls_protocolmin: '" + protocolmin + "'.", di));
+ }
+ }
+
+ return context;
+}
+
+/**
+ * Sets the minimum TLS protocol version on the specified SSL context.
+ *
+ * @param context The ssl context.
+ * @param tlsProtocolmin The minimum TLS protocol version.
+ */
+void SetTlsProtocolminToSSLContext(const Shared<boost::asio::ssl::context>::Ptr& context, const String& tlsProtocolmin)
+{
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+ int ret = SSL_CTX_set_min_proto_version(context->native_handle(), ResolveTlsProtocolVersion(tlsProtocolmin));
+
+ if (ret != 1) {
+ char errbuf[256];
+
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error setting minimum TLS protocol version: " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("SSL_CTX_set_min_proto_version")
+ << errinfo_openssl_error(ERR_peek_error()));
+ }
+#else /* OPENSSL_VERSION_NUMBER >= 0x10100000L */
+ // This should never happen. On this OpenSSL version, ResolveTlsProtocolVersion() should either return TLS 1.2
+ // or throw an exception, as that's the only TLS version supported by both Icinga and ancient OpenSSL.
+ VERIFY(ResolveTlsProtocolVersion(tlsProtocolmin) == TLS1_2_VERSION);
+#endif /* OPENSSL_VERSION_NUMBER >= 0x10100000L */
+}
+
+/**
+ * Loads a CRL and adds it to the certificate store of the specified Boost SSL context.
+ *
+ * @param context The SSL context.
+ * @param crlPath The path to the CRL file.
+ */
+void AddCRLToSSLContext(const Shared<boost::asio::ssl::context>::Ptr& context, const String& crlPath)
+{
+ X509_STORE *x509_store = SSL_CTX_get_cert_store(context->native_handle());
+ AddCRLToSSLContext(x509_store, crlPath);
+}
+
+/**
+ * Loads a CRL and adds it to the specified OpenSSL X509 store, enabling CRL checking.
+ *
+ * @param x509_store The X509 store.
+ * @param crlPath The path to the CRL file.
+ */
+void AddCRLToSSLContext(X509_STORE *x509_store, const String& crlPath)
+{
+ char errbuf[256];
+
+ X509_LOOKUP *lookup;
+ lookup = X509_STORE_add_lookup(x509_store, X509_LOOKUP_file());
+
+ if (!lookup) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error adding X509 store lookup: " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("X509_STORE_add_lookup")
+ << errinfo_openssl_error(ERR_peek_error()));
+ }
+
+ if (X509_LOOKUP_load_file(lookup, crlPath.CStr(), X509_FILETYPE_PEM) != 1) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error loading crl file '" << crlPath << "': " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("X509_LOOKUP_load_file")
+ << errinfo_openssl_error(ERR_peek_error())
+ << boost::errinfo_file_name(crlPath));
+ }
+
+ X509_VERIFY_PARAM *param = X509_VERIFY_PARAM_new();
+ X509_VERIFY_PARAM_set_flags(param, X509_V_FLAG_CRL_CHECK);
+ X509_STORE_set1_param(x509_store, param);
+ X509_VERIFY_PARAM_free(param);
+}
+
+static String GetX509NameCN(X509_NAME *name)
+{
+ char errbuf[256];
+ char buffer[256];
+
+ int rc = X509_NAME_get_text_by_NID(name, NID_commonName, buffer, sizeof(buffer));
+
+ if (rc == -1) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error with x509 NAME getting text by NID: " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("X509_NAME_get_text_by_NID")
+ << errinfo_openssl_error(ERR_peek_error()));
+ }
+
+ return buffer;
+}
+
+/**
+ * Retrieves the common name for an X509 certificate.
+ *
+ * @param certificate The X509 certificate.
+ * @returns The common name.
+ */
+String GetCertificateCN(const std::shared_ptr<X509>& certificate)
+{
+ return GetX509NameCN(X509_get_subject_name(certificate.get()));
+}
+
+/**
+ * Retrieves an X509 certificate from the specified file.
+ *
+ * @param pemfile The filename.
+ * @returns An X509 certificate.
+ */
+std::shared_ptr<X509> GetX509Certificate(const String& pemfile)
+{
+ char errbuf[256];
+ X509 *cert;
+ BIO *fpcert = BIO_new(BIO_s_file());
+
+ if (!fpcert) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error creating new BIO: " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("BIO_new")
+ << errinfo_openssl_error(ERR_peek_error()));
+ }
+
+ if (BIO_read_filename(fpcert, pemfile.CStr()) < 0) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error reading pem file '" << pemfile << "': " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("BIO_read_filename")
+ << errinfo_openssl_error(ERR_peek_error())
+ << boost::errinfo_file_name(pemfile));
+ }
+
+ cert = PEM_read_bio_X509_AUX(fpcert, nullptr, nullptr, nullptr);
+ if (!cert) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error on bio X509 AUX reading pem file '" << pemfile << "': " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("PEM_read_bio_X509_AUX")
+ << errinfo_openssl_error(ERR_peek_error())
+ << boost::errinfo_file_name(pemfile));
+ }
+
+ BIO_free(fpcert);
+
+ return std::shared_ptr<X509>(cert, X509_free);
+}
+
+int MakeX509CSR(const String& cn, const String& keyfile, const String& csrfile, const String& certfile, bool ca)
+{
+ char errbuf[256];
+
+ InitializeOpenSSL();
+
+ RSA *rsa = RSA_new();
+ BIGNUM *e = BN_new();
+
+ if (!rsa || !e) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error while creating RSA key: " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("RSA_generate_key")
+ << errinfo_openssl_error(ERR_peek_error()));
+ }
+
+ BN_set_word(e, RSA_F4);
+
+ if (!RSA_generate_key_ex(rsa, 4096, e, nullptr)) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error while creating RSA key: " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("RSA_generate_key")
+ << errinfo_openssl_error(ERR_peek_error()));
+ }
+
+ BN_free(e);
+
+ Log(LogInformation, "base")
+ << "Writing private key to '" << keyfile << "'.";
+
+ BIO *bio = BIO_new_file(const_cast<char *>(keyfile.CStr()), "w");
+
+ if (!bio) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error while opening private RSA key file '" << keyfile << "': " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("BIO_new_file")
+ << errinfo_openssl_error(ERR_peek_error())
+ << boost::errinfo_file_name(keyfile));
+ }
+
+ if (!PEM_write_bio_RSAPrivateKey(bio, rsa, nullptr, nullptr, 0, nullptr, nullptr)) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error while writing private RSA key to file '" << keyfile << "': " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("PEM_write_bio_RSAPrivateKey")
+ << errinfo_openssl_error(ERR_peek_error())
+ << boost::errinfo_file_name(keyfile));
+ }
+
+ BIO_free(bio);
+
+#ifndef _WIN32
+ chmod(keyfile.CStr(), 0600);
+#endif /* _WIN32 */
+
+ EVP_PKEY *key = EVP_PKEY_new();
+ EVP_PKEY_assign_RSA(key, rsa);
+
+ if (!certfile.IsEmpty()) {
+ X509_NAME *subject = X509_NAME_new();
+ X509_NAME_add_entry_by_txt(subject, "CN", MBSTRING_ASC, (unsigned char *)cn.CStr(), -1, -1, 0);
+
+ std::shared_ptr<X509> cert = CreateCert(key, subject, subject, key, ca);
+
+ X509_NAME_free(subject);
+
+ Log(LogInformation, "base")
+ << "Writing X509 certificate to '" << certfile << "'.";
+
+ bio = BIO_new_file(const_cast<char *>(certfile.CStr()), "w");
+
+ if (!bio) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error while opening certificate file '" << certfile << "': " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("BIO_new_file")
+ << errinfo_openssl_error(ERR_peek_error())
+ << boost::errinfo_file_name(certfile));
+ }
+
+ if (!PEM_write_bio_X509(bio, cert.get())) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error while writing certificate to file '" << certfile << "': " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("PEM_write_bio_X509")
+ << errinfo_openssl_error(ERR_peek_error())
+ << boost::errinfo_file_name(certfile));
+ }
+
+ BIO_free(bio);
+ }
+
+ if (!csrfile.IsEmpty()) {
+ X509_REQ *req = X509_REQ_new();
+
+ if (!req)
+ return 0;
+
+ X509_REQ_set_version(req, 0);
+ X509_REQ_set_pubkey(req, key);
+
+ X509_NAME *name = X509_REQ_get_subject_name(req);
+ X509_NAME_add_entry_by_txt(name, "CN", MBSTRING_ASC, (unsigned char *)cn.CStr(), -1, -1, 0);
+
+ if (!ca) {
+ String san = "DNS:" + cn;
+ X509_EXTENSION *subjectAltNameExt = X509V3_EXT_conf_nid(nullptr, nullptr, NID_subject_alt_name, const_cast<char *>(san.CStr()));
+ if (subjectAltNameExt) {
+ /* OpenSSL 0.9.8 requires STACK_OF(X509_EXTENSION), otherwise we would just use stack_st_X509_EXTENSION. */
+ STACK_OF(X509_EXTENSION) *exts = sk_X509_EXTENSION_new_null();
+ sk_X509_EXTENSION_push(exts, subjectAltNameExt);
+ X509_REQ_add_extensions(req, exts);
+ sk_X509_EXTENSION_pop_free(exts, X509_EXTENSION_free);
+ }
+ }
+
+ X509_REQ_sign(req, key, EVP_sha256());
+
+ Log(LogInformation, "base")
+ << "Writing certificate signing request to '" << csrfile << "'.";
+
+ bio = BIO_new_file(const_cast<char *>(csrfile.CStr()), "w");
+
+ if (!bio) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error while opening CSR file '" << csrfile << "': " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("BIO_new_file")
+ << errinfo_openssl_error(ERR_peek_error())
+ << boost::errinfo_file_name(csrfile));
+ }
+
+ if (!PEM_write_bio_X509_REQ(bio, req)) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error while writing CSR to file '" << csrfile << "': " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("PEM_write_bio_X509")
+ << errinfo_openssl_error(ERR_peek_error())
+ << boost::errinfo_file_name(csrfile));
+ }
+
+ BIO_free(bio);
+
+ X509_REQ_free(req);
+ }
+
+ EVP_PKEY_free(key);
+
+ return 1;
+}
+
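+/**
+ * Creates and signs an X509 certificate.
+ *
+ * @param pubkey The public key of the new certificate.
+ * @param subject The subject name.
+ * @param issuer The issuer name.
+ * @param cakey The key used to sign the certificate.
+ * @param ca Whether to set the basicConstraints extension to CA:TRUE.
+ * @returns The new certificate.
+ */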
+std::shared_ptr<X509> CreateCert(EVP_PKEY *pubkey, X509_NAME *subject, X509_NAME *issuer, EVP_PKEY *cakey, bool ca)
+{
+ X509 *cert = X509_new();
+ X509_set_version(cert, 2);
+ X509_gmtime_adj(X509_get_notBefore(cert), 0);
+ X509_gmtime_adj(X509_get_notAfter(cert), ca ? ROOT_VALID_FOR : LEAF_VALID_FOR);
+ X509_set_pubkey(cert, pubkey);
+
+ X509_set_subject_name(cert, subject);
+ X509_set_issuer_name(cert, issuer);
+
+ String id = Utility::NewUniqueID();
+
+ char errbuf[256];
+ SHA_CTX context;
+ unsigned char digest[SHA_DIGEST_LENGTH];
+
+ if (!SHA1_Init(&context)) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error on SHA1 Init: " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("SHA1_Init")
+ << errinfo_openssl_error(ERR_peek_error()));
+ }
+
+ if (!SHA1_Update(&context, (unsigned char*)id.CStr(), id.GetLength())) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error on SHA1 Update: " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("SHA1_Update")
+ << errinfo_openssl_error(ERR_peek_error()));
+ }
+
+ if (!SHA1_Final(digest, &context)) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error on SHA1 Final: " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("SHA1_Final")
+ << errinfo_openssl_error(ERR_peek_error()));
+ }
+
+ BIGNUM *bn = BN_new();
+ BN_bin2bn(digest, sizeof(digest), bn);
+ BN_to_ASN1_INTEGER(bn, X509_get_serialNumber(cert));
+ BN_free(bn);
+
+ X509V3_CTX ctx;
+ X509V3_set_ctx_nodb(&ctx);
+ X509V3_set_ctx(&ctx, cert, cert, nullptr, nullptr, 0);
+
+ const char *attr;
+
+ if (ca)
+ attr = "critical,CA:TRUE";
+ else
+ attr = "critical,CA:FALSE";
+
+ X509_EXTENSION *basicConstraintsExt = X509V3_EXT_conf_nid(nullptr, &ctx, NID_basic_constraints, const_cast<char *>(attr));
+
+ if (basicConstraintsExt) {
+ X509_add_ext(cert, basicConstraintsExt, -1);
+ X509_EXTENSION_free(basicConstraintsExt);
+ }
+
+ String cn = GetX509NameCN(subject);
+
+ if (!ca) {
+ String san = "DNS:" + cn;
+ X509_EXTENSION *subjectAltNameExt = X509V3_EXT_conf_nid(nullptr, &ctx, NID_subject_alt_name, const_cast<char *>(san.CStr()));
+ if (subjectAltNameExt) {
+ X509_add_ext(cert, subjectAltNameExt, -1);
+ X509_EXTENSION_free(subjectAltNameExt);
+ }
+ }
+
+ X509_sign(cert, cakey, EVP_sha256());
+
+ return std::shared_ptr<X509>(cert, X509_free);
+}
+
+String GetIcingaCADir()
+{
+ return Configuration::DataDir + "/ca";
+}
+
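+/**
+ * Creates a certificate signed by the Icinga CA (ca.key/ca.crt in the CA directory).
+ *
+ * @param pubkey The public key of the new certificate.
+ * @param subject The subject name.
+ * @param ca Whether to create a CA certificate.
+ * @returns The signed certificate, or an empty pointer if the CA key could not be read.
+ */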
+std::shared_ptr<X509> CreateCertIcingaCA(EVP_PKEY *pubkey, X509_NAME *subject, bool ca)
+{
+ char errbuf[256];
+
+ String cadir = GetIcingaCADir();
+
+ String cakeyfile = cadir + "/ca.key";
+
+ RSA *rsa;
+
+ BIO *cakeybio = BIO_new_file(const_cast<char *>(cakeyfile.CStr()), "r");
+
+ if (!cakeybio) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Could not open CA key file '" << cakeyfile << "': " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ return std::shared_ptr<X509>();
+ }
+
+ rsa = PEM_read_bio_RSAPrivateKey(cakeybio, nullptr, nullptr, nullptr);
+
+ if (!rsa) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Could not read RSA key from CA key file '" << cakeyfile << "': " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ return std::shared_ptr<X509>();
+ }
+
+ BIO_free(cakeybio);
+
+ String cacertfile = cadir + "/ca.crt";
+
+ std::shared_ptr<X509> cacert = GetX509Certificate(cacertfile);
+
+ EVP_PKEY *privkey = EVP_PKEY_new();
+ EVP_PKEY_assign_RSA(privkey, rsa);
+
+ return CreateCert(pubkey, subject, X509_get_subject_name(cacert.get()), privkey, ca);
+}
+
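+/**
+ * Re-signs an existing certificate with the Icinga CA, keeping its public key and subject name.
+ *
+ * @param cert The certificate to sign.
+ * @returns The signed certificate.
+ */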
+std::shared_ptr<X509> CreateCertIcingaCA(const std::shared_ptr<X509>& cert)
+{
+ std::shared_ptr<EVP_PKEY> pkey = std::shared_ptr<EVP_PKEY>(X509_get_pubkey(cert.get()), EVP_PKEY_free);
+ return CreateCertIcingaCA(pkey.get(), X509_get_subject_name(cert.get()));
+}
+
+static inline
+bool CertExpiresWithin(X509* cert, int seconds)
+{
+ time_t renewalStart = time(nullptr) + seconds;
+
+ return X509_cmp_time(X509_get_notAfter(cert), &renewalStart) < 0;
+}
+
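+/**
+ * Checks whether a certificate needs to be renewed, either because it expires
+ * within the renewal threshold or because it was issued before 2017.
+ *
+ * @param cert The certificate.
+ * @returns true if the certificate is still up-to-date, false if it should be renewed.
+ */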
+bool IsCertUptodate(const std::shared_ptr<X509>& cert)
+{
+ if (CertExpiresWithin(cert.get(), RENEW_THRESHOLD)) {
+ return false;
+ }
+
+ /* auto-renew all certificates which were created before 2017 to force an update of the CA,
+ * because Icinga versions older than 2.4 sometimes create certificates with an invalid
+ * serial number. */
+ time_t forceRenewalEnd = 1483228800; /* January 1st, 2017 */
+
+ return X509_cmp_time(X509_get_notBefore(cert.get()), &forceRenewalEnd) >= 0;
+}
+
+bool IsCaUptodate(X509* cert)
+{
+ return !CertExpiresWithin(cert, LEAF_VALID_FOR);
+}
+
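+/**
+ * Converts a certificate to its PEM representation.
+ *
+ * @param cert The certificate.
+ * @returns The PEM-encoded certificate.
+ */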
+String CertificateToString(X509* cert)
+{
+ BIO *mem = BIO_new(BIO_s_mem());
+ PEM_write_bio_X509(mem, cert);
+
+ char *data;
+ long len = BIO_get_mem_data(mem, &data);
+
+ String result = String(data, data + len);
+
+ BIO_free(mem);
+
+ return result;
+}
+
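+/**
+ * Parses a PEM-encoded certificate from a string.
+ *
+ * @param cert The PEM-encoded certificate.
+ * @returns The certificate. Throws std::invalid_argument if the certificate cannot be parsed.
+ */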
+std::shared_ptr<X509> StringToCertificate(const String& cert)
+{
+ BIO *bio = BIO_new(BIO_s_mem());
+ BIO_write(bio, (const void *)cert.CStr(), cert.GetLength());
+
+ X509 *rawCert = PEM_read_bio_X509_AUX(bio, nullptr, nullptr, nullptr);
+
+ BIO_free(bio);
+
+ if (!rawCert)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("The specified X509 certificate is invalid."));
+
+ return std::shared_ptr<X509>(rawCert, X509_free);
+}
+
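+/**
+ * Derives a key from a password using PBKDF2 with HMAC-SHA1.
+ *
+ * @param password The password.
+ * @param salt The salt.
+ * @param iterations The number of iterations.
+ * @returns The derived key as a hex-encoded string.
+ */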
+String PBKDF2_SHA1(const String& password, const String& salt, int iterations)
+{
+ unsigned char digest[SHA_DIGEST_LENGTH];
+ PKCS5_PBKDF2_HMAC_SHA1(password.CStr(), password.GetLength(), reinterpret_cast<const unsigned char *>(salt.CStr()), salt.GetLength(),
+ iterations, sizeof(digest), digest);
+
+ char output[SHA_DIGEST_LENGTH*2+1];
+ for (int i = 0; i < SHA_DIGEST_LENGTH; i++)
+ sprintf(output + 2 * i, "%02x", digest[i]);
+
+ return output;
+}
+
+String PBKDF2_SHA256(const String& password, const String& salt, int iterations)
+{
+ unsigned char digest[SHA256_DIGEST_LENGTH];
+ PKCS5_PBKDF2_HMAC(password.CStr(), password.GetLength(), reinterpret_cast<const unsigned char *>(salt.CStr()),
+ salt.GetLength(), iterations, EVP_sha256(), SHA256_DIGEST_LENGTH, digest);
+
+ char output[SHA256_DIGEST_LENGTH*2+1];
+ for (int i = 0; i < SHA256_DIGEST_LENGTH; i++)
+ sprintf(output + 2 * i, "%02x", digest[i]);
+
+ return output;
+}
+
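+/**
+ * Computes the SHA1 checksum of a string.
+ *
+ * @param s The input string.
+ * @param binary Whether to return the raw digest instead of a hex-encoded string.
+ * @returns The checksum.
+ */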
+String SHA1(const String& s, bool binary)
+{
+ char errbuf[256];
+ SHA_CTX context;
+ unsigned char digest[SHA_DIGEST_LENGTH];
+
+ if (!SHA1_Init(&context)) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error on SHA Init: " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("SHA1_Init")
+ << errinfo_openssl_error(ERR_peek_error()));
+ }
+
+ if (!SHA1_Update(&context, (unsigned char*)s.CStr(), s.GetLength())) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error on SHA Update: " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("SHA1_Update")
+ << errinfo_openssl_error(ERR_peek_error()));
+ }
+
+ if (!SHA1_Final(digest, &context)) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error on SHA Final: " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("SHA1_Final")
+ << errinfo_openssl_error(ERR_peek_error()));
+ }
+
+ if (binary)
+ return String(reinterpret_cast<const char*>(digest), reinterpret_cast<const char *>(digest + SHA_DIGEST_LENGTH));
+
+ return BinaryToHex(digest, SHA_DIGEST_LENGTH);
+}
+
+String SHA256(const String& s)
+{
+ char errbuf[256];
+ SHA256_CTX context;
+ unsigned char digest[SHA256_DIGEST_LENGTH];
+
+ if (!SHA256_Init(&context)) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error on SHA256 Init: " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("SHA256_Init")
+ << errinfo_openssl_error(ERR_peek_error()));
+ }
+
+ if (!SHA256_Update(&context, (unsigned char*)s.CStr(), s.GetLength())) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error on SHA256 Update: " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("SHA256_Update")
+ << errinfo_openssl_error(ERR_peek_error()));
+ }
+
+ if (!SHA256_Final(digest, &context)) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Error on SHA256 Final: " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("SHA256_Final")
+ << errinfo_openssl_error(ERR_peek_error()));
+ }
+
+ char output[SHA256_DIGEST_LENGTH*2+1];
+ for (int i = 0; i < 32; i++)
+ sprintf(output + 2 * i, "%02x", digest[i]);
+
+ return output;
+}
+
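+/**
+ * Generates a cryptographically secure random string.
+ *
+ * @param length The number of random bytes to generate.
+ * @returns The random bytes as a hex-encoded string (twice as many characters as bytes).
+ */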
+String RandomString(int length)
+{
+ auto *bytes = new unsigned char[length];
+
+ /* Ensure that password generation is atomic. RAND_bytes is not thread-safe
+ * in OpenSSL < 1.1.0.
+ */
+ std::unique_lock<std::mutex> lock(l_RandomMutex);
+
+ if (!RAND_bytes(bytes, length)) {
+ delete [] bytes;
+
+ char errbuf[256];
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+
+ Log(LogCritical, "SSL")
+ << "Error for RAND_bytes: " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("RAND_bytes")
+ << errinfo_openssl_error(ERR_peek_error()));
+ }
+
+ lock.unlock();
+
+ auto *output = new char[length * 2 + 1];
+ for (int i = 0; i < length; i++)
+ sprintf(output + 2 * i, "%02x", bytes[i]);
+
+ String result = output;
+ delete [] bytes;
+ delete [] output;
+
+ return result;
+}
+
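+/**
+ * Converts a binary buffer to its lowercase hexadecimal representation.
+ *
+ * @param data The input buffer.
+ * @param length The buffer length in bytes.
+ * @returns The hex-encoded string (2 * length characters).
+ */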
+String BinaryToHex(const unsigned char* data, size_t length) {
+ static const char hexdigits[] = "0123456789abcdef";
+
+ String output(2*length, 0);
+ for (int i = 0; i < SHA_DIGEST_LENGTH; i++) {
+ output[2 * i] = hexdigits[data[i] >> 4];
+ output[2 * i + 1] = hexdigits[data[i] & 0xf];
+ }
+
+ return output;
+}
+
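+/**
+ * Verifies a certificate against a CA certificate and an optional CRL.
+ *
+ * @param caCertificate The CA certificate.
+ * @param certificate The certificate to verify.
+ * @param crlFile Path to a CRL file (may be empty).
+ * @returns true if the certificate was verified successfully; throws openssl_error if it is rejected.
+ */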
+bool VerifyCertificate(const std::shared_ptr<X509> &caCertificate, const std::shared_ptr<X509> &certificate, const String& crlFile)
+{
+ X509_STORE *store = X509_STORE_new();
+
+ if (!store)
+ return false;
+
+ X509_STORE_add_cert(store, caCertificate.get());
+
+ if (!crlFile.IsEmpty()) {
+ AddCRLToSSLContext(store, crlFile);
+ }
+
+ X509_STORE_CTX *csc = X509_STORE_CTX_new();
+ X509_STORE_CTX_init(csc, store, certificate.get(), nullptr);
+
+ int rc = X509_verify_cert(csc);
+
+ if (rc == 0) {
+ /* Retrieve the verification error before the context is freed. */
+ int err = X509_STORE_CTX_get_error(csc);
+
+ X509_STORE_CTX_free(csc);
+ X509_STORE_free(store);
+
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("X509_verify_cert")
+ << errinfo_openssl_error(err));
+ }
+
+ X509_STORE_CTX_free(csc);
+ X509_STORE_free(store);
+
+ return rc == 1;
+}
+
+bool IsCa(const std::shared_ptr<X509>& cacert)
+{
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+ /* OpenSSL 1.1.x provides https://www.openssl.org/docs/man1.1.0/man3/X509_check_ca.html
+ *
+ * 0 if it is not CA certificate,
+ * 1 if it is proper X509v3 CA certificate with basicConstraints extension CA:TRUE,
+ * 3 if it is self-signed X509 v1 certificate
+ * 4 if it is certificate with keyUsage extension with bit keyCertSign set, but without basicConstraints,
+ * 5 if it has outdated Netscape Certificate Type extension telling that it is CA certificate.
+ */
+ return (X509_check_ca(cacert.get()) == 1);
+#else /* OPENSSL_VERSION_NUMBER >= 0x10100000L */
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Not supported on this platform, OpenSSL version too old."));
+#endif /* OPENSSL_VERSION_NUMBER >= 0x10100000L */
+}
+
+int GetCertificateVersion(const std::shared_ptr<X509>& cert)
+{
+ return X509_get_version(cert.get()) + 1;
+}
+
+String GetSignatureAlgorithm(const std::shared_ptr<X509>& cert)
+{
+ int alg;
+ int sign_alg;
+ X509_PUBKEY *key;
+ X509_ALGOR *algor;
+
+ key = X509_get_X509_PUBKEY(cert.get());
+
+ X509_PUBKEY_get0_param(nullptr, nullptr, 0, &algor, key); //TODO: Error handling
+
+ alg = OBJ_obj2nid (algor->algorithm);
+
+#if OPENSSL_VERSION_NUMBER < 0x10100000L
+ sign_alg = OBJ_obj2nid((cert.get())->sig_alg->algorithm);
+#else /* OPENSSL_VERSION_NUMBER < 0x10100000L */
+ sign_alg = X509_get_signature_nid(cert.get());
+#endif /* OPENSSL_VERSION_NUMBER < 0x10100000L */
+
+ return Convert::ToString((sign_alg == NID_undef) ? "Unknown" : OBJ_nid2ln(sign_alg));
+}
+
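+/**
+ * Retrieves the DNS, URI and email subject alternative names of a certificate.
+ *
+ * @param cert The certificate.
+ * @returns An array containing the subject alternative names.
+ */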
+Array::Ptr GetSubjectAltNames(const std::shared_ptr<X509>& cert)
+{
+ GENERAL_NAMES* subjectAltNames = (GENERAL_NAMES*)X509_get_ext_d2i(cert.get(), NID_subject_alt_name, nullptr, nullptr);
+
+ Array::Ptr sans = new Array();
+
+ for (int i = 0; i < sk_GENERAL_NAME_num(subjectAltNames); i++) {
+ GENERAL_NAME* gen = sk_GENERAL_NAME_value(subjectAltNames, i);
+ if (gen->type == GEN_URI || gen->type == GEN_DNS || gen->type == GEN_EMAIL) {
+ ASN1_IA5STRING *asn1_str = gen->d.uniformResourceIdentifier;
+
+#if OPENSSL_VERSION_NUMBER < 0x10100000L
+ String san = Convert::ToString(ASN1_STRING_data(asn1_str));
+#else /* OPENSSL_VERSION_NUMBER < 0x10100000L */
+ String san = Convert::ToString(ASN1_STRING_get0_data(asn1_str));
+#endif /* OPENSSL_VERSION_NUMBER < 0x10100000L */
+
+ sans->Add(san);
+ }
+ }
+
+ GENERAL_NAMES_free(subjectAltNames);
+
+ return sans;
+}
+
+}
diff --git a/lib/base/tlsutility.hpp b/lib/base/tlsutility.hpp
new file mode 100644
index 0000000..b064120
--- /dev/null
+++ b/lib/base/tlsutility.hpp
@@ -0,0 +1,94 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef TLSUTILITY_H
+#define TLSUTILITY_H
+
+#include "base/i2-base.hpp"
+#include "base/debuginfo.hpp"
+#include "base/object.hpp"
+#include "base/shared.hpp"
+#include "base/array.hpp"
+#include "base/string.hpp"
+#include <openssl/ssl.h>
+#include <openssl/bio.h>
+#include <openssl/err.h>
+#include <openssl/comp.h>
+#include <openssl/sha.h>
+#include <openssl/pem.h>
+#include <openssl/x509.h>
+#include <openssl/x509v3.h>
+#include <openssl/evp.h>
+#include <openssl/rand.h>
+#include <boost/asio/ssl/context.hpp>
+#include <boost/exception/info.hpp>
+
+namespace icinga
+{
+
+// Source: https://ssl-config.mozilla.org, i.e.
+// ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305
+// Modified so that AES256 is preferred over AES128.
+const char * const DEFAULT_TLS_CIPHERS = "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256";
+
+const char * const DEFAULT_TLS_PROTOCOLMIN = "TLSv1.2";
+const unsigned int DEFAULT_CONNECT_TIMEOUT = 15;
+
+const auto ROOT_VALID_FOR = 60 * 60 * 24 * 365 * 15;
+const auto LEAF_VALID_FOR = 60 * 60 * 24 * 397;
+const auto RENEW_THRESHOLD = 60 * 60 * 24 * 30;
+const auto RENEW_INTERVAL = 60 * 60 * 24;
+
+void InitializeOpenSSL();
+
+String GetOpenSSLVersion();
+
+Shared<boost::asio::ssl::context>::Ptr MakeAsioSslContext(const String& pubkey = String(), const String& privkey = String(), const String& cakey = String());
+void AddCRLToSSLContext(const Shared<boost::asio::ssl::context>::Ptr& context, const String& crlPath);
+void AddCRLToSSLContext(X509_STORE *x509_store, const String& crlPath);
+void SetCipherListToSSLContext(const Shared<boost::asio::ssl::context>::Ptr& context, const String& cipherList);
+void SetTlsProtocolminToSSLContext(const Shared<boost::asio::ssl::context>::Ptr& context, const String& tlsProtocolmin);
+int ResolveTlsProtocolVersion(const std::string& version);
+
+Shared<boost::asio::ssl::context>::Ptr SetupSslContext(String certPath, String keyPath,
+ String caPath, String crlPath, String cipherList, String protocolmin, DebugInfo di);
+
+String GetCertificateCN(const std::shared_ptr<X509>& certificate);
+std::shared_ptr<X509> GetX509Certificate(const String& pemfile);
+int MakeX509CSR(const String& cn, const String& keyfile, const String& csrfile = String(), const String& certfile = String(), bool ca = false);
+std::shared_ptr<X509> CreateCert(EVP_PKEY *pubkey, X509_NAME *subject, X509_NAME *issuer, EVP_PKEY *cakey, bool ca);
+
+String GetIcingaCADir();
+String CertificateToString(X509* cert);
+
+inline String CertificateToString(const std::shared_ptr<X509>& cert)
+{
+ return CertificateToString(cert.get());
+}
+
+std::shared_ptr<X509> StringToCertificate(const String& cert);
+std::shared_ptr<X509> CreateCertIcingaCA(EVP_PKEY *pubkey, X509_NAME *subject, bool ca = false);
+std::shared_ptr<X509> CreateCertIcingaCA(const std::shared_ptr<X509>& cert);
+bool IsCertUptodate(const std::shared_ptr<X509>& cert);
+bool IsCaUptodate(X509* cert);
+
+String PBKDF2_SHA1(const String& password, const String& salt, int iterations);
+String PBKDF2_SHA256(const String& password, const String& salt, int iterations);
+String SHA1(const String& s, bool binary = false);
+String SHA256(const String& s);
+String RandomString(int length);
+String BinaryToHex(const unsigned char* data, size_t length);
+
+bool VerifyCertificate(const std::shared_ptr<X509>& caCertificate, const std::shared_ptr<X509>& certificate, const String& crlFile);
+bool IsCa(const std::shared_ptr<X509>& cacert);
+int GetCertificateVersion(const std::shared_ptr<X509>& cert);
+String GetSignatureAlgorithm(const std::shared_ptr<X509>& cert);
+Array::Ptr GetSubjectAltNames(const std::shared_ptr<X509>& cert);
+
+class openssl_error : virtual public std::exception, virtual public boost::exception { };
+
+struct errinfo_openssl_error_;
+typedef boost::error_info<struct errinfo_openssl_error_, unsigned long> errinfo_openssl_error;
+
+}
+
+#endif /* TLSUTILITY_H */
diff --git a/lib/base/type.cpp b/lib/base/type.cpp
new file mode 100644
index 0000000..14794cb
--- /dev/null
+++ b/lib/base/type.cpp
@@ -0,0 +1,217 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/type.hpp"
+#include "base/scriptglobal.hpp"
+#include "base/namespace.hpp"
+#include "base/objectlock.hpp"
+
+using namespace icinga;
+
+Type::Ptr Type::TypeInstance;
+
+static Namespace::Ptr l_TypesNS = new Namespace(true);
+
+INITIALIZE_ONCE_WITH_PRIORITY([]() {
+ ScriptGlobal::GetGlobals()->Set("Types", l_TypesNS, true);
+}, InitializePriority::CreateNamespaces);
+
+INITIALIZE_ONCE_WITH_PRIORITY([]() {
+ l_TypesNS->Freeze();
+
+ ObjectLock olock (l_TypesNS);
+ for (const auto& t : l_TypesNS) {
+ VERIFY(t.second.Val.IsObjectType<Type>());
+ }
+}, InitializePriority::FreezeNamespaces);
+
+/* Ensure that the priority is lower than the basic namespace initialization in scriptframe.cpp. */
+INITIALIZE_ONCE_WITH_PRIORITY([]() {
+ Type::Ptr type = new TypeType();
+ type->SetPrototype(TypeType::GetPrototype());
+ Type::TypeInstance = type;
+ Type::Register(type);
+}, InitializePriority::RegisterTypeType);
+
+String Type::ToString() const
+{
+ return "type '" + GetName() + "'";
+}
+
+void Type::Register(const Type::Ptr& type)
+{
+ ScriptGlobal::Set("Types." + type->GetName(), type);
+}
+
+Type::Ptr Type::GetByName(const String& name)
+{
+ Value ptype;
+
+ if (!l_TypesNS->Get(name, &ptype))
+ return nullptr;
+
+ return ptype;
+}
+
+std::vector<Type::Ptr> Type::GetAllTypes()
+{
+ std::vector<Type::Ptr> types;
+
+ Namespace::Ptr typesNS = ScriptGlobal::Get("Types", &Empty);
+
+ if (typesNS) {
+ ObjectLock olock(typesNS);
+
+ for (const Namespace::Pair& kv : typesNS) {
+ Value value = kv.second.Val;
+
+ if (value.IsObjectType<Type>())
+ types.push_back(value);
+ }
+ }
+
+ return types;
+}
+
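+/**
+ * Returns the plural form of the type name, e.g. "Host" becomes "Hosts"
+ * and "Dependency" becomes "Dependencies".
+ *
+ * @returns The plural name.
+ */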
+String Type::GetPluralName() const
+{
+ String name = GetName();
+
+ if (name.GetLength() >= 2 && name[name.GetLength() - 1] == 'y' &&
+ name.SubStr(name.GetLength() - 2, 1).FindFirstOf("aeiou") == String::NPos)
+ return name.SubStr(0, name.GetLength() - 1) + "ies";
+ else
+ return name + "s";
+}
+
+Object::Ptr Type::Instantiate(const std::vector<Value>& args) const
+{
+ ObjectFactory factory = GetFactory();
+
+ if (!factory)
+ BOOST_THROW_EXCEPTION(std::runtime_error("Type does not have a factory function."));
+
+ return factory(args);
+}
+
+bool Type::IsAbstract() const
+{
+ return ((GetAttributes() & TAAbstract) != 0);
+}
+
+bool Type::IsAssignableFrom(const Type::Ptr& other) const
+{
+ for (Type::Ptr t = other; t; t = t->GetBaseType()) {
+ if (t.get() == this)
+ return true;
+ }
+
+ return false;
+}
+
+Object::Ptr Type::GetPrototype() const
+{
+ return m_Prototype;
+}
+
+void Type::SetPrototype(const Object::Ptr& object)
+{
+ m_Prototype = object;
+}
+
+void Type::SetField(int id, const Value& value, bool suppress_events, const Value& cookie)
+{
+ if (id == 1) {
+ SetPrototype(value);
+ return;
+ }
+
+ Object::SetField(id, value, suppress_events, cookie);
+}
+
+Value Type::GetField(int id) const
+{
+ int real_id = id - Object::TypeInstance->GetFieldCount();
+ if (real_id < 0)
+ return Object::GetField(id);
+
+ if (real_id == 0)
+ return GetName();
+ else if (real_id == 1)
+ return GetPrototype();
+ else if (real_id == 2)
+ return GetBaseType();
+
+ BOOST_THROW_EXCEPTION(std::runtime_error("Invalid field ID."));
+}
+
+const std::unordered_set<Type*>& Type::GetLoadDependencies() const
+{
+ static const std::unordered_set<Type*> noDeps;
+ return noDeps;
+}
+
+int Type::GetActivationPriority() const
+{
+ return 0;
+}
+
+void Type::RegisterAttributeHandler(int fieldId, const AttributeHandler& callback)
+{
+ throw std::runtime_error("Invalid field ID.");
+}
+
+String TypeType::GetName() const
+{
+ return "Type";
+}
+
+Type::Ptr TypeType::GetBaseType() const
+{
+ return Object::TypeInstance;
+}
+
+int TypeType::GetAttributes() const
+{
+ return 0;
+}
+
+int TypeType::GetFieldId(const String& name) const
+{
+ int base_field_count = GetBaseType()->GetFieldCount();
+
+ if (name == "name")
+ return base_field_count + 0;
+ else if (name == "prototype")
+ return base_field_count + 1;
+ else if (name == "base")
+ return base_field_count + 2;
+
+ return GetBaseType()->GetFieldId(name);
+}
+
+Field TypeType::GetFieldInfo(int id) const
+{
+ int real_id = id - GetBaseType()->GetFieldCount();
+ if (real_id < 0)
+ return GetBaseType()->GetFieldInfo(id);
+
+ if (real_id == 0)
+ return {0, "String", "name", "", nullptr, 0, 0};
+ else if (real_id == 1)
+ return Field(1, "Object", "prototype", "", nullptr, 0, 0);
+ else if (real_id == 2)
+ return Field(2, "Type", "base", "", nullptr, 0, 0);
+
+ throw std::runtime_error("Invalid field ID.");
+}
+
+int TypeType::GetFieldCount() const
+{
+ return GetBaseType()->GetFieldCount() + 3;
+}
+
+ObjectFactory TypeType::GetFactory() const
+{
+ return nullptr;
+}
+
diff --git a/lib/base/type.hpp b/lib/base/type.hpp
new file mode 100644
index 0000000..7b8d1ca
--- /dev/null
+++ b/lib/base/type.hpp
@@ -0,0 +1,148 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef TYPE_H
+#define TYPE_H
+
+#include "base/i2-base.hpp"
+#include "base/string.hpp"
+#include "base/object.hpp"
+#include "base/initialize.hpp"
+#include <unordered_set>
+#include <vector>
+
+namespace icinga
+{
+
+/* keep this in sync with tools/mkclass/classcompiler.hpp */
+enum FieldAttribute
+{
+ FAEphemeral = 1,
+ FAConfig = 2,
+ FAState = 4,
+ FARequired = 256,
+ FANavigation = 512,
+ FANoUserModify = 1024,
+ FANoUserView = 2048,
+ FADeprecated = 4096,
+};
+
+class Type;
+
+struct Field
+{
+ int ID;
+ const char *TypeName;
+ const char *Name;
+ const char *NavigationName;
+ const char *RefTypeName;
+ int Attributes;
+ int ArrayRank;
+
+ Field(int id, const char *type, const char *name, const char *navigationName, const char *reftype, int attributes, int arrayRank)
+ : ID(id), TypeName(type), Name(name), NavigationName(navigationName), RefTypeName(reftype), Attributes(attributes), ArrayRank(arrayRank)
+ { }
+};
+
+enum TypeAttribute
+{
+ TAAbstract = 1
+};
+
+class ValidationUtils
+{
+public:
+ virtual bool ValidateName(const String& type, const String& name) const = 0;
+};
+
+class Type : public Object
+{
+public:
+ DECLARE_OBJECT(Type);
+
+ String ToString() const override;
+
+ virtual String GetName() const = 0;
+ virtual Type::Ptr GetBaseType() const = 0;
+ virtual int GetAttributes() const = 0;
+ virtual int GetFieldId(const String& name) const = 0;
+ virtual Field GetFieldInfo(int id) const = 0;
+ virtual int GetFieldCount() const = 0;
+
+ String GetPluralName() const;
+
+ Object::Ptr Instantiate(const std::vector<Value>& args) const;
+
+ bool IsAssignableFrom(const Type::Ptr& other) const;
+
+ bool IsAbstract() const;
+
+ Object::Ptr GetPrototype() const;
+ void SetPrototype(const Object::Ptr& object);
+
+ static void Register(const Type::Ptr& type);
+ static Type::Ptr GetByName(const String& name);
+ static std::vector<Type::Ptr> GetAllTypes();
+
+ void SetField(int id, const Value& value, bool suppress_events = false, const Value& cookie = Empty) override;
+ Value GetField(int id) const override;
+
+ virtual const std::unordered_set<Type*>& GetLoadDependencies() const;
+ virtual int GetActivationPriority() const;
+
+ typedef std::function<void (const Object::Ptr&, const Value&)> AttributeHandler;
+ virtual void RegisterAttributeHandler(int fieldId, const AttributeHandler& callback);
+
+protected:
+ virtual ObjectFactory GetFactory() const = 0;
+
+private:
+ Object::Ptr m_Prototype;
+};
+
+class TypeType final : public Type
+{
+public:
+ DECLARE_PTR_TYPEDEFS(Type);
+
+ String GetName() const override;
+ Type::Ptr GetBaseType() const override;
+ int GetAttributes() const override;
+ int GetFieldId(const String& name) const override;
+ Field GetFieldInfo(int id) const override;
+ int GetFieldCount() const override;
+
+ static Object::Ptr GetPrototype();
+
+protected:
+ ObjectFactory GetFactory() const override;
+};
+
+template<typename T>
+class TypeImpl
+{
+};
+
+/* Ensure that the priority is lower than the basic namespace initialization in scriptframe.cpp. */
+#define REGISTER_TYPE(type) \
+ INITIALIZE_ONCE_WITH_PRIORITY([]() { \
+ icinga::Type::Ptr t = new TypeImpl<type>(); \
+ type::TypeInstance = t; \
+ icinga::Type::Register(t); \
+ }, InitializePriority::RegisterTypes); \
+ DEFINE_TYPE_INSTANCE(type)
+
+#define REGISTER_TYPE_WITH_PROTOTYPE(type, prototype) \
+ INITIALIZE_ONCE_WITH_PRIORITY([]() { \
+ icinga::Type::Ptr t = new TypeImpl<type>(); \
+ t->SetPrototype(prototype); \
+ type::TypeInstance = t; \
+ icinga::Type::Register(t); \
+ }, InitializePriority::RegisterTypes); \
+ DEFINE_TYPE_INSTANCE(type)
+
+#define DEFINE_TYPE_INSTANCE(type) \
+ Type::Ptr type::TypeInstance
+
+}
+
+#endif /* TYPE_H */
diff --git a/lib/base/typetype-script.cpp b/lib/base/typetype-script.cpp
new file mode 100644
index 0000000..9077de8
--- /dev/null
+++ b/lib/base/typetype-script.cpp
@@ -0,0 +1,31 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/type.hpp"
+#include "base/dictionary.hpp"
+#include "base/function.hpp"
+#include "base/functionwrapper.hpp"
+#include "base/scriptframe.hpp"
+
+using namespace icinga;
+
+static void TypeRegisterAttributeHandler(const String& fieldName, const Function::Ptr& callback)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Type::Ptr self = static_cast<Type::Ptr>(vframe->Self);
+ REQUIRE_NOT_NULL(self);
+
+ int fid = self->GetFieldId(fieldName);
+ self->RegisterAttributeHandler(fid, [callback](const Object::Ptr& object, const Value& cookie) {
+ callback->Invoke({ object });
+ });
+}
+
+Object::Ptr TypeType::GetPrototype()
+{
+ static Dictionary::Ptr prototype = new Dictionary({
+ { "register_attribute_handler", new Function("Type#register_attribute_handler", TypeRegisterAttributeHandler, { "field", "callback" }, false) }
+ });
+
+ return prototype;
+}
+
diff --git a/lib/base/unix.hpp b/lib/base/unix.hpp
new file mode 100644
index 0000000..7413a5b
--- /dev/null
+++ b/lib/base/unix.hpp
@@ -0,0 +1,49 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef UNIX_H
+#define UNIX_H
+
+#include <limits.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <netdb.h>
+#include <sys/ioctl.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <libgen.h>
+#include <syslog.h>
+#include <sys/file.h>
+#include <sys/wait.h>
+#include <glob.h>
+#include <dlfcn.h>
+#include <sys/stat.h>
+#include <dirent.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <strings.h>
+#include <errno.h>
+
+typedef int SOCKET;
+#define INVALID_SOCKET (-1)
+
+#define closesocket close
+#define ioctlsocket ioctl
+
+#ifndef SUN_LEN
+/* TODO: Ideally this should take into account how
+ * long the socket path really is.
+ */
+# define SUN_LEN(sun) (sizeof(sockaddr_un))
+#endif /* SUN_LEN */
+
+#ifndef PATH_MAX
+# define PATH_MAX 1024
+#endif /* PATH_MAX */
+
+#ifndef MAXPATHLEN
+# define MAXPATHLEN PATH_MAX
+#endif /* MAXPATHLEN */
+#endif /* UNIX_H */
diff --git a/lib/base/unixsocket.cpp b/lib/base/unixsocket.cpp
new file mode 100644
index 0000000..dcc56ff
--- /dev/null
+++ b/lib/base/unixsocket.cpp
@@ -0,0 +1,53 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/unixsocket.hpp"
+#include "base/exception.hpp"
+
+#ifndef _WIN32
+using namespace icinga;
+
+UnixSocket::UnixSocket()
+{
+ int fd = socket(AF_UNIX, SOCK_STREAM, 0);
+
+ if (fd < 0) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("socket")
+ << boost::errinfo_errno(errno));
+ }
+
+ SetFD(fd);
+}
+
+void UnixSocket::Bind(const String& path)
+{
+ unlink(path.CStr());
+
+ sockaddr_un s_un;
+ memset(&s_un, 0, sizeof(s_un));
+ s_un.sun_family = AF_UNIX;
+ strncpy(s_un.sun_path, path.CStr(), sizeof(s_un.sun_path));
+ s_un.sun_path[sizeof(s_un.sun_path) - 1] = '\0';
+
+ if (bind(GetFD(), (sockaddr *)&s_un, SUN_LEN(&s_un)) < 0) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("bind")
+ << boost::errinfo_errno(errno));
+ }
+}
+
+void UnixSocket::Connect(const String& path)
+{
+ sockaddr_un s_un;
+ memset(&s_un, 0, sizeof(s_un));
+ s_un.sun_family = AF_UNIX;
+ strncpy(s_un.sun_path, path.CStr(), sizeof(s_un.sun_path));
+ s_un.sun_path[sizeof(s_un.sun_path) - 1] = '\0';
+
+ if (connect(GetFD(), (sockaddr *)&s_un, SUN_LEN(&s_un)) < 0 && errno != EINPROGRESS) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("connect")
+ << boost::errinfo_errno(errno));
+ }
+}
+#endif /* _WIN32 */
diff --git a/lib/base/unixsocket.hpp b/lib/base/unixsocket.hpp
new file mode 100644
index 0000000..80a9f25
--- /dev/null
+++ b/lib/base/unixsocket.hpp
@@ -0,0 +1,32 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef UNIXSOCKET_H
+#define UNIXSOCKET_H
+
+#include "base/socket.hpp"
+
+#ifndef _WIN32
+namespace icinga
+{
+
+/**
+ * A Unix domain socket. DEPRECATED - Use Boost ASIO instead.
+ *
+ * @ingroup base
+ */
+class UnixSocket final : public Socket
+{
+public:
+ DECLARE_PTR_TYPEDEFS(UnixSocket);
+
+ UnixSocket();
+
+ void Bind(const String& path);
+
+ void Connect(const String& path);
+};
+
+}
+#endif /* _WIN32 */
+
+#endif /* UNIXSOCKET_H */
diff --git a/lib/base/utility.cpp b/lib/base/utility.cpp
new file mode 100644
index 0000000..6ff84ae
--- /dev/null
+++ b/lib/base/utility.cpp
@@ -0,0 +1,1975 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/atomic-file.hpp"
+#include "base/utility.hpp"
+#include "base/convert.hpp"
+#include "base/application.hpp"
+#include "base/logger.hpp"
+#include "base/exception.hpp"
+#include "base/socket.hpp"
+#include "base/utility.hpp"
+#include "base/json.hpp"
+#include "base/objectlock.hpp"
+#include <algorithm>
+#include <cstdint>
+#include <mmatch.h>
+#include <boost/filesystem.hpp>
+#include <boost/lexical_cast.hpp>
+#include <boost/system/error_code.hpp>
+#include <boost/thread/tss.hpp>
+#include <boost/algorithm/string/trim.hpp>
+#include <boost/algorithm/string/replace.hpp>
+#include <boost/uuid/uuid_io.hpp>
+#include <boost/uuid/uuid_generators.hpp>
+#include <boost/regex.hpp>
+#include <ios>
+#include <fstream>
+#include <iostream>
+#include <iterator>
+#include <stdlib.h>
+#include <future>
+#include <set>
+#include <utf8.h>
+#include <vector>
+
+#ifdef __FreeBSD__
+# include <pthread_np.h>
+#endif /* __FreeBSD__ */
+
+#ifdef HAVE_CXXABI_H
+# include <cxxabi.h>
+#endif /* HAVE_CXXABI_H */
+
+#ifndef _WIN32
+# include <sys/types.h>
+# include <sys/utsname.h>
+# include <pwd.h>
+# include <grp.h>
+# include <errno.h>
+# include <unistd.h>
+#endif /* _WIN32 */
+
+#ifdef _WIN32
+# include <VersionHelpers.h>
+# include <windows.h>
+# include <io.h>
+# include <msi.h>
+# include <shlobj.h>
+#endif /*_WIN32*/
+
+using namespace icinga;
+
+boost::thread_specific_ptr<String> Utility::m_ThreadName;
+boost::thread_specific_ptr<unsigned int> Utility::m_RandSeed;
+
+#ifdef I2_DEBUG
+double Utility::m_DebugTime = -1;
+#endif /* I2_DEBUG */
+
+/**
+ * Demangles a symbol name.
+ *
+ * @param sym The symbol name.
+ * @returns A human-readable version of the symbol name.
+ */
+String Utility::DemangleSymbolName(const String& sym)
+{
+ String result = sym;
+
+#ifdef HAVE_CXXABI_H
+ int status;
+ char *realname = abi::__cxa_demangle(sym.CStr(), nullptr, nullptr, &status);
+
+ if (realname) {
+ result = String(realname);
+ free(realname);
+ }
+#elif defined(_MSC_VER) /* HAVE_CXXABI_H */
+ CHAR output[256];
+
+ if (UnDecorateSymbolName(sym.CStr(), output, sizeof(output), UNDNAME_COMPLETE) > 0)
+ result = output;
+#else /* _MSC_VER */
+ /* We're pretty much out of options here. */
+#endif /* _MSC_VER */
+
+ return result;
+}
+
+/**
+ * Returns a human-readable type name of a type_info object.
+ *
+ * @param ti A type_info object.
+ * @returns The type name of the object.
+ */
+String Utility::GetTypeName(const std::type_info& ti)
+{
+ return DemangleSymbolName(ti.name());
+}
+
+String Utility::GetSymbolName(const void *addr)
+{
+#ifdef HAVE_DLADDR
+ Dl_info dli;
+
+ if (dladdr(const_cast<void *>(addr), &dli) > 0)
+ return dli.dli_sname;
+#endif /* HAVE_DLADDR */
+
+#ifdef _WIN32
+ char buffer[sizeof(SYMBOL_INFO)+MAX_SYM_NAME * sizeof(TCHAR)];
+ PSYMBOL_INFO pSymbol = (PSYMBOL_INFO)buffer;
+ pSymbol->SizeOfStruct = sizeof(SYMBOL_INFO);
+ pSymbol->MaxNameLen = MAX_SYM_NAME;
+
+ DWORD64 dwAddress = (DWORD64)addr;
+ DWORD64 dwDisplacement;
+
+ IMAGEHLP_LINE64 line;
+ line.SizeOfStruct = sizeof(IMAGEHLP_LINE64);
+
+ if (SymFromAddr(GetCurrentProcess(), dwAddress, &dwDisplacement, pSymbol)) {
+ char output[256];
+ if (UnDecorateSymbolName(pSymbol->Name, output, sizeof(output), UNDNAME_COMPLETE))
+ return String(output) + "+" + Convert::ToString(dwDisplacement);
+ else
+ return String(pSymbol->Name) + "+" + Convert::ToString(dwDisplacement);
+ }
+#endif /* _WIN32 */
+
+ return "(unknown function)";
+}
+
+/**
+ * Performs wildcard pattern matching.
+ *
+ * @param pattern The wildcard pattern.
+ * @param text The String that should be checked.
+ * @returns true if the wildcard pattern matches, false otherwise.
+ */
+bool Utility::Match(const String& pattern, const String& text)
+{
+ return (match(pattern.CStr(), text.CStr()) == 0);
+}
+
+static bool ParseIp(const String& ip, char addr[16], int *proto)
+{
+ if (inet_pton(AF_INET, ip.CStr(), addr + 12) == 1) {
+ /* IPv4-mapped IPv6 address (::ffff:<ipv4-bits>) */
+ memset(addr, 0, 10);
+ memset(addr + 10, 0xff, 2);
+ *proto = AF_INET;
+
+ return true;
+ }
+
+ if (inet_pton(AF_INET6, ip.CStr(), addr) == 1) {
+ *proto = AF_INET6;
+
+ return true;
+ }
+
+ return false;
+}
+
+static void ParseIpMask(const String& ip, char mask[16], int *bits)
+{
+ String::SizeType slashp = ip.FindFirstOf("/");
+ String uip;
+
+ if (slashp == String::NPos) {
+ uip = ip;
+ *bits = 0;
+ } else {
+ uip = ip.SubStr(0, slashp);
+ *bits = Convert::ToLong(ip.SubStr(slashp + 1));
+ }
+
+ int proto;
+
+ if (!ParseIp(uip, mask, &proto))
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid IP address specified."));
+
+ if (proto == AF_INET) {
+ if (*bits > 32 || *bits < 0)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Mask must be between 0 and 32 for IPv4 CIDR masks."));
+
+ *bits += 96;
+ }
+
+ if (slashp == String::NPos)
+ *bits = 128;
+
+ if (*bits > 128 || *bits < 0)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Mask must be between 0 and 128 for IPv6 CIDR masks."));
+
+ for (int i = 0; i < 16; i++) {
+ int lbits = std::max(0, *bits - i * 8);
+
+ if (lbits >= 8)
+ continue;
+
+ if (mask[i] & (0xff >> lbits))
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Masked-off bits must all be zero."));
+ }
+}
+
+static bool IpMaskCheck(char addr[16], char mask[16], int bits)
+{
+ for (int i = 0; i < 16; i++) {
+ if (bits < 8)
+ return !((addr[i] ^ mask[i]) >> (8 - bits));
+
+ if (mask[i] != addr[i])
+ return false;
+
+ bits -= 8;
+
+ if (bits == 0)
+ return true;
+ }
+
+ return true;
+}
+
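+/**
+ * Checks whether an IP address is contained in a CIDR network, e.g.
+ * CidrMatch("192.168.0.0/16", "192.168.3.4") returns true. Patterns without
+ * a prefix length only match the exact address.
+ *
+ * @param pattern The CIDR pattern (IPv4 or IPv6).
+ * @param ip The IP address to check.
+ * @returns true if the address matches the pattern.
+ */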
+bool Utility::CidrMatch(const String& pattern, const String& ip)
+{
+ char mask[16];
+ int bits;
+
+ ParseIpMask(pattern, mask, &bits);
+
+ char addr[16];
+ int proto;
+
+ if (!ParseIp(ip, addr, &proto))
+ return false;
+
+ return IpMaskCheck(addr, mask, bits);
+}
+
+/**
+ * Returns the directory component of a path. See dirname(3) for details.
+ *
+ * @param path The full path.
+ * @returns The directory.
+ */
+String Utility::DirName(const String& path)
+{
+ return boost::filesystem::path(path.Begin(), path.End()).parent_path().string();
+}
+
+/**
+ * Returns the file component of a path. See basename(3) for details.
+ *
+ * @param path The full path.
+ * @returns The filename.
+ */
+String Utility::BaseName(const String& path)
+{
+ return boost::filesystem::path(path.Begin(), path.End()).filename().string();
+}
+
+/**
+ * Null deleter. Used as a parameter for the shared_ptr constructor.
+ *
+ * @param - The object that should be deleted.
+ */
+void Utility::NullDeleter(void *)
+{
+ /* Nothing to do here. */
+}
+
+#ifdef I2_DEBUG
+/**
+ * (DEBUG / TESTING ONLY) Sets the current system time to a static value
+ * that will be retrieved by any component of Icinga when using GetTime().
+ *
+ * This should only be used for testing purposes, e.g. unit tests and debugging of certain functionalities.
+ */
+void Utility::SetTime(double time)
+{
+ m_DebugTime = time;
+}
+
+/**
+ * (DEBUG / TESTING ONLY) Increases the previously set debug system time by the given number of seconds.
+ *
+ * This should only be used for testing purposes, e.g. unit tests and debugging of certain functionalities.
+ */
+void Utility::IncrementTime(double diff)
+{
+ m_DebugTime += diff;
+}
+#endif /* I2_DEBUG */
+
+/**
+ * Returns the current UNIX timestamp including fractions of seconds.
+ *
+ * @returns The current time.
+ */
+double Utility::GetTime()
+{
+#ifdef I2_DEBUG
+ if (m_DebugTime >= 0) {
+ // (DEBUG / TESTING ONLY) this will return a *STATIC* system time, if the value has been set!
+ return m_DebugTime;
+ }
+#endif /* I2_DEBUG */
+#ifdef _WIN32
+ FILETIME cft;
+ GetSystemTimeAsFileTime(&cft);
+
+ ULARGE_INTEGER ucft;
+ ucft.HighPart = cft.dwHighDateTime;
+ ucft.LowPart = cft.dwLowDateTime;
+
+ SYSTEMTIME est = { 1970, 1, 4, 1, 0, 0, 0, 0};
+ FILETIME eft;
+ SystemTimeToFileTime(&est, &eft);
+
+ ULARGE_INTEGER ueft;
+ ueft.HighPart = eft.dwHighDateTime;
+ ueft.LowPart = eft.dwLowDateTime;
+
+ return ((ucft.QuadPart - ueft.QuadPart) / 10000) / 1000.0;
+#else /* _WIN32 */
+ struct timeval tv;
+
+ int rc = gettimeofday(&tv, nullptr);
+ VERIFY(rc >= 0);
+
+ return tv.tv_sec + tv.tv_usec / 1000000.0;
+#endif /* _WIN32 */
+}
+
+/**
+ * Returns the ID of the current process.
+ *
+ * @returns The PID.
+ */
+pid_t Utility::GetPid()
+{
+#ifndef _WIN32
+ return getpid();
+#else /* _WIN32 */
+ return GetCurrentProcessId();
+#endif /* _WIN32 */
+}
+
+/**
+ * Sleeps for the specified amount of time.
+ *
+ * @param timeout The timeout in seconds.
+ */
+void Utility::Sleep(double timeout)
+{
+#ifndef _WIN32
+ unsigned long micros = timeout * 1000000u;
+ if (timeout >= 1.0)
+ sleep((unsigned)timeout);
+
+ usleep(micros % 1000000u);
+#else /* _WIN32 */
+ ::Sleep(timeout * 1000);
+#endif /* _WIN32 */
+}
+
+/**
+ * Generates a new unique ID.
+ *
+ * @returns The new unique ID.
+ */
+String Utility::NewUniqueID()
+{
+ return boost::lexical_cast<std::string>(boost::uuids::random_generator()());
+}
+
+#ifdef _WIN32
+static bool GlobHelper(const String& pathSpec, int type, std::vector<String>& files, std::vector<String>& dirs)
+{
+ HANDLE handle;
+ WIN32_FIND_DATA wfd;
+
+ handle = FindFirstFile(pathSpec.CStr(), &wfd);
+
+ if (handle == INVALID_HANDLE_VALUE) {
+ DWORD errorCode = GetLastError();
+
+ if (errorCode == ERROR_FILE_NOT_FOUND)
+ return false;
+
+ BOOST_THROW_EXCEPTION(win32_error()
+ << boost::errinfo_api_function("FindFirstFile")
+ << errinfo_win32_error(errorCode)
+ << boost::errinfo_file_name(pathSpec));
+ }
+
+ do {
+ if (strcmp(wfd.cFileName, ".") == 0 || strcmp(wfd.cFileName, "..") == 0)
+ continue;
+
+ String path = Utility::DirName(pathSpec) + "/" + wfd.cFileName;
+
+ if ((wfd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) && (type & GlobDirectory))
+ dirs.push_back(path);
+ else if (!(wfd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) && (type & GlobFile))
+ files.push_back(path);
+ } while (FindNextFile(handle, &wfd));
+
+ if (!FindClose(handle)) {
+ BOOST_THROW_EXCEPTION(win32_error()
+ << boost::errinfo_api_function("FindClose")
+ << errinfo_win32_error(GetLastError()));
+ }
+
+ return true;
+}
+#endif /* _WIN32 */
+
+#ifndef _WIN32
+static int GlobErrorHandler(const char *epath, int eerrno)
+{
+ if (eerrno == ENOTDIR)
+ return 0;
+
+ return eerrno;
+}
+#endif /* _WIN32 */
+
+/**
+ * Calls the specified callback for each file matching the path specification.
+ *
+ * @param pathSpec The path specification.
+ * @param callback The callback which is invoked for each matching file.
+ * @param type The file type (a combination of GlobFile and GlobDirectory)
+ */
+bool Utility::Glob(const String& pathSpec, const std::function<void (const String&)>& callback, int type)
+{
+ std::vector<String> files, dirs;
+
+#ifdef _WIN32
+ std::vector<String> tokens = pathSpec.Split("\\/");
+
+ String part1;
+
+ for (std::vector<String>::size_type i = 0; i < tokens.size() - 1; i++) {
+ const String& token = tokens[i];
+
+ if (!part1.IsEmpty())
+ part1 += "/";
+
+ part1 += token;
+
+ if (token.FindFirstOf("?*") != String::NPos) {
+ String part2;
+
+ for (std::vector<String>::size_type k = i + 1; k < tokens.size(); k++) {
+ if (!part2.IsEmpty())
+ part2 += "/";
+
+ part2 += tokens[k];
+ }
+
+ std::vector<String> files2, dirs2;
+
+ if (!GlobHelper(part1, GlobDirectory, files2, dirs2))
+ return false;
+
+ for (const String& dir : dirs2) {
+ if (!Utility::Glob(dir + "/" + part2, callback, type))
+ return false;
+ }
+
+ return true;
+ }
+ }
+
+ if (!GlobHelper(part1 + "/" + tokens[tokens.size() - 1], type, files, dirs))
+ return false;
+#else /* _WIN32 */
+ glob_t gr;
+
+ int rc = glob(pathSpec.CStr(), GLOB_NOSORT, GlobErrorHandler, &gr);
+
+ if (rc) {
+ if (rc == GLOB_NOMATCH)
+ return false;
+
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("glob")
+ << boost::errinfo_errno(errno)
+ << boost::errinfo_file_name(pathSpec));
+ }
+
+ if (gr.gl_pathc == 0) {
+ globfree(&gr);
+ return false;
+ }
+
+ size_t left;
+ char **gp;
+ for (gp = gr.gl_pathv, left = gr.gl_pathc; left > 0; gp++, left--) {
+ struct stat statbuf;
+
+ if (stat(*gp, &statbuf) < 0)
+ continue;
+
+ if (!S_ISDIR(statbuf.st_mode) && !S_ISREG(statbuf.st_mode))
+ continue;
+
+ if (S_ISDIR(statbuf.st_mode) && (type & GlobDirectory))
+ dirs.emplace_back(*gp);
+ else if (!S_ISDIR(statbuf.st_mode) && (type & GlobFile))
+ files.emplace_back(*gp);
+ }
+
+ globfree(&gr);
+#endif /* _WIN32 */
+
+ std::sort(files.begin(), files.end());
+ for (const String& cpath : files) {
+ callback(cpath);
+ }
+
+ std::sort(dirs.begin(), dirs.end());
+ for (const String& cpath : dirs) {
+ callback(cpath);
+ }
+
+ return true;
+}
+
+/**
+ * Calls the specified callback for each file in the specified directory
+ * or any of its child directories if the file name matches the specified
+ * pattern.
+ *
+ * @param path The path.
+ * @param pattern The pattern.
+ * @param callback The callback which is invoked for each matching file.
+ * @param type The file type (a combination of GlobFile and GlobDirectory)
+ */
+bool Utility::GlobRecursive(const String& path, const String& pattern, const std::function<void (const String&)>& callback, int type)
+{
+ std::vector<String> files, dirs, alldirs;
+
+#ifdef _WIN32
+ HANDLE handle;
+ WIN32_FIND_DATA wfd;
+
+ String pathSpec = path + "/*";
+
+ handle = FindFirstFile(pathSpec.CStr(), &wfd);
+
+ if (handle == INVALID_HANDLE_VALUE) {
+ DWORD errorCode = GetLastError();
+
+ if (errorCode == ERROR_FILE_NOT_FOUND)
+ return false;
+
+ BOOST_THROW_EXCEPTION(win32_error()
+ << boost::errinfo_api_function("FindFirstFile")
+ << errinfo_win32_error(errorCode)
+ << boost::errinfo_file_name(pathSpec));
+ }
+
+ do {
+ if (strcmp(wfd.cFileName, ".") == 0 || strcmp(wfd.cFileName, "..") == 0)
+ continue;
+
+ String cpath = path + "/" + wfd.cFileName;
+
+ if (wfd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)
+ alldirs.push_back(cpath);
+
+ if (!Utility::Match(pattern, wfd.cFileName))
+ continue;
+
+ if (!(wfd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) && (type & GlobFile))
+ files.push_back(cpath);
+
+ if ((wfd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) && (type & GlobDirectory))
+ dirs.push_back(cpath);
+ } while (FindNextFile(handle, &wfd));
+
+ if (!FindClose(handle)) {
+ BOOST_THROW_EXCEPTION(win32_error()
+ << boost::errinfo_api_function("FindClose")
+ << errinfo_win32_error(GetLastError()));
+ }
+#else /* _WIN32 */
+ DIR *dirp;
+
+ dirp = opendir(path.CStr());
+
+ if (!dirp)
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("opendir")
+ << boost::errinfo_errno(errno)
+ << boost::errinfo_file_name(path));
+
+ while (dirp) {
+ dirent *pent;
+
+ errno = 0;
+ pent = readdir(dirp);
+ if (!pent && errno != 0) {
+ closedir(dirp);
+
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("readdir")
+ << boost::errinfo_errno(errno)
+ << boost::errinfo_file_name(path));
+ }
+
+ if (!pent)
+ break;
+
+ if (strcmp(pent->d_name, ".") == 0 || strcmp(pent->d_name, "..") == 0)
+ continue;
+
+ String cpath = path + "/" + pent->d_name;
+
+ struct stat statbuf;
+
+ if (stat(cpath.CStr(), &statbuf) < 0)
+ continue;
+
+ if (S_ISDIR(statbuf.st_mode))
+ alldirs.push_back(cpath);
+
+ if (!Utility::Match(pattern, pent->d_name))
+ continue;
+
+ if (S_ISDIR(statbuf.st_mode) && (type & GlobDirectory))
+ dirs.push_back(cpath);
+
+ if (!S_ISDIR(statbuf.st_mode) && (type & GlobFile))
+ files.push_back(cpath);
+ }
+
+ closedir(dirp);
+
+#endif /* _WIN32 */
+
+ std::sort(files.begin(), files.end());
+ for (const String& cpath : files) {
+ callback(cpath);
+ }
+
+ std::sort(dirs.begin(), dirs.end());
+ for (const String& cpath : dirs) {
+ callback(cpath);
+ }
+
+ std::sort(alldirs.begin(), alldirs.end());
+ for (const String& cpath : alldirs) {
+ GlobRecursive(cpath, pattern, callback, type);
+ }
+
+ return true;
+}
+
+
+void Utility::MkDir(const String& path, int mode)
+{
+
+#ifndef _WIN32
+ if (mkdir(path.CStr(), mode) < 0 && errno != EEXIST) {
+#else /*_ WIN32 */
+ if (mkdir(path.CStr()) < 0 && errno != EEXIST) {
+#endif /* _WIN32 */
+
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("mkdir")
+ << boost::errinfo_errno(errno)
+ << boost::errinfo_file_name(path));
+ }
+}
+
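+/**
+ * Creates a directory and all missing parent directories (similar to `mkdir -p`).
+ *
+ * @param path The directory path.
+ * @param mode The mode for newly created directories (ignored on Windows).
+ */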
+void Utility::MkDirP(const String& path, int mode)
+{
+ size_t pos = 0;
+
+ while (pos != String::NPos) {
+#ifndef _WIN32
+ pos = path.Find("/", pos + 1);
+#else /*_ WIN32 */
+ pos = path.FindFirstOf("/\\", pos + 1);
+#endif /* _WIN32 */
+
+ String spath = path.SubStr(0, pos + 1);
+ struct stat statbuf;
+ if (stat(spath.CStr(), &statbuf) < 0 && errno == ENOENT)
+ MkDir(path.SubStr(0, pos), mode);
+ }
+}
+
+void Utility::Remove(const String& path)
+{
+ namespace fs = boost::filesystem;
+
+ (void)fs::remove(fs::path(path.Begin(), path.End()));
+}
+
+void Utility::RemoveDirRecursive(const String& path)
+{
+ namespace fs = boost::filesystem;
+
+ (void)fs::remove_all(fs::path(path.Begin(), path.End()));
+}
+
+/*
+ * Copies a source file to a target location.
+ * Caller must ensure that the target's base directory exists and is writable.
+ */
+void Utility::CopyFile(const String& source, const String& target)
+{
+ namespace fs = boost::filesystem;
+
+#if BOOST_VERSION >= 107400
+ fs::copy_file(fs::path(source.Begin(), source.End()), fs::path(target.Begin(), target.End()), fs::copy_options::overwrite_existing);
+#else /* BOOST_VERSION */
+ fs::copy_file(fs::path(source.Begin(), source.End()), fs::path(target.Begin(), target.End()), fs::copy_option::overwrite_if_exists);
+#endif /* BOOST_VERSION */
+}
+
+/*
+ * Renames a source file to a target location.
+ * Caller must ensure that the target's base directory exists and is writable.
+ */
+void Utility::RenameFile(const String& source, const String& target)
+{
+ namespace fs = boost::filesystem;
+
+ fs::path sourcePath(source.Begin(), source.End()), targetPath(target.Begin(), target.End());
+
+#ifndef _WIN32
+ fs::rename(sourcePath, targetPath);
+#else /* _WIN32 */
+ /*
+ * Renaming files can be tricky on Windows, especially if your application is built around POSIX filesystem
+ * semantics. For example, the quite common pattern of replacing a file by writing a new version to a temporary
+ * location and then moving it to the final location can fail if the destination file already exists and any
+ * process has an open file handle to it.
+ *
+ * We try to handle this situation as best as we can by retrying the rename operation a few times hoping the other
+ * process closes its file handle in the meantime. This is similar to what for example Go does internally in some
+ * situations (https://golang.org/pkg/cmd/go/internal/robustio/#Rename):
+ *
+ * robustio.Rename is like os.Rename, but on Windows retries errors that may occur if the file is concurrently
+ * read or overwritten. (See https://golang.org/issue/31247 and https://golang.org/issue/32188)
+ */
+
+ double sleep = 0.1;
+ int last_error = ERROR_SUCCESS;
+
+ for (int retries = 0, remaining = 15;; retries++, remaining--) {
+ try {
+ fs::rename(sourcePath, targetPath);
+
+ if (retries > 0) {
+ Log(LogWarning, "Utility") << "Renaming '" << source << "' to '" << target
+ << "' succeeded after " << retries << " retries";
+ }
+
+ break;
+ } catch (const fs::filesystem_error& ex) {
+ int error = ex.code().value();
+ bool ephemeral = error == ERROR_ACCESS_DENIED ||
+ error == ERROR_FILE_NOT_FOUND ||
+ error == ERROR_SHARING_VIOLATION;
+
+ if (remaining <= 0 || !ephemeral) {
+ throw; // giving up
+ }
+
+ if (error != last_error) {
+ Log(LogWarning, "Utility") << "Renaming '" << source << "' to '" << target << "' failed: "
+ << ex.code().message() << " (trying up to " << remaining << " more times)";
+ last_error = error;
+ }
+
+ Utility::Sleep(sleep);
+ sleep *= 1.3;
+ }
+ }
+#endif /* _WIN32 */
+}
+
+/*
+ * Sets the owner and group of a file.
+ */
+bool Utility::SetFileOwnership(const String& file, const String& user, const String& group)
+{
+#ifndef _WIN32
+ errno = 0;
+ struct passwd *pw = getpwnam(user.CStr());
+
+ if (!pw) {
+ if (errno == 0) {
+ Log(LogCritical, "cli")
+ << "Invalid user specified: " << user;
+ return false;
+ } else {
+ Log(LogCritical, "cli")
+ << "getpwnam() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+ return false;
+ }
+ }
+
+ errno = 0;
+ struct group *gr = getgrnam(group.CStr());
+
+ if (!gr) {
+ if (errno == 0) {
+ Log(LogCritical, "cli")
+ << "Invalid group specified: " << group;
+ return false;
+ } else {
+ Log(LogCritical, "cli")
+ << "getgrnam() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+ return false;
+ }
+ }
+
+ if (chown(file.CStr(), pw->pw_uid, gr->gr_gid) < 0) {
+ Log(LogCritical, "cli")
+ << "chown() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+ return false;
+ }
+#endif /* _WIN32 */
+
+ return true;
+}
+
+#ifndef _WIN32
+void Utility::SetNonBlocking(int fd, bool nb)
+{
+ int flags = fcntl(fd, F_GETFL, 0);
+
+ if (flags < 0) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("fcntl")
+ << boost::errinfo_errno(errno));
+ }
+
+ if (nb)
+ flags |= O_NONBLOCK;
+ else
+ flags &= ~O_NONBLOCK;
+
+ if (fcntl(fd, F_SETFL, flags) < 0) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("fcntl")
+ << boost::errinfo_errno(errno));
+ }
+}
+
+void Utility::SetCloExec(int fd, bool cloexec)
+{
+ int flags = fcntl(fd, F_GETFD, 0);
+
+ if (flags < 0) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("fcntl")
+ << boost::errinfo_errno(errno));
+ }
+
+ if (cloexec)
+ flags |= FD_CLOEXEC;
+ else
+ flags &= ~FD_CLOEXEC;
+
+ if (fcntl(fd, F_SETFD, flags) < 0) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("fcntl")
+ << boost::errinfo_errno(errno));
+ }
+}
+
+void Utility::CloseAllFDs(const std::vector<int>& except, std::function<void(int)> onClose)
+{
+#if defined(__linux__) || defined(__APPLE__)
+ namespace fs = boost::filesystem;
+
+ std::set<int> fds;
+
+#ifdef __linux__
+ const char *dir = "/proc/self/fd";
+#endif /* __linux__ */
+#ifdef __APPLE__
+ const char *dir = "/dev/fd";
+#endif /* __APPLE__ */
+
+ for (fs::directory_iterator current {fs::path(dir)}, end; current != end; ++current) {
+ auto entry (current->path().filename());
+ int fd;
+
+ try {
+ fd = boost::lexical_cast<int>(entry.c_str());
+ } catch (...) {
+ continue;
+ }
+
+ fds.emplace(fd);
+ }
+
+ for (auto fd : except) {
+ fds.erase(fd);
+ }
+
+ for (auto fd : fds) {
+ if (close(fd) >= 0 && onClose) {
+ onClose(fd);
+ }
+ }
+#else /* __linux__ || __APPLE__ */
+ rlimit rl;
+
+ if (getrlimit(RLIMIT_NOFILE, &rl) >= 0) {
+ rlim_t maxfds = rl.rlim_max;
+
+ if (maxfds == RLIM_INFINITY) {
+ maxfds = 65536;
+ }
+
+ for (int fd = 0; fd < maxfds; ++fd) {
+ if (std::find(except.begin(), except.end(), fd) == except.end() && close(fd) >= 0 && onClose) {
+ onClose(fd);
+ }
+ }
+ }
+#endif /* __linux__ || __APPLE__ */
+}
+#endif /* _WIN32 */
+
+void Utility::SetNonBlockingSocket(SOCKET s, bool nb)
+{
+#ifndef _WIN32
+ SetNonBlocking(s, nb);
+#else /* _WIN32 */
+ unsigned long lflag = nb;
+ ioctlsocket(s, FIONBIO, &lflag);
+#endif /* _WIN32 */
+}
+
+void Utility::QueueAsyncCallback(const std::function<void ()>& callback, SchedulerPolicy policy)
+{
+ Application::GetTP().Post(callback, policy);
+}
+
+String Utility::NaturalJoin(const std::vector<String>& tokens)
+{
+ String result;
+
+ for (std::vector<String>::size_type i = 0; i < tokens.size(); i++) {
+ result += tokens[i];
+
+ if (tokens.size() > i + 1) {
+ if (i < tokens.size() - 2)
+ result += ", ";
+ else if (i == tokens.size() - 2)
+ result += " and ";
+ }
+ }
+
+ return result;
+}
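
Editor's illustration (not part of the upstream patch): NaturalJoin() separates all tokens with ", " except the last pair, which is joined with " and "; FormatDuration() below relies on exactly this behaviour:

    #include "base/utility.hpp"
    #include <iostream>
    #include <vector>

    using namespace icinga;

    int main()
    {
        std::vector<String> tokens { "1 hour", "5 minutes", "30 seconds" };

        /* Prints: 1 hour, 5 minutes and 30 seconds */
        std::cout << Utility::NaturalJoin(tokens).CStr() << "\n";
    }
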
+
+String Utility::Join(const Array::Ptr& tokens, char separator, bool escapeSeparator)
+{
+ String result;
+ bool first = true;
+
+ ObjectLock olock(tokens);
+ for (const Value& vtoken : tokens) {
+ String token = Convert::ToString(vtoken);
+
+ if (escapeSeparator) {
+ boost::algorithm::replace_all(token, "\\", "\\\\");
+
+ char sep_before[2], sep_after[3];
+ sep_before[0] = separator;
+ sep_before[1] = '\0';
+ sep_after[0] = '\\';
+ sep_after[1] = separator;
+ sep_after[2] = '\0';
+ boost::algorithm::replace_all(token, sep_before, sep_after);
+ }
+
+ if (first)
+ first = false;
+ else
+ result += String(1, separator);
+
+ result += token;
+ }
+
+ return result;
+}
+
+String Utility::FormatDuration(double duration)
+{
+ std::vector<String> tokens;
+ String result;
+
+ if (duration >= 86400) {
+ int days = duration / 86400;
+ tokens.emplace_back(Convert::ToString(days) + (days != 1 ? " days" : " day"));
+ duration = static_cast<int>(duration) % 86400;
+ }
+
+ if (duration >= 3600) {
+ int hours = duration / 3600;
+ tokens.emplace_back(Convert::ToString(hours) + (hours != 1 ? " hours" : " hour"));
+ duration = static_cast<int>(duration) % 3600;
+ }
+
+ if (duration >= 60) {
+ int minutes = duration / 60;
+ tokens.emplace_back(Convert::ToString(minutes) + (minutes != 1 ? " minutes" : " minute"));
+ duration = static_cast<int>(duration) % 60;
+ }
+
+ if (duration >= 1) {
+ int seconds = duration;
+ tokens.emplace_back(Convert::ToString(seconds) + (seconds != 1 ? " seconds" : " second"));
+ }
+
+ if (tokens.size() == 0) {
+ int milliseconds = std::floor(duration * 1000);
+ if (milliseconds >= 1)
+ tokens.emplace_back(Convert::ToString(milliseconds) + (milliseconds != 1 ? " milliseconds" : " millisecond"));
+ else
+ tokens.emplace_back("less than 1 millisecond");
+ }
+
+ return NaturalJoin(tokens);
+}
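
Editor's illustration (not part of the upstream patch): a few expected outputs of FormatDuration(), derived from the branches above:

    #include "base/utility.hpp"
    #include <iostream>

    using namespace icinga;

    int main()
    {
        std::cout << Utility::FormatDuration(90061).CStr() << "\n";  /* 1 day, 1 hour, 1 minute and 1 second */
        std::cout << Utility::FormatDuration(0.5).CStr() << "\n";    /* 500 milliseconds */
        std::cout << Utility::FormatDuration(0.0001).CStr() << "\n"; /* less than 1 millisecond */
    }
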
+
+String Utility::FormatDateTime(const char *format, double ts)
+{
+ char timestamp[128];
+ auto tempts = (time_t)ts; /* We don't handle sub-second timestamps here just yet. */
+ tm tmthen;
+
+#ifdef _MSC_VER
+ tm *temp = localtime(&tempts);
+
+ if (!temp) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("localtime")
+ << boost::errinfo_errno(errno));
+ }
+
+ tmthen = *temp;
+#else /* _MSC_VER */
+ if (!localtime_r(&tempts, &tmthen)) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("localtime_r")
+ << boost::errinfo_errno(errno));
+ }
+#endif /* _MSC_VER */
+
+ strftime(timestamp, sizeof(timestamp), format, &tmthen);
+
+ return timestamp;
+}
+
+String Utility::FormatErrorNumber(int code)
+{
+ std::ostringstream msgbuf;
+
+#ifdef _WIN32
+ char *message;
+ String result = "Unknown error.";
+
+ DWORD rc = FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, nullptr, code, 0, (char *)&message,
+ 0, nullptr);
+
+ if (rc != 0) {
+ result = String(message);
+ LocalFree(message);
+
+ /* remove trailing new-line characters */
+ boost::algorithm::trim_right(result);
+ }
+
+ msgbuf << code << ", \"" << result << "\"";
+#else
+ msgbuf << strerror(code);
+#endif
+ return msgbuf.str();
+}
+
+String Utility::EscapeShellCmd(const String& s)
+{
+ String result;
+ size_t prev_quote = String::NPos;
+ int index = -1;
+
+ for (char ch : s) {
+ bool escape = false;
+
+ index++;
+
+#ifdef _WIN32
+ if (ch == '%' || ch == '"' || ch == '\'')
+ escape = true;
+#else /* _WIN32 */
+ if (ch == '"' || ch == '\'') {
+ /* Find a matching closing quotation character. */
+ if (prev_quote == String::NPos && (prev_quote = s.FindFirstOf(ch, index + 1)) != String::NPos)
+ ; /* Empty statement. */
+ else if (prev_quote != String::NPos && s[prev_quote] == ch)
+ prev_quote = String::NPos;
+ else
+ escape = true;
+ }
+#endif /* _WIN32 */
+
+ if (ch == '#' || ch == '&' || ch == ';' || ch == '`' || ch == '|' ||
+ ch == '*' || ch == '?' || ch == '~' || ch == '<' || ch == '>' ||
+ ch == '^' || ch == '(' || ch == ')' || ch == '[' || ch == ']' ||
+ ch == '{' || ch == '}' || ch == '$' || ch == '\\' || ch == '\x0A' ||
+ ch == '\xFF')
+ escape = true;
+
+ if (escape)
+#ifdef _WIN32
+ result += '^';
+#else /* _WIN32 */
+ result += '\\';
+#endif /* _WIN32 */
+
+ result += ch;
+ }
+
+ return result;
+}
+
+String Utility::EscapeShellArg(const String& s)
+{
+ String result;
+
+#ifdef _WIN32
+ result = "\"";
+#else /* _WIN32 */
+ result = "'";
+#endif /* _WIN32 */
+
+ for (char ch : s) {
+#ifdef _WIN32
+ if (ch == '"' || ch == '%') {
+ result += ' ';
+ }
+#else /* _WIN32 */
+ if (ch == '\'')
+ result += "'\\'";
+#endif
+ result += ch;
+ }
+
+#ifdef _WIN32
+ result += '"';
+#else /* _WIN32 */
+ result += '\'';
+#endif /* _WIN32 */
+
+ return result;
+}
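
Editor's illustration (not part of the upstream patch): on POSIX platforms EscapeShellArg() wraps the argument in single quotes and rewrites embedded single quotes as the usual '\'' sequence:

    #include "base/utility.hpp"
    #include <iostream>

    using namespace icinga;

    int main()
    {
        /* Prints: 'it'\''s me' */
        std::cout << Utility::EscapeShellArg("it's me").CStr() << "\n";
    }
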
+
+#ifdef _WIN32
+String Utility::EscapeCreateProcessArg(const String& arg)
+{
+ if (arg.FindFirstOf(" \t\n\v\"") == String::NPos)
+ return arg;
+
+ String result = "\"";
+
+ for (String::ConstIterator it = arg.Begin(); ; it++) {
+ int numBackslashes = 0;
+
+ while (it != arg.End() && *it == '\\') {
+ it++;
+ numBackslashes++;
+ }
+
+ if (it == arg.End()) {
+ result.Append(numBackslashes * 2, '\\');
+ break;
+ } else if (*it == '"') {
+ result.Append(numBackslashes * 2 + 1, '\\');
+ result.Append(1, *it);
+ } else {
+ result.Append(numBackslashes, '\\');
+ result.Append(1, *it);
+ }
+ }
+
+ result += "\"";
+
+ return result;
+}
+#endif /* _WIN32 */
+
+#ifdef _WIN32
+static void WindowsSetThreadName(const char *name)
+{
+ THREADNAME_INFO info;
+ info.dwType = 0x1000;
+ info.szName = name;
+ info.dwThreadID = -1;
+ info.dwFlags = 0;
+
+ __try {
+ RaiseException(MS_VC_EXCEPTION, 0, sizeof(info) / sizeof(ULONG_PTR), (ULONG_PTR *)&info);
+ } __except(EXCEPTION_EXECUTE_HANDLER) {
+ /* Nothing to do here. */
+ }
+}
+#endif /* _WIN32 */
+
+void Utility::SetThreadName(const String& name, bool os)
+{
+ m_ThreadName.reset(new String(name));
+
+ if (!os)
+ return;
+
+#ifdef _WIN32
+ WindowsSetThreadName(name.CStr());
+#endif /* _WIN32 */
+
+#ifdef HAVE_PTHREAD_SET_NAME_NP
+ pthread_set_name_np(pthread_self(), name.CStr());
+#endif /* HAVE_PTHREAD_SET_NAME_NP */
+
+#ifdef HAVE_PTHREAD_SETNAME_NP
+# ifdef __APPLE__
+ pthread_setname_np(name.CStr());
+# else /* __APPLE__ */
+ String tname = name.SubStr(0, 15);
+ pthread_setname_np(pthread_self(), tname.CStr());
+# endif /* __APPLE__ */
+#endif /* HAVE_PTHREAD_SETNAME_NP */
+}
+
+String Utility::GetThreadName()
+{
+ String *name = m_ThreadName.get();
+
+ if (!name) {
+ std::ostringstream idbuf;
+ idbuf << std::this_thread::get_id();
+ return idbuf.str();
+ }
+
+ return *name;
+}
+
+unsigned long Utility::SDBM(const String& str, size_t len)
+{
+ unsigned long hash = 0;
+ size_t current = 0;
+
+ for (char c : str) {
+ if (current >= len)
+ break;
+
+ hash = c + (hash << 6) + (hash << 16) - hash;
+
+ current++;
+ }
+
+ return hash;
+}
+
+String Utility::ParseVersion(const String& v)
+{
+ /*
+ * 2.11.0-0.rc1.1
+ * v2.10.5
+ * r2.10.3
+ * v2.11.0-rc1-58-g7c1f716da
+ */
+ boost::regex pattern("^[vr]?(2\\.\\d+\\.\\d+).*$");
+ boost::smatch result;
+
+ if (boost::regex_search(v.GetData(), result, pattern)) {
+ String res(result[1].first, result[1].second);
+ return res;
+ }
+
+ // Couldn't extract anything, return the unparsed version
+ return v;
+}
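
Editor's illustration (not part of the upstream patch): ParseVersion() strips an optional "v"/"r" prefix and any suffix, and falls back to returning the input unchanged:

    #include "base/utility.hpp"
    #include <iostream>

    using namespace icinga;

    int main()
    {
        std::cout << Utility::ParseVersion("v2.11.0-rc1-58-g7c1f716da").CStr() << "\n"; /* 2.11.0 */
        std::cout << Utility::ParseVersion("2.14.2-1").CStr() << "\n";                  /* 2.14.2 */
        std::cout << Utility::ParseVersion("master").CStr() << "\n";                    /* master (no match) */
    }
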
+
+int Utility::CompareVersion(const String& v1, const String& v2)
+{
+ std::vector<String> tokensv1 = v1.Split(".");
+ std::vector<String> tokensv2 = v2.Split(".");
+
+ for (std::vector<String>::size_type i = 0; i < tokensv2.size() - tokensv1.size(); i++)
+ tokensv1.emplace_back("0");
+
+ for (std::vector<String>::size_type i = 0; i < tokensv1.size() - tokensv2.size(); i++)
+ tokensv2.emplace_back("0");
+
+ for (std::vector<String>::size_type i = 0; i < tokensv1.size(); i++) {
+ if (Convert::ToLong(tokensv2[i]) > Convert::ToLong(tokensv1[i]))
+ return 1;
+ else if (Convert::ToLong(tokensv2[i]) < Convert::ToLong(tokensv1[i]))
+ return -1;
+ }
+
+ return 0;
+}
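
Editor's illustration (not part of the upstream patch): note the sign convention of CompareVersion() — the result is positive when the second argument is the newer version, and missing components are padded with zeroes:

    #include "base/utility.hpp"
    #include <iostream>

    using namespace icinga;

    int main()
    {
        std::cout << Utility::CompareVersion("2.13.0", "2.14.2") << "\n"; /* 1: the second version is newer */
        std::cout << Utility::CompareVersion("2.14.2", "2.13.0") << "\n"; /* -1: the first version is newer */
        std::cout << Utility::CompareVersion("2.14", "2.14.0") << "\n";   /* 0: "2.14" is padded to "2.14.0" */
    }
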
+
+String Utility::GetHostName()
+{
+ char name[255];
+
+ if (gethostname(name, sizeof(name)) < 0)
+ return "localhost";
+
+ return name;
+}
+
+/**
+ * Returns the fully-qualified domain name for the host
+ * we're running on.
+ *
+ * @returns The FQDN.
+ */
+String Utility::GetFQDN()
+{
+ String hostname = GetHostName();
+
+ addrinfo hints;
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC;
+ hints.ai_socktype = SOCK_DGRAM;
+ hints.ai_flags = AI_CANONNAME;
+
+ addrinfo *result;
+ int rc = getaddrinfo(hostname.CStr(), nullptr, &hints, &result);
+
+ if (rc != 0)
+ result = nullptr;
+
+ if (result) {
+ if (strcmp(result->ai_canonname, "localhost") != 0)
+ hostname = result->ai_canonname;
+
+ freeaddrinfo(result);
+ }
+
+ return hostname;
+}
+
+int Utility::Random()
+{
+#ifdef _WIN32
+ return rand();
+#else /* _WIN32 */
+ unsigned int *seed = m_RandSeed.get();
+
+ if (!seed) {
+ seed = new unsigned int(Utility::GetTime());
+ m_RandSeed.reset(seed);
+ }
+
+ return rand_r(seed);
+#endif /* _WIN32 */
+}
+
+tm Utility::LocalTime(time_t ts)
+{
+#ifdef _MSC_VER
+ tm *result = localtime(&ts);
+
+ if (!result) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("localtime")
+ << boost::errinfo_errno(errno));
+ }
+
+ return *result;
+#else /* _MSC_VER */
+ tm result;
+
+ if (!localtime_r(&ts, &result)) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("localtime_r")
+ << boost::errinfo_errno(errno));
+ }
+
+ return result;
+#endif /* _MSC_VER */
+}
+
+bool Utility::PathExists(const String& path)
+{
+ namespace fs = boost::filesystem;
+
+ boost::system::error_code ec;
+
+ return fs::exists(fs::path(path.Begin(), path.End()), ec) && !ec;
+}
+
+time_t Utility::GetFileCreationTime(const String& path)
+{
+ namespace fs = boost::filesystem;
+
+ return fs::last_write_time(boost::lexical_cast<fs::path>(path));
+}
+
+Value Utility::LoadJsonFile(const String& path)
+{
+ std::ifstream fp;
+ fp.open(path.CStr());
+
+ String json((std::istreambuf_iterator<char>(fp)), std::istreambuf_iterator<char>());
+
+ fp.close();
+
+ if (fp.fail())
+ BOOST_THROW_EXCEPTION(std::runtime_error("Could not read JSON file '" + path + "'."));
+
+ return JsonDecode(json);
+}
+
+void Utility::SaveJsonFile(const String& path, int mode, const Value& value)
+{
+ AtomicFile::Write(path, mode, JsonEncode(value));
+}
+
+static void HexEncode(char ch, std::ostream& os)
+{
+ const char *hex_chars = "0123456789ABCDEF";
+
+ os << hex_chars[ch >> 4 & 0x0f];
+ os << hex_chars[ch & 0x0f];
+}
+
+static int HexDecode(char hc)
+{
+ if (hc >= '0' && hc <= '9')
+ return hc - '0';
+ else if (hc >= 'a' && hc <= 'f')
+ return hc - 'a' + 10;
+ else if (hc >= 'A' && hc <= 'F')
+ return hc - 'A' + 10;
+ else
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid hex character."));
+}
+
+String Utility::EscapeString(const String& s, const String& chars, const bool illegal)
+{
+ std::ostringstream result;
+ if (illegal) {
+ for (char ch : s) {
+ if (chars.FindFirstOf(ch) != String::NPos || ch == '%') {
+ result << '%';
+ HexEncode(ch, result);
+ } else
+ result << ch;
+ }
+ } else {
+ for (char ch : s) {
+ if (chars.FindFirstOf(ch) == String::NPos || ch == '%') {
+ result << '%';
+ HexEncode(ch, result);
+ } else
+ result << ch;
+ }
+ }
+
+ return result.str();
+}
+
+String Utility::UnescapeString(const String& s)
+{
+ std::ostringstream result;
+
+ for (String::SizeType i = 0; i < s.GetLength(); i++) {
+ if (s[i] == '%') {
+ if (i + 2 > s.GetLength() - 1)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid escape sequence."));
+
+ char ch = HexDecode(s[i + 1]) * 16 + HexDecode(s[i + 2]);
+ result << ch;
+
+ i += 2;
+ } else
+ result << s[i];
+ }
+
+ return result.str();
+}
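
Editor's illustration (not part of the upstream patch): EscapeString() with illegal=true percent-encodes every character listed in chars (plus '%' itself), and UnescapeString() reverses the transformation:

    #include "base/utility.hpp"
    #include <iostream>

    using namespace icinga;

    int main()
    {
        String escaped = Utility::EscapeString("foo/bar baz", "/ ", true);

        std::cout << escaped.CStr() << "\n";                          /* foo%2Fbar%20baz */
        std::cout << Utility::UnescapeString(escaped).CStr() << "\n"; /* foo/bar baz */
    }
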
+
+#ifndef _WIN32
+static String UnameHelper(char type)
+{
+ struct utsname name;
+ uname(&name);
+
+ switch (type) {
+ case 'm':
+ return (char*)name.machine;
+ case 'n':
+ return (char*)name.nodename;
+ case 'r':
+ return (char*)name.release;
+ case 's':
+ return (char*)name.sysname;
+ case 'v':
+ return (char*)name.version;
+ default:
+ VERIFY(!"Invalid uname query.");
+ }
+}
+#endif /* _WIN32 */
+
+static bool ReleaseHelper(String *platformName, String *platformVersion)
+{
+#ifdef _WIN32
+ if (platformName)
+ *platformName = "Windows";
+
+ if (platformVersion) {
+ *platformVersion = "Vista";
+ if (IsWindowsVistaSP1OrGreater())
+ *platformVersion = "Vista SP1";
+ if (IsWindowsVistaSP2OrGreater())
+ *platformVersion = "Vista SP2";
+ if (IsWindows7OrGreater())
+ *platformVersion = "7";
+ if (IsWindows7SP1OrGreater())
+ *platformVersion = "7 SP1";
+ if (IsWindows8OrGreater())
+ *platformVersion = "8";
+ if (IsWindows8Point1OrGreater())
+ *platformVersion = "8.1 or greater";
+ if (IsWindowsServer())
+ *platformVersion += " (Server)";
+ }
+
+ return true;
+#else /* _WIN32 */
+ if (platformName)
+ *platformName = "Unknown";
+
+ if (platformVersion)
+ *platformVersion = "Unknown";
+
+ /* Modern distributions (systemd-based, Ubuntu, etc.) provide /etc/os-release. */
+ std::ifstream release("/etc/os-release");
+ if (release.is_open()) {
+ std::string release_line;
+ while (getline(release, release_line)) {
+ std::string::size_type pos = release_line.find("=");
+
+ if (pos == std::string::npos)
+ continue;
+
+ std::string key = release_line.substr(0, pos);
+ std::string value = release_line.substr(pos + 1);
+
+ std::string::size_type firstQuote = value.find("\"");
+
+ if (firstQuote != std::string::npos)
+ value.erase(0, firstQuote + 1);
+
+ std::string::size_type lastQuote = value.rfind("\"");
+
+ if (lastQuote != std::string::npos)
+ value.erase(lastQuote);
+
+ if (platformName && key == "NAME")
+ *platformName = value;
+
+ if (platformVersion && key == "VERSION")
+ *platformVersion = value;
+ }
+
+ return true;
+ }
+
+ /* You are using a distribution which supports LSB. */
+ FILE *fp = popen("type lsb_release >/dev/null 2>&1 && lsb_release -s -i 2>&1", "r");
+
+ if (fp) {
+ std::ostringstream msgbuf;
+ char line[1024];
+ while (fgets(line, sizeof(line), fp))
+ msgbuf << line;
+ int status = pclose(fp);
+ if (WEXITSTATUS(status) == 0) {
+ if (platformName)
+ *platformName = msgbuf.str();
+ }
+ }
+
+ fp = popen("type lsb_release >/dev/null 2>&1 && lsb_release -s -r 2>&1", "r");
+
+ if (fp) {
+ std::ostringstream msgbuf;
+ char line[1024];
+ while (fgets(line, sizeof(line), fp))
+ msgbuf << line;
+ int status = pclose(fp);
+ if (WEXITSTATUS(status) == 0) {
+ if (platformVersion)
+ *platformVersion = msgbuf.str();
+ }
+ }
+
+ /* OS X */
+ fp = popen("type sw_vers >/dev/null 2>&1 && sw_vers -productName 2>&1", "r");
+
+ if (fp) {
+ std::ostringstream msgbuf;
+ char line[1024];
+ while (fgets(line, sizeof(line), fp))
+ msgbuf << line;
+ int status = pclose(fp);
+ if (WEXITSTATUS(status) == 0) {
+ String info = msgbuf.str();
+ info = info.Trim();
+
+ if (platformName)
+ *platformName = info;
+ }
+ }
+
+ fp = popen("type sw_vers >/dev/null 2>&1 && sw_vers -productVersion 2>&1", "r");
+
+ if (fp) {
+ std::ostringstream msgbuf;
+ char line[1024];
+ while (fgets(line, sizeof(line), fp))
+ msgbuf << line;
+ int status = pclose(fp);
+ if (WEXITSTATUS(status) == 0) {
+ String info = msgbuf.str();
+ info = info.Trim();
+
+ if (platformVersion)
+ *platformVersion = info;
+
+ return true;
+ }
+ }
+
+ /* CentOS/RHEL < 7 */
+ release.close();
+ release.open("/etc/redhat-release");
+ if (release.is_open()) {
+ std::string release_line;
+ getline(release, release_line);
+
+ String info = release_line;
+
+ /* example: Red Hat Enterprise Linux Server release 6.7 (Santiago) */
+ if (platformName)
+ *platformName = info.SubStr(0, info.Find("release") - 1);
+
+ if (platformVersion)
+ *platformVersion = info.SubStr(info.Find("release") + 8);
+
+ return true;
+ }
+
+ /* SLES 11 SP3, openSUSE, etc. */
+ release.close();
+ release.open("/etc/SuSE-release");
+ if (release.is_open()) {
+ std::string release_line;
+ getline(release, release_line);
+
+ String info = release_line;
+
+ if (platformName)
+ *platformName = info.SubStr(0, info.FindFirstOf(" "));
+
+ if (platformVersion)
+ *platformVersion = info.SubStr(info.FindFirstOf(" ") + 1);
+
+ return true;
+ }
+
+ /* Just give up */
+ return false;
+#endif /* _WIN32 */
+}
+
+String Utility::GetPlatformKernel()
+{
+#ifdef _WIN32
+ return "Windows";
+#else /* _WIN32 */
+ return UnameHelper('s');
+#endif /* _WIN32 */
+}
+
+String Utility::GetPlatformKernelVersion()
+{
+#ifdef _WIN32
+ OSVERSIONINFO info;
+ info.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
+ GetVersionEx(&info);
+
+ std::ostringstream msgbuf;
+ msgbuf << info.dwMajorVersion << "." << info.dwMinorVersion;
+
+ return msgbuf.str();
+#else /* _WIN32 */
+ return UnameHelper('r');
+#endif /* _WIN32 */
+}
+
+String Utility::GetPlatformName()
+{
+ String platformName;
+ if (!ReleaseHelper(&platformName, nullptr))
+ return "Unknown";
+ return platformName;
+}
+
+String Utility::GetPlatformVersion()
+{
+ String platformVersion;
+ if (!ReleaseHelper(nullptr, &platformVersion))
+ return "Unknown";
+ return platformVersion;
+}
+
+String Utility::GetPlatformArchitecture()
+{
+#ifdef _WIN32
+ SYSTEM_INFO info;
+ GetNativeSystemInfo(&info);
+ switch (info.wProcessorArchitecture) {
+ case PROCESSOR_ARCHITECTURE_AMD64:
+ return "x86_64";
+ case PROCESSOR_ARCHITECTURE_ARM:
+ return "arm";
+ case PROCESSOR_ARCHITECTURE_INTEL:
+ return "x86";
+ default:
+ return "unknown";
+ }
+#else /* _WIN32 */
+ return UnameHelper('m');
+#endif /* _WIN32 */
+}
+
+const char l_Utf8Replacement[] = "\xEF\xBF\xBD";
+
+String Utility::ValidateUTF8(const String& input)
+{
+ std::string output;
+ output.reserve(input.GetLength());
+
+ try {
+ utf8::replace_invalid(input.Begin(), input.End(), std::back_inserter(output));
+ } catch (const utf8::not_enough_room&) {
+ output.insert(output.end(), (const char*)l_Utf8Replacement, (const char*)l_Utf8Replacement + 3);
+ }
+
+ return String(std::move(output));
+}
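
Editor's illustration (not part of the upstream patch): ValidateUTF8() keeps valid input unchanged and substitutes the U+FFFD replacement character for invalid byte sequences:

    #include "base/utility.hpp"
    #include <iostream>

    using namespace icinga;

    int main()
    {
        /* "\xFF" is not valid UTF-8; the output is "café " followed by U+FFFD. */
        String fixed = Utility::ValidateUTF8("caf\xC3\xA9 \xFF");
        std::cout << fixed.CStr() << "\n";
    }
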
+
+#ifdef _WIN32
+/* mkstemp extracted from libc/sysdeps/posix/tempname.c. Copyright
+ * (C) 1991-1999, 2000, 2001, 2006 Free Software Foundation, Inc.
+ *
+ * The GNU C Library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ */
+
+#define _O_EXCL 0x0400
+#define _O_CREAT 0x0100
+#define _O_RDWR 0x0002
+#define O_EXCL _O_EXCL
+#define O_CREAT _O_CREAT
+#define O_RDWR _O_RDWR
+
+static const char letters[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
+
+/* Generate a temporary file name based on TMPL. TMPL must match the
+ * rules for mk[s]temp (i.e. end in "XXXXXX"). The name constructed
+ * does not exist at the time of the call to mkstemp. TMPL is
+ * overwritten with the result.
+ */
+int Utility::MksTemp(char *tmpl)
+{
+ int len;
+ char *XXXXXX;
+ static unsigned long long value;
+ unsigned long long random_time_bits;
+ unsigned int count;
+ int fd = -1;
+ int save_errno = errno;
+
+ /* A lower bound on the number of temporary files to attempt to
+ * generate. The maximum total number of temporary file names that
+ * can exist for a given template is 62**6. It should never be
+ * necessary to try all these combinations. Instead if a reasonable
+ * number of names is tried (we define reasonable as 62**3) fail to
+ * give the system administrator the chance to remove the problems.
+ */
+#define ATTEMPTS_MIN (62 * 62 * 62)
+
+ /* The number of times to attempt to generate a temporary file
+ * To conform to POSIX, this must be no smaller than TMP_MAX.
+ */
+#if ATTEMPTS_MIN < TMP_MAX
+ unsigned int attempts = TMP_MAX;
+#else
+ unsigned int attempts = ATTEMPTS_MIN;
+#endif
+
+ len = strlen (tmpl);
+ if (len < 6 || strcmp (&tmpl[len - 6], "XXXXXX")) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ /* This is where the Xs start. */
+ XXXXXX = &tmpl[len - 6];
+
+ /* Get some more or less random data. */
+ {
+ SYSTEMTIME stNow;
+ FILETIME ftNow;
+
+ // get system time
+ GetSystemTime(&stNow);
+ stNow.wMilliseconds = 500;
+ if (!SystemTimeToFileTime(&stNow, &ftNow)) {
+ errno = -1;
+ return -1;
+ }
+
+ random_time_bits = (((unsigned long long)ftNow.dwHighDateTime << 32) | (unsigned long long)ftNow.dwLowDateTime);
+ }
+
+ value += random_time_bits ^ (unsigned long long)GetCurrentThreadId();
+
+ for (count = 0; count < attempts; value += 7777, ++count) {
+ unsigned long long v = value;
+
+ /* Fill in the random bits. */
+ XXXXXX[0] = letters[v % 62];
+ v /= 62;
+ XXXXXX[1] = letters[v % 62];
+ v /= 62;
+ XXXXXX[2] = letters[v % 62];
+ v /= 62;
+ XXXXXX[3] = letters[v % 62];
+ v /= 62;
+ XXXXXX[4] = letters[v % 62];
+ v /= 62;
+ XXXXXX[5] = letters[v % 62];
+
+ fd = open(tmpl, O_RDWR | O_CREAT | O_EXCL, _S_IREAD | _S_IWRITE);
+ if (fd >= 0) {
+ errno = save_errno;
+ return fd;
+ } else if (errno != EEXIST)
+ return -1;
+ }
+
+ /* We got out of the loop because we ran out of combinations to try. */
+ errno = EEXIST;
+ return -1;
+}
+
+String Utility::GetIcingaInstallPath()
+{
+ char szProduct[39];
+
+ for (int i = 0; MsiEnumProducts(i, szProduct) == ERROR_SUCCESS; i++) {
+ char szName[128];
+ DWORD cbName = sizeof(szName);
+ if (MsiGetProductInfo(szProduct, INSTALLPROPERTY_INSTALLEDPRODUCTNAME, szName, &cbName) != ERROR_SUCCESS)
+ continue;
+
+ if (strcmp(szName, "Icinga 2") != 0)
+ continue;
+
+ char szLocation[1024];
+ DWORD cbLocation = sizeof(szLocation);
+ if (MsiGetProductInfo(szProduct, INSTALLPROPERTY_INSTALLLOCATION, szLocation, &cbLocation) == ERROR_SUCCESS)
+ return szLocation;
+ }
+
+ return "";
+}
+
+String Utility::GetIcingaDataPath()
+{
+ char path[MAX_PATH];
+ if (!SUCCEEDED(SHGetFolderPath(nullptr, CSIDL_COMMON_APPDATA, nullptr, 0, path)))
+ return "";
+ return String(path) + "\\icinga2";
+}
+
+#endif /* _WIN32 */
+
+/**
+ * Retrieves the value of the given environment variable.
+ *
+ * @param env Environment variable name.
+ * @returns The variable's value, or an empty string if it is not set.
+ */
+String Utility::GetFromEnvironment(const String& env)
+{
+ const char *envValue = getenv(env.CStr());
+
+ if (envValue == NULL)
+ return String();
+ else
+ return String(envValue);
+}
+
+/**
+ * Compare the password entered by a client with the actual password.
+ * The comparison is safe against timing attacks.
+ */
+bool Utility::ComparePasswords(const String& enteredPassword, const String& actualPassword)
+{
+ volatile const char * volatile enteredPasswordCStr = enteredPassword.CStr();
+ volatile size_t enteredPasswordLen = enteredPassword.GetLength();
+
+ volatile const char * volatile actualPasswordCStr = actualPassword.CStr();
+ volatile size_t actualPasswordLen = actualPassword.GetLength();
+
+ volatile uint_fast8_t result = enteredPasswordLen == actualPasswordLen;
+
+ if (result) {
+ auto cStr (actualPasswordCStr);
+ auto len (actualPasswordLen);
+
+ actualPasswordCStr = cStr;
+ actualPasswordLen = len;
+ } else {
+ auto cStr (enteredPasswordCStr);
+ auto len (enteredPasswordLen);
+
+ actualPasswordCStr = cStr;
+ actualPasswordLen = len;
+ }
+
+ for (volatile size_t i = 0; i < enteredPasswordLen; ++i) {
+ result &= uint_fast8_t(enteredPasswordCStr[i] == actualPasswordCStr[i]);
+ }
+
+ return result;
+}
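
Editor's note (not part of the upstream patch): the volatile gymnastics above keep the comparison time independent of where the first mismatching byte occurs. A minimal usage sketch, assuming a hypothetical CheckApiPassword() caller:

    #include "base/utility.hpp"

    using namespace icinga;

    /* Hypothetical authentication check: for secrets, prefer ComparePasswords()
     * over operator==, so response timing does not leak how many leading
     * characters of the guess were correct. */
    static bool CheckApiPassword(const String& enteredPassword, const String& configuredPassword)
    {
        return Utility::ComparePasswords(enteredPassword, configuredPassword);
    }
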
diff --git a/lib/base/utility.hpp b/lib/base/utility.hpp
new file mode 100644
index 0000000..47b68d2
--- /dev/null
+++ b/lib/base/utility.hpp
@@ -0,0 +1,200 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef UTILITY_H
+#define UTILITY_H
+
+#include "base/i2-base.hpp"
+#include "base/string.hpp"
+#include "base/array.hpp"
+#include "base/threadpool.hpp"
+#include "base/tlsutility.hpp"
+#include <boost/thread/tss.hpp>
+#include <openssl/sha.h>
+#include <functional>
+#include <typeinfo>
+#include <vector>
+
+namespace icinga
+{
+
+#ifdef _WIN32
+#define MS_VC_EXCEPTION 0x406D1388
+
+# pragma pack(push, 8)
+struct THREADNAME_INFO
+{
+ DWORD dwType;
+ LPCSTR szName;
+ DWORD dwThreadID;
+ DWORD dwFlags;
+};
+# pragma pack(pop)
+#endif
+
+enum GlobType
+{
+ GlobFile = 1,
+ GlobDirectory = 2
+};
+
+/**
+ * Helper functions.
+ *
+ * @ingroup base
+ */
+class Utility
+{
+public:
+ static String DemangleSymbolName(const String& sym);
+ static String GetTypeName(const std::type_info& ti);
+ static String GetSymbolName(const void *addr);
+
+ static bool Match(const String& pattern, const String& text);
+ static bool CidrMatch(const String& pattern, const String& ip);
+
+ static String DirName(const String& path);
+ static String BaseName(const String& path);
+
+ static void NullDeleter(void *);
+
+ static double GetTime();
+
+ static pid_t GetPid();
+
+ static void Sleep(double timeout);
+
+ static String NewUniqueID();
+
+ static bool Glob(const String& pathSpec, const std::function<void (const String&)>& callback, int type = GlobFile | GlobDirectory);
+ static bool GlobRecursive(const String& path, const String& pattern, const std::function<void (const String&)>& callback, int type = GlobFile | GlobDirectory);
+ static void MkDir(const String& path, int mode);
+ static void MkDirP(const String& path, int mode);
+ static bool SetFileOwnership(const String& file, const String& user, const String& group);
+
+ static void QueueAsyncCallback(const std::function<void ()>& callback, SchedulerPolicy policy = DefaultScheduler);
+
+ static String NaturalJoin(const std::vector<String>& tokens);
+ static String Join(const Array::Ptr& tokens, char separator, bool escapeSeparator = true);
+
+ static String FormatDuration(double duration);
+ static String FormatDateTime(const char *format, double ts);
+ static String FormatErrorNumber(int code);
+
+#ifndef _WIN32
+ static void SetNonBlocking(int fd, bool nb = true);
+ static void SetCloExec(int fd, bool cloexec = true);
+
+ static void CloseAllFDs(const std::vector<int>& except, std::function<void(int)> onClose = nullptr);
+#endif /* _WIN32 */
+
+ static void SetNonBlockingSocket(SOCKET s, bool nb = true);
+
+ static String EscapeShellCmd(const String& s);
+ static String EscapeShellArg(const String& s);
+#ifdef _WIN32
+ static String EscapeCreateProcessArg(const String& arg);
+#endif /* _WIN32 */
+
+ static String EscapeString(const String& s, const String& chars, const bool illegal);
+ static String UnescapeString(const String& s);
+
+ static void SetThreadName(const String& name, bool os = true);
+ static String GetThreadName();
+
+ static unsigned long SDBM(const String& str, size_t len = String::NPos);
+
+ static String ParseVersion(const String& v);
+ static int CompareVersion(const String& v1, const String& v2);
+
+ static int Random();
+
+ static String GetHostName();
+ static String GetFQDN();
+
+ static tm LocalTime(time_t ts);
+
+ static bool PathExists(const String& path);
+ static time_t GetFileCreationTime(const String& path);
+
+ static void Remove(const String& path);
+ static void RemoveDirRecursive(const String& path);
+ static void CopyFile(const String& source, const String& target);
+ static void RenameFile(const String& source, const String& target);
+
+ static Value LoadJsonFile(const String& path);
+ static void SaveJsonFile(const String& path, int mode, const Value& value);
+
+ static String GetPlatformKernel();
+ static String GetPlatformKernelVersion();
+ static String GetPlatformName();
+ static String GetPlatformVersion();
+ static String GetPlatformArchitecture();
+
+ static String ValidateUTF8(const String& input);
+
+#ifdef _WIN32
+ static int MksTemp(char *tmpl);
+#endif /* _WIN32 */
+
+#ifdef _WIN32
+ static String GetIcingaInstallPath();
+ static String GetIcingaDataPath();
+#endif /* _WIN32 */
+
+ static String GetFromEnvironment(const String& env);
+
+ static bool ComparePasswords(const String& enteredPassword, const String& actualPassword);
+
+#ifdef I2_DEBUG
+ static void SetTime(double);
+ static void IncrementTime(double);
+#endif /* I2_DEBUG */
+
+ /**
+ * TruncateUsingHash truncates a given string to an allowed maximum length while avoiding collisions in the output
+ * using a hash function (SHA1).
+ *
+ * For inputs shorter than the maximum output length, the output will be the same as the input. If the input has at
+ * least the maximum output length, it is hashed using SHA1 and the output has the format "A...B" where A is a prefix
+ * of the input and B is the hex-encoded SHA1 hash of the input. The length of A is chosen so that the result has
+ * the maximum allowed output length.
+ *
+ * @tparam maxLength Maximum length of the output string (must be at least 44)
+ * @param in String to truncate
+ * @return A truncated string derived from in of at most length maxLength
+ */
+ template<size_t maxLength>
+ static String TruncateUsingHash(const String &in) {
+ /*
+ * Note: be careful when changing this function as it is used to derive file names that should not change
+ * between versions or would need special handling if they do (/var/lib/icinga2/api/packages/_api).
+ */
+
+ const size_t sha1HexLength = SHA_DIGEST_LENGTH*2;
+ static_assert(maxLength >= 1 + 3 + sha1HexLength,
+ "maxLength must be at least 44 to hold one character, '...', and a hex-encoded SHA1 hash");
+
+ /* If the input is shorter than the limit, no truncation is needed */
+ if (in.GetLength() < maxLength) {
+ return in;
+ }
+
+ const char *trunc = "...";
+
+ return in.SubStr(0, maxLength - sha1HexLength - strlen(trunc)) + trunc + SHA1(in);
+ }
+
+private:
+ Utility();
+
+#ifdef I2_DEBUG
+ static double m_DebugTime;
+#endif /* I2_DEBUG */
+
+ static boost::thread_specific_ptr<String> m_ThreadName;
+ static boost::thread_specific_ptr<unsigned int> m_RandSeed;
+};
+
+}
+
+#endif /* UTILITY_H */
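
Editor's illustration (not part of the upstream patch): TruncateUsingHash<maxLength>() leaves short inputs untouched and maps long ones to a fixed-length "prefix...sha1hex" form:

    #include "base/utility.hpp"
    #include <iostream>

    using namespace icinga;

    int main()
    {
        String shortName = "example.localdomain";
        String longName(300, 'a');

        std::cout << Utility::TruncateUsingHash<80>(shortName).CStr() << "\n";     /* unchanged */
        std::cout << Utility::TruncateUsingHash<80>(longName).GetLength() << "\n"; /* 80 */
    }
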
diff --git a/lib/base/value-operators.cpp b/lib/base/value-operators.cpp
new file mode 100644
index 0000000..d00c3e2
--- /dev/null
+++ b/lib/base/value-operators.cpp
@@ -0,0 +1,719 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/value.hpp"
+#include "base/array.hpp"
+#include "base/dictionary.hpp"
+#include "base/datetime.hpp"
+#include "base/convert.hpp"
+#include "base/utility.hpp"
+#include "base/objectlock.hpp"
+#include <boost/lexical_cast.hpp>
+
+using namespace icinga;
+
+Value::operator double() const
+{
+ const double *value = boost::get<double>(&m_Value);
+
+ if (value)
+ return *value;
+
+ const bool *fvalue = boost::get<bool>(&m_Value);
+
+ if (fvalue)
+ return *fvalue;
+
+ if (IsEmpty())
+ return 0;
+
+ try {
+ return boost::lexical_cast<double>(m_Value);
+ } catch (const std::exception&) {
+ std::ostringstream msgbuf;
+ msgbuf << "Can't convert '" << *this << "' to a floating point number.";
+ BOOST_THROW_EXCEPTION(std::invalid_argument(msgbuf.str()));
+ }
+}
+
+Value::operator String() const
+{
+ Object *object;
+
+ switch (GetType()) {
+ case ValueEmpty:
+ return String();
+ case ValueNumber:
+ return Convert::ToString(boost::get<double>(m_Value));
+ case ValueBoolean:
+ if (boost::get<bool>(m_Value))
+ return "true";
+ else
+ return "false";
+ case ValueString:
+ return boost::get<String>(m_Value);
+ case ValueObject:
+ object = boost::get<Object::Ptr>(m_Value).get();
+ return object->ToString();
+ default:
+ BOOST_THROW_EXCEPTION(std::runtime_error("Unknown value type."));
+ }
+}
+
+std::ostream& icinga::operator<<(std::ostream& stream, const Value& value)
+{
+ if (value.IsBoolean())
+ stream << static_cast<int>(value);
+ else
+ stream << static_cast<String>(value);
+
+ return stream;
+}
+
+std::istream& icinga::operator>>(std::istream& stream, Value& value)
+{
+ String tstr;
+ stream >> tstr;
+ value = tstr;
+ return stream;
+}
+
+bool Value::operator==(bool rhs) const
+{
+ return *this == Value(rhs);
+}
+
+bool Value::operator!=(bool rhs) const
+{
+ return !(*this == rhs);
+}
+
+bool Value::operator==(int rhs) const
+{
+ return *this == Value(rhs);
+}
+
+bool Value::operator!=(int rhs) const
+{
+ return !(*this == rhs);
+}
+
+bool Value::operator==(double rhs) const
+{
+ return *this == Value(rhs);
+}
+
+bool Value::operator!=(double rhs) const
+{
+ return !(*this == rhs);
+}
+
+bool Value::operator==(const char *rhs) const
+{
+ return static_cast<String>(*this) == rhs;
+}
+
+bool Value::operator!=(const char *rhs) const
+{
+ return !(*this == rhs);
+}
+
+bool Value::operator==(const String& rhs) const
+{
+ return static_cast<String>(*this) == rhs;
+}
+
+bool Value::operator!=(const String& rhs) const
+{
+ return !(*this == rhs);
+}
+
+bool Value::operator==(const Value& rhs) const
+{
+ if (IsNumber() && rhs.IsNumber())
+ return Get<double>() == rhs.Get<double>();
+ else if ((IsBoolean() || IsNumber()) && (rhs.IsBoolean() || rhs.IsNumber()) && !(IsEmpty() && rhs.IsEmpty()))
+ return static_cast<double>(*this) == static_cast<double>(rhs);
+
+ if (IsString() && rhs.IsString())
+ return Get<String>() == rhs.Get<String>();
+ else if ((IsString() || IsEmpty()) && (rhs.IsString() || rhs.IsEmpty()) && !(IsEmpty() && rhs.IsEmpty()))
+ return static_cast<String>(*this) == static_cast<String>(rhs);
+
+ if (IsEmpty() != rhs.IsEmpty())
+ return false;
+
+ if (IsEmpty())
+ return true;
+
+ if (IsObject() != rhs.IsObject())
+ return false;
+
+ if (IsObject()) {
+ if (IsObjectType<DateTime>() && rhs.IsObjectType<DateTime>()) {
+ DateTime::Ptr dt1 = *this;
+ DateTime::Ptr dt2 = rhs;
+
+ return dt1->GetValue() == dt2->GetValue();
+ }
+
+ if (IsObjectType<Array>() && rhs.IsObjectType<Array>()) {
+ Array::Ptr arr1 = *this;
+ Array::Ptr arr2 = rhs;
+
+ if (arr1 == arr2)
+ return true;
+
+ if (arr1->GetLength() != arr2->GetLength())
+ return false;
+
+ for (Array::SizeType i = 0; i < arr1->GetLength(); i++) {
+ if (arr1->Get(i) != arr2->Get(i))
+ return false;
+ }
+
+ return true;
+ }
+
+ return Get<Object::Ptr>() == rhs.Get<Object::Ptr>();
+ }
+
+ return false;
+}
+
+bool Value::operator!=(const Value& rhs) const
+{
+ return !(*this == rhs);
+}
+
+Value icinga::operator+(const Value& lhs, const char *rhs)
+{
+ return lhs + Value(rhs);
+}
+
+Value icinga::operator+(const char *lhs, const Value& rhs)
+{
+ return Value(lhs) + rhs;
+}
+
+Value icinga::operator+(const Value& lhs, const String& rhs)
+{
+ return lhs + Value(rhs);
+}
+
+Value icinga::operator+(const String& lhs, const Value& rhs)
+{
+ return Value(lhs) + rhs;
+}
+
+Value icinga::operator+(const Value& lhs, const Value& rhs)
+{
+ if ((lhs.IsEmpty() || lhs.IsNumber()) && !lhs.IsString() && (rhs.IsEmpty() || rhs.IsNumber()) && !rhs.IsString() && !(lhs.IsEmpty() && rhs.IsEmpty()))
+ return static_cast<double>(lhs) + static_cast<double>(rhs);
+ if ((lhs.IsString() || lhs.IsEmpty() || lhs.IsNumber()) && (rhs.IsString() || rhs.IsEmpty() || rhs.IsNumber()) && (!(lhs.IsEmpty() && rhs.IsEmpty()) || lhs.IsString() || rhs.IsString()))
+ return static_cast<String>(lhs) + static_cast<String>(rhs);
+ else if ((lhs.IsNumber() || lhs.IsEmpty()) && (rhs.IsNumber() || rhs.IsEmpty()) && !(lhs.IsEmpty() && rhs.IsEmpty()))
+ return static_cast<double>(lhs) + static_cast<double>(rhs);
+ else if (lhs.IsObjectType<DateTime>() && rhs.IsNumber())
+ return new DateTime(Convert::ToDateTimeValue(lhs) + rhs);
+ else if ((lhs.IsObjectType<Array>() || lhs.IsEmpty()) && (rhs.IsObjectType<Array>() || rhs.IsEmpty()) && !(lhs.IsEmpty() && rhs.IsEmpty())) {
+ Array::Ptr result = new Array();
+ if (!lhs.IsEmpty())
+ static_cast<Array::Ptr>(lhs)->CopyTo(result);
+ if (!rhs.IsEmpty())
+ static_cast<Array::Ptr>(rhs)->CopyTo(result);
+ return result;
+ } else if ((lhs.IsObjectType<Dictionary>() || lhs.IsEmpty()) && (rhs.IsObjectType<Dictionary>() || rhs.IsEmpty()) && !(lhs.IsEmpty() && rhs.IsEmpty())) {
+ Dictionary::Ptr result = new Dictionary();
+ if (!lhs.IsEmpty())
+ static_cast<Dictionary::Ptr>(lhs)->CopyTo(result);
+ if (!rhs.IsEmpty())
+ static_cast<Dictionary::Ptr>(rhs)->CopyTo(result);
+ return result;
+ } else {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Operator + cannot be applied to values of type '" + lhs.GetTypeName() + "' and '" + rhs.GetTypeName() + "'"));
+ }
+}
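
Editor's illustration (not part of the upstream patch): a few concrete cases of the operator+ dispatch above (Array::Add() is assumed to be the usual Icinga Array API):

    #include "base/value.hpp"
    #include "base/array.hpp"
    #include <iostream>

    using namespace icinga;

    int main()
    {
        Value n = Value(1) + Value(2);     /* both numeric: 3 */
        Value s = Value("foo") + Value(3); /* a string operand forces concatenation: "foo3" */

        Array::Ptr a1 = new Array();
        a1->Add(1);
        a1->Add(2);

        Array::Ptr a2 = new Array();
        a2->Add(3);

        Array::Ptr merged = Value(a1) + Value(a2); /* both arrays are copied into a new one */

        std::cout << static_cast<String>(n).CStr() << " "
                  << static_cast<String>(s).CStr() << " "
                  << merged->GetLength() << "\n";  /* 3 foo3 3 */
    }
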
+
+Value icinga::operator+(const Value& lhs, double rhs)
+{
+ return lhs + Value(rhs);
+}
+
+Value icinga::operator+(double lhs, const Value& rhs)
+{
+ return Value(lhs) + rhs;
+}
+
+Value icinga::operator+(const Value& lhs, int rhs)
+{
+ return lhs + Value(rhs);
+}
+
+Value icinga::operator+(int lhs, const Value& rhs)
+{
+ return Value(lhs) + rhs;
+}
+
+Value icinga::operator-(const Value& lhs, const Value& rhs)
+{
+ if ((lhs.IsNumber() || lhs.IsEmpty()) && !lhs.IsString() && (rhs.IsNumber() || rhs.IsEmpty()) && !rhs.IsString() && !(lhs.IsEmpty() && rhs.IsEmpty()))
+ return static_cast<double>(lhs) - static_cast<double>(rhs);
+ else if (lhs.IsObjectType<DateTime>() && rhs.IsNumber())
+ return new DateTime(Convert::ToDateTimeValue(lhs) - rhs);
+ else if (lhs.IsObjectType<DateTime>() && rhs.IsObjectType<DateTime>())
+ return Convert::ToDateTimeValue(lhs) - Convert::ToDateTimeValue(rhs);
+ else if ((lhs.IsObjectType<DateTime>() || lhs.IsEmpty()) && (rhs.IsObjectType<DateTime>() || rhs.IsEmpty()) && !(lhs.IsEmpty() && rhs.IsEmpty()))
+ return new DateTime(Convert::ToDateTimeValue(lhs) - Convert::ToDateTimeValue(rhs));
+ else if ((lhs.IsObjectType<Array>() || lhs.IsEmpty()) && (rhs.IsObjectType<Array>() || rhs.IsEmpty()) && !(lhs.IsEmpty() && rhs.IsEmpty())) {
+ if (lhs.IsEmpty())
+ return new Array();
+
+ ArrayData result;
+ Array::Ptr left = lhs;
+ Array::Ptr right = rhs;
+
+ ObjectLock olock(left);
+ for (const Value& lv : left) {
+ bool found = false;
+ ObjectLock xlock(right);
+ for (const Value& rv : right) {
+ if (lv == rv) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ continue;
+
+ result.push_back(lv);
+ }
+
+ return new Array(std::move(result));
+ } else
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Operator - cannot be applied to values of type '" + lhs.GetTypeName() + "' and '" + rhs.GetTypeName() + "'"));
+}
+
+Value icinga::operator-(const Value& lhs, double rhs)
+{
+ return lhs - Value(rhs);
+}
+
+Value icinga::operator-(double lhs, const Value& rhs)
+{
+ return Value(lhs) - rhs;
+}
+
+Value icinga::operator-(const Value& lhs, int rhs)
+{
+ return lhs - Value(rhs);
+}
+
+Value icinga::operator-(int lhs, const Value& rhs)
+{
+ return Value(lhs) - rhs;
+}
+
+Value icinga::operator*(const Value& lhs, const Value& rhs)
+{
+ if ((lhs.IsNumber() || lhs.IsEmpty()) && (rhs.IsNumber() || rhs.IsEmpty()) && !(lhs.IsEmpty() && rhs.IsEmpty()))
+ return static_cast<double>(lhs) * static_cast<double>(rhs);
+ else
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Operator * cannot be applied to values of type '" + lhs.GetTypeName() + "' and '" + rhs.GetTypeName() + "'"));
+}
+
+Value icinga::operator*(const Value& lhs, double rhs)
+{
+ return lhs * Value(rhs);
+}
+
+Value icinga::operator*(double lhs, const Value& rhs)
+{
+ return Value(lhs) * rhs;
+}
+
+Value icinga::operator*(const Value& lhs, int rhs)
+{
+ return lhs * Value(rhs);
+}
+
+Value icinga::operator*(int lhs, const Value& rhs)
+{
+ return Value(lhs) * rhs;
+}
+
+Value icinga::operator/(const Value& lhs, const Value& rhs)
+{
+ if (rhs.IsEmpty())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Right-hand side argument for operator / is Empty."));
+ else if ((lhs.IsEmpty() || lhs.IsNumber()) && rhs.IsNumber()) {
+ if (static_cast<double>(rhs) == 0)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Right-hand side argument for operator / is 0."));
+
+ return static_cast<double>(lhs) / static_cast<double>(rhs);
+ } else
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Operator / cannot be applied to values of type '" + lhs.GetTypeName() + "' and '" + rhs.GetTypeName() + "'"));
+}
+
+Value icinga::operator/(const Value& lhs, double rhs)
+{
+ return lhs / Value(rhs);
+}
+
+Value icinga::operator/(double lhs, const Value& rhs)
+{
+ return Value(lhs) / rhs;
+}
+
+Value icinga::operator/(const Value& lhs, int rhs)
+{
+ return lhs / Value(rhs);
+}
+
+Value icinga::operator/(int lhs, const Value& rhs)
+{
+ return Value(lhs) / rhs;
+}
+
+Value icinga::operator%(const Value& lhs, const Value& rhs)
+{
+ if (rhs.IsEmpty())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Right-hand side argument for operator % is Empty."));
+ else if ((rhs.IsNumber() || lhs.IsNumber()) && rhs.IsNumber()) {
+ if (static_cast<double>(rhs) == 0)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Right-hand side argument for operator % is 0."));
+
+ return static_cast<int>(lhs) % static_cast<int>(rhs);
+ } else
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Operator % cannot be applied to values of type '" + lhs.GetTypeName() + "' and '" + rhs.GetTypeName() + "'"));
+}
+
+Value icinga::operator%(const Value& lhs, double rhs)
+{
+ return lhs % Value(rhs);
+}
+
+Value icinga::operator%(double lhs, const Value& rhs)
+{
+ return Value(lhs) % rhs;
+}
+
+Value icinga::operator%(const Value& lhs, int rhs)
+{
+ return lhs % Value(rhs);
+}
+
+Value icinga::operator%(int lhs, const Value& rhs)
+{
+ return Value(lhs) % rhs;
+}
+
+Value icinga::operator^(const Value& lhs, const Value& rhs)
+{
+ if ((lhs.IsNumber() || lhs.IsEmpty()) && (rhs.IsNumber() || rhs.IsEmpty()) && !(lhs.IsEmpty() && rhs.IsEmpty()))
+ return static_cast<int>(lhs) ^ static_cast<int>(rhs);
+ else
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Operator ^ cannot be applied to values of type '" + lhs.GetTypeName() + "' and '" + rhs.GetTypeName() + "'"));
+}
+
+Value icinga::operator^(const Value& lhs, double rhs)
+{
+ return lhs ^ Value(rhs);
+}
+
+Value icinga::operator^(double lhs, const Value& rhs)
+{
+ return Value(lhs) ^ rhs;
+}
+
+Value icinga::operator^(const Value& lhs, int rhs)
+{
+ return lhs ^ Value(rhs);
+}
+
+Value icinga::operator^(int lhs, const Value& rhs)
+{
+ return Value(lhs) ^ rhs;
+}
+
+Value icinga::operator&(const Value& lhs, const Value& rhs)
+{
+ if ((lhs.IsNumber() || lhs.IsEmpty()) && (rhs.IsNumber() || rhs.IsEmpty()) && !(lhs.IsEmpty() && rhs.IsEmpty()))
+ return static_cast<int>(lhs) & static_cast<int>(rhs);
+ else
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Operator & cannot be applied to values of type '" + lhs.GetTypeName() + "' and '" + rhs.GetTypeName() + "'"));
+}
+
+Value icinga::operator&(const Value& lhs, double rhs)
+{
+ return lhs & Value(rhs);
+}
+
+Value icinga::operator&(double lhs, const Value& rhs)
+{
+ return Value(lhs) & rhs;
+}
+
+Value icinga::operator&(const Value& lhs, int rhs)
+{
+ return lhs & Value(rhs);
+}
+
+Value icinga::operator&(int lhs, const Value& rhs)
+{
+ return Value(lhs) & rhs;
+}
+
+Value icinga::operator|(const Value& lhs, const Value& rhs)
+{
+ if ((lhs.IsNumber() || lhs.IsEmpty()) && (rhs.IsNumber() || rhs.IsEmpty()) && !(lhs.IsEmpty() && rhs.IsEmpty()))
+ return static_cast<int>(lhs) | static_cast<int>(rhs);
+ else
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Operator | cannot be applied to values of type '" + lhs.GetTypeName() + "' and '" + rhs.GetTypeName() + "'"));
+}
+
+Value icinga::operator|(const Value& lhs, double rhs)
+{
+ return lhs | Value(rhs);
+}
+
+Value icinga::operator|(double lhs, const Value& rhs)
+{
+ return Value(lhs) | rhs;
+}
+
+Value icinga::operator|(const Value& lhs, int rhs)
+{
+ return lhs | Value(rhs);
+}
+
+Value icinga::operator|(int lhs, const Value& rhs)
+{
+ return Value(lhs) | rhs;
+}
+
+Value icinga::operator<<(const Value& lhs, const Value& rhs)
+{
+ if ((lhs.IsNumber() || lhs.IsEmpty()) && (rhs.IsNumber() || rhs.IsEmpty()) && !(lhs.IsEmpty() && rhs.IsEmpty()))
+ return static_cast<int>(lhs) << static_cast<int>(rhs);
+ else
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Operator << cannot be applied to values of type '" + lhs.GetTypeName() + "' and '" + rhs.GetTypeName() + "'"));
+}
+
+Value icinga::operator<<(const Value& lhs, double rhs)
+{
+ return lhs << Value(rhs);
+}
+
+Value icinga::operator<<(double lhs, const Value& rhs)
+{
+ return Value(lhs) << rhs;
+}
+
+Value icinga::operator<<(const Value& lhs, int rhs)
+{
+ return lhs << Value(rhs);
+}
+
+Value icinga::operator<<(int lhs, const Value& rhs)
+{
+ return Value(lhs) << rhs;
+}
+
+Value icinga::operator>>(const Value& lhs, const Value& rhs)
+{
+ if ((lhs.IsNumber() || lhs.IsEmpty()) && (rhs.IsNumber() || rhs.IsEmpty()) && !(lhs.IsEmpty() && rhs.IsEmpty()))
+ return static_cast<int>(lhs) >> static_cast<int>(rhs);
+ else
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Operator >> cannot be applied to values of type '" + lhs.GetTypeName() + "' and '" + rhs.GetTypeName() + "'"));
+}
+
+Value icinga::operator>>(const Value& lhs, double rhs)
+{
+ return lhs >> Value(rhs);
+}
+
+Value icinga::operator>>(double lhs, const Value& rhs)
+{
+ return Value(lhs) >> rhs;
+}
+
+Value icinga::operator>>(const Value& lhs, int rhs)
+{
+ return lhs >> Value(rhs);
+}
+
+Value icinga::operator>>(int lhs, const Value& rhs)
+{
+ return Value(lhs) >> rhs;
+}
+
+bool icinga::operator<(const Value& lhs, const Value& rhs)
+{
+ if (lhs.IsString() && rhs.IsString())
+ return static_cast<String>(lhs) < static_cast<String>(rhs);
+ else if ((lhs.IsNumber() || lhs.IsEmpty()) && (rhs.IsNumber() || rhs.IsEmpty()) && !(lhs.IsEmpty() && rhs.IsEmpty()))
+ return static_cast<double>(lhs) < static_cast<double>(rhs);
+ else if ((lhs.IsObjectType<DateTime>() || lhs.IsEmpty()) && (rhs.IsObjectType<DateTime>() || rhs.IsEmpty()) && !(lhs.IsEmpty() && rhs.IsEmpty()))
+ return Convert::ToDateTimeValue(lhs) < Convert::ToDateTimeValue(rhs);
+ else if (lhs.IsObjectType<Array>() && rhs.IsObjectType<Array>()) {
+ Array::Ptr larr = lhs;
+ Array::Ptr rarr = rhs;
+
+ ObjectLock llock(larr);
+ ObjectLock rlock(rarr);
+
+ Array::SizeType llen = larr->GetLength();
+ Array::SizeType rlen = rarr->GetLength();
+
+ for (Array::SizeType i = 0; i < std::max(llen, rlen); i++) {
+ Value lval = (i >= llen) ? Empty : larr->Get(i);
+ Value rval = (i >= rlen) ? Empty : rarr->Get(i);
+
+ if (lval < rval)
+ return true;
+ else if (lval > rval)
+ return false;
+ }
+
+ return false;
+ } else
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Operator < cannot be applied to values of type '" + lhs.GetTypeName() + "' and '" + rhs.GetTypeName() + "'"));
+}
+
+bool icinga::operator<(const Value& lhs, double rhs)
+{
+ return lhs < Value(rhs);
+}
+
+bool icinga::operator<(double lhs, const Value& rhs)
+{
+ return Value(lhs) < rhs;
+}
+
+bool icinga::operator<(const Value& lhs, int rhs)
+{
+ return lhs < Value(rhs);
+}
+
+bool icinga::operator<(int lhs, const Value& rhs)
+{
+ return Value(lhs) < rhs;
+}
+
+bool icinga::operator>(const Value& lhs, const Value& rhs)
+{
+ if (lhs.IsString() && rhs.IsString())
+ return static_cast<String>(lhs) > static_cast<String>(rhs);
+ else if ((lhs.IsNumber() || lhs.IsEmpty()) && (rhs.IsNumber() || rhs.IsEmpty()) && !(lhs.IsEmpty() && rhs.IsEmpty()))
+ return static_cast<double>(lhs) > static_cast<double>(rhs);
+ else if ((lhs.IsObjectType<DateTime>() || lhs.IsEmpty()) && (rhs.IsObjectType<DateTime>() || rhs.IsEmpty()) && !(lhs.IsEmpty() && rhs.IsEmpty()))
+ return Convert::ToDateTimeValue(lhs) > Convert::ToDateTimeValue(rhs);
+ else if (lhs.IsObjectType<Array>() && rhs.IsObjectType<Array>()) {
+ Array::Ptr larr = lhs;
+ Array::Ptr rarr = rhs;
+
+ ObjectLock llock(larr);
+ ObjectLock rlock(rarr);
+
+ Array::SizeType llen = larr->GetLength();
+ Array::SizeType rlen = rarr->GetLength();
+
+ for (Array::SizeType i = 0; i < std::max(llen, rlen); i++) {
+ Value lval = (i >= llen) ? Empty : larr->Get(i);
+ Value rval = (i >= rlen) ? Empty : rarr->Get(i);
+
+ if (lval > rval)
+ return true;
+ else if (lval < rval)
+ return false;
+ }
+
+ return false;
+ } else
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Operator > cannot be applied to values of type '" + lhs.GetTypeName() + "' and '" + rhs.GetTypeName() + "'"));
+}
+
+bool icinga::operator>(const Value& lhs, double rhs)
+{
+ return lhs > Value(rhs);
+}
+
+bool icinga::operator>(double lhs, const Value& rhs)
+{
+ return Value(lhs) > rhs;
+}
+
+bool icinga::operator>(const Value& lhs, int rhs)
+{
+ return lhs > Value(rhs);
+}
+
+bool icinga::operator>(int lhs, const Value& rhs)
+{
+ return Value(lhs) > rhs;
+}
+
+bool icinga::operator<=(const Value& lhs, const Value& rhs)
+{
+ if (lhs.IsString() && rhs.IsString())
+ return static_cast<String>(lhs) <= static_cast<String>(rhs);
+ else if ((lhs.IsNumber() || lhs.IsEmpty()) && (rhs.IsNumber() || rhs.IsEmpty()) && !(lhs.IsEmpty() && rhs.IsEmpty()))
+ return static_cast<double>(lhs) <= static_cast<double>(rhs);
+ else if ((lhs.IsObjectType<DateTime>() || lhs.IsEmpty()) && (rhs.IsObjectType<DateTime>() || rhs.IsEmpty()) && !(lhs.IsEmpty() && rhs.IsEmpty()))
+ return Convert::ToDateTimeValue(lhs) <= Convert::ToDateTimeValue(rhs);
+ else
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Operator <= cannot be applied to values of type '" + lhs.GetTypeName() + "' and '" + rhs.GetTypeName() + "'"));
+}
+
+bool icinga::operator<=(const Value& lhs, double rhs)
+{
+ return lhs <= Value(rhs);
+}
+
+bool icinga::operator<=(double lhs, const Value& rhs)
+{
+ return Value(lhs) <= rhs;
+}
+
+bool icinga::operator<=(const Value& lhs, int rhs)
+{
+ return lhs <= Value(rhs);
+}
+
+bool icinga::operator<=(int lhs, const Value& rhs)
+{
+ return Value(lhs) <= rhs;
+}
+
+bool icinga::operator>=(const Value& lhs, const Value& rhs)
+{
+ if (lhs.IsString() && rhs.IsString())
+ return static_cast<String>(lhs) >= static_cast<String>(rhs);
+ else if ((lhs.IsNumber() || lhs.IsEmpty()) && (rhs.IsNumber() || rhs.IsEmpty()) && !(lhs.IsEmpty() && rhs.IsEmpty()))
+ return static_cast<double>(lhs) >= static_cast<double>(rhs);
+ else if ((lhs.IsObjectType<DateTime>() || lhs.IsEmpty()) && (rhs.IsObjectType<DateTime>() || rhs.IsEmpty()) && !(lhs.IsEmpty() && rhs.IsEmpty()))
+ return Convert::ToDateTimeValue(lhs) >= Convert::ToDateTimeValue(rhs);
+ else
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Operator >= cannot be applied to values of type '" + lhs.GetTypeName() + "' and '" + rhs.GetTypeName() + "'"));
+}
+
+bool icinga::operator>=(const Value& lhs, double rhs)
+{
+ return lhs >= Value(rhs);
+}
+
+bool icinga::operator>=(double lhs, const Value& rhs)
+{
+ return Value(lhs) >= rhs;
+}
+
+bool icinga::operator>=(const Value& lhs, int rhs)
+{
+ return lhs >= Value(rhs);
+}
+
+bool icinga::operator>=(int lhs, const Value& rhs)
+{
+ return Value(lhs) >= rhs;
+}
diff --git a/lib/base/value.cpp b/lib/base/value.cpp
new file mode 100644
index 0000000..867c821
--- /dev/null
+++ b/lib/base/value.cpp
@@ -0,0 +1,264 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/value.hpp"
+#include "base/array.hpp"
+#include "base/dictionary.hpp"
+#include "base/type.hpp"
+
+using namespace icinga;
+
+template class boost::variant<boost::blank, double, bool, String, Object::Ptr>;
+template const double& Value::Get<double>() const;
+template const bool& Value::Get<bool>() const;
+template const String& Value::Get<String>() const;
+template const Object::Ptr& Value::Get<Object::Ptr>() const;
+
+Value icinga::Empty;
+
+Value::Value(std::nullptr_t)
+{ }
+
+Value::Value(int value)
+ : m_Value(double(value))
+{ }
+
+Value::Value(unsigned int value)
+ : m_Value(double(value))
+{ }
+
+Value::Value(long value)
+ : m_Value(double(value))
+{ }
+
+Value::Value(unsigned long value)
+ : m_Value(double(value))
+{ }
+
+Value::Value(long long value)
+ : m_Value(double(value))
+{ }
+
+Value::Value(unsigned long long value)
+ : m_Value(double(value))
+{ }
+
+Value::Value(double value)
+ : m_Value(value)
+{ }
+
+Value::Value(bool value)
+ : m_Value(value)
+{ }
+
+Value::Value(const String& value)
+ : m_Value(value)
+{ }
+
+Value::Value(String&& value)
+ : m_Value(value)
+{ }
+
+Value::Value(const char *value)
+ : m_Value(String(value))
+{ }
+
+Value::Value(const Value& other)
+ : m_Value(other.m_Value)
+{ }
+
+Value::Value(Value&& other)
+{
+#if BOOST_VERSION >= 105400
+ m_Value = std::move(other.m_Value);
+#else /* BOOST_VERSION */
+ m_Value.swap(other.m_Value);
+#endif /* BOOST_VERSION */
+}
+
+Value::Value(Object *value)
+ : Value(Object::Ptr(value))
+{ }
+
+Value::Value(const intrusive_ptr<Object>& value)
+{
+ if (value)
+ m_Value = value;
+}
+
+Value& Value::operator=(const Value& other)
+{
+ m_Value = other.m_Value;
+ return *this;
+}
+
+Value& Value::operator=(Value&& other)
+{
+#if BOOST_VERSION >= 105400
+ m_Value = std::move(other.m_Value);
+#else /* BOOST_VERSION */
+ m_Value.swap(other.m_Value);
+#endif /* BOOST_VERSION */
+
+ return *this;
+}
+
+/**
+ * Checks whether the variant is empty.
+ *
+ * @returns true if the variant is empty, false otherwise.
+ */
+bool Value::IsEmpty() const
+{
+ return (GetType() == ValueEmpty || (IsString() && boost::get<String>(m_Value).IsEmpty()));
+}
+
+/**
+ * Checks whether the variant is scalar (i.e. not an object and not empty).
+ *
+ * @returns true if the variant is scalar, false otherwise.
+ */
+bool Value::IsScalar() const
+{
+ return !IsEmpty() && !IsObject();
+}
+
+/**
+ * Checks whether the variant is a number.
+ *
+ * @returns true if the variant is a number.
+ */
+bool Value::IsNumber() const
+{
+ return (GetType() == ValueNumber);
+}
+
+/**
+ * Checks whether the variant is a boolean.
+ *
+ * @returns true if the variant is a boolean.
+ */
+bool Value::IsBoolean() const
+{
+ return (GetType() == ValueBoolean);
+}
+
+/**
+ * Checks whether the variant is a string.
+ *
+ * @returns true if the variant is a string.
+ */
+bool Value::IsString() const
+{
+ return (GetType() == ValueString);
+}
+
+/**
+ * Checks whether the variant is a non-null object.
+ *
+ * @returns true if the variant is a non-null object, false otherwise.
+ */
+bool Value::IsObject() const
+{
+ return (GetType() == ValueObject);
+}
+
+/**
+ * Returns the type of the value.
+ *
+ * @returns The type.
+ */
+ValueType Value::GetType() const
+{
+ return static_cast<ValueType>(m_Value.which());
+}
+
+void Value::Swap(Value& other)
+{
+ m_Value.swap(other.m_Value);
+}
+
+bool Value::ToBool() const
+{
+ switch (GetType()) {
+ case ValueNumber:
+ return static_cast<bool>(boost::get<double>(m_Value));
+
+ case ValueBoolean:
+ return boost::get<bool>(m_Value);
+
+ case ValueString:
+ return !boost::get<String>(m_Value).IsEmpty();
+
+ case ValueObject:
+ if (IsObjectType<Dictionary>()) {
+ Dictionary::Ptr dictionary = *this;
+ return dictionary->GetLength() > 0;
+ } else if (IsObjectType<Array>()) {
+ Array::Ptr array = *this;
+ return array->GetLength() > 0;
+ } else {
+ return true;
+ }
+
+ case ValueEmpty:
+ return false;
+
+ default:
+ BOOST_THROW_EXCEPTION(std::runtime_error("Invalid variant type."));
+ }
+}
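
Editor's illustration (not part of the upstream patch): the truthiness rules implemented by ToBool():

    #include "base/value.hpp"
    #include "base/dictionary.hpp"
    #include <iostream>

    using namespace icinga;

    int main()
    {
        std::cout << Value(0).ToBool()                 /* false: the number zero */
                  << Value("").ToBool()                /* false: empty string */
                  << Value("false").ToBool()           /* true: any non-empty string */
                  << Value(new Dictionary()).ToBool()  /* false: empty container */
                  << Value().ToBool()                  /* false: empty value */
                  << "\n";
    }
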
+
+String Value::GetTypeName() const
+{
+ Type::Ptr t;
+
+ switch (GetType()) {
+ case ValueEmpty:
+ return "Empty";
+ case ValueNumber:
+ return "Number";
+ case ValueBoolean:
+ return "Boolean";
+ case ValueString:
+ return "String";
+ case ValueObject:
+ t = boost::get<Object::Ptr>(m_Value)->GetReflectionType();
+ if (!t) {
+ if (IsObjectType<Array>())
+ return "Array";
+ else if (IsObjectType<Dictionary>())
+ return "Dictionary";
+ else
+ return "Object";
+ } else
+ return t->GetName();
+ default:
+ return "Invalid";
+ }
+}
+
+Type::Ptr Value::GetReflectionType() const
+{
+ switch (GetType()) {
+ case ValueEmpty:
+ return Object::TypeInstance;
+ case ValueNumber:
+ return Type::GetByName("Number");
+ case ValueBoolean:
+ return Type::GetByName("Boolean");
+ case ValueString:
+ return Type::GetByName("String");
+ case ValueObject:
+ return boost::get<Object::Ptr>(m_Value)->GetReflectionType();
+ default:
+ return nullptr;
+ }
+}
+
+Value Value::Clone() const
+{
+ if (IsObject())
+ return static_cast<Object::Ptr>(*this)->Clone();
+ else
+ return *this;
+}
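+
+/* Illustrative usage sketch (not part of the upstream sources): how Value's
+ * constructors, implicit conversions and type checks are typically used. The
+ * variable names are local to this example.
+ *
+ *     Value num = 42;                      // ValueNumber
+ *     double d = num;                      // operator double()
+ *
+ *     Value str = "hello";                 // ValueString
+ *     bool truthy = str.ToBool();          // true: non-empty strings are truthy
+ *
+ *     Value obj = new Dictionary();        // ValueObject
+ *     Dictionary::Ptr dict = obj;          // operator intrusive_ptr<T>() with dynamic cast
+ */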
diff --git a/lib/base/value.hpp b/lib/base/value.hpp
new file mode 100644
index 0000000..86a3b11
--- /dev/null
+++ b/lib/base/value.hpp
@@ -0,0 +1,251 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef VALUE_H
+#define VALUE_H
+
+#include "base/object.hpp"
+#include "base/string.hpp"
+#include <boost/variant/variant.hpp>
+#include <boost/variant/get.hpp>
+#include <boost/throw_exception.hpp>
+
+namespace icinga
+{
+
+typedef double Timestamp;
+
+/**
+ * The type of a Value.
+ *
+ * @ingroup base
+ */
+enum ValueType
+{
+ ValueEmpty = 0,
+ ValueNumber = 1,
+ ValueBoolean = 2,
+ ValueString = 3,
+ ValueObject = 4
+};
+
+/**
+ * A type that can hold an arbitrary value.
+ *
+ * @ingroup base
+ */
+class Value
+{
+public:
+ Value() = default;
+ Value(std::nullptr_t);
+ Value(int value);
+ Value(unsigned int value);
+ Value(long value);
+ Value(unsigned long value);
+ Value(long long value);
+ Value(unsigned long long value);
+ Value(double value);
+ Value(bool value);
+ Value(const String& value);
+ Value(String&& value);
+ Value(const char *value);
+ Value(const Value& other);
+ Value(Value&& other);
+ Value(Object *value);
+ Value(const intrusive_ptr<Object>& value);
+
+ template<typename T>
+ Value(const intrusive_ptr<T>& value)
+ : Value(static_pointer_cast<Object>(value))
+ {
+ static_assert(!std::is_same<T, Object>::value, "T must not be Object");
+ }
+
+ bool ToBool() const;
+
+ operator double() const;
+ operator String() const;
+
+ Value& operator=(const Value& other);
+ Value& operator=(Value&& other);
+
+ bool operator==(bool rhs) const;
+ bool operator!=(bool rhs) const;
+
+ bool operator==(int rhs) const;
+ bool operator!=(int rhs) const;
+
+ bool operator==(double rhs) const;
+ bool operator!=(double rhs) const;
+
+ bool operator==(const char *rhs) const;
+ bool operator!=(const char *rhs) const;
+
+ bool operator==(const String& rhs) const;
+ bool operator!=(const String& rhs) const;
+
+ bool operator==(const Value& rhs) const;
+ bool operator!=(const Value& rhs) const;
+
+ template<typename T>
+ operator intrusive_ptr<T>() const
+ {
+ if (IsEmpty() && !IsString())
+ return intrusive_ptr<T>();
+
+ if (!IsObject())
+ BOOST_THROW_EXCEPTION(std::runtime_error("Cannot convert value of type '" + GetTypeName() + "' to an object."));
+
+ const auto& object = Get<Object::Ptr>();
+
+ ASSERT(object);
+
+ intrusive_ptr<T> tobject = dynamic_pointer_cast<T>(object);
+
+ if (!tobject)
+ BOOST_THROW_EXCEPTION(std::bad_cast());
+
+ return tobject;
+ }
+
+ bool IsEmpty() const;
+ bool IsScalar() const;
+ bool IsNumber() const;
+ bool IsBoolean() const;
+ bool IsString() const;
+ bool IsObject() const;
+
+ template<typename T>
+ bool IsObjectType() const
+ {
+ if (!IsObject())
+ return false;
+
+ return dynamic_cast<T *>(Get<Object::Ptr>().get());
+ }
+
+ ValueType GetType() const;
+
+ void Swap(Value& other);
+
+ String GetTypeName() const;
+
+ Type::Ptr GetReflectionType() const;
+
+ Value Clone() const;
+
+ template<typename T>
+ const T& Get() const
+ {
+ return boost::get<T>(m_Value);
+ }
+
+private:
+ boost::variant<boost::blank, double, bool, String, Object::Ptr> m_Value;
+};
+
+extern template const double& Value::Get<double>() const;
+extern template const bool& Value::Get<bool>() const;
+extern template const String& Value::Get<String>() const;
+extern template const Object::Ptr& Value::Get<Object::Ptr>() const;
+
+extern Value Empty;
+
+Value operator+(const Value& lhs, const char *rhs);
+Value operator+(const char *lhs, const Value& rhs);
+
+Value operator+(const Value& lhs, const String& rhs);
+Value operator+(const String& lhs, const Value& rhs);
+
+Value operator+(const Value& lhs, const Value& rhs);
+Value operator+(const Value& lhs, double rhs);
+Value operator+(double lhs, const Value& rhs);
+Value operator+(const Value& lhs, int rhs);
+Value operator+(int lhs, const Value& rhs);
+
+Value operator-(const Value& lhs, const Value& rhs);
+Value operator-(const Value& lhs, double rhs);
+Value operator-(double lhs, const Value& rhs);
+Value operator-(const Value& lhs, int rhs);
+Value operator-(int lhs, const Value& rhs);
+
+Value operator*(const Value& lhs, const Value& rhs);
+Value operator*(const Value& lhs, double rhs);
+Value operator*(double lhs, const Value& rhs);
+Value operator*(const Value& lhs, int rhs);
+Value operator*(int lhs, const Value& rhs);
+
+Value operator/(const Value& lhs, const Value& rhs);
+Value operator/(const Value& lhs, double rhs);
+Value operator/(double lhs, const Value& rhs);
+Value operator/(const Value& lhs, int rhs);
+Value operator/(int lhs, const Value& rhs);
+
+Value operator%(const Value& lhs, const Value& rhs);
+Value operator%(const Value& lhs, double rhs);
+Value operator%(double lhs, const Value& rhs);
+Value operator%(const Value& lhs, int rhs);
+Value operator%(int lhs, const Value& rhs);
+
+Value operator^(const Value& lhs, const Value& rhs);
+Value operator^(const Value& lhs, double rhs);
+Value operator^(double lhs, const Value& rhs);
+Value operator^(const Value& lhs, int rhs);
+Value operator^(int lhs, const Value& rhs);
+
+Value operator&(const Value& lhs, const Value& rhs);
+Value operator&(const Value& lhs, double rhs);
+Value operator&(double lhs, const Value& rhs);
+Value operator&(const Value& lhs, int rhs);
+Value operator&(int lhs, const Value& rhs);
+
+Value operator|(const Value& lhs, const Value& rhs);
+Value operator|(const Value& lhs, double rhs);
+Value operator|(double lhs, const Value& rhs);
+Value operator|(const Value& lhs, int rhs);
+Value operator|(int lhs, const Value& rhs);
+
+Value operator<<(const Value& lhs, const Value& rhs);
+Value operator<<(const Value& lhs, double rhs);
+Value operator<<(double lhs, const Value& rhs);
+Value operator<<(const Value& lhs, int rhs);
+Value operator<<(int lhs, const Value& rhs);
+
+Value operator>>(const Value& lhs, const Value& rhs);
+Value operator>>(const Value& lhs, double rhs);
+Value operator>>(double lhs, const Value& rhs);
+Value operator>>(const Value& lhs, int rhs);
+Value operator>>(int lhs, const Value& rhs);
+
+bool operator<(const Value& lhs, const Value& rhs);
+bool operator<(const Value& lhs, double rhs);
+bool operator<(double lhs, const Value& rhs);
+bool operator<(const Value& lhs, int rhs);
+bool operator<(int lhs, const Value& rhs);
+
+bool operator>(const Value& lhs, const Value& rhs);
+bool operator>(const Value& lhs, double rhs);
+bool operator>(double lhs, const Value& rhs);
+bool operator>(const Value& lhs, int rhs);
+bool operator>(int lhs, const Value& rhs);
+
+bool operator<=(const Value& lhs, const Value& rhs);
+bool operator<=(const Value& lhs, double rhs);
+bool operator<=(double lhs, const Value& rhs);
+bool operator<=(const Value& lhs, int rhs);
+bool operator<=(int lhs, const Value& rhs);
+
+bool operator>=(const Value& lhs, const Value& rhs);
+bool operator>=(const Value& lhs, double rhs);
+bool operator>=(double lhs, const Value& rhs);
+bool operator>=(const Value& lhs, int rhs);
+bool operator>=(int lhs, const Value& rhs);
+
+std::ostream& operator<<(std::ostream& stream, const Value& value);
+std::istream& operator>>(std::istream& stream, Value& value);
+
+}
+
+extern template class boost::variant<boost::blank, double, bool, icinga::String, icinga::Object::Ptr>;
+
+#endif /* VALUE_H */
diff --git a/lib/base/win32.hpp b/lib/base/win32.hpp
new file mode 100644
index 0000000..064c5d6
--- /dev/null
+++ b/lib/base/win32.hpp
@@ -0,0 +1,35 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef WIN32_H
+#define WIN32_H
+
+#define WIN32_LEAN_AND_MEAN
+#ifndef _WIN32_WINNT
+#define _WIN32_WINNT _WIN32_WINNT_VISTA
+#endif /* _WIN32_WINNT */
+#define NOMINMAX
+#include <winsock2.h>
+#include <windows.h>
+#include <ws2tcpip.h>
+#include <imagehlp.h>
+#include <shlwapi.h>
+
+#include <direct.h>
+
+#ifdef __MINGW32__
+# ifndef IPV6_V6ONLY
+# define IPV6_V6ONLY 27
+# endif /* IPV6_V6ONLY */
+#endif /* __MINGW32__ */
+
+typedef int socklen_t;
+typedef SSIZE_T ssize_t;
+
+#define MAXPATHLEN MAX_PATH
+
+#ifdef _MSC_VER
+typedef DWORD pid_t;
+#define strcasecmp stricmp
+#endif /* _MSC_VER */
+
+#endif /* WIN32_H */
diff --git a/lib/base/windowseventloglogger-provider.mc b/lib/base/windowseventloglogger-provider.mc
new file mode 100644
index 0000000..09e65ba
--- /dev/null
+++ b/lib/base/windowseventloglogger-provider.mc
@@ -0,0 +1,5 @@
+MessageId=0x1
+SymbolicName=MSG_PLAIN_LOG_ENTRY
+Language=English
+%1
+.
diff --git a/lib/base/windowseventloglogger.cpp b/lib/base/windowseventloglogger.cpp
new file mode 100644
index 0000000..cc28358
--- /dev/null
+++ b/lib/base/windowseventloglogger.cpp
@@ -0,0 +1,83 @@
+/* Icinga 2 | (c) 2021 Icinga GmbH | GPLv2+ */
+
+#ifdef _WIN32
+#include "base/windowseventloglogger.hpp"
+#include "base/windowseventloglogger-ti.cpp"
+#include "base/windowseventloglogger-provider.h"
+#include "base/configtype.hpp"
+#include "base/statsfunction.hpp"
+#include <windows.h>
+
+using namespace icinga;
+
+REGISTER_TYPE(WindowsEventLogLogger);
+
+REGISTER_STATSFUNCTION(WindowsEventLogLogger, &WindowsEventLogLogger::StatsFunc);
+
+INITIALIZE_ONCE(&WindowsEventLogLogger::StaticInitialize);
+
+static HANDLE l_EventLog = nullptr;
+
+void WindowsEventLogLogger::StaticInitialize()
+{
+ l_EventLog = RegisterEventSourceA(nullptr, "Icinga 2");
+}
+
+void WindowsEventLogLogger::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr&)
+{
+ DictionaryData nodes;
+
+ for (const WindowsEventLogLogger::Ptr& logger : ConfigType::GetObjectsByType<WindowsEventLogLogger>()) {
+ nodes.emplace_back(logger->GetName(), 1);
+ }
+
+ status->Set("windowseventloglogger", new Dictionary(std::move(nodes)));
+}
+
+/**
+ * Processes a log entry and outputs it to the Windows Event Log.
+ *
+ * This function implements the interface expected by the Logger base class and passes
+ * the log entry to WindowsEventLogLogger::WriteToWindowsEventLog().
+ *
+ * @param entry The log entry.
+ */
+void WindowsEventLogLogger::ProcessLogEntry(const LogEntry& entry) {
+ WindowsEventLogLogger::WriteToWindowsEventLog(entry);
+}
+
+/**
+ * Writes a LogEntry object to the Windows Event Log.
+ *
+ * @param entry The log entry.
+ */
+void WindowsEventLogLogger::WriteToWindowsEventLog(const LogEntry& entry)
+{
+ if (l_EventLog != nullptr) {
+ std::string message = Logger::SeverityToString(entry.Severity) + "/" + entry.Facility + ": " + entry.Message;
+ std::array<const char *, 1> strings{
+ message.c_str()
+ };
+
+ WORD eventType;
+ switch (entry.Severity) {
+ case LogCritical:
+ eventType = EVENTLOG_ERROR_TYPE;
+ break;
+ case LogWarning:
+ eventType = EVENTLOG_WARNING_TYPE;
+ break;
+ default:
+ eventType = EVENTLOG_INFORMATION_TYPE;
+ }
+
+ ReportEventA(l_EventLog, eventType, 0, MSG_PLAIN_LOG_ENTRY, NULL, strings.size(), 0, strings.data(), NULL);
+ }
+}
+
+void WindowsEventLogLogger::Flush()
+{
+ /* Nothing to do here. */
+}
+
+#endif /* _WIN32 */
diff --git a/lib/base/windowseventloglogger.hpp b/lib/base/windowseventloglogger.hpp
new file mode 100644
index 0000000..cefc245
--- /dev/null
+++ b/lib/base/windowseventloglogger.hpp
@@ -0,0 +1,37 @@
+/* Icinga 2 | (c) 2021 Icinga GmbH | GPLv2+ */
+
+#ifndef WINDOWSEVENTLOGLOGGER_H
+#define WINDOWSEVENTLOGLOGGER_H
+
+#ifdef _WIN32
+#include "base/i2-base.hpp"
+#include "base/windowseventloglogger-ti.hpp"
+
+namespace icinga
+{
+
+/**
+ * A logger that logs to the Windows Event Log.
+ *
+ * @ingroup base
+ */
+class WindowsEventLogLogger final : public ObjectImpl<WindowsEventLogLogger>
+{
+public:
+ DECLARE_OBJECT(WindowsEventLogLogger);
+ DECLARE_OBJECTNAME(WindowsEventLogLogger);
+
+ static void StaticInitialize();
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+
+ static void WriteToWindowsEventLog(const LogEntry& entry);
+
+protected:
+ void ProcessLogEntry(const LogEntry& entry) override;
+ void Flush() override;
+};
+
+}
+#endif /* _WIN32 */
+
+#endif /* WINDOWSEVENTLOGLOGGER_H */
diff --git a/lib/base/windowseventloglogger.ti b/lib/base/windowseventloglogger.ti
new file mode 100644
index 0000000..edf65fc
--- /dev/null
+++ b/lib/base/windowseventloglogger.ti
@@ -0,0 +1,15 @@
+/* Icinga 2 | (c) 2021 Icinga GmbH | GPLv2+ */
+
+#include "base/logger.hpp"
+
+library base;
+
+namespace icinga
+{
+
+class WindowsEventLogLogger : Logger
+{
+ activation_priority -100;
+};
+
+}
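+
+/* Illustrative configuration sketch (not part of the upstream sources): the
+ * logger is typically enabled through a config object like the following,
+ * where the object name is an assumed example and 'severity' is inherited
+ * from Logger:
+ *
+ *     object WindowsEventLogLogger "windowseventlog" {
+ *       severity = "information"
+ *     }
+ */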
diff --git a/lib/base/workqueue.cpp b/lib/base/workqueue.cpp
new file mode 100644
index 0000000..0b1214b
--- /dev/null
+++ b/lib/base/workqueue.cpp
@@ -0,0 +1,318 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/workqueue.hpp"
+#include "base/utility.hpp"
+#include "base/logger.hpp"
+#include "base/convert.hpp"
+#include "base/application.hpp"
+#include "base/exception.hpp"
+#include <boost/thread/tss.hpp>
+#include <math.h>
+
+using namespace icinga;
+
+std::atomic<int> WorkQueue::m_NextID(1);
+boost::thread_specific_ptr<WorkQueue *> l_ThreadWorkQueue;
+
+WorkQueue::WorkQueue(size_t maxItems, int threadCount, LogSeverity statsLogLevel)
+ : m_ID(m_NextID++), m_ThreadCount(threadCount), m_MaxItems(maxItems),
+ m_TaskStats(15 * 60), m_StatsLogLevel(statsLogLevel)
+{
+	/* Initialize the status timer. */
+ m_StatusTimerTimeout = Utility::GetTime();
+
+ m_StatusTimer = Timer::Create();
+ m_StatusTimer->SetInterval(10);
+ m_StatusTimer->OnTimerExpired.connect([this](const Timer * const&) { StatusTimerHandler(); });
+ m_StatusTimer->Start();
+}
+
+WorkQueue::~WorkQueue()
+{
+ m_StatusTimer->Stop(true);
+
+ Join(true);
+}
+
+void WorkQueue::SetName(const String& name)
+{
+ m_Name = name;
+}
+
+String WorkQueue::GetName() const
+{
+ return m_Name;
+}
+
+std::unique_lock<std::mutex> WorkQueue::AcquireLock()
+{
+ return std::unique_lock<std::mutex>(m_Mutex);
+}
+
+/**
+ * Enqueues a task. Tasks are guaranteed to be executed in the order
+ * they were enqueued, unless there is more than one worker thread.
+ */
+void WorkQueue::EnqueueUnlocked(std::unique_lock<std::mutex>& lock, std::function<void ()>&& function, WorkQueuePriority priority)
+{
+ if (!m_Spawned) {
+ Log(LogNotice, "WorkQueue")
+ << "Spawning WorkQueue threads for '" << m_Name << "'";
+
+ for (int i = 0; i < m_ThreadCount; i++) {
+ m_Threads.create_thread([this]() { WorkerThreadProc(); });
+ }
+
+ m_Spawned = true;
+ }
+
+ bool wq_thread = IsWorkerThread();
+
+ if (!wq_thread) {
+ while (m_Tasks.size() >= m_MaxItems && m_MaxItems != 0)
+ m_CVFull.wait(lock);
+ }
+
+ m_Tasks.emplace(std::move(function), priority, ++m_NextTaskID);
+
+ m_CVEmpty.notify_one();
+}
+
+/**
+ * Enqueues a task. Tasks are guaranteed to be executed in the order
+ * they were enqueued, unless there is more than one worker thread or
+ * allowInterleaved is true, in which case the new task may be run
+ * immediately if it is enqueued from within the WorkQueue thread.
+ */
+void WorkQueue::Enqueue(std::function<void ()>&& function, WorkQueuePriority priority,
+ bool allowInterleaved)
+{
+ bool wq_thread = IsWorkerThread();
+
+ if (wq_thread && allowInterleaved) {
+ function();
+
+ return;
+ }
+
+ auto lock = AcquireLock();
+ EnqueueUnlocked(lock, std::move(function), priority);
+}
+
+/**
+ * Waits until all currently enqueued tasks have completed. This only works reliably
+ * when no other thread is enqueuing new tasks while this method is running.
+ *
+ * @param stop Whether to stop the worker threads
+ */
+void WorkQueue::Join(bool stop)
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ while (m_Processing || !m_Tasks.empty())
+ m_CVStarved.wait(lock);
+
+ if (stop) {
+ m_Stopped = true;
+ m_CVEmpty.notify_all();
+ lock.unlock();
+
+ m_Threads.join_all();
+ m_Spawned = false;
+
+ Log(LogNotice, "WorkQueue")
+ << "Stopped WorkQueue threads for '" << m_Name << "'";
+ }
+}
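+
+/* Illustrative usage sketch (not part of the upstream sources): a typical
+ * producer enqueues tasks and later waits for their completion. The names
+ * below are local to this example.
+ *
+ *     WorkQueue wq(0, 4);                  // unbounded queue, four worker threads
+ *     wq.SetName("ExampleQueue");
+ *
+ *     wq.Enqueue([]() {
+ *         // task body, runs on one of the worker threads
+ *     }, PriorityHigh);
+ *
+ *     wq.Join(true);                       // wait for completion and stop the threads
+ */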
+
+/**
+ * Checks whether the calling thread is one of the worker threads
+ * for this work queue.
+ *
+ * @returns true if called from one of the worker threads, false otherwise
+ */
+bool WorkQueue::IsWorkerThread() const
+{
+ WorkQueue **pwq = l_ThreadWorkQueue.get();
+
+ if (!pwq)
+ return false;
+
+ return *pwq == this;
+}
+
+void WorkQueue::SetExceptionCallback(const ExceptionCallback& callback)
+{
+ m_ExceptionCallback = callback;
+}
+
+/**
+ * Checks whether any exceptions have occurred while executing tasks for this
+ * work queue. When a custom exception callback is set this method will always
+ * return false.
+ */
+bool WorkQueue::HasExceptions() const
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ return !m_Exceptions.empty();
+}
+
+/**
+ * Returns all exceptions which have occurred for tasks in this work queue. When a
+ * custom exception callback is set this method will always return an empty list.
+ */
+std::vector<boost::exception_ptr> WorkQueue::GetExceptions() const
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ return m_Exceptions;
+}
+
+void WorkQueue::ReportExceptions(const String& facility, bool verbose) const
+{
+ std::vector<boost::exception_ptr> exceptions = GetExceptions();
+
+ for (const auto& eptr : exceptions) {
+ Log(LogCritical, facility)
+ << DiagnosticInformation(eptr, verbose);
+ }
+
+ Log(LogCritical, facility)
+ << exceptions.size() << " error" << (exceptions.size() != 1 ? "s" : "");
+}
+
+size_t WorkQueue::GetLength() const
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ return m_Tasks.size();
+}
+
+void WorkQueue::StatusTimerHandler()
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ ASSERT(!m_Name.IsEmpty());
+
+ size_t pending = m_Tasks.size();
+
+ double now = Utility::GetTime();
+ double gradient = (pending - m_PendingTasks) / (now - m_PendingTasksTimestamp);
+ double timeToZero = pending / gradient;
+
+ String timeInfo;
+
+ if (pending > GetTaskCount(5)) {
+ timeInfo = " empty in ";
+ if (timeToZero < 0 || std::isinf(timeToZero))
+ timeInfo += "infinite time, your task handler isn't able to keep up";
+ else
+ timeInfo += Utility::FormatDuration(timeToZero);
+ }
+
+ m_PendingTasks = pending;
+ m_PendingTasksTimestamp = now;
+
+ /* Log if there are pending items, or 5 minute timeout is reached. */
+ if (pending > 0 || m_StatusTimerTimeout < now) {
+ Log(m_StatsLogLevel, "WorkQueue")
+ << "#" << m_ID << " (" << m_Name << ") "
+ << "items: " << pending << ", "
+ << "rate: " << std::setw(2) << GetTaskCount(60) / 60.0 << "/s "
+ << "(" << GetTaskCount(60) << "/min " << GetTaskCount(60 * 5) << "/5min " << GetTaskCount(60 * 15) << "/15min);"
+ << timeInfo;
+ }
+
+ /* Reschedule next log entry in 5 minutes. */
+ if (m_StatusTimerTimeout < now) {
+ m_StatusTimerTimeout = now + 60 * 5;
+ }
+}
+
+void WorkQueue::RunTaskFunction(const TaskFunction& func)
+{
+ try {
+ func();
+ } catch (const std::exception&) {
+ boost::exception_ptr eptr = boost::current_exception();
+
+ {
+ std::unique_lock<std::mutex> mutex(m_Mutex);
+
+ if (!m_ExceptionCallback)
+ m_Exceptions.push_back(eptr);
+ }
+
+ if (m_ExceptionCallback)
+ m_ExceptionCallback(eptr);
+ }
+}
+
+void WorkQueue::WorkerThreadProc()
+{
+ std::ostringstream idbuf;
+ idbuf << "WQ #" << m_ID;
+ Utility::SetThreadName(idbuf.str());
+
+ l_ThreadWorkQueue.reset(new WorkQueue *(this));
+
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ for (;;) {
+ while (m_Tasks.empty() && !m_Stopped)
+ m_CVEmpty.wait(lock);
+
+ if (m_Stopped)
+ break;
+
+ if (m_Tasks.size() >= m_MaxItems && m_MaxItems != 0)
+ m_CVFull.notify_all();
+
+ Task task = m_Tasks.top();
+ m_Tasks.pop();
+
+ m_Processing++;
+
+ lock.unlock();
+
+ RunTaskFunction(task.Function);
+
+ /* clear the task so whatever other resources it holds are released _before_ we re-acquire the mutex */
+ task = Task();
+
+ IncreaseTaskCount();
+
+ lock.lock();
+
+ m_Processing--;
+
+ if (m_Tasks.empty())
+ m_CVStarved.notify_all();
+ }
+}
+
+void WorkQueue::IncreaseTaskCount()
+{
+ m_TaskStats.InsertValue(Utility::GetTime(), 1);
+}
+
+size_t WorkQueue::GetTaskCount(RingBuffer::SizeType span)
+{
+ return m_TaskStats.UpdateAndGetValues(Utility::GetTime(), span);
+}
+
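+/* Strict weak ordering for the task priority queue: a task with a higher
+ * Priority value is scheduled first; among tasks of equal priority the one
+ * with the smaller (earlier) ID wins, preserving FIFO order per priority. */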
+bool icinga::operator<(const Task& a, const Task& b)
+{
+ if (a.Priority < b.Priority)
+ return true;
+
+ if (a.Priority == b.Priority) {
+ if (a.ID > b.ID)
+ return true;
+ else
+ return false;
+ }
+
+ return false;
+}
diff --git a/lib/base/workqueue.hpp b/lib/base/workqueue.hpp
new file mode 100644
index 0000000..9c8a6b8
--- /dev/null
+++ b/lib/base/workqueue.hpp
@@ -0,0 +1,154 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef WORKQUEUE_H
+#define WORKQUEUE_H
+
+#include "base/i2-base.hpp"
+#include "base/timer.hpp"
+#include "base/ringbuffer.hpp"
+#include "base/logger.hpp"
+#include <boost/thread/thread.hpp>
+#include <boost/exception_ptr.hpp>
+#include <condition_variable>
+#include <mutex>
+#include <queue>
+#include <deque>
+#include <atomic>
+
+namespace icinga
+{
+
+enum WorkQueuePriority
+{
+ PriorityLow = 0,
+ PriorityNormal = 1,
+ PriorityHigh = 2,
+ PriorityImmediate = 4
+};
+
+using TaskFunction = std::function<void ()>;
+
+struct Task
+{
+ Task() = default;
+
+ Task(TaskFunction function, WorkQueuePriority priority, int id)
+ : Function(std::move(function)), Priority(priority), ID(id)
+ { }
+
+ TaskFunction Function;
+ WorkQueuePriority Priority{PriorityNormal};
+ int ID{-1};
+};
+
+bool operator<(const Task& a, const Task& b);
+
+/**
+ * A workqueue.
+ *
+ * @ingroup base
+ */
+class WorkQueue
+{
+public:
+ typedef std::function<void (boost::exception_ptr)> ExceptionCallback;
+
+ WorkQueue(size_t maxItems = 0, int threadCount = 1, LogSeverity statsLogLevel = LogInformation);
+ ~WorkQueue();
+
+ void SetName(const String& name);
+ String GetName() const;
+
+ std::unique_lock<std::mutex> AcquireLock();
+ void EnqueueUnlocked(std::unique_lock<std::mutex>& lock, TaskFunction&& function, WorkQueuePriority priority = PriorityNormal);
+ void Enqueue(TaskFunction&& function, WorkQueuePriority priority = PriorityNormal,
+ bool allowInterleaved = false);
+ void Join(bool stop = false);
+
+ template<typename VectorType, typename FuncType>
+ void ParallelFor(const VectorType& items, const FuncType& func)
+ {
+ ParallelFor(items, true, func);
+ }
+
+ template<typename VectorType, typename FuncType>
+ void ParallelFor(const VectorType& items, bool preChunk, const FuncType& func)
+ {
+ using SizeType = decltype(items.size());
+
+ SizeType totalCount = items.size();
+ SizeType chunks = preChunk ? m_ThreadCount : totalCount;
+
+ auto lock = AcquireLock();
+
+ SizeType offset = 0;
+
+ for (SizeType i = 0; i < chunks; i++) {
+ SizeType count = totalCount / chunks;
+ if (i < totalCount % chunks)
+ count++;
+
+ EnqueueUnlocked(lock, [&items, func, offset, count, this]() {
+ for (SizeType j = offset; j < offset + count; j++) {
+ RunTaskFunction([&func, &items, j]() {
+ func(items[j]);
+ });
+ }
+ });
+
+ offset += count;
+ }
+
+ ASSERT(offset == items.size());
+ }
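+
+	/* Illustrative usage sketch (not part of the upstream sources): process a
+	 * container on the queue's worker threads; with preChunk enabled (the
+	 * default) the items are split into one chunk per worker thread.
+	 *
+	 *     std::vector<int> items{1, 2, 3, 4};
+	 *     wq.ParallelFor(items, [](int item) { Process(item); });
+	 *     wq.Join();
+	 *
+	 * 'wq' and 'Process' are assumed names local to this example.
+	 */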
+
+ bool IsWorkerThread() const;
+
+ size_t GetLength() const;
+ size_t GetTaskCount(RingBuffer::SizeType span);
+
+ void SetExceptionCallback(const ExceptionCallback& callback);
+
+ bool HasExceptions() const;
+ std::vector<boost::exception_ptr> GetExceptions() const;
+ void ReportExceptions(const String& facility, bool verbose = false) const;
+
+protected:
+ void IncreaseTaskCount();
+
+private:
+ int m_ID;
+ String m_Name;
+ static std::atomic<int> m_NextID;
+ int m_ThreadCount;
+ bool m_Spawned{false};
+
+ mutable std::mutex m_Mutex;
+ std::condition_variable m_CVEmpty;
+ std::condition_variable m_CVFull;
+ std::condition_variable m_CVStarved;
+ boost::thread_group m_Threads;
+ size_t m_MaxItems;
+ bool m_Stopped{false};
+ int m_Processing{0};
+ std::priority_queue<Task, std::deque<Task> > m_Tasks;
+ int m_NextTaskID{0};
+ ExceptionCallback m_ExceptionCallback;
+ std::vector<boost::exception_ptr> m_Exceptions;
+ Timer::Ptr m_StatusTimer;
+ double m_StatusTimerTimeout;
+ LogSeverity m_StatsLogLevel;
+
+ RingBuffer m_TaskStats;
+ size_t m_PendingTasks{0};
+ double m_PendingTasksTimestamp{0};
+
+ void WorkerThreadProc();
+ void StatusTimerHandler();
+
+ void RunTaskFunction(const TaskFunction& func);
+};
+
+}
+
+#endif /* WORKQUEUE_H */
diff --git a/lib/checker/CMakeLists.txt b/lib/checker/CMakeLists.txt
new file mode 100644
index 0000000..5a8334c
--- /dev/null
+++ b/lib/checker/CMakeLists.txt
@@ -0,0 +1,34 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+mkclass_target(checkercomponent.ti checkercomponent-ti.cpp checkercomponent-ti.hpp)
+
+set(checker_SOURCES
+ checkercomponent.cpp checkercomponent.hpp checkercomponent-ti.hpp
+)
+
+if(ICINGA2_UNITY_BUILD)
+ mkunity_target(checker checker checker_SOURCES)
+endif()
+
+add_library(checker OBJECT ${checker_SOURCES})
+
+add_dependencies(checker base config icinga remote)
+
+set_target_properties (
+ checker PROPERTIES
+ FOLDER Components
+)
+
+install_if_not_exists(
+ ${PROJECT_SOURCE_DIR}/etc/icinga2/features-available/checker.conf
+ ${ICINGA2_CONFIGDIR}/features-available
+)
+
+if(NOT WIN32)
+ install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_CONFIGDIR}/features-enabled\")")
+ install(CODE "execute_process(COMMAND \"${CMAKE_COMMAND}\" -E create_symlink ../features-available/checker.conf \"\$ENV{DESTDIR}${ICINGA2_FULL_CONFIGDIR}/features-enabled/checker.conf\")")
+else()
+ install_if_not_exists(${PROJECT_SOURCE_DIR}/etc/icinga2/features-enabled/checker.conf ${ICINGA2_CONFIGDIR}/features-enabled)
+endif()
+
+set(CPACK_NSIS_EXTRA_INSTALL_COMMANDS "${CPACK_NSIS_EXTRA_INSTALL_COMMANDS}" PARENT_SCOPE)
diff --git a/lib/checker/checkercomponent.cpp b/lib/checker/checkercomponent.cpp
new file mode 100644
index 0000000..d92101f
--- /dev/null
+++ b/lib/checker/checkercomponent.cpp
@@ -0,0 +1,358 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "checker/checkercomponent.hpp"
+#include "checker/checkercomponent-ti.cpp"
+#include "icinga/icingaapplication.hpp"
+#include "icinga/cib.hpp"
+#include "remote/apilistener.hpp"
+#include "base/configuration.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/utility.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/logger.hpp"
+#include "base/exception.hpp"
+#include "base/convert.hpp"
+#include "base/statsfunction.hpp"
+#include <chrono>
+
+using namespace icinga;
+
+REGISTER_TYPE(CheckerComponent);
+
+REGISTER_STATSFUNCTION(CheckerComponent, &CheckerComponent::StatsFunc);
+
+void CheckerComponent::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata)
+{
+ DictionaryData nodes;
+
+ for (const CheckerComponent::Ptr& checker : ConfigType::GetObjectsByType<CheckerComponent>()) {
+ unsigned long idle = checker->GetIdleCheckables();
+ unsigned long pending = checker->GetPendingCheckables();
+
+ nodes.emplace_back(checker->GetName(), new Dictionary({
+ { "idle", idle },
+ { "pending", pending }
+ }));
+
+ String perfdata_prefix = "checkercomponent_" + checker->GetName() + "_";
+ perfdata->Add(new PerfdataValue(perfdata_prefix + "idle", Convert::ToDouble(idle)));
+ perfdata->Add(new PerfdataValue(perfdata_prefix + "pending", Convert::ToDouble(pending)));
+ }
+
+ status->Set("checkercomponent", new Dictionary(std::move(nodes)));
+}
+
+void CheckerComponent::OnConfigLoaded()
+{
+ ConfigObject::OnActiveChanged.connect([this](const ConfigObject::Ptr& object, const Value&) {
+ ObjectHandler(object);
+ });
+ ConfigObject::OnPausedChanged.connect([this](const ConfigObject::Ptr& object, const Value&) {
+ ObjectHandler(object);
+ });
+
+ Checkable::OnNextCheckChanged.connect([this](const Checkable::Ptr& checkable, const Value&) {
+ NextCheckChangedHandler(checkable);
+ });
+}
+
+void CheckerComponent::Start(bool runtimeCreated)
+{
+ ObjectImpl<CheckerComponent>::Start(runtimeCreated);
+
+ Log(LogInformation, "CheckerComponent")
+ << "'" << GetName() << "' started.";
+
+
+ m_Thread = std::thread([this]() { CheckThreadProc(); });
+
+ m_ResultTimer = Timer::Create();
+ m_ResultTimer->SetInterval(5);
+ m_ResultTimer->OnTimerExpired.connect([this](const Timer * const&) { ResultTimerHandler(); });
+ m_ResultTimer->Start();
+}
+
+void CheckerComponent::Stop(bool runtimeRemoved)
+{
+ {
+ std::unique_lock<std::mutex> lock(m_Mutex);
+ m_Stopped = true;
+ m_CV.notify_all();
+ }
+
+ m_ResultTimer->Stop(true);
+ m_Thread.join();
+
+ Log(LogInformation, "CheckerComponent")
+ << "'" << GetName() << "' stopped.";
+
+ ObjectImpl<CheckerComponent>::Stop(runtimeRemoved);
+}
+
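+/**
+ * Main loop of the check scheduler thread: waits until the next idle checkable
+ * is due, skips and reschedules it if its checks are currently disabled or it
+ * is outside its check period, and otherwise moves it to the pending set and
+ * queues the actual check execution asynchronously.
+ */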
+void CheckerComponent::CheckThreadProc()
+{
+ Utility::SetThreadName("Check Scheduler");
+ IcingaApplication::Ptr icingaApp = IcingaApplication::GetInstance();
+
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ for (;;) {
+ typedef boost::multi_index::nth_index<CheckableSet, 1>::type CheckTimeView;
+ CheckTimeView& idx = boost::get<1>(m_IdleCheckables);
+
+ while (idx.begin() == idx.end() && !m_Stopped)
+ m_CV.wait(lock);
+
+ if (m_Stopped)
+ break;
+
+ auto it = idx.begin();
+ CheckableScheduleInfo csi = *it;
+
+ double wait = csi.NextCheck - Utility::GetTime();
+
+//#ifdef I2_DEBUG
+// Log(LogDebug, "CheckerComponent")
+// << "Pending checks " << Checkable::GetPendingChecks()
+// << " vs. max concurrent checks " << icingaApp->GetMaxConcurrentChecks() << ".";
+//#endif /* I2_DEBUG */
+
+ if (Checkable::GetPendingChecks() >= icingaApp->GetMaxConcurrentChecks())
+ wait = 0.5;
+
+ if (wait > 0) {
+ /* Wait for the next check. */
+ m_CV.wait_for(lock, std::chrono::duration<double>(wait));
+
+ continue;
+ }
+
+ Checkable::Ptr checkable = csi.Object;
+
+ m_IdleCheckables.erase(checkable);
+
+ bool forced = checkable->GetForceNextCheck();
+ bool check = true;
+ bool notifyNextCheck = false;
+
+ if (!forced) {
+ if (!checkable->IsReachable(DependencyCheckExecution)) {
+ Log(LogNotice, "CheckerComponent")
+ << "Skipping check for object '" << checkable->GetName() << "': Dependency failed.";
+
+ check = false;
+ notifyNextCheck = true;
+ }
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ if (host && !service && (!checkable->GetEnableActiveChecks() || !icingaApp->GetEnableHostChecks())) {
+ Log(LogNotice, "CheckerComponent")
+ << "Skipping check for host '" << host->GetName() << "': active host checks are disabled";
+ check = false;
+ }
+ if (host && service && (!checkable->GetEnableActiveChecks() || !icingaApp->GetEnableServiceChecks())) {
+ Log(LogNotice, "CheckerComponent")
+ << "Skipping check for service '" << service->GetName() << "': active service checks are disabled";
+ check = false;
+ }
+
+ TimePeriod::Ptr tp = checkable->GetCheckPeriod();
+
+ if (tp && !tp->IsInside(Utility::GetTime())) {
+ Log(LogNotice, "CheckerComponent")
+ << "Skipping check for object '" << checkable->GetName()
+ << "': not in check period '" << tp->GetName() << "'";
+
+ check = false;
+ notifyNextCheck = true;
+ }
+ }
+
+ /* reschedule the checkable if checks are disabled */
+ if (!check) {
+ m_IdleCheckables.insert(GetCheckableScheduleInfo(checkable));
+ lock.unlock();
+
+ Log(LogDebug, "CheckerComponent")
+ << "Checks for checkable '" << checkable->GetName() << "' are disabled. Rescheduling check.";
+
+ checkable->UpdateNextCheck();
+
+ if (notifyNextCheck) {
+ // Trigger update event for Icinga DB
+ Checkable::OnNextCheckUpdated(checkable);
+ }
+
+ lock.lock();
+
+ continue;
+ }
+
+
+ csi = GetCheckableScheduleInfo(checkable);
+
+ Log(LogDebug, "CheckerComponent")
+ << "Scheduling info for checkable '" << checkable->GetName() << "' ("
+ << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", checkable->GetNextCheck()) << "): Object '"
+ << csi.Object->GetName() << "', Next Check: "
+ << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", csi.NextCheck) << "(" << csi.NextCheck << ").";
+
+ m_PendingCheckables.insert(csi);
+
+ lock.unlock();
+
+ if (forced) {
+ ObjectLock olock(checkable);
+ checkable->SetForceNextCheck(false);
+ }
+
+ Log(LogDebug, "CheckerComponent")
+ << "Executing check for '" << checkable->GetName() << "'";
+
+ Checkable::IncreasePendingChecks();
+
+		/*
+		 * Explicitly capture a CheckerComponent::Ptr to keep the reference count up
+		 * while the callback is active, making it crash safe.
+		 */
+ CheckerComponent::Ptr checkComponent(this);
+
+ Utility::QueueAsyncCallback([this, checkComponent, checkable]() { ExecuteCheckHelper(checkable); });
+
+ lock.lock();
+ }
+}
+
+void CheckerComponent::ExecuteCheckHelper(const Checkable::Ptr& checkable)
+{
+ try {
+ checkable->ExecuteCheck();
+ } catch (const std::exception& ex) {
+ CheckResult::Ptr cr = new CheckResult();
+ cr->SetState(ServiceUnknown);
+
+ String output = "Exception occurred while checking '" + checkable->GetName() + "': " + DiagnosticInformation(ex);
+ cr->SetOutput(output);
+
+ double now = Utility::GetTime();
+ cr->SetScheduleStart(now);
+ cr->SetScheduleEnd(now);
+ cr->SetExecutionStart(now);
+ cr->SetExecutionEnd(now);
+
+ checkable->ProcessCheckResult(cr);
+
+ Log(LogCritical, "checker", output);
+ }
+
+ Checkable::DecreasePendingChecks();
+
+ {
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ /* remove the object from the list of pending objects; if it's not in the
+ * list this was a manual (i.e. forced) check and we must not re-add the
+ * object to the list because it's already there. */
+ auto it = m_PendingCheckables.find(checkable);
+
+ if (it != m_PendingCheckables.end()) {
+ m_PendingCheckables.erase(it);
+
+ if (checkable->IsActive())
+ m_IdleCheckables.insert(GetCheckableScheduleInfo(checkable));
+
+ m_CV.notify_all();
+ }
+ }
+
+ Log(LogDebug, "CheckerComponent")
+ << "Check finished for object '" << checkable->GetName() << "'";
+}
+
+void CheckerComponent::ResultTimerHandler()
+{
+ std::ostringstream msgbuf;
+
+ {
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ msgbuf << "Pending checkables: " << m_PendingCheckables.size() << "; Idle checkables: " << m_IdleCheckables.size() << "; Checks/s: "
+ << (CIB::GetActiveHostChecksStatistics(60) + CIB::GetActiveServiceChecksStatistics(60)) / 60.0;
+ }
+
+ Log(LogNotice, "CheckerComponent", msgbuf.str());
+}
+
+void CheckerComponent::ObjectHandler(const ConfigObject::Ptr& object)
+{
+ Checkable::Ptr checkable = dynamic_pointer_cast<Checkable>(object);
+
+ if (!checkable)
+ return;
+
+ Zone::Ptr zone = Zone::GetByName(checkable->GetZoneName());
+ bool same_zone = (!zone || Zone::GetLocalZone() == zone);
+
+ {
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ if (object->IsActive() && !object->IsPaused() && same_zone) {
+ if (m_PendingCheckables.find(checkable) != m_PendingCheckables.end())
+ return;
+
+ m_IdleCheckables.insert(GetCheckableScheduleInfo(checkable));
+ } else {
+ m_IdleCheckables.erase(checkable);
+ m_PendingCheckables.erase(checkable);
+ }
+
+ m_CV.notify_all();
+ }
+}
+
+CheckableScheduleInfo CheckerComponent::GetCheckableScheduleInfo(const Checkable::Ptr& checkable)
+{
+ CheckableScheduleInfo csi;
+ csi.Object = checkable;
+ csi.NextCheck = checkable->GetNextCheck();
+ return csi;
+}
+
+void CheckerComponent::NextCheckChangedHandler(const Checkable::Ptr& checkable)
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ /* remove and re-insert the object from the set in order to force an index update */
+ typedef boost::multi_index::nth_index<CheckableSet, 0>::type CheckableView;
+ CheckableView& idx = boost::get<0>(m_IdleCheckables);
+
+ auto it = idx.find(checkable);
+
+ if (it == idx.end())
+ return;
+
+ idx.erase(checkable);
+
+ CheckableScheduleInfo csi = GetCheckableScheduleInfo(checkable);
+ idx.insert(csi);
+
+ m_CV.notify_all();
+}
+
+unsigned long CheckerComponent::GetIdleCheckables()
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ return m_IdleCheckables.size();
+}
+
+unsigned long CheckerComponent::GetPendingCheckables()
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ return m_PendingCheckables.size();
+}
diff --git a/lib/checker/checkercomponent.hpp b/lib/checker/checkercomponent.hpp
new file mode 100644
index 0000000..5ace757
--- /dev/null
+++ b/lib/checker/checkercomponent.hpp
@@ -0,0 +1,99 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CHECKERCOMPONENT_H
+#define CHECKERCOMPONENT_H
+
+#include "checker/checkercomponent-ti.hpp"
+#include "icinga/service.hpp"
+#include "base/configobject.hpp"
+#include "base/timer.hpp"
+#include "base/utility.hpp"
+#include <boost/multi_index_container.hpp>
+#include <boost/multi_index/ordered_index.hpp>
+#include <boost/multi_index/key_extractors.hpp>
+#include <condition_variable>
+#include <mutex>
+#include <thread>
+
+namespace icinga
+{
+
+/**
+ * @ingroup checker
+ */
+struct CheckableScheduleInfo
+{
+ Checkable::Ptr Object;
+ double NextCheck;
+};
+
+/**
+ * @ingroup checker
+ */
+struct CheckableNextCheckExtractor
+{
+ typedef double result_type;
+
+ /**
+ * @threadsafety Always.
+ */
+ double operator()(const CheckableScheduleInfo& csi)
+ {
+ return csi.NextCheck;
+ }
+};
+
+/**
+ * @ingroup checker
+ */
+class CheckerComponent final : public ObjectImpl<CheckerComponent>
+{
+public:
+ DECLARE_OBJECT(CheckerComponent);
+ DECLARE_OBJECTNAME(CheckerComponent);
+
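+	/* Scheduled checkables with two views: index 0 finds an entry by its
+	 * Checkable pointer, index 1 keeps entries ordered by NextCheck so the
+	 * scheduler can always pick the next due check first. */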
+ typedef boost::multi_index_container<
+ CheckableScheduleInfo,
+ boost::multi_index::indexed_by<
+ boost::multi_index::ordered_unique<boost::multi_index::member<CheckableScheduleInfo, Checkable::Ptr, &CheckableScheduleInfo::Object> >,
+ boost::multi_index::ordered_non_unique<CheckableNextCheckExtractor>
+ >
+ > CheckableSet;
+
+ void OnConfigLoaded() override;
+ void Start(bool runtimeCreated) override;
+ void Stop(bool runtimeRemoved) override;
+
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+ unsigned long GetIdleCheckables();
+ unsigned long GetPendingCheckables();
+
+private:
+ std::mutex m_Mutex;
+ std::condition_variable m_CV;
+ bool m_Stopped{false};
+ std::thread m_Thread;
+
+ CheckableSet m_IdleCheckables;
+ CheckableSet m_PendingCheckables;
+
+ Timer::Ptr m_ResultTimer;
+
+ void CheckThreadProc();
+ void ResultTimerHandler();
+
+ void ExecuteCheckHelper(const Checkable::Ptr& checkable);
+
+ void AdjustCheckTimer();
+
+ void ObjectHandler(const ConfigObject::Ptr& object);
+ void NextCheckChangedHandler(const Checkable::Ptr& checkable);
+
+ void RescheduleCheckTimer();
+
+ static CheckableScheduleInfo GetCheckableScheduleInfo(const Checkable::Ptr& checkable);
+};
+
+}
+
+#endif /* CHECKERCOMPONENT_H */
diff --git a/lib/checker/checkercomponent.ti b/lib/checker/checkercomponent.ti
new file mode 100644
index 0000000..3959aeb
--- /dev/null
+++ b/lib/checker/checkercomponent.ti
@@ -0,0 +1,18 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+
+library checker;
+
+namespace icinga
+{
+
+class CheckerComponent : ConfigObject
+{
+ activation_priority 300;
+
+ /* Has no effect. Keep this here to avoid breaking config changes. */
+ [deprecated, config] int concurrent_checks;
+};
+
+}
diff --git a/lib/cli/CMakeLists.txt b/lib/cli/CMakeLists.txt
new file mode 100644
index 0000000..bbdf801
--- /dev/null
+++ b/lib/cli/CMakeLists.txt
@@ -0,0 +1,49 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+set(cli_SOURCES
+ i2-cli.hpp
+ apisetupcommand.cpp apisetupcommand.hpp
+ apisetuputility.cpp apisetuputility.hpp
+ calistcommand.cpp calistcommand.hpp
+ caremovecommand.cpp caremovecommand.hpp
+ carestorecommand.cpp carestorecommand.hpp
+ casigncommand.cpp casigncommand.hpp
+ clicommand.cpp clicommand.hpp
+ consolecommand.cpp consolecommand.hpp
+ daemoncommand.cpp daemoncommand.hpp
+ daemonutility.cpp daemonutility.hpp
+ editline.hpp
+ featuredisablecommand.cpp featuredisablecommand.hpp
+ featureenablecommand.cpp featureenablecommand.hpp
+ featurelistcommand.cpp featurelistcommand.hpp
+ featureutility.cpp featureutility.hpp
+ internalsignalcommand.cpp internalsignalcommand.hpp
+ nodesetupcommand.cpp nodesetupcommand.hpp
+ nodeutility.cpp nodeutility.hpp
+ nodewizardcommand.cpp nodewizardcommand.hpp
+ objectlistcommand.cpp objectlistcommand.hpp
+ objectlistutility.cpp objectlistutility.hpp
+ pkinewcacommand.cpp pkinewcacommand.hpp
+ pkinewcertcommand.cpp pkinewcertcommand.hpp
+ pkirequestcommand.cpp pkirequestcommand.hpp
+ pkisavecertcommand.cpp pkisavecertcommand.hpp
+ pkisigncsrcommand.cpp pkisigncsrcommand.hpp
+ pkiticketcommand.cpp pkiticketcommand.hpp
+ pkiverifycommand.cpp pkiverifycommand.hpp
+ variablegetcommand.cpp variablegetcommand.hpp
+ variablelistcommand.cpp variablelistcommand.hpp
+ variableutility.cpp variableutility.hpp
+)
+
+if(ICINGA2_UNITY_BUILD)
+ mkunity_target(cli cli cli_SOURCES)
+endif()
+
+add_library(cli OBJECT ${cli_SOURCES})
+
+add_dependencies(cli base config icinga remote)
+
+set_target_properties (
+ cli PROPERTIES
+ FOLDER Lib
+)
diff --git a/lib/cli/apisetupcommand.cpp b/lib/cli/apisetupcommand.cpp
new file mode 100644
index 0000000..81b9d8d
--- /dev/null
+++ b/lib/cli/apisetupcommand.cpp
@@ -0,0 +1,59 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/apisetupcommand.hpp"
+#include "cli/apisetuputility.hpp"
+#include "cli/variableutility.hpp"
+#include "base/logger.hpp"
+#include "base/console.hpp"
+#include <iostream>
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+REGISTER_CLICOMMAND("api/setup", ApiSetupCommand);
+
+String ApiSetupCommand::GetDescription() const
+{
+ return "Setup for Icinga 2 API.";
+}
+
+String ApiSetupCommand::GetShortDescription() const
+{
+ return "setup for API";
+}
+
+ImpersonationLevel ApiSetupCommand::GetImpersonationLevel() const
+{
+ return ImpersonateIcinga;
+}
+
+void ApiSetupCommand::InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const
+{
+ visibleDesc.add_options()
+ ("cn", po::value<std::string>(), "The certificate's common name");
+}
+
+/**
+ * The entry point for the "api setup" CLI command.
+ *
+ * @returns An exit status.
+ */
+int ApiSetupCommand::Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const
+{
+ String cn;
+
+ if (vm.count("cn")) {
+ cn = vm["cn"].as<std::string>();
+ } else {
+ cn = VariableUtility::GetVariable("NodeName");
+
+ if (cn.IsEmpty())
+ cn = Utility::GetFQDN();
+ }
+
+ if (!ApiSetupUtility::SetupMaster(cn, true))
+ return 1;
+
+ return 0;
+}
diff --git a/lib/cli/apisetupcommand.hpp b/lib/cli/apisetupcommand.hpp
new file mode 100644
index 0000000..be2693d
--- /dev/null
+++ b/lib/cli/apisetupcommand.hpp
@@ -0,0 +1,31 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef APISETUPCOMMAND_H
+#define APISETUPCOMMAND_H
+
+#include "cli/clicommand.hpp"
+
+namespace icinga
+{
+
+/**
+ * The "api setup" command.
+ *
+ * @ingroup cli
+ */
+class ApiSetupCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ApiSetupCommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ void InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+ ImpersonationLevel GetImpersonationLevel() const override;
+};
+
+}
+
+#endif /* APISETUPCOMMAND_H */
diff --git a/lib/cli/apisetuputility.cpp b/lib/cli/apisetuputility.cpp
new file mode 100644
index 0000000..8bdd767
--- /dev/null
+++ b/lib/cli/apisetuputility.cpp
@@ -0,0 +1,205 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/apisetuputility.hpp"
+#include "cli/nodeutility.hpp"
+#include "cli/featureutility.hpp"
+#include "remote/apilistener.hpp"
+#include "remote/pkiutility.hpp"
+#include "base/atomic-file.hpp"
+#include "base/logger.hpp"
+#include "base/console.hpp"
+#include "base/application.hpp"
+#include "base/tlsutility.hpp"
+#include "base/scriptglobal.hpp"
+#include "base/exception.hpp"
+#include "base/utility.hpp"
+#include <boost/algorithm/string/join.hpp>
+#include <boost/algorithm/string/replace.hpp>
+#include <boost/algorithm/string/case_conv.hpp>
+#include <iostream>
+#include <string>
+#include <fstream>
+#include <vector>
+
+using namespace icinga;
+
+String ApiSetupUtility::GetConfdPath()
+{
+ return Configuration::ConfigDir + "/conf.d";
+}
+
+String ApiSetupUtility::GetApiUsersConfPath()
+{
+ return ApiSetupUtility::GetConfdPath() + "/api-users.conf";
+}
+
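+/**
+ * Runs the full API master setup: generates certificates, creates the default
+ * ApiUser, enables the 'api' feature and updates the NodeName/ZoneName constants.
+ */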
+bool ApiSetupUtility::SetupMaster(const String& cn, bool prompt_restart)
+{
+ if (!SetupMasterCertificates(cn))
+ return false;
+
+ if (!SetupMasterApiUser())
+ return false;
+
+ if (!SetupMasterEnableApi())
+ return false;
+
+ if (!SetupMasterUpdateConstants(cn))
+ return false;
+
+ if (prompt_restart) {
+ std::cout << "Done.\n\n";
+ std::cout << "Now restart your Icinga 2 daemon to finish the installation!\n\n";
+ }
+
+ return true;
+}
+
+bool ApiSetupUtility::SetupMasterCertificates(const String& cn)
+{
+ Log(LogInformation, "cli", "Generating new CA.");
+
+ if (PkiUtility::NewCa() > 0)
+ Log(LogWarning, "cli", "Found CA, skipping and using the existing one.");
+
+ String pki_path = ApiListener::GetCertsDir();
+ Utility::MkDirP(pki_path, 0700);
+
+ String user = Configuration::RunAsUser;
+ String group = Configuration::RunAsGroup;
+
+ if (!Utility::SetFileOwnership(pki_path, user, group)) {
+ Log(LogWarning, "cli")
+ << "Cannot set ownership for user '" << user << "' group '" << group << "' on file '" << pki_path << "'.";
+ }
+
+ String key = pki_path + "/" + cn + ".key";
+ String csr = pki_path + "/" + cn + ".csr";
+
+ if (Utility::PathExists(key)) {
+ Log(LogInformation, "cli")
+ << "Private key file '" << key << "' already exists, not generating new certificate.";
+ return true;
+ }
+
+ Log(LogInformation, "cli")
+ << "Generating new CSR in '" << csr << "'.";
+
+ if (Utility::PathExists(key))
+ NodeUtility::CreateBackupFile(key, true);
+ if (Utility::PathExists(csr))
+ NodeUtility::CreateBackupFile(csr);
+
+ if (PkiUtility::NewCert(cn, key, csr, "") > 0) {
+ Log(LogCritical, "cli", "Failed to create certificate signing request.");
+ return false;
+ }
+
+ /* Sign the CSR with the CA key */
+ String cert = pki_path + "/" + cn + ".crt";
+
+ Log(LogInformation, "cli")
+ << "Signing CSR with CA and writing certificate to '" << cert << "'.";
+
+ if (Utility::PathExists(cert))
+ NodeUtility::CreateBackupFile(cert);
+
+ if (PkiUtility::SignCsr(csr, cert) != 0) {
+ Log(LogCritical, "cli", "Could not sign CSR.");
+ return false;
+ }
+
+	/* Copy the CA certificate into the local certificate directory. */
+ String ca_path = ApiListener::GetCaDir();
+ String ca = ca_path + "/ca.crt";
+ String ca_key = ca_path + "/ca.key";
+ String target_ca = pki_path + "/ca.crt";
+
+ Log(LogInformation, "cli")
+ << "Copying CA certificate to '" << target_ca << "'.";
+
+ if (Utility::PathExists(target_ca))
+ NodeUtility::CreateBackupFile(target_ca);
+
+ /* does not overwrite existing files! */
+ Utility::CopyFile(ca, target_ca);
+
+ /* fix permissions: root -> icinga daemon user */
+ for (const String& file : { ca_path, ca, ca_key, target_ca, key, csr, cert }) {
+ if (!Utility::SetFileOwnership(file, user, group)) {
+ Log(LogWarning, "cli")
+ << "Cannot set ownership for user '" << user << "' group '" << group << "' on file '" << file << "'.";
+ }
+ }
+
+ return true;
+}
+
+bool ApiSetupUtility::SetupMasterApiUser()
+{
+ if (!Utility::PathExists(GetConfdPath())) {
+ Log(LogWarning, "cli")
+			<< "Path '" << GetConfdPath() << "' does not exist.";
+ Log(LogInformation, "cli")
+ << "Creating path '" << GetConfdPath() << "'.";
+
+ Utility::MkDirP(GetConfdPath(), 0755);
+ }
+
+ String api_username = "root"; // TODO make this available as cli parameter?
+ String api_password = RandomString(8);
+ String apiUsersPath = GetConfdPath() + "/api-users.conf";
+
+ if (Utility::PathExists(apiUsersPath)) {
+ Log(LogInformation, "cli")
+ << "API user config file '" << apiUsersPath << "' already exists, not creating config file.";
+ return true;
+ }
+
+ Log(LogInformation, "cli")
+ << "Adding new ApiUser '" << api_username << "' in '" << apiUsersPath << "'.";
+
+ NodeUtility::CreateBackupFile(apiUsersPath);
+
+ AtomicFile fp (apiUsersPath, 0644);
+
+ fp << "/**\n"
+ << " * The ApiUser objects are used for authentication against the API.\n"
+ << " */\n"
+ << "object ApiUser \"" << api_username << "\" {\n"
+ << " password = \"" << api_password << "\"\n"
+ << " // client_cn = \"\"\n"
+ << "\n"
+ << " permissions = [ \"*\" ]\n"
+ << "}\n";
+
+ fp.Commit();
+
+ return true;
+}
+
+bool ApiSetupUtility::SetupMasterEnableApi()
+{
+ /*
+	 * Ensure the api-users.conf file is included when conf.d inclusion is disabled.
+ */
+ if (!NodeUtility::GetConfigurationIncludeState("\"conf.d\"", true))
+ NodeUtility::UpdateConfiguration("\"conf.d/api-users.conf\"", true, false);
+
+ /*
+ * Enable the API feature
+ */
+ Log(LogInformation, "cli", "Enabling the 'api' feature.");
+
+ FeatureUtility::EnableFeatures({ "api" });
+
+ return true;
+}
+
+bool ApiSetupUtility::SetupMasterUpdateConstants(const String& cn)
+{
+ NodeUtility::UpdateConstant("NodeName", cn);
+ NodeUtility::UpdateConstant("ZoneName", cn);
+
+ return true;
+}
diff --git a/lib/cli/apisetuputility.hpp b/lib/cli/apisetuputility.hpp
new file mode 100644
index 0000000..d361446
--- /dev/null
+++ b/lib/cli/apisetuputility.hpp
@@ -0,0 +1,39 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef APISETUPUTILITY_H
+#define APISETUPUTILITY_H
+
+#include "base/i2-base.hpp"
+#include "cli/i2-cli.hpp"
+#include "base/dictionary.hpp"
+#include "base/array.hpp"
+#include "base/value.hpp"
+#include "base/string.hpp"
+#include <vector>
+
+namespace icinga
+{
+
+/**
+ * @ingroup cli
+ */
+class ApiSetupUtility
+{
+public:
+ static bool SetupMaster(const String& cn, bool prompt_restart = false);
+
+ static bool SetupMasterCertificates(const String& cn);
+ static bool SetupMasterApiUser();
+ static bool SetupMasterEnableApi();
+ static bool SetupMasterUpdateConstants(const String& cn);
+
+ static String GetConfdPath();
+ static String GetApiUsersConfPath();
+
+private:
+ ApiSetupUtility();
+};
+
+}
+
+#endif /* APISETUPUTILITY_H */
diff --git a/lib/cli/calistcommand.cpp b/lib/cli/calistcommand.cpp
new file mode 100644
index 0000000..f693ad7
--- /dev/null
+++ b/lib/cli/calistcommand.cpp
@@ -0,0 +1,89 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/calistcommand.hpp"
+#include "remote/apilistener.hpp"
+#include "remote/pkiutility.hpp"
+#include "base/logger.hpp"
+#include "base/application.hpp"
+#include "base/tlsutility.hpp"
+#include "base/json.hpp"
+#include <iostream>
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+REGISTER_CLICOMMAND("ca/list", CAListCommand);
+
+/**
+ * Provide a long CLI description sentence.
+ *
+ * @return text
+ */
+String CAListCommand::GetDescription() const
+{
+ return "Lists pending certificate signing requests.";
+}
+
+/**
+ * Provide a short CLI description.
+ *
+ * @return text
+ */
+String CAListCommand::GetShortDescription() const
+{
+ return "lists pending certificate signing requests";
+}
+
+/**
+ * Initialize available CLI parameters.
+ *
+ * @param visibleDesc Register visible parameters.
+ * @param hiddenDesc Register hidden parameters.
+ */
+void CAListCommand::InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const
+{
+ visibleDesc.add_options()
+ ("all", "List all certificate signing requests, including signed. Note: Old requests are automatically cleaned by Icinga after 1 week.")
+ ("removed", "List all removed CSRs (for use with 'ca restore')")
+ ("json", "encode output as JSON");
+}
+
+/**
+ * The entry point for the "ca list" CLI command.
+ *
+ * @return An exit status.
+ */
+int CAListCommand::Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const
+{
+ Dictionary::Ptr requests = PkiUtility::GetCertificateRequests(vm.count("removed"));
+
+ if (vm.count("json"))
+ std::cout << JsonEncode(requests);
+ else {
+ ObjectLock olock(requests);
+
+ std::cout << "Fingerprint | Timestamp | Signed | Subject\n";
+ std::cout << "-----------------------------------------------------------------|--------------------------|--------|--------\n";
+
+ for (auto& kv : requests) {
+ Dictionary::Ptr request = kv.second;
+
+ /* Skip signed requests by default. */
+ if (!vm.count("all") && request->Contains("cert_response"))
+ continue;
+
+ std::cout << kv.first
+ << " | "
+/* << Utility::FormatDateTime("%Y/%m/%d %H:%M:%S", request->Get("timestamp")) */
+ << request->Get("timestamp")
+ << " | "
+ << (request->Contains("cert_response") ? "*" : " ") << " "
+ << " | "
+ << request->Get("subject")
+ << "\n";
+ }
+ }
+
+ return 0;
+}
diff --git a/lib/cli/calistcommand.hpp b/lib/cli/calistcommand.hpp
new file mode 100644
index 0000000..ddf44d4
--- /dev/null
+++ b/lib/cli/calistcommand.hpp
@@ -0,0 +1,33 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CALISTCOMMAND_H
+#define CALISTCOMMAND_H
+
+#include "cli/clicommand.hpp"
+
+namespace icinga
+{
+
+/**
+ * The "ca list" command.
+ *
+ * @ingroup cli
+ */
+class CAListCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(CAListCommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ void InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+
+private:
+ static void PrintRequest(const String& requestFile);
+};
+
+}
+
+#endif /* CALISTCOMMAND_H */
diff --git a/lib/cli/caremovecommand.cpp b/lib/cli/caremovecommand.cpp
new file mode 100644
index 0000000..d894494
--- /dev/null
+++ b/lib/cli/caremovecommand.cpp
@@ -0,0 +1,93 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/caremovecommand.hpp"
+#include "base/logger.hpp"
+#include "base/application.hpp"
+#include "base/tlsutility.hpp"
+#include "remote/apilistener.hpp"
+
+using namespace icinga;
+
+REGISTER_CLICOMMAND("ca/remove", CARemoveCommand);
+
+/**
+ * Provide a long CLI description sentence.
+ *
+ * @return text
+ */
+String CARemoveCommand::GetDescription() const
+{
+ return "Removes an outstanding certificate request.";
+}
+
+/**
+ * Provide a short CLI description.
+ *
+ * @return text
+ */
+String CARemoveCommand::GetShortDescription() const
+{
+ return "removes an outstanding certificate request";
+}
+
+/**
+ * Define minimum arguments without key parameter.
+ *
+ * @return number of arguments
+ */
+int CARemoveCommand::GetMinArguments() const
+{
+ return 1;
+}
+
+/**
+ * Impersonate as Icinga user.
+ *
+ * @return impersonate level
+ */
+ImpersonationLevel CARemoveCommand::GetImpersonationLevel() const
+{
+ return ImpersonateIcinga;
+}
+
+/**
+ * The entry point for the "ca remove" CLI command.
+ *
+ * @returns An exit status.
+ */
+int CARemoveCommand::Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const
+{
+ String fingerPrint = ap[0];
+ String requestFile = ApiListener::GetCertificateRequestsDir() + "/" + fingerPrint + ".json";
+
+ if (!Utility::PathExists(requestFile)) {
+ Log(LogCritical, "cli")
+ << "No request exists for fingerprint '" << fingerPrint << "'.";
+ return 1;
+ }
+
+ Dictionary::Ptr request = Utility::LoadJsonFile(requestFile);
+ std::shared_ptr<X509> certRequest = StringToCertificate(request->Get("cert_request"));
+
+ if (!certRequest) {
+ Log(LogCritical, "cli", "Certificate request is invalid. Could not parse X.509 certificate for the 'cert_request' attribute.");
+ return 1;
+ }
+
+ String cn = GetCertificateCN(certRequest);
+
+ if (request->Contains("cert_response")) {
+ Log(LogCritical, "cli")
+ << "Certificate request for CN '" << cn << "' already signed, removal is not possible.";
+ return 1;
+ }
+
+ Utility::SaveJsonFile(ApiListener::GetCertificateRequestsDir() + "/" + fingerPrint + ".removed", 0600, request);
+
+ Utility::Remove(requestFile);
+
+ Log(LogInformation, "cli")
+		<< "Certificate request for CN '" << cn << "' removed.";
+
+ return 0;
+}
diff --git a/lib/cli/caremovecommand.hpp b/lib/cli/caremovecommand.hpp
new file mode 100644
index 0000000..2da92d3
--- /dev/null
+++ b/lib/cli/caremovecommand.hpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CAREMOVECOMMAND_H
+#define CAREMOVECOMMAND_H
+
+#include "cli/clicommand.hpp"
+
+namespace icinga
+{
+
+/**
+ * The "ca remove" command.
+ *
+ * @ingroup cli
+ */
+class CARemoveCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(CARemoveCommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ int GetMinArguments() const override;
+ ImpersonationLevel GetImpersonationLevel() const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+};
+
+}
+
+#endif /* CAREMOVECOMMAND_H */
diff --git a/lib/cli/carestorecommand.cpp b/lib/cli/carestorecommand.cpp
new file mode 100644
index 0000000..5020368
--- /dev/null
+++ b/lib/cli/carestorecommand.cpp
@@ -0,0 +1,88 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/carestorecommand.hpp"
+#include "base/logger.hpp"
+#include "base/application.hpp"
+#include "base/tlsutility.hpp"
+#include "remote/apilistener.hpp"
+
+using namespace icinga;
+
+REGISTER_CLICOMMAND("ca/restore", CARestoreCommand);
+
+/**
+ * Provide a long CLI description sentence.
+ *
+ * @return text
+ */
+String CARestoreCommand::GetDescription() const
+{
+ return "Restores a previously removed certificate request.";
+}
+
+/**
+ * Provide a short CLI description.
+ *
+ * @return text
+ */
+String CARestoreCommand::GetShortDescription() const
+{
+ return "restores a removed certificate request";
+}
+
+/**
+ * Define the minimum number of positional arguments (i.e. those given without a key parameter).
+ *
+ * @return number of arguments
+ */
+int CARestoreCommand::GetMinArguments() const
+{
+ return 1;
+}
+
+/**
+ * Impersonate the Icinga user.
+ *
+ * @return impersonate level
+ */
+ImpersonationLevel CARestoreCommand::GetImpersonationLevel() const
+{
+ return ImpersonateIcinga;
+}
+
+/**
+ * The entry point for the "ca restore" CLI command.
+ *
+ * @returns An exit status.
+ */
+int CARestoreCommand::Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const
+{
+ String fingerPrint = ap[0];
+ String removedRequestFile = ApiListener::GetCertificateRequestsDir() + "/" + fingerPrint + ".removed";
+
+ if (!Utility::PathExists(removedRequestFile)) {
+ Log(LogCritical, "cli")
+ << "Cannot find removed fingerprint '" << fingerPrint << "', bailing out.";
+ return 1;
+ }
+
+ Dictionary::Ptr request = Utility::LoadJsonFile(removedRequestFile);
+ std::shared_ptr<X509> certRequest = StringToCertificate(request->Get("cert_request"));
+
+ if (!certRequest) {
+ Log(LogCritical, "cli", "Certificate request is invalid. Could not parse X.509 certificate for the 'cert_request' attribute.");
+ /* Purge the file when we know that it is broken. */
+ Utility::Remove(removedRequestFile);
+ return 1;
+ }
+
+ Utility::SaveJsonFile(ApiListener::GetCertificateRequestsDir() + "/" + fingerPrint + ".json", 0600, request);
+
+ Utility::Remove(removedRequestFile);
+
+ Log(LogInformation, "cli")
+ << "Restored certificate request for CN '" << GetCertificateCN(certRequest) << "', sign it with:\n"
+ << "\"icinga2 ca sign " << fingerPrint << "\"";
+
+ return 0;
+}
diff --git a/lib/cli/carestorecommand.hpp b/lib/cli/carestorecommand.hpp
new file mode 100644
index 0000000..74a27df
--- /dev/null
+++ b/lib/cli/carestorecommand.hpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CARESTORECOMMAND_H
+#define CARESTORECOMMAND_H
+
+#include "cli/clicommand.hpp"
+
+namespace icinga
+{
+
+/**
+ * The "ca restore" command.
+ *
+ * @ingroup cli
+ */
+class CARestoreCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(CARestoreCommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ int GetMinArguments() const override;
+ ImpersonationLevel GetImpersonationLevel() const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+};
+
+}
+
+#endif /* CARESTORECOMMAND_H */
diff --git a/lib/cli/casigncommand.cpp b/lib/cli/casigncommand.cpp
new file mode 100644
index 0000000..96d2c2c
--- /dev/null
+++ b/lib/cli/casigncommand.cpp
@@ -0,0 +1,108 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/casigncommand.hpp"
+#include "base/logger.hpp"
+#include "base/application.hpp"
+#include "base/tlsutility.hpp"
+#include "remote/apilistener.hpp"
+
+using namespace icinga;
+
+REGISTER_CLICOMMAND("ca/sign", CASignCommand);
+
+/**
+ * Provide a long CLI description sentence.
+ *
+ * @return text
+ */
+String CASignCommand::GetDescription() const
+{
+ return "Signs an outstanding certificate request.";
+}
+
+/**
+ * Provide a short CLI description.
+ *
+ * @return text
+ */
+String CASignCommand::GetShortDescription() const
+{
+ return "signs an outstanding certificate request";
+}
+
+/**
+ * Define the minimum number of positional arguments (i.e. those given without a key parameter).
+ *
+ * @return number of arguments
+ */
+int CASignCommand::GetMinArguments() const
+{
+ return 1;
+}
+
+/**
+ * Impersonate the Icinga user.
+ *
+ * @return impersonate level
+ */
+ImpersonationLevel CASignCommand::GetImpersonationLevel() const
+{
+ return ImpersonateIcinga;
+}
+
+/**
+ * The entry point for the "ca sign" CLI command.
+ *
+ * @return An exit status.
+ */
+int CASignCommand::Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const
+{
+ String requestFile = ApiListener::GetCertificateRequestsDir() + "/" + ap[0] + ".json";
+
+ if (!Utility::PathExists(requestFile)) {
+ Log(LogCritical, "cli")
+ << "No request exists for fingerprint '" << ap[0] << "'.";
+ return 1;
+ }
+
+ Dictionary::Ptr request = Utility::LoadJsonFile(requestFile);
+
+ if (!request)
+ return 1;
+
+ String certRequestText = request->Get("cert_request");
+
+ std::shared_ptr<X509> certRequest = StringToCertificate(certRequestText);
+
+ if (!certRequest) {
+ Log(LogCritical, "cli", "Certificate request is invalid. Could not parse X.509 certificate for the 'cert_request' attribute.");
+ return 1;
+ }
+
+ std::shared_ptr<X509> certResponse = CreateCertIcingaCA(certRequest);
+
+ BIO *out = BIO_new(BIO_s_mem());
+ X509_NAME_print_ex(out, X509_get_subject_name(certRequest.get()), 0, XN_FLAG_ONELINE & ~ASN1_STRFLGS_ESC_MSB);
+
+ char *data;
+ long length;
+ length = BIO_get_mem_data(out, &data);
+
+ String subject = String(data, data + length);
+ BIO_free(out);
+
+ if (!certResponse) {
+ Log(LogCritical, "cli")
+ << "Could not sign certificate for '" << subject << "'.";
+ return 1;
+ }
+
+ request->Set("cert_response", CertificateToString(certResponse));
+
+ Utility::SaveJsonFile(requestFile, 0600, request);
+
+ Log(LogInformation, "cli")
+ << "Signed certificate for '" << subject << "'.";
+
+ return 0;
+}
diff --git a/lib/cli/casigncommand.hpp b/lib/cli/casigncommand.hpp
new file mode 100644
index 0000000..0089af7
--- /dev/null
+++ b/lib/cli/casigncommand.hpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CASIGNCOMMAND_H
+#define CASIGNCOMMAND_H
+
+#include "cli/clicommand.hpp"
+
+namespace icinga
+{
+
+/**
+ * The "ca sign" command.
+ *
+ * @ingroup cli
+ */
+class CASignCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(CASignCommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ int GetMinArguments() const override;
+ ImpersonationLevel GetImpersonationLevel() const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+};
+
+}
+
+#endif /* CASIGNCOMMAND_H */
diff --git a/lib/cli/clicommand.cpp b/lib/cli/clicommand.cpp
new file mode 100644
index 0000000..cfdce09
--- /dev/null
+++ b/lib/cli/clicommand.cpp
@@ -0,0 +1,373 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/clicommand.hpp"
+#include "base/logger.hpp"
+#include "base/console.hpp"
+#include "base/type.hpp"
+#include "base/serializer.hpp"
+#include <boost/algorithm/string/join.hpp>
+#include <boost/algorithm/string/trim.hpp>
+#include <boost/program_options.hpp>
+#include <algorithm>
+#include <iostream>
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+std::vector<String> icinga::GetBashCompletionSuggestions(const String& type, const String& word)
+{
+ std::vector<String> result;
+
+#ifndef _WIN32
+ String bashArg = "compgen -A " + Utility::EscapeShellArg(type) + " " + Utility::EscapeShellArg(word);
+ String cmd = "bash -c " + Utility::EscapeShellArg(bashArg);
+
+ FILE *fp = popen(cmd.CStr(), "r");
+
+ /* popen() may fail (e.g. if fork() fails); avoid passing a null FILE* to fgets(). */
+ if (!fp)
+ return result;
+
+ char line[4096];
+ while (fgets(line, sizeof(line), fp)) {
+ String wline = line;
+ boost::algorithm::trim_right_if(wline, boost::is_any_of("\r\n"));
+ result.push_back(wline);
+ }
+
+ pclose(fp);
+
+ /* Append a slash if there's only one suggestion and it's a directory */
+ if ((type == "file" || type == "directory") && result.size() == 1) {
+ String path = result[0];
+
+ struct stat statbuf;
+ if (lstat(path.CStr(), &statbuf) >= 0) {
+ if (S_ISDIR(statbuf.st_mode)) {
+ result.clear();
+ result.push_back(path + "/");
+ }
+ }
+ }
+#endif /* _WIN32 */
+
+ return result;
+}
+
+std::vector<String> icinga::GetFieldCompletionSuggestions(const Type::Ptr& type, const String& word)
+{
+ std::vector<String> result;
+
+ for (int i = 0; i < type->GetFieldCount(); i++) {
+ Field field = type->GetFieldInfo(i);
+
+ if (field.Attributes & FANoUserView)
+ continue;
+
+ if (strcmp(field.TypeName, "int") != 0 && strcmp(field.TypeName, "double") != 0
+ && strcmp(field.TypeName, "bool") != 0 && strcmp(field.TypeName, "String") != 0)
+ continue;
+
+ String fname = field.Name;
+
+ String suggestion = fname + "=";
+
+ if (suggestion.Find(word) == 0)
+ result.push_back(suggestion);
+ }
+
+ return result;
+}
+
+int CLICommand::GetMinArguments() const
+{
+ return 0;
+}
+
+int CLICommand::GetMaxArguments() const
+{
+ return GetMinArguments();
+}
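+
+/* Note: unless a command overrides it, GetMaxArguments() defaults to
+ * GetMinArguments(), i.e. the command accepts exactly that many positional
+ * arguments. */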
+
+bool CLICommand::IsHidden() const
+{
+ return false;
+}
+
+bool CLICommand::IsDeprecated() const
+{
+ return false;
+}
+
+std::mutex& CLICommand::GetRegistryMutex()
+{
+ static std::mutex mtx;
+ return mtx;
+}
+
+std::map<std::vector<String>, CLICommand::Ptr>& CLICommand::GetRegistry()
+{
+ static std::map<std::vector<String>, CLICommand::Ptr> registry;
+ return registry;
+}
+
+CLICommand::Ptr CLICommand::GetByName(const std::vector<String>& name)
+{
+ std::unique_lock<std::mutex> lock(GetRegistryMutex());
+
+ auto it = GetRegistry().find(name);
+
+ if (it == GetRegistry().end())
+ return nullptr;
+
+ return it->second;
+}
+
+void CLICommand::Register(const std::vector<String>& name, const CLICommand::Ptr& function)
+{
+ std::unique_lock<std::mutex> lock(GetRegistryMutex());
+ GetRegistry()[name] = function;
+}
+
+void CLICommand::Unregister(const std::vector<String>& name)
+{
+ std::unique_lock<std::mutex> lock(GetRegistryMutex());
+ GetRegistry().erase(name);
+}
+
+std::vector<String> CLICommand::GetArgumentSuggestions(const String& argument, const String& word) const
+{
+ return std::vector<String>();
+}
+
+std::vector<String> CLICommand::GetPositionalSuggestions(const String& word) const
+{
+ return std::vector<String>();
+}
+
+void CLICommand::InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const
+{ }
+
+ImpersonationLevel CLICommand::GetImpersonationLevel() const
+{
+ return ImpersonateIcinga;
+}
+
+bool CLICommand::ParseCommand(int argc, char **argv, po::options_description& visibleDesc,
+ po::options_description& hiddenDesc,
+ po::positional_options_description& positionalDesc,
+ po::variables_map& vm, String& cmdname, CLICommand::Ptr& command, bool autocomplete)
+{
+ std::unique_lock<std::mutex> lock(GetRegistryMutex());
+
+ typedef std::map<std::vector<String>, CLICommand::Ptr>::value_type CLIKeyValue;
+
+ std::vector<String> best_match;
+ int arg_end = 0;
+ bool tried_command = false;
+
+ for (const CLIKeyValue& kv : GetRegistry()) {
+ const std::vector<String>& vname = kv.first;
+
+ std::vector<String>::size_type i;
+ int k;
+ for (i = 0, k = 1; i < vname.size() && k < argc; i++, k++) {
+ if (strncmp(argv[k], "-", 1) == 0 || strncmp(argv[k], "--", 2) == 0) {
+ i--;
+ continue;
+ }
+
+ tried_command = true;
+
+ if (vname[i] != argv[k])
+ break;
+
+ if (i >= best_match.size())
+ best_match.push_back(vname[i]);
+
+ if (i == vname.size() - 1) {
+ cmdname = boost::algorithm::join(vname, " ");
+ command = kv.second;
+ arg_end = k;
+ goto found_command;
+ }
+ }
+ }
+
+found_command:
+ lock.unlock();
+
+ if (command) {
+ po::options_description vdesc("Command options");
+ command->InitParameters(vdesc, hiddenDesc);
+ visibleDesc.add(vdesc);
+ }
+
+ if (autocomplete || (tried_command && !command))
+ return true;
+
+ po::options_description adesc;
+ adesc.add(visibleDesc);
+ adesc.add(hiddenDesc);
+
+ if (command && command->IsDeprecated()) {
+ std::cerr << ConsoleColorTag(Console_ForegroundRed | Console_Bold)
+ << "Warning: CLI command '" << cmdname << "' is DEPRECATED! Please read the Changelog."
+ << ConsoleColorTag(Console_Normal) << std::endl << std::endl;
+ }
+
+ po::store(po::command_line_parser(argc - arg_end, argv + arg_end).options(adesc).positional(positionalDesc).run(), vm);
+ po::notify(vm);
+
+ return true;
+}
+
+void CLICommand::ShowCommands(int argc, char **argv, po::options_description *visibleDesc,
+ po::options_description *hiddenDesc,
+ ArgumentCompletionCallback globalArgCompletionCallback,
+ bool autocomplete, int autoindex)
+{
+ std::unique_lock<std::mutex> lock(GetRegistryMutex());
+
+ typedef std::map<std::vector<String>, CLICommand::Ptr>::value_type CLIKeyValue;
+
+ std::vector<String> best_match;
+ int arg_begin = 0;
+ CLICommand::Ptr command;
+
+ for (const CLIKeyValue& kv : GetRegistry()) {
+ const std::vector<String>& vname = kv.first;
+
+ arg_begin = 0;
+
+ std::vector<String>::size_type i;
+ int k;
+ for (i = 0, k = 1; i < vname.size() && k < argc; i++, k++) {
+ if (strcmp(argv[k], "--no-stack-rlimit") == 0 || strcmp(argv[k], "--autocomplete") == 0 || strcmp(argv[k], "--scm") == 0) {
+ i--;
+ arg_begin++;
+ continue;
+ }
+
+ if (autocomplete && static_cast<int>(i) >= autoindex - 1)
+ break;
+
+ if (vname[i] != argv[k])
+ break;
+
+ if (i >= best_match.size()) {
+ best_match.push_back(vname[i]);
+ }
+
+ if (i == vname.size() - 1) {
+ command = kv.second;
+ break;
+ }
+ }
+ }
+
+ String aword;
+
+ if (autocomplete) {
+ if (autoindex < argc)
+ aword = argv[autoindex];
+
+ if (autoindex - 1 > static_cast<int>(best_match.size()) && !command)
+ return;
+ } else
+ std::cout << "Supported commands: " << std::endl;
+
+ for (const CLIKeyValue& kv : GetRegistry()) {
+ const std::vector<String>& vname = kv.first;
+
+ if (vname.size() < best_match.size() || kv.second->IsHidden())
+ continue;
+
+ bool match = true;
+
+ for (std::vector<String>::size_type i = 0; i < best_match.size(); i++) {
+ if (vname[i] != best_match[i]) {
+ match = false;
+ break;
+ }
+ }
+
+ if (!match)
+ continue;
+
+ if (autocomplete) {
+ String cname;
+
+ if (autoindex - 1 < static_cast<int>(vname.size())) {
+ cname = vname[autoindex - 1];
+
+ if (cname.Find(aword) == 0)
+ std::cout << cname << "\n";
+ }
+ } else {
+ std::cout << " * " << boost::algorithm::join(vname, " ")
+ << " (" << kv.second->GetShortDescription() << ")"
+ << (kv.second->IsDeprecated() ? " (DEPRECATED)" : "") << std::endl;
+ }
+ }
+
+ if (!autocomplete)
+ std::cout << std::endl;
+
+ if (command && autocomplete) {
+ String aname, prefix, pword;
+ const po::option_description *odesc;
+
+ if (autoindex - 2 >= 0 && strcmp(argv[autoindex - 1], "=") == 0 && strstr(argv[autoindex - 2], "--") == argv[autoindex - 2]) {
+ aname = argv[autoindex - 2] + 2;
+ pword = aword;
+ } else if (autoindex - 1 >= 0 && argv[autoindex - 1][0] == '-' && argv[autoindex - 1][1] == '-') {
+ aname = argv[autoindex - 1] + 2;
+ pword = aword;
+
+ if (pword == "=")
+ pword = "";
+ } else if (autoindex - 1 >= 0 && argv[autoindex - 1][0] == '-' && argv[autoindex - 1][1] != '-') {
+ aname = argv[autoindex - 1];
+ pword = aword;
+
+ if (pword == "=")
+ pword = "";
+ } else if (aword.GetLength() > 1 && aword[0] == '-' && aword[1] != '-') {
+ aname = aword.SubStr(0, 2);
+ prefix = aname;
+ pword = aword.SubStr(2);
+ } else {
+ goto complete_option;
+ }
+
+ odesc = visibleDesc->find_nothrow(aname, false);
+
+ if (!odesc)
+ return;
+
+ if (odesc->semantic()->min_tokens() == 0)
+ goto complete_option;
+
+ for (const String& suggestion : globalArgCompletionCallback(odesc->long_name(), pword)) {
+ std::cout << prefix << suggestion << "\n";
+ }
+
+ for (const String& suggestion : command->GetArgumentSuggestions(odesc->long_name(), pword)) {
+ std::cout << prefix << suggestion << "\n";
+ }
+
+ return;
+
+complete_option:
+ for (const boost::shared_ptr<po::option_description>& odesc : visibleDesc->options()) {
+ String cname = "--" + odesc->long_name();
+
+ if (cname.Find(aword) == 0)
+ std::cout << cname << "\n";
+ }
+
+ for (const String& suggestion : command->GetPositionalSuggestions(aword)) {
+ std::cout << suggestion << "\n";
+ }
+ }
+
+ return;
+}
diff --git a/lib/cli/clicommand.hpp b/lib/cli/clicommand.hpp
new file mode 100644
index 0000000..ce58b54
--- /dev/null
+++ b/lib/cli/clicommand.hpp
@@ -0,0 +1,79 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CLICOMMAND_H
+#define CLICOMMAND_H
+
+#include "cli/i2-cli.hpp"
+#include "base/value.hpp"
+#include "base/utility.hpp"
+#include "base/type.hpp"
+#include <vector>
+#include <boost/program_options.hpp>
+
+namespace icinga
+{
+
+std::vector<String> GetBashCompletionSuggestions(const String& type, const String& word);
+std::vector<String> GetFieldCompletionSuggestions(const Type::Ptr& type, const String& word);
+
+enum ImpersonationLevel
+{
+ ImpersonateNone,
+ ImpersonateRoot,
+ ImpersonateIcinga
+};
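+
+/* Rough meaning of the levels (the actual privilege handling happens in the
+ * application bootstrap, not in this header): ImpersonateNone keeps the
+ * invoking user, ImpersonateRoot expects to run as root, and ImpersonateIcinga
+ * drops privileges to the configured Icinga daemon user. */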
+
+/**
+ * A CLI command.
+ *
+ * @ingroup base
+ */
+class CLICommand : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(CLICommand);
+
+ typedef std::vector<String>(*ArgumentCompletionCallback)(const String&, const String&);
+
+ virtual String GetDescription() const = 0;
+ virtual String GetShortDescription() const = 0;
+ virtual int GetMinArguments() const;
+ virtual int GetMaxArguments() const;
+ virtual bool IsHidden() const;
+ virtual bool IsDeprecated() const;
+ virtual void InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const;
+ virtual ImpersonationLevel GetImpersonationLevel() const;
+ virtual int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const = 0;
+ virtual std::vector<String> GetArgumentSuggestions(const String& argument, const String& word) const;
+ virtual std::vector<String> GetPositionalSuggestions(const String& word) const;
+
+ static CLICommand::Ptr GetByName(const std::vector<String>& name);
+ static void Register(const std::vector<String>& name, const CLICommand::Ptr& command);
+ static void Unregister(const std::vector<String>& name);
+
+ static bool ParseCommand(int argc, char **argv, boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc,
+ boost::program_options::positional_options_description& positionalDesc,
+ boost::program_options::variables_map& vm, String& cmdname, CLICommand::Ptr& command, bool autocomplete);
+
+ static void ShowCommands(int argc, char **argv,
+ boost::program_options::options_description *visibleDesc = nullptr,
+ boost::program_options::options_description *hiddenDesc = nullptr,
+ ArgumentCompletionCallback globalArgCompletionCallback = nullptr,
+ bool autocomplete = false, int autoindex = -1);
+
+private:
+ static std::mutex& GetRegistryMutex();
+ static std::map<std::vector<String>, CLICommand::Ptr>& GetRegistry();
+};
+
+#define REGISTER_CLICOMMAND(name, klass) \
+ INITIALIZE_ONCE([]() { \
+ std::vector<String> vname = String(name).Split("/"); \
+ CLICommand::Register(vname, new klass()); \
+ })
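+
+/* Example (illustrative): a command implemented in its own .cpp file registers
+ * itself with this macro, e.g.
+ *
+ *   REGISTER_CLICOMMAND("ca/sign", CASignCommand);
+ *
+ * The name is split on '/', so this becomes the CLI command "icinga2 ca sign". */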
+
+}
+
+#endif /* CLICOMMAND_H */
diff --git a/lib/cli/consolecommand.cpp b/lib/cli/consolecommand.cpp
new file mode 100644
index 0000000..78906bb
--- /dev/null
+++ b/lib/cli/consolecommand.cpp
@@ -0,0 +1,723 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/consolecommand.hpp"
+#include "config/configcompiler.hpp"
+#include "remote/consolehandler.hpp"
+#include "remote/url.hpp"
+#include "base/configwriter.hpp"
+#include "base/serializer.hpp"
+#include "base/json.hpp"
+#include "base/console.hpp"
+#include "base/application.hpp"
+#include "base/objectlock.hpp"
+#include "base/unixsocket.hpp"
+#include "base/utility.hpp"
+#include "base/networkstream.hpp"
+#include "base/defer.hpp"
+#include "base/io-engine.hpp"
+#include "base/stream.hpp"
+#include "base/tcpsocket.hpp" /* include global icinga::Connect */
+#include <base/base64.hpp>
+#include "base/exception.hpp"
+#include <boost/asio/ssl/context.hpp>
+#include <boost/beast/core/flat_buffer.hpp>
+#include <boost/beast/http/field.hpp>
+#include <boost/beast/http/message.hpp>
+#include <boost/beast/http/parser.hpp>
+#include <boost/beast/http/read.hpp>
+#include <boost/beast/http/status.hpp>
+#include <boost/beast/http/string_body.hpp>
+#include <boost/beast/http/verb.hpp>
+#include <boost/beast/http/write.hpp>
+#include <iostream>
+#include <fstream>
+
+
+#ifdef HAVE_EDITLINE
+#include "cli/editline.hpp"
+#endif /* HAVE_EDITLINE */
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+static ScriptFrame *l_ScriptFrame;
+static Url::Ptr l_Url;
+static Shared<AsioTlsStream>::Ptr l_TlsStream;
+static String l_Session;
+
+REGISTER_CLICOMMAND("console", ConsoleCommand);
+
+INITIALIZE_ONCE(&ConsoleCommand::StaticInitialize);
+
+extern "C" void dbg_spawn_console()
+{
+ ScriptFrame frame(true);
+ ConsoleCommand::RunScriptConsole(frame);
+}
+
+extern "C" void dbg_inspect_value(const Value& value)
+{
+ ConfigWriter::EmitValue(std::cout, 1, Serialize(value, 0));
+ std::cout << std::endl;
+}
+
+extern "C" void dbg_inspect_object(Object *obj)
+{
+ Object::Ptr objr = obj;
+ dbg_inspect_value(objr);
+}
+
+extern "C" void dbg_eval(const char *text)
+{
+ std::unique_ptr<Expression> expr;
+
+ try {
+ ScriptFrame frame(true);
+ expr = ConfigCompiler::CompileText("<dbg>", text);
+ Value result = Serialize(expr->Evaluate(frame), 0);
+ dbg_inspect_value(result);
+ } catch (const std::exception& ex) {
+ std::cout << "Error: " << DiagnosticInformation(ex) << "\n";
+ }
+}
+
+extern "C" void dbg_eval_with_value(const Value& value, const char *text)
+{
+ std::unique_ptr<Expression> expr;
+
+ try {
+ ScriptFrame frame(true);
+ frame.Locals = new Dictionary({
+ { "arg", value }
+ });
+ expr = ConfigCompiler::CompileText("<dbg>", text);
+ Value result = Serialize(expr->Evaluate(frame), 0);
+ dbg_inspect_value(result);
+ } catch (const std::exception& ex) {
+ std::cout << "Error: " << DiagnosticInformation(ex) << "\n";
+ }
+}
+
+extern "C" void dbg_eval_with_object(Object *object, const char *text)
+{
+ std::unique_ptr<Expression> expr;
+
+ try {
+ ScriptFrame frame(true);
+ frame.Locals = new Dictionary({
+ { "arg", object }
+ });
+ expr = ConfigCompiler::CompileText("<dbg>", text);
+ Value result = Serialize(expr->Evaluate(frame), 0);
+ dbg_inspect_value(result);
+ } catch (const std::exception& ex) {
+ std::cout << "Error: " << DiagnosticInformation(ex) << "\n";
+ }
+}
+
+void ConsoleCommand::BreakpointHandler(ScriptFrame& frame, ScriptError *ex, const DebugInfo& di)
+{
+ static std::mutex mutex;
+ std::unique_lock<std::mutex> lock(mutex);
+
+ if (!Application::GetScriptDebuggerEnabled())
+ return;
+
+ if (ex && ex->IsHandledByDebugger())
+ return;
+
+ std::cout << "Breakpoint encountered.\n";
+
+ if (ex) {
+ std::cout << "Exception: " << DiagnosticInformation(*ex) << "\n";
+ ex->SetHandledByDebugger(true);
+ } else
+ ShowCodeLocation(std::cout, di);
+
+ std::cout << "You can inspect expressions (such as variables) by entering them at the prompt.\n"
+ << "To leave the debugger and continue the program use \"$continue\".\n"
+ << "For further commands see \"$help\".\n";
+
+#ifdef HAVE_EDITLINE
+ rl_completion_entry_function = ConsoleCommand::ConsoleCompleteHelper;
+ rl_completion_append_character = '\0';
+#endif /* HAVE_EDITLINE */
+
+ ConsoleCommand::RunScriptConsole(frame);
+}
+
+void ConsoleCommand::StaticInitialize()
+{
+ Expression::OnBreakpoint.connect(&ConsoleCommand::BreakpointHandler);
+}
+
+String ConsoleCommand::GetDescription() const
+{
+ return "Interprets Icinga script expressions.";
+}
+
+String ConsoleCommand::GetShortDescription() const
+{
+ return "Icinga console";
+}
+
+ImpersonationLevel ConsoleCommand::GetImpersonationLevel() const
+{
+ return ImpersonateNone;
+}
+
+void ConsoleCommand::InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const
+{
+ visibleDesc.add_options()
+ ("connect,c", po::value<std::string>(), "connect to an Icinga 2 instance")
+ ("eval,e", po::value<std::string>(), "evaluate expression and terminate")
+ ("file,r", po::value<std::string>(), "evaluate a file and terminate")
+ ("syntax-only", "only validate syntax (requires --eval or --file)")
+ ("sandbox", "enable sandbox mode")
+ ;
+}
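+
+/* Illustrative invocations (a sketch based on the options above; credentials
+ * and host are placeholders):
+ *   icinga2 console
+ *   icinga2 console --eval 'var x = 2 * 21; x'
+ *   icinga2 console --connect 'https://user:password@localhost:5665/'
+ * The connection settings may also come from the ICINGA2_API_URL,
+ * ICINGA2_API_USERNAME and ICINGA2_API_PASSWORD environment variables (see Run()). */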
+
+#ifdef HAVE_EDITLINE
+char *ConsoleCommand::ConsoleCompleteHelper(const char *word, int state)
+{
+ static std::vector<String> matches;
+
+ if (state == 0) {
+ if (!l_Url)
+ matches = ConsoleHandler::GetAutocompletionSuggestions(word, *l_ScriptFrame);
+ else {
+ Array::Ptr suggestions;
+
+ /* Remote debug console. */
+ try {
+ suggestions = AutoCompleteScript(l_Session, word, l_ScriptFrame->Sandboxed);
+ } catch (...) {
+ return nullptr; //Errors are just ignored here.
+ }
+
+ matches.clear();
+
+ ObjectLock olock(suggestions);
+ std::copy(suggestions->Begin(), suggestions->End(), std::back_inserter(matches));
+ }
+ }
+
+ if (state >= static_cast<int>(matches.size()))
+ return nullptr;
+
+ return strdup(matches[state].CStr());
+}
+#endif /* HAVE_EDITLINE */
+
+/**
+ * The entry point for the "console" CLI command.
+ *
+ * @returns An exit status.
+ */
+int ConsoleCommand::Run(const po::variables_map& vm, const std::vector<std::string>& ap) const
+{
+#ifdef HAVE_EDITLINE
+ rl_completion_entry_function = ConsoleCommand::ConsoleCompleteHelper;
+ rl_completion_append_character = '\0';
+#endif /* HAVE_EDITLINE */
+
+ String addr, session;
+ ScriptFrame scriptFrame(true);
+
+ session = Utility::NewUniqueID();
+
+ if (vm.count("sandbox"))
+ scriptFrame.Sandboxed = true;
+
+ scriptFrame.Self = scriptFrame.Locals;
+
+ if (!vm.count("eval") && !vm.count("file"))
+ std::cout << "Icinga 2 (version: " << Application::GetAppVersion() << ")\n"
+ << "Type $help to view available commands.\n";
+
+ String addrEnv = Utility::GetFromEnvironment("ICINGA2_API_URL");
+ if (!addrEnv.IsEmpty())
+ addr = addrEnv;
+
+ /* Initialize remote connect parameters. */
+ if (vm.count("connect")) {
+ addr = vm["connect"].as<std::string>();
+
+ try {
+ l_Url = new Url(addr);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "ConsoleCommand", ex.what());
+ return EXIT_FAILURE;
+ }
+
+ String usernameEnv = Utility::GetFromEnvironment("ICINGA2_API_USERNAME");
+ String passwordEnv = Utility::GetFromEnvironment("ICINGA2_API_PASSWORD");
+
+ if (!usernameEnv.IsEmpty())
+ l_Url->SetUsername(usernameEnv);
+ if (!passwordEnv.IsEmpty())
+ l_Url->SetPassword(passwordEnv);
+
+ if (l_Url->GetPort().IsEmpty())
+ l_Url->SetPort("5665");
+
+ /* User passed --connect and wants to run the expression via REST API.
+ * Evaluate this now before any user input happens.
+ */
+ try {
+ l_TlsStream = ConsoleCommand::Connect();
+ } catch (const std::exception& ex) {
+ return EXIT_FAILURE;
+ }
+ }
+
+ String command;
+ bool syntaxOnly = false;
+
+ if (vm.count("syntax-only")) {
+ if (vm.count("eval") || vm.count("file"))
+ syntaxOnly = true;
+ else {
+ std::cerr << "The option --syntax-only can only be used in combination with --eval or --file." << std::endl;
+ return EXIT_FAILURE;
+ }
+ }
+
+ String commandFileName;
+
+ if (vm.count("eval"))
+ command = vm["eval"].as<std::string>();
+ else if (vm.count("file")) {
+ commandFileName = vm["file"].as<std::string>();
+
+ try {
+ std::ifstream fp(commandFileName.CStr());
+ fp.exceptions(std::ifstream::failbit | std::ifstream::badbit);
+ command = String(std::istreambuf_iterator<char>(fp), std::istreambuf_iterator<char>());
+ } catch (const std::exception&) {
+ std::cerr << "Could not read file '" << commandFileName << "'." << std::endl;
+ return EXIT_FAILURE;
+ }
+ }
+
+ return RunScriptConsole(scriptFrame, addr, session, command, commandFileName, syntaxOnly);
+}
+
+int ConsoleCommand::RunScriptConsole(ScriptFrame& scriptFrame, const String& connectAddr, const String& session,
+ const String& commandOnce, const String& commandOnceFileName, bool syntaxOnly)
+{
+ std::map<String, String> lines;
+ int next_line = 1;
+
+#ifdef HAVE_EDITLINE
+ String homeEnv = Utility::GetFromEnvironment("HOME");
+
+ String historyPath;
+ std::fstream historyfp;
+
+ if (!homeEnv.IsEmpty()) {
+ historyPath = String(homeEnv) + "/.icinga2_history";
+
+ historyfp.open(historyPath.CStr(), std::fstream::in);
+
+ String line;
+ while (std::getline(historyfp, line.GetData()))
+ add_history(line.CStr());
+
+ historyfp.close();
+ }
+#endif /* HAVE_EDITLINE */
+
+ l_ScriptFrame = &scriptFrame;
+ l_Session = session;
+
+ while (std::cin.good()) {
+ String fileName;
+
+ if (commandOnceFileName.IsEmpty())
+ fileName = "<" + Convert::ToString(next_line) + ">";
+ else
+ fileName = commandOnceFileName;
+
+ next_line++;
+
+ bool continuation = false;
+ std::string command;
+
+incomplete:
+ std::string line;
+
+ if (commandOnce.IsEmpty()) {
+#ifdef HAVE_EDITLINE
+ std::ostringstream promptbuf;
+ std::ostream& os = promptbuf;
+#else /* HAVE_EDITLINE */
+ std::ostream& os = std::cout;
+#endif /* HAVE_EDITLINE */
+
+ os << fileName;
+
+ if (!continuation)
+ os << " => ";
+ else
+ os << " .. ";
+
+#ifdef HAVE_EDITLINE
+ String prompt = promptbuf.str();
+
+ char *cline;
+ cline = readline(prompt.CStr());
+
+ if (!cline)
+ break;
+
+ if (commandOnce.IsEmpty() && cline[0] != '\0') {
+ add_history(cline);
+
+ if (!historyPath.IsEmpty()) {
+ historyfp.open(historyPath.CStr(), std::fstream::out | std::fstream::app);
+ historyfp << cline << "\n";
+ historyfp.close();
+ }
+ }
+
+ line = cline;
+
+ free(cline);
+#else /* HAVE_EDITLINE */
+ std::getline(std::cin, line);
+#endif /* HAVE_EDITLINE */
+ } else
+ line = commandOnce;
+
+ if (!line.empty() && line[0] == '$') {
+ if (line == "$continue" || line == "$quit" || line == "$exit")
+ break;
+ else if (line == "$help")
+ std::cout << "Welcome to the Icinga 2 debug console.\n"
+ "Usable commands:\n"
+ " $continue Continue running Icinga 2 (script debugger).\n"
+ " $quit, $exit Stop debugging and quit the console.\n"
+ " $help Print this help.\n\n"
+ "For more information on how to use this console, please consult the documentation at https://icinga.com/docs\n";
+ else
+ std::cout << "Unknown debugger command: " << line << "\n";
+
+ continue;
+ }
+
+ if (!command.empty())
+ command += "\n";
+
+ command += line;
+
+ std::unique_ptr<Expression> expr;
+
+ try {
+ lines[fileName] = command;
+
+ Value result;
+
+ /* Local debug console. */
+ if (connectAddr.IsEmpty()) {
+ expr = ConfigCompiler::CompileText(fileName, command);
+
+ /* This relies on the fact that - for syntax errors - CompileText()
+ * returns an AST where the top-level expression is a 'throw'. */
+ if (!syntaxOnly || dynamic_cast<ThrowExpression *>(expr.get())) {
+ if (syntaxOnly)
+ std::cerr << " => " << command << std::endl;
+ result = Serialize(expr->Evaluate(scriptFrame), 0);
+ } else
+ result = true;
+ } else {
+ /* Remote debug console. */
+ try {
+ result = ExecuteScript(l_Session, command, scriptFrame.Sandboxed);
+ } catch (const ScriptError&) {
+ /* Re-throw the exception for the outside try-catch block. */
+ boost::rethrow_exception(boost::current_exception());
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "ConsoleCommand")
+ << "HTTP query failed: " << ex.what();
+
+#ifdef HAVE_EDITLINE
+ /* Ensures that the terminal state is reset */
+ rl_deprep_terminal();
+#endif /* HAVE_EDITLINE */
+
+ return EXIT_FAILURE;
+ }
+ }
+
+ if (commandOnce.IsEmpty()) {
+ std::cout << ConsoleColorTag(Console_ForegroundCyan);
+ ConfigWriter::EmitValue(std::cout, 1, result);
+ std::cout << ConsoleColorTag(Console_Normal) << "\n";
+ } else {
+ std::cout << JsonEncode(result) << "\n";
+ break;
+ }
+ } catch (const ScriptError& ex) {
+ if (ex.IsIncompleteExpression() && commandOnce.IsEmpty()) {
+ continuation = true;
+ goto incomplete;
+ }
+
+ DebugInfo di = ex.GetDebugInfo();
+
+ if (commandOnceFileName.IsEmpty() && lines.find(di.Path) != lines.end()) {
+ String text = lines[di.Path];
+
+ std::vector<String> ulines = text.Split("\n");
+
+ for (decltype(ulines.size()) i = 1; i <= ulines.size(); i++) {
+ int start, len;
+
+ if (i == (decltype(i))di.FirstLine)
+ start = di.FirstColumn;
+ else
+ start = 0;
+
+ if (i == (decltype(i))di.LastLine)
+ len = di.LastColumn - di.FirstColumn + 1;
+ else
+ len = ulines[i - 1].GetLength();
+
+ int offset;
+
+ if (di.Path != fileName) {
+ std::cout << di.Path << ": " << ulines[i - 1] << "\n";
+ offset = 2;
+ } else
+ offset = 4;
+
+ if (i >= (decltype(i))di.FirstLine && i <= (decltype(i))di.LastLine) {
+ std::cout << String(di.Path.GetLength() + offset, ' ');
+ std::cout << String(start, ' ') << String(len, '^') << "\n";
+ }
+ }
+ } else {
+ ShowCodeLocation(std::cout, di);
+ }
+
+ std::cout << ex.what() << "\n";
+
+ if (!commandOnce.IsEmpty())
+ return EXIT_FAILURE;
+ } catch (const std::exception& ex) {
+ std::cout << "Error: " << DiagnosticInformation(ex) << "\n";
+
+ if (!commandOnce.IsEmpty())
+ return EXIT_FAILURE;
+ }
+ }
+
+ return EXIT_SUCCESS;
+}
+
+/**
+ * Connects to host:port and performs a TLS handshake.
+ *
+ * @returns AsioTlsStream pointer for future HTTP connections.
+ */
+Shared<AsioTlsStream>::Ptr ConsoleCommand::Connect()
+{
+ Shared<boost::asio::ssl::context>::Ptr sslContext;
+
+ try {
+ sslContext = MakeAsioSslContext(Empty, Empty, Empty); //TODO: Add support for cert, key, ca parameters
+ } catch(const std::exception& ex) {
+ Log(LogCritical, "DebugConsole")
+ << "Cannot make SSL context: " << ex.what();
+ throw;
+ }
+
+ String host = l_Url->GetHost();
+ String port = l_Url->GetPort();
+
+ Shared<AsioTlsStream>::Ptr stream = Shared<AsioTlsStream>::Make(IoEngine::Get().GetIoContext(), *sslContext, host);
+
+ try {
+ icinga::Connect(stream->lowest_layer(), host, port);
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "DebugConsole")
+ << "Cannot connect to REST API on host '" << host << "' port '" << port << "': " << ex.what();
+ throw;
+ }
+
+ auto& tlsStream (stream->next_layer());
+
+ try {
+ tlsStream.handshake(tlsStream.client);
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "DebugConsole")
+ << "TLS handshake with host '" << host << "' failed: " << ex.what();
+ throw;
+ }
+
+ return stream;
+}
+
+/**
+ * Sends the request via the REST API and returns the parsed response.
+ *
+ * Uses the globally prepared l_Url (path and query must already be set) and
+ * establishes its own TLS connection via Connect().
+ *
+ * @return A dictionary decoded from JSON.
+ */
+Dictionary::Ptr ConsoleCommand::SendRequest()
+{
+ namespace beast = boost::beast;
+ namespace http = beast::http;
+
+ l_TlsStream = ConsoleCommand::Connect();
+
+ Defer s ([&]() {
+ l_TlsStream->next_layer().shutdown();
+ });
+
+ http::request<http::string_body> request(http::verb::post, std::string(l_Url->Format(false)), 10);
+
+ request.set(http::field::user_agent, "Icinga/DebugConsole/" + Application::GetAppVersion());
+ request.set(http::field::host, l_Url->GetHost() + ":" + l_Url->GetPort());
+
+ request.set(http::field::accept, "application/json");
+ request.set(http::field::authorization, "Basic " + Base64::Encode(l_Url->GetUsername() + ":" + l_Url->GetPassword()));
+
+ try {
+ http::write(*l_TlsStream, request);
+ l_TlsStream->flush();
+ } catch (const std::exception &ex) {
+ Log(LogWarning, "DebugConsole")
+ << "Cannot write HTTP request to REST API at URL '" << l_Url->Format(true) << "': " << ex.what();
+ throw;
+ }
+
+ http::parser<false, http::string_body> parser;
+ beast::flat_buffer buf;
+
+ try {
+ http::read(*l_TlsStream, buf, parser);
+ } catch (const std::exception &ex) {
+ Log(LogWarning, "DebugConsole")
+ << "Failed to parse HTTP response from REST API at URL '" << l_Url->Format(true) << "': " << ex.what();
+ throw;
+ }
+
+ auto &response(parser.get());
+
+ /* Handle HTTP errors first. */
+ if (response.result() != http::status::ok) {
+ String message = "HTTP request failed; Code: " + Convert::ToString(response.result())
+ + "; Body: " + response.body();
+ BOOST_THROW_EXCEPTION(ScriptError(message));
+ }
+
+ Dictionary::Ptr jsonResponse;
+ auto &body(response.body());
+
+ //Log(LogWarning, "Console")
+ // << "Got response: " << response.body();
+
+ try {
+ jsonResponse = JsonDecode(body);
+ } catch (...) {
+ String message = "Cannot parse JSON response body: " + response.body();
+ BOOST_THROW_EXCEPTION(ScriptError(message));
+ }
+
+ return jsonResponse;
+}
+
+/**
+ * Executes the DSL script via HTTP and returns HTTP and user errors.
+ *
+ * @param session The local console session ID.
+ * @param command The DSL string.
+ * @param sandboxed Whether to run this sandboxed.
+ * @return Result value, also contains user errors.
+ */
+Value ConsoleCommand::ExecuteScript(const String& session, const String& command, bool sandboxed)
+{
+ /* Extend the url parameters for the request. */
+ l_Url->SetPath({"v1", "console", "execute-script"});
+
+ l_Url->SetQuery({
+ {"session", session},
+ {"command", command},
+ {"sandboxed", sandboxed ? "1" : "0"}
+ });
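+
+ /* For reference, this corresponds to a POST request along the lines of
+ * (sketch only; query values must be URL-encoded, credentials are placeholders):
+ *   curl -k -u user:password -H 'Accept: application/json' -X POST \
+ *     'https://localhost:5665/v1/console/execute-script?session=<uuid>&command=2%2B2&sandboxed=0'
+ */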
+
+ Dictionary::Ptr jsonResponse = SendRequest();
+
+ /* Extract the result, and handle user input errors too. */
+ Array::Ptr results = jsonResponse->Get("results");
+ Value result;
+
+ if (results && results->GetLength() > 0) {
+ Dictionary::Ptr resultInfo = results->Get(0);
+
+ if (resultInfo->Get("code") >= 200 && resultInfo->Get("code") <= 299) {
+ result = resultInfo->Get("result");
+ } else {
+ String errorMessage = resultInfo->Get("status");
+
+ DebugInfo di;
+ Dictionary::Ptr debugInfo = resultInfo->Get("debug_info");
+
+ if (debugInfo) {
+ di.Path = debugInfo->Get("path");
+ di.FirstLine = debugInfo->Get("first_line");
+ di.FirstColumn = debugInfo->Get("first_column");
+ di.LastLine = debugInfo->Get("last_line");
+ di.LastColumn = debugInfo->Get("last_column");
+ }
+
+ bool incompleteExpression = resultInfo->Get("incomplete_expression");
+ BOOST_THROW_EXCEPTION(ScriptError(errorMessage, di, incompleteExpression));
+ }
+ }
+
+ return result;
+}
+
+/**
+ * Executes the auto completion script via HTTP and returns HTTP and user errors.
+ *
+ * @param session The local console session ID.
+ * @param command The auto completion string.
+ * @param sandboxed Whether to run this sandboxed.
+ * @return Result value, also contains user errors.
+ */
+Array::Ptr ConsoleCommand::AutoCompleteScript(const String& session, const String& command, bool sandboxed)
+{
+ /* Extend the url parameters for the request. */
+ l_Url->SetPath({ "v1", "console", "auto-complete-script" });
+
+ l_Url->SetQuery({
+ {"session", session},
+ {"command", command},
+ {"sandboxed", sandboxed ? "1" : "0"}
+ });
+
+ Dictionary::Ptr jsonResponse = SendRequest();
+
+ /* Extract the result, and handle user input errors too. */
+ Array::Ptr results = jsonResponse->Get("results");
+ Array::Ptr suggestions;
+
+ if (results && results->GetLength() > 0) {
+ Dictionary::Ptr resultInfo = results->Get(0);
+
+ if (resultInfo->Get("code") >= 200 && resultInfo->Get("code") <= 299) {
+ suggestions = resultInfo->Get("suggestions");
+ } else {
+ String errorMessage = resultInfo->Get("status");
+ BOOST_THROW_EXCEPTION(ScriptError(errorMessage));
+ }
+ }
+
+ return suggestions;
+}
diff --git a/lib/cli/consolecommand.hpp b/lib/cli/consolecommand.hpp
new file mode 100644
index 0000000..631ec21
--- /dev/null
+++ b/lib/cli/consolecommand.hpp
@@ -0,0 +1,61 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CONSOLECOMMAND_H
+#define CONSOLECOMMAND_H
+
+#include "cli/clicommand.hpp"
+#include "base/exception.hpp"
+#include "base/scriptframe.hpp"
+#include "base/tlsstream.hpp"
+#include "remote/url.hpp"
+#include <condition_variable>
+
+
+namespace icinga
+{
+
+/**
+ * The "console" CLI command.
+ *
+ * @ingroup cli
+ */
+class ConsoleCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ConsoleCommand);
+
+ static void StaticInitialize();
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ ImpersonationLevel GetImpersonationLevel() const override;
+ void InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+
+ static int RunScriptConsole(ScriptFrame& scriptFrame, const String& connectAddr = String(),
+ const String& session = String(), const String& commandOnce = String(), const String& commandOnceFileName = String(),
+ bool syntaxOnly = false);
+
+private:
+ mutable std::mutex m_Mutex;
+ mutable std::condition_variable m_CV;
+
+ static Shared<AsioTlsStream>::Ptr Connect();
+
+ static Value ExecuteScript(const String& session, const String& command, bool sandboxed);
+ static Array::Ptr AutoCompleteScript(const String& session, const String& command, bool sandboxed);
+
+ static Dictionary::Ptr SendRequest();
+
+#ifdef HAVE_EDITLINE
+ static char *ConsoleCompleteHelper(const char *word, int state);
+#endif /* HAVE_EDITLINE */
+
+ static void BreakpointHandler(ScriptFrame& frame, ScriptError *ex, const DebugInfo& di);
+
+};
+
+}
+
+#endif /* CONSOLECOMMAND_H */
diff --git a/lib/cli/daemoncommand.cpp b/lib/cli/daemoncommand.cpp
new file mode 100644
index 0000000..3a9ce8c
--- /dev/null
+++ b/lib/cli/daemoncommand.cpp
@@ -0,0 +1,882 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/daemoncommand.hpp"
+#include "cli/daemonutility.hpp"
+#include "remote/apilistener.hpp"
+#include "remote/configobjectslock.hpp"
+#include "remote/configobjectutility.hpp"
+#include "config/configcompiler.hpp"
+#include "config/configcompilercontext.hpp"
+#include "config/configitembuilder.hpp"
+#include "base/atomic.hpp"
+#include "base/defer.hpp"
+#include "base/logger.hpp"
+#include "base/application.hpp"
+#include "base/process.hpp"
+#include "base/timer.hpp"
+#include "base/utility.hpp"
+#include "base/exception.hpp"
+#include "base/convert.hpp"
+#include "base/scriptglobal.hpp"
+#include "base/context.hpp"
+#include "config.h"
+#include <cstdint>
+#include <cstring>
+#include <boost/program_options.hpp>
+#include <iostream>
+#include <fstream>
+
+#ifdef _WIN32
+#include <windows.h>
+#else /* _WIN32 */
+#include <signal.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#endif /* _WIN32 */
+
+#ifdef HAVE_SYSTEMD
+#include <systemd/sd-daemon.h>
+#endif /* HAVE_SYSTEMD */
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+static po::variables_map g_AppParams;
+
+REGISTER_CLICOMMAND("daemon", DaemonCommand);
+
+static inline
+void NotifyStatus(const char* status)
+{
+#ifdef HAVE_SYSTEMD
+ (void)sd_notifyf(0, "STATUS=%s", status);
+#endif /* HAVE_SYSTEMD */
+}
+
+/*
+ * Daemonize(). On error, this function logs by itself and exits (i.e. does not return).
+ *
+ * Implementation note: We're only supposed to call exit() in one of the forked processes.
+ * The other process calls _exit(). This prevents issues with exit handlers like atexit().
+ */
+static void Daemonize() noexcept
+{
+#ifndef _WIN32
+ try {
+ Application::UninitializeBase();
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli")
+ << "Failed to stop thread pool before daemonizing, unexpected error: " << DiagnosticInformation(ex);
+ exit(EXIT_FAILURE);
+ }
+
+ pid_t pid = fork();
+ if (pid == -1) {
+ Log(LogCritical, "cli")
+ << "fork() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+ exit(EXIT_FAILURE);
+ }
+
+ if (pid) {
+ // systemd requires that the pidfile of the daemon is written before the forking
+ // process terminates. So wait till either the forked daemon has written a pidfile or died.
+
+ int status;
+ int ret;
+ pid_t readpid;
+ do {
+ Utility::Sleep(0.1);
+
+ readpid = Application::ReadPidFile(Configuration::PidPath);
+ ret = waitpid(pid, &status, WNOHANG);
+ } while (readpid != pid && ret == 0);
+
+ if (ret == pid) {
+ Log(LogCritical, "cli", "The daemon could not be started. See log output for details.");
+ _exit(EXIT_FAILURE);
+ } else if (ret == -1) {
+ Log(LogCritical, "cli")
+ << "waitpid() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+ _exit(EXIT_FAILURE);
+ }
+
+ _exit(EXIT_SUCCESS);
+ }
+
+ Log(LogDebug, "Daemonize()")
+ << "Child process with PID " << Utility::GetPid() << " continues; re-initializing base.";
+
+ // Detach from controlling terminal
+ pid_t sid = setsid();
+ if (sid == -1) {
+ Log(LogCritical, "cli")
+ << "setsid() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+ exit(EXIT_FAILURE);
+ }
+
+ try {
+ Application::InitializeBase();
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli")
+ << "Failed to re-initialize thread pool after daemonizing: " << DiagnosticInformation(ex);
+ exit(EXIT_FAILURE);
+ }
+#endif /* _WIN32 */
+}
+
+static void CloseStdIO(const String& stderrFile)
+{
+#ifndef _WIN32
+ int fdnull = open("/dev/null", O_RDWR);
+ if (fdnull >= 0) {
+ if (fdnull != 0)
+ dup2(fdnull, 0);
+
+ if (fdnull != 1)
+ dup2(fdnull, 1);
+
+ if (fdnull > 1)
+ close(fdnull);
+ }
+
+ const char *errPath = "/dev/null";
+
+ if (!stderrFile.IsEmpty())
+ errPath = stderrFile.CStr();
+
+ int fderr = open(errPath, O_WRONLY | O_APPEND);
+
+ if (fderr < 0 && errno == ENOENT)
+ fderr = open(errPath, O_CREAT | O_WRONLY | O_APPEND, 0600);
+
+ if (fderr >= 0) {
+ if (fderr != 2)
+ dup2(fderr, 2);
+
+ if (fderr > 2)
+ close(fderr);
+ }
+#endif
+}
+
+String DaemonCommand::GetDescription() const
+{
+ return "Starts Icinga 2.";
+}
+
+String DaemonCommand::GetShortDescription() const
+{
+ return "starts Icinga 2";
+}
+
+void DaemonCommand::InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const
+{
+ visibleDesc.add_options()
+ ("config,c", po::value<std::vector<std::string> >(), "parse a configuration file")
+ ("no-config,z", "start without a configuration file")
+ ("validate,C", "exit after validating the configuration")
+ ("dump-objects", "write icinga2.debug cache file for icinga2 object list")
+ ("errorlog,e", po::value<std::string>(), "log fatal errors to the specified log file (only works in combination with --daemonize or --close-stdio)")
+#ifndef _WIN32
+ ("daemonize,d", "detach from the controlling terminal")
+ ("close-stdio", "do not log to stdout (or stderr) after startup")
+#endif /* _WIN32 */
+ ;
+}
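+
+/* Illustrative invocations (based on the options above; the log path is a placeholder):
+ *   icinga2 daemon -C                 # validate the configuration and exit
+ *   icinga2 daemon -C --dump-objects  # also write the icinga2.debug object cache
+ *   icinga2 daemon --daemonize -e /var/log/icinga2/error.log
+ */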
+
+std::vector<String> DaemonCommand::GetArgumentSuggestions(const String& argument, const String& word) const
+{
+ if (argument == "config" || argument == "errorlog")
+ return GetBashCompletionSuggestions("file", word);
+ else
+ return CLICommand::GetArgumentSuggestions(argument, word);
+}
+
+#ifndef _WIN32
+// The PID of the Icinga umbrella process
+pid_t l_UmbrellaPid = 0;
+
+// Whether the umbrella process allowed us to continue working beyond config validation
+static Atomic<bool> l_AllowedToWork (false);
+#endif /* _WIN32 */
+
+#ifdef I2_DEBUG
+/**
+ * Determine whether the developer wants to delay the worker process to attach a debugger to it.
+ *
+ * @return Internal.DebugWorkerDelay double
+ */
+static double GetDebugWorkerDelay()
+{
+ Namespace::Ptr internal = ScriptGlobal::Get("Internal", &Empty);
+
+ Value vdebug;
+ if (internal && internal->Get("DebugWorkerDelay", &vdebug))
+ return Convert::ToDouble(vdebug);
+
+ return 0.0;
+}
+#endif /* I2_DEBUG */
+
+static String l_ObjectsPath;
+
+/**
+ * Do the actual work (config loading, ...)
+ *
+ * @param configs Files to read config from
+ * @param closeConsoleLog Whether to close the console log after config loading
+ * @param stderrFile Where to log errors
+ *
+ * @return Exit code
+ */
+static inline
+int RunWorker(const std::vector<std::string>& configs, bool closeConsoleLog = false, const String& stderrFile = String())
+{
+
+#ifdef I2_DEBUG
+ double delay = GetDebugWorkerDelay();
+
+ if (delay > 0.0) {
+ Log(LogInformation, "RunWorker")
+ << "DEBUG: Current PID: " << Utility::GetPid() << ". Sleeping for " << delay << " seconds to allow lldb/gdb -p <PID> attachment.";
+
+ Utility::Sleep(delay);
+ }
+#endif /* I2_DEBUG */
+
+ Log(LogInformation, "cli", "Loading configuration file(s).");
+ NotifyStatus("Loading configuration file(s)...");
+
+ {
+ std::vector<ConfigItem::Ptr> newItems;
+
+ if (!DaemonUtility::LoadConfigFiles(configs, newItems, l_ObjectsPath, Configuration::VarsPath)) {
+ Log(LogCritical, "cli", "Config validation failed. Re-run with 'icinga2 daemon -C' after fixing the config.");
+ NotifyStatus("Config validation failed.");
+ return EXIT_FAILURE;
+ }
+
+#ifndef _WIN32
+ Log(LogNotice, "cli")
+ << "Notifying umbrella process (PID " << l_UmbrellaPid << ") about the config loading success";
+
+ (void)kill(l_UmbrellaPid, SIGUSR2);
+
+ Log(LogNotice, "cli")
+ << "Waiting for the umbrella process to let us doing the actual work";
+
+ NotifyStatus("Waiting for the umbrella process to let us doing the actual work...");
+
+ if (closeConsoleLog) {
+ CloseStdIO(stderrFile);
+ Logger::DisableConsoleLog();
+ }
+
+ while (!l_AllowedToWork.load()) {
+ Utility::Sleep(0.2);
+ }
+
+ Log(LogNotice, "cli")
+ << "The umbrella process let us continuing";
+#endif /* _WIN32 */
+
+ NotifyStatus("Restoring the previous program state...");
+
+ /* restore the previous program state */
+ try {
+ ConfigObject::RestoreObjects(Configuration::StatePath);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli")
+ << "Failed to restore state file: " << DiagnosticInformation(ex);
+
+ NotifyStatus("Failed to restore state file.");
+
+ return EXIT_FAILURE;
+ }
+
+ NotifyStatus("Activating config objects...");
+
+ // activate config only after daemonization: it starts threads and that is not compatible with fork()
+ if (!ConfigItem::ActivateItems(newItems, false, true, true)) {
+ Log(LogCritical, "cli", "Error activating configuration.");
+
+ NotifyStatus("Error activating configuration.");
+
+ return EXIT_FAILURE;
+ }
+ }
+
+ /* Create the internal API object storage. Do this here too with setups without API. */
+ ConfigObjectUtility::CreateStorage();
+
+ /* Remove ignored Downtime/Comment objects. */
+ try {
+ String configDir = ConfigObjectUtility::GetConfigDir();
+ ConfigItem::RemoveIgnoredItems(configDir);
+ } catch (const std::exception& ex) {
+ Log(LogNotice, "cli")
+ << "Cannot clean ignored downtimes/comments: " << ex.what();
+ }
+
+ ApiListener::UpdateObjectAuthority();
+
+ NotifyStatus("Startup finished.");
+
+ return Application::GetInstance()->Run();
+}
+
+#ifndef _WIN32
+// The signals to block temporarily in StartUnixWorker().
+static const sigset_t l_UnixWorkerSignals = ([]() -> sigset_t {
+ sigset_t s;
+
+ (void)sigemptyset(&s);
+ (void)sigaddset(&s, SIGUSR1);
+ (void)sigaddset(&s, SIGUSR2);
+ (void)sigaddset(&s, SIGINT);
+ (void)sigaddset(&s, SIGTERM);
+ (void)sigaddset(&s, SIGHUP);
+
+ return s;
+})();
+
+// The PID of the seamless worker currently being started by StartUnixWorker()
+static Atomic<pid_t> l_CurrentlyStartingUnixWorkerPid (-1);
+
+// The state of the seamless worker currently being started by StartUnixWorker()
+static Atomic<bool> l_CurrentlyStartingUnixWorkerReady (false);
+
+// The last termination signal we received
+static Atomic<int> l_TermSignal (-1);
+
+// Whether someone requested to re-load config (and we didn't handle that request, yet)
+static Atomic<bool> l_RequestedReload (false);
+
+// Whether someone requested to re-open logs (and we didn't handle that request, yet)
+static Atomic<bool> l_RequestedReopenLogs (false);
+
+/**
+ * Umbrella process' signal handlers
+ */
+static void UmbrellaSignalHandler(int num, siginfo_t *info, void*)
+{
+ switch (num) {
+ case SIGUSR1:
+ // Someone requested to re-open logs
+ l_RequestedReopenLogs.store(true);
+ break;
+ case SIGUSR2:
+ if (!l_CurrentlyStartingUnixWorkerReady.load()
+ && (info->si_pid == 0 || info->si_pid == l_CurrentlyStartingUnixWorkerPid.load()) ) {
+ // The seamless worker currently being started by StartUnixWorker() successfully loaded its config
+ l_CurrentlyStartingUnixWorkerReady.store(true);
+ }
+ break;
+ case SIGINT:
+ case SIGTERM:
+ // Someone requested our termination
+
+ {
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+
+ sa.sa_handler = SIG_DFL;
+
+ (void)sigaction(num, &sa, nullptr);
+ }
+
+ l_TermSignal.store(num);
+ break;
+ case SIGHUP:
+ // Someone requested to re-load config
+ l_RequestedReload.store(true);
+ break;
+ default:
+ // Programming error (or someone has broken the userspace)
+ VERIFY(!"Caught unexpected signal");
+ }
+}
+
+/**
+ * Seamless worker's signal handlers
+ */
+static void WorkerSignalHandler(int num, siginfo_t *info, void*)
+{
+ switch (num) {
+ case SIGUSR1:
+ // Catch SIGUSR1 as long as the actual handler (logrotate)
+ // has not been installed yet, so that SIGUSR1 does not terminate the process
+ break;
+ case SIGUSR2:
+ if (info->si_pid == 0 || info->si_pid == l_UmbrellaPid) {
+ // The umbrella process allowed us to continue working beyond config validation
+ l_AllowedToWork.store(true);
+ }
+ break;
+ case SIGINT:
+ case SIGTERM:
+ if (info->si_pid == 0 || info->si_pid == l_UmbrellaPid) {
+ // The umbrella process requested our termination
+ Application::RequestShutdown();
+ }
+ break;
+ default:
+ // Programming error (or someone has broken the userspace)
+ VERIFY(!"Caught unexpected signal");
+ }
+}
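+
+/* Summary of the signal protocol implemented by the two handlers above:
+ *   worker   -> umbrella: SIGUSR2 = "config loaded successfully"
+ *   umbrella -> worker:   SIGUSR2 = "go ahead and do the actual work",
+ *                         SIGINT/SIGTERM = "shut down"
+ *   external -> umbrella: SIGHUP = reload the config, SIGUSR1 = re-open logs,
+ *                         SIGINT/SIGTERM = terminate
+ */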
+
+#ifdef HAVE_SYSTEMD
+// When we last notified the watchdog.
+static Atomic<double> l_LastNotifiedWatchdog (0);
+
+/**
+ * Notify the watchdog if not notified during the last 2.5s.
+ */
+static void NotifyWatchdog()
+{
+ double now = Utility::GetTime();
+
+ if (now - l_LastNotifiedWatchdog.load() >= 2.5) {
+ sd_notify(0, "WATCHDOG=1");
+ l_LastNotifiedWatchdog.store(now);
+ }
+}
+#endif /* HAVE_SYSTEMD */
+
+/**
+ * Starts seamless worker process doing the actual work (config loading, ...)
+ *
+ * @param configs Files to read config from
+ * @param closeConsoleLog Whether to close the console log after config loading
+ * @param stderrFile Where to log errors
+ *
+ * @return The worker's PID on success, -1 on fork(2) failure, -2 if the worker couldn't load its config
+ */
+static pid_t StartUnixWorker(const std::vector<std::string>& configs, bool closeConsoleLog = false, const String& stderrFile = String())
+{
+ Log(LogNotice, "cli")
+ << "Spawning seamless worker process doing the actual work";
+
+ try {
+ Application::UninitializeBase();
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli")
+ << "Failed to stop thread pool before forking, unexpected error: " << DiagnosticInformation(ex);
+ exit(EXIT_FAILURE);
+ }
+
+ /* Block the signal handlers we'd like to change in the child process until we changed them.
+ * Block SIGUSR2 handler until we've set l_CurrentlyStartingUnixWorkerPid.
+ */
+ (void)sigprocmask(SIG_BLOCK, &l_UnixWorkerSignals, nullptr);
+
+ pid_t pid = fork();
+
+ switch (pid) {
+ case -1:
+ Log(LogCritical, "cli")
+ << "fork() failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+
+ try {
+ Application::InitializeBase();
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli")
+ << "Failed to re-initialize thread pool after forking (parent): " << DiagnosticInformation(ex);
+ exit(EXIT_FAILURE);
+ }
+
+ (void)sigprocmask(SIG_UNBLOCK, &l_UnixWorkerSignals, nullptr);
+ return -1;
+
+ case 0:
+ try {
+ {
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+
+ sa.sa_handler = SIG_DFL;
+
+ (void)sigaction(SIGUSR1, &sa, nullptr);
+ }
+
+ {
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+
+ sa.sa_handler = SIG_IGN;
+
+ (void)sigaction(SIGHUP, &sa, nullptr);
+ }
+
+ {
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+
+ sa.sa_sigaction = &WorkerSignalHandler;
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+
+ (void)sigaction(SIGUSR1, &sa, nullptr);
+ (void)sigaction(SIGUSR2, &sa, nullptr);
+ (void)sigaction(SIGINT, &sa, nullptr);
+ (void)sigaction(SIGTERM, &sa, nullptr);
+ }
+
+ (void)sigprocmask(SIG_UNBLOCK, &l_UnixWorkerSignals, nullptr);
+
+ try {
+ Application::InitializeBase();
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli")
+ << "Failed to re-initialize thread pool after forking (child): " << DiagnosticInformation(ex);
+ _exit(EXIT_FAILURE);
+ }
+
+ try {
+ Process::InitializeSpawnHelper();
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli")
+ << "Failed to initialize process spawn helper after forking (child): " << DiagnosticInformation(ex);
+ _exit(EXIT_FAILURE);
+ }
+
+ _exit(RunWorker(configs, closeConsoleLog, stderrFile));
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli") << "Exception in main process: " << DiagnosticInformation(ex);
+ _exit(EXIT_FAILURE);
+ } catch (...) {
+ _exit(EXIT_FAILURE);
+ }
+
+ default:
+ l_CurrentlyStartingUnixWorkerPid.store(pid);
+ (void)sigprocmask(SIG_UNBLOCK, &l_UnixWorkerSignals, nullptr);
+
+ Log(LogNotice, "cli")
+ << "Spawned worker process (PID " << pid << "), waiting for it to load its config";
+
+ // Wait for the newly spawned process to either load its config or fail.
+ for (;;) {
+#ifdef HAVE_SYSTEMD
+ NotifyWatchdog();
+#endif /* HAVE_SYSTEMD */
+
+ if (waitpid(pid, nullptr, WNOHANG) > 0) {
+ Log(LogNotice, "cli")
+ << "Worker process couldn't load its config";
+
+ pid = -2;
+ break;
+ }
+
+ if (l_CurrentlyStartingUnixWorkerReady.load()) {
+ Log(LogNotice, "cli")
+ << "Worker process successfully loaded its config";
+ break;
+ }
+
+ Utility::Sleep(0.2);
+ }
+
+ // Reset flags for the next time
+ l_CurrentlyStartingUnixWorkerPid.store(-1);
+ l_CurrentlyStartingUnixWorkerReady.store(false);
+
+ try {
+ Application::InitializeBase();
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli")
+ << "Failed to re-initialize thread pool after forking (parent): " << DiagnosticInformation(ex);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ return pid;
+}
+
+/**
+ * Workaround to instantiate Application (which is abstract) in DaemonCommand#Run()
+ */
+class PidFileManagementApp : public Application
+{
+public:
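+ // Main() is never actually executed; this subclass only exists so the umbrella process
+ // can reuse Application's UpdatePidFile()/ClosePidFile() helpers.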
+ inline int Main() override
+ {
+ return EXIT_FAILURE;
+ }
+};
+#endif /* _WIN32 */
+
+/**
+ * The entry point for the "daemon" CLI command.
+ *
+ * @returns An exit status.
+ */
+int DaemonCommand::Run(const po::variables_map& vm, const std::vector<std::string>& ap) const
+{
+#ifdef _WIN32
+ SetConsoleOutputCP(65001);
+#endif /* _WIN32 */
+
+ Logger::EnableTimestamp();
+
+ Log(LogInformation, "cli")
+ << "Icinga application loader (version: " << Application::GetAppVersion()
+#ifdef I2_DEBUG
+ << "; debug"
+#endif /* I2_DEBUG */
+ << ")";
+
+ std::vector<std::string> configs;
+ if (vm.count("config") > 0)
+ configs = vm["config"].as<std::vector<std::string> >();
+ else if (!vm.count("no-config")) {
+ /* The implicit string assignment is needed for Windows builds. */
+ String configDir = Configuration::ConfigDir;
+ configs.push_back(configDir + "/icinga2.conf");
+ }
+
+ if (vm.count("dump-objects")) {
+ if (!vm.count("validate")) {
+ Log(LogCritical, "cli", "--dump-objects is not allowed without -C");
+ return EXIT_FAILURE;
+ }
+
+ l_ObjectsPath = Configuration::ObjectsPath;
+ }
+
+ if (vm.count("validate")) {
+ Log(LogInformation, "cli", "Loading configuration file(s).");
+
+ std::vector<ConfigItem::Ptr> newItems;
+
+ if (!DaemonUtility::LoadConfigFiles(configs, newItems, l_ObjectsPath, Configuration::VarsPath)) {
+ Log(LogCritical, "cli", "Config validation failed. Re-run with 'icinga2 daemon -C' after fixing the config.");
+ return EXIT_FAILURE;
+ }
+
+ Log(LogInformation, "cli", "Finished validating the configuration file(s).");
+ return EXIT_SUCCESS;
+ }
+
+ {
+ pid_t runningpid = Application::ReadPidFile(Configuration::PidPath);
+ if (runningpid > 0) {
+ Log(LogCritical, "cli")
+ << "Another instance of Icinga already running with PID " << runningpid;
+ return EXIT_FAILURE;
+ }
+ }
+
+ if (vm.count("daemonize")) {
+ // this subroutine either succeeds, or logs an error
+ // and terminates the process (does not return).
+ Daemonize();
+ }
+
+#ifndef _WIN32
+ /* The Application manages the PID file,
+ * but on *nix this process doesn't load any config
+ * so there's no central Application instance.
+ */
+ PidFileManagementApp app;
+
+ try {
+ app.UpdatePidFile(Configuration::PidPath);
+ } catch (const std::exception&) {
+ Log(LogCritical, "Application")
+ << "Cannot update PID file '" << Configuration::PidPath << "'. Aborting.";
+ return EXIT_FAILURE;
+ }
+
+ Defer closePidFile ([&app]() {
+ app.ClosePidFile(true);
+ });
+#endif /* _WIN32 */
+
+ if (vm.count("daemonize")) {
+ // After disabling the console log, any further errors will go to the configured log only.
+ // Let's try to make this clear and say good bye.
+ Log(LogInformation, "cli", "Closing console log.");
+
+ String errorLog;
+ if (vm.count("errorlog"))
+ errorLog = vm["errorlog"].as<std::string>();
+
+ CloseStdIO(errorLog);
+ Logger::DisableConsoleLog();
+ }
+
+#ifdef _WIN32
+ try {
+ return RunWorker(configs);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli") << "Exception in main process: " << DiagnosticInformation(ex);
+ return EXIT_FAILURE;
+ } catch (...) {
+ return EXIT_FAILURE;
+ }
+#else /* _WIN32 */
+ l_UmbrellaPid = getpid();
+ Application::SetUmbrellaProcess(l_UmbrellaPid);
+
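+ // This process now acts as the umbrella: it installs UmbrellaSignalHandler() for the
+ // signals below and reacts to them from the supervision loop further down.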
+ {
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+
+ sa.sa_sigaction = &UmbrellaSignalHandler;
+ sa.sa_flags = SA_NOCLDSTOP | SA_RESTART | SA_SIGINFO;
+
+ (void)sigaction(SIGUSR1, &sa, nullptr);
+ (void)sigaction(SIGUSR2, &sa, nullptr);
+ (void)sigaction(SIGINT, &sa, nullptr);
+ (void)sigaction(SIGTERM, &sa, nullptr);
+ (void)sigaction(SIGHUP, &sa, nullptr);
+ }
+
+ bool closeConsoleLog = !vm.count("daemonize") && vm.count("close-stdio");
+
+ String errorLog;
+ if (vm.count("errorlog"))
+ errorLog = vm["errorlog"].as<std::string>();
+
+ // The PID of the current seamless worker
+ pid_t currentWorker = StartUnixWorker(configs, closeConsoleLog, errorLog);
+
+ if (currentWorker < 0) {
+ return EXIT_FAILURE;
+ }
+
+ if (closeConsoleLog) {
+ // After disabling the console log, any further errors will go to the configured log only.
+ // Let's try to make this clear and say good bye.
+ Log(LogInformation, "cli", "Closing console log.");
+
+ CloseStdIO(errorLog);
+ Logger::DisableConsoleLog();
+ }
+
+ // Immediately allow the first (non-reload) worker to continue working beyond config validation
+ (void)kill(currentWorker, SIGUSR2);
+
+#ifdef HAVE_SYSTEMD
+ sd_notify(0, "READY=1");
+#endif /* HAVE_SYSTEMD */
+
+ // Whether we already forwarded a termination signal to the seamless worker
+ bool requestedTermination = false;
+
+ // Whether we already notified systemd about our termination
+ bool notifiedTermination = false;
+
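+ // Supervision loop: feed the systemd watchdog, forward termination signals and SIGUSR1
+ // to the worker, restart the worker on reload requests, and exit once the worker terminates.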
+ for (;;) {
+#ifdef HAVE_SYSTEMD
+ NotifyWatchdog();
+#endif /* HAVE_SYSTEMD */
+
+ if (!requestedTermination) {
+ int termSig = l_TermSignal.load();
+ if (termSig != -1) {
+ Log(LogNotice, "cli")
+ << "Got signal " << termSig << ", forwarding to seamless worker (PID " << currentWorker << ")";
+
+ (void)kill(currentWorker, termSig);
+ requestedTermination = true;
+
+#ifdef HAVE_SYSTEMD
+ if (!notifiedTermination) {
+ notifiedTermination = true;
+ sd_notify(0, "STOPPING=1");
+ }
+#endif /* HAVE_SYSTEMD */
+ }
+ }
+
+ if (l_RequestedReload.exchange(false)) {
+ Log(LogInformation, "Application")
+ << "Got reload command: Starting new instance.";
+
+#ifdef HAVE_SYSTEMD
+ sd_notify(0, "RELOADING=1");
+#endif /* HAVE_SYSTEMD */
+
+   // The old process is still running at this point. Config object changes it makes now
+   // would not be visible to the new process after its config load, so block such changes
+   // while the new worker starts.
+ ConfigObjectsExclusiveLock lock;
+
+ pid_t nextWorker = StartUnixWorker(configs);
+
+ switch (nextWorker) {
+ case -1:
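+     // fork() failed; StartUnixWorker() already logged the error, so keep the current worker running.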
+ break;
+ case -2:
+ Log(LogCritical, "Application", "Found error in config: reloading aborted");
+ Application::SetLastReloadFailed(Utility::GetTime());
+ break;
+ default:
+ Log(LogInformation, "Application")
+ << "Reload done, old process shutting down. Child process with PID '" << nextWorker << "' is taking over.";
+
+ NotifyStatus("Shutting down old instance...");
+
+ Application::SetLastReloadFailed(0);
+ (void)kill(currentWorker, SIGTERM);
+
+ {
+ double start = Utility::GetTime();
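+      // Wait for the old worker to exit, retrying waitpid() on EINTR and feeding the
+      // systemd watchdog on every interruption.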
+
+ while (waitpid(currentWorker, nullptr, 0) == -1 && errno == EINTR) {
+ #ifdef HAVE_SYSTEMD
+ NotifyWatchdog();
+ #endif /* HAVE_SYSTEMD */
+ }
+
+ Log(LogNotice, "cli")
+     << "Waited " << Utility::FormatDuration(Utility::GetTime() - start) << " for the old process to exit.";
+ }
+
+ // Old instance shut down, allow the new one to continue working beyond config validation
+ (void)kill(nextWorker, SIGUSR2);
+
+ NotifyStatus("Shut down old instance.");
+
+ currentWorker = nextWorker;
+ }
+
+#ifdef HAVE_SYSTEMD
+ sd_notify(0, "READY=1");
+#endif /* HAVE_SYSTEMD */
+
+ }
+
+ if (l_RequestedReopenLogs.exchange(false)) {
+ Log(LogNotice, "cli")
+ << "Got signal " << SIGUSR1 << ", forwarding to seamless worker (PID " << currentWorker << ")";
+
+ (void)kill(currentWorker, SIGUSR1);
+ }
+
+ {
+ int status;
+ if (waitpid(currentWorker, &status, WNOHANG) > 0) {
+ Log(LogNotice, "cli")
+ << "Seamless worker (PID " << currentWorker << ") stopped, stopping as well";
+
+#ifdef HAVE_SYSTEMD
+ if (!notifiedTermination) {
+ notifiedTermination = true;
+ sd_notify(0, "STOPPING=1");
+ }
+#endif /* HAVE_SYSTEMD */
+
+ // If killed by signal, forward it via the exit code (to be as seamless as possible)
+ return WIFSIGNALED(status) ? 128 + WTERMSIG(status) : WEXITSTATUS(status);
+ }
+ }
+
+ Utility::Sleep(0.2);
+ }
+#endif /* _WIN32 */
+}
diff --git a/lib/cli/daemoncommand.hpp b/lib/cli/daemoncommand.hpp
new file mode 100644
index 0000000..da8a34b
--- /dev/null
+++ b/lib/cli/daemoncommand.hpp
@@ -0,0 +1,31 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef DAEMONCOMMAND_H
+#define DAEMONCOMMAND_H
+
+#include "cli/clicommand.hpp"
+
+namespace icinga
+{
+
+/**
+ * The "daemon" CLI command.
+ *
+ * @ingroup cli
+ */
+class DaemonCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(DaemonCommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ void InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const override;
+ std::vector<String> GetArgumentSuggestions(const String& argument, const String& word) const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+};
+
+}
+
+#endif /* DAEMONCOMMAND_H */
diff --git a/lib/cli/daemonutility.cpp b/lib/cli/daemonutility.cpp
new file mode 100644
index 0000000..9e910f3
--- /dev/null
+++ b/lib/cli/daemonutility.cpp
@@ -0,0 +1,285 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/daemonutility.hpp"
+#include "base/configobject.hpp"
+#include "base/exception.hpp"
+#include "base/utility.hpp"
+#include "base/logger.hpp"
+#include "base/application.hpp"
+#include "base/scriptglobal.hpp"
+#include "config/configcompiler.hpp"
+#include "config/configcompilercontext.hpp"
+#include "config/configitembuilder.hpp"
+#include "icinga/dependency.hpp"
+#include <set>
+
+using namespace icinga;
+
+static bool ExecuteExpression(Expression *expression)
+{
+ if (!expression)
+ return false;
+
+ try {
+ ScriptFrame frame(true);
+ expression->Evaluate(frame);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "config", DiagnosticInformation(ex));
+ return false;
+ }
+
+ return true;
+}
+
+static bool IncludeZoneDirRecursive(const String& path, const String& package, bool& success)
+{
+ String zoneName = Utility::BaseName(path);
+
+ /* There is no activated Zone object at this point. We can only guess from the compiled
+  * config items whether this zone directory should be included at all.
+  */
+ if (!ConfigItem::GetByTypeAndName(Type::GetByName("Zone"), zoneName)) {
+ return false;
+ }
+
+ /* register this zone path for cluster config sync */
+ ConfigCompiler::RegisterZoneDir("_etc", path, zoneName);
+
+ std::vector<std::unique_ptr<Expression> > expressions;
+ Utility::GlobRecursive(path, "*.conf", [&expressions, zoneName, package](const String& file) {
+ ConfigCompiler::CollectIncludes(expressions, file, zoneName, package);
+ }, GlobFile);
+
+ DictExpression expr(std::move(expressions));
+ if (!ExecuteExpression(&expr))
+ success = false;
+
+ return true;
+}
+
+static bool IncludeNonLocalZone(const String& zonePath, const String& package, bool& success)
+{
+ /* Note: This include function must not call RegisterZoneDir().
+ * We do not need to copy it for cluster config sync. */
+
+ String zoneName = Utility::BaseName(zonePath);
+
+ /* There is no activated Zone object at this point. We can only guess from the compiled
+  * config items whether this synced zone directory should be included at all.
+  */
+ if (!ConfigItem::GetByTypeAndName(Type::GetByName("Zone"), zoneName)) {
+ return false;
+ }
+
+ /* Check whether this node already has an authoritative config version for this zone
+  * (from zones.d in /etc, an api package directory, or a local ".authoritative" marker file).
+  */
+ if (ConfigCompiler::HasZoneConfigAuthority(zoneName) || Utility::PathExists(zonePath + "/.authoritative")) {
+ Log(LogNotice, "config")
+   << "Ignoring non-local config include for zone '" << zoneName << "': We already have an authoritative copy included.";
+ return true;
+ }
+
+ std::vector<std::unique_ptr<Expression> > expressions;
+ Utility::GlobRecursive(zonePath, "*.conf", [&expressions, zoneName, package](const String& file) {
+ ConfigCompiler::CollectIncludes(expressions, file, zoneName, package);
+ }, GlobFile);
+
+ DictExpression expr(std::move(expressions));
+ if (!ExecuteExpression(&expr))
+ success = false;
+
+ return true;
+}
+
+static void IncludePackage(const String& packagePath, bool& success)
+{
+ /* Note: Package includes will register their zones
+ * for config sync inside their generated config. */
+ String packageName = Utility::BaseName(packagePath);
+
+ if (Utility::PathExists(packagePath + "/include.conf")) {
+ std::unique_ptr<Expression> expr = ConfigCompiler::CompileFile(packagePath + "/include.conf",
+ String(), packageName);
+
+ if (!ExecuteExpression(&*expr))
+ success = false;
+ }
+}
+
+bool DaemonUtility::ValidateConfigFiles(const std::vector<std::string>& configs, const String& objectsFile)
+{
+ bool success;
+
+ Namespace::Ptr systemNS = ScriptGlobal::Get("System");
+ VERIFY(systemNS);
+
+ Namespace::Ptr internalNS = ScriptGlobal::Get("Internal");
+ VERIFY(internalNS);
+
+ if (!objectsFile.IsEmpty())
+ ConfigCompilerContext::GetInstance()->OpenObjectsFile(objectsFile);
+
+ if (!configs.empty()) {
+ for (const String& configPath : configs) {
+ try {
+ std::unique_ptr<Expression> expression = ConfigCompiler::CompileFile(configPath, String(), "_etc");
+ success = ExecuteExpression(&*expression);
+ if (!success)
+ return false;
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli", "Could not compile config files: " + DiagnosticInformation(ex, false));
+ Application::Exit(1);
+ }
+ }
+ }
+
+ /* Load cluster config files from /etc/icinga2/zones.d.
+ * This should probably be in libremote but
+ * unfortunately moving it there is somewhat non-trivial. */
+ success = true;
+
+ /* Only load zone directory if we're not in staging validation. */
+ if (!internalNS->Contains("ZonesStageVarDir")) {
+ String zonesEtcDir = Configuration::ZonesDir;
+ if (!zonesEtcDir.IsEmpty() && Utility::PathExists(zonesEtcDir)) {
+ std::set<String> zoneEtcDirs;
+ Utility::Glob(zonesEtcDir + "/*", [&zoneEtcDirs](const String& zoneEtcDir) { zoneEtcDirs.emplace(zoneEtcDir); }, GlobDirectory);
+
+ bool hasSuccess = true;
+
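+   // Repeat until no further directory can be included: compiling one zone's config may
+   // declare additional Zone objects that make other directories eligible on the next pass.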
+ while (!zoneEtcDirs.empty() && hasSuccess) {
+ hasSuccess = false;
+
+ for (auto& zoneEtcDir : zoneEtcDirs) {
+ if (IncludeZoneDirRecursive(zoneEtcDir, "_etc", success)) {
+ zoneEtcDirs.erase(zoneEtcDir);
+ hasSuccess = true;
+ break;
+ }
+ }
+ }
+
+ for (auto& zoneEtcDir : zoneEtcDirs) {
+ Log(LogWarning, "config")
+ << "Ignoring directory '" << zoneEtcDir << "' for unknown zone '" << Utility::BaseName(zoneEtcDir) << "'.";
+ }
+ }
+
+ if (!success)
+ return false;
+ }
+
+ /* Load package config files - they may contain additional zones which
+ * are authoritative on this node and are checked in HasZoneConfigAuthority(). */
+ String packagesVarDir = Configuration::DataDir + "/api/packages";
+ if (Utility::PathExists(packagesVarDir))
+ Utility::Glob(packagesVarDir + "/*", [&success](const String& packagePath) { IncludePackage(packagePath, success); }, GlobDirectory);
+
+ if (!success)
+ return false;
+
+ /* Load cluster synchronized configuration files. This can be overridden for staged sync validations. */
+ String zonesVarDir = Configuration::DataDir + "/api/zones";
+
+ /* Cluster config sync stage validation needs this. */
+ if (internalNS->Contains("ZonesStageVarDir")) {
+ zonesVarDir = internalNS->Get("ZonesStageVarDir");
+
+ Log(LogNotice, "DaemonUtility")
+ << "Overriding zones var directory with '" << zonesVarDir << "' for cluster config sync staging.";
+ }
+
+
+ if (Utility::PathExists(zonesVarDir)) {
+ std::set<String> zoneVarDirs;
+ Utility::Glob(zonesVarDir + "/*", [&zoneVarDirs](const String& zoneVarDir) { zoneVarDirs.emplace(zoneVarDir); }, GlobDirectory);
+
+ bool hasSuccess = true;
+
+ while (!zoneVarDirs.empty() && hasSuccess) {
+ hasSuccess = false;
+
+ for (auto& zoneVarDir : zoneVarDirs) {
+ if (IncludeNonLocalZone(zoneVarDir, "_cluster", success)) {
+ zoneVarDirs.erase(zoneVarDir);
+ hasSuccess = true;
+ break;
+ }
+ }
+ }
+
+  for (auto& zoneVarDir : zoneVarDirs) {
+   Log(LogWarning, "config")
+    << "Ignoring directory '" << zoneVarDir << "' for unknown zone '" << Utility::BaseName(zoneVarDir) << "'.";
+  }
+ }
+
+ if (!success)
+ return false;
+
+ /* This is initialized inside the IcingaApplication class. */
+ Value vAppType;
+ VERIFY(systemNS->Get("ApplicationType", &vAppType));
+
+ Type::Ptr appType = Type::GetByName(vAppType);
+
+ if (ConfigItem::GetItems(appType).empty()) {
+ ConfigItemBuilder builder;
+ builder.SetType(appType);
+ builder.SetName("app");
+ builder.AddExpression(new ImportDefaultTemplatesExpression());
+ ConfigItem::Ptr item = builder.Compile();
+ item->Register();
+ }
+
+ return true;
+}
+
+bool DaemonUtility::LoadConfigFiles(const std::vector<std::string>& configs,
+ std::vector<ConfigItem::Ptr>& newItems,
+ const String& objectsFile, const String& varsfile)
+{
+ ActivationScope ascope;
+
+ if (!DaemonUtility::ValidateConfigFiles(configs, objectsFile)) {
+ ConfigCompilerContext::GetInstance()->CancelObjectsFile();
+ return false;
+ }
+
+ // After evaluating the top-level statements of the config files (happening in ValidateConfigFiles() above),
+ // prevent further modification of the global scope. This speeds up the following steps,
+ // since Freeze() disables locking, which is no longer necessary on a read-only data structure.
+ ScriptGlobal::GetGlobals()->Freeze();
+
+ WorkQueue upq(25000, Configuration::Concurrency);
+ upq.SetName("DaemonUtility::LoadConfigFiles");
+ bool result = ConfigItem::CommitItems(ascope.GetContext(), upq, newItems);
+
+ if (result) {
+ try {
+ Dependency::AssertNoCycles();
+ } catch (...) {
+ Log(LogCritical, "config")
+ << DiagnosticInformation(boost::current_exception(), false);
+
+ result = false;
+ }
+ }
+
+ if (!result) {
+ ConfigCompilerContext::GetInstance()->CancelObjectsFile();
+ return false;
+ }
+
+ try {
+ ScriptGlobal::WriteToFile(varsfile);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli", "Could not write vars file: " + DiagnosticInformation(ex, false));
+ Application::Exit(1);
+ }
+
+ ConfigCompilerContext::GetInstance()->FinishObjectsFile();
+
+ return true;
+}
diff --git a/lib/cli/daemonutility.hpp b/lib/cli/daemonutility.hpp
new file mode 100644
index 0000000..963bfba
--- /dev/null
+++ b/lib/cli/daemonutility.hpp
@@ -0,0 +1,27 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef DAEMONUTILITY_H
+#define DAEMONUTILITY_H
+
+#include "cli/i2-cli.hpp"
+#include "config/configitem.hpp"
+#include "base/string.hpp"
+#include <boost/program_options.hpp>
+
+namespace icinga
+{
+
+/**
+ * @ingroup cli
+ */
+class DaemonUtility
+{
+public:
+ static bool ValidateConfigFiles(const std::vector<std::string>& configs, const String& objectsFile = String());
+ static bool LoadConfigFiles(const std::vector<std::string>& configs, std::vector<ConfigItem::Ptr>& newItems,
+ const String& objectsFile = String(), const String& varsfile = String());
+};
+
+}
+
+#endif /* DAEMONUTILITY_H */
diff --git a/lib/cli/editline.hpp b/lib/cli/editline.hpp
new file mode 100644
index 0000000..f97525e
--- /dev/null
+++ b/lib/cli/editline.hpp
@@ -0,0 +1,19 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef EDITLINE_H
+#define EDITLINE_H
+
+extern "C" {
+
+char *readline(const char *prompt);
+int add_history(const char *line);
+void rl_deprep_terminal();
+
+typedef char *ELFunction(const char *, int);
+
+extern char rl_completion_append_character;
+extern ELFunction *rl_completion_entry_function;
+
+}
+
+#endif /* EDITLINE_H */
diff --git a/lib/cli/featuredisablecommand.cpp b/lib/cli/featuredisablecommand.cpp
new file mode 100644
index 0000000..95a4a26
--- /dev/null
+++ b/lib/cli/featuredisablecommand.cpp
@@ -0,0 +1,55 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/featuredisablecommand.hpp"
+#include "cli/featureutility.hpp"
+#include "base/logger.hpp"
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+REGISTER_CLICOMMAND("feature/disable", FeatureDisableCommand);
+
+String FeatureDisableCommand::GetDescription() const
+{
+ return "Disables specified Icinga 2 feature.";
+}
+
+String FeatureDisableCommand::GetShortDescription() const
+{
+ return "disables specified feature";
+}
+
+std::vector<String> FeatureDisableCommand::GetPositionalSuggestions(const String& word) const
+{
+ return FeatureUtility::GetFieldCompletionSuggestions(word, false);
+}
+
+int FeatureDisableCommand::GetMinArguments() const
+{
+ return 1;
+}
+
+int FeatureDisableCommand::GetMaxArguments() const
+{
+ return -1;
+}
+
+ImpersonationLevel FeatureDisableCommand::GetImpersonationLevel() const
+{
+ return ImpersonateIcinga;
+}
+
+/**
+ * The entry point for the "feature disable" CLI command.
+ *
+ * @returns An exit status.
+ */
+int FeatureDisableCommand::Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const
+{
+ if (ap.empty()) {
+ Log(LogCritical, "cli", "Cannot disable feature(s). Name(s) are missing!");
+ return 0;
+ }
+
+ return FeatureUtility::DisableFeatures(ap);
+}
diff --git a/lib/cli/featuredisablecommand.hpp b/lib/cli/featuredisablecommand.hpp
new file mode 100644
index 0000000..b24655d
--- /dev/null
+++ b/lib/cli/featuredisablecommand.hpp
@@ -0,0 +1,33 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef FEATUREDISABLECOMMAND_H
+#define FEATUREDISABLECOMMAND_H
+
+#include "cli/clicommand.hpp"
+
+namespace icinga
+{
+
+/**
+ * The "feature disable" command.
+ *
+ * @ingroup cli
+ */
+class FeatureDisableCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(FeatureDisableCommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ int GetMinArguments() const override;
+ int GetMaxArguments() const override;
+ std::vector<String> GetPositionalSuggestions(const String& word) const override;
+ ImpersonationLevel GetImpersonationLevel() const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+
+};
+
+}
+
+#endif /* FEATUREDISABLECOMMAND_H */
diff --git a/lib/cli/featureenablecommand.cpp b/lib/cli/featureenablecommand.cpp
new file mode 100644
index 0000000..0cf9066
--- /dev/null
+++ b/lib/cli/featureenablecommand.cpp
@@ -0,0 +1,50 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/featureenablecommand.hpp"
+#include "cli/featureutility.hpp"
+#include "base/logger.hpp"
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+REGISTER_CLICOMMAND("feature/enable", FeatureEnableCommand);
+
+String FeatureEnableCommand::GetDescription() const
+{
+ return "Enables specified Icinga 2 feature.";
+}
+
+String FeatureEnableCommand::GetShortDescription() const
+{
+ return "enables specified feature";
+}
+
+std::vector<String> FeatureEnableCommand::GetPositionalSuggestions(const String& word) const
+{
+ return FeatureUtility::GetFieldCompletionSuggestions(word, true);
+}
+
+int FeatureEnableCommand::GetMinArguments() const
+{
+ return 1;
+}
+
+int FeatureEnableCommand::GetMaxArguments() const
+{
+ return -1;
+}
+
+ImpersonationLevel FeatureEnableCommand::GetImpersonationLevel() const
+{
+ return ImpersonateIcinga;
+}
+
+/**
+ * The entry point for the "feature enable" CLI command.
+ *
+ * @returns An exit status.
+ */
+int FeatureEnableCommand::Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const
+{
+ return FeatureUtility::EnableFeatures(ap);
+}
diff --git a/lib/cli/featureenablecommand.hpp b/lib/cli/featureenablecommand.hpp
new file mode 100644
index 0000000..fc91778
--- /dev/null
+++ b/lib/cli/featureenablecommand.hpp
@@ -0,0 +1,32 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef FEATUREENABLECOMMAND_H
+#define FEATUREENABLECOMMAND_H
+
+#include "cli/clicommand.hpp"
+
+namespace icinga
+{
+
+/**
+ * The "feature enable" command.
+ *
+ * @ingroup cli
+ */
+class FeatureEnableCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(FeatureEnableCommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ int GetMinArguments() const override;
+ int GetMaxArguments() const override;
+ std::vector<String> GetPositionalSuggestions(const String& word) const override;
+ ImpersonationLevel GetImpersonationLevel() const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+};
+
+}
+
+#endif /* FEATUREENABLECOMMAND_H */
diff --git a/lib/cli/featurelistcommand.cpp b/lib/cli/featurelistcommand.cpp
new file mode 100644
index 0000000..2aad4a9
--- /dev/null
+++ b/lib/cli/featurelistcommand.cpp
@@ -0,0 +1,34 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/featurelistcommand.hpp"
+#include "cli/featureutility.hpp"
+#include "base/logger.hpp"
+#include "base/convert.hpp"
+#include "base/console.hpp"
+#include <boost/algorithm/string/join.hpp>
+#include <iostream>
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+REGISTER_CLICOMMAND("feature/list", FeatureListCommand);
+
+String FeatureListCommand::GetDescription() const
+{
+ return "Lists all available Icinga 2 features.";
+}
+
+String FeatureListCommand::GetShortDescription() const
+{
+ return "lists all available features";
+}
+
+/**
+ * The entry point for the "feature list" CLI command.
+ *
+ * @returns An exit status.
+ */
+int FeatureListCommand::Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const
+{
+ return FeatureUtility::ListFeatures();
+}
diff --git a/lib/cli/featurelistcommand.hpp b/lib/cli/featurelistcommand.hpp
new file mode 100644
index 0000000..cae1d74
--- /dev/null
+++ b/lib/cli/featurelistcommand.hpp
@@ -0,0 +1,28 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef FEATURELISTCOMMAND_H
+#define FEATURELISTCOMMAND_H
+
+#include "cli/clicommand.hpp"
+
+namespace icinga
+{
+
+/**
+ * The "feature list" command.
+ *
+ * @ingroup cli
+ */
+class FeatureListCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(FeatureListCommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+};
+
+}
+
+#endif /* FEATURELISTCOMMAND_H */
diff --git a/lib/cli/featureutility.cpp b/lib/cli/featureutility.cpp
new file mode 100644
index 0000000..3523868
--- /dev/null
+++ b/lib/cli/featureutility.cpp
@@ -0,0 +1,243 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/featureutility.hpp"
+#include "base/logger.hpp"
+#include "base/console.hpp"
+#include "base/application.hpp"
+#include "base/utility.hpp"
+#include <boost/algorithm/string/join.hpp>
+#include <boost/algorithm/string/replace.hpp>
+#include <fstream>
+#include <iostream>
+
+using namespace icinga;
+
+String FeatureUtility::GetFeaturesAvailablePath()
+{
+ return Configuration::ConfigDir + "/features-available";
+}
+
+String FeatureUtility::GetFeaturesEnabledPath()
+{
+ return Configuration::ConfigDir + "/features-enabled";
+}
+
+std::vector<String> FeatureUtility::GetFieldCompletionSuggestions(const String& word, bool enable)
+{
+ std::vector<String> cache;
+ std::vector<String> suggestions;
+
+ GetFeatures(cache, enable);
+
+ std::sort(cache.begin(), cache.end());
+
+ for (const String& suggestion : cache) {
+ if (suggestion.Find(word) == 0)
+ suggestions.push_back(suggestion);
+ }
+
+ return suggestions;
+}
+
+int FeatureUtility::EnableFeatures(const std::vector<std::string>& features)
+{
+ String features_available_dir = GetFeaturesAvailablePath();
+ String features_enabled_dir = GetFeaturesEnabledPath();
+
+ if (!Utility::PathExists(features_available_dir) ) {
+ Log(LogCritical, "cli")
+ << "Cannot parse available features. Path '" << features_available_dir << "' does not exist.";
+ return 1;
+ }
+
+ if (!Utility::PathExists(features_enabled_dir) ) {
+ Log(LogCritical, "cli")
+ << "Cannot enable features. Path '" << features_enabled_dir << "' does not exist.";
+ return 1;
+ }
+
+ std::vector<std::string> errors;
+
+ for (const String& feature : features) {
+ String source = features_available_dir + "/" + feature + ".conf";
+
+ if (!Utility::PathExists(source) ) {
+ Log(LogCritical, "cli")
+    << "Cannot enable feature '" << feature << "'. Source file '" << source << "' does not exist.";
+ errors.push_back(feature);
+ continue;
+ }
+
+ String target = features_enabled_dir + "/" + feature + ".conf";
+
+ if (Utility::PathExists(target) ) {
+ Log(LogWarning, "cli")
+ << "Feature '" << feature << "' already enabled.";
+ continue;
+ }
+
+ std::cout << "Enabling feature " << ConsoleColorTag(Console_ForegroundMagenta | Console_Bold) << feature
+ << ConsoleColorTag(Console_Normal) << ". Make sure to restart Icinga 2 for these changes to take effect.\n";
+
+#ifndef _WIN32
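+  // POSIX: enable the feature by creating a relative symlink into features-available.
+  // On Windows (below) a stub config file that includes the feature is written instead.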
+ String relativeSource = "../features-available/" + feature + ".conf";
+
+ if (symlink(relativeSource.CStr(), target.CStr()) < 0) {
+ Log(LogCritical, "cli")
+ << "Cannot enable feature '" << feature << "'. Linking source '" << relativeSource << "' to target file '" << target
+ << "' failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\".";
+ errors.push_back(feature);
+ continue;
+ }
+#else /* _WIN32 */
+ std::ofstream fp;
+ fp.open(target.CStr());
+ fp << "include \"../features-available/" << feature << ".conf\"" << std::endl;
+ fp.close();
+
+ if (fp.fail()) {
+ Log(LogCritical, "cli")
+ << "Cannot enable feature '" << feature << "'. Failed to open file '" << target << "'.";
+ errors.push_back(feature);
+ continue;
+ }
+#endif /* _WIN32 */
+ }
+
+ if (!errors.empty()) {
+ Log(LogCritical, "cli")
+ << "Cannot enable feature(s): " << boost::algorithm::join(errors, " ");
+ errors.clear();
+ return 1;
+ }
+
+ return 0;
+}
+
+int FeatureUtility::DisableFeatures(const std::vector<std::string>& features)
+{
+ String features_enabled_dir = GetFeaturesEnabledPath();
+
+ if (!Utility::PathExists(features_enabled_dir) ) {
+ Log(LogCritical, "cli")
+ << "Cannot disable features. Path '" << features_enabled_dir << "' does not exist.";
+ return 0;
+ }
+
+ std::vector<std::string> errors;
+
+ for (const String& feature : features) {
+ String target = features_enabled_dir + "/" + feature + ".conf";
+
+ if (!Utility::PathExists(target) ) {
+ Log(LogWarning, "cli")
+ << "Feature '" << feature << "' already disabled.";
+ continue;
+ }
+
+ if (unlink(target.CStr()) < 0) {
+ Log(LogCritical, "cli")
+ << "Cannot disable feature '" << feature << "'. Unlinking target file '" << target
+    << "' failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\".";
+ errors.push_back(feature);
+ continue;
+ }
+
+ std::cout << "Disabling feature " << ConsoleColorTag(Console_ForegroundMagenta | Console_Bold) << feature
+ << ConsoleColorTag(Console_Normal) << ". Make sure to restart Icinga 2 for these changes to take effect.\n";
+ }
+
+ if (!errors.empty()) {
+ Log(LogCritical, "cli")
+ << "Cannot disable feature(s): " << boost::algorithm::join(errors, " ");
+ errors.clear();
+ return 1;
+ }
+
+ return 0;
+}
+
+int FeatureUtility::ListFeatures(std::ostream& os)
+{
+ std::vector<String> disabled_features;
+ std::vector<String> enabled_features;
+
+ if (!FeatureUtility::GetFeatures(disabled_features, true))
+ return 1;
+
+ os << ConsoleColorTag(Console_ForegroundRed | Console_Bold) << "Disabled features: " << ConsoleColorTag(Console_Normal)
+ << boost::algorithm::join(disabled_features, " ") << "\n";
+
+ if (!FeatureUtility::GetFeatures(enabled_features, false))
+ return 1;
+
+ os << ConsoleColorTag(Console_ForegroundGreen | Console_Bold) << "Enabled features: " << ConsoleColorTag(Console_Normal)
+ << boost::algorithm::join(enabled_features, " ") << "\n";
+
+ return 0;
+}
+
+bool FeatureUtility::GetFeatures(std::vector<String>& features, bool get_disabled)
+{
+ /* request all disabled features */
+ if (get_disabled) {
+  /* disabled = available - enabled */
+ String available_pattern = GetFeaturesAvailablePath() + "/*.conf";
+ std::vector<String> available;
+ Utility::Glob(available_pattern, [&available](const String& featureFile) { CollectFeatures(featureFile, available); }, GlobFile);
+
+ String enabled_pattern = GetFeaturesEnabledPath() + "/*.conf";
+ std::vector<String> enabled;
+ Utility::Glob(enabled_pattern, [&enabled](const String& featureFile) { CollectFeatures(featureFile, enabled); }, GlobFile);
+
+ std::sort(available.begin(), available.end());
+ std::sort(enabled.begin(), enabled.end());
+ std::set_difference(
+ available.begin(), available.end(),
+ enabled.begin(), enabled.end(),
+ std::back_inserter(features)
+ );
+ } else {
+ /* all enabled features */
+ String enabled_pattern = GetFeaturesEnabledPath() + "/*.conf";
+
+ Utility::Glob(enabled_pattern, [&features](const String& featureFile) { CollectFeatures(featureFile, features); }, GlobFile);
+ }
+
+ return true;
+}
+
+bool FeatureUtility::CheckFeatureEnabled(const String& feature)
+{
+ return CheckFeatureInternal(feature, false);
+}
+
+bool FeatureUtility::CheckFeatureDisabled(const String& feature)
+{
+ return CheckFeatureInternal(feature, true);
+}
+
+bool FeatureUtility::CheckFeatureInternal(const String& feature, bool check_disabled)
+{
+ std::vector<String> features;
+
+ if (!FeatureUtility::GetFeatures(features, check_disabled))
+ return false;
+
+ for (const String& check_feature : features) {
+ if (check_feature == feature)
+ return true;
+ }
+
+ return false;
+}
+
+void FeatureUtility::CollectFeatures(const String& feature_file, std::vector<String>& features)
+{
+ String feature = Utility::BaseName(feature_file);
+ boost::algorithm::replace_all(feature, ".conf", "");
+
+ Log(LogDebug, "cli")
+ << "Adding feature: " << feature;
+ features.push_back(feature);
+}
diff --git a/lib/cli/featureutility.hpp b/lib/cli/featureutility.hpp
new file mode 100644
index 0000000..9cb2128
--- /dev/null
+++ b/lib/cli/featureutility.hpp
@@ -0,0 +1,42 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef FEATUREUTILITY_H
+#define FEATUREUTILITY_H
+
+#include "base/i2-base.hpp"
+#include "cli/i2-cli.hpp"
+#include "base/string.hpp"
+#include <vector>
+#include <iostream>
+
+namespace icinga
+{
+
+/**
+ * @ingroup cli
+ */
+class FeatureUtility
+{
+public:
+ static String GetFeaturesAvailablePath();
+ static String GetFeaturesEnabledPath();
+
+ static std::vector<String> GetFieldCompletionSuggestions(const String& word, bool enable);
+
+ static int EnableFeatures(const std::vector<std::string>& features);
+ static int DisableFeatures(const std::vector<std::string>& features);
+ static int ListFeatures(std::ostream& os = std::cout);
+
+ static bool GetFeatures(std::vector<String>& features, bool enable);
+ static bool CheckFeatureEnabled(const String& feature);
+ static bool CheckFeatureDisabled(const String& feature);
+
+private:
+ FeatureUtility();
+ static void CollectFeatures(const String& feature_file, std::vector<String>& features);
+ static bool CheckFeatureInternal(const String& feature, bool check_disabled);
+};
+
+}
+
+#endif /* FEATUREUTILITY_H */
diff --git a/lib/cli/i2-cli.hpp b/lib/cli/i2-cli.hpp
new file mode 100644
index 0000000..86e5ddd
--- /dev/null
+++ b/lib/cli/i2-cli.hpp
@@ -0,0 +1,14 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef I2CLI_H
+#define I2CLI_H
+
+/**
+ * @defgroup cli CLI commands
+ *
+ * The CLI library implements Icinga's command-line interface.
+ */
+
+#include "base/i2-base.hpp"
+
+#endif /* I2CLI_H */
diff --git a/lib/cli/internalsignalcommand.cpp b/lib/cli/internalsignalcommand.cpp
new file mode 100644
index 0000000..b097965
--- /dev/null
+++ b/lib/cli/internalsignalcommand.cpp
@@ -0,0 +1,67 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/internalsignalcommand.hpp"
+#include "base/logger.hpp"
+#include <signal.h>
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+REGISTER_CLICOMMAND("internal/signal", InternalSignalCommand);
+
+String InternalSignalCommand::GetDescription() const
+{
+ return "Send signal as Icinga user";
+}
+
+String InternalSignalCommand::GetShortDescription() const
+{
+ return "Send signal as Icinga user";
+}
+
+ImpersonationLevel InternalSignalCommand::GetImpersonationLevel() const
+{
+ return ImpersonateIcinga;
+}
+
+bool InternalSignalCommand::IsHidden() const
+{
+ return true;
+}
+
+void InternalSignalCommand::InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const
+{
+ visibleDesc.add_options()
+ ("pid,p", po::value<int>(), "Target PID")
+ ("sig,s", po::value<String>(), "Signal (POSIX string) to send")
+ ;
+}
+
+/**
+ * The entry point for the "internal signal" CLI command.
+ *
+ * @returns An exit status.
+ */
+int InternalSignalCommand::Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const
+{
+#ifndef _WIN32
+ String signal = vm["sig"].as<String>();
+
+ /* POSIX offers no portable way to map a signal name to its number, so handle the supported signals explicitly. */
+ if (signal == "SIGKILL")
+ return kill(vm["pid"].as<int>(), SIGKILL);
+ if (signal == "SIGINT")
+ return kill(vm["pid"].as<int>(), SIGINT);
+ if (signal == "SIGCHLD")
+ return kill(vm["pid"].as<int>(), SIGCHLD);
+ if (signal == "SIGHUP")
+ return kill(vm["pid"].as<int>(), SIGHUP);
+
+ Log(LogCritical, "cli") << "Unsupported signal \"" << signal << "\"";
+#else
+ Log(LogCritical, "cli", "Unsupported action on Windows.");
+#endif /* _WIN32 */
+ return 1;
+}
+
diff --git a/lib/cli/internalsignalcommand.hpp b/lib/cli/internalsignalcommand.hpp
new file mode 100644
index 0000000..d599b80
--- /dev/null
+++ b/lib/cli/internalsignalcommand.hpp
@@ -0,0 +1,33 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef INTERNALSIGNALCOMMAND_H
+#define INTERNALSIGNALCOMMAND_H
+
+#include "cli/clicommand.hpp"
+
+namespace icinga
+{
+
+/**
+ * The "internal signal" command.
+ *
+ * @ingroup cli
+ */
+class InternalSignalCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(InternalSignalCommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ ImpersonationLevel GetImpersonationLevel() const override;
+ bool IsHidden() const override;
+ void InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+
+};
+
+}
+
+#endif /* INTERNALSIGNALCOMMAND_H */
diff --git a/lib/cli/nodesetupcommand.cpp b/lib/cli/nodesetupcommand.cpp
new file mode 100644
index 0000000..2a685b5
--- /dev/null
+++ b/lib/cli/nodesetupcommand.cpp
@@ -0,0 +1,559 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/nodesetupcommand.hpp"
+#include "cli/nodeutility.hpp"
+#include "cli/featureutility.hpp"
+#include "cli/apisetuputility.hpp"
+#include "remote/apilistener.hpp"
+#include "remote/pkiutility.hpp"
+#include "base/atomic-file.hpp"
+#include "base/logger.hpp"
+#include "base/console.hpp"
+#include "base/application.hpp"
+#include "base/tlsutility.hpp"
+#include "base/scriptglobal.hpp"
+#include "base/exception.hpp"
+#include <boost/algorithm/string/join.hpp>
+#include <boost/algorithm/string/replace.hpp>
+
+#include <iostream>
+#include <fstream>
+#include <vector>
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+REGISTER_CLICOMMAND("node/setup", NodeSetupCommand);
+
+String NodeSetupCommand::GetDescription() const
+{
+ return "Sets up an Icinga 2 node.";
+}
+
+String NodeSetupCommand::GetShortDescription() const
+{
+ return "set up node";
+}
+
+void NodeSetupCommand::InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const
+{
+ visibleDesc.add_options()
+ ("zone", po::value<std::string>(), "The name of the local zone")
+ ("endpoint", po::value<std::vector<std::string> >(), "Connect to remote endpoint; syntax: cn[,host,port]")
+ ("parent_host", po::value<std::string>(), "The name of the parent host for auto-signing the csr; syntax: host[,port]")
+ ("parent_zone", po::value<std::string>(), "The name of the parent zone")
+ ("listen", po::value<std::string>(), "Listen on host,port")
+ ("ticket", po::value<std::string>(), "Generated ticket number for this request (optional)")
+ ("trustedcert", po::value<std::string>(), "Trusted parent certificate file as connection verification (received via 'pki save-cert')")
+ ("cn", po::value<std::string>(), "The certificate's common name")
+ ("accept-config", "Accept config from parent node")
+ ("accept-commands", "Accept commands from parent node")
+ ("master", "Use setup for a master instance")
+  ("global_zones", po::value<std::vector<std::string> >(), "The names of additional global zones (besides 'global-templates' and 'director-global').")
+ ("disable-confd", "Disables the conf.d directory during the setup");
+
+ hiddenDesc.add_options()
+ ("master_zone", po::value<std::string>(), "DEPRECATED: The name of the master zone")
+ ("master_host", po::value<std::string>(), "DEPRECATED: The name of the master host for auto-signing the csr; syntax: host[,port]");
+}
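+
+/* Illustrative invocations only; the host names, zone names, ticket and file paths below
+ * are placeholders rather than defaults:
+ *
+ *   # master instance
+ *   icinga2 node setup --master --cn master1.example.org --zone master --disable-confd
+ *
+ *   # agent/satellite requesting its certificate from a parent node
+ *   icinga2 node setup --zone agent1.example.org --cn agent1.example.org \
+ *     --endpoint master1.example.org,192.0.2.10,5665 --parent_host 192.0.2.10,5665 \
+ *     --parent_zone master --ticket <ticket> --trustedcert trusted-parent.crt \
+ *     --accept-config --accept-commands
+ */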
+
+std::vector<String> NodeSetupCommand::GetArgumentSuggestions(const String& argument, const String& word) const
+{
+ if (argument == "key" || argument == "cert" || argument == "trustedcert")
+ return GetBashCompletionSuggestions("file", word);
+ else if (argument == "host")
+ return GetBashCompletionSuggestions("hostname", word);
+ else if (argument == "port")
+ return GetBashCompletionSuggestions("service", word);
+ else
+ return CLICommand::GetArgumentSuggestions(argument, word);
+}
+
+ImpersonationLevel NodeSetupCommand::GetImpersonationLevel() const
+{
+ return ImpersonateIcinga;
+}
+
+/**
+ * The entry point for the "node setup" CLI command.
+ *
+ * @returns An exit status.
+ */
+int NodeSetupCommand::Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const
+{
+ if (!ap.empty()) {
+ Log(LogWarning, "cli")
+ << "Ignoring parameters: " << boost::algorithm::join(ap, " ");
+ }
+
+ if (vm.count("master"))
+ return SetupMaster(vm, ap);
+ else
+ return SetupNode(vm, ap);
+}
+
+int NodeSetupCommand::SetupMaster(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap)
+{
+ /* Ignore not required parameters */
+ if (vm.count("ticket"))
+ Log(LogWarning, "cli", "Master for Node setup: Ignoring --ticket");
+
+ if (vm.count("endpoint"))
+ Log(LogWarning, "cli", "Master for Node setup: Ignoring --endpoint");
+
+ if (vm.count("trustedcert"))
+ Log(LogWarning, "cli", "Master for Node setup: Ignoring --trustedcert");
+
+ String cn = Utility::GetFQDN();
+
+ if (vm.count("cn"))
+ cn = vm["cn"].as<std::string>();
+
+ /* Setup command hardcodes this as FQDN */
+ String endpointName = cn;
+
+ /* Allow to specify zone name. */
+ String zoneName = "master";
+
+ if (vm.count("zone"))
+ zoneName = vm["zone"].as<std::string>();
+
+ /* check whether the user wants to generate a new certificate or not */
+ String existingPath = ApiListener::GetCertsDir() + "/" + cn + ".crt";
+
+ Log(LogInformation, "cli")
+  << "Checking for existing certificates for common name '" << cn << "'...";
+
+ if (Utility::PathExists(existingPath)) {
+ Log(LogWarning, "cli")
+ << "Certificate '" << existingPath << "' for CN '" << cn << "' already exists. Not generating new certificate.";
+ } else {
+ Log(LogInformation, "cli")
+ << "Certificates not yet generated. Running 'api setup' now.";
+
+ ApiSetupUtility::SetupMasterCertificates(cn);
+ }
+
+ Log(LogInformation, "cli", "Generating master configuration for Icinga 2.");
+ ApiSetupUtility::SetupMasterApiUser();
+
+ if (!FeatureUtility::CheckFeatureEnabled("api")) {
+ ApiSetupUtility::SetupMasterEnableApi();
+ } else {
+ Log(LogInformation, "cli")
+ << "'api' feature already enabled.\n";
+ }
+
+ /* write zones.conf and update with zone + endpoint information */
+ Log(LogInformation, "cli", "Generating zone and object configuration.");
+
+ std::vector<String> globalZones { "global-templates", "director-global" };
+ std::vector<std::string> setupGlobalZones;
+
+ if (vm.count("global_zones"))
+ setupGlobalZones = vm["global_zones"].as<std::vector<std::string> >();
+
+ for (decltype(setupGlobalZones.size()) i = 0; i < setupGlobalZones.size(); i++) {
+ if (std::find(globalZones.begin(), globalZones.end(), setupGlobalZones[i]) != globalZones.end()) {
+ Log(LogCritical, "cli")
+ << "The global zone '" << setupGlobalZones[i] << "' is already specified.";
+ return 1;
+ }
+ }
+
+ globalZones.insert(globalZones.end(), setupGlobalZones.begin(), setupGlobalZones.end());
+
+ /* Generate master configuration. */
+ NodeUtility::GenerateNodeMasterIcingaConfig(endpointName, zoneName, globalZones);
+
+ /* Update the ApiListener config. */
+ Log(LogInformation, "cli", "Updating the APIListener feature.");
+
+ String apipath = FeatureUtility::GetFeaturesAvailablePath() + "/api.conf";
+ NodeUtility::CreateBackupFile(apipath);
+
+ AtomicFile fp (apipath, 0644);
+
+ fp << "/**\n"
+ << " * The API listener is used for distributed monitoring setups.\n"
+ << " */\n"
+ << "object ApiListener \"api\" {\n";
+
+ if (vm.count("listen")) {
+ std::vector<String> tokens = String(vm["listen"].as<std::string>()).Split(",");
+
+ if (tokens.size() > 0)
+ fp << " bind_host = \"" << tokens[0] << "\"\n";
+ if (tokens.size() > 1)
+ fp << " bind_port = " << tokens[1] << "\n";
+ }
+
+ fp << "\n";
+
+ if (vm.count("accept-config"))
+ fp << " accept_config = true\n";
+ else
+ fp << " accept_config = false\n";
+
+ if (vm.count("accept-commands"))
+ fp << " accept_commands = true\n";
+ else
+ fp << " accept_commands = false\n";
+
+ fp << "\n"
+ << " ticket_salt = TicketSalt\n"
+ << "}\n";
+
+ fp.Commit();
+
+ /* update constants.conf with NodeName = CN + TicketSalt = random value */
+ if (endpointName != Utility::GetFQDN()) {
+ Log(LogWarning, "cli")
+   << "CN/Endpoint name '" << endpointName << "' does not match the default FQDN '" << Utility::GetFQDN() << "'. Requires an update for the NodeName constant in constants.conf!";
+ }
+
+ NodeUtility::UpdateConstant("NodeName", endpointName);
+ NodeUtility::UpdateConstant("ZoneName", zoneName);
+
+ String salt = RandomString(16);
+
+ NodeUtility::UpdateConstant("TicketSalt", salt);
+
+ Log(LogInformation, "cli")
+ << "Edit the api feature config file '" << apipath << "' and set a secure 'ticket_salt' attribute.";
+
+ if (vm.count("disable-confd")) {
+ /* Disable conf.d inclusion */
+ if (NodeUtility::UpdateConfiguration("\"conf.d\"", false, true)) {
+ Log(LogInformation, "cli")
+ << "Disabled conf.d inclusion";
+ } else {
+ Log(LogWarning, "cli")
+ << "Tried to disable conf.d inclusion but failed, possibly it's already disabled.";
+ }
+
+ /* Include api-users.conf */
+ String apiUsersFilePath = ApiSetupUtility::GetApiUsersConfPath();
+
+ if (Utility::PathExists(apiUsersFilePath)) {
+ NodeUtility::UpdateConfiguration("\"conf.d/api-users.conf\"", true, false);
+ } else {
+ Log(LogWarning, "cli")
+    << "File to include '" << apiUsersFilePath << "' does not exist.";
+ }
+ }
+
+ /* tell the user to reload icinga2 */
+ Log(LogInformation, "cli", "Make sure to restart Icinga 2.");
+
+ return 0;
+}
+
+int NodeSetupCommand::SetupNode(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap)
+{
+ /* require at least one endpoint. Ticket is optional. */
+ if (!vm.count("endpoint")) {
+ Log(LogCritical, "cli", "You need to specify at least one endpoint (--endpoint).");
+ return 1;
+ }
+
+ if (!vm.count("zone")) {
+ Log(LogCritical, "cli", "You need to specify the local zone (--zone).");
+ return 1;
+ }
+
+ /* Deprecation warnings. TODO: Remove in 2.10.0. */
+ if (vm.count("master_zone"))
+ Log(LogWarning, "cli", "The 'master_zone' parameter has been deprecated. Use 'parent_zone' instead.");
+ if (vm.count("master_host"))
+ Log(LogWarning, "cli", "The 'master_host' parameter has been deprecated. Use 'parent_host' instead.");
+
+ String ticket;
+
+ if (vm.count("ticket"))
+ ticket = vm["ticket"].as<std::string>();
+
+ if (ticket.IsEmpty()) {
+ Log(LogInformation, "cli")
+ << "Requesting certificate without a ticket.";
+ } else {
+ Log(LogInformation, "cli")
+ << "Requesting certificate with ticket '" << ticket << "'.";
+ }
+
+ /* Decide whether to directly connect to the parent node for CSR signing, or leave it to the user. */
+ bool connectToParent = false;
+ String parentHost;
+ String parentPort = "5665";
+ std::shared_ptr<X509> trustedParentCert;
+
+ /* TODO: remove master_host in 2.10.0. */
+ if (!vm.count("master_host") && !vm.count("parent_host")) {
+ connectToParent = false;
+
+ Log(LogWarning, "cli")
+ << "Node to master/satellite connection setup skipped. Please configure your parent node to\n"
+ << "connect to this node by setting the 'host' attribute for the node Endpoint object.\n";
+ } else {
+ connectToParent = true;
+
+ String parentHostInfo;
+
+ if (vm.count("parent_host"))
+ parentHostInfo = vm["parent_host"].as<std::string>();
+ else if (vm.count("master_host")) /* TODO: Remove in 2.10.0. */
+ parentHostInfo = vm["master_host"].as<std::string>();
+
+ std::vector<String> tokens = parentHostInfo.Split(",");
+
+ if (tokens.size() == 1 || tokens.size() == 2)
+ parentHost = tokens[0];
+
+ if (tokens.size() == 2)
+ parentPort = tokens[1];
+
+ Log(LogInformation, "cli")
+ << "Verifying parent host connection information: host '" << parentHost << "', port '" << parentPort << "'.";
+
+ }
+
+ /* retrieve CN and pass it (defaults to FQDN) */
+ String cn = Utility::GetFQDN();
+
+ if (vm.count("cn"))
+ cn = vm["cn"].as<std::string>();
+
+ Log(LogInformation, "cli")
+ << "Using the following CN (defaults to FQDN): '" << cn << "'.";
+
+ /* pki request a signed certificate from the master */
+ String certsDir = ApiListener::GetCertsDir();
+ Utility::MkDirP(certsDir, 0700);
+
+ String user = Configuration::RunAsUser;
+ String group = Configuration::RunAsGroup;
+
+ if (!Utility::SetFileOwnership(certsDir, user, group)) {
+ Log(LogWarning, "cli")
+ << "Cannot set ownership for user '" << user << "' group '" << group << "' on file '" << certsDir << "'. Verify it yourself!";
+ }
+
+ String key = certsDir + "/" + cn + ".key";
+ String cert = certsDir + "/" + cn + ".crt";
+ String ca = certsDir + "/ca.crt";
+
+ if (Utility::PathExists(key))
+ NodeUtility::CreateBackupFile(key, true);
+ if (Utility::PathExists(cert))
+ NodeUtility::CreateBackupFile(cert);
+
+ if (PkiUtility::NewCert(cn, key, String(), cert) != 0) {
+ Log(LogCritical, "cli", "Failed to generate new self-signed certificate.");
+ return 1;
+ }
+
+ /* fix permissions: root -> icinga daemon user */
+ if (!Utility::SetFileOwnership(key, user, group)) {
+ Log(LogWarning, "cli")
+ << "Cannot set ownership for user '" << user << "' group '" << group << "' on file '" << key << "'. Verify it yourself!";
+ }
+
+ /* Send a signing request to the parent immediately, or leave it to the user. */
+ if (connectToParent) {
+ /* In contrast to `node wizard` the user must manually fetch
+ * the trustedParentCert to prove the trust relationship (fetched with 'pki save-cert').
+ */
+ if (!vm.count("trustedcert")) {
+ Log(LogCritical, "cli")
+ << "Please pass the trusted cert retrieved from the parent node (master or satellite)\n"
+ << "(Hint: 'icinga2 pki save-cert --host <parenthost> --port <5665> --key local.key --cert local.crt --trustedcert trusted-parent.crt').";
+ return 1;
+ }
+
+ String trustedCert = vm["trustedcert"].as<std::string>();
+
+  try {
+ trustedParentCert = GetX509Certificate(trustedCert);
+ } catch (const std::exception&) {
+ Log(LogCritical, "cli")
+ << "Can't read trusted cert at '" << trustedCert << "'.";
+ return 1;
+ }
+
+ try {
+ if (IsCa(trustedParentCert)) {
+ Log(LogCritical, "cli")
+ << "The trusted parent certificate is NOT a client certificate. It seems you passed the 'ca.crt' CA certificate via '--trustedcert' parameter.";
+ return 1;
+ }
+ } catch (const std::exception&) {
+ /* Swallow the error and do not run the check on unsupported OpenSSL platforms. */
+ }
+
+ Log(LogInformation, "cli")
+ << "Verifying trusted certificate file '" << vm["trustedcert"].as<std::string>() << "'.";
+
+ Log(LogInformation, "cli", "Requesting a signed certificate from the parent Icinga node.");
+
+ if (PkiUtility::RequestCertificate(parentHost, parentPort, key, cert, ca, trustedParentCert, ticket) > 0) {
+ Log(LogCritical, "cli")
+ << "Failed to fetch signed certificate from parent Icinga node '"
+ << parentHost << ", "
+ << parentPort << "'. Please try again.";
+ return 1;
+ }
+ } else {
+ /* We cannot retrieve the parent certificate.
+ * Tell the user to manually copy the ca.crt file
+ * into DataDir + "/certs"
+ */
+ Log(LogWarning, "cli")
+ << "\nNo connection to the parent node was specified.\n\n"
+ << "Please copy the public CA certificate from your master/satellite\n"
+ << "into '" << ca << "' before starting Icinga 2.\n";
+
+ if (Utility::PathExists(ca)) {
+ Log(LogInformation, "cli")
+ << "\nFound public CA certificate in '" << ca << "'.\n"
+ << "Please verify that it is the same as on your master/satellite.\n";
+ }
+ }
+
+ if (!Utility::SetFileOwnership(ca, user, group)) {
+ Log(LogWarning, "cli")
+ << "Cannot set ownership for user '" << user << "' group '" << group << "' on file '" << ca << "'. Verify it yourself!";
+ }
+
+ /* fix permissions (again) when updating the signed certificate */
+ if (!Utility::SetFileOwnership(cert, user, group)) {
+ Log(LogWarning, "cli")
+ << "Cannot set ownership for user '" << user << "' group '" << group << "' on file '" << cert << "'. Verify it yourself!";
+ }
+
+ /* disable the notifications feature */
+ Log(LogInformation, "cli", "Disabling the Notification feature.");
+
+ FeatureUtility::DisableFeatures({ "notification" });
+
+ /* enable the ApiListener config */
+
+ Log(LogInformation, "cli", "Updating the ApiListener feature.");
+
+ FeatureUtility::EnableFeatures({ "api" });
+
+ String apipath = FeatureUtility::GetFeaturesAvailablePath() + "/api.conf";
+ NodeUtility::CreateBackupFile(apipath);
+
+ AtomicFile fp (apipath, 0644);
+
+ fp << "/**\n"
+ << " * The API listener is used for distributed monitoring setups.\n"
+ << " */\n"
+ << "object ApiListener \"api\" {\n";
+
+ if (vm.count("listen")) {
+ std::vector<String> tokens = String(vm["listen"].as<std::string>()).Split(",");
+
+ if (tokens.size() > 0)
+ fp << " bind_host = \"" << tokens[0] << "\"\n";
+ if (tokens.size() > 1)
+ fp << " bind_port = " << tokens[1] << "\n";
+ }
+
+ fp << "\n";
+
+ if (vm.count("accept-config"))
+ fp << " accept_config = true\n";
+ else
+ fp << " accept_config = false\n";
+
+ if (vm.count("accept-commands"))
+ fp << " accept_commands = true\n";
+ else
+ fp << " accept_commands = false\n";
+
+ fp << "\n"
+ << "}\n";
+
+ fp.Commit();
+
+ /* Generate zones configuration. */
+ Log(LogInformation, "cli", "Generating zone and object configuration.");
+
+ /* Setup command hardcodes this as FQDN */
+ String endpointName = cn;
+
+ /* Allow to specify zone name. */
+ String zoneName = vm["zone"].as<std::string>();
+
+ /* Allow to specify the parent zone name. */
+ String parentZoneName = "master";
+
+ if (vm.count("parent_zone"))
+ parentZoneName = vm["parent_zone"].as<std::string>();
+
+ std::vector<String> globalZones { "global-templates", "director-global" };
+ std::vector<std::string> setupGlobalZones;
+
+ if (vm.count("global_zones"))
+ setupGlobalZones = vm["global_zones"].as<std::vector<std::string> >();
+
+ for (decltype(setupGlobalZones.size()) i = 0; i < setupGlobalZones.size(); i++) {
+ if (std::find(globalZones.begin(), globalZones.end(), setupGlobalZones[i]) != globalZones.end()) {
+ Log(LogCritical, "cli")
+ << "The global zone '" << setupGlobalZones[i] << "' is already specified.";
+ return 1;
+ }
+ }
+
+ globalZones.insert(globalZones.end(), setupGlobalZones.begin(), setupGlobalZones.end());
+
+ /* Generate node configuration. */
+ NodeUtility::GenerateNodeIcingaConfig(endpointName, zoneName, parentZoneName, vm["endpoint"].as<std::vector<std::string> >(), globalZones);
+
+ /* update constants.conf with NodeName = CN */
+ if (endpointName != Utility::GetFQDN()) {
+ Log(LogWarning, "cli")
+ << "CN/Endpoint name '" << endpointName << "' does not match the default FQDN '"
+ << Utility::GetFQDN() << "'. Requires an update for the NodeName constant in constants.conf!";
+ }
+
+ NodeUtility::UpdateConstant("NodeName", endpointName);
+ NodeUtility::UpdateConstant("ZoneName", zoneName);
+
+ if (!ticket.IsEmpty()) {
+ String ticketPath = ApiListener::GetCertsDir() + "/ticket";
+ AtomicFile af (ticketPath, 0600);
+
+ if (!Utility::SetFileOwnership(af.GetTempFilename(), user, group)) {
+ Log(LogWarning, "cli")
+ << "Cannot set ownership for user '" << user
+ << "' group '" << group
+ << "' on file '" << ticketPath << "'. Verify it yourself!";
+ }
+
+ af << ticket;
+ af.Commit();
+ }
+
+ /* If no parent connection was made, the user must supply the ca.crt before restarting Icinga 2.*/
+ if (!connectToParent) {
+ Log(LogWarning, "cli")
+ << "No connection to the parent node was specified.\n\n"
+ << "Please copy the public CA certificate from your master/satellite\n"
+ << "into '" << ca << "' before starting Icinga 2.\n";
+ } else {
+ Log(LogInformation, "cli", "Make sure to restart Icinga 2.");
+ }
+
+ if (vm.count("disable-confd")) {
+ /* Disable conf.d inclusion */
+ NodeUtility::UpdateConfiguration("\"conf.d\"", false, true);
+ }
+
+	/* tell the user to restart icinga2 */
+ Log(LogInformation, "cli", "Make sure to restart Icinga 2.");
+
+ return 0;
+}
diff --git a/lib/cli/nodesetupcommand.hpp b/lib/cli/nodesetupcommand.hpp
new file mode 100644
index 0000000..d25d21e
--- /dev/null
+++ b/lib/cli/nodesetupcommand.hpp
@@ -0,0 +1,36 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef NODESETUPCOMMAND_H
+#define NODESETUPCOMMAND_H
+
+#include "cli/clicommand.hpp"
+
+namespace icinga
+{
+
+/**
+ * The "node setup" command.
+ *
+ * @ingroup cli
+ */
+class NodeSetupCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(NodeSetupCommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ void InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const override;
+ std::vector<String> GetArgumentSuggestions(const String& argument, const String& word) const override;
+ ImpersonationLevel GetImpersonationLevel() const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+
+private:
+ static int SetupMaster(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap);
+ static int SetupNode(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap);
+};
+
+}
+
+#endif /* NODESETUPCOMMAND_H */
diff --git a/lib/cli/nodeutility.cpp b/lib/cli/nodeutility.cpp
new file mode 100644
index 0000000..523532a
--- /dev/null
+++ b/lib/cli/nodeutility.cpp
@@ -0,0 +1,378 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/nodeutility.hpp"
+#include "cli/clicommand.hpp"
+#include "cli/variableutility.hpp"
+#include "base/atomic-file.hpp"
+#include "base/logger.hpp"
+#include "base/application.hpp"
+#include "base/tlsutility.hpp"
+#include "base/convert.hpp"
+#include "base/utility.hpp"
+#include "base/scriptglobal.hpp"
+#include "base/json.hpp"
+#include "base/netstring.hpp"
+#include "base/stdiostream.hpp"
+#include "base/debug.hpp"
+#include "base/objectlock.hpp"
+#include "base/console.hpp"
+#include "base/exception.hpp"
+#include "base/configwriter.hpp"
+#include <boost/algorithm/string/join.hpp>
+#include <boost/algorithm/string/replace.hpp>
+#include <fstream>
+#include <iostream>
+
+using namespace icinga;
+
+String NodeUtility::GetConstantsConfPath()
+{
+ return Configuration::ConfigDir + "/constants.conf";
+}
+
+String NodeUtility::GetZonesConfPath()
+{
+ return Configuration::ConfigDir + "/zones.conf";
+}
+
+/*
+ * Node Setup helpers
+ */
+
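+/*
+ * GenerateNodeIcingaConfig() serializes the endpoint and zone objects below into zones.conf.
+ * Illustrative output (hypothetical names; see SerializeObject() for the exact syntax):
+ *
+ *   object Endpoint "master1.example.com" {
+ *       host = "192.168.56.101"
+ *       port = "5665"
+ *   }
+ *
+ *   object Zone "master" {
+ *       endpoints = [ "master1.example.com" ]
+ *   }
+ */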
+int NodeUtility::GenerateNodeIcingaConfig(const String& endpointName, const String& zoneName,
+ const String& parentZoneName, const std::vector<std::string>& endpoints,
+ const std::vector<String>& globalZones)
+{
+ Array::Ptr config = new Array();
+
+ Array::Ptr myParentZoneMembers = new Array();
+
+ for (const String& endpoint : endpoints) {
+ /* extract all --endpoint arguments and store host,port info */
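+		/* Expected format: "cn[,host[,port]]", e.g.
+		 * "master1.example.com,192.168.56.101,5665" (illustrative values). */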
+ std::vector<String> tokens = endpoint.Split(",");
+
+ Dictionary::Ptr myParentEndpoint = new Dictionary();
+
+ if (tokens.size() > 1) {
+ String host = tokens[1].Trim();
+
+ if (!host.IsEmpty())
+ myParentEndpoint->Set("host", host);
+ }
+
+ if (tokens.size() > 2) {
+ String port = tokens[2].Trim();
+
+ if (!port.IsEmpty())
+ myParentEndpoint->Set("port", port);
+ }
+
+ String myEndpointName = tokens[0].Trim();
+ myParentEndpoint->Set("__name", myEndpointName);
+ myParentEndpoint->Set("__type", "Endpoint");
+
+ /* save endpoint in master zone */
+ myParentZoneMembers->Add(myEndpointName);
+
+ config->Add(myParentEndpoint);
+ }
+
+ /* add the parent zone to the config */
+ config->Add(new Dictionary({
+ { "__name", parentZoneName },
+ { "__type", "Zone" },
+ { "endpoints", myParentZoneMembers }
+ }));
+
+ /* store the local generated node configuration */
+ config->Add(new Dictionary({
+ { "__name", endpointName },
+ { "__type", "Endpoint" }
+ }));
+
+ config->Add(new Dictionary({
+ { "__name", zoneName },
+ { "__type", "Zone" },
+ { "parent", parentZoneName },
+ { "endpoints", new Array({ endpointName }) }
+ }));
+
+ for (const String& globalzone : globalZones) {
+ config->Add(new Dictionary({
+ { "__name", globalzone },
+ { "__type", "Zone" },
+ { "global", true }
+ }));
+ }
+
+ /* Write the newly generated configuration. */
+ NodeUtility::WriteNodeConfigObjects(GetZonesConfPath(), config);
+
+ return 0;
+}
+
+int NodeUtility::GenerateNodeMasterIcingaConfig(const String& endpointName, const String& zoneName,
+ const std::vector<String>& globalZones)
+{
+ Array::Ptr config = new Array();
+
+ /* store the local generated node master configuration */
+ config->Add(new Dictionary({
+ { "__name", endpointName },
+ { "__type", "Endpoint" }
+ }));
+
+ config->Add(new Dictionary({
+ { "__name", zoneName },
+ { "__type", "Zone" },
+ { "endpoints", new Array({ endpointName }) }
+ }));
+
+ for (const String& globalzone : globalZones) {
+ config->Add(new Dictionary({
+ { "__name", globalzone },
+ { "__type", "Zone" },
+ { "global", true }
+ }));
+ }
+
+ /* Write the newly generated configuration. */
+ NodeUtility::WriteNodeConfigObjects(GetZonesConfPath(), config);
+
+ return 0;
+}
+
+bool NodeUtility::WriteNodeConfigObjects(const String& filename, const Array::Ptr& objects)
+{
+ Log(LogInformation, "cli")
+ << "Dumping config items to file '" << filename << "'.";
+
+ /* create a backup first */
+ CreateBackupFile(filename);
+
+ String path = Utility::DirName(filename);
+
+ Utility::MkDirP(path, 0755);
+
+ String user = Configuration::RunAsUser;
+ String group = Configuration::RunAsGroup;
+
+ if (!Utility::SetFileOwnership(path, user, group)) {
+ Log(LogWarning, "cli")
+ << "Cannot set ownership for user '" << user << "' group '" << group << "' on path '" << path << "'. Verify it yourself!";
+ }
+
+ AtomicFile fp (filename, 0644);
+
+ fp << "/*\n";
+ fp << " * Generated by Icinga 2 node setup commands\n";
+ fp << " * on " << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", Utility::GetTime()) << "\n";
+ fp << " */\n\n";
+
+ ObjectLock olock(objects);
+ for (const Dictionary::Ptr& object : objects) {
+ SerializeObject(fp, object);
+ }
+
+ fp << std::endl;
+ fp.Commit();
+
+ return true;
+}
+
+
+/*
+ * We generally do not overwrite files without creating a backup first.
+ */
+bool NodeUtility::CreateBackupFile(const String& target, bool isPrivate)
+{
+ if (!Utility::PathExists(target))
+ return false;
+
+ String backup = target + ".orig";
+
+ if (Utility::PathExists(backup)) {
+ Log(LogInformation, "cli")
+ << "Backup file '" << backup << "' already exists. Skipping backup.";
+ return false;
+ }
+
+ Utility::CopyFile(target, backup);
+
+#ifndef _WIN32
+ if (isPrivate)
+ chmod(backup.CStr(), 0600);
+#endif /* _WIN32 */
+
+ Log(LogInformation, "cli")
+ << "Created backup file '" << backup << "'.";
+
+ return true;
+}
+
+void NodeUtility::SerializeObject(std::ostream& fp, const Dictionary::Ptr& object)
+{
+ fp << "object ";
+ ConfigWriter::EmitIdentifier(fp, object->Get("__type"), false);
+ fp << " ";
+ ConfigWriter::EmitValue(fp, 0, object->Get("__name"));
+ fp << " {\n";
+
+ ObjectLock olock(object);
+ for (const Dictionary::Pair& kv : object) {
+ if (kv.first == "__type" || kv.first == "__name")
+ continue;
+
+ fp << "\t";
+ ConfigWriter::EmitIdentifier(fp, kv.first, true);
+ fp << " = ";
+ ConfigWriter::EmitValue(fp, 1, kv.second);
+ fp << "\n";
+ }
+
+ fp << "}\n\n";
+}
+
+/*
+ * Returns true if the include is found, otherwise false.
+ */
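+/*
+ * Illustrative call (not part of the original sources):
+ *   GetConfigurationIncludeState("\"conf.d\"", true)
+ *   reports whether a line starting with `include_recursive "conf.d"` exists in icinga2.conf.
+ */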
+bool NodeUtility::GetConfigurationIncludeState(const String& value, bool recursive) {
+ String configurationFile = Configuration::ConfigDir + "/icinga2.conf";
+
+ Log(LogInformation, "cli")
+ << "Reading '" << configurationFile << "'.";
+
+ std::ifstream ifp(configurationFile.CStr());
+
+ String affectedInclude = value;
+
+ if (recursive)
+ affectedInclude = "include_recursive " + affectedInclude;
+ else
+ affectedInclude = "include " + affectedInclude;
+
+ bool isIncluded = false;
+
+ std::string line;
+
+ while(std::getline(ifp, line)) {
+		/*
+		 * Check whether the include is enabled.
+		 * The first hit breaks out of the loop.
+		 */
+
+ if (line.compare(0, affectedInclude.GetLength(), affectedInclude) == 0) {
+ isIncluded = true;
+
+ /*
+			 * We can safely break out here, since an enabled include always wins.
+ */
+ break;
+ }
+ }
+
+ ifp.close();
+
+ return isIncluded;
+}
+
+/*
+ * include = false: comments out the include statement
+ * include = true: adds an include statement, or uncomments an existing one
+ * recursive = false: searches for a non-recursive include statement
+ * recursive = true: searches for a recursive include statement
+ * Returns true on success, false if the option was not found.
+ */
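+/*
+ * Illustrative call (not part of the original sources):
+ *   UpdateConfiguration("\"conf.d\"", false, true)
+ *   rewrites `include_recursive "conf.d"` as `// include_recursive "conf.d"` in icinga2.conf.
+ */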
+bool NodeUtility::UpdateConfiguration(const String& value, bool include, bool recursive)
+{
+ String configurationFile = Configuration::ConfigDir + "/icinga2.conf";
+
+ Log(LogInformation, "cli")
+ << "Updating '" << value << "' include in '" << configurationFile << "'.";
+
+ NodeUtility::CreateBackupFile(configurationFile);
+
+ std::ifstream ifp(configurationFile.CStr());
+ AtomicFile ofp (configurationFile, 0644);
+
+ String affectedInclude = value;
+
+ if (recursive)
+ affectedInclude = "include_recursive " + affectedInclude;
+ else
+ affectedInclude = "include " + affectedInclude;
+
+ bool found = false;
+
+ std::string line;
+
+ while (std::getline(ifp, line)) {
+ if (include) {
+ if (line.find("//" + affectedInclude) != std::string::npos || line.find("// " + affectedInclude) != std::string::npos) {
+ found = true;
+ ofp << "// Added by the node setup CLI command on "
+ << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", Utility::GetTime())
+ << "\n" + affectedInclude + "\n";
+ } else if (line.find(affectedInclude) != std::string::npos) {
+ found = true;
+
+ Log(LogInformation, "cli")
+ << "Include statement '" + affectedInclude + "' already set.";
+
+ ofp << line << "\n";
+ } else {
+ ofp << line << "\n";
+ }
+ } else {
+ if (line.find(affectedInclude) != std::string::npos) {
+ found = true;
+ ofp << "// Disabled by the node setup CLI command on "
+ << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", Utility::GetTime())
+ << "\n// " + affectedInclude + "\n";
+ } else {
+ ofp << line << "\n";
+ }
+ }
+ }
+
+ if (include && !found) {
+ ofp << "// Added by the node setup CLI command on "
+ << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", Utility::GetTime())
+ << "\n" + affectedInclude + "\n";
+ }
+
+ ifp.close();
+ ofp.Commit();
+
+ return (found || include);
+}
+
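+/*
+ * Rewrites (or appends) `const <name> = "<value>"` in constants.conf.
+ * Illustrative call (hypothetical value): UpdateConstant("NodeName", "agent1.example.com")
+ * yields the line `const NodeName = "agent1.example.com"`.
+ */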
+void NodeUtility::UpdateConstant(const String& name, const String& value)
+{
+ String constantsConfPath = NodeUtility::GetConstantsConfPath();
+
+ Log(LogInformation, "cli")
+ << "Updating '" << name << "' constant in '" << constantsConfPath << "'.";
+
+ NodeUtility::CreateBackupFile(constantsConfPath);
+
+ std::ifstream ifp(constantsConfPath.CStr());
+ AtomicFile ofp (constantsConfPath, 0644);
+
+ bool found = false;
+
+ std::string line;
+ while (std::getline(ifp, line)) {
+ if (line.find("const " + name + " = ") != std::string::npos) {
+ ofp << "const " + name + " = \"" + value + "\"\n";
+ found = true;
+ } else
+ ofp << line << "\n";
+ }
+
+ if (!found)
+ ofp << "const " + name + " = \"" + value + "\"\n";
+
+ ifp.close();
+ ofp.Commit();
+}
diff --git a/lib/cli/nodeutility.hpp b/lib/cli/nodeutility.hpp
new file mode 100644
index 0000000..7016b6b
--- /dev/null
+++ b/lib/cli/nodeutility.hpp
@@ -0,0 +1,49 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef NODEUTILITY_H
+#define NODEUTILITY_H
+
+#include "base/i2-base.hpp"
+#include "cli/i2-cli.hpp"
+#include "base/dictionary.hpp"
+#include "base/array.hpp"
+#include "base/value.hpp"
+#include "base/string.hpp"
+#include <vector>
+
+namespace icinga
+{
+
+/**
+ * @ingroup cli
+ */
+class NodeUtility
+{
+public:
+ static String GetConstantsConfPath();
+ static String GetZonesConfPath();
+
+ static bool CreateBackupFile(const String& target, bool isPrivate = false);
+
+ static bool WriteNodeConfigObjects(const String& filename, const Array::Ptr& objects);
+
+ static bool GetConfigurationIncludeState(const String& value, bool recursive);
+ static bool UpdateConfiguration(const String& value, bool include, bool recursive);
+ static void UpdateConstant(const String& name, const String& value);
+
+ /* node setup helpers */
+ static int GenerateNodeIcingaConfig(const String& endpointName, const String& zoneName,
+ const String& parentZoneName, const std::vector<std::string>& endpoints,
+ const std::vector<String>& globalZones);
+ static int GenerateNodeMasterIcingaConfig(const String& endpointName, const String& zoneName,
+ const std::vector<String>& globalZones);
+
+private:
+ NodeUtility();
+
+ static void SerializeObject(std::ostream& fp, const Dictionary::Ptr& object);
+};
+
+}
+
+#endif /* NODEUTILITY_H */
diff --git a/lib/cli/nodewizardcommand.cpp b/lib/cli/nodewizardcommand.cpp
new file mode 100644
index 0000000..3a3cd42
--- /dev/null
+++ b/lib/cli/nodewizardcommand.cpp
@@ -0,0 +1,815 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/nodewizardcommand.hpp"
+#include "cli/nodeutility.hpp"
+#include "cli/featureutility.hpp"
+#include "cli/apisetuputility.hpp"
+#include "remote/apilistener.hpp"
+#include "remote/pkiutility.hpp"
+#include "base/atomic-file.hpp"
+#include "base/logger.hpp"
+#include "base/console.hpp"
+#include "base/application.hpp"
+#include "base/tlsutility.hpp"
+#include "base/scriptglobal.hpp"
+#include "base/exception.hpp"
+#include <boost/algorithm/string/join.hpp>
+#include <boost/algorithm/string/replace.hpp>
+#include <boost/algorithm/string/case_conv.hpp>
+#include <iostream>
+#include <string>
+#include <fstream>
+#include <vector>
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+REGISTER_CLICOMMAND("node/wizard", NodeWizardCommand);
+
+String NodeWizardCommand::GetDescription() const
+{
+ return "Wizard for Icinga 2 node setup.";
+}
+
+String NodeWizardCommand::GetShortDescription() const
+{
+ return "wizard for node setup";
+}
+
+ImpersonationLevel NodeWizardCommand::GetImpersonationLevel() const
+{
+ return ImpersonateIcinga;
+}
+
+int NodeWizardCommand::GetMaxArguments() const
+{
+ return -1;
+}
+
+void NodeWizardCommand::InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const
+{
+ visibleDesc.add_options()
+ ("verbose", "increase log level");
+}
+
+/**
+ * The entry point for the "node wizard" CLI command.
+ *
+ * @returns An exit status.
+ */
+int NodeWizardCommand::Run(const boost::program_options::variables_map& vm,
+ const std::vector<std::string>& ap) const
+{
+ if (!vm.count("verbose"))
+ Logger::SetConsoleLogSeverity(LogCritical);
+
+ /*
+ * The wizard will get all information from the user,
+ * and then call all required functions.
+ */
+
+ std::cout << ConsoleColorTag(Console_Bold | Console_ForegroundBlue)
+ << "Welcome to the Icinga 2 Setup Wizard!\n"
+ << "\n"
+ << "We will guide you through all required configuration details.\n"
+ << "\n"
+ << ConsoleColorTag(Console_Normal);
+
+ /* 0. master or node setup?
+ * 1. Ticket
+ * 2. Master information for autosigning
+ * 3. Trusted cert location
+ * 4. CN to use (defaults to FQDN)
+ * 5. Local CA
+ * 6. New self signed certificate
+ * 7. Request signed certificate from master
+ * 8. copy key information to /var/lib/icinga2/certs
+ * 9. enable ApiListener feature
+ * 10. generate zones.conf with endpoints and zone objects
+ * 11. set NodeName = cn and ZoneName in constants.conf
+ * 12. disable conf.d directory?
+ * 13. reload icinga2, or tell the user to
+ */
+
+ std::string answer;
+ /* master or satellite/agent setup */
+ std::cout << ConsoleColorTag(Console_Bold)
+ << "Please specify if this is an agent/satellite setup "
+ << "('n' installs a master setup)" << ConsoleColorTag(Console_Normal)
+ << " [Y/n]: ";
+ std::getline (std::cin, answer);
+
+ boost::algorithm::to_lower(answer);
+
+ String choice = answer;
+
+ std::cout << "\n";
+
+ int res = 0;
+
+ if (choice.Contains("n"))
+ res = MasterSetup();
+ else
+ res = AgentSatelliteSetup();
+
+ if (res != 0)
+ return res;
+
+ std::cout << "\n";
+ std::cout << ConsoleColorTag(Console_Bold | Console_ForegroundGreen)
+ << "Done.\n\n"
+ << ConsoleColorTag(Console_Normal);
+
+ std::cout << ConsoleColorTag(Console_Bold | Console_ForegroundRed)
+ << "Now restart your Icinga 2 daemon to finish the installation!\n"
+ << ConsoleColorTag(Console_Normal);
+
+ return 0;
+}
+
+int NodeWizardCommand::AgentSatelliteSetup() const
+{
+ std::string answer;
+ String choice;
+ bool connectToParent = false;
+
+ std::cout << "Starting the Agent/Satellite setup routine...\n\n";
+
+ /* CN */
+ std::cout << ConsoleColorTag(Console_Bold)
+ << "Please specify the common name (CN)"
+ << ConsoleColorTag(Console_Normal)
+ << " [" << Utility::GetFQDN() << "]: ";
+
+ std::getline(std::cin, answer);
+
+ if (answer.empty())
+ answer = Utility::GetFQDN();
+
+ String cn = answer;
+ cn = cn.Trim();
+
+ std::vector<std::string> endpoints;
+
+ String endpointBuffer;
+
+ std::cout << ConsoleColorTag(Console_Bold)
+ << "\nPlease specify the parent endpoint(s) (master or satellite) where this node should connect to:"
+ << ConsoleColorTag(Console_Normal) << "\n";
+ String parentEndpointName;
+
+wizard_endpoint_loop_start:
+
+ std::cout << ConsoleColorTag(Console_Bold)
+ << "Master/Satellite Common Name" << ConsoleColorTag(Console_Normal)
+ << " (CN from your master/satellite node): ";
+
+ std::getline(std::cin, answer);
+
+ if (answer.empty()) {
+ Log(LogWarning, "cli", "Master/Satellite CN is required! Please retry.");
+ goto wizard_endpoint_loop_start;
+ }
+
+ endpointBuffer = answer;
+ endpointBuffer = endpointBuffer.Trim();
+
+ std::cout << "\nDo you want to establish a connection to the parent node "
+ << ConsoleColorTag(Console_Bold) << "from this node?"
+ << ConsoleColorTag(Console_Normal) << " [Y/n]: ";
+
+ std::getline (std::cin, answer);
+ boost::algorithm::to_lower(answer);
+ choice = answer;
+
+ String parentEndpointPort = "5665";
+
+ if (choice.Contains("n")) {
+ connectToParent = false;
+
+ Log(LogWarning, "cli", "Node to master/satellite connection setup skipped");
+ std::cout << "Connection setup skipped. Please configure your parent node to\n"
+ << "connect to this node by setting the 'host' attribute for the node Endpoint object.\n";
+
+ } else {
+ connectToParent = true;
+
+ std::cout << ConsoleColorTag(Console_Bold)
+ << "Please specify the master/satellite connection information:"
+ << ConsoleColorTag(Console_Normal) << "\n"
+ << ConsoleColorTag(Console_Bold) << "Master/Satellite endpoint host"
+ << ConsoleColorTag(Console_Normal) << " (IP address or FQDN): ";
+
+ std::getline(std::cin, answer);
+
+ if (answer.empty()) {
+ Log(LogWarning, "cli", "Please enter the parent endpoint (master/satellite) connection information.");
+ goto wizard_endpoint_loop_start;
+ }
+
+ String tmp = answer;
+ tmp = tmp.Trim();
+
+ endpointBuffer += "," + tmp;
+ parentEndpointName = tmp;
+
+ std::cout << ConsoleColorTag(Console_Bold)
+ << "Master/Satellite endpoint port" << ConsoleColorTag(Console_Normal)
+ << " [" << parentEndpointPort << "]: ";
+
+ std::getline(std::cin, answer);
+
+ if (!answer.empty())
+ parentEndpointPort = answer;
+
+ endpointBuffer += "," + parentEndpointPort.Trim();
+ }
+
+ endpoints.push_back(endpointBuffer);
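+	/* endpointBuffer now holds "cn[,host[,port]]", e.g. "master1.example.com,192.168.56.101,5665"
+	 * (illustrative values); it is split again by GenerateNodeIcingaConfig(). */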
+
+ std::cout << ConsoleColorTag(Console_Bold) << "\nAdd more master/satellite endpoints?"
+ << ConsoleColorTag(Console_Normal) << " [y/N]: ";
+ std::getline (std::cin, answer);
+
+ boost::algorithm::to_lower(answer);
+
+ choice = answer;
+
+ if (choice.Contains("y"))
+ goto wizard_endpoint_loop_start;
+
+ /* Extract parent node information. */
+ String parentHost, parentPort;
+
+ for (const String& endpoint : endpoints) {
+ std::vector<String> tokens = endpoint.Split(",");
+
+ if (tokens.size() > 1)
+ parentHost = tokens[1];
+
+ if (tokens.size() > 2)
+ parentPort = tokens[2];
+ }
+
+ /* workaround for fetching the master cert */
+ String certsDir = ApiListener::GetCertsDir();
+ Utility::MkDirP(certsDir, 0700);
+
+ String user = Configuration::RunAsUser;
+ String group = Configuration::RunAsGroup;
+
+ if (!Utility::SetFileOwnership(certsDir, user, group)) {
+ Log(LogWarning, "cli")
+ << "Cannot set ownership for user '" << user
+ << "' group '" << group
+ << "' on file '" << certsDir << "'. Verify it yourself!";
+ }
+
+ String nodeCert = certsDir + "/" + cn + ".crt";
+ String nodeKey = certsDir + "/" + cn + ".key";
+
+ if (Utility::PathExists(nodeKey))
+ NodeUtility::CreateBackupFile(nodeKey, true);
+ if (Utility::PathExists(nodeCert))
+ NodeUtility::CreateBackupFile(nodeCert);
+
+ if (PkiUtility::NewCert(cn, nodeKey, Empty, nodeCert) > 0) {
+ Log(LogCritical, "cli")
+ << "Failed to create new self-signed certificate for CN '"
+ << cn << "'. Please try again.";
+ return 1;
+ }
+
+ /* fix permissions: root -> icinga daemon user */
+ if (!Utility::SetFileOwnership(nodeKey, user, group)) {
+ Log(LogWarning, "cli")
+ << "Cannot set ownership for user '" << user
+ << "' group '" << group
+ << "' on file '" << nodeKey << "'. Verify it yourself!";
+ }
+
+ std::shared_ptr<X509> trustedParentCert;
+
+ /* Check whether we should connect to the parent node and present its trusted certificate. */
+ if (connectToParent) {
+		// Fetch the parent certificate (cf. 'pki save-cert') and keep it as the trusted certificate.
+ Log(LogInformation, "cli")
+ << "Fetching public certificate from master ("
+ << parentHost << ", " << parentPort << "):\n";
+
+ trustedParentCert = PkiUtility::FetchCert(parentHost, parentPort);
+ if (!trustedParentCert) {
+ Log(LogCritical, "cli", "Peer did not present a valid certificate.");
+ return 1;
+ }
+
+ std::cout << ConsoleColorTag(Console_Bold) << "Parent certificate information:\n"
+ << ConsoleColorTag(Console_Normal) << PkiUtility::GetCertificateInformation(trustedParentCert)
+ << ConsoleColorTag(Console_Bold) << "\nIs this information correct?"
+ << ConsoleColorTag(Console_Normal) << " [y/N]: ";
+
+ std::getline (std::cin, answer);
+ boost::algorithm::to_lower(answer);
+ if (answer != "y") {
+ Log(LogWarning, "cli", "Process aborted.");
+ return 1;
+ }
+
+ Log(LogInformation, "cli", "Received trusted parent certificate.\n");
+ }
+
+wizard_ticket:
+ String nodeCA = certsDir + "/ca.crt";
+ String ticket;
+
+ /* Check whether we can connect to the parent node and fetch the client and CA certificate. */
+ if (connectToParent) {
+ std::cout << ConsoleColorTag(Console_Bold)
+ << "\nPlease specify the request ticket generated on your Icinga 2 master "
+ << ConsoleColorTag(Console_Normal) << "(optional)"
+ << ConsoleColorTag(Console_Bold) << "."
+ << ConsoleColorTag(Console_Normal) << "\n"
+ << " (Hint: # icinga2 pki ticket --cn '" << cn << "'): ";
+
+ std::getline(std::cin, answer);
+
+ if (answer.empty()) {
+ std::cout << ConsoleColorTag(Console_Bold) << "\n"
+ << "No ticket was specified. Please approve the certificate signing request manually\n"
+ << "on the master (see 'icinga2 ca list' and 'icinga2 ca sign --help' for details)."
+ << ConsoleColorTag(Console_Normal) << "\n";
+ }
+
+ ticket = answer;
+ ticket = ticket.Trim();
+
+ if (ticket.IsEmpty()) {
+ Log(LogInformation, "cli")
+ << "Requesting certificate without a ticket.";
+ } else {
+ Log(LogInformation, "cli")
+ << "Requesting certificate with ticket '" << ticket << "'.";
+ }
+
+ if (Utility::PathExists(nodeCA))
+ NodeUtility::CreateBackupFile(nodeCA);
+ if (Utility::PathExists(nodeCert))
+ NodeUtility::CreateBackupFile(nodeCert);
+
+ if (PkiUtility::RequestCertificate(parentHost, parentPort, nodeKey,
+ nodeCert, nodeCA, trustedParentCert, ticket) > 0) {
+ Log(LogCritical, "cli")
+ << "Failed to fetch signed certificate from master '"
+ << parentHost << ", "
+ << parentPort << "'. Please try again.";
+ goto wizard_ticket;
+ }
+
+ /* fix permissions (again) when updating the signed certificate */
+ if (!Utility::SetFileOwnership(nodeCert, user, group)) {
+ Log(LogWarning, "cli")
+ << "Cannot set ownership for user '" << user
+ << "' group '" << group << "' on file '"
+ << nodeCert << "'. Verify it yourself!";
+ }
+ } else {
+ /* We cannot retrieve the parent certificate.
+ * Tell the user to manually copy the ca.crt file
+ * into DataDir + "/certs"
+ */
+
+ std::cout << ConsoleColorTag(Console_Bold)
+ << "\nNo connection to the parent node was specified.\n\n"
+ << "Please copy the public CA certificate from your master/satellite\n"
+ << "into '" << nodeCA << "' before starting Icinga 2.\n"
+ << ConsoleColorTag(Console_Normal);
+
+ if (Utility::PathExists(nodeCA)) {
+ std::cout << ConsoleColorTag(Console_Bold)
+ << "\nFound public CA certificate in '" << nodeCA << "'.\n"
+ << "Please verify that it is the same as on your master/satellite.\n"
+ << ConsoleColorTag(Console_Normal);
+ }
+
+ }
+
+ /* apilistener config */
+ std::cout << ConsoleColorTag(Console_Bold)
+ << "Please specify the API bind host/port "
+ << ConsoleColorTag(Console_Normal) << "(optional)"
+ << ConsoleColorTag(Console_Bold) << ":\n";
+
+ std::cout << ConsoleColorTag(Console_Bold)
+ << "Bind Host" << ConsoleColorTag(Console_Normal) << " []: ";
+
+ std::getline(std::cin, answer);
+
+ String bindHost = answer;
+ bindHost = bindHost.Trim();
+
+ std::cout << ConsoleColorTag(Console_Bold)
+ << "Bind Port" << ConsoleColorTag(Console_Normal) << " []: ";
+
+ std::getline(std::cin, answer);
+
+ String bindPort = answer;
+ bindPort = bindPort.Trim();
+
+ std::cout << ConsoleColorTag(Console_Bold) << "\n"
+ << "Accept config from parent node?" << ConsoleColorTag(Console_Normal)
+ << " [y/N]: ";
+ std::getline(std::cin, answer);
+ boost::algorithm::to_lower(answer);
+ choice = answer;
+
+ String acceptConfig = choice.Contains("y") ? "true" : "false";
+
+ std::cout << ConsoleColorTag(Console_Bold)
+ << "Accept commands from parent node?" << ConsoleColorTag(Console_Normal)
+ << " [y/N]: ";
+ std::getline(std::cin, answer);
+ boost::algorithm::to_lower(answer);
+ choice = answer;
+
+ String acceptCommands = choice.Contains("y") ? "true" : "false";
+
+ std::cout << "\n";
+
+ std::cout << ConsoleColorTag(Console_Bold | Console_ForegroundGreen)
+ << "Reconfiguring Icinga...\n"
+ << ConsoleColorTag(Console_Normal);
+
+ /* disable the notifications feature on agent/satellite nodes */
+ Log(LogInformation, "cli", "Disabling the Notification feature.");
+
+ FeatureUtility::DisableFeatures({ "notification" });
+
+ Log(LogInformation, "cli", "Enabling the ApiListener feature.");
+
+ FeatureUtility::EnableFeatures({ "api" });
+
+ String apiConfPath = FeatureUtility::GetFeaturesAvailablePath() + "/api.conf";
+ NodeUtility::CreateBackupFile(apiConfPath);
+
+ AtomicFile fp (apiConfPath, 0644);
+
+ fp << "/**\n"
+ << " * The API listener is used for distributed monitoring setups.\n"
+ << " */\n"
+ << "object ApiListener \"api\" {\n"
+ << " accept_config = " << acceptConfig << "\n"
+ << " accept_commands = " << acceptCommands << "\n";
+
+ if (!bindHost.IsEmpty())
+ fp << " bind_host = \"" << bindHost << "\"\n";
+ if (!bindPort.IsEmpty())
+ fp << " bind_port = " << bindPort << "\n";
+
+ fp << "}\n";
+
+ fp.Commit();
+
+ /* Zones configuration. */
+ Log(LogInformation, "cli", "Generating local zones.conf.");
+
+ /* Setup command hardcodes this as FQDN */
+ String endpointName = cn;
+
+ /* Different local zone name. */
+ std::cout << "\nLocal zone name [" + endpointName + "]: ";
+ std::getline(std::cin, answer);
+
+ if (answer.empty())
+ answer = endpointName;
+
+ String zoneName = answer;
+ zoneName = zoneName.Trim();
+
+ /* Different parent zone name. */
+ std::cout << "Parent zone name [master]: ";
+ std::getline(std::cin, answer);
+
+ if (answer.empty())
+ answer = "master";
+
+ String parentZoneName = answer;
+ parentZoneName = parentZoneName.Trim();
+
+ /* Global zones. */
+ std::vector<String> globalZones { "global-templates", "director-global" };
+
+ std::cout << "\nDefault global zones: " << boost::algorithm::join(globalZones, " ");
+ std::cout << "\nDo you want to specify additional global zones? [y/N]: ";
+
+ std::getline(std::cin, answer);
+ boost::algorithm::to_lower(answer);
+ choice = answer;
+
+wizard_global_zone_loop_start:
+ if (choice.Contains("y")) {
+ std::cout << "\nPlease specify the name of the global Zone: ";
+
+ std::getline(std::cin, answer);
+
+ if (answer.empty()) {
+ std::cout << "\nName of the global Zone is required! Please retry.";
+ goto wizard_global_zone_loop_start;
+ }
+
+ String globalZoneName = answer;
+ globalZoneName = globalZoneName.Trim();
+
+ if (std::find(globalZones.begin(), globalZones.end(), globalZoneName) != globalZones.end()) {
+ std::cout << "The global zone '" << globalZoneName << "' is already specified."
+ << " Please retry.";
+ goto wizard_global_zone_loop_start;
+ }
+
+ globalZones.push_back(globalZoneName);
+
+ std::cout << "\nDo you want to specify another global zone? [y/N]: ";
+
+ std::getline(std::cin, answer);
+ boost::algorithm::to_lower(answer);
+ choice = answer;
+
+ if (choice.Contains("y"))
+ goto wizard_global_zone_loop_start;
+ } else
+ Log(LogInformation, "cli", "No additional global Zones have been specified");
+
+ /* Generate node configuration. */
+ NodeUtility::GenerateNodeIcingaConfig(endpointName, zoneName, parentZoneName, endpoints, globalZones);
+
+ if (endpointName != Utility::GetFQDN()) {
+ Log(LogWarning, "cli")
+ << "CN/Endpoint name '" << endpointName << "' does not match the default FQDN '"
+			<< Utility::GetFQDN() << "'. Requires an update for the NodeName constant in constants.conf!";
+ }
+
+ NodeUtility::UpdateConstant("NodeName", endpointName);
+ NodeUtility::UpdateConstant("ZoneName", zoneName);
+
+ if (!ticket.IsEmpty()) {
+ String ticketPath = ApiListener::GetCertsDir() + "/ticket";
+ AtomicFile af (ticketPath, 0600);
+
+ if (!Utility::SetFileOwnership(af.GetTempFilename(), user, group)) {
+ Log(LogWarning, "cli")
+ << "Cannot set ownership for user '" << user
+ << "' group '" << group
+ << "' on file '" << ticketPath << "'. Verify it yourself!";
+ }
+
+ af << ticket;
+ af.Commit();
+ }
+
+ /* If no parent connection was made, the user must supply the ca.crt before restarting Icinga 2.*/
+ if (!connectToParent) {
+ Log(LogWarning, "cli")
+ << "No connection to the parent node was specified.\n\n"
+ << "Please copy the public CA certificate from your master/satellite\n"
+ << "into '" << nodeCA << "' before starting Icinga 2.\n";
+ } else {
+ Log(LogInformation, "cli", "Make sure to restart Icinga 2.");
+ }
+
+ /* Disable conf.d inclusion */
+ std::cout << "\nDo you want to disable the inclusion of the conf.d directory [Y/n]: ";
+
+ std::getline(std::cin, answer);
+ boost::algorithm::to_lower(answer);
+ choice = answer;
+
+ if (choice.Contains("n"))
+ Log(LogInformation, "cli")
+ << "conf.d directory has not been disabled.";
+ else {
+ std::cout << ConsoleColorTag(Console_Bold | Console_ForegroundGreen)
+ << "Disabling the inclusion of the conf.d directory...\n"
+ << ConsoleColorTag(Console_Normal);
+
+ if (!NodeUtility::UpdateConfiguration("\"conf.d\"", false, true)) {
+ std::cout << ConsoleColorTag(Console_Bold | Console_ForegroundRed)
+ << "Failed to disable the conf.d inclusion, it may already have been disabled.\n"
+ << ConsoleColorTag(Console_Normal);
+ }
+
+ /* Satellite/Agents should not include the api-users.conf file.
+ * The configuration should instead be managed via config sync or automation tools.
+ */
+ }
+
+ return 0;
+}
+
+int NodeWizardCommand::MasterSetup() const
+{
+ std::string answer;
+ String choice;
+
+ std::cout << ConsoleColorTag(Console_Bold) << "Starting the Master setup routine...\n\n";
+
+ /* CN */
+ std::cout << ConsoleColorTag(Console_Bold)
+ << "Please specify the common name" << ConsoleColorTag(Console_Normal)
+ << " (CN) [" << Utility::GetFQDN() << "]: ";
+
+ std::getline(std::cin, answer);
+
+ if (answer.empty())
+ answer = Utility::GetFQDN();
+
+ String cn = answer;
+ cn = cn.Trim();
+
+ std::cout << ConsoleColorTag(Console_Bold | Console_ForegroundGreen)
+ << "Reconfiguring Icinga...\n"
+ << ConsoleColorTag(Console_Normal);
+
+ /* check whether the user wants to generate a new certificate or not */
+ String existing_path = ApiListener::GetCertsDir() + "/" + cn + ".crt";
+
+ std::cout << ConsoleColorTag(Console_Normal)
+ << "Checking for existing certificates for common name '" << cn << "'...\n";
+
+ if (Utility::PathExists(existing_path)) {
+ std::cout << "Certificate '" << existing_path << "' for CN '"
+			<< cn << "' already exists. Skipping certificate generation.\n";
+ } else {
+ std::cout << "Certificates not yet generated. Running 'api setup' now.\n";
+ ApiSetupUtility::SetupMasterCertificates(cn);
+ }
+
+ std::cout << ConsoleColorTag(Console_Bold)
+ << "Generating master configuration for Icinga 2.\n"
+ << ConsoleColorTag(Console_Normal);
+
+ ApiSetupUtility::SetupMasterApiUser();
+
+ if (!FeatureUtility::CheckFeatureEnabled("api"))
+ ApiSetupUtility::SetupMasterEnableApi();
+ else
+ std::cout << "'api' feature already enabled.\n";
+
+ /* Setup command hardcodes this as FQDN */
+ String endpointName = cn;
+
+ /* Different zone name. */
+ std::cout << "\nMaster zone name [master]: ";
+ std::getline(std::cin, answer);
+
+ if (answer.empty())
+ answer = "master";
+
+ String zoneName = answer;
+ zoneName = zoneName.Trim();
+
+ /* Global zones. */
+ std::vector<String> globalZones { "global-templates", "director-global" };
+
+ std::cout << "\nDefault global zones: " << boost::algorithm::join(globalZones, " ");
+ std::cout << "\nDo you want to specify additional global zones? [y/N]: ";
+
+ std::getline(std::cin, answer);
+ boost::algorithm::to_lower(answer);
+ choice = answer;
+
+wizard_global_zone_loop_start:
+ if (choice.Contains("y")) {
+ std::cout << "\nPlease specify the name of the global Zone: ";
+
+ std::getline(std::cin, answer);
+
+ if (answer.empty()) {
+ std::cout << "\nName of the global Zone is required! Please retry.";
+ goto wizard_global_zone_loop_start;
+ }
+
+ String globalZoneName = answer;
+ globalZoneName = globalZoneName.Trim();
+
+ if (std::find(globalZones.begin(), globalZones.end(), globalZoneName) != globalZones.end()) {
+ std::cout << "The global zone '" << globalZoneName << "' is already specified."
+ << " Please retry.";
+ goto wizard_global_zone_loop_start;
+ }
+
+ globalZones.push_back(globalZoneName);
+
+ std::cout << "\nDo you want to specify another global zone? [y/N]: ";
+
+ std::getline(std::cin, answer);
+ boost::algorithm::to_lower(answer);
+ choice = answer;
+
+ if (choice.Contains("y"))
+ goto wizard_global_zone_loop_start;
+ } else
+ Log(LogInformation, "cli", "No additional global Zones have been specified");
+
+ /* Generate master configuration. */
+ NodeUtility::GenerateNodeMasterIcingaConfig(endpointName, zoneName, globalZones);
+
+ /* apilistener config */
+ std::cout << ConsoleColorTag(Console_Bold)
+ << "Please specify the API bind host/port "
+ << ConsoleColorTag(Console_Normal) << "(optional)"
+ << ConsoleColorTag(Console_Bold) << ":\n";
+
+ std::cout << ConsoleColorTag(Console_Bold)
+ << "Bind Host" << ConsoleColorTag(Console_Normal) << " []: ";
+
+ std::getline(std::cin, answer);
+
+ String bindHost = answer;
+ bindHost = bindHost.Trim();
+
+ std::cout << ConsoleColorTag(Console_Bold)
+ << "Bind Port" << ConsoleColorTag(Console_Normal) << " []: ";
+
+ std::getline(std::cin, answer);
+
+ String bindPort = answer;
+ bindPort = bindPort.Trim();
+
+ /* api feature is always enabled, check above */
+ String apiConfPath = FeatureUtility::GetFeaturesAvailablePath() + "/api.conf";
+ NodeUtility::CreateBackupFile(apiConfPath);
+
+ AtomicFile fp (apiConfPath, 0644);
+
+ fp << "/**\n"
+ << " * The API listener is used for distributed monitoring setups.\n"
+ << " */\n"
+ << "object ApiListener \"api\" {\n";
+
+ if (!bindHost.IsEmpty())
+ fp << " bind_host = \"" << bindHost << "\"\n";
+ if (!bindPort.IsEmpty())
+ fp << " bind_port = " << bindPort << "\n";
+
+ fp << "\n"
+ << " ticket_salt = TicketSalt\n"
+ << "}\n";
+
+ fp.Commit();
+
+ /* update constants.conf with NodeName = CN + TicketSalt = random value */
+ if (cn != Utility::GetFQDN()) {
+ Log(LogWarning, "cli")
+ << "CN '" << cn << "' does not match the default FQDN '"
+ << Utility::GetFQDN() << "'. Requires an update for the NodeName constant in constants.conf!";
+ }
+
+ Log(LogInformation, "cli", "Updating constants.conf.");
+
+ NodeUtility::CreateBackupFile(NodeUtility::GetConstantsConfPath());
+
+ NodeUtility::UpdateConstant("NodeName", endpointName);
+ NodeUtility::UpdateConstant("ZoneName", zoneName);
+
+ String salt = RandomString(16);
+
+ NodeUtility::UpdateConstant("TicketSalt", salt);
+
+ /* Disable conf.d inclusion */
+ std::cout << "\nDo you want to disable the inclusion of the conf.d directory [Y/n]: ";
+
+ std::getline(std::cin, answer);
+ boost::algorithm::to_lower(answer);
+ choice = answer;
+
+ if (choice.Contains("n"))
+ Log(LogInformation, "cli")
+ << "conf.d directory has not been disabled.";
+ else {
+ std::cout << ConsoleColorTag(Console_Bold | Console_ForegroundGreen)
+ << "Disabling the inclusion of the conf.d directory...\n"
+ << ConsoleColorTag(Console_Normal);
+
+ if (!NodeUtility::UpdateConfiguration("\"conf.d\"", false, true)) {
+ std::cout << ConsoleColorTag(Console_Bold | Console_ForegroundRed)
+ << "Failed to disable the conf.d inclusion, it may already have been disabled.\n"
+ << ConsoleColorTag(Console_Normal);
+ }
+
+ /* Include api-users.conf */
+ String apiUsersFilePath = Configuration::ConfigDir + "/conf.d/api-users.conf";
+
+ std::cout << ConsoleColorTag(Console_Bold | Console_ForegroundGreen)
+ << "Checking if the api-users.conf file exists...\n"
+ << ConsoleColorTag(Console_Normal);
+
+ if (Utility::PathExists(apiUsersFilePath)) {
+ NodeUtility::UpdateConfiguration("\"conf.d/api-users.conf\"", true, false);
+ } else {
+ Log(LogWarning, "cli")
+ << "Included file '" << apiUsersFilePath << "' does not exist.";
+ }
+ }
+
+ return 0;
+}
diff --git a/lib/cli/nodewizardcommand.hpp b/lib/cli/nodewizardcommand.hpp
new file mode 100644
index 0000000..dfda70c
--- /dev/null
+++ b/lib/cli/nodewizardcommand.hpp
@@ -0,0 +1,36 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef NODEWIZARDCOMMAND_H
+#define NODEWIZARDCOMMAND_H
+
+#include "cli/clicommand.hpp"
+
+namespace icinga
+{
+
+/**
+ * The "node wizard" command.
+ *
+ * @ingroup cli
+ */
+class NodeWizardCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(NodeWizardCommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ int GetMaxArguments() const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+ ImpersonationLevel GetImpersonationLevel() const override;
+ void InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const override;
+
+private:
+ int AgentSatelliteSetup() const;
+ int MasterSetup() const;
+};
+
+}
+
+#endif /* NODEWIZARDCOMMAND_H */
diff --git a/lib/cli/objectlistcommand.cpp b/lib/cli/objectlistcommand.cpp
new file mode 100644
index 0000000..3bcb315
--- /dev/null
+++ b/lib/cli/objectlistcommand.cpp
@@ -0,0 +1,145 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/objectlistcommand.hpp"
+#include "cli/objectlistutility.hpp"
+#include "base/logger.hpp"
+#include "base/application.hpp"
+#include "base/convert.hpp"
+#include "base/configobject.hpp"
+#include "base/configtype.hpp"
+#include "base/json.hpp"
+#include "base/netstring.hpp"
+#include "base/stdiostream.hpp"
+#include "base/debug.hpp"
+#include "base/objectlock.hpp"
+#include "base/console.hpp"
+#include <boost/algorithm/string/join.hpp>
+#include <boost/algorithm/string/replace.hpp>
+#include <fstream>
+#include <iostream>
+#include <iomanip>
+#include <sys/stat.h>
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+REGISTER_CLICOMMAND("object/list", ObjectListCommand);
+
+String ObjectListCommand::GetDescription() const
+{
+ return "Lists all Icinga 2 objects.";
+}
+
+String ObjectListCommand::GetShortDescription() const
+{
+ return "lists all objects";
+}
+
+void ObjectListCommand::InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const
+{
+ visibleDesc.add_options()
+ ("count,c", "display object counts by types")
+ ("name,n", po::value<std::string>(), "filter by name matches")
+ ("type,t", po::value<std::string>(), "filter by type matches");
+}
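+
+/* Illustrative usage (hypothetical object names):
+ *   icinga2 object list --type Host --name 'web*'
+ *   icinga2 object list --count
+ */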
+
+static time_t GetCtime(const String& path)
+{
+#ifdef _WIN32
+ struct _stat statbuf;
+ int rc = _stat(path.CStr(), &statbuf);
+#else /* _WIN32 */
+ struct stat statbuf;
+ int rc = stat(path.CStr(), &statbuf);
+#endif /* _WIN32 */
+
+ return rc ? 0 : statbuf.st_ctime;
+}
+
+/**
+ * The entry point for the "object list" CLI command.
+ *
+ * @returns An exit status.
+ */
+int ObjectListCommand::Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const
+{
+ String objectfile = Configuration::ObjectsPath;
+
+ if (!Utility::PathExists(objectfile)) {
+ Log(LogCritical, "cli")
+ << "Cannot open objects file '" << Configuration::ObjectsPath << "'.";
+ Log(LogCritical, "cli", "Run 'icinga2 daemon -C --dump-objects' to validate config and generate the cache file.");
+ return 1;
+ }
+
+ std::fstream fp;
+ fp.open(objectfile.CStr(), std::ios_base::in);
+
+ StdioStream::Ptr sfp = new StdioStream(&fp, false);
+ unsigned long objects_count = 0;
+ std::map<String, int> type_count;
+
+ String name_filter, type_filter;
+
+ if (vm.count("name"))
+ name_filter = vm["name"].as<std::string>();
+ if (vm.count("type"))
+ type_filter = vm["type"].as<std::string>();
+
+ bool first = true;
+
+ String message;
+ StreamReadContext src;
+ for (;;) {
+ StreamReadStatus srs = NetString::ReadStringFromStream(sfp, &message, src);
+
+ if (srs == StatusEof)
+ break;
+
+ if (srs != StatusNewItem)
+ continue;
+
+ ObjectListUtility::PrintObject(std::cout, first, message, type_count, name_filter, type_filter);
+ objects_count++;
+ }
+
+ sfp->Close();
+ fp.close();
+
+ if (vm.count("count")) {
+ if (!first)
+ std::cout << "\n";
+
+ PrintTypeCounts(std::cout, type_count);
+ std::cout << "\n";
+ }
+
+ Log(LogNotice, "cli")
+ << "Parsed " << objects_count << " objects.";
+
+ auto objectsPathCtime (GetCtime(Configuration::ObjectsPath));
+ auto varsPathCtime (GetCtime(Configuration::VarsPath));
+
+ if (objectsPathCtime < varsPathCtime) {
+ Log(LogWarning, "cli")
+ << "This data is " << Utility::FormatDuration(varsPathCtime - objectsPathCtime)
+ << " older than the last Icinga config (re)load. It may be outdated. Consider running 'icinga2 daemon -C --dump-objects' first.";
+ }
+
+ return 0;
+}
+
+void ObjectListCommand::PrintTypeCounts(std::ostream& fp, const std::map<String, int>& type_count)
+{
+ typedef std::map<String, int>::value_type TypeCount;
+
+ for (const TypeCount& kv : type_count) {
+ fp << "Found " << kv.second << " " << kv.first << " object";
+
+ if (kv.second != 1)
+ fp << "s";
+
+ fp << ".\n";
+ }
+}
diff --git a/lib/cli/objectlistcommand.hpp b/lib/cli/objectlistcommand.hpp
new file mode 100644
index 0000000..bafe3ec
--- /dev/null
+++ b/lib/cli/objectlistcommand.hpp
@@ -0,0 +1,36 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef OBJECTLISTCOMMAND_H
+#define OBJECTLISTCOMMAND_H
+
+#include "base/dictionary.hpp"
+#include "base/array.hpp"
+#include "cli/clicommand.hpp"
+#include <ostream>
+
+namespace icinga
+{
+
+/**
+ * The "object list" command.
+ *
+ * @ingroup cli
+ */
+class ObjectListCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ObjectListCommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ void InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+
+private:
+ static void PrintTypeCounts(std::ostream& fp, const std::map<String, int>& type_count);
+};
+
+}
+
+#endif /* OBJECTLISTCOMMAND_H */
diff --git a/lib/cli/objectlistutility.cpp b/lib/cli/objectlistutility.cpp
new file mode 100644
index 0000000..a8135d9
--- /dev/null
+++ b/lib/cli/objectlistutility.cpp
@@ -0,0 +1,155 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/objectlistutility.hpp"
+#include "base/json.hpp"
+#include "base/utility.hpp"
+#include "base/console.hpp"
+#include "base/objectlock.hpp"
+#include "base/convert.hpp"
+#include <iostream>
+#include <iomanip>
+
+using namespace icinga;
+
+bool ObjectListUtility::PrintObject(std::ostream& fp, bool& first, const String& message, std::map<String, int>& type_count, const String& name_filter, const String& type_filter)
+{
+ Dictionary::Ptr object = JsonDecode(message);
+
+ Dictionary::Ptr properties = object->Get("properties");
+
+ String internal_name = properties->Get("__name");
+ String name = object->Get("name");
+ String type = object->Get("type");
+
+ if (!name_filter.IsEmpty() && !Utility::Match(name_filter, name) && !Utility::Match(name_filter, internal_name))
+ return false;
+ if (!type_filter.IsEmpty() && !Utility::Match(type_filter, type))
+ return false;
+
+ if (first)
+ first = false;
+ else
+ fp << "\n";
+
+ Dictionary::Ptr debug_hints = object->Get("debug_hints");
+
+ fp << "Object '" << ConsoleColorTag(Console_ForegroundBlue | Console_Bold) << internal_name << ConsoleColorTag(Console_Normal) << "'";
+ fp << " of type '" << ConsoleColorTag(Console_ForegroundMagenta | Console_Bold) << type << ConsoleColorTag(Console_Normal) << "':\n";
+
+ Array::Ptr di = object->Get("debug_info");
+
+ if (di) {
+ fp << ConsoleColorTag(Console_ForegroundCyan) << " % declared in '" << di->Get(0) << "', lines "
+ << di->Get(1) << ":" << di->Get(2) << "-" << di->Get(3) << ":" << di->Get(4) << ConsoleColorTag(Console_Normal) << "\n";
+ }
+
+ PrintProperties(fp, properties, debug_hints, 2);
+
+ type_count[type]++;
+ return true;
+}
+
+void ObjectListUtility::PrintProperties(std::ostream& fp, const Dictionary::Ptr& props, const Dictionary::Ptr& debug_hints, int indent)
+{
+ /* get debug hint props */
+ Dictionary::Ptr debug_hint_props;
+ if (debug_hints)
+ debug_hint_props = debug_hints->Get("properties");
+
+ int offset = 2;
+
+ ObjectLock olock(props);
+ for (const Dictionary::Pair& kv : props)
+ {
+ String key = kv.first;
+ Value val = kv.second;
+
+ /* key & value */
+ fp << std::setw(indent) << " " << "* " << ConsoleColorTag(Console_ForegroundGreen) << key << ConsoleColorTag(Console_Normal);
+
+ /* extract debug hints for key */
+ Dictionary::Ptr debug_hints_fwd;
+ if (debug_hint_props)
+ debug_hints_fwd = debug_hint_props->Get(key);
+
+ /* print dicts recursively */
+ if (val.IsObjectType<Dictionary>()) {
+ fp << "\n";
+ PrintHints(fp, debug_hints_fwd, indent + offset);
+ PrintProperties(fp, val, debug_hints_fwd, indent + offset);
+ } else {
+ fp << " = ";
+ PrintValue(fp, val);
+ fp << "\n";
+ PrintHints(fp, debug_hints_fwd, indent + offset);
+ }
+ }
+}
+
+void ObjectListUtility::PrintHints(std::ostream& fp, const Dictionary::Ptr& debug_hints, int indent)
+{
+ if (!debug_hints)
+ return;
+
+ Array::Ptr messages = debug_hints->Get("messages");
+
+ if (messages) {
+ ObjectLock olock(messages);
+
+ for (const Value& msg : messages)
+ {
+ PrintHint(fp, msg, indent);
+ }
+ }
+}
+
+void ObjectListUtility::PrintHint(std::ostream& fp, const Array::Ptr& msg, int indent)
+{
+ fp << std::setw(indent) << " " << ConsoleColorTag(Console_ForegroundCyan) << "% " << msg->Get(0) << " modified in '" << msg->Get(1) << "', lines "
+ << msg->Get(2) << ":" << msg->Get(3) << "-" << msg->Get(4) << ":" << msg->Get(5) << ConsoleColorTag(Console_Normal) << "\n";
+}
+
+void ObjectListUtility::PrintValue(std::ostream& fp, const Value& val)
+{
+ if (val.IsObjectType<Array>()) {
+ PrintArray(fp, val);
+ return;
+ }
+
+ if (val.IsString()) {
+ fp << "\"" << Convert::ToString(val) << "\"";
+ return;
+ }
+
+ if (val.IsEmpty()) {
+ fp << "null";
+ return;
+ }
+
+ fp << Convert::ToString(val);
+}
+
+void ObjectListUtility::PrintArray(std::ostream& fp, const Array::Ptr& arr)
+{
+ bool first = true;
+
+ fp << "[ ";
+
+ if (arr) {
+ ObjectLock olock(arr);
+ for (const Value& value : arr)
+ {
+ if (first)
+ first = false;
+ else
+ fp << ", ";
+
+ PrintValue(fp, value);
+ }
+ }
+
+ if (!first)
+ fp << " ";
+
+ fp << "]";
+}
diff --git a/lib/cli/objectlistutility.hpp b/lib/cli/objectlistutility.hpp
new file mode 100644
index 0000000..ee1b97c
--- /dev/null
+++ b/lib/cli/objectlistutility.hpp
@@ -0,0 +1,34 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef OBJECTLISTUTILITY_H
+#define OBJECTLISTUTILITY_H
+
+#include "base/i2-base.hpp"
+#include "cli/i2-cli.hpp"
+#include "base/dictionary.hpp"
+#include "base/array.hpp"
+#include "base/value.hpp"
+#include "base/string.hpp"
+
+namespace icinga
+{
+
+/**
+ * @ingroup cli
+ */
+class ObjectListUtility
+{
+public:
+ static bool PrintObject(std::ostream& fp, bool& first, const String& message, std::map<String, int>& type_count, const String& name_filter, const String& type_filter);
+
+private:
+ static void PrintProperties(std::ostream& fp, const Dictionary::Ptr& props, const Dictionary::Ptr& debug_hints, int indent);
+ static void PrintHints(std::ostream& fp, const Dictionary::Ptr& debug_hints, int indent);
+ static void PrintHint(std::ostream& fp, const Array::Ptr& msg, int indent);
+ static void PrintValue(std::ostream& fp, const Value& val);
+ static void PrintArray(std::ostream& fp, const Array::Ptr& arr);
+};
+
+}
+
+#endif /* OBJECTLISTUTILITY_H */
diff --git a/lib/cli/pkinewcacommand.cpp b/lib/cli/pkinewcacommand.cpp
new file mode 100644
index 0000000..eba08c6
--- /dev/null
+++ b/lib/cli/pkinewcacommand.cpp
@@ -0,0 +1,29 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/pkinewcacommand.hpp"
+#include "remote/pkiutility.hpp"
+#include "base/logger.hpp"
+
+using namespace icinga;
+
+REGISTER_CLICOMMAND("pki/new-ca", PKINewCACommand);
+
+String PKINewCACommand::GetDescription() const
+{
+ return "Sets up a new Certificate Authority.";
+}
+
+String PKINewCACommand::GetShortDescription() const
+{
+ return "sets up a new CA";
+}
+
+/**
+ * The entry point for the "pki new-ca" CLI command.
+ *
+ * @returns An exit status.
+ */
+int PKINewCACommand::Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const
+{
+ return PkiUtility::NewCa();
+}
diff --git a/lib/cli/pkinewcacommand.hpp b/lib/cli/pkinewcacommand.hpp
new file mode 100644
index 0000000..5b1bff6
--- /dev/null
+++ b/lib/cli/pkinewcacommand.hpp
@@ -0,0 +1,29 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef PKINEWCACOMMAND_H
+#define PKINEWCACOMMAND_H
+
+#include "cli/clicommand.hpp"
+
+namespace icinga
+{
+
+/**
+ * The "pki new-ca" command.
+ *
+ * @ingroup cli
+ */
+class PKINewCACommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(PKINewCACommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+
+};
+
+}
+
+#endif /* PKINEWCACOMMAND_H */
diff --git a/lib/cli/pkinewcertcommand.cpp b/lib/cli/pkinewcertcommand.cpp
new file mode 100644
index 0000000..5201d92
--- /dev/null
+++ b/lib/cli/pkinewcertcommand.cpp
@@ -0,0 +1,66 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/pkinewcertcommand.hpp"
+#include "remote/pkiutility.hpp"
+#include "base/logger.hpp"
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+REGISTER_CLICOMMAND("pki/new-cert", PKINewCertCommand);
+
+String PKINewCertCommand::GetDescription() const
+{
+ return "Creates a new Certificate Signing Request, a self-signed X509 certificate or both.";
+}
+
+String PKINewCertCommand::GetShortDescription() const
+{
+ return "creates a new CSR";
+}
+
+void PKINewCertCommand::InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const
+{
+ visibleDesc.add_options()
+ ("cn", po::value<std::string>(), "Common Name")
+ ("key", po::value<std::string>(), "Key file path (output)")
+ ("csr", po::value<std::string>(), "CSR file path (optional, output)")
+ ("cert", po::value<std::string>(), "Certificate file path (optional, output)");
+}
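+
+/* Illustrative invocation (hypothetical paths):
+ *   icinga2 pki new-cert --cn agent1.example.com --key agent1.key --csr agent1.csr --cert agent1.crt
+ */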
+
+std::vector<String> PKINewCertCommand::GetArgumentSuggestions(const String& argument, const String& word) const
+{
+ if (argument == "key" || argument == "csr" || argument == "cert")
+ return GetBashCompletionSuggestions("file", word);
+ else
+ return CLICommand::GetArgumentSuggestions(argument, word);
+}
+
+/**
+ * The entry point for the "pki new-cert" CLI command.
+ *
+ * @returns An exit status.
+ */
+int PKINewCertCommand::Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const
+{
+ if (!vm.count("cn")) {
+ Log(LogCritical, "cli", "Common name (--cn) must be specified.");
+ return 1;
+ }
+
+ if (!vm.count("key")) {
+ Log(LogCritical, "cli", "Key file path (--key) must be specified.");
+ return 1;
+ }
+
+ String csr, cert;
+
+ if (vm.count("csr"))
+ csr = vm["csr"].as<std::string>();
+
+ if (vm.count("cert"))
+ cert = vm["cert"].as<std::string>();
+
+ return PkiUtility::NewCert(vm["cn"].as<std::string>(), vm["key"].as<std::string>(), csr, cert);
+}
diff --git a/lib/cli/pkinewcertcommand.hpp b/lib/cli/pkinewcertcommand.hpp
new file mode 100644
index 0000000..0c39bb6
--- /dev/null
+++ b/lib/cli/pkinewcertcommand.hpp
@@ -0,0 +1,32 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef PKINEWCERTCOMMAND_H
+#define PKINEWCERTCOMMAND_H
+
+#include "cli/clicommand.hpp"
+
+namespace icinga
+{
+
+/**
+ * The "pki new-cert" command.
+ *
+ * @ingroup cli
+ */
+class PKINewCertCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(PKINewCertCommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ void InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const override;
+ std::vector<String> GetArgumentSuggestions(const String& argument, const String& word) const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+
+};
+
+}
+
+#endif /* PKINEWCERTCOMMAND_H */
diff --git a/lib/cli/pkirequestcommand.cpp b/lib/cli/pkirequestcommand.cpp
new file mode 100644
index 0000000..d2b79f0
--- /dev/null
+++ b/lib/cli/pkirequestcommand.cpp
@@ -0,0 +1,93 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/pkirequestcommand.hpp"
+#include "remote/pkiutility.hpp"
+#include "base/logger.hpp"
+#include "base/tlsutility.hpp"
+#include <iostream>
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+REGISTER_CLICOMMAND("pki/request", PKIRequestCommand);
+
+String PKIRequestCommand::GetDescription() const
+{
+ return "Sends a PKI request to Icinga 2.";
+}
+
+String PKIRequestCommand::GetShortDescription() const
+{
+ return "requests a certificate";
+}
+
+void PKIRequestCommand::InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const
+{
+ visibleDesc.add_options()
+ ("key", po::value<std::string>(), "Key file path (input)")
+ ("cert", po::value<std::string>(), "Certificate file path (input + output)")
+ ("ca", po::value<std::string>(), "CA file path (output)")
+ ("trustedcert", po::value<std::string>(), "Trusted certificate file path (input)")
+ ("host", po::value<std::string>(), "Icinga 2 host")
+ ("port", po::value<std::string>(), "Icinga 2 port")
+ ("ticket", po::value<std::string>(), "Icinga 2 PKI ticket");
+}
+
+std::vector<String> PKIRequestCommand::GetArgumentSuggestions(const String& argument, const String& word) const
+{
+ if (argument == "key" || argument == "cert" || argument == "ca" || argument == "trustedcert")
+ return GetBashCompletionSuggestions("file", word);
+ else if (argument == "host")
+ return GetBashCompletionSuggestions("hostname", word);
+ else if (argument == "port")
+ return GetBashCompletionSuggestions("service", word);
+ else
+ return CLICommand::GetArgumentSuggestions(argument, word);
+}
+
+/**
+ * The entry point for the "pki request" CLI command.
+ *
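+ * Illustrative invocation (host, ticket and file names are examples only;
+ * --port defaults to 5665 and --ticket may be omitted):
+ *   icinga2 pki request --host master1.example.com --ticket <ticket> \
+ *     --key agent1.key --cert agent1.crt --ca ca.crt --trustedcert trusted-parent.crt
+ *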
+ * @returns An exit status.
+ */
+int PKIRequestCommand::Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const
+{
+ if (!vm.count("host")) {
+ Log(LogCritical, "cli", "Icinga 2 host (--host) must be specified.");
+ return 1;
+ }
+
+ if (!vm.count("key")) {
+ Log(LogCritical, "cli", "Key input file path (--key) must be specified.");
+ return 1;
+ }
+
+ if (!vm.count("cert")) {
+ Log(LogCritical, "cli", "Certificate output file path (--cert) must be specified.");
+ return 1;
+ }
+
+ if (!vm.count("ca")) {
+ Log(LogCritical, "cli", "CA certificate output file path (--ca) must be specified.");
+ return 1;
+ }
+
+ if (!vm.count("trustedcert")) {
+ Log(LogCritical, "cli", "Trusted certificate input file path (--trustedcert) must be specified.");
+ return 1;
+ }
+
+ String port = "5665";
+ String ticket;
+
+ if (vm.count("port"))
+ port = vm["port"].as<std::string>();
+
+ if (vm.count("ticket"))
+ ticket = vm["ticket"].as<std::string>();
+
+ return PkiUtility::RequestCertificate(vm["host"].as<std::string>(), port, vm["key"].as<std::string>(),
+ vm["cert"].as<std::string>(), vm["ca"].as<std::string>(), GetX509Certificate(vm["trustedcert"].as<std::string>()),
+ ticket);
+}
diff --git a/lib/cli/pkirequestcommand.hpp b/lib/cli/pkirequestcommand.hpp
new file mode 100644
index 0000000..6e2a393
--- /dev/null
+++ b/lib/cli/pkirequestcommand.hpp
@@ -0,0 +1,32 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef PKIREQUESTCOMMAND_H
+#define PKIREQUESTCOMMAND_H
+
+#include "cli/clicommand.hpp"
+
+namespace icinga
+{
+
+/**
+ * The "pki request" command.
+ *
+ * @ingroup cli
+ */
+class PKIRequestCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(PKIRequestCommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ void InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const override;
+ std::vector<String> GetArgumentSuggestions(const String& argument, const String& word) const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+
+};
+
+}
+
+#endif /* PKIREQUESTCOMMAND_H */
diff --git a/lib/cli/pkisavecertcommand.cpp b/lib/cli/pkisavecertcommand.cpp
new file mode 100644
index 0000000..befd0ee
--- /dev/null
+++ b/lib/cli/pkisavecertcommand.cpp
@@ -0,0 +1,89 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/pkisavecertcommand.hpp"
+#include "remote/pkiutility.hpp"
+#include "base/logger.hpp"
+#include "base/tlsutility.hpp"
+#include "base/console.hpp"
+#include <iostream>
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+REGISTER_CLICOMMAND("pki/save-cert", PKISaveCertCommand);
+
+String PKISaveCertCommand::GetDescription() const
+{
+ return "Saves another Icinga 2 instance's certificate.";
+}
+
+String PKISaveCertCommand::GetShortDescription() const
+{
+ return "saves another Icinga 2 instance's certificate";
+}
+
+void PKISaveCertCommand::InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const
+{
+ visibleDesc.add_options()
+ ("trustedcert", po::value<std::string>(), "Trusted certificate file path (output)")
+ ("host", po::value<std::string>(), "Parent Icinga instance to fetch the public TLS certificate from")
+ ("port", po::value<std::string>()->default_value("5665"), "Icinga 2 port");
+
+ hiddenDesc.add_options()
+ ("key", po::value<std::string>())
+ ("cert", po::value<std::string>());
+}
+
+std::vector<String> PKISaveCertCommand::GetArgumentSuggestions(const String& argument, const String& word) const
+{
+ if (argument == "trustedcert")
+ return GetBashCompletionSuggestions("file", word);
+ else if (argument == "host")
+ return GetBashCompletionSuggestions("hostname", word);
+ else if (argument == "port")
+ return GetBashCompletionSuggestions("service", word);
+ else
+ return CLICommand::GetArgumentSuggestions(argument, word);
+}
+
+/**
+ * The entry point for the "pki save-cert" CLI command.
+ *
+ * @returns An exit status.
+ */
+int PKISaveCertCommand::Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const
+{
+ if (!vm.count("host")) {
+ Log(LogCritical, "cli", "Icinga 2 host (--host) must be specified.");
+ return 1;
+ }
+
+ if (!vm.count("trustedcert")) {
+ Log(LogCritical, "cli", "Trusted certificate output file path (--trustedcert) must be specified.");
+ return 1;
+ }
+
+ String host = vm["host"].as<std::string>();
+ String port = vm["port"].as<std::string>();
+
+ Log(LogInformation, "cli")
+ << "Retrieving TLS certificate for '" << host << ":" << port << "'.";
+
+ std::shared_ptr<X509> cert = PkiUtility::FetchCert(host, port);
+
+ if (!cert) {
+ Log(LogCritical, "cli", "Failed to fetch certificate from host.");
+ return 1;
+ }
+
+ std::cout << PkiUtility::GetCertificateInformation(cert) << "\n";
+ std::cout << ConsoleColorTag(Console_ForegroundRed)
+ << "***\n"
+ << "*** You have to ensure that this certificate actually matches the parent\n"
+ << "*** instance's certificate in order to avoid man-in-the-middle attacks.\n"
+ << "***\n\n"
+ << ConsoleColorTag(Console_Normal);
+
+ return PkiUtility::WriteCert(cert, vm["trustedcert"].as<std::string>());
+}
diff --git a/lib/cli/pkisavecertcommand.hpp b/lib/cli/pkisavecertcommand.hpp
new file mode 100644
index 0000000..c552eef
--- /dev/null
+++ b/lib/cli/pkisavecertcommand.hpp
@@ -0,0 +1,32 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef PKISAVECERTCOMMAND_H
+#define PKISAVECERTCOMMAND_H
+
+#include "cli/clicommand.hpp"
+
+namespace icinga
+{
+
+/**
+ * The "pki save-cert" command.
+ *
+ * @ingroup cli
+ */
+class PKISaveCertCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(PKISaveCertCommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ void InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const override;
+ std::vector<String> GetArgumentSuggestions(const String& argument, const String& word) const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+
+};
+
+}
+
+#endif /* PKISAVECERTCOMMAND_H */
diff --git a/lib/cli/pkisigncsrcommand.cpp b/lib/cli/pkisigncsrcommand.cpp
new file mode 100644
index 0000000..ce1427b
--- /dev/null
+++ b/lib/cli/pkisigncsrcommand.cpp
@@ -0,0 +1,56 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/pkisigncsrcommand.hpp"
+#include "remote/pkiutility.hpp"
+#include "base/logger.hpp"
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+REGISTER_CLICOMMAND("pki/sign-csr", PKISignCSRCommand);
+
+String PKISignCSRCommand::GetDescription() const
+{
+ return "Reads a Certificate Signing Request from stdin and prints a signed certificate on stdout.";
+}
+
+String PKISignCSRCommand::GetShortDescription() const
+{
+ return "signs a CSR";
+}
+
+void PKISignCSRCommand::InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const
+{
+ visibleDesc.add_options()
+ ("csr", po::value<std::string>(), "CSR file path (input)")
+ ("cert", po::value<std::string>(), "Certificate file path (output)");
+}
+
+std::vector<String> PKISignCSRCommand::GetArgumentSuggestions(const String& argument, const String& word) const
+{
+ if (argument == "csr" || argument == "cert")
+ return GetBashCompletionSuggestions("file", word);
+ else
+ return CLICommand::GetArgumentSuggestions(argument, word);
+}
+
+/**
+ * The entry point for the "pki sign-csr" CLI command.
+ *
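+ * Illustrative invocation (file names are examples only):
+ *   icinga2 pki sign-csr --csr agent1.csr --cert agent1.crt
+ *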
+ * @returns An exit status.
+ */
+int PKISignCSRCommand::Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const
+{
+ if (!vm.count("csr")) {
+ Log(LogCritical, "cli", "Certificate signing request file path (--csr) must be specified.");
+ return 1;
+ }
+
+ if (!vm.count("cert")) {
+ Log(LogCritical, "cli", "Certificate file path (--cert) must be specified.");
+ return 1;
+ }
+
+ return PkiUtility::SignCsr(vm["csr"].as<std::string>(), vm["cert"].as<std::string>());
+}
diff --git a/lib/cli/pkisigncsrcommand.hpp b/lib/cli/pkisigncsrcommand.hpp
new file mode 100644
index 0000000..a66fd39
--- /dev/null
+++ b/lib/cli/pkisigncsrcommand.hpp
@@ -0,0 +1,32 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef PKISIGNCSRCOMMAND_H
+#define PKISIGNCSRCOMMAND_H
+
+#include "cli/clicommand.hpp"
+
+namespace icinga
+{
+
+/**
+ * The "pki sign-csr" command.
+ *
+ * @ingroup cli
+ */
+class PKISignCSRCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(PKISignCSRCommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ void InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const override;
+ std::vector<String> GetArgumentSuggestions(const String& argument, const String& word) const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+
+};
+
+}
+
+#endif /* PKISIGNCSRCOMMAND_H */
diff --git a/lib/cli/pkiticketcommand.cpp b/lib/cli/pkiticketcommand.cpp
new file mode 100644
index 0000000..82f3586
--- /dev/null
+++ b/lib/cli/pkiticketcommand.cpp
@@ -0,0 +1,55 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/pkiticketcommand.hpp"
+#include "remote/pkiutility.hpp"
+#include "cli/variableutility.hpp"
+#include "base/logger.hpp"
+#include <iostream>
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+REGISTER_CLICOMMAND("pki/ticket", PKITicketCommand);
+
+String PKITicketCommand::GetDescription() const
+{
+ return "Generates an Icinga 2 ticket";
+}
+
+String PKITicketCommand::GetShortDescription() const
+{
+ return "generates a ticket";
+}
+
+void PKITicketCommand::InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const
+{
+ visibleDesc.add_options()
+ ("cn", po::value<std::string>(), "Certificate common name")
+ ("salt", po::value<std::string>(), "Ticket salt");
+}
+
+/**
+ * The entry point for the "pki ticket" CLI command.
+ *
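+ * Illustrative invocation (the CN is an example only; --salt falls back to the
+ * TicketSalt constant read from the vars file when omitted):
+ *   icinga2 pki ticket --cn agent1.example.com
+ *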
+ * @returns An exit status.
+ */
+int PKITicketCommand::Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const
+{
+ if (!vm.count("cn")) {
+ Log(LogCritical, "cli", "Common name (--cn) must be specified.");
+ return 1;
+ }
+
+ String salt = VariableUtility::GetVariable("TicketSalt");
+
+ if (vm.count("salt"))
+ salt = vm["salt"].as<std::string>();
+
+ if (salt.IsEmpty()) {
+ Log(LogCritical, "cli", "Ticket salt (--salt) must be specified.");
+ return 1;
+ }
+
+ return PkiUtility::GenTicket(vm["cn"].as<std::string>(), salt, std::cout);
+}
diff --git a/lib/cli/pkiticketcommand.hpp b/lib/cli/pkiticketcommand.hpp
new file mode 100644
index 0000000..500ce86
--- /dev/null
+++ b/lib/cli/pkiticketcommand.hpp
@@ -0,0 +1,31 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef PKITICKETCOMMAND_H
+#define PKITICKETCOMMAND_H
+
+#include "cli/clicommand.hpp"
+
+namespace icinga
+{
+
+/**
+ * The "pki ticket" command.
+ *
+ * @ingroup cli
+ */
+class PKITicketCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(PKITicketCommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ void InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+
+};
+
+}
+
+#endif /* PKITICKETCOMMAND_H */
diff --git a/lib/cli/pkiverifycommand.cpp b/lib/cli/pkiverifycommand.cpp
new file mode 100644
index 0000000..963903a
--- /dev/null
+++ b/lib/cli/pkiverifycommand.cpp
@@ -0,0 +1,226 @@
+/* Icinga 2 | (c) 2020 Icinga GmbH | GPLv2+ */
+
+#include "cli/pkiverifycommand.hpp"
+#include "icinga/service.hpp"
+#include "remote/pkiutility.hpp"
+#include "base/tlsutility.hpp"
+#include "base/logger.hpp"
+#include <iostream>
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+REGISTER_CLICOMMAND("pki/verify", PKIVerifyCommand);
+
+String PKIVerifyCommand::GetDescription() const
+{
+ return "Verify TLS certificates: CN, signed by CA, is CA; Print certificate";
+}
+
+String PKIVerifyCommand::GetShortDescription() const
+{
+ return "verify TLS certificates: CN, signed by CA, is CA; Print certificate";
+}
+
+void PKIVerifyCommand::InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const
+{
+ visibleDesc.add_options()
+ ("cn", po::value<std::string>(), "Common Name (optional). Use with '--cert' to check the CN in the certificate.")
+ ("cert", po::value<std::string>(), "Certificate file path (optional). Standalone: print certificate. With '--cacert': Verify against CA.")
+ ("cacert", po::value<std::string>(), "CA certificate file path (optional). If passed standalone, verifies whether this is a CA certificate")
+ ("crl", po::value<std::string>(), "CRL file path (optional). Check the certificate against this revocation list when verifying against CA.");
+}
+
+std::vector<String> PKIVerifyCommand::GetArgumentSuggestions(const String& argument, const String& word) const
+{
+ if (argument == "cert" || argument == "cacert" || argument == "crl")
+ return GetBashCompletionSuggestions("file", word);
+ else
+ return CLICommand::GetArgumentSuggestions(argument, word);
+}
+
+/**
+ * The entry point for the "pki verify" CLI command.
+ *
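+ * Illustrative invocations (file names are examples only); the exit status reuses
+ * the ServiceOK/ServiceCritical check states:
+ *   icinga2 pki verify --cert agent1.crt
+ *   icinga2 pki verify --cert agent1.crt --cacert ca.crt
+ *   icinga2 pki verify --cn agent1.example.com --cert agent1.crt
+ *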
+ * @returns An exit status.
+ */
+int PKIVerifyCommand::Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const
+{
+ String cn, certFile, caCertFile, crlFile;
+
+ if (vm.count("cn"))
+ cn = vm["cn"].as<std::string>();
+
+ if (vm.count("cert"))
+ certFile = vm["cert"].as<std::string>();
+
+ if (vm.count("cacert"))
+ caCertFile = vm["cacert"].as<std::string>();
+
+ if (vm.count("crl"))
+ crlFile = vm["crl"].as<std::string>();
+
+ /* Verify CN in certificate. */
+ if (!cn.IsEmpty() && !certFile.IsEmpty()) {
+ std::shared_ptr<X509> cert;
+ try {
+ cert = GetX509Certificate(certFile);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli")
+ << "Cannot read certificate file '" << certFile << "'. Please ensure that it exists and is readable.";
+
+ return ServiceCritical;
+ }
+
+ Log(LogInformation, "cli")
+ << "Verifying common name (CN) '" << cn << " in certificate '" << certFile << "'.";
+
+ std::cout << PkiUtility::GetCertificateInformation(cert) << "\n";
+
+ String certCN = GetCertificateCN(cert);
+
+ if (cn == certCN) {
+ Log(LogInformation, "cli")
+ << "OK: CN '" << cn << "' matches certificate CN '" << certCN << "'.";
+
+ return ServiceOK;
+ } else {
+ Log(LogCritical, "cli")
+ << "CRITICAL: CN '" << cn << "' does NOT match certificate CN '" << certCN << "'.";
+
+ return ServiceCritical;
+ }
+ }
+
+ /* Verify certificate. */
+ if (!certFile.IsEmpty() && !caCertFile.IsEmpty()) {
+ std::shared_ptr<X509> cert;
+ try {
+ cert = GetX509Certificate(certFile);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli")
+ << "Cannot read certificate file '" << certFile << "'. Please ensure that it exists and is readable.";
+
+ return ServiceCritical;
+ }
+
+ std::shared_ptr<X509> cacert;
+ try {
+ cacert = GetX509Certificate(caCertFile);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli")
+ << "Cannot read CA certificate file '" << caCertFile << "'. Please ensure that it exists and is readable.";
+
+ return ServiceCritical;
+ }
+
+ Log(LogInformation, "cli")
+ << "Verifying certificate '" << certFile << "'";
+
+ std::cout << PkiUtility::GetCertificateInformation(cert) << "\n";
+
+ Log(LogInformation, "cli")
+ << " with CA certificate '" << caCertFile << "'.";
+
+ std::cout << PkiUtility::GetCertificateInformation(cacert) << "\n";
+
+ String certCN = GetCertificateCN(cert);
+
+ bool signedByCA;
+
+ try {
+ signedByCA = VerifyCertificate(cacert, cert, crlFile);
+ } catch (const std::exception& ex) {
+ Log logmsg (LogCritical, "cli");
+ logmsg << "CRITICAL: Certificate with CN '" << certCN << "' is NOT signed by CA: ";
+ if (const unsigned long *openssl_code = boost::get_error_info<errinfo_openssl_error>(ex)) {
+ logmsg << X509_verify_cert_error_string(*openssl_code) << " (code " << *openssl_code << ")";
+ } else {
+ logmsg << DiagnosticInformation(ex, false);
+ }
+
+ return ServiceCritical;
+ }
+
+ if (signedByCA) {
+ Log(LogInformation, "cli")
+ << "OK: Certificate with CN '" << certCN << "' is signed by CA.";
+
+ return ServiceOK;
+ } else {
+ Log(LogCritical, "cli")
+ << "CRITICAL: Certificate with CN '" << certCN << "' is NOT signed by CA.";
+
+ return ServiceCritical;
+ }
+ }
+
+
+ /* Standalone CA checks. */
+ if (certFile.IsEmpty() && !caCertFile.IsEmpty()) {
+ std::shared_ptr<X509> cacert;
+ try {
+ cacert = GetX509Certificate(caCertFile);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli")
+ << "Cannot read CA certificate file '" << caCertFile << "'. Please ensure that it exists and is readable.";
+
+ return ServiceCritical;
+ }
+
+ Log(LogInformation, "cli")
+ << "Checking whether certificate '" << caCertFile << "' is a valid CA certificate.";
+
+ std::cout << PkiUtility::GetCertificateInformation(cacert) << "\n";
+
+ if (IsCa(cacert)) {
+ Log(LogInformation, "cli")
+ << "OK: CA certificate file '" << caCertFile << "' was verified successfully.\n";
+
+ return ServiceOK;
+ } else {
+ Log(LogCritical, "cli")
+ << "CRITICAL: The file '" << caCertFile << "' does not seem to be a CA certificate file.\n";
+
+ return ServiceCritical;
+ }
+ }
+
+ /* Print certificate */
+ if (!certFile.IsEmpty()) {
+ std::shared_ptr<X509> cert;
+ try {
+ cert = GetX509Certificate(certFile);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli")
+ << "Cannot read certificate file '" << certFile << "'. Please ensure that it exists and is readable.";
+
+ return ServiceCritical;
+ }
+
+ Log(LogInformation, "cli")
+ << "Printing certificate '" << certFile << "'";
+
+ std::cout << PkiUtility::GetCertificateInformation(cert) << "\n";
+
+ return ServiceOK;
+ }
+
+ /* Error handling. */
+ if (!cn.IsEmpty() && certFile.IsEmpty()) {
+ Log(LogCritical, "cli")
+ << "The '--cn' parameter requires the '--cert' parameter.";
+
+ return ServiceCritical;
+ }
+
+ if (cn.IsEmpty() && certFile.IsEmpty() && caCertFile.IsEmpty()) {
+ Log(LogInformation, "cli")
+ << "Please add the '--help' parameter to see all available options.";
+
+ return ServiceOK;
+ }
+
+ return ServiceOK;
+}
diff --git a/lib/cli/pkiverifycommand.hpp b/lib/cli/pkiverifycommand.hpp
new file mode 100644
index 0000000..8e4b9db
--- /dev/null
+++ b/lib/cli/pkiverifycommand.hpp
@@ -0,0 +1,32 @@
+/* Icinga 2 | (c) 2020 Icinga GmbH | GPLv2+ */
+
+#ifndef PKIVERIFYCOMMAND_H
+#define PKIVERIFYCOMMAND_H
+
+#include "cli/clicommand.hpp"
+
+namespace icinga
+{
+
+/**
+ * The "pki verify" command.
+ *
+ * @ingroup cli
+ */
+class PKIVerifyCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(PKIVerifyCommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ void InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const override;
+ std::vector<String> GetArgumentSuggestions(const String& argument, const String& word) const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+
+};
+
+}
+
+#endif /* PKIVERIFYCOMMAND_H */
diff --git a/lib/cli/variablegetcommand.cpp b/lib/cli/variablegetcommand.cpp
new file mode 100644
index 0000000..c05ac96
--- /dev/null
+++ b/lib/cli/variablegetcommand.cpp
@@ -0,0 +1,75 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/variablegetcommand.hpp"
+#include "cli/variableutility.hpp"
+#include "base/logger.hpp"
+#include "base/application.hpp"
+#include "base/convert.hpp"
+#include "base/configobject.hpp"
+#include "base/configtype.hpp"
+#include "base/json.hpp"
+#include "base/netstring.hpp"
+#include "base/stdiostream.hpp"
+#include "base/debug.hpp"
+#include "base/objectlock.hpp"
+#include "base/console.hpp"
+#include "base/scriptglobal.hpp"
+#include <boost/algorithm/string/join.hpp>
+#include <boost/algorithm/string/replace.hpp>
+#include <fstream>
+#include <iostream>
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+REGISTER_CLICOMMAND("variable/get", VariableGetCommand);
+
+String VariableGetCommand::GetDescription() const
+{
+ return "Prints the value of an Icinga 2 variable.";
+}
+
+String VariableGetCommand::GetShortDescription() const
+{
+ return "gets a variable";
+}
+
+void VariableGetCommand::InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const
+{
+ visibleDesc.add_options()
+ ("current", "Uses the current value (i.e. from the running process, rather than from the vars file)");
+}
+
+int VariableGetCommand::GetMinArguments() const
+{
+ return 1;
+}
+
+/**
+ * The entry point for the "variable get" CLI command.
+ *
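+ * Illustrative invocation (the variable name is an example only):
+ *   icinga2 variable get NodeName
+ *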
+ * @returns An exit status.
+ */
+int VariableGetCommand::Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const
+{
+ if (vm.count("current")) {
+ std::cout << ScriptGlobal::Get(ap[0], &Empty) << "\n";
+ return 0;
+ }
+
+ String varsfile = Configuration::VarsPath;
+
+ if (!Utility::PathExists(varsfile)) {
+ Log(LogCritical, "cli")
+ << "Cannot open variables file '" << varsfile << "'.";
+ Log(LogCritical, "cli", "Run 'icinga2 daemon -C' to validate config and generate the cache file.");
+ return 1;
+ }
+
+ Value value = VariableUtility::GetVariable(ap[0]);
+
+ std::cout << value << "\n";
+
+ return 0;
+}
diff --git a/lib/cli/variablegetcommand.hpp b/lib/cli/variablegetcommand.hpp
new file mode 100644
index 0000000..9479b3a
--- /dev/null
+++ b/lib/cli/variablegetcommand.hpp
@@ -0,0 +1,34 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef VARIABLEGETCOMMAND_H
+#define VARIABLEGETCOMMAND_H
+
+#include "base/dictionary.hpp"
+#include "base/array.hpp"
+#include "cli/clicommand.hpp"
+#include <ostream>
+
+namespace icinga
+{
+
+/**
+ * The "variable get" command.
+ *
+ * @ingroup cli
+ */
+class VariableGetCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(VariableGetCommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ int GetMinArguments() const override;
+ void InitParameters(boost::program_options::options_description& visibleDesc,
+ boost::program_options::options_description& hiddenDesc) const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+};
+
+}
+
+#endif /* VARIABLEGETCOMMAND_H */
diff --git a/lib/cli/variablelistcommand.cpp b/lib/cli/variablelistcommand.cpp
new file mode 100644
index 0000000..b7ba1be
--- /dev/null
+++ b/lib/cli/variablelistcommand.cpp
@@ -0,0 +1,52 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/variablelistcommand.hpp"
+#include "cli/variableutility.hpp"
+#include "base/logger.hpp"
+#include "base/application.hpp"
+#include "base/convert.hpp"
+#include "base/configobject.hpp"
+#include "base/debug.hpp"
+#include "base/objectlock.hpp"
+#include "base/console.hpp"
+#include <boost/algorithm/string/join.hpp>
+#include <boost/algorithm/string/replace.hpp>
+#include <fstream>
+#include <iostream>
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+REGISTER_CLICOMMAND("variable/list", VariableListCommand);
+
+String VariableListCommand::GetDescription() const
+{
+ return "Lists all Icinga 2 variables.";
+}
+
+String VariableListCommand::GetShortDescription() const
+{
+ return "lists all variables";
+}
+
+/**
+ * The entry point for the "variable list" CLI command.
+ *
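+ * Illustrative invocation:
+ *   icinga2 variable list
+ *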
+ * @returns An exit status.
+ */
+int VariableListCommand::Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const
+{
+ String varsfile = Configuration::VarsPath;
+
+ if (!Utility::PathExists(varsfile)) {
+ Log(LogCritical, "cli")
+ << "Cannot open variables file '" << varsfile << "'.";
+ Log(LogCritical, "cli", "Run 'icinga2 daemon -C' to validate config and generate the cache file.");
+ return 1;
+ }
+
+ VariableUtility::PrintVariables(std::cout);
+
+ return 0;
+}
+
diff --git a/lib/cli/variablelistcommand.hpp b/lib/cli/variablelistcommand.hpp
new file mode 100644
index 0000000..909d9eb
--- /dev/null
+++ b/lib/cli/variablelistcommand.hpp
@@ -0,0 +1,34 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef VARIABLELISTCOMMAND_H
+#define VARIABLELISTCOMMAND_H
+
+#include "base/dictionary.hpp"
+#include "base/array.hpp"
+#include "cli/clicommand.hpp"
+#include <ostream>
+
+namespace icinga
+{
+
+/**
+ * The "variable list" command.
+ *
+ * @ingroup cli
+ */
+class VariableListCommand final : public CLICommand
+{
+public:
+ DECLARE_PTR_TYPEDEFS(VariableListCommand);
+
+ String GetDescription() const override;
+ String GetShortDescription() const override;
+ int Run(const boost::program_options::variables_map& vm, const std::vector<std::string>& ap) const override;
+
+private:
+ static void PrintVariable(std::ostream& fp, const String& message);
+};
+
+}
+
+#endif /* VARIABLELISTCOMMAND_H */
diff --git a/lib/cli/variableutility.cpp b/lib/cli/variableutility.cpp
new file mode 100644
index 0000000..398c9a0
--- /dev/null
+++ b/lib/cli/variableutility.cpp
@@ -0,0 +1,76 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/variableutility.hpp"
+#include "base/logger.hpp"
+#include "base/application.hpp"
+#include "base/utility.hpp"
+#include "base/stdiostream.hpp"
+#include "base/netstring.hpp"
+#include "base/json.hpp"
+#include "remote/jsonrpc.hpp"
+#include <fstream>
+
+using namespace icinga;
+
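+/* Both helpers read the vars cache file (Configuration::VarsPath), which is generated
+ * by "icinga2 daemon -C" and consists of netstring-framed JSON objects of the form
+ * {"name": ..., "value": ...}, one object per variable. */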
+Value VariableUtility::GetVariable(const String& name)
+{
+ String varsfile = Configuration::VarsPath;
+
+ std::fstream fp;
+ fp.open(varsfile.CStr(), std::ios_base::in);
+
+ StdioStream::Ptr sfp = new StdioStream(&fp, false);
+
+ String message;
+ StreamReadContext src;
+ for (;;) {
+ StreamReadStatus srs = NetString::ReadStringFromStream(sfp, &message, src);
+
+ if (srs == StatusEof)
+ break;
+
+ if (srs != StatusNewItem)
+ continue;
+
+ Dictionary::Ptr variable = JsonDecode(message);
+
+ if (variable->Get("name") == name) {
+ return variable->Get("value");
+ }
+ }
+
+ return Empty;
+}
+
+void VariableUtility::PrintVariables(std::ostream& outfp)
+{
+ String varsfile = Configuration::VarsPath;
+
+ std::fstream fp;
+ fp.open(varsfile.CStr(), std::ios_base::in);
+
+ StdioStream::Ptr sfp = new StdioStream(&fp, false);
+ unsigned long variables_count = 0;
+
+ String message;
+ StreamReadContext src;
+ for (;;) {
+ StreamReadStatus srs = NetString::ReadStringFromStream(sfp, &message, src);
+
+ if (srs == StatusEof)
+ break;
+
+ if (srs != StatusNewItem)
+ continue;
+
+ Dictionary::Ptr variable = JsonDecode(message);
+ outfp << variable->Get("name") << " = " << variable->Get("value") << "\n";
+ variables_count++;
+ }
+
+ sfp->Close();
+ fp.close();
+
+ Log(LogNotice, "cli")
+ << "Parsed " << variables_count << " variables.";
+}
diff --git a/lib/cli/variableutility.hpp b/lib/cli/variableutility.hpp
new file mode 100644
index 0000000..69869b2
--- /dev/null
+++ b/lib/cli/variableutility.hpp
@@ -0,0 +1,31 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef VARIABLEUTILITY_H
+#define VARIABLEUTILITY_H
+
+#include "base/i2-base.hpp"
+#include "cli/i2-cli.hpp"
+#include "base/dictionary.hpp"
+#include "base/string.hpp"
+#include <ostream>
+
+namespace icinga
+{
+
+/**
+ * @ingroup cli
+ */
+class VariableUtility
+{
+public:
+ static Value GetVariable(const String& name);
+ static void PrintVariables(std::ostream& outfp);
+
+private:
+ VariableUtility();
+
+};
+
+}
+
+#endif /* VARIABLEUTILITY_H */
diff --git a/lib/compat/CMakeLists.txt b/lib/compat/CMakeLists.txt
new file mode 100644
index 0000000..f7d032f
--- /dev/null
+++ b/lib/compat/CMakeLists.txt
@@ -0,0 +1,38 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+mkclass_target(compatlogger.ti compatlogger-ti.cpp compatlogger-ti.hpp)
+mkclass_target(externalcommandlistener.ti externalcommandlistener-ti.cpp externalcommandlistener-ti.hpp)
+
+set(compat_SOURCES
+ compatlogger.cpp compatlogger.hpp compatlogger-ti.hpp
+ externalcommandlistener.cpp externalcommandlistener.hpp externalcommandlistener-ti.hpp
+)
+
+if(ICINGA2_UNITY_BUILD)
+ mkunity_target(compat compat compat_SOURCES)
+endif()
+
+add_library(compat OBJECT ${compat_SOURCES})
+
+add_dependencies(compat base config icinga)
+
+set_target_properties (
+ compat PROPERTIES
+ FOLDER Components
+)
+
+install_if_not_exists(
+ ${PROJECT_SOURCE_DIR}/etc/icinga2/features-available/command.conf
+ ${ICINGA2_CONFIGDIR}/features-available
+)
+
+install_if_not_exists(
+ ${PROJECT_SOURCE_DIR}/etc/icinga2/features-available/compatlog.conf
+ ${ICINGA2_CONFIGDIR}/features-available
+)
+
+install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_LOGDIR}/compat/archives\")")
+install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_SPOOLDIR}\")")
+install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_INITRUNDIR}/cmd\")")
+
+set(CPACK_NSIS_EXTRA_INSTALL_COMMANDS "${CPACK_NSIS_EXTRA_INSTALL_COMMANDS}" PARENT_SCOPE)
diff --git a/lib/compat/compatlogger.cpp b/lib/compat/compatlogger.cpp
new file mode 100644
index 0000000..95ca830
--- /dev/null
+++ b/lib/compat/compatlogger.cpp
@@ -0,0 +1,614 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "compat/compatlogger.hpp"
+#include "compat/compatlogger-ti.cpp"
+#include "icinga/service.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/eventcommand.hpp"
+#include "icinga/notification.hpp"
+#include "icinga/macroprocessor.hpp"
+#include "icinga/externalcommandprocessor.hpp"
+#include "icinga/compatutility.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+#include "base/exception.hpp"
+#include "base/convert.hpp"
+#include "base/application.hpp"
+#include "base/utility.hpp"
+#include "base/statsfunction.hpp"
+#include <boost/algorithm/string.hpp>
+
+using namespace icinga;
+
+REGISTER_TYPE(CompatLogger);
+
+REGISTER_STATSFUNCTION(CompatLogger, &CompatLogger::StatsFunc);
+
+void CompatLogger::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr&)
+{
+ DictionaryData nodes;
+
+ for (const CompatLogger::Ptr& compat_logger : ConfigType::GetObjectsByType<CompatLogger>()) {
+ nodes.emplace_back(compat_logger->GetName(), 1); // add more stats
+ }
+
+ status->Set("compatlogger", new Dictionary(std::move(nodes)));
+}
+
+/**
+ * @threadsafety Always.
+ */
+void CompatLogger::Start(bool runtimeCreated)
+{
+ ObjectImpl<CompatLogger>::Start(runtimeCreated);
+
+ Log(LogInformation, "CompatLogger")
+ << "'" << GetName() << "' started.";
+
+ Log(LogWarning, "CompatLogger")
+ << "This feature is DEPRECATED and may be removed in future releases. Check the roadmap at https://github.com/Icinga/icinga2/milestones";
+
+ Checkable::OnNewCheckResult.connect([this](const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, const MessageOrigin::Ptr&) {
+ CheckResultHandler(checkable, cr);
+ });
+ Checkable::OnNotificationSentToUser.connect([this](const Notification::Ptr& notification, const Checkable::Ptr& checkable,
+ const User::Ptr& user, const NotificationType& type, const CheckResult::Ptr& cr, const String& author,
+ const String& commentText, const String& commandName, const MessageOrigin::Ptr&) {
+ NotificationSentHandler(notification, checkable, user, type, cr, author, commentText, commandName);
+ });
+
+ Downtime::OnDowntimeTriggered.connect([this](const Downtime::Ptr& downtime) { TriggerDowntimeHandler(downtime); });
+ Downtime::OnDowntimeRemoved.connect([this](const Downtime::Ptr& downtime) { RemoveDowntimeHandler(downtime); });
+ Checkable::OnEventCommandExecuted.connect([this](const Checkable::Ptr& checkable) { EventCommandHandler(checkable); });
+
+ Checkable::OnFlappingChanged.connect([this](const Checkable::Ptr& checkable, const Value&) { FlappingChangedHandler(checkable); });
+ Checkable::OnEnableFlappingChanged.connect([this](const Checkable::Ptr& checkable, const Value&) { EnableFlappingChangedHandler(checkable); });
+
+ ExternalCommandProcessor::OnNewExternalCommand.connect([this](double, const String& command, const std::vector<String>& arguments) {
+ ExternalCommandHandler(command, arguments);
+ });
+
+ m_RotationTimer = Timer::Create();
+ m_RotationTimer->OnTimerExpired.connect([this](const Timer * const&) { RotationTimerHandler(); });
+ m_RotationTimer->Start();
+
+ ReopenFile(false);
+ ScheduleNextRotation();
+}
+
+/**
+ * @threadsafety Always.
+ */
+void CompatLogger::Stop(bool runtimeRemoved)
+{
+ m_RotationTimer->Stop(true);
+
+ Log(LogInformation, "CompatLogger")
+ << "'" << GetName() << "' stopped.";
+
+ ObjectImpl<CompatLogger>::Stop(runtimeRemoved);
+}
+
+/**
+ * @threadsafety Always.
+ */
+void CompatLogger::CheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr &cr)
+{
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr vars_after = cr->GetVarsAfter();
+
+ long state_after = vars_after->Get("state");
+ long stateType_after = vars_after->Get("state_type");
+ long attempt_after = vars_after->Get("attempt");
+ bool reachable_after = vars_after->Get("reachable");
+
+ Dictionary::Ptr vars_before = cr->GetVarsBefore();
+
+ if (vars_before) {
+ long state_before = vars_before->Get("state");
+ long stateType_before = vars_before->Get("state_type");
+ long attempt_before = vars_before->Get("attempt");
+ bool reachable_before = vars_before->Get("reachable");
+
+ if (state_before == state_after && stateType_before == stateType_after &&
+ attempt_before == attempt_after && reachable_before == reachable_after)
+ return; /* Nothing changed, ignore this checkresult. */
+ }
+
+ String output;
+ if (cr)
+ output = CompatUtility::GetCheckResultOutput(cr);
+
+ std::ostringstream msgbuf;
+
+ if (service) {
+ msgbuf << "SERVICE ALERT: "
+ << host->GetName() << ";"
+ << service->GetShortName() << ";"
+ << Service::StateToString(service->GetState()) << ";"
+ << Service::StateTypeToString(service->GetStateType()) << ";"
+ << attempt_after << ";"
+ << output << ""
+ << "";
+ } else {
+ String state = Host::StateToString(Host::CalculateState(static_cast<ServiceState>(state_after)));
+
+ msgbuf << "HOST ALERT: "
+ << host->GetName() << ";"
+ << GetHostStateString(host) << ";"
+ << Host::StateTypeToString(host->GetStateType()) << ";"
+ << attempt_after << ";"
+ << output << ""
+ << "";
+
+ }
+
+ {
+ ObjectLock olock(this);
+ WriteLine(msgbuf.str());
+ Flush();
+ }
+}
+
+/**
+ * @threadsafety Always.
+ */
+void CompatLogger::TriggerDowntimeHandler(const Downtime::Ptr& downtime)
+{
+ if (!downtime)
+ return;
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(downtime->GetCheckable());
+
+ std::ostringstream msgbuf;
+
+ if (service) {
+ msgbuf << "SERVICE DOWNTIME ALERT: "
+ << host->GetName() << ";"
+ << service->GetShortName() << ";"
+ << "STARTED" << "; "
+ << "Checkable has entered a period of scheduled downtime."
+ << "";
+ } else {
+ msgbuf << "HOST DOWNTIME ALERT: "
+ << host->GetName() << ";"
+ << "STARTED" << "; "
+ << "Checkable has entered a period of scheduled downtime."
+ << "";
+ }
+
+ {
+ ObjectLock oLock(this);
+ WriteLine(msgbuf.str());
+ Flush();
+ }
+}
+
+/**
+ * @threadsafety Always.
+ */
+void CompatLogger::RemoveDowntimeHandler(const Downtime::Ptr& downtime)
+{
+ if (!downtime)
+ return;
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(downtime->GetCheckable());
+
+ String downtime_output;
+ String downtime_state_str;
+
+ if (downtime->GetWasCancelled()) {
+ downtime_output = "Scheduled downtime for service has been cancelled.";
+ downtime_state_str = "CANCELLED";
+ } else {
+ downtime_output = "Checkable has exited from a period of scheduled downtime.";
+ downtime_state_str = "STOPPED";
+ }
+
+ std::ostringstream msgbuf;
+
+ if (service) {
+ msgbuf << "SERVICE DOWNTIME ALERT: "
+ << host->GetName() << ";"
+ << service->GetShortName() << ";"
+ << downtime_state_str << "; "
+ << downtime_output
+ << "";
+ } else {
+ msgbuf << "HOST DOWNTIME ALERT: "
+ << host->GetName() << ";"
+ << downtime_state_str << "; "
+ << downtime_output
+ << "";
+ }
+
+ {
+ ObjectLock oLock(this);
+ WriteLine(msgbuf.str());
+ Flush();
+ }
+}
+
+/**
+ * @threadsafety Always.
+ */
+void CompatLogger::NotificationSentHandler(const Notification::Ptr& notification, const Checkable::Ptr& checkable,
+ const User::Ptr& user, NotificationType notification_type, CheckResult::Ptr const& cr,
+ const String& author, const String& comment_text, const String& command_name)
+{
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ String notification_type_str = Notification::NotificationTypeToStringCompat(notification_type);
+
+ /* override problem notifications with their current state string */
+ if (notification_type == NotificationProblem) {
+ if (service)
+ notification_type_str = Service::StateToString(service->GetState());
+ else
+ notification_type_str = GetHostStateString(host);
+ }
+
+ String author_comment = "";
+ if (notification_type == NotificationCustom || notification_type == NotificationAcknowledgement) {
+ author_comment = author + ";" + comment_text;
+ }
+
+ if (!cr)
+ return;
+
+ String output = CompatUtility::GetCheckResultOutput(cr);
+
+ std::ostringstream msgbuf;
+
+ if (service) {
+ msgbuf << "SERVICE NOTIFICATION: "
+ << user->GetName() << ";"
+ << host->GetName() << ";"
+ << service->GetShortName() << ";"
+ << notification_type_str << ";"
+ << command_name << ";"
+ << output << ";"
+ << author_comment
+ << "";
+ } else {
+ msgbuf << "HOST NOTIFICATION: "
+ << user->GetName() << ";"
+ << host->GetName() << ";"
+ << notification_type_str << " "
+ << "(" << GetHostStateString(host) << ");"
+ << command_name << ";"
+ << output << ";"
+ << author_comment
+ << "";
+ }
+
+ {
+ ObjectLock oLock(this);
+ WriteLine(msgbuf.str());
+ Flush();
+ }
+}
+
+/**
+ * @threadsafety Always.
+ */
+void CompatLogger::FlappingChangedHandler(const Checkable::Ptr& checkable)
+{
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ String flapping_state_str;
+ String flapping_output;
+
+ if (checkable->IsFlapping()) {
+ flapping_output = "Checkable appears to have started flapping (" + Convert::ToString(checkable->GetFlappingCurrent()) + "% change >= " + Convert::ToString(checkable->GetFlappingThresholdHigh()) + "% threshold)";
+ flapping_state_str = "STARTED";
+ } else {
+ flapping_output = "Checkable appears to have stopped flapping (" + Convert::ToString(checkable->GetFlappingCurrent()) + "% change < " + Convert::ToString(checkable->GetFlappingThresholdLow()) + "% threshold)";
+ flapping_state_str = "STOPPED";
+ }
+
+ std::ostringstream msgbuf;
+
+ if (service) {
+ msgbuf << "SERVICE FLAPPING ALERT: "
+ << host->GetName() << ";"
+ << service->GetShortName() << ";"
+ << flapping_state_str << "; "
+ << flapping_output
+ << "";
+ } else {
+ msgbuf << "HOST FLAPPING ALERT: "
+ << host->GetName() << ";"
+ << flapping_state_str << "; "
+ << flapping_output
+ << "";
+ }
+
+ {
+ ObjectLock oLock(this);
+ WriteLine(msgbuf.str());
+ Flush();
+ }
+}
+
+void CompatLogger::EnableFlappingChangedHandler(const Checkable::Ptr& checkable)
+{
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ if (checkable->GetEnableFlapping())
+ return;
+
+ String flapping_output = "Flap detection has been disabled";
+ String flapping_state_str = "DISABLED";
+
+ std::ostringstream msgbuf;
+
+ if (service) {
+ msgbuf << "SERVICE FLAPPING ALERT: "
+ << host->GetName() << ";"
+ << service->GetShortName() << ";"
+ << flapping_state_str << "; "
+ << flapping_output
+ << "";
+ } else {
+ msgbuf << "HOST FLAPPING ALERT: "
+ << host->GetName() << ";"
+ << flapping_state_str << "; "
+ << flapping_output
+ << "";
+ }
+
+ {
+ ObjectLock oLock(this);
+ WriteLine(msgbuf.str());
+ Flush();
+ }
+}
+
+void CompatLogger::ExternalCommandHandler(const String& command, const std::vector<String>& arguments)
+{
+ std::ostringstream msgbuf;
+ msgbuf << "EXTERNAL COMMAND: "
+ << command << ";"
+ << boost::algorithm::join(arguments, ";")
+ << "";
+
+ {
+ ObjectLock oLock(this);
+ WriteLine(msgbuf.str());
+ Flush();
+ }
+}
+
+void CompatLogger::EventCommandHandler(const Checkable::Ptr& checkable)
+{
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ EventCommand::Ptr event_command = checkable->GetEventCommand();
+ String event_command_name = event_command->GetName();
+ long current_attempt = checkable->GetCheckAttempt();
+
+ std::ostringstream msgbuf;
+
+ if (service) {
+ msgbuf << "SERVICE EVENT HANDLER: "
+ << host->GetName() << ";"
+ << service->GetShortName() << ";"
+ << Service::StateToString(service->GetState()) << ";"
+ << Service::StateTypeToString(service->GetStateType()) << ";"
+ << current_attempt << ";"
+ << event_command_name;
+ } else {
+ msgbuf << "HOST EVENT HANDLER: "
+ << host->GetName() << ";"
+ << GetHostStateString(host) << ";"
+ << Host::StateTypeToString(host->GetStateType()) << ";"
+ << current_attempt << ";"
+ << event_command_name;
+ }
+
+ {
+ ObjectLock oLock(this);
+ WriteLine(msgbuf.str());
+ Flush();
+ }
+}
+
+String CompatLogger::GetHostStateString(const Host::Ptr& host)
+{
+ if (host->GetState() != HostUp && !host->IsReachable())
+ return "UNREACHABLE"; /* hardcoded compat state */
+
+ return Host::StateToString(host->GetState());
+}
+
+void CompatLogger::WriteLine(const String& line)
+{
+ ASSERT(OwnsLock());
+
+ if (!m_OutputFile.good())
+ return;
+
+ m_OutputFile << "[" << (long)Utility::GetTime() << "] " << line << "\n";
+}
+
+void CompatLogger::Flush()
+{
+ ASSERT(OwnsLock());
+
+ if (!m_OutputFile.good())
+ return;
+
+ m_OutputFile << std::flush;
+}
+
+/**
+ * @threadsafety Always.
+ */
+void CompatLogger::ReopenFile(bool rotate)
+{
+ ObjectLock olock(this);
+
+ String tempFile = GetLogDir() + "/icinga.log";
+
+ if (m_OutputFile) {
+ m_OutputFile.close();
+
+ if (rotate) {
+ String archiveFile = GetLogDir() + "/archives/icinga-" + Utility::FormatDateTime("%m-%d-%Y-%H", Utility::GetTime()) + ".log";
+
+ Log(LogNotice, "CompatLogger")
+ << "Rotating compat log file '" << tempFile << "' -> '" << archiveFile << "'";
+
+ (void) rename(tempFile.CStr(), archiveFile.CStr());
+ }
+ }
+
+ m_OutputFile.open(tempFile.CStr(), std::ofstream::app);
+
+ if (!m_OutputFile) {
+ Log(LogWarning, "CompatLogger")
+ << "Could not open compat log file '" << tempFile << "' for writing. Log output will be lost.";
+
+ return;
+ }
+
+ WriteLine("LOG ROTATION: " + GetRotationMethod());
+ WriteLine("LOG VERSION: 2.0");
+
+ for (const Host::Ptr& host : ConfigType::GetObjectsByType<Host>()) {
+ String output;
+ CheckResult::Ptr cr = host->GetLastCheckResult();
+
+ if (cr)
+ output = CompatUtility::GetCheckResultOutput(cr);
+
+ std::ostringstream msgbuf;
+ msgbuf << "CURRENT HOST STATE: "
+ << host->GetName() << ";"
+ << GetHostStateString(host) << ";"
+ << Host::StateTypeToString(host->GetStateType()) << ";"
+ << host->GetCheckAttempt() << ";"
+ << output << "";
+
+ WriteLine(msgbuf.str());
+ }
+
+ for (const Service::Ptr& service : ConfigType::GetObjectsByType<Service>()) {
+ Host::Ptr host = service->GetHost();
+
+ String output;
+ CheckResult::Ptr cr = service->GetLastCheckResult();
+
+ if (cr)
+ output = CompatUtility::GetCheckResultOutput(cr);
+
+ std::ostringstream msgbuf;
+ msgbuf << "CURRENT SERVICE STATE: "
+ << host->GetName() << ";"
+ << service->GetShortName() << ";"
+ << Service::StateToString(service->GetState()) << ";"
+ << Service::StateTypeToString(service->GetStateType()) << ";"
+ << service->GetCheckAttempt() << ";"
+ << output << "";
+
+ WriteLine(msgbuf.str());
+ }
+
+ Flush();
+}
+
+void CompatLogger::ScheduleNextRotation()
+{
+ auto now = (time_t)Utility::GetTime();
+ String method = GetRotationMethod();
+
+ tm tmthen;
+
+#ifdef _MSC_VER
+ tm *temp = localtime(&now);
+
+ if (!temp) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("localtime")
+ << boost::errinfo_errno(errno));
+ }
+
+ tmthen = *temp;
+#else /* _MSC_VER */
+ if (!localtime_r(&now, &tmthen)) {
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("localtime_r")
+ << boost::errinfo_errno(errno));
+ }
+#endif /* _MSC_VER */
+
+ tmthen.tm_min = 0;
+ tmthen.tm_sec = 0;
+
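+ /* The adjustments below may push tm_hour/tm_mday/tm_mon out of range;
+ * mktime() normalizes those fields, yielding the next rotation boundary. */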
+ if (method == "HOURLY") {
+ tmthen.tm_hour++;
+ } else if (method == "DAILY") {
+ tmthen.tm_mday++;
+ tmthen.tm_hour = 0;
+ } else if (method == "WEEKLY") {
+ tmthen.tm_mday += 7 - tmthen.tm_wday;
+ tmthen.tm_hour = 0;
+ } else if (method == "MONTHLY") {
+ tmthen.tm_mon++;
+ tmthen.tm_mday = 1;
+ tmthen.tm_hour = 0;
+ }
+
+ time_t ts = mktime(&tmthen);
+
+ Log(LogNotice, "CompatLogger")
+ << "Rescheduling rotation timer for compat log '"
+ << GetName() << "' to '" << Utility::FormatDateTime("%Y/%m/%d %H:%M:%S %z", ts) << "'";
+
+ m_RotationTimer->Reschedule(ts);
+}
+
+/**
+ * @threadsafety Always.
+ */
+void CompatLogger::RotationTimerHandler()
+{
+ try {
+ ReopenFile(true);
+ } catch (...) {
+ ScheduleNextRotation();
+
+ throw;
+ }
+
+ ScheduleNextRotation();
+}
+
+void CompatLogger::ValidateRotationMethod(const Lazy<String>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<CompatLogger>::ValidateRotationMethod(lvalue, utils);
+
+ if (lvalue() != "HOURLY" && lvalue() != "DAILY" &&
+ lvalue() != "WEEKLY" && lvalue() != "MONTHLY" && lvalue() != "NONE") {
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "rotation_method" }, "Rotation method '" + lvalue() + "' is invalid."));
+ }
+}
diff --git a/lib/compat/compatlogger.hpp b/lib/compat/compatlogger.hpp
new file mode 100644
index 0000000..9fb0b29
--- /dev/null
+++ b/lib/compat/compatlogger.hpp
@@ -0,0 +1,60 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef COMPATLOGGER_H
+#define COMPATLOGGER_H
+
+#include "compat/compatlogger-ti.hpp"
+#include "icinga/service.hpp"
+#include "base/timer.hpp"
+#include <fstream>
+
+namespace icinga
+{
+
+/**
+ * An Icinga compat log writer.
+ *
+ * @ingroup compat
+ */
+class CompatLogger final : public ObjectImpl<CompatLogger>
+{
+public:
+ DECLARE_OBJECT(CompatLogger);
+ DECLARE_OBJECTNAME(CompatLogger);
+
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+
+ void ValidateRotationMethod(const Lazy<String>& lvalue, const ValidationUtils& utils) override;
+
+protected:
+ void Start(bool runtimeCreated) override;
+ void Stop(bool runtimeRemoved) override;
+
+private:
+ void WriteLine(const String& line);
+ void Flush();
+
+ void CheckResultHandler(const Checkable::Ptr& service, const CheckResult::Ptr& cr);
+ void NotificationSentHandler(const Notification::Ptr& notification, const Checkable::Ptr& service,
+ const User::Ptr& user, NotificationType notification_type, CheckResult::Ptr const& cr,
+ const String& author, const String& comment_text, const String& command_name);
+ void FlappingChangedHandler(const Checkable::Ptr& checkable);
+ void EnableFlappingChangedHandler(const Checkable::Ptr& checkable);
+ void TriggerDowntimeHandler(const Downtime::Ptr& downtime);
+ void RemoveDowntimeHandler(const Downtime::Ptr& downtime);
+ void ExternalCommandHandler(const String& command, const std::vector<String>& arguments);
+ void EventCommandHandler(const Checkable::Ptr& service);
+
+ static String GetHostStateString(const Host::Ptr& host);
+
+ Timer::Ptr m_RotationTimer;
+ void RotationTimerHandler();
+ void ScheduleNextRotation();
+
+ std::ofstream m_OutputFile;
+ void ReopenFile(bool rotate);
+};
+
+}
+
+#endif /* COMPATLOGGER_H */
diff --git a/lib/compat/compatlogger.ti b/lib/compat/compatlogger.ti
new file mode 100644
index 0000000..56431ec
--- /dev/null
+++ b/lib/compat/compatlogger.ti
@@ -0,0 +1,23 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+#include "base/application.hpp"
+
+library compat;
+
+namespace icinga
+{
+
+class CompatLogger : ConfigObject
+{
+ activation_priority 100;
+
+ [config] String log_dir {
+ default {{{ return Configuration::LogDir + "/compat"; }}}
+ };
+ [config] String rotation_method {
+ default {{{ return "HOURLY"; }}}
+ };
+};
+
+}
diff --git a/lib/compat/externalcommandlistener.cpp b/lib/compat/externalcommandlistener.cpp
new file mode 100644
index 0000000..b61813b
--- /dev/null
+++ b/lib/compat/externalcommandlistener.cpp
@@ -0,0 +1,150 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "compat/externalcommandlistener.hpp"
+#include "compat/externalcommandlistener-ti.cpp"
+#include "icinga/externalcommandprocessor.hpp"
+#include "base/configtype.hpp"
+#include "base/logger.hpp"
+#include "base/exception.hpp"
+#include "base/application.hpp"
+#include "base/statsfunction.hpp"
+
+using namespace icinga;
+
+REGISTER_TYPE(ExternalCommandListener);
+
+REGISTER_STATSFUNCTION(ExternalCommandListener, &ExternalCommandListener::StatsFunc);
+
+void ExternalCommandListener::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr&)
+{
+ DictionaryData nodes;
+
+ for (const ExternalCommandListener::Ptr& externalcommandlistener : ConfigType::GetObjectsByType<ExternalCommandListener>()) {
+ nodes.emplace_back(externalcommandlistener->GetName(), 1); // add more stats
+ }
+
+ status->Set("externalcommandlistener", new Dictionary(std::move(nodes)));
+}
+
+/**
+ * Starts the component.
+ */
+void ExternalCommandListener::Start(bool runtimeCreated)
+{
+ ObjectImpl<ExternalCommandListener>::Start(runtimeCreated);
+
+ Log(LogInformation, "ExternalCommandListener")
+ << "'" << GetName() << "' started.";
+
+ Log(LogWarning, "ExternalCommandListener")
+ << "This feature is DEPRECATED and may be removed in future releases. Check the roadmap at https://github.com/Icinga/icinga2/milestones";
+#ifndef _WIN32
+ String path = GetCommandPath();
+ m_CommandThread = std::thread([this, path]() { CommandPipeThread(path); });
+ m_CommandThread.detach();
+#endif /* _WIN32 */
+}
+
+/**
+ * Stops the component.
+ */
+void ExternalCommandListener::Stop(bool runtimeRemoved)
+{
+ Log(LogInformation, "ExternalCommandListener")
+ << "'" << GetName() << "' stopped.";
+
+ ObjectImpl<ExternalCommandListener>::Stop(runtimeRemoved);
+}
+
+#ifndef _WIN32
+void ExternalCommandListener::CommandPipeThread(const String& commandPath)
+{
+ Utility::SetThreadName("Command Pipe");
+
+ struct stat statbuf;
+ bool fifo_ok = false;
+
+ if (lstat(commandPath.CStr(), &statbuf) >= 0) {
+ if (S_ISFIFO(statbuf.st_mode) && access(commandPath.CStr(), R_OK) >= 0) {
+ fifo_ok = true;
+ } else {
+ Utility::Remove(commandPath);
+ }
+ }
+
+ mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
+
+ if (!fifo_ok && mkfifo(commandPath.CStr(), S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP) < 0) {
+ Log(LogCritical, "ExternalCommandListener")
+ << "mkfifo() for fifo path '" << commandPath << "' failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+ return;
+ }
+
+ /* mkfifo() applies the process umask to the requested bits, so we chmod() the
+ * fifo afterwards to enforce the intended permissions. */
+ if (chmod(commandPath.CStr(), mode) < 0) {
+ Log(LogCritical, "ExternalCommandListener")
+ << "chmod() on fifo '" << commandPath << "' failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+ return;
+ }
+
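+ /* Opening the FIFO O_RDWR keeps a writer end open within the daemon, so the read
+ * side never sees a permanent EOF when an external writer disconnects. */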
+ for (;;) {
+ int fd = open(commandPath.CStr(), O_RDWR | O_NONBLOCK);
+
+ if (fd < 0) {
+ Log(LogCritical, "ExternalCommandListener")
+ << "open() for fifo path '" << commandPath << "' failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+ return;
+ }
+
+ FIFO::Ptr fifo = new FIFO();
+ Socket::Ptr sock = new Socket(fd);
+ StreamReadContext src;
+
+ for (;;) {
+ sock->Poll(true, false);
+
+ char buffer[8192];
+ size_t rc;
+
+ try {
+ rc = sock->Read(buffer, sizeof(buffer));
+ } catch (const std::exception& ex) {
+ /* We have read all data. */
+ if (errno == EAGAIN)
+ continue;
+
+ Log(LogWarning, "ExternalCommandListener")
+ << "Cannot read from command pipe." << DiagnosticInformation(ex);
+ break;
+ }
+
+ /* Empty pipe (EOF) */
+ if (rc == 0)
+ continue;
+
+ fifo->Write(buffer, rc);
+
+ for (;;) {
+ String command;
+ StreamReadStatus srs = fifo->ReadLine(&command, src);
+
+ if (srs != StatusNewItem)
+ break;
+
+ try {
+ Log(LogInformation, "ExternalCommandListener")
+ << "Executing external command: " << command;
+
+ ExternalCommandProcessor::Execute(command);
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "ExternalCommandListener")
+ << "External command failed: " << DiagnosticInformation(ex, false);
+ Log(LogNotice, "ExternalCommandListener")
+ << "External command failed: " << DiagnosticInformation(ex, true);
+ }
+ }
+ }
+ }
+}
+#endif /* _WIN32 */
diff --git a/lib/compat/externalcommandlistener.hpp b/lib/compat/externalcommandlistener.hpp
new file mode 100644
index 0000000..895531f
--- /dev/null
+++ b/lib/compat/externalcommandlistener.hpp
@@ -0,0 +1,41 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef EXTERNALCOMMANDLISTENER_H
+#define EXTERNALCOMMANDLISTENER_H
+
+#include "compat/externalcommandlistener-ti.hpp"
+#include "base/objectlock.hpp"
+#include "base/timer.hpp"
+#include "base/utility.hpp"
+#include <thread>
+#include <iostream>
+
+namespace icinga
+{
+
+/**
+ * @ingroup compat
+ */
+class ExternalCommandListener final : public ObjectImpl<ExternalCommandListener>
+{
+public:
+ DECLARE_OBJECT(ExternalCommandListener);
+ DECLARE_OBJECTNAME(ExternalCommandListener);
+
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+
+protected:
+ void Start(bool runtimeCreated) override;
+ void Stop(bool runtimeRemoved) override;
+
+private:
+#ifndef _WIN32
+ std::thread m_CommandThread;
+
+ void CommandPipeThread(const String& commandPath);
+#endif /* _WIN32 */
+};
+
+}
+
+#endif /* EXTERNALCOMMANDLISTENER_H */
diff --git a/lib/compat/externalcommandlistener.ti b/lib/compat/externalcommandlistener.ti
new file mode 100644
index 0000000..5b52944
--- /dev/null
+++ b/lib/compat/externalcommandlistener.ti
@@ -0,0 +1,20 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+#include "base/application.hpp"
+
+library compat;
+
+namespace icinga
+{
+
+class ExternalCommandListener : ConfigObject
+{
+ activation_priority 100;
+
+ [config] String command_path {
+ default {{{ return Configuration::InitRunDir + "/cmd/icinga2.cmd"; }}}
+ };
+};
+
+}
diff --git a/lib/config/CMakeLists.txt b/lib/config/CMakeLists.txt
new file mode 100644
index 0000000..80b8c2c
--- /dev/null
+++ b/lib/config/CMakeLists.txt
@@ -0,0 +1,47 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+find_package(BISON 2.3.0 REQUIRED)
+find_package(FLEX 2.5.31 REQUIRED)
+
+bison_target(config_parser config_parser.yy ${CMAKE_CURRENT_BINARY_DIR}/config_parser.cc)
+set_property(SOURCE ${CMAKE_CURRENT_BINARY_DIR}/config_parser.cc PROPERTY EXCLUDE_UNITY_BUILD TRUE)
+
+flex_target(config_lexer config_lexer.ll ${CMAKE_CURRENT_BINARY_DIR}/config_lexer.cc)
+set_property(SOURCE ${CMAKE_CURRENT_BINARY_DIR}/config_lexer.cc PROPERTY EXCLUDE_UNITY_BUILD TRUE)
+
+if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ set_property(SOURCE ${CMAKE_CURRENT_BINARY_DIR}/config_parser.cc PROPERTY COMPILE_FLAGS "-Wno-deprecated-register -Wno-parentheses-equality -Wno-unused-function")
+ set_property(SOURCE ${CMAKE_CURRENT_BINARY_DIR}/config_lexer.cc PROPERTY COMPILE_FLAGS "-Wno-deprecated-register -Wno-null-conversion")
+endif()
+
+add_flex_bison_dependency(config_lexer config_parser)
+
+include_directories(${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR})
+
+set(config_SOURCES
+ i2-config.hpp
+ activationcontext.cpp activationcontext.hpp
+ applyrule.cpp applyrule-targeted.cpp applyrule.hpp
+ configcompiler.cpp configcompiler.hpp
+ configcompilercontext.cpp configcompilercontext.hpp
+ configfragment.hpp
+ configitem.cpp configitem.hpp
+ configitembuilder.cpp configitembuilder.hpp
+ expression.cpp expression.hpp
+ objectrule.cpp objectrule.hpp
+ vmops.hpp
+ ${FLEX_config_lexer_OUTPUTS} ${BISON_config_parser_OUTPUTS}
+)
+
+if(ICINGA2_UNITY_BUILD)
+ mkunity_target(config config config_SOURCES)
+endif()
+
+add_library(config OBJECT ${config_SOURCES})
+
+add_dependencies(config base)
+
+set_target_properties (
+ config PROPERTIES
+ FOLDER Lib
+)
diff --git a/lib/config/activationcontext.cpp b/lib/config/activationcontext.cpp
new file mode 100644
index 0000000..d050875
--- /dev/null
+++ b/lib/config/activationcontext.cpp
@@ -0,0 +1,61 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "config/activationcontext.hpp"
+#include "base/exception.hpp"
+
+using namespace icinga;
+
+boost::thread_specific_ptr<std::stack<ActivationContext::Ptr> > ActivationContext::m_ActivationStack;
+
+std::stack<ActivationContext::Ptr>& ActivationContext::GetActivationStack()
+{
+ std::stack<ActivationContext::Ptr> *actx = m_ActivationStack.get();
+
+ if (!actx) {
+ actx = new std::stack<ActivationContext::Ptr>();
+ m_ActivationStack.reset(actx);
+ }
+
+ return *actx;
+}
+
+void ActivationContext::PushContext(const ActivationContext::Ptr& context)
+{
+ GetActivationStack().push(context);
+}
+
+void ActivationContext::PopContext()
+{
+ ASSERT(!GetActivationStack().empty());
+ GetActivationStack().pop();
+}
+
+ActivationContext::Ptr ActivationContext::GetCurrentContext()
+{
+ std::stack<ActivationContext::Ptr>& astack = GetActivationStack();
+
+ if (astack.empty())
+ BOOST_THROW_EXCEPTION(std::runtime_error("Objects may not be created outside of an activation context."));
+
+ return astack.top();
+}
+
+ActivationScope::ActivationScope(ActivationContext::Ptr context)
+ : m_Context(std::move(context))
+{
+ if (!m_Context)
+ m_Context = new ActivationContext();
+
+ ActivationContext::PushContext(m_Context);
+}
+
+ActivationScope::~ActivationScope()
+{
+ ActivationContext::PopContext();
+}
+
+ActivationContext::Ptr ActivationScope::GetContext() const
+{
+ return m_Context;
+}
+
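ActivationScope is an RAII guard: its constructor pushes a context onto a per-thread stack and its destructor pops it, so GetCurrentContext() throws whenever objects are created outside such a scope. A self-contained sketch of the same pattern using the C++11 thread_local keyword instead of boost::thread_specific_ptr (toy types, not the Icinga API):

#include <memory>
#include <stack>
#include <stdexcept>

struct Context { };

class Scope {
public:
	Scope() : m_Context(std::make_shared<Context>()) { Stack().push(m_Context); }
	~Scope() { Stack().pop(); }

	static std::shared_ptr<Context> Current()
	{
		if (Stack().empty())
			throw std::runtime_error("no active context on this thread");

		return Stack().top();
	}

private:
	std::shared_ptr<Context> m_Context;

	static std::stack<std::shared_ptr<Context>>& Stack()
	{
		thread_local std::stack<std::shared_ptr<Context>> stack; /* one stack per thread */
		return stack;
	}
};

int main()
{
	{
		Scope scope;                 /* constructor pushes a fresh context */
		auto ctx = Scope::Current(); /* fine while the scope is alive */
		(void) ctx;
	}
	/* Scope::Current() here would throw, mirroring GetCurrentContext() above. */
	return 0;
}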
diff --git a/lib/config/activationcontext.hpp b/lib/config/activationcontext.hpp
new file mode 100644
index 0000000..3fe5d09
--- /dev/null
+++ b/lib/config/activationcontext.hpp
@@ -0,0 +1,46 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef ACTIVATIONCONTEXT_H
+#define ACTIVATIONCONTEXT_H
+
+#include "config/i2-config.hpp"
+#include "base/object.hpp"
+#include <boost/thread/tss.hpp>
+#include <stack>
+
+namespace icinga
+{
+
+class ActivationContext final : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ActivationContext);
+
+ static ActivationContext::Ptr GetCurrentContext();
+
+private:
+ static void PushContext(const ActivationContext::Ptr& context);
+ static void PopContext();
+
+ static std::stack<ActivationContext::Ptr>& GetActivationStack();
+
+ static boost::thread_specific_ptr<std::stack<ActivationContext::Ptr> > m_ActivationStack;
+
+ friend class ActivationScope;
+};
+
+class ActivationScope
+{
+public:
+ ActivationScope(ActivationContext::Ptr context = nullptr);
+ ~ActivationScope();
+
+ ActivationContext::Ptr GetContext() const;
+
+private:
+ ActivationContext::Ptr m_Context;
+};
+
+}
+
+#endif /* ACTIVATIONCONTEXT_H */
diff --git a/lib/config/applyrule-targeted.cpp b/lib/config/applyrule-targeted.cpp
new file mode 100644
index 0000000..c5bfe20
--- /dev/null
+++ b/lib/config/applyrule-targeted.cpp
@@ -0,0 +1,266 @@
+/* Icinga 2 | (c) 2022 Icinga GmbH | GPLv2+ */
+
+#include "base/string.hpp"
+#include "config/applyrule.hpp"
+#include "config/expression.hpp"
+#include <utility>
+#include <vector>
+
+using namespace icinga;
+
+/**
+ * @returns All ApplyRules targeting only specific parent objects including the given host. (See AddTargetedRule().)
+ */
+const std::set<ApplyRule::Ptr>& ApplyRule::GetTargetedHostRules(const Type::Ptr& sourceType, const String& host)
+{
+ auto perSourceType (m_Rules.find(sourceType.get()));
+
+ if (perSourceType != m_Rules.end()) {
+ auto perHost (perSourceType->second.Targeted.find(host));
+
+ if (perHost != perSourceType->second.Targeted.end()) {
+ return perHost->second.ForHost;
+ }
+ }
+
+ static const std::set<ApplyRule::Ptr> noRules;
+ return noRules;
+}
+
+/**
+ * @returns All ApplyRules targeting only specific parent objects including the given service. (See AddTargetedRule().)
+ */
+const std::set<ApplyRule::Ptr>& ApplyRule::GetTargetedServiceRules(const Type::Ptr& sourceType, const String& host, const String& service)
+{
+ auto perSourceType (m_Rules.find(sourceType.get()));
+
+ if (perSourceType != m_Rules.end()) {
+ auto perHost (perSourceType->second.Targeted.find(host));
+
+ if (perHost != perSourceType->second.Targeted.end()) {
+ auto perService (perHost->second.ForServices.find(service));
+
+ if (perService != perHost->second.ForServices.end()) {
+ return perService->second;
+ }
+ }
+ }
+
+ static const std::set<ApplyRule::Ptr> noRules;
+ return noRules;
+}
+
+/**
+ * If the given ApplyRule targets only specific parent objects, add it to the respective "index".
+ *
+ * - The above means for apply T "N" to Host: assign where host.name == "H" [ || host.name == "h" ... ]
+ * - For apply T "N" to Service it means: assign where host.name == "H" && service.name == "S" [ || host.name == "h" && service.name == "s" ... ]
+ *
+ * The order of operands of || && == doesn't matter.
+ *
+ * @returns Whether the rule has been added to the "index".
+ */
+bool ApplyRule::AddTargetedRule(const ApplyRule::Ptr& rule, const String& targetType, ApplyRule::PerSourceType& rules)
+{
+ if (targetType == "Host") {
+ std::vector<const String *> hosts;
+
+ if (GetTargetHosts(rule->m_Filter.get(), hosts)) {
+ for (auto host : hosts) {
+ rules.Targeted[*host].ForHost.emplace(rule);
+ }
+
+ return true;
+ }
+ } else if (targetType == "Service") {
+ std::vector<std::pair<const String *, const String *>> services;
+
+ if (GetTargetServices(rule->m_Filter.get(), services)) {
+ for (auto service : services) {
+ rules.Targeted[*service.first].ForServices[*service.second].emplace(rule);
+ }
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * If the given assign filter is like the following, extract the host names ("H", "h", ...) into the vector:
+ *
+ * host.name == "H" [ || host.name == "h" ... ]
+ *
+ * The order of operands of || == doesn't matter.
+ *
+ * @returns Whether the given assign filter is like above.
+ */
+bool ApplyRule::GetTargetHosts(Expression* assignFilter, std::vector<const String *>& hosts, const Dictionary::Ptr& constants)
+{
+ auto lor (dynamic_cast<LogicalOrExpression*>(assignFilter));
+
+ if (lor) {
+ return GetTargetHosts(lor->GetOperand1().get(), hosts, constants)
+ && GetTargetHosts(lor->GetOperand2().get(), hosts, constants);
+ }
+
+ auto name (GetComparedName(assignFilter, "host", constants));
+
+ if (name) {
+ hosts.emplace_back(name);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * If the given assign filter is like the following, extract the host+service names ("H"+"S", "h"+"s", ...) into the vector:
+ *
+ * host.name == "H" && service.name == "S" [ || host.name == "h" && service.name == "s" ... ]
+ *
+ * The order of operands of || && == doesn't matter.
+ *
+ * @returns Whether the given assign filter is like above.
+ */
+bool ApplyRule::GetTargetServices(Expression* assignFilter, std::vector<std::pair<const String *, const String *>>& services, const Dictionary::Ptr& constants)
+{
+ auto lor (dynamic_cast<LogicalOrExpression*>(assignFilter));
+
+ if (lor) {
+ return GetTargetServices(lor->GetOperand1().get(), services, constants)
+ && GetTargetServices(lor->GetOperand2().get(), services, constants);
+ }
+
+ auto service (GetTargetService(assignFilter, constants));
+
+ if (service.first) {
+ services.emplace_back(service);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * If the given filter is like the following, extract the host+service names ("H"+"S"):
+ *
+ * host.name == "H" && service.name == "S"
+ *
+ * The order of operands of && == doesn't matter.
+ *
+ * @returns {host, service} on success and {nullptr, nullptr} on failure.
+ */
+std::pair<const String *, const String *> ApplyRule::GetTargetService(Expression* assignFilter, const Dictionary::Ptr& constants)
+{
+ auto land (dynamic_cast<LogicalAndExpression*>(assignFilter));
+
+ if (!land) {
+ return {nullptr, nullptr};
+ }
+
+ auto op1 (land->GetOperand1().get());
+ auto op2 (land->GetOperand2().get());
+ auto host (GetComparedName(op1, "host", constants));
+
+ if (!host) {
+ std::swap(op1, op2);
+ host = GetComparedName(op1, "host", constants);
+ }
+
+ if (host) {
+ auto service (GetComparedName(op2, "service", constants));
+
+ if (service) {
+ return {host, service};
+ }
+ }
+
+ return {nullptr, nullptr};
+}
+
+/**
+ * If the given filter is like the following, extract the object name ("N"):
+ *
+ * $lcType$.name == "N"
+ *
+ * The order of operands of == doesn't matter.
+ *
+ * @returns The object name on success and nullptr on failure.
+ */
+const String * ApplyRule::GetComparedName(Expression* assignFilter, const char * lcType, const Dictionary::Ptr& constants)
+{
+ auto eq (dynamic_cast<EqualExpression*>(assignFilter));
+
+ if (!eq) {
+ return nullptr;
+ }
+
+ auto op1 (eq->GetOperand1().get());
+ auto op2 (eq->GetOperand2().get());
+
+ if (IsNameIndexer(op1, lcType, constants)) {
+ return GetConstString(op2, constants);
+ }
+
+ if (IsNameIndexer(op2, lcType, constants)) {
+ return GetConstString(op1, constants);
+ }
+
+ return nullptr;
+}
+
+/**
+ * @returns Whether the given expression is like $lcType$.name.
+ */
+bool ApplyRule::IsNameIndexer(Expression* exp, const char * lcType, const Dictionary::Ptr& constants)
+{
+ auto ixr (dynamic_cast<IndexerExpression*>(exp));
+
+ if (!ixr) {
+ return false;
+ }
+
+ auto var (dynamic_cast<VariableExpression*>(ixr->GetOperand1().get()));
+
+ if (!var || var->GetVariable() != lcType) {
+ return false;
+ }
+
+ auto val (GetConstString(ixr->GetOperand2().get(), constants));
+
+ return val && *val == "name";
+}
+
+/**
+ * @returns If the given expression is a constant string, its address. nullptr on failure.
+ */
+const String * ApplyRule::GetConstString(Expression* exp, const Dictionary::Ptr& constants)
+{
+ auto cnst (GetConst(exp, constants));
+
+ return cnst && cnst->IsString() ? &cnst->Get<String>() : nullptr;
+}
+
+/**
+ * @returns If the given expression is a constant, its address. nullptr on failure.
+ */
+const Value * ApplyRule::GetConst(Expression* exp, const Dictionary::Ptr& constants)
+{
+ auto lit (dynamic_cast<LiteralExpression*>(exp));
+
+ if (lit) {
+ return &lit->GetValue();
+ }
+
+ if (constants) {
+ auto var (dynamic_cast<VariableExpression*>(exp));
+
+ if (var) {
+ return constants->GetRef(var->GetVariable());
+ }
+ }
+
+ return nullptr;
+}
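The helpers above only accept assign filters that are a pure disjunction of name equalities; any other construct makes GetTargetHosts()/GetTargetServices() return false, so the rule stays on the generic evaluation path. A self-contained toy showing the shape of that recursion over an OR tree of equality leaves (toy AST, not Icinga's Expression classes):

#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Node {
	virtual ~Node() = default;
};

struct HostEquals : Node {           /* models: host.name == "<name>" */
	std::string name;
	explicit HostEquals(std::string n) : name(std::move(n)) { }
};

struct Or : Node {                    /* models: <lhs> || <rhs> */
	std::unique_ptr<Node> lhs, rhs;
	Or(std::unique_ptr<Node> l, std::unique_ptr<Node> r) : lhs(std::move(l)), rhs(std::move(r)) { }
};

static bool CollectHosts(const Node* n, std::vector<std::string>& hosts)
{
	if (auto o = dynamic_cast<const Or*>(n))
		return CollectHosts(o->lhs.get(), hosts) && CollectHosts(o->rhs.get(), hosts);

	if (auto eq = dynamic_cast<const HostEquals*>(n)) {
		hosts.push_back(eq->name);
		return true;
	}

	return false; /* any other construct: fall back to the generic (non-targeted) path */
}

int main()
{
	/* host.name == "H" || host.name == "h" */
	auto filter = std::make_unique<Or>(std::make_unique<HostEquals>("H"),
		std::make_unique<HostEquals>("h"));

	std::vector<std::string> hosts;

	if (CollectHosts(filter.get(), hosts))
		for (const auto& h : hosts)
			std::cout << h << "\n";
}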
diff --git a/lib/config/applyrule.cpp b/lib/config/applyrule.cpp
new file mode 100644
index 0000000..8739971
--- /dev/null
+++ b/lib/config/applyrule.cpp
@@ -0,0 +1,189 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "config/applyrule.hpp"
+#include "base/logger.hpp"
+#include <set>
+#include <unordered_set>
+
+using namespace icinga;
+
+ApplyRule::RuleMap ApplyRule::m_Rules;
+ApplyRule::TypeMap ApplyRule::m_Types;
+
+ApplyRule::ApplyRule(String name, Expression::Ptr expression,
+ Expression::Ptr filter, String package, String fkvar, String fvvar, Expression::Ptr fterm,
+ bool ignoreOnError, DebugInfo di, Dictionary::Ptr scope)
+ : m_Name(std::move(name)), m_Expression(std::move(expression)), m_Filter(std::move(filter)), m_Package(std::move(package)), m_FKVar(std::move(fkvar)),
+ m_FVVar(std::move(fvvar)), m_FTerm(std::move(fterm)), m_IgnoreOnError(ignoreOnError), m_DebugInfo(std::move(di)), m_Scope(std::move(scope)), m_HasMatches(false)
+{ }
+
+String ApplyRule::GetName() const
+{
+ return m_Name;
+}
+
+Expression::Ptr ApplyRule::GetExpression() const
+{
+ return m_Expression;
+}
+
+Expression::Ptr ApplyRule::GetFilter() const
+{
+ return m_Filter;
+}
+
+String ApplyRule::GetPackage() const
+{
+ return m_Package;
+}
+
+Expression::Ptr ApplyRule::GetFTerm() const
+{
+ return m_FTerm;
+}
+
+bool ApplyRule::GetIgnoreOnError() const
+{
+ return m_IgnoreOnError;
+}
+
+const DebugInfo& ApplyRule::GetDebugInfo() const
+{
+ return m_DebugInfo;
+}
+
+Dictionary::Ptr ApplyRule::GetScope() const
+{
+ return m_Scope;
+}
+
+void ApplyRule::AddRule(const String& sourceType, const String& targetType, const String& name,
+ const Expression::Ptr& expression, const Expression::Ptr& filter, const String& package, const String& fkvar,
+ const String& fvvar, const Expression::Ptr& fterm, bool ignoreOnError, const DebugInfo& di, const Dictionary::Ptr& scope)
+{
+ auto actualTargetType (&targetType);
+
+ if (*actualTargetType == "") {
+ auto& targetTypes (GetTargetTypes(sourceType));
+
+ if (targetTypes.size() == 1u) {
+ actualTargetType = &targetTypes[0];
+ }
+ }
+
+ ApplyRule::Ptr rule = new ApplyRule(name, expression, filter, package, fkvar, fvvar, fterm, ignoreOnError, di, scope);
+ auto& rules (m_Rules[Type::GetByName(sourceType).get()]);
+
+ if (!AddTargetedRule(rule, *actualTargetType, rules)) {
+ rules.Regular[Type::GetByName(*actualTargetType).get()].emplace_back(std::move(rule));
+ }
+}
+
+bool ApplyRule::EvaluateFilter(ScriptFrame& frame) const
+{
+ return Convert::ToBool(m_Filter->Evaluate(frame));
+}
+
+void ApplyRule::RegisterType(const String& sourceType, const std::vector<String>& targetTypes)
+{
+ m_Types[sourceType] = targetTypes;
+}
+
+bool ApplyRule::IsValidSourceType(const String& sourceType)
+{
+ return m_Types.find(sourceType) != m_Types.end();
+}
+
+bool ApplyRule::IsValidTargetType(const String& sourceType, const String& targetType)
+{
+ auto it = m_Types.find(sourceType);
+
+ if (it == m_Types.end())
+ return false;
+
+ if (it->second.size() == 1 && targetType == "")
+ return true;
+
+ for (const String& type : it->second) {
+ if (type == targetType)
+ return true;
+ }
+
+ return false;
+}
+
+const std::vector<String>& ApplyRule::GetTargetTypes(const String& sourceType)
+{
+ auto it = m_Types.find(sourceType);
+
+ if (it == m_Types.end()) {
+ static const std::vector<String> noTypes;
+ return noTypes;
+ }
+
+ return it->second;
+}
+
+void ApplyRule::AddMatch()
+{
+ m_HasMatches.store(true, std::memory_order_relaxed);
+}
+
+bool ApplyRule::HasMatches() const
+{
+ return m_HasMatches.load(std::memory_order_relaxed);
+}
+
+const std::vector<ApplyRule::Ptr>& ApplyRule::GetRules(const Type::Ptr& sourceType, const Type::Ptr& targetType)
+{
+ auto perSourceType (m_Rules.find(sourceType.get()));
+
+ if (perSourceType != m_Rules.end()) {
+ auto perTargetType (perSourceType->second.Regular.find(targetType.get()));
+
+ if (perTargetType != perSourceType->second.Regular.end()) {
+ return perTargetType->second;
+ }
+ }
+
+ static const std::vector<ApplyRule::Ptr> noRules;
+ return noRules;
+}
+
+void ApplyRule::CheckMatches(bool silent)
+{
+ for (auto& perSourceType : m_Rules) {
+ for (auto& perTargetType : perSourceType.second.Regular) {
+ for (auto& rule : perTargetType.second) {
+ CheckMatches(rule, perSourceType.first, silent);
+ }
+ }
+
+ std::unordered_set<ApplyRule*> targeted;
+
+ for (auto& perHost : perSourceType.second.Targeted) {
+ for (auto& rule : perHost.second.ForHost) {
+ targeted.emplace(rule.get());
+ }
+
+ for (auto& perService : perHost.second.ForServices) {
+ for (auto& rule : perService.second) {
+ targeted.emplace(rule.get());
+ }
+ }
+ }
+
+ for (auto rule : targeted) {
+ CheckMatches(rule, perSourceType.first, silent);
+ }
+ }
+}
+
+void ApplyRule::CheckMatches(const ApplyRule::Ptr& rule, Type* sourceType, bool silent)
+{
+ if (!rule->HasMatches() && !silent) {
+ Log(LogWarning, "ApplyRule")
+ << "Apply rule '" << rule->GetName() << "' (" << rule->GetDebugInfo() << ") for type '"
+ << sourceType->GetName() << "' does not match anywhere!";
+ }
+}
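AddRule() fills in a missing target type only when it is unambiguous: with no "to <Type>" given and exactly one registered target type, that single type is assumed; otherwise the empty string is kept and validation decides. A small sketch of just that defaulting, with a hypothetical registry (the actual registrations happen elsewhere via RegisterType()):

#include <iostream>
#include <map>
#include <string>
#include <vector>

static std::map<std::string, std::vector<std::string>> registry = {
	{"Service",      {"Host"}},            /* illustrative contents only */
	{"Notification", {"Host", "Service"}},
};

static std::string ResolveTargetType(const std::string& sourceType, std::string targetType)
{
	const auto& targets = registry[sourceType];

	if (targetType.empty() && targets.size() == 1)
		targetType = targets[0]; /* unambiguous: default to the only registered target */

	return targetType;
}

int main()
{
	std::cout << ResolveTargetType("Service", "") << "\n";             /* Host */
	std::cout << ResolveTargetType("Notification", "") << "\n";        /* ambiguous: stays empty */
	std::cout << ResolveTargetType("Notification", "Service") << "\n"; /* Service */
}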
diff --git a/lib/config/applyrule.hpp b/lib/config/applyrule.hpp
new file mode 100644
index 0000000..cf9b6e5
--- /dev/null
+++ b/lib/config/applyrule.hpp
@@ -0,0 +1,126 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef APPLYRULE_H
+#define APPLYRULE_H
+
+#include "config/i2-config.hpp"
+#include "config/expression.hpp"
+#include "base/debuginfo.hpp"
+#include "base/shared-object.hpp"
+#include "base/type.hpp"
+#include <unordered_map>
+#include <atomic>
+
+namespace icinga
+{
+
+/**
+ * @ingroup config
+ */
+class ApplyRule : public SharedObject
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ApplyRule);
+
+ struct PerHost
+ {
+ std::set<ApplyRule::Ptr> ForHost;
+ std::unordered_map<String /* service */, std::set<ApplyRule::Ptr>> ForServices;
+ };
+
+ struct PerSourceType
+ {
+ std::unordered_map<Type* /* target type */, std::vector<ApplyRule::Ptr>> Regular;
+ std::unordered_map<String /* host */, PerHost> Targeted;
+ };
+
+ /*
+ * m_Rules[T::TypeInstance.get()].Targeted["H"].ForHost
+ * contains all apply rules like apply T "x" to Host { ... }
+ * which target only specific hosts incl. "H", e.g. via
+ * assign where host.name == "H" || host.name == "h".
+ *
+ * m_Rules[T::TypeInstance.get()].Targeted["H"].ForServices["S"]
+ * contains all apply rules like apply T "x" to Service { ... }
+ * which target only specific services on specific hosts,
+ * e.g. via assign where host.name == "H" && service.name == "S".
+ *
+ * m_Rules[T::TypeInstance.get()].Regular[C::TypeInstance.get()]
+ * contains all other apply rules like apply T "x" to C { ... }.
+ */
+ typedef std::unordered_map<Type* /* source type */, PerSourceType> RuleMap;
+
+ typedef std::map<String, std::vector<String> > TypeMap;
+
+ String GetName() const;
+ Expression::Ptr GetExpression() const;
+ Expression::Ptr GetFilter() const;
+ String GetPackage() const;
+
+ inline const String& GetFKVar() const noexcept
+ {
+ return m_FKVar;
+ }
+
+ inline const String& GetFVVar() const noexcept
+ {
+ return m_FVVar;
+ }
+
+ Expression::Ptr GetFTerm() const;
+ bool GetIgnoreOnError() const;
+ const DebugInfo& GetDebugInfo() const;
+ Dictionary::Ptr GetScope() const;
+ void AddMatch();
+ bool HasMatches() const;
+
+ bool EvaluateFilter(ScriptFrame& frame) const;
+
+ static void AddRule(const String& sourceType, const String& targetType, const String& name, const Expression::Ptr& expression,
+ const Expression::Ptr& filter, const String& package, const String& fkvar, const String& fvvar, const Expression::Ptr& fterm,
+ bool ignoreOnError, const DebugInfo& di, const Dictionary::Ptr& scope);
+ static const std::vector<ApplyRule::Ptr>& GetRules(const Type::Ptr& sourceType, const Type::Ptr& targetType);
+ static const std::set<ApplyRule::Ptr>& GetTargetedHostRules(const Type::Ptr& sourceType, const String& host);
+ static const std::set<ApplyRule::Ptr>& GetTargetedServiceRules(const Type::Ptr& sourceType, const String& host, const String& service);
+ static bool GetTargetHosts(Expression* assignFilter, std::vector<const String *>& hosts, const Dictionary::Ptr& constants = nullptr);
+ static bool GetTargetServices(Expression* assignFilter, std::vector<std::pair<const String *, const String *>>& services, const Dictionary::Ptr& constants = nullptr);
+
+ static void RegisterType(const String& sourceType, const std::vector<String>& targetTypes);
+ static bool IsValidSourceType(const String& sourceType);
+ static bool IsValidTargetType(const String& sourceType, const String& targetType);
+ static const std::vector<String>& GetTargetTypes(const String& sourceType);
+
+ static void CheckMatches(bool silent);
+ static void CheckMatches(const ApplyRule::Ptr& rule, Type* sourceType, bool silent);
+
+private:
+ String m_Name;
+ Expression::Ptr m_Expression;
+ Expression::Ptr m_Filter;
+ String m_Package;
+ String m_FKVar;
+ String m_FVVar;
+ Expression::Ptr m_FTerm;
+ bool m_IgnoreOnError;
+ DebugInfo m_DebugInfo;
+ Dictionary::Ptr m_Scope;
+ std::atomic<bool> m_HasMatches;
+
+ static TypeMap m_Types;
+ static RuleMap m_Rules;
+
+ static bool AddTargetedRule(const ApplyRule::Ptr& rule, const String& targetType, PerSourceType& rules);
+ static std::pair<const String *, const String *> GetTargetService(Expression* assignFilter, const Dictionary::Ptr& constants);
+ static const String * GetComparedName(Expression* assignFilter, const char * lcType, const Dictionary::Ptr& constants);
+ static bool IsNameIndexer(Expression* exp, const char * lcType, const Dictionary::Ptr& constants);
+ static const String * GetConstString(Expression* exp, const Dictionary::Ptr& constants);
+ static const Value * GetConst(Expression* exp, const Dictionary::Ptr& constants);
+
+ ApplyRule(String name, Expression::Ptr expression,
+ Expression::Ptr filter, String package, String fkvar, String fvvar, Expression::Ptr fterm,
+ bool ignoreOnError, DebugInfo di, Dictionary::Ptr scope);
+};
+
+}
+
+#endif /* APPLYRULE_H */
diff --git a/lib/config/config_lexer.ll b/lib/config/config_lexer.ll
new file mode 100644
index 0000000..abfdaff
--- /dev/null
+++ b/lib/config/config_lexer.ll
@@ -0,0 +1,253 @@
+%{
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "config/configcompiler.hpp"
+#include "config/expression.hpp"
+#include "base/exception.hpp"
+#include <utility>
+
+using namespace icinga;
+
+#include "config/config_parser.hh"
+#include <sstream>
+
+#define YYLTYPE icinga::CompilerDebugInfo
+
+#define YY_EXTRA_TYPE ConfigCompiler *
+#define YY_USER_ACTION \
+do { \
+ yylloc->Path = yyextra->GetPath(); \
+ yylloc->FirstLine = yylineno; \
+ yylloc->FirstColumn = yycolumn; \
+ yylloc->LastLine = yylineno; \
+ yylloc->LastColumn = yycolumn + yyleng - 1; \
+ yycolumn += yyleng; \
+} while (0);
+
+#define YY_INPUT(buf, result, max_size) \
+do { \
+ result = yyextra->ReadInput(buf, max_size); \
+} while (0)
+%}
+
+%option reentrant noyywrap yylineno
+%option bison-bridge bison-locations
+%option never-interactive nounistd
+%option noinput nounput
+
+%x C_COMMENT
+%x STRING
+%x HEREDOC
+
+%%
+\" {
+ yyextra->m_LexBuffer.Clear();
+
+ yyextra->m_LocationBegin = *yylloc;
+
+ BEGIN(STRING);
+ }
+
+<STRING>\" {
+ BEGIN(INITIAL);
+
+ yylloc->FirstLine = yyextra->m_LocationBegin.FirstLine;
+ yylloc->FirstColumn = yyextra->m_LocationBegin.FirstColumn;
+
+ yylval->text = new String(std::move(yyextra->m_LexBuffer));
+
+ return T_STRING;
+ }
+
+<STRING>\n {
+ BOOST_THROW_EXCEPTION(ScriptError("Unterminated string literal", DebugInfoRange(yyextra->m_LocationBegin, *yylloc)));
+ }
+
+<STRING>\\[0-7]{1,3} {
+ /* octal escape sequence */
+ int result;
+
+ (void) sscanf(yytext + 1, "%o", &result);
+
+ if (result > 0xff) {
+ /* error, constant is out-of-bounds */
+ BOOST_THROW_EXCEPTION(ScriptError("Constant is out of bounds: " + String(yytext), *yylloc));
+ }
+
+ yyextra->m_LexBuffer += static_cast<char>(result);
+ }
+
+<STRING>\\[0-9]+ {
+ /* generate error - bad escape sequence; something
+ * like '\48' or '\0777777'
+ */
+ BOOST_THROW_EXCEPTION(ScriptError("Bad escape sequence found: " + String(yytext), *yylloc));
+ }
+<STRING>\\n { yyextra->m_LexBuffer += '\n'; }
+<STRING>\\\\ { yyextra->m_LexBuffer += '\\'; }
+<STRING>\\\" { yyextra->m_LexBuffer += '"'; }
+<STRING>\\t { yyextra->m_LexBuffer += '\t'; }
+<STRING>\\r { yyextra->m_LexBuffer += '\r'; }
+<STRING>\\b { yyextra->m_LexBuffer += '\b'; }
+<STRING>\\f { yyextra->m_LexBuffer += '\f'; }
+<STRING>\\\n { yyextra->m_LexBuffer += yytext[1]; }
+<STRING>\\. {
+ BOOST_THROW_EXCEPTION(ScriptError("Bad escape sequence found: " + String(yytext), *yylloc));
+ }
+
+<STRING>[^\\\n\"]+ {
+ char *yptr = yytext;
+
+ while (*yptr)
+ yyextra->m_LexBuffer += *yptr++;
+ }
+
+<STRING><<EOF>> {
+ BOOST_THROW_EXCEPTION(ScriptError("End-of-file while in string literal", DebugInfoRange(yyextra->m_LocationBegin, *yylloc)));
+ }
+
+\{\{\{ {
+ yyextra->m_LexBuffer.Clear();
+
+ yyextra->m_LocationBegin = *yylloc;
+
+ BEGIN(HEREDOC);
+ }
+
+<HEREDOC><<EOF>> {
+ BOOST_THROW_EXCEPTION(ScriptError("End-of-file while in string literal", DebugInfoRange(yyextra->m_LocationBegin, *yylloc)));
+ }
+
+<HEREDOC>\}\}\} {
+ BEGIN(INITIAL);
+
+ yylloc->FirstLine = yyextra->m_LocationBegin.FirstLine;
+ yylloc->FirstColumn = yyextra->m_LocationBegin.FirstColumn;
+
+ yylval->text = new String(std::move(yyextra->m_LexBuffer));
+
+ return T_STRING;
+ }
+
+<HEREDOC>(.|\n) { yyextra->m_LexBuffer += yytext[0]; }
+
+<INITIAL>{
+"/*" BEGIN(C_COMMENT);
+}
+
+<C_COMMENT>{
+"*/" BEGIN(INITIAL);
+[^*] /* ignore comment */
+"*" /* ignore star */
+}
+
+<C_COMMENT><<EOF>> {
+ BOOST_THROW_EXCEPTION(ScriptError("End-of-file while in comment", *yylloc));
+ }
+
+
+\/\/[^\n]* /* ignore C++-style comments */
+#[^\n]* /* ignore shell-style comments */
+[ \t] /* ignore whitespace */
+
+<INITIAL>{
+object return T_OBJECT;
+template return T_TEMPLATE;
+include return T_INCLUDE;
+include_recursive return T_INCLUDE_RECURSIVE;
+include_zones return T_INCLUDE_ZONES;
+library return T_LIBRARY;
+null return T_NULL;
+true { yylval->boolean = 1; return T_BOOLEAN; }
+false { yylval->boolean = 0; return T_BOOLEAN; }
+const return T_CONST;
+var return T_VAR;
+this return T_THIS;
+globals return T_GLOBALS;
+locals return T_LOCALS;
+use return T_USE;
+using return T_USING;
+apply return T_APPLY;
+default return T_DEFAULT;
+to return T_TO;
+where return T_WHERE;
+import return T_IMPORT;
+assign return T_ASSIGN;
+ignore return T_IGNORE;
+function return T_FUNCTION;
+return return T_RETURN;
+break return T_BREAK;
+continue return T_CONTINUE;
+for return T_FOR;
+if return T_IF;
+else return T_ELSE;
+while return T_WHILE;
+throw return T_THROW;
+try return T_TRY;
+except return T_EXCEPT;
+ignore_on_error return T_IGNORE_ON_ERROR;
+current_filename return T_CURRENT_FILENAME;
+current_line return T_CURRENT_LINE;
+debugger return T_DEBUGGER;
+namespace return T_NAMESPACE;
+=\> return T_FOLLOWS;
+\<\< return T_SHIFT_LEFT;
+\>\> return T_SHIFT_RIGHT;
+\<= return T_LESS_THAN_OR_EQUAL;
+\>= return T_GREATER_THAN_OR_EQUAL;
+== return T_EQUAL;
+!= return T_NOT_EQUAL;
+!in return T_NOT_IN;
+in return T_IN;
+&& return T_LOGICAL_AND;
+\|\| return T_LOGICAL_OR;
+\{\{ return T_NULLARY_LAMBDA_BEGIN;
+\}\} return T_NULLARY_LAMBDA_END;
+[a-zA-Z_][a-zA-Z0-9\_]* { yylval->text = new String(yytext); return T_IDENTIFIER; }
+@[a-zA-Z_][a-zA-Z0-9\_]* { yylval->text = new String(yytext + 1); return T_IDENTIFIER; }
+\<[^ \>]*\> { yytext[yyleng-1] = '\0'; yylval->text = new String(yytext + 1); return T_STRING_ANGLE; }
+[0-9]+(\.[0-9]+)?ms { yylval->num = strtod(yytext, NULL) / 1000; return T_NUMBER; }
+[0-9]+(\.[0-9]+)?d { yylval->num = strtod(yytext, NULL) * 60 * 60 * 24; return T_NUMBER; }
+[0-9]+(\.[0-9]+)?h { yylval->num = strtod(yytext, NULL) * 60 * 60; return T_NUMBER; }
+[0-9]+(\.[0-9]+)?m { yylval->num = strtod(yytext, NULL) * 60; return T_NUMBER; }
+[0-9]+(\.[0-9]+)?s { yylval->num = strtod(yytext, NULL); return T_NUMBER; }
+[0-9]+(\.[0-9]+)? { yylval->num = strtod(yytext, NULL); return T_NUMBER; }
+= { yylval->csop = OpSetLiteral; return T_SET; }
+\+= { yylval->csop = OpSetAdd; return T_SET_ADD; }
+-= { yylval->csop = OpSetSubtract; return T_SET_SUBTRACT; }
+\*= { yylval->csop = OpSetMultiply; return T_SET_MULTIPLY; }
+\/= { yylval->csop = OpSetDivide; return T_SET_DIVIDE; }
+\%= { yylval->csop = OpSetModulo; return T_SET_MODULO; }
+\^= { yylval->csop = OpSetXor; return T_SET_XOR; }
+\&= { yylval->csop = OpSetBinaryAnd; return T_SET_BINARY_AND; }
+\|= { yylval->csop = OpSetBinaryOr; return T_SET_BINARY_OR; }
+\+ return T_PLUS;
+\- return T_MINUS;
+\* return T_MULTIPLY;
+\/ return T_DIVIDE_OP;
+\% return T_MODULO;
+\^ return T_XOR;
+\& return T_BINARY_AND;
+\| return T_BINARY_OR;
+\< return T_LESS_THAN;
+\> return T_GREATER_THAN;
+}
+
+\( { yyextra->m_IgnoreNewlines.push(true); return '('; }
+\) { yyextra->m_IgnoreNewlines.pop(); return ')'; }
+[\r\n]+ { yycolumn -= strlen(yytext) - 1; if (!yyextra->m_IgnoreNewlines.top()) { return T_NEWLINE; } }
+<<EOF>> { if (!yyextra->m_Eof) { yyextra->m_Eof = true; return T_NEWLINE; } else { yyterminate(); } }
+. return yytext[0];
+
+%%
+
+void ConfigCompiler::InitializeScanner()
+{
+ yylex_init(&m_Scanner);
+ yyset_extra(this, m_Scanner);
+}
+
+void ConfigCompiler::DestroyScanner()
+{
+ yylex_destroy(m_Scanner);
+}
diff --git a/lib/config/config_parser.yy b/lib/config/config_parser.yy
new file mode 100644
index 0000000..939681e
--- /dev/null
+++ b/lib/config/config_parser.yy
@@ -0,0 +1,1243 @@
+%{
+#define YYDEBUG 1
+
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "config/i2-config.hpp"
+#include "config/configcompiler.hpp"
+#include "config/expression.hpp"
+#include "config/applyrule.hpp"
+#include "config/objectrule.hpp"
+#include "base/value.hpp"
+#include "base/utility.hpp"
+#include "base/exception.hpp"
+#include "base/configtype.hpp"
+#include "base/exception.hpp"
+#include <sstream>
+#include <stack>
+
+#define YYLTYPE icinga::CompilerDebugInfo
+#define YYERROR_VERBOSE
+
+#define YYLLOC_DEFAULT(Current, Rhs, N) \
+do { \
+ if (N) { \
+ (Current).Path = YYRHSLOC(Rhs, 1).Path; \
+ (Current).FirstLine = YYRHSLOC(Rhs, 1).FirstLine; \
+ (Current).FirstColumn = YYRHSLOC(Rhs, 1).FirstColumn; \
+ (Current).LastLine = YYRHSLOC(Rhs, N).LastLine; \
+ (Current).LastColumn = YYRHSLOC(Rhs, N).LastColumn; \
+ } else { \
+ (Current).Path = YYRHSLOC(Rhs, 0).Path; \
+ (Current).FirstLine = (Current).LastLine = \
+ YYRHSLOC(Rhs, 0).LastLine; \
+ (Current).FirstColumn = (Current).LastColumn = \
+ YYRHSLOC(Rhs, 0).LastColumn; \
+ } \
+} while (0)
+
+#define YY_LOCATION_PRINT(file, loc) \
+do { \
+ std::ostringstream msgbuf; \
+ msgbuf << loc; \
+ std::string str = msgbuf.str(); \
+ fputs(str.c_str(), file); \
+} while (0)
+
+#define YYINITDEPTH 10000
+
+using namespace icinga;
+
+template<typename T>
+static void MakeRBinaryOp(Expression** result, Expression *left, Expression *right, const DebugInfo& diLeft, const DebugInfo& diRight)
+{
+ *result = new T(std::unique_ptr<Expression>(left), std::unique_ptr<Expression>(right), DebugInfoRange(diLeft, diRight));
+}
+
+%}
+
+%pure-parser
+
+%locations
+%defines
+%error-verbose
+%glr-parser
+
+%parse-param { std::vector<std::pair<std::unique_ptr<Expression>, EItemInfo> > *llist }
+%parse-param { ConfigCompiler *context }
+%lex-param { void *scanner }
+
+%union {
+ String *text;
+ double num;
+ bool boolean;
+ icinga::Expression *expr;
+ icinga::DictExpression *dexpr;
+ CombinedSetOp csop;
+ std::vector<String> *slist;
+ std::vector<std::pair<std::unique_ptr<Expression>, EItemInfo> > *llist;
+ std::vector<std::unique_ptr<Expression> > *elist;
+ std::vector<std::pair<std::unique_ptr<Expression>, std::unique_ptr<Expression> > > *ebranchlist;
+ std::pair<std::unique_ptr<Expression>, std::unique_ptr<Expression> > *ebranch;
+ std::pair<String, std::unique_ptr<Expression> > *cvitem;
+ std::map<String, std::unique_ptr<Expression> > *cvlist;
+ icinga::ScopeSpecifier scope;
+}
+
+%token T_NEWLINE "new-line"
+%token <text> T_STRING
+%token <text> T_STRING_ANGLE
+%token <num> T_NUMBER
+%token <boolean> T_BOOLEAN
+%token T_NULL
+%token <text> T_IDENTIFIER
+
+%token <csop> T_SET "= (T_SET)"
+%token <csop> T_SET_ADD "+= (T_SET_ADD)"
+%token <csop> T_SET_SUBTRACT "-= (T_SET_SUBTRACT)"
+%token <csop> T_SET_MULTIPLY "*= (T_SET_MULTIPLY)"
+%token <csop> T_SET_DIVIDE "/= (T_SET_DIVIDE)"
+%token <csop> T_SET_MODULO "%= (T_SET_MODULO)"
+%token <csop> T_SET_XOR "^= (T_SET_XOR)"
+%token <csop> T_SET_BINARY_AND "&= (T_SET_BINARY_AND)"
+%token <csop> T_SET_BINARY_OR "|= (T_SET_BINARY_OR)"
+
+%token T_SHIFT_LEFT "<< (T_SHIFT_LEFT)"
+%token T_SHIFT_RIGHT ">> (T_SHIFT_RIGHT)"
+%token T_EQUAL "== (T_EQUAL)"
+%token T_NOT_EQUAL "!= (T_NOT_EQUAL)"
+%token T_IN "in (T_IN)"
+%token T_NOT_IN "!in (T_NOT_IN)"
+%token T_LOGICAL_AND "&& (T_LOGICAL_AND)"
+%token T_LOGICAL_OR "|| (T_LOGICAL_OR)"
+%token T_LESS_THAN_OR_EQUAL "<= (T_LESS_THAN_OR_EQUAL)"
+%token T_GREATER_THAN_OR_EQUAL ">= (T_GREATER_THAN_OR_EQUAL)"
+%token T_PLUS "+ (T_PLUS)"
+%token T_MINUS "- (T_MINUS)"
+%token T_MULTIPLY "* (T_MULTIPLY)"
+%token T_DIVIDE_OP "/ (T_DIVIDE_OP)"
+%token T_MODULO "% (T_MODULO)"
+%token T_XOR "^ (T_XOR)"
+%token T_BINARY_AND "& (T_BINARY_AND)"
+%token T_BINARY_OR "| (T_BINARY_OR)"
+%token T_LESS_THAN "< (T_LESS_THAN)"
+%token T_GREATER_THAN "> (T_GREATER_THAN)"
+
+%token T_VAR "var (T_VAR)"
+%token T_GLOBALS "globals (T_GLOBALS)"
+%token T_LOCALS "locals (T_LOCALS)"
+%token T_CONST "const (T_CONST)"
+%token T_DEFAULT "default (T_DEFAULT)"
+%token T_IGNORE_ON_ERROR "ignore_on_error (T_IGNORE_ON_ERROR)"
+%token T_CURRENT_FILENAME "current_filename (T_CURRENT_FILENAME)"
+%token T_CURRENT_LINE "current_line (T_CURRENT_LINE)"
+%token T_DEBUGGER "debugger (T_DEBUGGER)"
+%token T_NAMESPACE "namespace (T_NAMESPACE)"
+%token T_USE "use (T_USE)"
+%token T_USING "using (T_USING)"
+%token T_OBJECT "object (T_OBJECT)"
+%token T_TEMPLATE "template (T_TEMPLATE)"
+%token T_INCLUDE "include (T_INCLUDE)"
+%token T_INCLUDE_RECURSIVE "include_recursive (T_INCLUDE_RECURSIVE)"
+%token T_INCLUDE_ZONES "include_zones (T_INCLUDE_ZONES)"
+%token T_LIBRARY "library (T_LIBRARY)"
+%token T_APPLY "apply (T_APPLY)"
+%token T_TO "to (T_TO)"
+%token T_WHERE "where (T_WHERE)"
+%token T_IMPORT "import (T_IMPORT)"
+%token T_ASSIGN "assign (T_ASSIGN)"
+%token T_IGNORE "ignore (T_IGNORE)"
+%token T_FUNCTION "function (T_FUNCTION)"
+%token T_RETURN "return (T_RETURN)"
+%token T_BREAK "break (T_BREAK)"
+%token T_CONTINUE "continue (T_CONTINUE)"
+%token T_FOR "for (T_FOR)"
+%token T_IF "if (T_IF)"
+%token T_ELSE "else (T_ELSE)"
+%token T_WHILE "while (T_WHILE)"
+%token T_THROW "throw (T_THROW)"
+%token T_TRY "try (T_TRY)"
+%token T_EXCEPT "except (T_EXCEPT)"
+%token T_FOLLOWS "=> (T_FOLLOWS)"
+%token T_NULLARY_LAMBDA_BEGIN "{{ (T_NULLARY_LAMBDA_BEGIN)"
+%token T_NULLARY_LAMBDA_END "}} (T_NULLARY_LAMBDA_END)"
+
+%type <text> identifier
+%type <elist> rterm_items
+%type <elist> rterm_items_inner
+%type <slist> identifier_items
+%type <slist> identifier_items_inner
+%type <csop> combined_set_op
+%type <llist> statements
+%type <llist> lterm_items
+%type <llist> lterm_items_inner
+%type <expr> rterm
+%type <expr> rterm_array
+%type <dexpr> rterm_dict
+%type <dexpr> rterm_scope_require_side_effect
+%type <dexpr> rterm_scope
+%type <ebranchlist> else_if_branches
+%type <ebranch> else_if_branch
+%type <expr> rterm_side_effect
+%type <expr> rterm_no_side_effect
+%type <expr> rterm_no_side_effect_no_dict
+%type <expr> lterm
+%type <expr> object
+%type <expr> apply
+%type <expr> optional_rterm
+%type <text> target_type_specifier
+%type <boolean> default_specifier
+%type <boolean> ignore_specifier
+%type <cvlist> use_specifier
+%type <cvlist> use_specifier_items
+%type <cvitem> use_specifier_item
+%type <num> object_declaration
+
+%right T_FOLLOWS
+%right T_INCLUDE T_INCLUDE_RECURSIVE T_INCLUDE_ZONES T_OBJECT T_TEMPLATE T_APPLY T_IMPORT T_ASSIGN T_IGNORE T_WHERE
+%right T_FUNCTION T_FOR
+%left T_SET T_SET_ADD T_SET_SUBTRACT T_SET_MULTIPLY T_SET_DIVIDE T_SET_MODULO T_SET_XOR T_SET_BINARY_AND T_SET_BINARY_OR
+%right '?' ':'
+%left T_LOGICAL_OR
+%left T_LOGICAL_AND
+%left T_RETURN T_BREAK T_CONTINUE
+%left T_IDENTIFIER
+%left T_BINARY_OR
+%left T_XOR
+%left T_BINARY_AND
+%nonassoc T_EQUAL T_NOT_EQUAL
+%left T_IN T_NOT_IN
+%nonassoc T_LESS_THAN T_LESS_THAN_OR_EQUAL T_GREATER_THAN T_GREATER_THAN_OR_EQUAL
+%left T_SHIFT_LEFT T_SHIFT_RIGHT
+%left T_PLUS T_MINUS
+%left T_MULTIPLY T_DIVIDE_OP T_MODULO
+%left UNARY_MINUS UNARY_PLUS
+%right REF_OP DEREF_OP
+%right '!' '~'
+%left '.' '(' '['
+%left T_VAR T_THIS T_GLOBALS T_LOCALS
+%right ';' ','
+%right T_NEWLINE
+%{
+
+int yylex(YYSTYPE *lvalp, YYLTYPE *llocp, void *scanner);
+
+extern int yydebug;
+
+void yyerror(const YYLTYPE *locp, std::vector<std::pair<std::unique_ptr<Expression>, EItemInfo> > *, ConfigCompiler *context, const char *err)
+{
+ bool incomplete = context && context->m_Eof && (context->m_OpenBraces > 0);
+ BOOST_THROW_EXCEPTION(ScriptError(err, *locp, incomplete));
+}
+
+int yyparse(std::vector<std::pair<std::unique_ptr<Expression>, EItemInfo> > *llist, ConfigCompiler *context);
+
+static void BeginFlowControlBlock(ConfigCompiler *compiler, int allowedTypes, bool inherit)
+{
+ if (inherit)
+ allowedTypes |= compiler->m_FlowControlInfo.top();
+
+ compiler->m_FlowControlInfo.push(allowedTypes);
+}
+
+static void EndFlowControlBlock(ConfigCompiler *compiler)
+{
+ compiler->m_FlowControlInfo.pop();
+}
+
+static void UseFlowControl(ConfigCompiler *compiler, FlowControlType type, const CompilerDebugInfo& location)
+{
+ int fci = compiler->m_FlowControlInfo.top();
+
+ if ((type & fci) != type)
+ BOOST_THROW_EXCEPTION(ScriptError("Invalid flow control statement.", location));
+}
+
+std::unique_ptr<Expression> ConfigCompiler::Compile()
+{
+ std::vector<std::pair<std::unique_ptr<Expression>, EItemInfo> > llist;
+
+ //yydebug = 1;
+
+ m_IgnoreNewlines.push(false);
+ BeginFlowControlBlock(this, 0, false);
+
+ if (yyparse(&llist, this) != 0)
+ return NULL;
+
+ EndFlowControlBlock(this);
+ m_IgnoreNewlines.pop();
+
+ std::vector<std::unique_ptr<Expression> > dlist;
+ decltype(llist.size()) num = 0;
+ for (auto& litem : llist) {
+ if (!litem.second.SideEffect && num != llist.size() - 1) {
+ yyerror(&litem.second.DebugInfo, NULL, NULL, "Value computed is not used.");
+ }
+ dlist.emplace_back(std::move(litem.first));
+ num++;
+ }
+
+ std::unique_ptr<DictExpression> expr{new DictExpression(std::move(dlist))};
+ expr->MakeInline();
+ return std::move(expr);
+}
+
+#define scanner (context->GetScanner())
+
+%}
+
+%%
+script: statements
+ {
+ llist->swap(*$1);
+ delete $1;
+ }
+ ;
+
+statements: optional_newlines lterm_items
+ {
+ $$ = $2;
+ }
+ ;
+
+lterm_items: /* empty */
+ {
+ $$ = new std::vector<std::pair<std::unique_ptr<Expression>, EItemInfo> >();
+ }
+ | lterm_items_inner
+ | lterm_items_inner sep
+ ;
+
+lterm_items_inner: lterm %dprec 2
+ {
+ $$ = new std::vector<std::pair<std::unique_ptr<Expression>, EItemInfo> >();
+ $$->emplace_back(std::unique_ptr<Expression>($1), EItemInfo{true, @1});
+ }
+ | rterm_no_side_effect
+ {
+ $$ = new std::vector<std::pair<std::unique_ptr<Expression>, EItemInfo> >();
+ $$->emplace_back(std::unique_ptr<Expression>($1), EItemInfo{false, @1});
+ }
+ | lterm_items_inner sep lterm %dprec 1
+ {
+ if ($1)
+ $$ = $1;
+ else
+ $$ = new std::vector<std::pair<std::unique_ptr<Expression>, EItemInfo> >();
+
+ if ($3) {
+ $$->emplace_back(std::unique_ptr<Expression>($3), EItemInfo{true, @3});
+ }
+ }
+ | lterm_items_inner sep rterm_no_side_effect %dprec 1
+ {
+ if ($1)
+ $$ = $1;
+ else
+ $$ = new std::vector<std::pair<std::unique_ptr<Expression>, EItemInfo> >();
+
+ if ($3) {
+ $$->emplace_back(std::unique_ptr<Expression>($3), EItemInfo{false, @3});
+ }
+ }
+ ;
+
+identifier: T_IDENTIFIER
+ | T_STRING
+ ;
+
+object:
+ {
+ context->m_ObjectAssign.push(true);
+ context->m_SeenAssign.push(false);
+ context->m_SeenIgnore.push(false);
+ context->m_Assign.push(0);
+ context->m_Ignore.push(0);
+ }
+ object_declaration rterm optional_rterm use_specifier default_specifier ignore_specifier
+ {
+ BeginFlowControlBlock(context, FlowControlReturn, false);
+ }
+ rterm_scope_require_side_effect
+ {
+ EndFlowControlBlock(context);
+
+ context->m_ObjectAssign.pop();
+
+ bool abstract = $2;
+ bool defaultTmpl = $6;
+
+ if (!abstract && defaultTmpl)
+ BOOST_THROW_EXCEPTION(ScriptError("'default' keyword is invalid for object definitions", @6));
+
+ bool seen_assign = context->m_SeenAssign.top();
+ context->m_SeenAssign.pop();
+
+ bool seen_ignore = context->m_SeenIgnore.top();
+ context->m_SeenIgnore.pop();
+
+ std::unique_ptr<Expression> ignore{std::move(context->m_Ignore.top())};
+ context->m_Ignore.pop();
+
+ std::unique_ptr<Expression> assign{std::move(context->m_Assign.top())};
+ context->m_Assign.pop();
+
+ std::unique_ptr<Expression> filter;
+
+ if (seen_assign) {
+ if (ignore) {
+ std::unique_ptr<Expression> rex{new LogicalNegateExpression(std::move(ignore), DebugInfoRange(@2, @5))};
+
+ filter.reset(new LogicalAndExpression(std::move(assign), std::move(rex), DebugInfoRange(@2, @5)));
+ } else
+ filter.swap(assign);
+ } else if (seen_ignore) {
+ BOOST_THROW_EXCEPTION(ScriptError("object rule 'ignore where' cannot be used without 'assign where'", DebugInfoRange(@2, @4)));
+ }
+
+ $$ = new ObjectExpression(abstract, std::unique_ptr<Expression>($3), std::unique_ptr<Expression>($4),
+ std::move(filter), context->GetZone(), context->GetPackage(), std::move(*$5), $6, $7,
+ std::unique_ptr<Expression>($9), DebugInfoRange(@2, @7));
+ delete $5;
+ }
+ ;
+
+object_declaration: T_OBJECT
+ {
+ $$ = false;
+ }
+ | T_TEMPLATE
+ {
+ $$ = true;
+ }
+ ;
+
+identifier_items: /* empty */
+ {
+ $$ = new std::vector<String>();
+ }
+ | identifier_items_inner
+ | identifier_items_inner ','
+ ;
+
+identifier_items_inner: identifier
+ {
+ $$ = new std::vector<String>();
+ $$->emplace_back(std::move(*$1));
+ delete $1;
+ }
+ | identifier_items_inner ',' identifier
+ {
+ if ($1)
+ $$ = $1;
+ else
+ $$ = new std::vector<String>();
+
+ $$->emplace_back(std::move(*$3));
+ delete $3;
+ }
+ ;
+
+combined_set_op: T_SET
+ | T_SET_ADD
+ | T_SET_SUBTRACT
+ | T_SET_MULTIPLY
+ | T_SET_DIVIDE
+ | T_SET_MODULO
+ | T_SET_XOR
+ | T_SET_BINARY_AND
+ | T_SET_BINARY_OR
+ ;
+
+optional_var: /* empty */
+ | T_VAR
+ ;
+
+lterm: T_LIBRARY rterm
+ {
+ $$ = new LibraryExpression(std::unique_ptr<Expression>($2), @$);
+ }
+ | rterm combined_set_op rterm
+ {
+ $$ = new SetExpression(std::unique_ptr<Expression>($1), $2, std::unique_ptr<Expression>($3), @$);
+ }
+ | T_INCLUDE rterm
+ {
+ $$ = new IncludeExpression(Utility::DirName(context->GetPath()), std::unique_ptr<Expression>($2), NULL, NULL, IncludeRegular, false, context->GetZone(), context->GetPackage(), @$);
+ }
+ | T_INCLUDE T_STRING_ANGLE
+ {
+ $$ = new IncludeExpression(Utility::DirName(context->GetPath()), MakeLiteral(std::move(*$2)), NULL, NULL, IncludeRegular, true, context->GetZone(), context->GetPackage(), @$);
+ delete $2;
+ }
+ | T_INCLUDE_RECURSIVE rterm
+ {
+ $$ = new IncludeExpression(Utility::DirName(context->GetPath()), std::unique_ptr<Expression>($2), MakeLiteral("*.conf"), NULL, IncludeRecursive, false, context->GetZone(), context->GetPackage(), @$);
+ }
+ | T_INCLUDE_RECURSIVE rterm ',' rterm
+ {
+ $$ = new IncludeExpression(Utility::DirName(context->GetPath()), std::unique_ptr<Expression>($2), std::unique_ptr<Expression>($4), NULL, IncludeRecursive, false, context->GetZone(), context->GetPackage(), @$);
+ }
+ | T_INCLUDE_ZONES rterm ',' rterm
+ {
+ $$ = new IncludeExpression(Utility::DirName(context->GetPath()), std::unique_ptr<Expression>($4), MakeLiteral("*.conf"), std::unique_ptr<Expression>($2), IncludeZones, false, context->GetZone(), context->GetPackage(), @$);
+ }
+ | T_INCLUDE_ZONES rterm ',' rterm ',' rterm
+ {
+ $$ = new IncludeExpression(Utility::DirName(context->GetPath()), std::unique_ptr<Expression>($4), std::unique_ptr<Expression>($6), std::unique_ptr<Expression>($2), IncludeZones, false, context->GetZone(), context->GetPackage(), @$);
+ }
+ | T_IMPORT rterm
+ {
+ $$ = new ImportExpression(std::unique_ptr<Expression>($2), @$);
+ }
+ | T_ASSIGN T_WHERE
+ {
+ BeginFlowControlBlock(context, FlowControlReturn, false);
+ }
+ rterm_scope %dprec 2
+ {
+ EndFlowControlBlock(context);
+
+ if ((context->m_Apply.empty() || !context->m_Apply.top()) && (context->m_ObjectAssign.empty() || !context->m_ObjectAssign.top()))
+ BOOST_THROW_EXCEPTION(ScriptError("'assign' keyword not valid in this context.", @$));
+
+ context->m_SeenAssign.top() = true;
+
+ if (context->m_Assign.top())
+ context->m_Assign.top() = new LogicalOrExpression(std::unique_ptr<Expression>(context->m_Assign.top()), std::unique_ptr<Expression>($4), @$);
+ else
+ context->m_Assign.top() = $4;
+
+ $$ = MakeLiteralRaw();
+ }
+ | T_ASSIGN T_WHERE rterm %dprec 1
+ {
+ ASSERT(!dynamic_cast<DictExpression *>($3));
+
+ if ((context->m_Apply.empty() || !context->m_Apply.top()) && (context->m_ObjectAssign.empty() || !context->m_ObjectAssign.top()))
+ BOOST_THROW_EXCEPTION(ScriptError("'assign' keyword not valid in this context.", @$));
+
+ context->m_SeenAssign.top() = true;
+
+ if (context->m_Assign.top())
+ context->m_Assign.top() = new LogicalOrExpression(std::unique_ptr<Expression>(context->m_Assign.top()), std::unique_ptr<Expression>($3), @$);
+ else
+ context->m_Assign.top() = $3;
+
+ $$ = MakeLiteralRaw();
+ }
+ | T_IGNORE T_WHERE
+ {
+ BeginFlowControlBlock(context, FlowControlReturn, false);
+ }
+ rterm_scope %dprec 2
+ {
+ EndFlowControlBlock(context);
+
+ if ((context->m_Apply.empty() || !context->m_Apply.top()) && (context->m_ObjectAssign.empty() || !context->m_ObjectAssign.top()))
+ BOOST_THROW_EXCEPTION(ScriptError("'ignore' keyword not valid in this context.", @$));
+
+ context->m_SeenIgnore.top() = true;
+
+ if (context->m_Ignore.top())
+ context->m_Ignore.top() = new LogicalOrExpression(std::unique_ptr<Expression>(context->m_Ignore.top()), std::unique_ptr<Expression>($4), @$);
+ else
+ context->m_Ignore.top() = $4;
+
+ $$ = MakeLiteralRaw();
+ }
+ | T_IGNORE T_WHERE rterm %dprec 1
+ {
+ ASSERT(!dynamic_cast<DictExpression *>($3));
+
+ if ((context->m_Apply.empty() || !context->m_Apply.top()) && (context->m_ObjectAssign.empty() || !context->m_ObjectAssign.top()))
+ BOOST_THROW_EXCEPTION(ScriptError("'ignore' keyword not valid in this context.", @$));
+
+ context->m_SeenIgnore.top() = true;
+
+ if (context->m_Ignore.top())
+ context->m_Ignore.top() = new LogicalOrExpression(std::unique_ptr<Expression>(context->m_Ignore.top()), std::unique_ptr<Expression>($3), @$);
+ else
+ context->m_Ignore.top() = $3;
+
+ $$ = MakeLiteralRaw();
+ }
+ | T_RETURN optional_rterm
+ {
+ UseFlowControl(context, FlowControlReturn, @$);
+ $$ = new ReturnExpression(std::unique_ptr<Expression>($2), @$);
+ }
+ | T_BREAK
+ {
+ UseFlowControl(context, FlowControlBreak, @$);
+ $$ = new BreakExpression(@$);
+ }
+ | T_CONTINUE
+ {
+ UseFlowControl(context, FlowControlContinue, @$);
+ $$ = new ContinueExpression(@$);
+ }
+ | T_DEBUGGER
+ {
+ $$ = new BreakpointExpression(@$);
+ }
+ | T_NAMESPACE rterm
+ {
+ BeginFlowControlBlock(context, FlowControlReturn, false);
+ }
+ rterm_scope_require_side_effect
+ {
+ EndFlowControlBlock(context);
+
+ std::unique_ptr<Expression> expr{$2};
+ BindToScope(expr, ScopeGlobal);
+ $$ = new SetExpression(std::move(expr), OpSetLiteral, std::unique_ptr<Expression>(new NamespaceExpression(std::unique_ptr<Expression>($4), @$)), @$);
+ }
+ | T_USING rterm
+ {
+ Expression::Ptr expr{$2};
+ context->AddImport(std::move(expr));
+ $$ = MakeLiteralRaw();
+ }
+ | apply
+ | object
+ | T_FOR '(' optional_var identifier T_FOLLOWS optional_var identifier T_IN rterm ')'
+ {
+ BeginFlowControlBlock(context, FlowControlContinue | FlowControlBreak, true);
+ }
+ rterm_scope_require_side_effect
+ {
+ EndFlowControlBlock(context);
+
+ $$ = new ForExpression(std::move(*$4), std::move(*$7), std::unique_ptr<Expression>($9), std::unique_ptr<Expression>($12), @$);
+ delete $4;
+ delete $7;
+ }
+ | T_FOR '(' optional_var identifier T_IN rterm ')'
+ {
+ BeginFlowControlBlock(context, FlowControlContinue | FlowControlBreak, true);
+ }
+ rterm_scope_require_side_effect
+ {
+ EndFlowControlBlock(context);
+
+ $$ = new ForExpression(std::move(*$4), "", std::unique_ptr<Expression>($6), std::unique_ptr<Expression>($9), @$);
+ delete $4;
+ }
+ | T_FUNCTION identifier '(' identifier_items ')' use_specifier
+ {
+ BeginFlowControlBlock(context, FlowControlReturn, false);
+ }
+ rterm_scope
+ {
+ EndFlowControlBlock(context);
+
+ std::unique_ptr<FunctionExpression> fexpr{new FunctionExpression(*$2, std::move(*$4), std::move(*$6), std::unique_ptr<Expression>($8), @$)};
+ delete $4;
+ delete $6;
+
+ $$ = new SetExpression(MakeIndexer(ScopeThis, std::move(*$2)), OpSetLiteral, std::move(fexpr), @$);
+ delete $2;
+ }
+ | T_CONST T_IDENTIFIER T_SET rterm
+ {
+ $$ = new SetConstExpression(std::move(*$2), std::unique_ptr<Expression>($4), @$);
+ delete $2;
+ }
+ | T_VAR rterm
+ {
+ std::unique_ptr<Expression> expr{$2};
+ BindToScope(expr, ScopeLocal);
+ $$ = new SetExpression(std::move(expr), OpSetLiteral, MakeLiteral(), @$);
+ }
+ | T_VAR rterm combined_set_op rterm
+ {
+ std::unique_ptr<Expression> expr{$2};
+ BindToScope(expr, ScopeLocal);
+ $$ = new SetExpression(std::move(expr), $3, std::unique_ptr<Expression>($4), @$);
+ }
+ | T_WHILE '(' rterm ')'
+ {
+ BeginFlowControlBlock(context, FlowControlContinue | FlowControlBreak, true);
+ }
+ rterm_scope
+ {
+ EndFlowControlBlock(context);
+
+ $$ = new WhileExpression(std::unique_ptr<Expression>($3), std::unique_ptr<Expression>($6), @$);
+ }
+ | T_THROW rterm
+ {
+ $$ = new ThrowExpression(std::unique_ptr<Expression>($2), false, @$);
+ }
+ | T_TRY rterm_scope T_EXCEPT rterm_scope
+ {
+ $$ = new TryExceptExpression(std::unique_ptr<Expression>($2), std::unique_ptr<Expression>($4), @$);
+ }
+ | rterm_side_effect
+ ;
+
+rterm_items: /* empty */
+ {
+ $$ = new std::vector<std::unique_ptr<Expression> >();
+ }
+ | rterm_items_inner
+ | rterm_items_inner ',' optional_newlines
+ | rterm_items_inner newlines
+ ;
+
+rterm_items_inner: rterm
+ {
+ $$ = new std::vector<std::unique_ptr<Expression> >();
+ $$->emplace_back($1);
+ }
+ | rterm_items_inner ',' optional_newlines rterm
+ {
+ $$ = $1;
+ $$->emplace_back($4);
+ }
+ ;
+
+rterm_array: '['
+ {
+ context->m_OpenBraces++;
+ }
+ optional_newlines rterm_items ']'
+ {
+ context->m_OpenBraces--;
+ $$ = new ArrayExpression(std::move(*$4), @$);
+ delete $4;
+ }
+ ;
+
+rterm_dict: '{'
+ {
+ BeginFlowControlBlock(context, 0, false);
+ context->m_IgnoreNewlines.push(false);
+ context->m_OpenBraces++;
+ }
+ statements '}'
+ {
+ EndFlowControlBlock(context);
+ context->m_OpenBraces--;
+ context->m_IgnoreNewlines.pop();
+ std::vector<std::unique_ptr<Expression> > dlist;
+ for (auto& litem : *$3) {
+ if (!litem.second.SideEffect)
+ yyerror(&litem.second.DebugInfo, NULL, NULL, "Value computed is not used.");
+ dlist.emplace_back(std::move(litem.first));
+ }
+ delete $3;
+ $$ = new DictExpression(std::move(dlist), @$);
+ }
+ ;
+
+rterm_scope_require_side_effect: '{'
+ {
+ context->m_IgnoreNewlines.push(false);
+ context->m_OpenBraces++;
+ }
+ statements '}'
+ {
+ context->m_OpenBraces--;
+ context->m_IgnoreNewlines.pop();
+ std::vector<std::unique_ptr<Expression> > dlist;
+ for (auto& litem : *$3) {
+ if (!litem.second.SideEffect)
+ yyerror(&litem.second.DebugInfo, NULL, NULL, "Value computed is not used.");
+ dlist.emplace_back(std::move(litem.first));
+ }
+ delete $3;
+ $$ = new DictExpression(std::move(dlist), @$);
+ $$->MakeInline();
+ }
+ ;
+
+rterm_scope: '{'
+ {
+ context->m_IgnoreNewlines.push(false);
+ context->m_OpenBraces++;
+ }
+ statements '}'
+ {
+ context->m_OpenBraces--;
+ context->m_IgnoreNewlines.pop();
+ std::vector<std::unique_ptr<Expression> > dlist;
+ decltype($3->size()) num = 0;
+ for (auto& litem : *$3) {
+ if (!litem.second.SideEffect && num != $3->size() - 1)
+ yyerror(&litem.second.DebugInfo, NULL, NULL, "Value computed is not used.");
+ dlist.emplace_back(std::move(litem.first));
+ num++;
+ }
+ delete $3;
+ $$ = new DictExpression(std::move(dlist), @$);
+ $$->MakeInline();
+ }
+ ;
+
+else_if_branch: T_ELSE T_IF '(' rterm ')' rterm_scope
+ {
+ $$ = new std::pair<std::unique_ptr<Expression>, std::unique_ptr<Expression> >(std::unique_ptr<Expression>($4), std::unique_ptr<Expression>($6));
+ }
+ ;
+
+else_if_branches: /* empty */
+ {
+ $$ = new std::vector<std::pair<std::unique_ptr<Expression>, std::unique_ptr<Expression> > >();
+ }
+ | else_if_branches else_if_branch
+ {
+ $$ = $1;
+ $$->emplace_back(std::move(*$2));
+ delete $2;
+ }
+ ;
+
+rterm_side_effect: rterm '(' rterm_items ')'
+ {
+ $$ = new FunctionCallExpression(std::unique_ptr<Expression>($1), std::move(*$3), @$);
+ delete $3;
+ }
+ | T_IF '(' rterm ')' rterm_scope else_if_branches
+ {
+ std::vector<std::pair<std::unique_ptr<Expression>, std::unique_ptr<Expression> > > ebranches;
+ $6->swap(ebranches);
+ delete $6;
+
+ std::unique_ptr<Expression> afalse;
+
+ for (int i = ebranches.size() - 1; i >= 0; i--) {
+ auto& ebranch = ebranches[i];
+ afalse.reset(new ConditionalExpression(std::move(ebranch.first), std::move(ebranch.second), std::move(afalse), @6));
+ }
+
+ $$ = new ConditionalExpression(std::unique_ptr<Expression>($3), std::unique_ptr<Expression>($5), std::move(afalse), @$);
+ }
+ | T_IF '(' rterm ')' rterm_scope else_if_branches T_ELSE rterm_scope
+ {
+ std::vector<std::pair<std::unique_ptr<Expression>, std::unique_ptr<Expression> > > ebranches;
+ $6->swap(ebranches);
+ delete $6;
+
+ $8->MakeInline();
+
+ std::unique_ptr<Expression> afalse{$8};
+
+ for (int i = ebranches.size() - 1; i >= 0; i--) {
+ auto& ebranch = ebranches[i];
+ afalse.reset(new ConditionalExpression(std::move(ebranch.first), std::move(ebranch.second), std::move(afalse), @6));
+ }
+
+ $$ = new ConditionalExpression(std::unique_ptr<Expression>($3), std::unique_ptr<Expression>($5), std::move(afalse), @$);
+ }
+ | rterm '?' rterm ':' rterm
+ {
+ $$ = new ConditionalExpression(std::unique_ptr<Expression>($1), std::unique_ptr<Expression>($3), std::unique_ptr<Expression>($5), @$);
+ }
+ ;
+
+rterm_no_side_effect_no_dict: T_STRING
+ {
+ $$ = MakeLiteralRaw(std::move(*$1));
+ delete $1;
+ }
+ | T_NUMBER
+ {
+ $$ = MakeLiteralRaw($1);
+ }
+ | T_BOOLEAN
+ {
+ $$ = MakeLiteralRaw($1);
+ }
+ | T_NULL
+ {
+ $$ = MakeLiteralRaw();
+ }
+ | rterm '.' T_IDENTIFIER %dprec 2
+ {
+ $$ = new IndexerExpression(std::unique_ptr<Expression>($1), MakeLiteral(std::move(*$3)), @$);
+ delete $3;
+ }
+ | rterm '[' rterm ']'
+ {
+ $$ = new IndexerExpression(std::unique_ptr<Expression>($1), std::unique_ptr<Expression>($3), @$);
+ }
+ | T_IDENTIFIER
+ {
+ $$ = new VariableExpression(std::move(*$1), context->GetImports(), @1);
+ delete $1;
+ }
+ | T_MULTIPLY rterm %prec DEREF_OP
+ {
+ $$ = new DerefExpression(std::unique_ptr<Expression>($2), @$);
+ }
+ | T_BINARY_AND rterm %prec REF_OP
+ {
+ $$ = new RefExpression(std::unique_ptr<Expression>($2), @$);
+ }
+ | '!' rterm
+ {
+ $$ = new LogicalNegateExpression(std::unique_ptr<Expression>($2), @$);
+ }
+ | '~' rterm
+ {
+ $$ = new NegateExpression(std::unique_ptr<Expression>($2), @$);
+ }
+ | T_PLUS rterm %prec UNARY_PLUS
+ {
+ $$ = $2;
+ }
+ | T_MINUS rterm %prec UNARY_MINUS
+ {
+ $$ = new SubtractExpression(MakeLiteral(0), std::unique_ptr<Expression>($2), @$);
+ }
+ | T_THIS
+ {
+ $$ = new GetScopeExpression(ScopeThis);
+ }
+ | T_GLOBALS
+ {
+ $$ = new GetScopeExpression(ScopeGlobal);
+ }
+ | T_LOCALS
+ {
+ $$ = new GetScopeExpression(ScopeLocal);
+ }
+ | T_CURRENT_FILENAME
+ {
+ $$ = MakeLiteralRaw(@$.Path);
+ }
+ | T_CURRENT_LINE
+ {
+ $$ = MakeLiteralRaw(@$.FirstLine);
+ }
+ | identifier T_FOLLOWS
+ {
+ BeginFlowControlBlock(context, FlowControlReturn, false);
+ }
+ rterm_scope %dprec 2
+ {
+ EndFlowControlBlock(context);
+
+ std::vector<String> args;
+ args.emplace_back(std::move(*$1));
+ delete $1;
+
+ $$ = new FunctionExpression("<anonymous>", std::move(args), {}, std::unique_ptr<Expression>($4), @$);
+ }
+ | identifier T_FOLLOWS rterm %dprec 1
+ {
+ ASSERT(!dynamic_cast<DictExpression *>($3));
+
+ std::vector<String> args;
+ args.emplace_back(std::move(*$1));
+ delete $1;
+
+ $$ = new FunctionExpression("<anonymous>", std::move(args), {}, std::unique_ptr<Expression>($3), @$);
+ }
+ | '(' identifier_items ')' use_specifier T_FOLLOWS
+ {
+ BeginFlowControlBlock(context, FlowControlReturn, false);
+ }
+ rterm_scope %dprec 2
+ {
+ EndFlowControlBlock(context);
+
+ $$ = new FunctionExpression("<anonymous>", std::move(*$2), std::move(*$4), std::unique_ptr<Expression>($7), @$);
+ delete $2;
+ delete $4;
+ }
+ | '(' identifier_items ')' use_specifier T_FOLLOWS rterm %dprec 1
+ {
+ ASSERT(!dynamic_cast<DictExpression *>($6));
+
+ $$ = new FunctionExpression("<anonymous>", std::move(*$2), std::move(*$4), std::unique_ptr<Expression>($6), @$);
+ delete $2;
+ delete $4;
+ }
+ | rterm_array
+ | '('
+ {
+ context->m_OpenBraces++;
+ }
+ rterm ')'
+ {
+ context->m_OpenBraces--;
+ $$ = $3;
+ }
+ | rterm T_LOGICAL_OR rterm { MakeRBinaryOp<LogicalOrExpression>(&$$, $1, $3, @1, @3); }
+ | rterm T_LOGICAL_AND rterm { MakeRBinaryOp<LogicalAndExpression>(&$$, $1, $3, @1, @3); }
+ | rterm T_BINARY_OR rterm { MakeRBinaryOp<BinaryOrExpression>(&$$, $1, $3, @1, @3); }
+ | rterm T_BINARY_AND rterm { MakeRBinaryOp<BinaryAndExpression>(&$$, $1, $3, @1, @3); }
+ | rterm T_IN rterm { MakeRBinaryOp<InExpression>(&$$, $1, $3, @1, @3); }
+ | rterm T_NOT_IN rterm { MakeRBinaryOp<NotInExpression>(&$$, $1, $3, @1, @3); }
+ | rterm T_EQUAL rterm { MakeRBinaryOp<EqualExpression>(&$$, $1, $3, @1, @3); }
+ | rterm T_NOT_EQUAL rterm { MakeRBinaryOp<NotEqualExpression>(&$$, $1, $3, @1, @3); }
+ | rterm T_LESS_THAN rterm { MakeRBinaryOp<LessThanExpression>(&$$, $1, $3, @1, @3); }
+ | rterm T_LESS_THAN_OR_EQUAL rterm { MakeRBinaryOp<LessThanOrEqualExpression>(&$$, $1, $3, @1, @3); }
+ | rterm T_GREATER_THAN rterm { MakeRBinaryOp<GreaterThanExpression>(&$$, $1, $3, @1, @3); }
+ | rterm T_GREATER_THAN_OR_EQUAL rterm { MakeRBinaryOp<GreaterThanOrEqualExpression>(&$$, $1, $3, @1, @3); }
+ | rterm T_SHIFT_LEFT rterm { MakeRBinaryOp<ShiftLeftExpression>(&$$, $1, $3, @1, @3); }
+ | rterm T_SHIFT_RIGHT rterm { MakeRBinaryOp<ShiftRightExpression>(&$$, $1, $3, @1, @3); }
+ | rterm T_PLUS rterm { MakeRBinaryOp<AddExpression>(&$$, $1, $3, @1, @3); }
+ | rterm T_MINUS rterm { MakeRBinaryOp<SubtractExpression>(&$$, $1, $3, @1, @3); }
+ | rterm T_MULTIPLY rterm { MakeRBinaryOp<MultiplyExpression>(&$$, $1, $3, @1, @3); }
+ | rterm T_DIVIDE_OP rterm { MakeRBinaryOp<DivideExpression>(&$$, $1, $3, @1, @3); }
+ | rterm T_MODULO rterm { MakeRBinaryOp<ModuloExpression>(&$$, $1, $3, @1, @3); }
+ | rterm T_XOR rterm { MakeRBinaryOp<XorExpression>(&$$, $1, $3, @1, @3); }
+ | T_FUNCTION '(' identifier_items ')' use_specifier
+ {
+ BeginFlowControlBlock(context, FlowControlReturn, false);
+ }
+ rterm_scope
+ {
+ EndFlowControlBlock(context);
+
+ $$ = new FunctionExpression("<anonymous>", std::move(*$3), std::move(*$5), std::unique_ptr<Expression>($7), @$);
+ delete $3;
+ delete $5;
+ }
+ | T_NULLARY_LAMBDA_BEGIN
+ {
+ BeginFlowControlBlock(context, FlowControlReturn, false);
+ }
+ statements T_NULLARY_LAMBDA_END
+ {
+ EndFlowControlBlock(context);
+
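+		/* Build an inline dictionary body from the statements; every statement except the last one must have a side effect. */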
+ std::vector<std::unique_ptr<Expression> > dlist;
+ decltype(dlist.size()) num = 0;
+ for (auto& litem : *$3) {
+ if (!litem.second.SideEffect && num != $3->size() - 1)
+ yyerror(&litem.second.DebugInfo, NULL, NULL, "Value computed is not used.");
+ dlist.emplace_back(std::move(litem.first));
+ num++;
+ }
+ delete $3;
+ std::unique_ptr<DictExpression> aexpr{new DictExpression(std::move(dlist), @$)};
+ aexpr->MakeInline();
+
+ $$ = new FunctionExpression("<anonymous>", {}, {}, std::move(aexpr), @$);
+ }
+ ;
+
+rterm_no_side_effect:
+ rterm_no_side_effect_no_dict %dprec 1
+ | rterm_dict %dprec 2
+ {
+ std::unique_ptr<Expression> expr{$1};
+ BindToScope(expr, ScopeThis);
+ $$ = expr.release();
+ }
+ ;
+
+rterm:
+ rterm_side_effect %dprec 2
+ | rterm_no_side_effect %dprec 1
+ ;
+
+target_type_specifier: /* empty */
+ {
+ $$ = new String();
+ }
+ | T_TO identifier
+ {
+ $$ = $2;
+ }
+ ;
+
+default_specifier: /* empty */
+ {
+ $$ = false;
+ }
+ | T_DEFAULT
+ {
+ $$ = true;
+ }
+ ;
+
+ignore_specifier: /* empty */
+ {
+ $$ = false;
+ }
+ | T_IGNORE_ON_ERROR
+ {
+ $$ = true;
+ }
+ ;
+
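+/* 'use (...)' explicitly imports variables from the enclosing scope into the closure of a function or apply rule. */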
+use_specifier: /* empty */
+ {
+ $$ = new std::map<String, std::unique_ptr<Expression> >();
+ }
+ | T_USE '(' use_specifier_items ')'
+ {
+ $$ = $3;
+ }
+ ;
+
+use_specifier_items: use_specifier_item
+ {
+ $$ = new std::map<String, std::unique_ptr<Expression> >();
+ $$->emplace(std::move(*$1));
+ delete $1;
+ }
+ | use_specifier_items ',' use_specifier_item
+ {
+ $$ = $1;
+ $$->emplace(std::move(*$3));
+ delete $3;
+ }
+ ;
+
+use_specifier_item: identifier
+ {
+ std::unique_ptr<Expression> var (new VariableExpression(*$1, context->GetImports(), @1));
+ $$ = new std::pair<String, std::unique_ptr<Expression> >(std::move(*$1), std::move(var));
+ delete $1;
+ }
+ | identifier T_SET rterm
+ {
+ $$ = new std::pair<String, std::unique_ptr<Expression> >(std::move(*$1), std::unique_ptr<Expression>($3));
+ delete $1;
+ }
+ ;
+
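+/* Optional 'for (key => value in expr)' / 'for (ident in expr)' part of an 'apply' rule. */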
+apply_for_specifier: /* empty */
+ | T_FOR '(' optional_var identifier T_FOLLOWS optional_var identifier T_IN rterm ')'
+ {
+ context->m_FKVar.top() = std::move(*$4);
+ delete $4;
+
+ context->m_FVVar.top() = std::move(*$7);
+ delete $7;
+
+ context->m_FTerm.top() = $9;
+ }
+ | T_FOR '(' optional_var identifier T_IN rterm ')'
+ {
+ context->m_FKVar.top() = std::move(*$4);
+ delete $4;
+
+ context->m_FVVar.top() = "";
+
+ context->m_FTerm.top() = $6;
+ }
+ ;
+
+optional_rterm: /* empty */
+ {
+ $$ = MakeLiteralRaw();
+ }
+ | rterm
+ ;
+
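+/* An 'apply' rule creates one object of the given type per matching target (see ApplyExpression). */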
+apply:
+ {
+ context->m_Apply.push(true);
+ context->m_SeenAssign.push(false);
+ context->m_SeenIgnore.push(false);
+ context->m_Assign.push(NULL);
+ context->m_Ignore.push(NULL);
+ context->m_FKVar.push("");
+ context->m_FVVar.push("");
+ context->m_FTerm.push(NULL);
+ }
+ T_APPLY identifier optional_rterm apply_for_specifier target_type_specifier use_specifier ignore_specifier
+ {
+ BeginFlowControlBlock(context, FlowControlReturn, false);
+ }
+ rterm_scope_require_side_effect
+ {
+ EndFlowControlBlock(context);
+
+ context->m_Apply.pop();
+
+ String type = std::move(*$3);
+ delete $3;
+ String target = std::move(*$6);
+ delete $6;
+
+ if (!ApplyRule::IsValidSourceType(type))
+ BOOST_THROW_EXCEPTION(ScriptError("'apply' cannot be used with type '" + type + "'", @3));
+
+ if (!ApplyRule::IsValidTargetType(type, target)) {
+ if (target == "") {
+ auto& types (ApplyRule::GetTargetTypes(type));
+ String typeNames;
+
+ for (std::vector<String>::size_type i = 0; i < types.size(); i++) {
+ if (typeNames != "") {
+ if (i == types.size() - 1)
+ typeNames += " or ";
+ else
+ typeNames += ", ";
+ }
+
+ typeNames += "'" + types[i] + "'";
+ }
+
+ BOOST_THROW_EXCEPTION(ScriptError("'apply' target type is ambiguous (can be one of " + typeNames + "): use 'to' to specify a type", DebugInfoRange(@2, @3)));
+ } else
+ BOOST_THROW_EXCEPTION(ScriptError("'apply' target type '" + target + "' is invalid", @6));
+ }
+
+ bool seen_assign = context->m_SeenAssign.top();
+ context->m_SeenAssign.pop();
+
+		/* An 'apply' rule needs either an 'assign where' filter or a 'for' loop. */
+ if (!seen_assign && !context->m_FTerm.top())
+ BOOST_THROW_EXCEPTION(ScriptError("'apply' is missing 'assign'/'for'", DebugInfoRange(@2, @3)));
+
+ std::unique_ptr<Expression> ignore{context->m_Ignore.top()};
+ context->m_Ignore.pop();
+
+ std::unique_ptr<Expression> assign;
+
+ if (!seen_assign)
+ assign = MakeLiteral(true);
+ else
+ assign.reset(context->m_Assign.top());
+
+ context->m_Assign.pop();
+
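+		/* The effective filter is 'assign && !ignore'; a missing 'assign' in a 'for' rule matches everything. */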
+ std::unique_ptr<Expression> filter;
+
+ if (ignore) {
+ std::unique_ptr<Expression>rex{new LogicalNegateExpression(std::move(ignore), DebugInfoRange(@2, @5))};
+
+ filter.reset(new LogicalAndExpression(std::move(assign), std::move(rex), DebugInfoRange(@2, @5)));
+ } else
+ filter.swap(assign);
+
+ String fkvar = std::move(context->m_FKVar.top());
+ context->m_FKVar.pop();
+
+ String fvvar = std::move(context->m_FVVar.top());
+ context->m_FVVar.pop();
+
+ std::unique_ptr<Expression> fterm{context->m_FTerm.top()};
+ context->m_FTerm.pop();
+
+ $$ = new ApplyExpression(std::move(type), std::move(target), std::unique_ptr<Expression>($4), std::move(filter), context->GetPackage(), std::move(fkvar), std::move(fvvar), std::move(fterm), std::move(*$7), $8, std::unique_ptr<Expression>($10), DebugInfoRange(@2, @8));
+ delete $7;
+ }
+ ;
+
+newlines: T_NEWLINE
+ | T_NEWLINE newlines
+ ;
+
+optional_newlines: /* empty */
+ | newlines
+ ;
+
+/* required separator */
+sep: ',' optional_newlines
+ | ';' optional_newlines
+ | newlines
+ ;
+
+%%
diff --git a/lib/config/configcompiler.cpp b/lib/config/configcompiler.cpp
new file mode 100644
index 0000000..62f02ba
--- /dev/null
+++ b/lib/config/configcompiler.cpp
@@ -0,0 +1,364 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "config/configcompiler.hpp"
+#include "config/configitem.hpp"
+#include "base/logger.hpp"
+#include "base/utility.hpp"
+#include "base/loader.hpp"
+#include "base/context.hpp"
+#include "base/exception.hpp"
+#include <fstream>
+
+using namespace icinga;
+
+std::vector<String> ConfigCompiler::m_IncludeSearchDirs;
+std::mutex ConfigCompiler::m_ZoneDirsMutex;
+std::map<String, std::vector<ZoneFragment> > ConfigCompiler::m_ZoneDirs;
+
+/**
+ * Constructor for the ConfigCompiler class.
+ *
+ * @param path The path of the configuration file (or another name that
+ * identifies the source of the configuration text).
+ * @param input Input stream for the configuration file.
+ * @param zone The zone.
+ */
+ConfigCompiler::ConfigCompiler(String path, std::istream *input,
+ String zone, String package)
+ : m_Path(std::move(path)), m_Input(input), m_Zone(std::move(zone)),
+ m_Package(std::move(package)), m_Eof(false), m_OpenBraces(0)
+{
+ InitializeScanner();
+}
+
+/**
+ * Destructor for the ConfigCompiler class.
+ */
+ConfigCompiler::~ConfigCompiler()
+{
+ DestroyScanner();
+}
+
+/**
+ * Reads data from the input stream. Used internally by the lexer.
+ *
+ * @param buffer Where to store data.
+ * @param max_size The maximum number of bytes to read from the stream.
+ * @returns The actual number of bytes read.
+ */
+size_t ConfigCompiler::ReadInput(char *buffer, size_t max_size)
+{
+ m_Input->read(buffer, max_size);
+ return static_cast<size_t>(m_Input->gcount());
+}
+
+/**
+ * Retrieves the scanner object.
+ *
+ * @returns The scanner object.
+ */
+void *ConfigCompiler::GetScanner() const
+{
+ return m_Scanner;
+}
+
+/**
+ * Retrieves the path for the input file.
+ *
+ * @returns The path.
+ */
+const char *ConfigCompiler::GetPath() const
+{
+ return m_Path.CStr();
+}
+
+void ConfigCompiler::SetZone(const String& zone)
+{
+ m_Zone = zone;
+}
+
+String ConfigCompiler::GetZone() const
+{
+ return m_Zone;
+}
+
+void ConfigCompiler::SetPackage(const String& package)
+{
+ m_Package = package;
+}
+
+String ConfigCompiler::GetPackage() const
+{
+ return m_Package;
+}
+
+void ConfigCompiler::CollectIncludes(std::vector<std::unique_ptr<Expression> >& expressions,
+ const String& file, const String& zone, const String& package)
+{
+ try {
+ expressions.emplace_back(CompileFile(file, zone, package));
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "ConfigCompiler")
+ << "Cannot compile file '"
+ << file << "': " << DiagnosticInformation(ex);
+ }
+}
+
+/**
+ * Handles an include directive.
+ *
+ * @param relativeBase The path this include is relative to.
+ * @param path The path from the include directive.
+ * @param search Whether to search global include dirs.
+ * @param debuginfo Debug information.
+ */
+std::unique_ptr<Expression> ConfigCompiler::HandleInclude(const String& relativeBase, const String& path,
+ bool search, const String& zone, const String& package, const DebugInfo& debuginfo)
+{
+ String upath;
+
+ if (search || (IsAbsolutePath(path)))
+ upath = path;
+ else
+ upath = relativeBase + "/" + path;
+
+ String includePath = upath;
+
+ if (search) {
+ for (const String& dir : m_IncludeSearchDirs) {
+ String spath = dir + "/" + path;
+
+ if (Utility::PathExists(spath)) {
+ includePath = spath;
+ break;
+ }
+ }
+ }
+
+ std::vector<std::unique_ptr<Expression> > expressions;
+ auto funcCallback = [&expressions, zone, package](const String& file) { CollectIncludes(expressions, file, zone, package); };
+
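+	/* Compile every matching file; a non-wildcard include path that matches nothing is an error. */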
+ if (!Utility::Glob(includePath, funcCallback, GlobFile) && includePath.FindFirstOf("*?") == String::NPos) {
+ std::ostringstream msgbuf;
+ msgbuf << "Include file '" + path + "' does not exist";
+ BOOST_THROW_EXCEPTION(ScriptError(msgbuf.str(), debuginfo));
+ }
+
+ std::unique_ptr<DictExpression> expr{new DictExpression(std::move(expressions))};
+ expr->MakeInline();
+ return std::move(expr);
+}
+
+/**
+ * Handles recursive includes.
+ *
+ * @param relativeBase The path this include is relative to.
+ * @param path The directory path.
+ * @param pattern The file pattern.
+ * @param debuginfo Debug information.
+ */
+std::unique_ptr<Expression> ConfigCompiler::HandleIncludeRecursive(const String& relativeBase, const String& path,
+ const String& pattern, const String& zone, const String& package, const DebugInfo&)
+{
+ String ppath;
+
+ if (IsAbsolutePath(path))
+ ppath = path;
+ else
+ ppath = relativeBase + "/" + path;
+
+ std::vector<std::unique_ptr<Expression> > expressions;
+ Utility::GlobRecursive(ppath, pattern, [&expressions, zone, package](const String& file) {
+ CollectIncludes(expressions, file, zone, package);
+ }, GlobFile);
+
+ std::unique_ptr<DictExpression> dict{new DictExpression(std::move(expressions))};
+ dict->MakeInline();
+ return std::move(dict);
+}
+
+void ConfigCompiler::HandleIncludeZone(const String& relativeBase, const String& tag, const String& path, const String& pattern, const String& package, std::vector<std::unique_ptr<Expression> >& expressions)
+{
+ String zoneName = Utility::BaseName(path);
+
+ String ppath;
+
+ if (IsAbsolutePath(path))
+ ppath = path;
+ else
+ ppath = relativeBase + "/" + path;
+
+ RegisterZoneDir(tag, ppath, zoneName);
+
+ Utility::GlobRecursive(ppath, pattern, [&expressions, zoneName, package](const String& file) {
+ CollectIncludes(expressions, file, zoneName, package);
+ }, GlobFile);
+}
+
+/**
+ * Handles zone includes.
+ *
+ * @param relativeBase The path this include is relative to.
+ * @param tag The tag name.
+ * @param path The directory path.
+ * @param pattern The file pattern.
+ * @param debuginfo Debug information.
+ */
+std::unique_ptr<Expression> ConfigCompiler::HandleIncludeZones(const String& relativeBase, const String& tag,
+ const String& path, const String& pattern, const String& package, const DebugInfo&)
+{
+ String ppath;
+ String newRelativeBase = relativeBase;
+
+ if (IsAbsolutePath(path))
+ ppath = path;
+ else {
+ ppath = relativeBase + "/" + path;
+ newRelativeBase = ".";
+ }
+
+ std::vector<std::unique_ptr<Expression> > expressions;
+ Utility::Glob(ppath + "/*", [newRelativeBase, tag, pattern, package, &expressions](const String& path) {
+ HandleIncludeZone(newRelativeBase, tag, path, pattern, package, expressions);
+ }, GlobDirectory);
+
+ return std::unique_ptr<Expression>(new DictExpression(std::move(expressions)));
+}
+
+/**
+ * Compiles a stream.
+ *
+ * @param path A name identifying the stream.
+ * @param stream The input stream.
+ * @returns Configuration items.
+ */
+std::unique_ptr<Expression> ConfigCompiler::CompileStream(const String& path,
+ std::istream *stream, const String& zone, const String& package)
+{
+ CONTEXT("Compiling configuration stream with name '" << path << "'");
+
+ stream->exceptions(std::istream::badbit);
+
+ ConfigCompiler ctx(path, stream, zone, package);
+
+ try {
+ return ctx.Compile();
+ } catch (const ScriptError& ex) {
+ return std::unique_ptr<Expression>(new ThrowExpression(MakeLiteral(ex.what()), ex.IsIncompleteExpression(), ex.GetDebugInfo()));
+ } catch (const std::exception& ex) {
+ return std::unique_ptr<Expression>(new ThrowExpression(MakeLiteral(DiagnosticInformation(ex)), false));
+ }
+}
+
+/**
+ * Compiles a file.
+ *
+ * @param path The path.
+ * @returns Configuration items.
+ */
+std::unique_ptr<Expression> ConfigCompiler::CompileFile(const String& path, const String& zone,
+ const String& package)
+{
+ CONTEXT("Compiling configuration file '" << path << "'");
+
+ std::ifstream stream(path.CStr(), std::ifstream::in);
+
+ if (!stream)
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("std::ifstream::open")
+ << boost::errinfo_errno(errno)
+ << boost::errinfo_file_name(path));
+
+ Log(LogNotice, "ConfigCompiler")
+ << "Compiling config file: " << path;
+
+ return CompileStream(path, &stream, zone, package);
+}
+
+/**
+ * Compiles a snippet of text.
+ *
+ * @param path A name identifying the text.
+ * @param text The text.
+ * @returns Configuration items.
+ */
+std::unique_ptr<Expression> ConfigCompiler::CompileText(const String& path, const String& text,
+ const String& zone, const String& package)
+{
+ std::stringstream stream(text);
+ return CompileStream(path, &stream, zone, package);
+}
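+
+/* Example (sketch, with a hypothetical snippet): compile and evaluate a piece of config text:
+ *
+ *   std::unique_ptr<Expression> expr = ConfigCompiler::CompileText("<example>", "var answer = 42");
+ *   ScriptFrame frame(true);
+ *   expr->Evaluate(frame);
+ */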
+
+/**
+ * Adds a directory to the list of include search dirs.
+ *
+ * @param dir The new dir.
+ */
+void ConfigCompiler::AddIncludeSearchDir(const String& dir)
+{
+ Log(LogInformation, "ConfigCompiler")
+ << "Adding include search dir: " << dir;
+
+ m_IncludeSearchDirs.push_back(dir);
+}
+
+std::vector<ZoneFragment> ConfigCompiler::GetZoneDirs(const String& zone)
+{
+ std::unique_lock<std::mutex> lock(m_ZoneDirsMutex);
+ auto it = m_ZoneDirs.find(zone);
+ if (it == m_ZoneDirs.end())
+ return std::vector<ZoneFragment>();
+ else
+ return it->second;
+}
+
+void ConfigCompiler::RegisterZoneDir(const String& tag, const String& ppath, const String& zoneName)
+{
+ ZoneFragment zf;
+ zf.Tag = tag;
+ zf.Path = ppath;
+
+ std::unique_lock<std::mutex> lock(m_ZoneDirsMutex);
+ m_ZoneDirs[zoneName].push_back(zf);
+}
+
+bool ConfigCompiler::HasZoneConfigAuthority(const String& zoneName)
+{
+ std::vector<ZoneFragment> zoneDirs = m_ZoneDirs[zoneName];
+
+ bool empty = zoneDirs.empty();
+
+ if (!empty) {
+ std::vector<String> paths;
+ paths.reserve(zoneDirs.size());
+
+ for (const ZoneFragment& zf : zoneDirs) {
+ paths.push_back(zf.Path);
+ }
+
+ Log(LogNotice, "ConfigCompiler")
+ << "Registered authoritative config directories for zone '" << zoneName << "': " << Utility::NaturalJoin(paths);
+ }
+
+ return !empty;
+}
+
+
+bool ConfigCompiler::IsAbsolutePath(const String& path)
+{
+#ifndef _WIN32
+ return (path.GetLength() > 0 && path[0] == '/');
+#else /* _WIN32 */
+ return !PathIsRelative(path.CStr());
+#endif /* _WIN32 */
+}
+
+void ConfigCompiler::AddImport(const Expression::Ptr& import)
+{
+ m_Imports.push_back(import);
+}
+
+std::vector<Expression::Ptr> ConfigCompiler::GetImports() const
+{
+ return m_Imports;
+}
diff --git a/lib/config/configcompiler.hpp b/lib/config/configcompiler.hpp
new file mode 100644
index 0000000..fe00bed
--- /dev/null
+++ b/lib/config/configcompiler.hpp
@@ -0,0 +1,161 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CONFIGCOMPILER_H
+#define CONFIGCOMPILER_H
+
+#include "config/i2-config.hpp"
+#include "config/expression.hpp"
+#include "base/debuginfo.hpp"
+#include "base/registry.hpp"
+#include "base/initialize.hpp"
+#include "base/singleton.hpp"
+#include "base/string.hpp"
+#include <future>
+#include <iostream>
+#include <stack>
+
+typedef union YYSTYPE YYSTYPE;
+typedef void *yyscan_t;
+
+namespace icinga
+{
+
+struct CompilerDebugInfo
+{
+ const char *Path;
+
+ int FirstLine;
+ int FirstColumn;
+
+ int LastLine;
+ int LastColumn;
+
+ operator DebugInfo() const
+ {
+ DebugInfo di;
+ di.Path = Path;
+ di.FirstLine = FirstLine;
+ di.FirstColumn = FirstColumn;
+ di.LastLine = LastLine;
+ di.LastColumn = LastColumn;
+ return di;
+ }
+};
+
+struct EItemInfo
+{
+ bool SideEffect;
+ CompilerDebugInfo DebugInfo;
+};
+
+enum FlowControlType
+{
+ FlowControlReturn = 1,
+ FlowControlContinue = 2,
+ FlowControlBreak = 4
+};
+
+struct ZoneFragment
+{
+ String Tag;
+ String Path;
+};
+
+/**
+ * The configuration compiler can be used to compile a configuration file
+ * into a number of configuration items.
+ *
+ * @ingroup config
+ */
+class ConfigCompiler
+{
+public:
+ explicit ConfigCompiler(String path, std::istream *input,
+ String zone = String(), String package = String());
+ virtual ~ConfigCompiler();
+
+ std::unique_ptr<Expression> Compile();
+
+	static std::unique_ptr<Expression> CompileStream(const String& path, std::istream *stream,
+		const String& zone = String(), const String& package = String());
+	static std::unique_ptr<Expression> CompileFile(const String& path, const String& zone = String(),
+		const String& package = String());
+	static std::unique_ptr<Expression> CompileText(const String& path, const String& text,
+		const String& zone = String(), const String& package = String());
+
+ static void AddIncludeSearchDir(const String& dir);
+
+ const char *GetPath() const;
+
+ void SetZone(const String& zone);
+ String GetZone() const;
+
+ void SetPackage(const String& package);
+ String GetPackage() const;
+
+ void AddImport(const Expression::Ptr& import);
+ std::vector<Expression::Ptr> GetImports() const;
+
+ static void CollectIncludes(std::vector<std::unique_ptr<Expression> >& expressions,
+ const String& file, const String& zone, const String& package);
+
+ static std::unique_ptr<Expression> HandleInclude(const String& relativeBase, const String& path, bool search,
+ const String& zone, const String& package, const DebugInfo& debuginfo = DebugInfo());
+ static std::unique_ptr<Expression> HandleIncludeRecursive(const String& relativeBase, const String& path,
+ const String& pattern, const String& zone, const String& package, const DebugInfo& debuginfo = DebugInfo());
+ static std::unique_ptr<Expression> HandleIncludeZones(const String& relativeBase, const String& tag,
+ const String& path, const String& pattern, const String& package, const DebugInfo& debuginfo = DebugInfo());
+
+ size_t ReadInput(char *buffer, size_t max_bytes);
+ void *GetScanner() const;
+
+ static std::vector<ZoneFragment> GetZoneDirs(const String& zone);
+ static void RegisterZoneDir(const String& tag, const String& ppath, const String& zoneName);
+
+ static bool HasZoneConfigAuthority(const String& zoneName);
+
+private:
+ std::promise<Expression::Ptr> m_Promise;
+
+ String m_Path;
+ std::istream *m_Input;
+ String m_Zone;
+ String m_Package;
+ std::vector<Expression::Ptr> m_Imports;
+
+ void *m_Scanner;
+
+ static std::vector<String> m_IncludeSearchDirs;
+ static std::mutex m_ZoneDirsMutex;
+ static std::map<String, std::vector<ZoneFragment> > m_ZoneDirs;
+
+ void InitializeScanner();
+ void DestroyScanner();
+
+ static void HandleIncludeZone(const String& relativeBase, const String& tag, const String& path, const String& pattern, const String& package, std::vector<std::unique_ptr<Expression> >& expressions);
+
+ static bool IsAbsolutePath(const String& path);
+
+public:
+ bool m_Eof;
+ int m_OpenBraces;
+
+ String m_LexBuffer;
+ CompilerDebugInfo m_LocationBegin;
+
+ std::stack<bool> m_IgnoreNewlines;
+ std::stack<bool> m_Apply;
+ std::stack<bool> m_ObjectAssign;
+ std::stack<bool> m_SeenAssign;
+ std::stack<bool> m_SeenIgnore;
+ std::stack<Expression *> m_Assign;
+ std::stack<Expression *> m_Ignore;
+ std::stack<String> m_FKVar;
+ std::stack<String> m_FVVar;
+ std::stack<Expression *> m_FTerm;
+ std::stack<int> m_FlowControlInfo;
+};
+
+}
+
+#endif /* CONFIGCOMPILER_H */
diff --git a/lib/config/configcompilercontext.cpp b/lib/config/configcompilercontext.cpp
new file mode 100644
index 0000000..0161181
--- /dev/null
+++ b/lib/config/configcompilercontext.cpp
@@ -0,0 +1,57 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "config/configcompilercontext.hpp"
+#include "base/singleton.hpp"
+#include "base/json.hpp"
+#include "base/netstring.hpp"
+#include "base/exception.hpp"
+#include "base/application.hpp"
+#include "base/utility.hpp"
+
+using namespace icinga;
+
+ConfigCompilerContext *ConfigCompilerContext::GetInstance()
+{
+ return Singleton<ConfigCompilerContext>::GetInstance();
+}
+
+void ConfigCompilerContext::OpenObjectsFile(const String& filename)
+{
+ try {
+ m_ObjectsFP = std::make_unique<AtomicFile>(filename, 0600);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli", "Could not create temporary objects file: " + DiagnosticInformation(ex, false));
+ Application::Exit(1);
+ }
+}
+
+void ConfigCompilerContext::WriteObject(const Dictionary::Ptr& object)
+{
+ if (!m_ObjectsFP)
+ return;
+
+ String json = JsonEncode(object);
+
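+	/* Append the JSON-encoded object as a netstring; the mutex serializes concurrent writers. */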
+ {
+ std::unique_lock<std::mutex> lock(m_Mutex);
+ NetString::WriteStringToStream(*m_ObjectsFP, json);
+ }
+}
+
+void ConfigCompilerContext::CancelObjectsFile()
+{
+ if (!m_ObjectsFP)
+ return;
+
+ m_ObjectsFP.reset(nullptr);
+}
+
+void ConfigCompilerContext::FinishObjectsFile()
+{
+ if (!m_ObjectsFP)
+ return;
+
+ m_ObjectsFP->Commit();
+ m_ObjectsFP.reset(nullptr);
+}
+
diff --git a/lib/config/configcompilercontext.hpp b/lib/config/configcompilercontext.hpp
new file mode 100644
index 0000000..c3d5317
--- /dev/null
+++ b/lib/config/configcompilercontext.hpp
@@ -0,0 +1,42 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CONFIGCOMPILERCONTEXT_H
+#define CONFIGCOMPILERCONTEXT_H
+
+#include "config/i2-config.hpp"
+#include "base/atomic-file.hpp"
+#include "base/dictionary.hpp"
+#include <fstream>
+#include <memory>
+#include <mutex>
+
+namespace icinga
+{
+
+/**
+ * Compiler context used to persist compiled config objects to the objects file.
+ *
+ * @ingroup config
+ */
+class ConfigCompilerContext
+{
+public:
+ void OpenObjectsFile(const String& filename);
+ void WriteObject(const Dictionary::Ptr& object);
+ void CancelObjectsFile();
+ void FinishObjectsFile();
+
+ inline bool IsOpen() const noexcept
+ {
+ return (bool)m_ObjectsFP;
+ }
+
+ static ConfigCompilerContext *GetInstance();
+
+private:
+ std::unique_ptr<AtomicFile> m_ObjectsFP;
+
+ mutable std::mutex m_Mutex;
+};
+
+}
+
+#endif /* CONFIGCOMPILERCONTEXT_H */
diff --git a/lib/config/configfragment.hpp b/lib/config/configfragment.hpp
new file mode 100644
index 0000000..883aef8
--- /dev/null
+++ b/lib/config/configfragment.hpp
@@ -0,0 +1,26 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CONFIGFRAGMENT_H
+#define CONFIGFRAGMENT_H
+
+#include "config/configcompiler.hpp"
+#include "base/initialize.hpp"
+#include "base/debug.hpp"
+#include "base/exception.hpp"
+#include "base/application.hpp"
+
+/* Ensure that the priority is lower than the basic namespace initialization in scriptframe.cpp. */
+#define REGISTER_CONFIG_FRAGMENT(name, fragment) \
+ INITIALIZE_ONCE_WITH_PRIORITY([]() { \
+ std::unique_ptr<icinga::Expression> expression = icinga::ConfigCompiler::CompileText(name, fragment); \
+ VERIFY(expression); \
+ try { \
+ icinga::ScriptFrame frame(true); \
+ expression->Evaluate(frame); \
+ } catch (const std::exception& ex) { \
+ std::cerr << icinga::DiagnosticInformation(ex) << std::endl; \
+ icinga::Application::Exit(1); \
+ } \
+ }, icinga::InitializePriority::EvaluateConfigFragments)
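+
+/* Usage sketch (hypothetical fragment name and content):
+ *   REGISTER_CONFIG_FRAGMENT("example-fragment.conf", "const ExampleAnswer = 42\n")
+ */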
+
+#endif /* CONFIGFRAGMENT_H */
diff --git a/lib/config/configitem.cpp b/lib/config/configitem.cpp
new file mode 100644
index 0000000..9dc0f1a
--- /dev/null
+++ b/lib/config/configitem.cpp
@@ -0,0 +1,849 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "config/configitem.hpp"
+#include "config/configcompilercontext.hpp"
+#include "config/applyrule.hpp"
+#include "config/objectrule.hpp"
+#include "config/configcompiler.hpp"
+#include "base/application.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/convert.hpp"
+#include "base/logger.hpp"
+#include "base/debug.hpp"
+#include "base/workqueue.hpp"
+#include "base/exception.hpp"
+#include "base/stdiostream.hpp"
+#include "base/netstring.hpp"
+#include "base/serializer.hpp"
+#include "base/json.hpp"
+#include "base/exception.hpp"
+#include "base/function.hpp"
+#include "base/utility.hpp"
+#include <boost/algorithm/string/join.hpp>
+#include <atomic>
+#include <sstream>
+#include <fstream>
+#include <algorithm>
+#include <random>
+#include <unordered_map>
+
+using namespace icinga;
+
+std::mutex ConfigItem::m_Mutex;
+ConfigItem::TypeMap ConfigItem::m_Items;
+ConfigItem::TypeMap ConfigItem::m_DefaultTemplates;
+ConfigItem::ItemList ConfigItem::m_UnnamedItems;
+ConfigItem::IgnoredItemList ConfigItem::m_IgnoredItems;
+
+REGISTER_FUNCTION(Internal, run_with_activation_context, &ConfigItem::RunWithActivationContext, "func");
+
+/**
+ * Constructor for the ConfigItem class.
+ *
+ * @param type The object type.
+ * @param name The name of the item.
+ * @param abstract Whether the item is a template.
+ * @param exprl Expression list for the item.
+ * @param filter Filter expression (for apply rules).
+ * @param defaultTmpl Whether the item is a default template.
+ * @param ignoreOnError Whether the item is ignored when an error occurs during evaluation.
+ * @param debuginfo Debug information.
+ * @param scope The variable scope.
+ * @param zone The zone name.
+ * @param package The configuration package.
+ */
+ConfigItem::ConfigItem(Type::Ptr type, String name,
+ bool abstract, Expression::Ptr exprl,
+ Expression::Ptr filter, bool defaultTmpl, bool ignoreOnError,
+ DebugInfo debuginfo, Dictionary::Ptr scope,
+ String zone, String package)
+ : m_Type(std::move(type)), m_Name(std::move(name)), m_Abstract(abstract),
+ m_Expression(std::move(exprl)), m_Filter(std::move(filter)),
+ m_DefaultTmpl(defaultTmpl), m_IgnoreOnError(ignoreOnError),
+ m_DebugInfo(std::move(debuginfo)), m_Scope(std::move(scope)), m_Zone(std::move(zone)),
+ m_Package(std::move(package))
+{
+}
+
+/**
+ * Retrieves the type of the configuration item.
+ *
+ * @returns The type.
+ */
+Type::Ptr ConfigItem::GetType() const
+{
+ return m_Type;
+}
+
+/**
+ * Retrieves the name of the configuration item.
+ *
+ * @returns The name.
+ */
+String ConfigItem::GetName() const
+{
+ return m_Name;
+}
+
+/**
+ * Checks whether the item is abstract.
+ *
+ * @returns true if the item is abstract, false otherwise.
+ */
+bool ConfigItem::IsAbstract() const
+{
+ return m_Abstract;
+}
+
+bool ConfigItem::IsDefaultTemplate() const
+{
+ return m_DefaultTmpl;
+}
+
+bool ConfigItem::IsIgnoreOnError() const
+{
+ return m_IgnoreOnError;
+}
+
+/**
+ * Retrieves the debug information for the configuration item.
+ *
+ * @returns The debug information.
+ */
+DebugInfo ConfigItem::GetDebugInfo() const
+{
+ return m_DebugInfo;
+}
+
+Dictionary::Ptr ConfigItem::GetScope() const
+{
+ return m_Scope;
+}
+
+ConfigObject::Ptr ConfigItem::GetObject() const
+{
+ return m_Object;
+}
+
+/**
+ * Retrieves the expression list for the configuration item.
+ *
+ * @returns The expression list.
+ */
+Expression::Ptr ConfigItem::GetExpression() const
+{
+ return m_Expression;
+}
+
+/**
+ * Retrieves the object filter for the configuration item.
+ *
+ * @returns The filter expression.
+ */
+Expression::Ptr ConfigItem::GetFilter() const
+{
+ return m_Filter;
+}
+
+class DefaultValidationUtils final : public ValidationUtils
+{
+public:
+ bool ValidateName(const String& type, const String& name) const override
+ {
+ ConfigItem::Ptr item = ConfigItem::GetByTypeAndName(Type::GetByName(type), name);
+
+ if (!item || item->IsAbstract())
+ return false;
+
+ return true;
+ }
+};
+
+/**
+ * Commits the configuration item by creating a ConfigObject
+ * object.
+ *
+ * @returns The ConfigObject that was created/updated.
+ */
+ConfigObject::Ptr ConfigItem::Commit(bool discard)
+{
+ Type::Ptr type = GetType();
+
+#ifdef I2_DEBUG
+ Log(LogDebug, "ConfigItem")
+ << "Commit called for ConfigItem Type=" << type->GetName() << ", Name=" << GetName();
+#endif /* I2_DEBUG */
+
+ /* Make sure the type is valid. */
+ if (!type || !ConfigObject::TypeInstance->IsAssignableFrom(type))
+ BOOST_THROW_EXCEPTION(ScriptError("Type '" + type->GetName() + "' does not exist.", m_DebugInfo));
+
+ if (IsAbstract())
+ return nullptr;
+
+ ConfigObject::Ptr dobj = static_pointer_cast<ConfigObject>(type->Instantiate(std::vector<Value>()));
+
+ dobj->SetDebugInfo(m_DebugInfo);
+ dobj->SetZoneName(m_Zone);
+ dobj->SetPackage(m_Package);
+ dobj->SetName(m_Name);
+
+ DebugHint debugHints;
+
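+	/* Evaluate the item's expression with the new object as 'this'; with ignore_on_error the object is dropped instead of failing. */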
+ ScriptFrame frame(true, dobj);
+ if (m_Scope)
+ m_Scope->CopyTo(frame.Locals);
+ try {
+ m_Expression->Evaluate(frame, &debugHints);
+ } catch (const std::exception& ex) {
+ if (m_IgnoreOnError) {
+ Log(LogNotice, "ConfigObject")
+ << "Ignoring config object '" << m_Name << "' of type '" << type->GetName() << "' due to errors: " << DiagnosticInformation(ex);
+
+ {
+ std::unique_lock<std::mutex> lock(m_Mutex);
+ m_IgnoredItems.push_back(m_DebugInfo.Path);
+ }
+
+ return nullptr;
+ }
+
+ throw;
+ }
+
+ if (discard)
+ m_Expression.reset();
+
+ String item_name;
+ String short_name = dobj->GetShortName();
+
+ if (!short_name.IsEmpty()) {
+ item_name = short_name;
+ dobj->SetName(short_name);
+ } else
+ item_name = m_Name;
+
+ String name = item_name;
+
+ auto *nc = dynamic_cast<NameComposer *>(type.get());
+
+ if (nc) {
+ if (name.IsEmpty())
+ BOOST_THROW_EXCEPTION(ScriptError("Object name must not be empty.", m_DebugInfo));
+
+ name = nc->MakeName(name, dobj);
+
+ if (name.IsEmpty())
+ BOOST_THROW_EXCEPTION(std::runtime_error("Could not determine name for object"));
+ }
+
+ if (name != item_name)
+ dobj->SetShortName(item_name);
+
+ dobj->SetName(name);
+
+ Dictionary::Ptr dhint = debugHints.ToDictionary();
+
+ try {
+ DefaultValidationUtils utils;
+ dobj->Validate(FAConfig, utils);
+ } catch (ValidationError& ex) {
+ if (m_IgnoreOnError) {
+ Log(LogNotice, "ConfigObject")
+ << "Ignoring config object '" << m_Name << "' of type '" << type->GetName() << "' due to errors: " << DiagnosticInformation(ex);
+
+ {
+ std::unique_lock<std::mutex> lock(m_Mutex);
+ m_IgnoredItems.push_back(m_DebugInfo.Path);
+ }
+
+ return nullptr;
+ }
+
+ ex.SetDebugHint(dhint);
+ throw;
+ }
+
+ try {
+ dobj->OnConfigLoaded();
+ } catch (const std::exception& ex) {
+ if (m_IgnoreOnError) {
+ Log(LogNotice, "ConfigObject")
+ << "Ignoring config object '" << m_Name << "' of type '" << m_Type->GetName() << "' due to errors: " << DiagnosticInformation(ex);
+
+ {
+ std::unique_lock<std::mutex> lock(m_Mutex);
+ m_IgnoredItems.push_back(m_DebugInfo.Path);
+ }
+
+ return nullptr;
+ }
+
+ throw;
+ }
+
+ Value serializedObject;
+
+ try {
+ if (ConfigCompilerContext::GetInstance()->IsOpen()) {
+ serializedObject = Serialize(dobj, FAConfig);
+ } else {
+ AssertNoCircularReferences(dobj);
+ }
+ } catch (const CircularReferenceError& ex) {
+ BOOST_THROW_EXCEPTION(ValidationError(dobj, ex.GetPath(), "Circular references are not allowed"));
+ }
+
+ if (ConfigCompilerContext::GetInstance()->IsOpen()) {
+ Dictionary::Ptr persistentItem = new Dictionary({
+ { "type", type->GetName() },
+ { "name", GetName() },
+ { "properties", serializedObject },
+ { "debug_hints", dhint },
+ { "debug_info", new Array({
+ m_DebugInfo.Path,
+ m_DebugInfo.FirstLine,
+ m_DebugInfo.FirstColumn,
+ m_DebugInfo.LastLine,
+ m_DebugInfo.LastColumn,
+ }) }
+ });
+
+ ConfigCompilerContext::GetInstance()->WriteObject(persistentItem);
+ }
+
+ dhint.reset();
+
+ dobj->Register();
+
+ m_Object = dobj;
+
+ return dobj;
+}
+
+/**
+ * Registers the configuration item.
+ */
+void ConfigItem::Register()
+{
+ m_ActivationContext = ActivationContext::GetCurrentContext();
+
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ /* If this is a non-abstract object with a composite name
+ * we register it in m_UnnamedItems instead of m_Items. */
+ if (!m_Abstract && dynamic_cast<NameComposer *>(m_Type.get()))
+ m_UnnamedItems.emplace_back(this);
+ else {
+ auto& items = m_Items[m_Type];
+
+ auto it = items.find(m_Name);
+
+ if (it != items.end()) {
+ std::ostringstream msgbuf;
+ msgbuf << "A configuration item of type '" << m_Type->GetName()
+ << "' and name '" << GetName() << "' already exists ("
+ << it->second->GetDebugInfo() << "), new declaration: " << GetDebugInfo();
+ BOOST_THROW_EXCEPTION(ScriptError(msgbuf.str()));
+ }
+
+ m_Items[m_Type][m_Name] = this;
+
+ if (m_DefaultTmpl)
+ m_DefaultTemplates[m_Type][m_Name] = this;
+ }
+}
+
+/**
+ * Unregisters the configuration item.
+ */
+void ConfigItem::Unregister()
+{
+ if (m_Object) {
+ m_Object->Unregister();
+ m_Object.reset();
+ }
+
+ std::unique_lock<std::mutex> lock(m_Mutex);
+ m_UnnamedItems.erase(std::remove(m_UnnamedItems.begin(), m_UnnamedItems.end(), this), m_UnnamedItems.end());
+ m_Items[m_Type].erase(m_Name);
+ m_DefaultTemplates[m_Type].erase(m_Name);
+}
+
+/**
+ * Retrieves a configuration item by type and name.
+ *
+ * @param type The type of the ConfigItem that is to be looked up.
+ * @param name The name of the ConfigItem that is to be looked up.
+ * @returns The configuration item.
+ */
+ConfigItem::Ptr ConfigItem::GetByTypeAndName(const Type::Ptr& type, const String& name)
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ auto it = m_Items.find(type);
+
+ if (it == m_Items.end())
+ return nullptr;
+
+ auto it2 = it->second.find(name);
+
+ if (it2 == it->second.end())
+ return nullptr;
+
+ return it2->second;
+}
+
+bool ConfigItem::CommitNewItems(const ActivationContext::Ptr& context, WorkQueue& upq, std::vector<ConfigItem::Ptr>& newItems)
+{
+ typedef std::pair<ConfigItem::Ptr, bool> ItemPair;
+ std::unordered_map<Type*, std::vector<ItemPair>> itemsByType;
+ std::vector<ItemPair>::size_type total = 0;
+
+ {
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ for (const TypeMap::value_type& kv : m_Items) {
+ std::vector<ItemPair> items;
+
+ for (const ItemMap::value_type& kv2 : kv.second) {
+ if (kv2.second->m_Abstract || kv2.second->m_Object)
+ continue;
+
+ if (kv2.second->m_ActivationContext != context)
+ continue;
+
+ items.emplace_back(kv2.second, false);
+ }
+
+ if (!items.empty()) {
+ total += items.size();
+ itemsByType.emplace(kv.first.get(), std::move(items));
+ }
+ }
+
+ ItemList newUnnamedItems;
+
+ for (const ConfigItem::Ptr& item : m_UnnamedItems) {
+ if (item->m_ActivationContext != context) {
+ newUnnamedItems.push_back(item);
+ continue;
+ }
+
+ if (item->m_Abstract || item->m_Object)
+ continue;
+
+ itemsByType[item->m_Type.get()].emplace_back(item, true);
+ ++total;
+ }
+
+ m_UnnamedItems.swap(newUnnamedItems);
+ }
+
+ if (!total)
+ return true;
+
+	// Shuffle all items to evenly distribute them over the threads of the workqueue. This increases performance
+	// noticeably in environments with lots of objects and available threads.
+ for (auto& kv : itemsByType) {
+ std::shuffle(std::begin(kv.second), std::end(kv.second), std::default_random_engine{});
+ }
+
+#ifdef I2_DEBUG
+ Log(LogDebug, "configitem")
+ << "Committing " << total << " new items.";
+#endif /* I2_DEBUG */
+
+ std::set<Type::Ptr> types;
+ std::set<Type::Ptr> completed_types;
+ int itemsCount {0};
+
+ for (const Type::Ptr& type : Type::GetAllTypes()) {
+ if (ConfigObject::TypeInstance->IsAssignableFrom(type))
+ types.insert(type);
+ }
+
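+	/* Commit items type by type, deferring any type whose load dependencies have not been committed yet. */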
+ while (types.size() != completed_types.size()) {
+ for (const Type::Ptr& type : types) {
+ if (completed_types.find(type) != completed_types.end())
+ continue;
+
+ bool unresolved_dep = false;
+
+ /* skip this type (for now) if there are unresolved load dependencies */
+ for (auto pLoadDep : type->GetLoadDependencies()) {
+ if (types.find(pLoadDep) != types.end() && completed_types.find(pLoadDep) == completed_types.end()) {
+ unresolved_dep = true;
+ break;
+ }
+ }
+
+ if (unresolved_dep)
+ continue;
+
+ std::atomic<int> committed_items(0);
+ std::mutex newItemsMutex;
+
+ {
+ auto items (itemsByType.find(type.get()));
+
+ if (items != itemsByType.end()) {
+ upq.ParallelFor(items->second, [&committed_items, &newItems, &newItemsMutex](const ItemPair& ip) {
+ const ConfigItem::Ptr& item = ip.first;
+
+ if (!item->Commit(ip.second)) {
+ if (item->IsIgnoreOnError()) {
+ item->Unregister();
+ }
+
+ return;
+ }
+
+ committed_items++;
+
+ std::unique_lock<std::mutex> lock(newItemsMutex);
+ newItems.emplace_back(item);
+ });
+
+ upq.Join();
+ }
+ }
+
+ itemsCount += committed_items;
+
+ completed_types.insert(type);
+
+#ifdef I2_DEBUG
+ if (committed_items > 0)
+ Log(LogDebug, "configitem")
+ << "Committed " << committed_items << " items of type '" << type->GetName() << "'.";
+#endif /* I2_DEBUG */
+
+ if (upq.HasExceptions())
+ return false;
+ }
+ }
+
+#ifdef I2_DEBUG
+ Log(LogDebug, "configitem")
+ << "Committed " << itemsCount << " items.";
+#endif /* I2_DEBUG */
+
+ completed_types.clear();
+
+ while (types.size() != completed_types.size()) {
+ for (const Type::Ptr& type : types) {
+ if (completed_types.find(type) != completed_types.end())
+ continue;
+
+ bool unresolved_dep = false;
+
+ /* skip this type (for now) if there are unresolved load dependencies */
+ for (auto pLoadDep : type->GetLoadDependencies()) {
+ if (types.find(pLoadDep) != types.end() && completed_types.find(pLoadDep) == completed_types.end()) {
+ unresolved_dep = true;
+ break;
+ }
+ }
+
+ if (unresolved_dep)
+ continue;
+
+ std::atomic<int> notified_items(0);
+
+ {
+ auto items (itemsByType.find(type.get()));
+
+ if (items != itemsByType.end()) {
+ upq.ParallelFor(items->second, [&notified_items](const ItemPair& ip) {
+ const ConfigItem::Ptr& item = ip.first;
+
+ if (!item->m_Object)
+ return;
+
+ try {
+ item->m_Object->OnAllConfigLoaded();
+ notified_items++;
+ } catch (const std::exception& ex) {
+ if (!item->m_IgnoreOnError)
+ throw;
+
+ Log(LogNotice, "ConfigObject")
+ << "Ignoring config object '" << item->m_Name << "' of type '" << item->m_Type->GetName() << "' due to errors: " << DiagnosticInformation(ex);
+
+ item->Unregister();
+
+ {
+ std::unique_lock<std::mutex> lock(item->m_Mutex);
+ item->m_IgnoredItems.push_back(item->m_DebugInfo.Path);
+ }
+ }
+ });
+
+ upq.Join();
+ }
+ }
+
+ completed_types.insert(type);
+
+#ifdef I2_DEBUG
+ if (notified_items > 0)
+ Log(LogDebug, "configitem")
+ << "Sent OnAllConfigLoaded to " << notified_items << " items of type '" << type->GetName() << "'.";
+#endif /* I2_DEBUG */
+
+ if (upq.HasExceptions())
+ return false;
+
+ notified_items = 0;
+ for (auto loadDep : type->GetLoadDependencies()) {
+ auto items (itemsByType.find(loadDep));
+
+ if (items != itemsByType.end()) {
+ upq.ParallelFor(items->second, [&type, &notified_items](const ItemPair& ip) {
+ const ConfigItem::Ptr& item = ip.first;
+
+ if (!item->m_Object)
+ return;
+
+ ActivationScope ascope(item->m_ActivationContext);
+ item->m_Object->CreateChildObjects(type);
+ notified_items++;
+ });
+ }
+ }
+
+ upq.Join();
+
+#ifdef I2_DEBUG
+ if (notified_items > 0)
+ Log(LogDebug, "configitem")
+ << "Sent CreateChildObjects to " << notified_items << " items of type '" << type->GetName() << "'.";
+#endif /* I2_DEBUG */
+
+ if (upq.HasExceptions())
+ return false;
+
+ // Make sure to activate any additionally generated items
+ if (!CommitNewItems(context, upq, newItems))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool ConfigItem::CommitItems(const ActivationContext::Ptr& context, WorkQueue& upq, std::vector<ConfigItem::Ptr>& newItems, bool silent)
+{
+ if (!silent)
+ Log(LogInformation, "ConfigItem", "Committing config item(s).");
+
+ if (!CommitNewItems(context, upq, newItems)) {
+ upq.ReportExceptions("config");
+
+ for (const ConfigItem::Ptr& item : newItems) {
+ item->Unregister();
+ }
+
+ return false;
+ }
+
+ ApplyRule::CheckMatches(silent);
+
+ if (!silent) {
+ /* log stats for external parsers */
+ typedef std::map<Type::Ptr, int> ItemCountMap;
+ ItemCountMap itemCounts;
+ for (const ConfigItem::Ptr& item : newItems) {
+ if (!item->m_Object)
+ continue;
+
+ itemCounts[item->m_Object->GetReflectionType()]++;
+ }
+
+ for (const ItemCountMap::value_type& kv : itemCounts) {
+ Log(LogInformation, "ConfigItem")
+ << "Instantiated " << kv.second << " " << (kv.second != 1 ? kv.first->GetPluralName() : kv.first->GetName()) << ".";
+ }
+ }
+
+ return true;
+}
+
+/**
+ * ActivateItems activates new config items.
+ *
+ * @param newItems Vector of items to be activated
+ * @param runtimeCreated Whether the objects were created by a runtime object
+ * @param mainConfigActivation Whether this is the call for activating the main configuration during startup
+ * @param withModAttrs Whether this call shall read the modified attributes file
+ * @param cookie Cookie for preventing message loops
+ * @return Whether the config activation was successful (in case of errors, exceptions are thrown)
+ */
+bool ConfigItem::ActivateItems(const std::vector<ConfigItem::Ptr>& newItems, bool runtimeCreated,
+ bool mainConfigActivation, bool withModAttrs, const Value& cookie)
+{
+ static std::mutex mtx;
+ std::unique_lock<std::mutex> lock(mtx);
+
+ if (withModAttrs) {
+ /* restore modified attributes */
+ if (Utility::PathExists(Configuration::ModAttrPath)) {
+ std::unique_ptr<Expression> expression = ConfigCompiler::CompileFile(Configuration::ModAttrPath);
+
+ if (expression) {
+ try {
+ ScriptFrame frame(true);
+ expression->Evaluate(frame);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "config", DiagnosticInformation(ex));
+ }
+ }
+ }
+ }
+
+ for (const ConfigItem::Ptr& item : newItems) {
+ if (!item->m_Object)
+ continue;
+
+ ConfigObject::Ptr object = item->m_Object;
+
+ if (object->IsActive())
+ continue;
+
+#ifdef I2_DEBUG
+ Log(LogDebug, "ConfigItem")
+ << "Setting 'active' to true for object '" << object->GetName() << "' of type '" << object->GetReflectionType()->GetName() << "'";
+#endif /* I2_DEBUG */
+
+ object->PreActivate();
+ }
+
+ if (mainConfigActivation)
+ Log(LogInformation, "ConfigItem", "Triggering Start signal for config items");
+
+ /* Activate objects in priority order. */
+ std::vector<Type::Ptr> types = Type::GetAllTypes();
+
+ std::sort(types.begin(), types.end(), [](const Type::Ptr& a, const Type::Ptr& b) {
+ if (a->GetActivationPriority() < b->GetActivationPriority())
+ return true;
+ return false;
+ });
+
+ /* Find the last logger type to be activated. */
+ Type::Ptr lastLoggerType = nullptr;
+ for (const Type::Ptr& type : types) {
+ if (Logger::TypeInstance->IsAssignableFrom(type)) {
+ lastLoggerType = type;
+ }
+ }
+
+ for (const Type::Ptr& type : types) {
+ for (const ConfigItem::Ptr& item : newItems) {
+ if (!item->m_Object)
+ continue;
+
+ ConfigObject::Ptr object = item->m_Object;
+ Type::Ptr objectType = object->GetReflectionType();
+
+ if (objectType != type)
+ continue;
+
+#ifdef I2_DEBUG
+ Log(LogDebug, "ConfigItem")
+ << "Activating object '" << object->GetName() << "' of type '"
+ << objectType->GetName() << "' with priority "
+ << objectType->GetActivationPriority();
+#endif /* I2_DEBUG */
+
+ object->Activate(runtimeCreated, cookie);
+ }
+
+ if (mainConfigActivation && type == lastLoggerType) {
+ /* Disable early logging configuration once the last logger type was activated. */
+ Logger::DisableEarlyLogging();
+ }
+ }
+
+ if (mainConfigActivation)
+ Log(LogInformation, "ConfigItem", "Activated all objects.");
+
+ return true;
+}
+
+bool ConfigItem::RunWithActivationContext(const Function::Ptr& function)
+{
+ ActivationScope scope;
+
+ if (!function)
+ BOOST_THROW_EXCEPTION(ScriptError("'function' argument must not be null."));
+
+ function->Invoke();
+
+ WorkQueue upq(25000, Configuration::Concurrency);
+ upq.SetName("ConfigItem::RunWithActivationContext");
+
+ std::vector<ConfigItem::Ptr> newItems;
+
+ if (!CommitItems(scope.GetContext(), upq, newItems, true))
+ return false;
+
+ if (!ActivateItems(newItems, false, false))
+ return false;
+
+ return true;
+}
+
+std::vector<ConfigItem::Ptr> ConfigItem::GetItems(const Type::Ptr& type)
+{
+ std::vector<ConfigItem::Ptr> items;
+
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ auto it = m_Items.find(type);
+
+ if (it == m_Items.end())
+ return items;
+
+ items.reserve(it->second.size());
+
+ for (const ItemMap::value_type& kv : it->second) {
+ items.push_back(kv.second);
+ }
+
+ return items;
+}
+
+std::vector<ConfigItem::Ptr> ConfigItem::GetDefaultTemplates(const Type::Ptr& type)
+{
+ std::vector<ConfigItem::Ptr> items;
+
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ auto it = m_DefaultTemplates.find(type);
+
+ if (it == m_DefaultTemplates.end())
+ return items;
+
+ items.reserve(it->second.size());
+
+ for (const ItemMap::value_type& kv : it->second) {
+ items.push_back(kv.second);
+ }
+
+ return items;
+}
+
+void ConfigItem::RemoveIgnoredItems(const String& allowedConfigPath)
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ for (const String& path : m_IgnoredItems) {
+ if (path.Find(allowedConfigPath) == String::NPos)
+ continue;
+
+ Log(LogNotice, "ConfigItem")
+ << "Removing ignored item path '" << path << "'.";
+
+ (void) unlink(path.CStr());
+ }
+
+ m_IgnoredItems.clear();
+}
diff --git a/lib/config/configitem.hpp b/lib/config/configitem.hpp
new file mode 100644
index 0000000..b99cd08
--- /dev/null
+++ b/lib/config/configitem.hpp
@@ -0,0 +1,106 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CONFIGITEM_H
+#define CONFIGITEM_H
+
+#include "config/i2-config.hpp"
+#include "config/expression.hpp"
+#include "config/activationcontext.hpp"
+#include "base/configobject.hpp"
+#include "base/workqueue.hpp"
+
+namespace icinga
+{
+
+
+/**
+ * A configuration item. Non-abstract configuration items can be used to
+ * create configuration objects at runtime.
+ *
+ * @ingroup config
+ */
+class ConfigItem final : public Object {
+public:
+ DECLARE_PTR_TYPEDEFS(ConfigItem);
+
+ ConfigItem(Type::Ptr type, String name, bool abstract,
+ Expression::Ptr exprl,
+ Expression::Ptr filter,
+ bool defaultTmpl, bool ignoreOnError, DebugInfo debuginfo,
+ Dictionary::Ptr scope, String zone,
+ String package);
+
+ Type::Ptr GetType() const;
+ String GetName() const;
+ bool IsAbstract() const;
+ bool IsDefaultTemplate() const;
+ bool IsIgnoreOnError() const;
+
+ std::vector<ConfigItem::Ptr> GetParents() const;
+
+ Expression::Ptr GetExpression() const;
+ Expression::Ptr GetFilter() const;
+
+ void Register();
+ void Unregister();
+
+ DebugInfo GetDebugInfo() const;
+ Dictionary::Ptr GetScope() const;
+
+ ConfigObject::Ptr GetObject() const;
+
+ static ConfigItem::Ptr GetByTypeAndName(const Type::Ptr& type,
+ const String& name);
+
+ static bool CommitItems(const ActivationContext::Ptr& context, WorkQueue& upq, std::vector<ConfigItem::Ptr>& newItems, bool silent = false);
+ static bool ActivateItems(const std::vector<ConfigItem::Ptr>& newItems, bool runtimeCreated = false,
+ bool mainConfigActivation = false, bool withModAttrs = false, const Value& cookie = Empty);
+
+ static bool RunWithActivationContext(const Function::Ptr& function);
+
+ static std::vector<ConfigItem::Ptr> GetItems(const Type::Ptr& type);
+ static std::vector<ConfigItem::Ptr> GetDefaultTemplates(const Type::Ptr& type);
+
+ static void RemoveIgnoredItems(const String& allowedConfigPath);
+
+private:
+ Type::Ptr m_Type; /**< The object type. */
+ String m_Name; /**< The name. */
+ bool m_Abstract; /**< Whether this is a template. */
+
+ Expression::Ptr m_Expression;
+ Expression::Ptr m_Filter;
+ bool m_DefaultTmpl;
+ bool m_IgnoreOnError;
+ DebugInfo m_DebugInfo; /**< Debug information. */
+ Dictionary::Ptr m_Scope; /**< variable scope. */
+ String m_Zone; /**< The zone. */
+ String m_Package;
+ ActivationContext::Ptr m_ActivationContext;
+
+ ConfigObject::Ptr m_Object;
+
+ static std::mutex m_Mutex;
+
+ typedef std::map<String, ConfigItem::Ptr> ItemMap;
+ typedef std::map<Type::Ptr, ItemMap> TypeMap;
+ static TypeMap m_Items; /**< All registered configuration items. */
+ static TypeMap m_DefaultTemplates;
+
+ typedef std::vector<ConfigItem::Ptr> ItemList;
+ static ItemList m_UnnamedItems;
+
+ typedef std::vector<String> IgnoredItemList;
+ static IgnoredItemList m_IgnoredItems;
+
+ static ConfigItem::Ptr GetObjectUnlocked(const String& type,
+ const String& name);
+
+ ConfigObject::Ptr Commit(bool discard = true);
+
+ static bool CommitNewItems(const ActivationContext::Ptr& context, WorkQueue& upq, std::vector<ConfigItem::Ptr>& newItems);
+};
+
+}
+
+#endif /* CONFIGITEM_H */
diff --git a/lib/config/configitembuilder.cpp b/lib/config/configitembuilder.cpp
new file mode 100644
index 0000000..f7a3ead
--- /dev/null
+++ b/lib/config/configitembuilder.cpp
@@ -0,0 +1,120 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "config/configitembuilder.hpp"
+#include "base/configtype.hpp"
+#include <sstream>
+
+using namespace icinga;
+
+ConfigItemBuilder::ConfigItemBuilder(const DebugInfo& debugInfo)
+ : m_Abstract(false), m_DefaultTmpl(false), m_IgnoreOnError(false)
+{
+ m_DebugInfo = debugInfo;
+}
+
+void ConfigItemBuilder::SetType(const Type::Ptr& type)
+{
+ ASSERT(type);
+ m_Type = type;
+}
+
+void ConfigItemBuilder::SetName(const String& name)
+{
+ m_Name = name;
+}
+
+void ConfigItemBuilder::SetAbstract(bool abstract)
+{
+ m_Abstract = abstract;
+}
+
+void ConfigItemBuilder::SetScope(const Dictionary::Ptr& scope)
+{
+ m_Scope = scope;
+}
+
+void ConfigItemBuilder::SetZone(const String& zone)
+{
+ m_Zone = zone;
+}
+
+void ConfigItemBuilder::SetPackage(const String& package)
+{
+ m_Package = package;
+}
+
+void ConfigItemBuilder::AddExpression(Expression *expr)
+{
+ m_Expressions.emplace_back(expr);
+}
+
+void ConfigItemBuilder::SetFilter(const Expression::Ptr& filter)
+{
+ m_Filter = filter;
+}
+
+void ConfigItemBuilder::SetDefaultTemplate(bool defaultTmpl)
+{
+ m_DefaultTmpl = defaultTmpl;
+}
+
+void ConfigItemBuilder::SetIgnoreOnError(bool ignoreOnError)
+{
+ m_IgnoreOnError = ignoreOnError;
+}
+
+ConfigItem::Ptr ConfigItemBuilder::Compile()
+{
+ if (!m_Type) {
+ std::ostringstream msgbuf;
+ msgbuf << "The type of an object must be specified";
+ BOOST_THROW_EXCEPTION(ScriptError(msgbuf.str(), m_DebugInfo));
+ }
+
+ auto *ctype = dynamic_cast<ConfigType *>(m_Type.get());
+
+ if (!ctype) {
+ std::ostringstream msgbuf;
+ msgbuf << "The type '" + m_Type->GetName() + "' cannot be used for config objects";
+ BOOST_THROW_EXCEPTION(ScriptError(msgbuf.str(), m_DebugInfo));
+ }
+
+ if (m_Name.FindFirstOf("!") != String::NPos) {
+ std::ostringstream msgbuf;
+ msgbuf << "Name for object '" << m_Name << "' of type '" << m_Type->GetName() << "' is invalid: Object names may not contain '!'";
+ BOOST_THROW_EXCEPTION(ScriptError(msgbuf.str(), m_DebugInfo));
+ }
+
+ std::vector<std::unique_ptr<Expression> > exprs;
+
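+	/* Every object implicitly gets its own name added to its 'templates' attribute. */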
+ Array::Ptr templateArray = new Array({ m_Name });
+
+ exprs.emplace_back(new SetExpression(MakeIndexer(ScopeThis, "templates"), OpSetAdd,
+ std::unique_ptr<LiteralExpression>(new LiteralExpression(templateArray)), m_DebugInfo));
+
+#ifdef I2_DEBUG
+ if (!m_Abstract) {
+ bool foundDefaultImport = false;
+
+ for (const std::unique_ptr<Expression>& expr : m_Expressions) {
+ if (dynamic_cast<ImportDefaultTemplatesExpression *>(expr.get())) {
+ foundDefaultImport = true;
+ break;
+ }
+ }
+
+ ASSERT(foundDefaultImport);
+ }
+#endif /* I2_DEBUG */
+
+ auto *dexpr = new DictExpression(std::move(m_Expressions), m_DebugInfo);
+ dexpr->MakeInline();
+ exprs.emplace_back(dexpr);
+
+ auto exprl = new DictExpression(std::move(exprs), m_DebugInfo);
+ exprl->MakeInline();
+
+ return new ConfigItem(m_Type, m_Name, m_Abstract, exprl, m_Filter,
+ m_DefaultTmpl, m_IgnoreOnError, m_DebugInfo, m_Scope, m_Zone, m_Package);
+}
+
diff --git a/lib/config/configitembuilder.hpp b/lib/config/configitembuilder.hpp
new file mode 100644
index 0000000..9d2e339
--- /dev/null
+++ b/lib/config/configitembuilder.hpp
@@ -0,0 +1,58 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CONFIGITEMBUILDER_H
+#define CONFIGITEMBUILDER_H
+
+#include "config/expression.hpp"
+#include "config/configitem.hpp"
+#include "base/debuginfo.hpp"
+#include "base/object.hpp"
+
+namespace icinga
+{
+
+/**
+ * Config item builder. Used to dynamically build configuration objects
+ * at runtime.
+ *
+ * @ingroup config
+ */
+class ConfigItemBuilder final
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ConfigItemBuilder);
+
+ ConfigItemBuilder() = default;
+ explicit ConfigItemBuilder(const DebugInfo& debugInfo);
+
+ void SetType(const Type::Ptr& type);
+ void SetName(const String& name);
+ void SetAbstract(bool abstract);
+ void SetScope(const Dictionary::Ptr& scope);
+ void SetZone(const String& zone);
+ void SetPackage(const String& package);
+ void SetDefaultTemplate(bool defaultTmpl);
+ void SetIgnoreOnError(bool ignoreOnError);
+
+ void AddExpression(Expression *expr);
+ void SetFilter(const Expression::Ptr& filter);
+
+ ConfigItem::Ptr Compile();
+
+private:
+ Type::Ptr m_Type; /**< The object type. */
+ String m_Name; /**< The name. */
+ bool m_Abstract{false}; /**< Whether the item is abstract. */
+ std::vector<std::unique_ptr<Expression> > m_Expressions; /**< Expressions for this item. */
+ Expression::Ptr m_Filter; /**< Filter expression. */
+ DebugInfo m_DebugInfo; /**< Debug information. */
+ Dictionary::Ptr m_Scope; /**< variable scope. */
+ String m_Zone; /**< The zone. */
+ String m_Package; /**< The package name. */
+ bool m_DefaultTmpl{false};
+ bool m_IgnoreOnError{false}; /**< Whether the object should be ignored when an error occurs in one of the expressions. */
+};
+
+}
+
+#endif /* CONFIGITEMBUILDER_H */
diff --git a/lib/config/expression.cpp b/lib/config/expression.cpp
new file mode 100644
index 0000000..a8e9986
--- /dev/null
+++ b/lib/config/expression.cpp
@@ -0,0 +1,1068 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "config/expression.hpp"
+#include "config/configitem.hpp"
+#include "config/configcompiler.hpp"
+#include "config/vmops.hpp"
+#include "base/array.hpp"
+#include "base/json.hpp"
+#include "base/object.hpp"
+#include "base/logger.hpp"
+#include "base/exception.hpp"
+#include "base/scriptglobal.hpp"
+#include "base/loader.hpp"
+#include "base/reference.hpp"
+#include "base/namespace.hpp"
+#include "base/defer.hpp"
+#include <boost/exception_ptr.hpp>
+#include <boost/exception/errinfo_nested_exception.hpp>
+
+using namespace icinga;
+
+boost::signals2::signal<void (ScriptFrame&, ScriptError *ex, const DebugInfo&)> Expression::OnBreakpoint;
+boost::thread_specific_ptr<bool> l_InBreakpointHandler;
+
+Expression::~Expression()
+{ }
+
+void Expression::ScriptBreakpoint(ScriptFrame& frame, ScriptError *ex, const DebugInfo& di)
+{
+ bool *inHandler = l_InBreakpointHandler.get();
+ if (!inHandler || !*inHandler) {
+ inHandler = new bool(true);
+ l_InBreakpointHandler.reset(inHandler);
+ OnBreakpoint(frame, ex, di);
+ *inHandler = false;
+ }
+}
+
+ExpressionResult Expression::Evaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ try {
+#ifdef I2_DEBUG
+/* std::ostringstream msgbuf;
+ ShowCodeLocation(msgbuf, GetDebugInfo(), false);
+ Log(LogDebug, "Expression")
+ << "Executing:\n" << msgbuf.str();*/
+#endif /* I2_DEBUG */
+
+ frame.IncreaseStackDepth();
+
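+		/* Track the evaluation depth; the Defer below decreases it again even if evaluation throws. */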
+ Defer decreaseStackDepth([&frame]{
+ frame.DecreaseStackDepth();
+ });
+
+ ExpressionResult result = DoEvaluate(frame, dhint);
+ return result;
+ } catch (ScriptError& ex) {
+ ScriptBreakpoint(frame, &ex, GetDebugInfo());
+ throw;
+ } catch (const std::exception& ex) {
+ BOOST_THROW_EXCEPTION(ScriptError("Error while evaluating expression: " + String(ex.what()), GetDebugInfo())
+ << boost::errinfo_nested_exception(boost::current_exception()));
+ }
+}
+
+bool Expression::GetReference(ScriptFrame& frame, bool init_dict, Value *parent, String *index, DebugHint **dhint) const
+{
+ return false;
+}
+
+const DebugInfo& Expression::GetDebugInfo() const
+{
+ static DebugInfo debugInfo;
+ return debugInfo;
+}
+
+std::unique_ptr<Expression> icinga::MakeIndexer(ScopeSpecifier scopeSpec, const String& index)
+{
+ std::unique_ptr<Expression> scope{new GetScopeExpression(scopeSpec)};
+ return std::unique_ptr<Expression>(new IndexerExpression(std::move(scope), MakeLiteral(index)));
+}
+
+void DictExpression::MakeInline()
+{
+ m_Inline = true;
+}
+
+LiteralExpression::LiteralExpression(Value value)
+ : m_Value(std::move(value))
+{ }
+
+ExpressionResult LiteralExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ return m_Value;
+}
+
+const DebugInfo& DebuggableExpression::GetDebugInfo() const
+{
+ return m_DebugInfo;
+}
+
+VariableExpression::VariableExpression(String variable, std::vector<Expression::Ptr> imports, const DebugInfo& debugInfo)
+ : DebuggableExpression(debugInfo), m_Variable(std::move(variable)), m_Imports(std::move(imports))
+{
+ m_Imports.push_back(MakeIndexer(ScopeGlobal, "System").release());
+ m_Imports.push_back(new IndexerExpression(MakeIndexer(ScopeGlobal, "System"), MakeLiteral("Configuration")));
+ m_Imports.push_back(MakeIndexer(ScopeGlobal, "Types").release());
+ m_Imports.push_back(MakeIndexer(ScopeGlobal, "Icinga").release());
+}
+
+ExpressionResult VariableExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ Value value;
+
+ if (frame.Locals && frame.Locals->Get(m_Variable, &value))
+ return value;
+ else if (frame.Self.IsObject() && frame.Locals != frame.Self.Get<Object::Ptr>() && frame.Self.Get<Object::Ptr>()->GetOwnField(m_Variable, &value))
+ return value;
+ else if (VMOps::FindVarImport(frame, m_Imports, m_Variable, &value, m_DebugInfo))
+ return value;
+ else
+ return ScriptGlobal::Get(m_Variable);
+}
+
+bool VariableExpression::GetReference(ScriptFrame& frame, bool init_dict, Value *parent, String *index, DebugHint **dhint) const
+{
+ *index = m_Variable;
+
+ if (frame.Locals && frame.Locals->Contains(m_Variable)) {
+ *parent = frame.Locals;
+
+ if (dhint)
+ *dhint = nullptr;
+ } else if (frame.Self.IsObject() && frame.Locals != frame.Self.Get<Object::Ptr>() && frame.Self.Get<Object::Ptr>()->HasOwnField(m_Variable)) {
+ *parent = frame.Self;
+
+ if (dhint && *dhint)
+ *dhint = new DebugHint((*dhint)->GetChild(m_Variable));
+ } else if (VMOps::FindVarImportRef(frame, m_Imports, m_Variable, parent, m_DebugInfo)) {
+ return true;
+ } else if (ScriptGlobal::Exists(m_Variable)) {
+ *parent = ScriptGlobal::GetGlobals();
+
+ if (dhint)
+ *dhint = nullptr;
+ } else
+ *parent = frame.Self;
+
+ return true;
+}
+
+ExpressionResult RefExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ Value parent;
+ String index;
+
+ if (!m_Operand->GetReference(frame, false, &parent, &index, &dhint))
+ BOOST_THROW_EXCEPTION(ScriptError("Cannot obtain reference for expression.", m_DebugInfo));
+
+ if (!parent.IsObject())
+ BOOST_THROW_EXCEPTION(ScriptError("Cannot obtain reference for expression because parent is not an object.", m_DebugInfo));
+
+ return new Reference(parent, index);
+}
+
+ExpressionResult DerefExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand = m_Operand->Evaluate(frame);
+ CHECK_RESULT(operand);
+
+ Object::Ptr obj = operand.GetValue();
+ Reference::Ptr ref = dynamic_pointer_cast<Reference>(obj);
+
+ if (!ref)
+ BOOST_THROW_EXCEPTION(ScriptError("Invalid reference specified.", GetDebugInfo()));
+
+ return ref->Get();
+}
+
+bool DerefExpression::GetReference(ScriptFrame& frame, bool init_dict, Value *parent, String *index, DebugHint **dhint) const
+{
+ ExpressionResult operand = m_Operand->Evaluate(frame);
+ if (operand.GetCode() != ResultOK)
+ return false;
+
+ Reference::Ptr ref = operand.GetValue();
+
+ *parent = ref->GetParent();
+ *index = ref->GetIndex();
+ return true;
+}
+
+ExpressionResult NegateExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand = m_Operand->Evaluate(frame);
+ CHECK_RESULT(operand);
+
+ return ~(long)operand.GetValue();
+}
+
+ExpressionResult LogicalNegateExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand = m_Operand->Evaluate(frame);
+ CHECK_RESULT(operand);
+
+ return !operand.GetValue().ToBool();
+}
+
+ExpressionResult AddExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame);
+ CHECK_RESULT(operand1);
+
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame);
+ CHECK_RESULT(operand2);
+
+ return operand1.GetValue() + operand2.GetValue();
+}
+
+ExpressionResult SubtractExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame);
+ CHECK_RESULT(operand1);
+
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame);
+ CHECK_RESULT(operand2);
+
+ return operand1.GetValue() - operand2.GetValue();
+}
+
+ExpressionResult MultiplyExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame);
+ CHECK_RESULT(operand1);
+
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame);
+ CHECK_RESULT(operand2);
+
+ return operand1.GetValue() * operand2.GetValue();
+}
+
+ExpressionResult DivideExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame);
+ CHECK_RESULT(operand1);
+
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame);
+ CHECK_RESULT(operand2);
+
+ return operand1.GetValue() / operand2.GetValue();
+}
+
+ExpressionResult ModuloExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame);
+ CHECK_RESULT(operand1);
+
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame);
+ CHECK_RESULT(operand2);
+
+ return operand1.GetValue() % operand2.GetValue();
+}
+
+ExpressionResult XorExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame);
+ CHECK_RESULT(operand1);
+
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame);
+ CHECK_RESULT(operand2);
+
+ return operand1.GetValue() ^ operand2.GetValue();
+}
+
+ExpressionResult BinaryAndExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame);
+ CHECK_RESULT(operand1);
+
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame);
+ CHECK_RESULT(operand2);
+
+ return operand1.GetValue() & operand2.GetValue();
+}
+
+ExpressionResult BinaryOrExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame);
+ CHECK_RESULT(operand1);
+
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame);
+ CHECK_RESULT(operand2);
+
+ return operand1.GetValue() | operand2.GetValue();
+}
+
+ExpressionResult ShiftLeftExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame);
+ CHECK_RESULT(operand1);
+
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame);
+ CHECK_RESULT(operand2);
+
+ return operand1.GetValue() << operand2.GetValue();
+}
+
+ExpressionResult ShiftRightExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame);
+ CHECK_RESULT(operand1);
+
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame);
+ CHECK_RESULT(operand2);
+
+ return operand1.GetValue() >> operand2.GetValue();
+}
+
+ExpressionResult EqualExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame);
+ CHECK_RESULT(operand1);
+
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame);
+ CHECK_RESULT(operand2);
+
+ return operand1.GetValue() == operand2.GetValue();
+}
+
+ExpressionResult NotEqualExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame);
+ CHECK_RESULT(operand1);
+
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame);
+ CHECK_RESULT(operand2);
+
+ return operand1.GetValue() != operand2.GetValue();
+}
+
+ExpressionResult LessThanExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame);
+ CHECK_RESULT(operand1);
+
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame);
+ CHECK_RESULT(operand2);
+
+ return operand1.GetValue() < operand2.GetValue();
+}
+
+ExpressionResult GreaterThanExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame);
+ CHECK_RESULT(operand1);
+
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame);
+ CHECK_RESULT(operand2);
+
+ return operand1.GetValue() > operand2.GetValue();
+}
+
+ExpressionResult LessThanOrEqualExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame);
+ CHECK_RESULT(operand1);
+
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame);
+ CHECK_RESULT(operand2);
+
+ return operand1.GetValue() <= operand2.GetValue();
+}
+
+ExpressionResult GreaterThanOrEqualExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame);
+ CHECK_RESULT(operand1);
+
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame);
+ CHECK_RESULT(operand2);
+
+ return operand1.GetValue() >= operand2.GetValue();
+}
+
+ExpressionResult InExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame);
+ CHECK_RESULT(operand2);
+
+ if (operand2.GetValue().IsEmpty())
+ return false;
+ else if (!operand2.GetValue().IsObjectType<Array>())
+ BOOST_THROW_EXCEPTION(ScriptError("Invalid right side argument for 'in' operator: " + JsonEncode(operand2.GetValue()), m_DebugInfo));
+
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame);
+ CHECK_RESULT(operand1)
+
+ Array::Ptr arr = operand2.GetValue();
+ return arr->Contains(operand1.GetValue());
+}
+
+ExpressionResult NotInExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame);
+ CHECK_RESULT(operand2);
+
+ if (operand2.GetValue().IsEmpty())
+ return true;
+ else if (!operand2.GetValue().IsObjectType<Array>())
+ BOOST_THROW_EXCEPTION(ScriptError("Invalid right side argument for 'in' operator: " + JsonEncode(operand2.GetValue()), m_DebugInfo));
+
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame);
+ CHECK_RESULT(operand1);
+
+ Array::Ptr arr = operand2.GetValue();
+ return !arr->Contains(operand1.GetValue());
+}
+
+ExpressionResult LogicalAndExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame);
+ CHECK_RESULT(operand1);
+
+ if (!operand1.GetValue().ToBool())
+ return operand1;
+ else {
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame);
+ CHECK_RESULT(operand2);
+
+ return operand2.GetValue();
+ }
+}
+
+ExpressionResult LogicalOrExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame);
+ CHECK_RESULT(operand1);
+
+ if (operand1.GetValue().ToBool())
+ return operand1;
+ else {
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame);
+ CHECK_RESULT(operand2);
+
+ return operand2.GetValue();
+ }
+}
+
+ExpressionResult FunctionCallExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ Value self, vfunc;
+ String index;
+
+ if (m_FName->GetReference(frame, false, &self, &index))
+ vfunc = VMOps::GetField(self, index, frame.Sandboxed, m_DebugInfo);
+ else {
+ ExpressionResult vfuncres = m_FName->Evaluate(frame);
+ CHECK_RESULT(vfuncres);
+
+ vfunc = vfuncres.GetValue();
+ }
+
+ if (vfunc.IsObjectType<Type>()) {
+ std::vector<Value> arguments;
+ arguments.reserve(m_Args.size());
+ for (const auto& arg : m_Args) {
+ ExpressionResult argres = arg->Evaluate(frame);
+ CHECK_RESULT(argres);
+
+ arguments.push_back(argres.GetValue());
+ }
+
+ return VMOps::ConstructorCall(vfunc, arguments, m_DebugInfo);
+ }
+
+ if (!vfunc.IsObjectType<Function>())
+ BOOST_THROW_EXCEPTION(ScriptError("Argument is not a callable object.", m_DebugInfo));
+
+ Function::Ptr func = vfunc;
+
+ if (!func->IsSideEffectFree() && frame.Sandboxed)
+ BOOST_THROW_EXCEPTION(ScriptError("Function is not marked as safe for sandbox mode.", m_DebugInfo));
+
+ std::vector<Value> arguments;
+ arguments.reserve(m_Args.size());
+ for (const auto& arg : m_Args) {
+ ExpressionResult argres = arg->Evaluate(frame);
+ CHECK_RESULT(argres);
+
+ arguments.push_back(argres.GetValue());
+ }
+
+ return VMOps::FunctionCall(frame, self, func, arguments);
+}
+
+ExpressionResult ArrayExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ArrayData result;
+ result.reserve(m_Expressions.size());
+
+ for (const auto& aexpr : m_Expressions) {
+ ExpressionResult element = aexpr->Evaluate(frame);
+ CHECK_RESULT(element);
+
+ result.push_back(element.GetValue());
+ }
+
+ return new Array(std::move(result));
+}
+
+ExpressionResult DictExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ Value self;
+
+ if (!m_Inline) {
+ self = frame.Self;
+ frame.Self = new Dictionary();
+ }
+
+ Value result;
+
+ try {
+ for (const auto& aexpr : m_Expressions) {
+ ExpressionResult element = aexpr->Evaluate(frame, m_Inline ? dhint : nullptr);
+ CHECK_RESULT(element);
+ result = element.GetValue();
+ }
+ } catch (...) {
+ if (!m_Inline)
+ std::swap(self, frame.Self);
+ throw;
+ }
+
+ if (m_Inline)
+ return result;
+ else {
+ std::swap(self, frame.Self);
+ return self;
+ }
+}
+
+ExpressionResult GetScopeExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ if (m_ScopeSpec == ScopeLocal)
+ return frame.Locals;
+ else if (m_ScopeSpec == ScopeThis)
+ return frame.Self;
+ else if (m_ScopeSpec == ScopeGlobal)
+ return ScriptGlobal::GetGlobals();
+ else
+ VERIFY(!"Invalid scope.");
+}
+
+static inline
+void WarnOnImplicitlySetGlobalVar(const std::unique_ptr<Expression>& setLhs, const Value& setLhsParent, CombinedSetOp setOp, const DebugInfo& debug)
+{
+ auto var (dynamic_cast<VariableExpression*>(setLhs.get()));
+
+ if (var && setLhsParent.IsObject()) {
+ auto ns (dynamic_pointer_cast<Namespace>(setLhsParent.Get<Object::Ptr>()));
+
+ if (ns && ns == ScriptGlobal::GetGlobals() && debug.Path.GetLength()) {
+ const char *opStr = nullptr;
+
+ switch (setOp) {
+ case OpSetLiteral:
+ opStr = "=";
+ break;
+ case OpSetAdd:
+ opStr = "+=";
+ break;
+ case OpSetSubtract:
+ opStr = "-=";
+ break;
+ case OpSetMultiply:
+ opStr = "*=";
+ break;
+ case OpSetDivide:
+ opStr = "/=";
+ break;
+ case OpSetModulo:
+ opStr = "%=";
+ break;
+ case OpSetXor:
+ opStr = "^=";
+ break;
+ case OpSetBinaryAnd:
+ opStr = "&=";
+ break;
+ case OpSetBinaryOr:
+ opStr = "|=";
+ break;
+ default:
+ VERIFY(!"Invalid opcode.");
+ }
+
+ auto varName (var->GetVariable());
+
+ Log(LogWarning, "config")
+ << "Global variable '" << varName << "' has been set implicitly via '" << varName << ' ' << opStr << " ...' " << debug << "."
+ " Please set it explicitly via 'globals." << varName << ' ' << opStr << " ...' instead.";
+ }
+ }
+}
+
+ExpressionResult SetExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ if (frame.Sandboxed)
+ BOOST_THROW_EXCEPTION(ScriptError("Assignments are not allowed in sandbox mode.", m_DebugInfo));
+
+ DebugHint *psdhint = dhint;
+
+ Value parent;
+ String index;
+
+ if (!m_Operand1->GetReference(frame, true, &parent, &index, &psdhint))
+ BOOST_THROW_EXCEPTION(ScriptError("Expression cannot be assigned to.", m_DebugInfo));
+
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame, dhint);
+ CHECK_RESULT(operand2);
+
+ if (m_Op != OpSetLiteral) {
+ Value object = VMOps::GetField(parent, index, frame.Sandboxed, m_DebugInfo);
+
+ switch (m_Op) {
+ case OpSetAdd:
+ operand2 = object + operand2;
+ break;
+ case OpSetSubtract:
+ operand2 = object - operand2;
+ break;
+ case OpSetMultiply:
+ operand2 = object * operand2;
+ break;
+ case OpSetDivide:
+ operand2 = object / operand2;
+ break;
+ case OpSetModulo:
+ operand2 = object % operand2;
+ break;
+ case OpSetXor:
+ operand2 = object ^ operand2;
+ break;
+ case OpSetBinaryAnd:
+ operand2 = object & operand2;
+ break;
+ case OpSetBinaryOr:
+ operand2 = object | operand2;
+ break;
+ default:
+ VERIFY(!"Invalid opcode.");
+ }
+ }
+
+ VMOps::SetField(parent, index, operand2.GetValue(), m_OverrideFrozen, m_DebugInfo);
+
+ if (psdhint) {
+ psdhint->AddMessage("=", m_DebugInfo);
+
+ if (psdhint != dhint)
+ delete psdhint;
+ }
+
+ WarnOnImplicitlySetGlobalVar(m_Operand1, parent, m_Op, m_DebugInfo);
+
+ return Empty;
+}
+
+void SetExpression::SetOverrideFrozen()
+{
+ m_OverrideFrozen = true;
+}
+
+ExpressionResult SetConstExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ auto globals = ScriptGlobal::GetGlobals();
+
+ ExpressionResult operandres = m_Operand->Evaluate(frame);
+ CHECK_RESULT(operandres);
+ Value operand = operandres.GetValue();
+
+ globals->Set(m_Name, operand, true);
+
+ return Empty;
+}
+
+ExpressionResult ConditionalExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult condition = m_Condition->Evaluate(frame, dhint);
+ CHECK_RESULT(condition);
+
+ if (condition.GetValue().ToBool())
+ return m_TrueBranch->Evaluate(frame, dhint);
+ else if (m_FalseBranch)
+ return m_FalseBranch->Evaluate(frame, dhint);
+
+ return Empty;
+}
+
+ExpressionResult WhileExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ if (frame.Sandboxed)
+ BOOST_THROW_EXCEPTION(ScriptError("While loops are not allowed in sandbox mode.", m_DebugInfo));
+
+ for (;;) {
+ ExpressionResult condition = m_Condition->Evaluate(frame, dhint);
+ CHECK_RESULT(condition);
+
+ if (!condition.GetValue().ToBool())
+ break;
+
+ ExpressionResult loop_body = m_LoopBody->Evaluate(frame, dhint);
+ CHECK_RESULT_LOOP(loop_body);
+ }
+
+ return Empty;
+}
+
+ExpressionResult ReturnExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand = m_Operand->Evaluate(frame);
+ CHECK_RESULT(operand);
+
+ return ExpressionResult(operand.GetValue(), ResultReturn);
+}
+
+ExpressionResult BreakExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ return ExpressionResult(Empty, ResultBreak);
+}
+
+ExpressionResult ContinueExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ return ExpressionResult(Empty, ResultContinue);
+}
+
+ExpressionResult IndexerExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame, dhint);
+ CHECK_RESULT(operand1);
+
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame, dhint);
+ CHECK_RESULT(operand2);
+
+ return VMOps::GetField(operand1.GetValue(), operand2.GetValue(), frame.Sandboxed, m_DebugInfo);
+}
+
+bool IndexerExpression::GetReference(ScriptFrame& frame, bool init_dict, Value *parent, String *index, DebugHint **dhint) const
+{
+ Value vparent;
+ String vindex;
+ DebugHint *psdhint = nullptr;
+ bool free_psd = false;
+
+ if (dhint)
+ psdhint = *dhint;
+
+ if (frame.Sandboxed)
+ init_dict = false;
+
+ if (m_Operand1->GetReference(frame, init_dict, &vparent, &vindex, &psdhint)) {
+ if (init_dict) {
+ Value old_value;
+ bool has_field = true;
+
+ if (vparent.IsObject()) {
+ Object::Ptr oparent = vparent;
+ has_field = oparent->HasOwnField(vindex);
+ }
+
+ if (has_field)
+ old_value = VMOps::GetField(vparent, vindex, frame.Sandboxed, m_Operand1->GetDebugInfo());
+
+ if (old_value.IsEmpty() && !old_value.IsString())
+ VMOps::SetField(vparent, vindex, new Dictionary(), m_OverrideFrozen, m_Operand1->GetDebugInfo());
+ }
+
+ *parent = VMOps::GetField(vparent, vindex, frame.Sandboxed, m_DebugInfo);
+ free_psd = true;
+ } else {
+ ExpressionResult operand1 = m_Operand1->Evaluate(frame);
+ *parent = operand1.GetValue();
+ }
+
+ ExpressionResult operand2 = m_Operand2->Evaluate(frame);
+ *index = operand2.GetValue();
+
+ if (dhint) {
+ if (psdhint)
+ *dhint = new DebugHint(psdhint->GetChild(*index));
+ else
+ *dhint = nullptr;
+ }
+
+ if (free_psd)
+ delete psdhint;
+
+ return true;
+}
+
+void IndexerExpression::SetOverrideFrozen()
+{
+ m_OverrideFrozen = true;
+}
+
+void icinga::BindToScope(std::unique_ptr<Expression>& expr, ScopeSpecifier scopeSpec)
+{
+ auto *dexpr = dynamic_cast<DictExpression *>(expr.get());
+
+ if (dexpr) {
+ for (auto& expr : dexpr->m_Expressions)
+ BindToScope(expr, scopeSpec);
+
+ return;
+ }
+
+ auto *aexpr = dynamic_cast<SetExpression *>(expr.get());
+
+ if (aexpr) {
+ BindToScope(aexpr->m_Operand1, scopeSpec);
+
+ return;
+ }
+
+ auto *iexpr = dynamic_cast<IndexerExpression *>(expr.get());
+
+ if (iexpr) {
+ BindToScope(iexpr->m_Operand1, scopeSpec);
+ return;
+ }
+
+ auto *lexpr = dynamic_cast<LiteralExpression *>(expr.get());
+
+ if (lexpr && lexpr->GetValue().IsString()) {
+ std::unique_ptr<Expression> scope{new GetScopeExpression(scopeSpec)};
+ expr.reset(new IndexerExpression(std::move(scope), std::move(expr), lexpr->GetDebugInfo()));
+ }
+
+ auto *vexpr = dynamic_cast<VariableExpression *>(expr.get());
+
+ if (vexpr) {
+ std::unique_ptr<Expression> scope{new GetScopeExpression(scopeSpec)};
+ expr.reset(new IndexerExpression(std::move(scope), MakeLiteral(vexpr->GetVariable()), vexpr->GetDebugInfo()));
+ }
+}
+
+ExpressionResult ThrowExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ExpressionResult messageres = m_Message->Evaluate(frame);
+ CHECK_RESULT(messageres);
+ Value message = messageres.GetValue();
+ BOOST_THROW_EXCEPTION(ScriptError(message, m_DebugInfo, m_IncompleteExpr));
+}
+
+ExpressionResult ImportExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ if (frame.Sandboxed)
+ BOOST_THROW_EXCEPTION(ScriptError("Imports are not allowed in sandbox mode.", m_DebugInfo));
+
+ String type = VMOps::GetField(frame.Self, "type", frame.Sandboxed, m_DebugInfo);
+ ExpressionResult nameres = m_Name->Evaluate(frame);
+ CHECK_RESULT(nameres);
+ Value name = nameres.GetValue();
+
+ if (!name.IsString())
+ BOOST_THROW_EXCEPTION(ScriptError("Template/object name must be a string", m_DebugInfo));
+
+ ConfigItem::Ptr item = ConfigItem::GetByTypeAndName(Type::GetByName(type), name);
+
+ if (!item)
+ BOOST_THROW_EXCEPTION(ScriptError("Import references unknown template: '" + name + "'", m_DebugInfo));
+
+ Dictionary::Ptr scope = item->GetScope();
+
+ if (scope)
+ scope->CopyTo(frame.Locals);
+
+ ExpressionResult result = item->GetExpression()->Evaluate(frame, dhint);
+ CHECK_RESULT(result);
+
+ return Empty;
+}
+
+ExpressionResult ImportDefaultTemplatesExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ if (frame.Sandboxed)
+ BOOST_THROW_EXCEPTION(ScriptError("Imports are not allowed in sandbox mode.", m_DebugInfo));
+
+ String type = VMOps::GetField(frame.Self, "type", frame.Sandboxed, m_DebugInfo);
+ Type::Ptr ptype = Type::GetByName(type);
+
+ for (const ConfigItem::Ptr& item : ConfigItem::GetDefaultTemplates(ptype)) {
+ Dictionary::Ptr scope = item->GetScope();
+
+ if (scope)
+ scope->CopyTo(frame.Locals);
+
+ ExpressionResult result = item->GetExpression()->Evaluate(frame, dhint);
+ CHECK_RESULT(result);
+ }
+
+ return Empty;
+}
+
+ExpressionResult FunctionExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ return VMOps::NewFunction(frame, m_Name, m_Args, m_ClosedVars, m_Expression);
+}
+
+ExpressionResult ApplyExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ if (frame.Sandboxed)
+ BOOST_THROW_EXCEPTION(ScriptError("Apply rules are not allowed in sandbox mode.", m_DebugInfo));
+
+ ExpressionResult nameres = m_Name->Evaluate(frame);
+ CHECK_RESULT(nameres);
+
+ return VMOps::NewApply(frame, m_Type, m_Target, nameres.GetValue(), m_Filter,
+ m_Package, m_FKVar, m_FVVar, m_FTerm, m_ClosedVars, m_IgnoreOnError, m_Expression, m_DebugInfo);
+}
+
+ExpressionResult NamespaceExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ Namespace::Ptr ns = new Namespace(true);
+
+ ScriptFrame innerFrame(true, ns);
+ ExpressionResult result = m_Expression->Evaluate(innerFrame);
+ CHECK_RESULT(result);
+
+ return ns;
+}
+
+ExpressionResult ObjectExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ if (frame.Sandboxed)
+ BOOST_THROW_EXCEPTION(ScriptError("Object definitions are not allowed in sandbox mode.", m_DebugInfo));
+
+ ExpressionResult typeres = m_Type->Evaluate(frame, dhint);
+ CHECK_RESULT(typeres);
+ Type::Ptr type = typeres.GetValue();
+
+ String name;
+
+ if (m_Name) {
+ ExpressionResult nameres = m_Name->Evaluate(frame, dhint);
+ CHECK_RESULT(nameres);
+
+ name = nameres.GetValue();
+ }
+
+ return VMOps::NewObject(frame, m_Abstract, type, name, m_Filter, m_Zone,
+ m_Package, m_DefaultTmpl, m_IgnoreOnError, m_ClosedVars, m_Expression, m_DebugInfo);
+}
+
+ExpressionResult ForExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ if (frame.Sandboxed)
+ BOOST_THROW_EXCEPTION(ScriptError("For loops are not allowed in sandbox mode.", m_DebugInfo));
+
+ ExpressionResult valueres = m_Value->Evaluate(frame, dhint);
+ CHECK_RESULT(valueres);
+
+ return VMOps::For(frame, m_FKVar, m_FVVar, valueres.GetValue(), m_Expression, m_DebugInfo);
+}
+
+ExpressionResult LibraryExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ if (frame.Sandboxed)
+ BOOST_THROW_EXCEPTION(ScriptError("Loading libraries is not allowed in sandbox mode.", m_DebugInfo));
+
+ ExpressionResult libres = m_Operand->Evaluate(frame, dhint);
+ CHECK_RESULT(libres);
+
+ Log(LogNotice, "config")
+ << "Ignoring explicit load request for library \"" << libres << "\".";
+
+ return Empty;
+}
+
+ExpressionResult IncludeExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ if (frame.Sandboxed)
+ BOOST_THROW_EXCEPTION(ScriptError("Includes are not allowed in sandbox mode.", m_DebugInfo));
+
+ std::unique_ptr<Expression> expr;
+ String name, path, pattern;
+
+ switch (m_Type) {
+ case IncludeRegular:
+ {
+ ExpressionResult pathres = m_Path->Evaluate(frame, dhint);
+ CHECK_RESULT(pathres);
+ path = pathres.GetValue();
+ }
+
+ expr = ConfigCompiler::HandleInclude(m_RelativeBase, path, m_SearchIncludes, m_Zone, m_Package, m_DebugInfo);
+ break;
+
+ case IncludeRecursive:
+ {
+ ExpressionResult pathres = m_Path->Evaluate(frame, dhint);
+ CHECK_RESULT(pathres);
+ path = pathres.GetValue();
+ }
+
+ {
+ ExpressionResult patternres = m_Pattern->Evaluate(frame, dhint);
+ CHECK_RESULT(patternres);
+ pattern = patternres.GetValue();
+ }
+
+ expr = ConfigCompiler::HandleIncludeRecursive(m_RelativeBase, path, pattern, m_Zone, m_Package, m_DebugInfo);
+ break;
+
+ case IncludeZones:
+ {
+ ExpressionResult nameres = m_Name->Evaluate(frame, dhint);
+ CHECK_RESULT(nameres);
+ name = nameres.GetValue();
+ }
+
+ {
+ ExpressionResult pathres = m_Path->Evaluate(frame, dhint);
+ CHECK_RESULT(pathres);
+ path = pathres.GetValue();
+ }
+
+ {
+ ExpressionResult patternres = m_Pattern->Evaluate(frame, dhint);
+ CHECK_RESULT(patternres);
+ pattern = patternres.GetValue();
+ }
+
+ expr = ConfigCompiler::HandleIncludeZones(m_RelativeBase, name, path, pattern, m_Package, m_DebugInfo);
+ break;
+ }
+
+ ExpressionResult res(Empty);
+
+ try {
+ res = expr->Evaluate(frame, dhint);
+ } catch (const std::exception&) {
+ throw;
+ }
+
+ return res;
+}
+
+ExpressionResult BreakpointExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ ScriptBreakpoint(frame, nullptr, GetDebugInfo());
+
+ return Empty;
+}
+
+ExpressionResult TryExceptExpression::DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const
+{
+ try {
+ ExpressionResult tryResult = m_TryBody->Evaluate(frame, dhint);
+ CHECK_RESULT(tryResult);
+ } catch (const std::exception&) {
+ ExpressionResult exceptResult = m_ExceptBody->Evaluate(frame, dhint);
+ CHECK_RESULT(exceptResult);
+ }
+
+ return Empty;
+}
+
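Expression::OnBreakpoint, raised by ScriptBreakpoint() above, is an ordinary boost::signals2 signal, so a debugger or console front end can subscribe to it. A minimal sketch; the handler and the "DebugHook" log facility are illustrative:

// Sketch: log every script breakpoint hit during expression evaluation.
#include "config/expression.hpp"
#include "base/logger.hpp"

using namespace icinga;

void RegisterBreakpointLogger()
{
	Expression::OnBreakpoint.connect([](ScriptFrame&, ScriptError *ex, const DebugInfo& di) {
		Log(LogNotice, "DebugHook")
			<< "Breakpoint hit at " << di
			<< (ex ? " after a script error." : " (explicit breakpoint).");
	});
}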
diff --git a/lib/config/expression.hpp b/lib/config/expression.hpp
new file mode 100644
index 0000000..644548d
--- /dev/null
+++ b/lib/config/expression.hpp
@@ -0,0 +1,986 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef EXPRESSION_H
+#define EXPRESSION_H
+
+#include "config/i2-config.hpp"
+#include "base/debuginfo.hpp"
+#include "base/array.hpp"
+#include "base/dictionary.hpp"
+#include "base/function.hpp"
+#include "base/exception.hpp"
+#include "base/scriptframe.hpp"
+#include "base/shared-object.hpp"
+#include "base/convert.hpp"
+#include <map>
+
+namespace icinga
+{
+
+struct DebugHint
+{
+public:
+ DebugHint(Dictionary::Ptr hints = nullptr)
+ : m_Hints(std::move(hints))
+ { }
+
+ DebugHint(Dictionary::Ptr&& hints)
+ : m_Hints(std::move(hints))
+ { }
+
+ void AddMessage(const String& message, const DebugInfo& di)
+ {
+ GetMessages()->Add(new Array({ message, di.Path, di.FirstLine, di.FirstColumn, di.LastLine, di.LastColumn }));
+ }
+
+ DebugHint GetChild(const String& name)
+ {
+ const Dictionary::Ptr& children = GetChildren();
+
+ Value vchild;
+ Dictionary::Ptr child;
+
+ if (!children->Get(name, &vchild)) {
+ child = new Dictionary();
+ children->Set(name, child);
+ } else
+ child = vchild;
+
+ return DebugHint(child);
+ }
+
+ Dictionary::Ptr ToDictionary() const
+ {
+ return m_Hints;
+ }
+
+private:
+ Dictionary::Ptr m_Hints;
+ Array::Ptr m_Messages;
+ Dictionary::Ptr m_Children;
+
+ const Array::Ptr& GetMessages()
+ {
+ if (m_Messages)
+ return m_Messages;
+
+ if (!m_Hints)
+ m_Hints = new Dictionary();
+
+ Value vmessages;
+
+ if (!m_Hints->Get("messages", &vmessages)) {
+ m_Messages = new Array();
+ m_Hints->Set("messages", m_Messages);
+ } else
+ m_Messages = vmessages;
+
+ return m_Messages;
+ }
+
+ const Dictionary::Ptr& GetChildren()
+ {
+ if (m_Children)
+ return m_Children;
+
+ if (!m_Hints)
+ m_Hints = new Dictionary();
+
+ Value vchildren;
+
+ if (!m_Hints->Get("properties", &vchildren)) {
+ m_Children = new Dictionary();
+ m_Hints->Set("properties", m_Children);
+ } else
+ m_Children = vchildren;
+
+ return m_Children;
+ }
+};
+
+enum CombinedSetOp
+{
+ OpSetLiteral,
+ OpSetAdd,
+ OpSetSubtract,
+ OpSetMultiply,
+ OpSetDivide,
+ OpSetModulo,
+ OpSetXor,
+ OpSetBinaryAnd,
+ OpSetBinaryOr
+};
+
+enum ScopeSpecifier
+{
+ ScopeLocal,
+ ScopeThis,
+ ScopeGlobal
+};
+
+typedef std::map<String, String> DefinitionMap;
+
+/**
+ * @ingroup config
+ */
+enum ExpressionResultCode
+{
+ ResultOK,
+ ResultReturn,
+ ResultContinue,
+ ResultBreak
+};
+
+/**
+ * @ingroup config
+ */
+struct ExpressionResult
+{
+public:
+ template<typename T>
+ ExpressionResult(T value, ExpressionResultCode code = ResultOK)
+ : m_Value(std::move(value)), m_Code(code)
+ { }
+
+ operator const Value&() const
+ {
+ return m_Value;
+ }
+
+ const Value& GetValue() const
+ {
+ return m_Value;
+ }
+
+ ExpressionResultCode GetCode() const
+ {
+ return m_Code;
+ }
+
+private:
+ Value m_Value;
+ ExpressionResultCode m_Code;
+};
+
+#define CHECK_RESULT(res) \
+ do { \
+ if (res.GetCode() != ResultOK) \
+ return res; \
+ } while (0);
+
+#define CHECK_RESULT_LOOP(res) \
+ if (res.GetCode() == ResultReturn) \
+ return res; \
+ if (res.GetCode() == ResultContinue) \
+ continue; \
+ if (res.GetCode() == ResultBreak) \
+ break; \
+
+/**
+ * @ingroup config
+ */
+class Expression : public SharedObject
+{
+public:
+ DECLARE_PTR_TYPEDEFS(Expression);
+
+ Expression() = default;
+ Expression(const Expression&) = delete;
+ virtual ~Expression();
+
+ Expression& operator=(const Expression&) = delete;
+
+ ExpressionResult Evaluate(ScriptFrame& frame, DebugHint *dhint = nullptr) const;
+ virtual bool GetReference(ScriptFrame& frame, bool init_dict, Value *parent, String *index, DebugHint **dhint = nullptr) const;
+ virtual const DebugInfo& GetDebugInfo() const;
+
+ virtual ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const = 0;
+
+ static boost::signals2::signal<void (ScriptFrame& frame, ScriptError *ex, const DebugInfo& di)> OnBreakpoint;
+
+ static void ScriptBreakpoint(ScriptFrame& frame, ScriptError *ex, const DebugInfo& di);
+};
+
+std::unique_ptr<Expression> MakeIndexer(ScopeSpecifier scopeSpec, const String& index);
+
+class OwnedExpression final : public Expression
+{
+public:
+ OwnedExpression(Expression::Ptr expression)
+ : m_Expression(std::move(expression))
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override
+ {
+ return m_Expression->DoEvaluate(frame, dhint);
+ }
+
+ const DebugInfo& GetDebugInfo() const override
+ {
+ return m_Expression->GetDebugInfo();
+ }
+
+private:
+ Expression::Ptr m_Expression;
+};
+
+class LiteralExpression final : public Expression
+{
+public:
+ LiteralExpression(Value value = Value());
+
+ const Value& GetValue() const
+ {
+ return m_Value;
+ }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+
+private:
+ Value m_Value;
+};
+
+inline LiteralExpression *MakeLiteralRaw(const Value& literal = Value())
+{
+ return new LiteralExpression(literal);
+}
+
+inline std::unique_ptr<LiteralExpression> MakeLiteral(const Value& literal = Value())
+{
+ return std::unique_ptr<LiteralExpression>(MakeLiteralRaw(literal));
+}
+
+class DebuggableExpression : public Expression
+{
+public:
+ DebuggableExpression(DebugInfo debugInfo = DebugInfo())
+ : m_DebugInfo(std::move(debugInfo))
+ { }
+
+protected:
+ const DebugInfo& GetDebugInfo() const final;
+
+ DebugInfo m_DebugInfo;
+};
+
+class UnaryExpression : public DebuggableExpression
+{
+public:
+ UnaryExpression(std::unique_ptr<Expression> operand, const DebugInfo& debugInfo = DebugInfo())
+ : DebuggableExpression(debugInfo), m_Operand(std::move(operand))
+ { }
+
+protected:
+ std::unique_ptr<Expression> m_Operand;
+};
+
+class BinaryExpression : public DebuggableExpression
+{
+public:
+ BinaryExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : DebuggableExpression(debugInfo), m_Operand1(std::move(operand1)), m_Operand2(std::move(operand2))
+ { }
+
+ inline const std::unique_ptr<Expression>& GetOperand1() const noexcept
+ {
+ return m_Operand1;
+ }
+
+ inline const std::unique_ptr<Expression>& GetOperand2() const noexcept
+ {
+ return m_Operand2;
+ }
+
+protected:
+ std::unique_ptr<Expression> m_Operand1;
+ std::unique_ptr<Expression> m_Operand2;
+};
+
+class VariableExpression final : public DebuggableExpression
+{
+public:
+ VariableExpression(String variable, std::vector<Expression::Ptr> imports, const DebugInfo& debugInfo = DebugInfo());
+
+ inline const String& GetVariable() const
+ {
+ return m_Variable;
+ }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+ bool GetReference(ScriptFrame& frame, bool init_dict, Value *parent, String *index, DebugHint **dhint) const override;
+
+private:
+ String m_Variable;
+ std::vector<Expression::Ptr> m_Imports;
+
+ friend void BindToScope(std::unique_ptr<Expression>& expr, ScopeSpecifier scopeSpec);
+};
+
+class DerefExpression final : public UnaryExpression
+{
+public:
+ DerefExpression(std::unique_ptr<Expression> operand, const DebugInfo& debugInfo = DebugInfo())
+ : UnaryExpression(std::move(operand), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+ bool GetReference(ScriptFrame& frame, bool init_dict, Value *parent, String *index, DebugHint **dhint) const override;
+};
+
+class RefExpression final : public UnaryExpression
+{
+public:
+ RefExpression(std::unique_ptr<Expression> operand, const DebugInfo& debugInfo = DebugInfo())
+ : UnaryExpression(std::move(operand), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class NegateExpression final : public UnaryExpression
+{
+public:
+ NegateExpression(std::unique_ptr<Expression> operand, const DebugInfo& debugInfo = DebugInfo())
+ : UnaryExpression(std::move(operand), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class LogicalNegateExpression final : public UnaryExpression
+{
+public:
+ LogicalNegateExpression(std::unique_ptr<Expression> operand, const DebugInfo& debugInfo = DebugInfo())
+ : UnaryExpression(std::move(operand), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class AddExpression final : public BinaryExpression
+{
+public:
+ AddExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class SubtractExpression final : public BinaryExpression
+{
+public:
+ SubtractExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class MultiplyExpression final : public BinaryExpression
+{
+public:
+ MultiplyExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class DivideExpression final : public BinaryExpression
+{
+public:
+ DivideExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class ModuloExpression final : public BinaryExpression
+{
+public:
+ ModuloExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class XorExpression final : public BinaryExpression
+{
+public:
+ XorExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class BinaryAndExpression final : public BinaryExpression
+{
+public:
+ BinaryAndExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class BinaryOrExpression final : public BinaryExpression
+{
+public:
+ BinaryOrExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class ShiftLeftExpression final : public BinaryExpression
+{
+public:
+ ShiftLeftExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class ShiftRightExpression final : public BinaryExpression
+{
+public:
+ ShiftRightExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class EqualExpression final : public BinaryExpression
+{
+public:
+ EqualExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class NotEqualExpression final : public BinaryExpression
+{
+public:
+ NotEqualExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class LessThanExpression final : public BinaryExpression
+{
+public:
+ LessThanExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class GreaterThanExpression final : public BinaryExpression
+{
+public:
+ GreaterThanExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class LessThanOrEqualExpression final : public BinaryExpression
+{
+public:
+ LessThanOrEqualExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class GreaterThanOrEqualExpression final : public BinaryExpression
+{
+public:
+ GreaterThanOrEqualExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class InExpression final : public BinaryExpression
+{
+public:
+ InExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class NotInExpression final : public BinaryExpression
+{
+public:
+ NotInExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class LogicalAndExpression final : public BinaryExpression
+{
+public:
+ LogicalAndExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class LogicalOrExpression final : public BinaryExpression
+{
+public:
+ LogicalOrExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class FunctionCallExpression final : public DebuggableExpression
+{
+public:
+ FunctionCallExpression(std::unique_ptr<Expression> fname, std::vector<std::unique_ptr<Expression> >&& args, const DebugInfo& debugInfo = DebugInfo())
+ : DebuggableExpression(debugInfo), m_FName(std::move(fname)), m_Args(std::move(args))
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+
+public:
+ std::unique_ptr<Expression> m_FName;
+ std::vector<std::unique_ptr<Expression> > m_Args;
+};
+
+class ArrayExpression final : public DebuggableExpression
+{
+public:
+ ArrayExpression(std::vector<std::unique_ptr<Expression > >&& expressions, const DebugInfo& debugInfo = DebugInfo())
+ : DebuggableExpression(debugInfo), m_Expressions(std::move(expressions))
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+
+private:
+ std::vector<std::unique_ptr<Expression> > m_Expressions;
+};
+
+class DictExpression final : public DebuggableExpression
+{
+public:
+ DictExpression(std::vector<std::unique_ptr<Expression> >&& expressions = {}, const DebugInfo& debugInfo = DebugInfo())
+ : DebuggableExpression(debugInfo), m_Expressions(std::move(expressions))
+ { }
+
+ void MakeInline();
+
+ inline const std::vector<std::unique_ptr<Expression>>& GetExpressions() const noexcept
+ {
+ return m_Expressions;
+ }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+
+private:
+ std::vector<std::unique_ptr<Expression> > m_Expressions;
+ bool m_Inline{false};
+
+ friend void BindToScope(std::unique_ptr<Expression>& expr, ScopeSpecifier scopeSpec);
+};
+
+class SetConstExpression final : public UnaryExpression
+{
+public:
+ SetConstExpression(const String& name, std::unique_ptr<Expression> operand, const DebugInfo& debugInfo = DebugInfo())
+ : UnaryExpression(std::move(operand), debugInfo), m_Name(name)
+ { }
+
+protected:
+ String m_Name;
+
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class SetExpression final : public BinaryExpression
+{
+public:
+ SetExpression(std::unique_ptr<Expression> operand1, CombinedSetOp op, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo), m_Op(op)
+ { }
+
+ void SetOverrideFrozen();
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+
+private:
+ CombinedSetOp m_Op;
+ bool m_OverrideFrozen{false};
+
+ friend void BindToScope(std::unique_ptr<Expression>& expr, ScopeSpecifier scopeSpec);
+};
+
+class ConditionalExpression final : public DebuggableExpression
+{
+public:
+ ConditionalExpression(std::unique_ptr<Expression> condition, std::unique_ptr<Expression> true_branch, std::unique_ptr<Expression> false_branch, const DebugInfo& debugInfo = DebugInfo())
+ : DebuggableExpression(debugInfo), m_Condition(std::move(condition)), m_TrueBranch(std::move(true_branch)), m_FalseBranch(std::move(false_branch))
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+
+private:
+ std::unique_ptr<Expression> m_Condition;
+ std::unique_ptr<Expression> m_TrueBranch;
+ std::unique_ptr<Expression> m_FalseBranch;
+};
+
+class WhileExpression final : public DebuggableExpression
+{
+public:
+ WhileExpression(std::unique_ptr<Expression> condition, std::unique_ptr<Expression> loop_body, const DebugInfo& debugInfo = DebugInfo())
+ : DebuggableExpression(debugInfo), m_Condition(std::move(condition)), m_LoopBody(std::move(loop_body))
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+
+private:
+ std::unique_ptr<Expression> m_Condition;
+ std::unique_ptr<Expression> m_LoopBody;
+};
+
+
+class ReturnExpression final : public UnaryExpression
+{
+public:
+ ReturnExpression(std::unique_ptr<Expression> expression, const DebugInfo& debugInfo = DebugInfo())
+ : UnaryExpression(std::move(expression), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class BreakExpression final : public DebuggableExpression
+{
+public:
+ BreakExpression(const DebugInfo& debugInfo = DebugInfo())
+ : DebuggableExpression(debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class ContinueExpression final : public DebuggableExpression
+{
+public:
+ ContinueExpression(const DebugInfo& debugInfo = DebugInfo())
+ : DebuggableExpression(debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class GetScopeExpression final : public Expression
+{
+public:
+ GetScopeExpression(ScopeSpecifier scopeSpec)
+ : m_ScopeSpec(scopeSpec)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+
+private:
+ ScopeSpecifier m_ScopeSpec;
+};
+
+class IndexerExpression final : public BinaryExpression
+{
+public:
+ IndexerExpression(std::unique_ptr<Expression> operand1, std::unique_ptr<Expression> operand2, const DebugInfo& debugInfo = DebugInfo())
+ : BinaryExpression(std::move(operand1), std::move(operand2), debugInfo)
+ { }
+
+ void SetOverrideFrozen();
+
+protected:
+ bool m_OverrideFrozen{false};
+
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+ bool GetReference(ScriptFrame& frame, bool init_dict, Value *parent, String *index, DebugHint **dhint) const override;
+
+ friend void BindToScope(std::unique_ptr<Expression>& expr, ScopeSpecifier scopeSpec);
+};
+
+void BindToScope(std::unique_ptr<Expression>& expr, ScopeSpecifier scopeSpec);
+
+class ThrowExpression final : public DebuggableExpression
+{
+public:
+ ThrowExpression(std::unique_ptr<Expression> message, bool incompleteExpr, const DebugInfo& debugInfo = DebugInfo())
+ : DebuggableExpression(debugInfo), m_Message(std::move(message)), m_IncompleteExpr(incompleteExpr)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+
+private:
+ std::unique_ptr<Expression> m_Message;
+ bool m_IncompleteExpr;
+};
+
+class ImportExpression final : public DebuggableExpression
+{
+public:
+ ImportExpression(std::unique_ptr<Expression> name, const DebugInfo& debugInfo = DebugInfo())
+ : DebuggableExpression(debugInfo), m_Name(std::move(name))
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+
+private:
+ std::unique_ptr<Expression> m_Name;
+};
+
+class ImportDefaultTemplatesExpression final : public DebuggableExpression
+{
+public:
+ ImportDefaultTemplatesExpression(const DebugInfo& debugInfo = DebugInfo())
+ : DebuggableExpression(debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class FunctionExpression final : public DebuggableExpression
+{
+public:
+ FunctionExpression(String name, std::vector<String> args,
+ std::map<String, std::unique_ptr<Expression> >&& closedVars, std::unique_ptr<Expression> expression, const DebugInfo& debugInfo = DebugInfo())
+ : DebuggableExpression(debugInfo), m_Name(std::move(name)), m_Args(std::move(args)), m_ClosedVars(std::move(closedVars)), m_Expression(expression.release())
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+
+private:
+ String m_Name;
+ std::vector<String> m_Args;
+ std::map<String, std::unique_ptr<Expression> > m_ClosedVars;
+ Expression::Ptr m_Expression;
+};
+
+class ApplyExpression final : public DebuggableExpression
+{
+public:
+ ApplyExpression(String type, String target, std::unique_ptr<Expression> name,
+ std::unique_ptr<Expression> filter, String package, String fkvar, String fvvar,
+ std::unique_ptr<Expression> fterm, std::map<String, std::unique_ptr<Expression> >&& closedVars, bool ignoreOnError,
+ std::unique_ptr<Expression> expression, const DebugInfo& debugInfo = DebugInfo())
+ : DebuggableExpression(debugInfo), m_Type(std::move(type)), m_Target(std::move(target)),
+ m_Name(std::move(name)), m_Filter(filter.release()), m_Package(std::move(package)), m_FKVar(std::move(fkvar)), m_FVVar(std::move(fvvar)),
+ m_FTerm(fterm.release()), m_IgnoreOnError(ignoreOnError), m_ClosedVars(std::move(closedVars)),
+ m_Expression(expression.release())
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+
+private:
+ String m_Type;
+ String m_Target;
+ std::unique_ptr<Expression> m_Name;
+ Expression::Ptr m_Filter;
+ String m_Package;
+ String m_FKVar;
+ String m_FVVar;
+ Expression::Ptr m_FTerm;
+ bool m_IgnoreOnError;
+ std::map<String, std::unique_ptr<Expression> > m_ClosedVars;
+ Expression::Ptr m_Expression;
+};
+
+class NamespaceExpression final : public DebuggableExpression
+{
+public:
+ NamespaceExpression(std::unique_ptr<Expression> expression, const DebugInfo& debugInfo = DebugInfo())
+ : DebuggableExpression(debugInfo), m_Expression(expression.release())
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+
+private:
+ Expression::Ptr m_Expression;
+};
+
+class ObjectExpression final : public DebuggableExpression
+{
+public:
+ ObjectExpression(bool abstract, std::unique_ptr<Expression> type, std::unique_ptr<Expression> name, std::unique_ptr<Expression> filter,
+ String zone, String package, std::map<String, std::unique_ptr<Expression> >&& closedVars,
+ bool defaultTmpl, bool ignoreOnError, std::unique_ptr<Expression> expression, const DebugInfo& debugInfo = DebugInfo())
+ : DebuggableExpression(debugInfo), m_Abstract(abstract), m_Type(std::move(type)),
+ m_Name(std::move(name)), m_Filter(filter.release()), m_Zone(std::move(zone)), m_Package(std::move(package)), m_DefaultTmpl(defaultTmpl),
+ m_IgnoreOnError(ignoreOnError), m_ClosedVars(std::move(closedVars)), m_Expression(expression.release())
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+
+private:
+ bool m_Abstract;
+ std::unique_ptr<Expression> m_Type;
+ std::unique_ptr<Expression> m_Name;
+ Expression::Ptr m_Filter;
+ String m_Zone;
+ String m_Package;
+ bool m_DefaultTmpl;
+ bool m_IgnoreOnError;
+ std::map<String, std::unique_ptr<Expression> > m_ClosedVars;
+ Expression::Ptr m_Expression;
+};
+
+class ForExpression final : public DebuggableExpression
+{
+public:
+ ForExpression(String fkvar, String fvvar, std::unique_ptr<Expression> value, std::unique_ptr<Expression> expression, const DebugInfo& debugInfo = DebugInfo())
+ : DebuggableExpression(debugInfo), m_FKVar(std::move(fkvar)), m_FVVar(std::move(fvvar)), m_Value(std::move(value)), m_Expression(std::move(expression))
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+
+private:
+ String m_FKVar;
+ String m_FVVar;
+ std::unique_ptr<Expression> m_Value;
+ std::unique_ptr<Expression> m_Expression;
+};
+
+class LibraryExpression final : public UnaryExpression
+{
+public:
+ LibraryExpression(std::unique_ptr<Expression> expression, const DebugInfo& debugInfo = DebugInfo())
+ : UnaryExpression(std::move(expression), debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+enum IncludeType
+{
+ IncludeRegular,
+ IncludeRecursive,
+ IncludeZones
+};
+
+class IncludeExpression final : public DebuggableExpression
+{
+public:
+ IncludeExpression(String relativeBase, std::unique_ptr<Expression> path, std::unique_ptr<Expression> pattern, std::unique_ptr<Expression> name,
+ IncludeType type, bool searchIncludes, String zone, String package, const DebugInfo& debugInfo = DebugInfo())
+ : DebuggableExpression(debugInfo), m_RelativeBase(std::move(relativeBase)), m_Path(std::move(path)), m_Pattern(std::move(pattern)),
+ m_Name(std::move(name)), m_Type(type), m_SearchIncludes(searchIncludes), m_Zone(std::move(zone)), m_Package(std::move(package))
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+
+private:
+ String m_RelativeBase;
+ std::unique_ptr<Expression> m_Path;
+ std::unique_ptr<Expression> m_Pattern;
+ std::unique_ptr<Expression> m_Name;
+ IncludeType m_Type;
+ bool m_SearchIncludes;
+ String m_Zone;
+ String m_Package;
+};
+
+class BreakpointExpression final : public DebuggableExpression
+{
+public:
+ BreakpointExpression(const DebugInfo& debugInfo = DebugInfo())
+ : DebuggableExpression(debugInfo)
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+};
+
+class TryExceptExpression final : public DebuggableExpression
+{
+public:
+ TryExceptExpression(std::unique_ptr<Expression> tryBody, std::unique_ptr<Expression> exceptBody, const DebugInfo& debugInfo = DebugInfo())
+ : DebuggableExpression(debugInfo), m_TryBody(std::move(tryBody)), m_ExceptBody(std::move(exceptBody))
+ { }
+
+protected:
+ ExpressionResult DoEvaluate(ScriptFrame& frame, DebugHint *dhint) const override;
+
+private:
+ std::unique_ptr<Expression> m_TryBody;
+ std::unique_ptr<Expression> m_ExceptBody;
+};
+
+}
+
+#endif /* EXPRESSION_H */
diff --git a/lib/config/i2-config.hpp b/lib/config/i2-config.hpp
new file mode 100644
index 0000000..8c26287
--- /dev/null
+++ b/lib/config/i2-config.hpp
@@ -0,0 +1,16 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef I2CONFIG_H
+#define I2CONFIG_H
+
+/**
+ * @defgroup config Configuration library
+ *
+ * The configuration library implements a compiler for Icinga 2's configuration
+ * format. It also provides functionality for creating configuration objects
+ * at runtime.
+ */
+
+#include "base/i2-base.hpp"
+
+#endif /* I2CONFIG_H */
diff --git a/lib/config/objectrule.cpp b/lib/config/objectrule.cpp
new file mode 100644
index 0000000..6a74a40
--- /dev/null
+++ b/lib/config/objectrule.cpp
@@ -0,0 +1,18 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "config/objectrule.hpp"
+#include <set>
+
+using namespace icinga;
+
+ObjectRule::TypeSet ObjectRule::m_Types;
+
+void ObjectRule::RegisterType(const String& sourceType)
+{
+ m_Types.insert(sourceType);
+}
+
+bool ObjectRule::IsValidSourceType(const String& sourceType)
+{
+ return m_Types.find(sourceType) != m_Types.end();
+}
diff --git a/lib/config/objectrule.hpp b/lib/config/objectrule.hpp
new file mode 100644
index 0000000..d093c9f
--- /dev/null
+++ b/lib/config/objectrule.hpp
@@ -0,0 +1,33 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef OBJECTRULE_H
+#define OBJECTRULE_H
+
+#include "config/i2-config.hpp"
+#include "config/expression.hpp"
+#include "base/debuginfo.hpp"
+#include <set>
+
+namespace icinga
+{
+
+/**
+ * @ingroup config
+ */
+class ObjectRule
+{
+public:
+ typedef std::set<String> TypeSet;
+
+ static void RegisterType(const String& sourceType);
+ static bool IsValidSourceType(const String& sourceType);
+
+private:
+ ObjectRule();
+
+ static TypeSet m_Types;
+};
+
+}
+
+#endif /* OBJECTRULE_H */
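
ObjectRule above is a process-wide registry: configuration types that support 'assign where'/'ignore where' rules call RegisterType() once during startup, and VMOps::NewObject() (vmops.hpp, next file) consults IsValidSourceType() to reject a filter on any other type. The following is a minimal, self-contained C++ sketch of that registry pattern; the class and function names are illustrative only and are not part of the Icinga 2 API.

	#include <set>
	#include <string>

	// Minimal sketch of a static type registry, analogous to ObjectRule:
	// supported type names are registered once, lookups are membership tests.
	class SourceTypeRegistry
	{
	public:
		static void Register(const std::string& type)
		{
			GetTypes().insert(type);
		}

		static bool IsValid(const std::string& type)
		{
			return GetTypes().count(type) != 0;
		}

	private:
		// Function-local static avoids a separate out-of-class definition.
		static std::set<std::string>& GetTypes()
		{
			static std::set<std::string> types;
			return types;
		}
	};

	int main()
	{
		SourceTypeRegistry::Register("Service");

		// "Service" was registered, "Endpoint" was not.
		return SourceTypeRegistry::IsValid("Service") && !SourceTypeRegistry::IsValid("Endpoint") ? 0 : 1;
	}
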
diff --git a/lib/config/vmops.hpp b/lib/config/vmops.hpp
new file mode 100644
index 0000000..ea30983
--- /dev/null
+++ b/lib/config/vmops.hpp
@@ -0,0 +1,274 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef VMOPS_H
+#define VMOPS_H
+
+#include "config/i2-config.hpp"
+#include "config/expression.hpp"
+#include "config/configitembuilder.hpp"
+#include "config/applyrule.hpp"
+#include "config/objectrule.hpp"
+#include "base/debuginfo.hpp"
+#include "base/array.hpp"
+#include "base/dictionary.hpp"
+#include "base/namespace.hpp"
+#include "base/function.hpp"
+#include "base/scriptglobal.hpp"
+#include "base/exception.hpp"
+#include "base/convert.hpp"
+#include "base/objectlock.hpp"
+#include <map>
+#include <vector>
+
+namespace icinga
+{
+
+class VMOps
+{
+public:
+ static inline bool FindVarImportRef(ScriptFrame& frame, const std::vector<Expression::Ptr>& imports, const String& name, Value *result, const DebugInfo& debugInfo = DebugInfo())
+ {
+ for (const auto& import : imports) {
+ ExpressionResult res = import->Evaluate(frame);
+ Object::Ptr obj = res.GetValue();
+ if (obj->HasOwnField(name)) {
+ *result = obj;
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ static inline bool FindVarImport(ScriptFrame& frame, const std::vector<Expression::Ptr>& imports, const String& name, Value *result, const DebugInfo& debugInfo = DebugInfo())
+ {
+ Value parent;
+
+ if (FindVarImportRef(frame, imports, name, &parent, debugInfo)) {
+ *result = GetField(parent, name, frame.Sandboxed, debugInfo);
+ return true;
+ }
+
+ return false;
+ }
+
+ static inline Value ConstructorCall(const Type::Ptr& type, const std::vector<Value>& args, const DebugInfo& debugInfo = DebugInfo())
+ {
+ if (type->GetName() == "String") {
+ if (args.empty())
+ return "";
+ else if (args.size() == 1)
+ return Convert::ToString(args[0]);
+ else
+ BOOST_THROW_EXCEPTION(ScriptError("Too many arguments for constructor."));
+ } else if (type->GetName() == "Number") {
+ if (args.empty())
+ return 0;
+ else if (args.size() == 1)
+ return Convert::ToDouble(args[0]);
+ else
+ BOOST_THROW_EXCEPTION(ScriptError("Too many arguments for constructor."));
+ } else if (type->GetName() == "Boolean") {
+ if (args.empty())
+ return 0;
+ else if (args.size() == 1)
+ return Convert::ToBool(args[0]);
+ else
+ BOOST_THROW_EXCEPTION(ScriptError("Too many arguments for constructor."));
+ } else if (args.size() == 1 && type->IsAssignableFrom(args[0].GetReflectionType()))
+ return args[0];
+ else
+ return type->Instantiate(args);
+ }
+
+ static inline Value FunctionCall(ScriptFrame& frame, const Value& self, const Function::Ptr& func, const std::vector<Value>& arguments)
+ {
+ if (!self.IsEmpty() || self.IsString())
+ return func->InvokeThis(self, arguments);
+ else
+ return func->Invoke(arguments);
+
+ }
+
+ static inline Value NewFunction(ScriptFrame& frame, const String& name, const std::vector<String>& argNames,
+ const std::map<String, std::unique_ptr<Expression> >& closedVars, const Expression::Ptr& expression)
+ {
+ auto evaluatedClosedVars = EvaluateClosedVars(frame, closedVars);
+
+ auto wrapper = [argNames, evaluatedClosedVars, expression](const std::vector<Value>& arguments) -> Value {
+ if (arguments.size() < argNames.size())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Too few arguments for function"));
+
+ ScriptFrame *frame = ScriptFrame::GetCurrentFrame();
+
+ frame->Locals = new Dictionary();
+
+ if (evaluatedClosedVars)
+ evaluatedClosedVars->CopyTo(frame->Locals);
+
+ for (std::vector<Value>::size_type i = 0; i < std::min(arguments.size(), argNames.size()); i++)
+ frame->Locals->Set(argNames[i], arguments[i]);
+
+ return expression->Evaluate(*frame);
+ };
+
+ return new Function(name, wrapper, argNames);
+ }
+
+ static inline Value NewApply(ScriptFrame& frame, const String& type, const String& target, const String& name, const Expression::Ptr& filter,
+ const String& package, const String& fkvar, const String& fvvar, const Expression::Ptr& fterm, const std::map<String, std::unique_ptr<Expression> >& closedVars,
+ bool ignoreOnError, const Expression::Ptr& expression, const DebugInfo& debugInfo = DebugInfo())
+ {
+ ApplyRule::AddRule(type, target, name, expression, filter, package, fkvar,
+ fvvar, fterm, ignoreOnError, debugInfo, EvaluateClosedVars(frame, closedVars));
+
+ return Empty;
+ }
+
+ static inline Value NewObject(ScriptFrame& frame, bool abstract, const Type::Ptr& type, const String& name, const Expression::Ptr& filter,
+ const String& zone, const String& package, bool defaultTmpl, bool ignoreOnError, const std::map<String, std::unique_ptr<Expression> >& closedVars, const Expression::Ptr& expression, const DebugInfo& debugInfo = DebugInfo())
+ {
+ ConfigItemBuilder item{debugInfo};
+
+ String checkName = name;
+
+ if (!abstract) {
+ auto *nc = dynamic_cast<NameComposer *>(type.get());
+
+ if (nc)
+ checkName = nc->MakeName(name, nullptr);
+ }
+
+ if (!checkName.IsEmpty()) {
+ ConfigItem::Ptr oldItem = ConfigItem::GetByTypeAndName(type, checkName);
+
+ if (oldItem) {
+ std::ostringstream msgbuf;
+ msgbuf << "Object '" << name << "' of type '" << type->GetName() << "' re-defined: " << debugInfo << "; previous definition: " << oldItem->GetDebugInfo();
+ BOOST_THROW_EXCEPTION(ScriptError(msgbuf.str(), debugInfo));
+ }
+ }
+
+ if (filter && !ObjectRule::IsValidSourceType(type->GetName())) {
+ std::ostringstream msgbuf;
+ msgbuf << "Object '" << name << "' of type '" << type->GetName() << "' must not have 'assign where' and 'ignore where' rules: " << debugInfo;
+ BOOST_THROW_EXCEPTION(ScriptError(msgbuf.str(), debugInfo));
+ }
+
+ item.SetType(type);
+ item.SetName(name);
+
+ if (!abstract)
+ item.AddExpression(new ImportDefaultTemplatesExpression());
+
+ item.AddExpression(new OwnedExpression(expression));
+ item.SetAbstract(abstract);
+ item.SetScope(EvaluateClosedVars(frame, closedVars));
+ item.SetZone(zone);
+ item.SetPackage(package);
+ item.SetFilter(filter);
+ item.SetDefaultTemplate(defaultTmpl);
+ item.SetIgnoreOnError(ignoreOnError);
+ item.Compile()->Register();
+
+ return Empty;
+ }
+
+ static inline ExpressionResult For(ScriptFrame& frame, const String& fkvar, const String& fvvar, const Value& value, const std::unique_ptr<Expression>& expression, const DebugInfo& debugInfo = DebugInfo())
+ {
+ if (value.IsObjectType<Array>()) {
+ if (!fvvar.IsEmpty())
+ BOOST_THROW_EXCEPTION(ScriptError("Cannot use dictionary iterator for array.", debugInfo));
+
+ Array::Ptr arr = value;
+
+ for (Array::SizeType i = 0; i < arr->GetLength(); i++) {
+ frame.Locals->Set(fkvar, arr->Get(i));
+ ExpressionResult res = expression->Evaluate(frame);
+ CHECK_RESULT_LOOP(res);
+ }
+ } else if (value.IsObjectType<Dictionary>()) {
+ if (fvvar.IsEmpty())
+ BOOST_THROW_EXCEPTION(ScriptError("Cannot use array iterator for dictionary.", debugInfo));
+
+ Dictionary::Ptr dict = value;
+ std::vector<String> keys;
+
+ {
+ ObjectLock olock(dict);
+ for (const Dictionary::Pair& kv : dict) {
+ keys.push_back(kv.first);
+ }
+ }
+
+ for (const String& key : keys) {
+ frame.Locals->Set(fkvar, key);
+ frame.Locals->Set(fvvar, dict->Get(key));
+ ExpressionResult res = expression->Evaluate(frame);
+ CHECK_RESULT_LOOP(res);
+ }
+ } else if (value.IsObjectType<Namespace>()) {
+ if (fvvar.IsEmpty())
+ BOOST_THROW_EXCEPTION(ScriptError("Cannot use array iterator for namespace.", debugInfo));
+
+ Namespace::Ptr ns = value;
+ std::vector<String> keys;
+
+ {
+ ObjectLock olock(ns);
+ for (const Namespace::Pair& kv : ns) {
+ keys.push_back(kv.first);
+ }
+ }
+
+ for (const String& key : keys) {
+ frame.Locals->Set(fkvar, key);
+ frame.Locals->Set(fvvar, ns->Get(key));
+ ExpressionResult res = expression->Evaluate(frame);
+ CHECK_RESULT_LOOP(res);
+ }
+ } else
+ BOOST_THROW_EXCEPTION(ScriptError("Invalid type in for expression: " + value.GetTypeName(), debugInfo));
+
+ return Empty;
+ }
+
+ static inline Value GetField(const Value& context, const String& field, bool sandboxed = false, const DebugInfo& debugInfo = DebugInfo())
+ {
+ if (BOOST_UNLIKELY(context.IsEmpty() && !context.IsString()))
+ return Empty;
+
+ if (BOOST_UNLIKELY(!context.IsObject()))
+ return GetPrototypeField(context, field, true, debugInfo);
+
+ Object::Ptr object = context;
+
+ return object->GetFieldByName(field, sandboxed, debugInfo);
+ }
+
+ static inline void SetField(const Object::Ptr& context, const String& field, const Value& value, bool overrideFrozen, const DebugInfo& debugInfo = DebugInfo())
+ {
+ if (!context)
+ BOOST_THROW_EXCEPTION(ScriptError("Cannot set field '" + field + "' on a value that is not an object.", debugInfo));
+
+ return context->SetFieldByName(field, value, overrideFrozen, debugInfo);
+ }
+
+private:
+ static inline Dictionary::Ptr EvaluateClosedVars(ScriptFrame& frame, const std::map<String, std::unique_ptr<Expression> >& closedVars)
+ {
+ if (closedVars.empty())
+ return nullptr;
+
+ DictionaryData locals;
+
+ for (const auto& cvar : closedVars)
+ locals.emplace_back(cvar.first, cvar.second->Evaluate(frame));
+
+ return new Dictionary(std::move(locals));
+ }
+};
+
+}
+
+#endif /* VMOPS_H */
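
VMOps::For() above dispatches on the runtime type of the iterated value: an Array accepts only a single loop variable, while a Dictionary or Namespace requires a key/value pair, and the keys are copied under an ObjectLock before the loop so the body may safely modify the container. A rough, self-contained C++ sketch of the same dispatch rule over standard containers (placeholder names, not Icinga types):

	#include <iostream>
	#include <map>
	#include <stdexcept>
	#include <string>
	#include <vector>

	// Array-style iteration: a value variable ("fvvar") must not be given.
	static void ForEach(const std::vector<std::string>& arr, bool haveValueVar)
	{
		if (haveValueVar)
			throw std::invalid_argument("Cannot use dictionary iterator for array.");

		for (const auto& element : arr)
			std::cout << element << '\n';
	}

	// Dictionary-style iteration: a value variable is mandatory.
	static void ForEach(const std::map<std::string, int>& dict, bool haveValueVar)
	{
		if (!haveValueVar)
			throw std::invalid_argument("Cannot use array iterator for dictionary.");

		for (const auto& kv : dict)
			std::cout << kv.first << " = " << kv.second << '\n';
	}

	int main()
	{
		ForEach(std::vector<std::string>{"a", "b"}, false);
		ForEach(std::map<std::string, int>{{"x", 1}, {"y", 2}}, true);
		return 0;
	}
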
diff --git a/lib/db_ido/CMakeLists.txt b/lib/db_ido/CMakeLists.txt
new file mode 100644
index 0000000..7a97d27
--- /dev/null
+++ b/lib/db_ido/CMakeLists.txt
@@ -0,0 +1,40 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+mkclass_target(dbconnection.ti dbconnection-ti.cpp dbconnection-ti.hpp)
+
+mkembedconfig_target(db_ido-itl.conf db_ido-itl.cpp)
+
+set(db_ido_SOURCES
+ i2-db_ido.hpp db_ido-itl.cpp
+ commanddbobject.cpp commanddbobject.hpp
+ dbconnection.cpp dbconnection.hpp dbconnection-ti.hpp
+ dbevents.cpp dbevents.hpp
+ dbobject.cpp dbobject.hpp
+ dbquery.cpp dbquery.hpp
+ dbreference.cpp dbreference.hpp
+ dbtype.cpp dbtype.hpp
+ dbvalue.cpp dbvalue.hpp
+ endpointdbobject.cpp endpointdbobject.hpp
+ hostdbobject.cpp hostdbobject.hpp
+ hostgroupdbobject.cpp hostgroupdbobject.hpp
+ idochecktask.cpp idochecktask.hpp
+ servicedbobject.cpp servicedbobject.hpp
+ servicegroupdbobject.cpp servicegroupdbobject.hpp
+ timeperioddbobject.cpp timeperioddbobject.hpp
+ userdbobject.cpp userdbobject.hpp
+ usergroupdbobject.cpp usergroupdbobject.hpp
+ zonedbobject.cpp zonedbobject.hpp
+)
+
+if(ICINGA2_UNITY_BUILD)
+ mkunity_target(db_ido db_ido db_ido_SOURCES)
+endif()
+
+add_library(db_ido OBJECT ${db_ido_SOURCES})
+
+add_dependencies(db_ido base config icinga remote)
+
+set_target_properties (
+ db_ido PROPERTIES
+ FOLDER Lib
+)
diff --git a/lib/db_ido/commanddbobject.cpp b/lib/db_ido/commanddbobject.cpp
new file mode 100644
index 0000000..2ac167a
--- /dev/null
+++ b/lib/db_ido/commanddbobject.cpp
@@ -0,0 +1,31 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido/commanddbobject.hpp"
+#include "db_ido/dbtype.hpp"
+#include "db_ido/dbvalue.hpp"
+#include "icinga/command.hpp"
+#include "icinga/compatutility.hpp"
+#include "base/objectlock.hpp"
+#include "base/convert.hpp"
+
+using namespace icinga;
+
+REGISTER_DBTYPE(Command, "command", DbObjectTypeCommand, "object_id", CommandDbObject);
+
+CommandDbObject::CommandDbObject(const DbType::Ptr& type, const String& name1, const String& name2)
+ : DbObject(type, name1, name2)
+{ }
+
+Dictionary::Ptr CommandDbObject::GetConfigFields() const
+{
+ Command::Ptr command = static_pointer_cast<Command>(GetObject());
+
+ return new Dictionary({
+ { "command_line", CompatUtility::GetCommandLine(command) }
+ });
+}
+
+Dictionary::Ptr CommandDbObject::GetStatusFields() const
+{
+ return nullptr;
+}
diff --git a/lib/db_ido/commanddbobject.hpp b/lib/db_ido/commanddbobject.hpp
new file mode 100644
index 0000000..6d22747
--- /dev/null
+++ b/lib/db_ido/commanddbobject.hpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef COMMANDDBOBJECT_H
+#define COMMANDDBOBJECT_H
+
+#include "db_ido/dbobject.hpp"
+#include "base/configobject.hpp"
+
+namespace icinga
+{
+
+/**
+ * A Command database object.
+ *
+ * @ingroup ido
+ */
+class CommandDbObject final : public DbObject
+{
+public:
+ DECLARE_PTR_TYPEDEFS(CommandDbObject);
+
+ CommandDbObject(const DbType::Ptr& type, const String& name1, const String& name2);
+
+ Dictionary::Ptr GetConfigFields() const override;
+ Dictionary::Ptr GetStatusFields() const override;
+};
+
+}
+
+#endif /* COMMANDDBOBJECT_H */
diff --git a/lib/db_ido/db_ido-itl.conf b/lib/db_ido/db_ido-itl.conf
new file mode 100644
index 0000000..e2c42c3
--- /dev/null
+++ b/lib/db_ido/db_ido-itl.conf
@@ -0,0 +1,19 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+System.assert(Internal.run_with_activation_context(function() {
+ template CheckCommand "ido-check-command" use (checkFunc = Internal.IdoCheck) {
+ execute = checkFunc
+ }
+
+ object CheckCommand "ido" {
+ import "ido-check-command"
+ }
+}))
+
+var methods = [
+ "IdoCheck"
+]
+
+for (method in methods) {
+ Internal.remove(method)
+}
diff --git a/lib/db_ido/dbconnection.cpp b/lib/db_ido/dbconnection.cpp
new file mode 100644
index 0000000..a8534c4
--- /dev/null
+++ b/lib/db_ido/dbconnection.cpp
@@ -0,0 +1,583 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido/dbconnection.hpp"
+#include "db_ido/dbconnection-ti.cpp"
+#include "db_ido/dbvalue.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "icinga/host.hpp"
+#include "icinga/service.hpp"
+#include "base/configtype.hpp"
+#include "base/convert.hpp"
+#include "base/objectlock.hpp"
+#include "base/utility.hpp"
+#include "base/logger.hpp"
+#include "base/exception.hpp"
+
+using namespace icinga;
+
+REGISTER_TYPE(DbConnection);
+
+Timer::Ptr DbConnection::m_ProgramStatusTimer;
+boost::once_flag DbConnection::m_OnceFlag = BOOST_ONCE_INIT;
+
+void DbConnection::OnConfigLoaded()
+{
+ ConfigObject::OnConfigLoaded();
+
+ Value categories = GetCategories();
+
+ SetCategoryFilter(FilterArrayToInt(categories, DbQuery::GetCategoryFilterMap(), DbCatEverything));
+
+ if (!GetEnableHa()) {
+ Log(LogDebug, "DbConnection")
+ << "HA functionality disabled. Won't pause IDO connection: " << GetName();
+
+ SetHAMode(HARunEverywhere);
+ }
+
+ boost::call_once(m_OnceFlag, InitializeDbTimer);
+}
+
+void DbConnection::Start(bool runtimeCreated)
+{
+ ObjectImpl<DbConnection>::Start(runtimeCreated);
+
+ Log(LogInformation, "DbConnection")
+ << "'" << GetName() << "' started.";
+
+ auto onQuery = [this](const DbQuery& query) { ExecuteQuery(query); };
+ DbObject::OnQuery.connect(onQuery);
+
+ auto onMultipleQueries = [this](const std::vector<DbQuery>& multiQueries) { ExecuteMultipleQueries(multiQueries); };
+ DbObject::OnMultipleQueries.connect(onMultipleQueries);
+
+ DbObject::QueryCallbacks queryCallbacks;
+ queryCallbacks.Query = onQuery;
+ queryCallbacks.MultipleQueries = onMultipleQueries;
+
+ DbObject::OnMakeQueries.connect([queryCallbacks](const std::function<void (const DbObject::QueryCallbacks&)>& queryFunc) {
+ queryFunc(queryCallbacks);
+ });
+}
+
+void DbConnection::Stop(bool runtimeRemoved)
+{
+ Log(LogInformation, "DbConnection")
+ << "'" << GetName() << "' stopped.";
+
+ ObjectImpl<DbConnection>::Stop(runtimeRemoved);
+}
+
+void DbConnection::EnableActiveChangedHandler()
+{
+ if (!m_ActiveChangedHandler) {
+ ConfigObject::OnActiveChanged.connect([this](const ConfigObject::Ptr& object, const Value&) { UpdateObject(object); });
+ m_ActiveChangedHandler = true;
+ }
+}
+
+void DbConnection::Resume()
+{
+ ConfigObject::Resume();
+
+ Log(LogInformation, "DbConnection")
+ << "Resuming IDO connection: " << GetName();
+
+ m_CleanUpTimer = Timer::Create();
+ m_CleanUpTimer->SetInterval(60);
+ m_CleanUpTimer->OnTimerExpired.connect([this](const Timer * const&) { CleanUpHandler(); });
+ m_CleanUpTimer->Start();
+
+ m_LogStatsTimeout = 0;
+
+ m_LogStatsTimer = Timer::Create();
+ m_LogStatsTimer->SetInterval(10);
+ m_LogStatsTimer->OnTimerExpired.connect([this](const Timer * const&) { LogStatsHandler(); });
+ m_LogStatsTimer->Start();
+}
+
+void DbConnection::Pause()
+{
+ Log(LogInformation, "DbConnection")
+ << "Pausing IDO connection: " << GetName();
+
+ m_LogStatsTimer->Stop(true);
+ m_CleanUpTimer->Stop(true);
+
+ DbQuery query1;
+ query1.Table = "programstatus";
+ query1.IdColumn = "programstatus_id";
+ query1.Type = DbQueryUpdate;
+ query1.Category = DbCatProgramStatus;
+ query1.WhereCriteria = new Dictionary({
+ { "instance_id", 0 } /* DbConnection class fills in real ID */
+ });
+
+ query1.Fields = new Dictionary({
+ { "instance_id", 0 }, /* DbConnection class fills in real ID */
+ { "program_end_time", DbValue::FromTimestamp(Utility::GetTime()) },
+ { "is_currently_running", 0 },
+ { "process_id", Empty }
+ });
+
+ query1.Priority = PriorityHigh;
+
+ ExecuteQuery(query1);
+
+ NewTransaction();
+
+ m_QueryQueue.Enqueue([this]() { Disconnect(); }, PriorityLow);
+
+ /* Work on remaining tasks but never delete the threads, for HA resuming later. */
+ m_QueryQueue.Join();
+
+ ConfigObject::Pause();
+}
+
+void DbConnection::InitializeDbTimer()
+{
+ m_ProgramStatusTimer = Timer::Create();
+ m_ProgramStatusTimer->SetInterval(10);
+ m_ProgramStatusTimer->OnTimerExpired.connect([](const Timer * const&) { UpdateProgramStatus(); });
+ m_ProgramStatusTimer->Start();
+}
+
+void DbConnection::InsertRuntimeVariable(const String& key, const Value& value)
+{
+ DbQuery query;
+ query.Table = "runtimevariables";
+ query.Type = DbQueryInsert;
+ query.Category = DbCatProgramStatus;
+ query.Fields = new Dictionary({
+ { "instance_id", 0 }, /* DbConnection class fills in real ID */
+ { "varname", key },
+ { "varvalue", value }
+ });
+ DbObject::OnQuery(query);
+}
+
+void DbConnection::UpdateProgramStatus()
+{
+ IcingaApplication::Ptr icingaApplication = IcingaApplication::GetInstance();
+
+ if (!icingaApplication)
+ return;
+
+ Log(LogNotice, "DbConnection")
+ << "Updating programstatus table.";
+
+ std::vector<DbQuery> queries;
+
+ DbQuery query1;
+ query1.Type = DbQueryNewTransaction;
+ query1.Priority = PriorityImmediate;
+ queries.emplace_back(std::move(query1));
+
+ DbQuery query2;
+ query2.Table = "programstatus";
+ query2.IdColumn = "programstatus_id";
+ query2.Type = DbQueryInsert | DbQueryDelete;
+ query2.Category = DbCatProgramStatus;
+
+ query2.Fields = new Dictionary({
+ { "instance_id", 0 }, /* DbConnection class fills in real ID */
+ { "program_version", Application::GetAppVersion() },
+ { "status_update_time", DbValue::FromTimestamp(Utility::GetTime()) },
+ { "program_start_time", DbValue::FromTimestamp(Application::GetStartTime()) },
+ { "is_currently_running", 1 },
+ { "endpoint_name", icingaApplication->GetNodeName() },
+ { "process_id", Utility::GetPid() },
+ { "daemon_mode", 1 },
+ { "last_command_check", DbValue::FromTimestamp(Utility::GetTime()) },
+ { "notifications_enabled", (icingaApplication->GetEnableNotifications() ? 1 : 0) },
+ { "active_host_checks_enabled", (icingaApplication->GetEnableHostChecks() ? 1 : 0) },
+ { "passive_host_checks_enabled", 1 },
+ { "active_service_checks_enabled", (icingaApplication->GetEnableServiceChecks() ? 1 : 0) },
+ { "passive_service_checks_enabled", 1 },
+ { "event_handlers_enabled", (icingaApplication->GetEnableEventHandlers() ? 1 : 0) },
+ { "flap_detection_enabled", (icingaApplication->GetEnableFlapping() ? 1 : 0) },
+ { "process_performance_data", (icingaApplication->GetEnablePerfdata() ? 1 : 0) }
+ });
+
+ query2.WhereCriteria = new Dictionary({
+ { "instance_id", 0 } /* DbConnection class fills in real ID */
+ });
+
+ queries.emplace_back(std::move(query2));
+
+ DbQuery query3;
+ query3.Type = DbQueryNewTransaction;
+ queries.emplace_back(std::move(query3));
+
+ DbObject::OnMultipleQueries(queries);
+
+ DbQuery query4;
+ query4.Table = "runtimevariables";
+ query4.Type = DbQueryDelete;
+ query4.Category = DbCatProgramStatus;
+ query4.WhereCriteria = new Dictionary({
+ { "instance_id", 0 } /* DbConnection class fills in real ID */
+ });
+ DbObject::OnQuery(query4);
+
+ InsertRuntimeVariable("total_services", ConfigType::Get<Service>()->GetObjectCount());
+ InsertRuntimeVariable("total_scheduled_services", ConfigType::Get<Service>()->GetObjectCount());
+ InsertRuntimeVariable("total_hosts", ConfigType::Get<Host>()->GetObjectCount());
+ InsertRuntimeVariable("total_scheduled_hosts", ConfigType::Get<Host>()->GetObjectCount());
+}
+
+void DbConnection::CleanUpHandler()
+{
+ auto now = static_cast<long>(Utility::GetTime());
+
+ struct {
+ String name;
+ String time_column;
+ } tables[] = {
+ { "acknowledgements", "entry_time" },
+ { "commenthistory", "entry_time" },
+ { "contactnotifications", "start_time" },
+ { "contactnotificationmethods", "start_time" },
+ { "downtimehistory", "entry_time" },
+ { "eventhandlers", "start_time" },
+ { "externalcommands", "entry_time" },
+ { "flappinghistory", "event_time" },
+ { "hostchecks", "start_time" },
+ { "logentries", "logentry_time" },
+ { "notifications", "start_time" },
+ { "processevents", "event_time" },
+ { "statehistory", "state_time" },
+ { "servicechecks", "start_time" },
+ { "systemcommands", "start_time" }
+ };
+
+ for (auto& table : tables) {
+ double max_age = GetCleanup()->Get(table.name + "_age");
+
+ if (max_age == 0)
+ continue;
+
+ CleanUpExecuteQuery(table.name, table.time_column, now - max_age);
+ Log(LogNotice, "DbConnection")
+ << "Cleanup (" << table.name << "): " << max_age
+ << " now: " << now
+ << " old: " << now - max_age;
+ }
+
+}
+
+void DbConnection::LogStatsHandler()
+{
+ if (!GetConnected() || IsPaused())
+ return;
+
+ auto pending = m_PendingQueries.load();
+
+ auto now = Utility::GetTime();
+ bool timeoutReached = m_LogStatsTimeout < now;
+
+ if (pending == 0u && !timeoutReached) {
+ return;
+ }
+
+ auto output = round(m_OutputQueries.CalculateRate(now, 10));
+
+ if (pending < output * 5 && !timeoutReached) {
+ return;
+ }
+
+ auto input = round(m_InputQueries.CalculateRate(now, 10));
+
+ Log(LogInformation, GetReflectionType()->GetName())
+ << "Pending queries: " << pending << " (Input: " << input
+ << "/s; Output: " << output << "/s)";
+
+ /* Reschedule next log entry in 5 minutes. */
+ if (timeoutReached) {
+ m_LogStatsTimeout = now + 60 * 5;
+ }
+}
+
+void DbConnection::CleanUpExecuteQuery(const String&, const String&, double)
+{
+ /* Default handler does nothing. */
+}
+
+void DbConnection::SetConfigHash(const DbObject::Ptr& dbobj, const String& hash)
+{
+ SetConfigHash(dbobj->GetType(), GetObjectID(dbobj), hash);
+}
+
+void DbConnection::SetConfigHash(const DbType::Ptr& type, const DbReference& objid, const String& hash)
+{
+ if (!objid.IsValid())
+ return;
+
+ if (!hash.IsEmpty())
+ m_ConfigHashes[std::make_pair(type, objid)] = hash;
+ else
+ m_ConfigHashes.erase(std::make_pair(type, objid));
+}
+
+String DbConnection::GetConfigHash(const DbObject::Ptr& dbobj) const
+{
+ return GetConfigHash(dbobj->GetType(), GetObjectID(dbobj));
+}
+
+String DbConnection::GetConfigHash(const DbType::Ptr& type, const DbReference& objid) const
+{
+ if (!objid.IsValid())
+ return String();
+
+ auto it = m_ConfigHashes.find(std::make_pair(type, objid));
+
+ if (it == m_ConfigHashes.end())
+ return String();
+
+ return it->second;
+}
+
+void DbConnection::SetObjectID(const DbObject::Ptr& dbobj, const DbReference& dbref)
+{
+ if (dbref.IsValid())
+ m_ObjectIDs[dbobj] = dbref;
+ else
+ m_ObjectIDs.erase(dbobj);
+}
+
+DbReference DbConnection::GetObjectID(const DbObject::Ptr& dbobj) const
+{
+ auto it = m_ObjectIDs.find(dbobj);
+
+ if (it == m_ObjectIDs.end())
+ return {};
+
+ return it->second;
+}
+
+void DbConnection::SetInsertID(const DbObject::Ptr& dbobj, const DbReference& dbref)
+{
+ SetInsertID(dbobj->GetType(), GetObjectID(dbobj), dbref);
+}
+
+void DbConnection::SetInsertID(const DbType::Ptr& type, const DbReference& objid, const DbReference& dbref)
+{
+ if (!objid.IsValid())
+ return;
+
+ if (dbref.IsValid())
+ m_InsertIDs[std::make_pair(type, objid)] = dbref;
+ else
+ m_InsertIDs.erase(std::make_pair(type, objid));
+}
+
+DbReference DbConnection::GetInsertID(const DbObject::Ptr& dbobj) const
+{
+ return GetInsertID(dbobj->GetType(), GetObjectID(dbobj));
+}
+
+DbReference DbConnection::GetInsertID(const DbType::Ptr& type, const DbReference& objid) const
+{
+ if (!objid.IsValid())
+ return {};
+
+ auto it = m_InsertIDs.find(std::make_pair(type, objid));
+
+ if (it == m_InsertIDs.end())
+ return DbReference();
+
+ return it->second;
+}
+
+void DbConnection::SetObjectActive(const DbObject::Ptr& dbobj, bool active)
+{
+ if (active)
+ m_ActiveObjects.insert(dbobj);
+ else
+ m_ActiveObjects.erase(dbobj);
+}
+
+bool DbConnection::GetObjectActive(const DbObject::Ptr& dbobj) const
+{
+ return (m_ActiveObjects.find(dbobj) != m_ActiveObjects.end());
+}
+
+void DbConnection::ClearIDCache()
+{
+ SetIDCacheValid(false);
+
+ m_ObjectIDs.clear();
+ m_InsertIDs.clear();
+ m_ActiveObjects.clear();
+ m_ConfigUpdates.clear();
+ m_StatusUpdates.clear();
+ m_ConfigHashes.clear();
+}
+
+void DbConnection::SetConfigUpdate(const DbObject::Ptr& dbobj, bool hasupdate)
+{
+ if (hasupdate)
+ m_ConfigUpdates.insert(dbobj);
+ else
+ m_ConfigUpdates.erase(dbobj);
+}
+
+bool DbConnection::GetConfigUpdate(const DbObject::Ptr& dbobj) const
+{
+ return (m_ConfigUpdates.find(dbobj) != m_ConfigUpdates.end());
+}
+
+void DbConnection::SetStatusUpdate(const DbObject::Ptr& dbobj, bool hasupdate)
+{
+ if (hasupdate)
+ m_StatusUpdates.insert(dbobj);
+ else
+ m_StatusUpdates.erase(dbobj);
+}
+
+bool DbConnection::GetStatusUpdate(const DbObject::Ptr& dbobj) const
+{
+ return (m_StatusUpdates.find(dbobj) != m_StatusUpdates.end());
+}
+
+void DbConnection::UpdateObject(const ConfigObject::Ptr& object)
+{
+ bool isShuttingDown = Application::IsShuttingDown();
+ bool isRestarting = Application::IsRestarting();
+
+#ifdef I2_DEBUG
+ if (isShuttingDown || isRestarting) {
+ //Log(LogDebug, "DbConnection")
+ // << "Updating object '" << object->GetName() << "' \t\t active '" << Convert::ToLong(object->IsActive())
+ // << "' shutting down '" << Convert::ToLong(isShuttingDown) << "' restarting '" << Convert::ToLong(isRestarting) << "'.";
+ }
+#endif /* I2_DEBUG */
+
+ /* Wait until a database connection is established on reconnect. */
+ if (!GetConnected())
+ return;
+
+ /* Don't update inactive objects during shutdown/reload/restart.
+ * They would be marked as deleted. This gets triggered with ConfigObject::StopObjects().
+ * During startup/reconnect this is fine, the handler is not active there.
+ */
+ if (isShuttingDown || isRestarting)
+ return;
+
+ DbObject::Ptr dbobj = DbObject::GetOrCreateByObject(object);
+
+ if (dbobj) {
+ bool dbActive = GetObjectActive(dbobj);
+ bool active = object->IsActive();
+
+ if (active) {
+ if (!dbActive)
+ ActivateObject(dbobj);
+
+ Dictionary::Ptr configFields = dbobj->GetConfigFields();
+ String configHash = dbobj->CalculateConfigHash(configFields);
+ ASSERT(configHash.GetLength() <= 64);
+ configFields->Set("config_hash", configHash);
+
+ String cachedHash = GetConfigHash(dbobj);
+
+ if (cachedHash != configHash) {
+ dbobj->SendConfigUpdateHeavy(configFields);
+ dbobj->SendStatusUpdate();
+ } else {
+ dbobj->SendConfigUpdateLight();
+ }
+ } else if (!active) {
+ /* This may happen on reload/restart actions too
+ * and is blocked above already.
+ *
+ * Deactivate the deleted object no matter
+ * which state it had in the database.
+ */
+ DeactivateObject(dbobj);
+ }
+ }
+}
+
+void DbConnection::UpdateAllObjects()
+{
+ for (const Type::Ptr& type : Type::GetAllTypes()) {
+ auto *dtype = dynamic_cast<ConfigType *>(type.get());
+
+ if (!dtype)
+ continue;
+
+ for (const ConfigObject::Ptr& object : dtype->GetObjects()) {
+ m_QueryQueue.Enqueue([this, object](){ UpdateObject(object); }, PriorityHigh);
+ }
+ }
+}
+
+void DbConnection::PrepareDatabase()
+{
+ for (const DbType::Ptr& type : DbType::GetAllTypes()) {
+ FillIDCache(type);
+ }
+}
+
+void DbConnection::ValidateFailoverTimeout(const Lazy<double>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<DbConnection>::ValidateFailoverTimeout(lvalue, utils);
+
+ if (lvalue() < 30)
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "failover_timeout" }, "Failover timeout minimum is 30s."));
+}
+
+void DbConnection::ValidateCategories(const Lazy<Array::Ptr>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<DbConnection>::ValidateCategories(lvalue, utils);
+
+ int filter = FilterArrayToInt(lvalue(), DbQuery::GetCategoryFilterMap(), 0);
+
+ if (filter != DbCatEverything && (filter & ~(DbCatInvalid | DbCatEverything | DbCatConfig | DbCatState |
+ DbCatAcknowledgement | DbCatComment | DbCatDowntime | DbCatEventHandler | DbCatExternalCommand |
+ DbCatFlapping | DbCatLog | DbCatNotification | DbCatProgramStatus | DbCatRetention |
+ DbCatStateHistory)) != 0)
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "categories" }, "categories filter is invalid."));
+}
+
+void DbConnection::IncreaseQueryCount()
+{
+ double now = Utility::GetTime();
+
+ std::unique_lock<std::mutex> lock(m_StatsMutex);
+ m_QueryStats.InsertValue(now, 1);
+}
+
+int DbConnection::GetQueryCount(RingBuffer::SizeType span)
+{
+ std::unique_lock<std::mutex> lock(m_StatsMutex);
+ return m_QueryStats.UpdateAndGetValues(Utility::GetTime(), span);
+}
+
+bool DbConnection::IsIDCacheValid() const
+{
+ return m_IDCacheValid;
+}
+
+void DbConnection::SetIDCacheValid(bool valid)
+{
+ m_IDCacheValid = valid;
+}
+
+int DbConnection::GetSessionToken()
+{
+ return Application::GetStartTime();
+}
+
+void DbConnection::IncreasePendingQueries(int count)
+{
+ m_PendingQueries.fetch_add(count);
+ m_InputQueries.InsertValue(Utility::GetTime(), count);
+}
+
+void DbConnection::DecreasePendingQueries(int count)
+{
+ m_PendingQueries.fetch_sub(count);
+ m_OutputQueries.InsertValue(Utility::GetTime(), count);
+}
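
DbConnection::LogStatsHandler() above throttles its statistics output: a line is logged only when the pending backlog reaches five times the 10-second output rate, or unconditionally once a five-minute timeout has expired, and the timeout is re-armed only in the latter case. Below is a self-contained sketch of that decision, assuming the rates have already been computed (in Icinga they come from the RingBuffer members; all names here are illustrative):

	#include <cstdint>
	#include <iostream>

	struct QueryStats
	{
		std::uint64_t pending;  // queries queued but not yet written to the database
		double inputRate;       // queries/s received over the last 10 seconds
		double outputRate;      // queries/s written over the last 10 seconds
	};

	// Returns true when a statistics line should be logged; re-arms the
	// five-minute timeout only when it was the timeout that triggered the log.
	static bool ShouldLogStats(const QueryStats& stats, double now, double& timeout)
	{
		bool timeoutReached = timeout < now;

		if (stats.pending == 0 && !timeoutReached)
			return false;

		if (stats.pending < stats.outputRate * 5 && !timeoutReached)
			return false;

		if (timeoutReached)
			timeout = now + 5 * 60;

		return true;
	}

	int main()
	{
		double timeout = 0;
		QueryStats stats{1000, 80.0, 50.0};

		if (ShouldLogStats(stats, 100.0, timeout))
			std::cout << "Pending queries: " << stats.pending << " (Input: " << stats.inputRate
				<< "/s; Output: " << stats.outputRate << "/s)\n";

		return 0;
	}
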
diff --git a/lib/db_ido/dbconnection.hpp b/lib/db_ido/dbconnection.hpp
new file mode 100644
index 0000000..517a8a4
--- /dev/null
+++ b/lib/db_ido/dbconnection.hpp
@@ -0,0 +1,138 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef DBCONNECTION_H
+#define DBCONNECTION_H
+
+#include "db_ido/i2-db_ido.hpp"
+#include "db_ido/dbconnection-ti.hpp"
+#include "db_ido/dbobject.hpp"
+#include "db_ido/dbquery.hpp"
+#include "base/timer.hpp"
+#include "base/ringbuffer.hpp"
+#include <boost/thread/once.hpp>
+#include <mutex>
+
+namespace icinga
+{
+
+/**
+ * A database connection.
+ *
+ * @ingroup db_ido
+ */
+class DbConnection : public ObjectImpl<DbConnection>
+{
+public:
+ DECLARE_OBJECT(DbConnection);
+
+ static void InitializeDbTimer();
+
+ virtual const char * GetLatestSchemaVersion() const noexcept = 0;
+ virtual const char * GetCompatSchemaVersion() const noexcept = 0;
+
+ void SetConfigHash(const DbObject::Ptr& dbobj, const String& hash);
+ void SetConfigHash(const DbType::Ptr& type, const DbReference& objid, const String& hash);
+ String GetConfigHash(const DbObject::Ptr& dbobj) const;
+ String GetConfigHash(const DbType::Ptr& type, const DbReference& objid) const;
+
+ void SetObjectID(const DbObject::Ptr& dbobj, const DbReference& dbref);
+ DbReference GetObjectID(const DbObject::Ptr& dbobj) const;
+
+ void SetInsertID(const DbObject::Ptr& dbobj, const DbReference& dbref);
+ void SetInsertID(const DbType::Ptr& type, const DbReference& objid, const DbReference& dbref);
+ DbReference GetInsertID(const DbObject::Ptr& dbobj) const;
+ DbReference GetInsertID(const DbType::Ptr& type, const DbReference& objid) const;
+
+ void SetObjectActive(const DbObject::Ptr& dbobj, bool active);
+ bool GetObjectActive(const DbObject::Ptr& dbobj) const;
+
+ void ClearIDCache();
+
+ void SetConfigUpdate(const DbObject::Ptr& dbobj, bool hasupdate);
+ bool GetConfigUpdate(const DbObject::Ptr& dbobj) const;
+
+ void SetStatusUpdate(const DbObject::Ptr& dbobj, bool hasupdate);
+ bool GetStatusUpdate(const DbObject::Ptr& dbobj) const;
+
+ int GetQueryCount(RingBuffer::SizeType span);
+ virtual int GetPendingQueryCount() const = 0;
+
+ void ValidateFailoverTimeout(const Lazy<double>& lvalue, const ValidationUtils& utils) final;
+ void ValidateCategories(const Lazy<Array::Ptr>& lvalue, const ValidationUtils& utils) final;
+
+protected:
+ void OnConfigLoaded() override;
+ void Start(bool runtimeCreated) override;
+ void Stop(bool runtimeRemoved) override;
+ void Resume() override;
+ void Pause() override;
+
+ virtual void ExecuteQuery(const DbQuery& query) = 0;
+ virtual void ExecuteMultipleQueries(const std::vector<DbQuery>&) = 0;
+ virtual void ActivateObject(const DbObject::Ptr& dbobj) = 0;
+ virtual void DeactivateObject(const DbObject::Ptr& dbobj) = 0;
+
+ virtual void CleanUpExecuteQuery(const String& table, const String& time_column, double max_age);
+ virtual void FillIDCache(const DbType::Ptr& type) = 0;
+ virtual void NewTransaction() = 0;
+ virtual void Disconnect() = 0;
+
+ void UpdateObject(const ConfigObject::Ptr& object);
+ void UpdateAllObjects();
+
+ void PrepareDatabase();
+
+ void IncreaseQueryCount();
+
+ bool IsIDCacheValid() const;
+ void SetIDCacheValid(bool valid);
+
+ void EnableActiveChangedHandler();
+
+ static void UpdateProgramStatus();
+
+ static int GetSessionToken();
+
+ void IncreasePendingQueries(int count);
+ void DecreasePendingQueries(int count);
+
+ WorkQueue m_QueryQueue{10000000, 1, LogNotice};
+
+private:
+ bool m_IDCacheValid{false};
+ std::map<std::pair<DbType::Ptr, DbReference>, String> m_ConfigHashes;
+ std::map<DbObject::Ptr, DbReference> m_ObjectIDs;
+ std::map<std::pair<DbType::Ptr, DbReference>, DbReference> m_InsertIDs;
+ std::set<DbObject::Ptr> m_ActiveObjects;
+ std::set<DbObject::Ptr> m_ConfigUpdates;
+ std::set<DbObject::Ptr> m_StatusUpdates;
+ Timer::Ptr m_CleanUpTimer;
+ Timer::Ptr m_LogStatsTimer;
+
+ double m_LogStatsTimeout;
+
+ void CleanUpHandler();
+ void LogStatsHandler();
+
+ static Timer::Ptr m_ProgramStatusTimer;
+ static boost::once_flag m_OnceFlag;
+
+ static void InsertRuntimeVariable(const String& key, const Value& value);
+
+ mutable std::mutex m_StatsMutex;
+ RingBuffer m_QueryStats{15 * 60};
+ bool m_ActiveChangedHandler{false};
+
+ RingBuffer m_InputQueries{10};
+ RingBuffer m_OutputQueries{10};
+ Atomic<uint_fast64_t> m_PendingQueries{0};
+};
+
+struct database_error : virtual std::exception, virtual boost::exception { };
+
+struct errinfo_database_query_;
+typedef boost::error_info<struct errinfo_database_query_, std::string> errinfo_database_query;
+
+}
+
+#endif /* DBCONNECTION_H */
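
The header above declares a per-process boost::once_flag and a static programstatus Timer; OnConfigLoaded() in dbconnection.cpp runs once per configured DbConnection object, but boost::call_once ensures InitializeDbTimer() creates the shared timer only once. A minimal sketch of the same idiom with the standard library equivalents (std::once_flag / std::call_once); class and function names are placeholders:

	#include <iostream>
	#include <mutex>

	namespace
	{
		std::once_flag l_TimerOnce;

		// Stand-in for InitializeDbTimer(): would create and start the shared timer.
		void InitializeSharedTimer()
		{
			std::cout << "shared timer created\n";
		}
	}

	class Connection
	{
	public:
		// Called once per configured connection object.
		void OnConfigLoaded()
		{
			std::call_once(l_TimerOnce, &InitializeSharedTimer);
		}
	};

	int main()
	{
		Connection a, b;
		a.OnConfigLoaded();
		b.OnConfigLoaded(); // prints "shared timer created" exactly once
		return 0;
	}
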
diff --git a/lib/db_ido/dbconnection.ti b/lib/db_ido/dbconnection.ti
new file mode 100644
index 0000000..ad02b40
--- /dev/null
+++ b/lib/db_ido/dbconnection.ti
@@ -0,0 +1,82 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido/dbquery.hpp"
+#include "base/configobject.hpp"
+
+library db_ido;
+
+namespace icinga
+{
+
+abstract class DbConnection : ConfigObject
+{
+ [config] String table_prefix {
+ default {{{ return "icinga_"; }}}
+ };
+
+ [config, required] Dictionary::Ptr cleanup {
+ default {{{ return new Dictionary(); }}}
+ };
+
+ [config] Array::Ptr categories {
+ default {{{
+ return new Array({
+ "DbCatConfig",
+ "DbCatState",
+ "DbCatAcknowledgement",
+ "DbCatComment",
+ "DbCatDowntime",
+ "DbCatEventHandler",
+ "DbCatFlapping",
+ "DbCatNotification",
+ "DbCatProgramStatus",
+ "DbCatRetention",
+ "DbCatStateHistory"
+ });
+ }}}
+ };
+ [no_user_view, no_user_modify] int categories_filter_real (CategoryFilter);
+
+ [config] bool enable_ha {
+ default {{{ return true; }}}
+ };
+
+ [config] double failover_timeout {
+ default {{{ return 30; }}}
+ };
+
+ [state, no_user_modify] double last_failover;
+
+ [no_user_modify] String schema_version;
+ [no_user_modify] bool connected;
+ [no_user_modify] bool should_connect {
+ default {{{ return true; }}}
+ };
+};
+
+
+validator DbConnection {
+ Dictionary cleanup {
+ Number acknowledgements_age;
+ Number commenthistory_age;
+ Number contactnotifications_age;
+ Number contactnotificationmethods_age;
+ Number downtimehistory_age;
+ Number eventhandlers_age;
+ Number externalcommands_age;
+ Number flappinghistory_age;
+ Number hostchecks_age;
+ Number logentries_age;
+ Number notifications_age;
+ Number processevents_age;
+ Number statehistory_age;
+ Number servicechecks_age;
+ Number systemcommands_age;
+ };
+
+ Array categories {
+ String "*";
+ };
+};
+
+}
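
The categories array declared above is folded into a bitmask (FilterArrayToInt with DbQuery::GetCategoryFilterMap() in dbconnection.cpp), and ValidateCategories() rejects any bit outside the known DbCat* set. A rough sketch of that name-to-bitmask folding and the validity check, using made-up enum values rather than the real DbCat* constants:

	#include <map>
	#include <stdexcept>
	#include <string>
	#include <vector>

	enum Category : unsigned
	{
		CatConfig  = 1u << 0,
		CatState   = 1u << 1,
		CatComment = 1u << 2
	};

	// Folds a list of category names into a single bitmask; unknown names throw.
	static unsigned FilterArrayToInt(const std::vector<std::string>& names,
		const std::map<std::string, unsigned>& filterMap)
	{
		unsigned filter = 0;

		for (const auto& name : names) {
			auto it = filterMap.find(name);

			if (it == filterMap.end())
				throw std::invalid_argument("Unknown category: " + name);

			filter |= it->second;
		}

		return filter;
	}

	int main()
	{
		const std::map<std::string, unsigned> filterMap{
			{ "CatConfig", CatConfig }, { "CatState", CatState }, { "CatComment", CatComment }
		};

		unsigned filter = FilterArrayToInt({ "CatConfig", "CatState" }, filterMap);
		unsigned known = CatConfig | CatState | CatComment;

		// Mirrors ValidateCategories(): any bit outside the known set is invalid.
		return (filter & ~known) == 0 ? 0 : 1;
	}
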
diff --git a/lib/db_ido/dbevents.cpp b/lib/db_ido/dbevents.cpp
new file mode 100644
index 0000000..8358824
--- /dev/null
+++ b/lib/db_ido/dbevents.cpp
@@ -0,0 +1,1884 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido/dbevents.hpp"
+#include "db_ido/dbtype.hpp"
+#include "db_ido/dbvalue.hpp"
+#include "base/convert.hpp"
+#include "base/objectlock.hpp"
+#include "base/initialize.hpp"
+#include "base/configtype.hpp"
+#include "base/utility.hpp"
+#include "base/logger.hpp"
+#include "remote/endpoint.hpp"
+#include "icinga/notification.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/eventcommand.hpp"
+#include "icinga/externalcommandprocessor.hpp"
+#include "icinga/compatutility.hpp"
+#include "icinga/pluginutility.hpp"
+#include "icinga/icingaapplication.hpp"
+#include <boost/algorithm/string/join.hpp>
+#include <utility>
+
+using namespace icinga;
+
+INITIALIZE_ONCE(&DbEvents::StaticInitialize);
+
+void DbEvents::StaticInitialize()
+{
+ /* Status */
+ Comment::OnCommentAdded.connect([](const Comment::Ptr& comment) { DbEvents::AddComment(comment); });
+ Comment::OnCommentRemoved.connect([](const Comment::Ptr& comment) { DbEvents::RemoveComment(comment); });
+ Downtime::OnDowntimeAdded.connect([](const Downtime::Ptr& downtime) { DbEvents::AddDowntime(downtime); });
+ Downtime::OnDowntimeRemoved.connect([](const Downtime::Ptr& downtime) { DbEvents::RemoveDowntime(downtime); });
+ Downtime::OnDowntimeTriggered.connect([](const Downtime::Ptr& downtime) { DbEvents::TriggerDowntime(downtime); });
+ Checkable::OnAcknowledgementSet.connect([](const Checkable::Ptr& checkable, const String&, const String&,
+ AcknowledgementType type, bool, bool, double, double, const MessageOrigin::Ptr&) {
+ DbEvents::AddAcknowledgement(checkable, type);
+ });
+ Checkable::OnAcknowledgementCleared.connect([](const Checkable::Ptr& checkable, const String&, double, const MessageOrigin::Ptr&) {
+ DbEvents::RemoveAcknowledgement(checkable);
+ });
+
+ Checkable::OnNextCheckUpdated.connect([](const Checkable::Ptr& checkable) { NextCheckUpdatedHandler(checkable); });
+ Checkable::OnFlappingChanged.connect([](const Checkable::Ptr& checkable, const Value&) { FlappingChangedHandler(checkable); });
+ Checkable::OnNotificationSentToAllUsers.connect([](const Notification::Ptr& notification, const Checkable::Ptr& checkable,
+ const std::set<User::Ptr>&, const NotificationType&, const CheckResult::Ptr&, const String&, const String&,
+ const MessageOrigin::Ptr&) {
+ DbEvents::LastNotificationChangedHandler(notification, checkable);
+ });
+
+ Checkable::OnEnableActiveChecksChanged.connect([](const Checkable::Ptr& checkable, const Value&) {
+ DbEvents::EnableActiveChecksChangedHandler(checkable);
+ });
+ Checkable::OnEnablePassiveChecksChanged.connect([](const Checkable::Ptr& checkable, const Value&) {
+ DbEvents::EnablePassiveChecksChangedHandler(checkable);
+ });
+ Checkable::OnEnableNotificationsChanged.connect([](const Checkable::Ptr& checkable, const Value&) {
+ DbEvents::EnableNotificationsChangedHandler(checkable);
+ });
+ Checkable::OnEnablePerfdataChanged.connect([](const Checkable::Ptr& checkable, const Value&) {
+ DbEvents::EnablePerfdataChangedHandler(checkable);
+ });
+ Checkable::OnEnableFlappingChanged.connect([](const Checkable::Ptr& checkable, const Value&) {
+ DbEvents::EnableFlappingChangedHandler(checkable);
+ });
+
+ Checkable::OnReachabilityChanged.connect([](const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
+ std::set<Checkable::Ptr> children, const MessageOrigin::Ptr&) {
+ DbEvents::ReachabilityChangedHandler(checkable, cr, std::move(children));
+ });
+
+ /* History */
+ Comment::OnCommentAdded.connect([](const Comment::Ptr& comment) { AddCommentHistory(comment); });
+ Downtime::OnDowntimeAdded.connect([](const Downtime::Ptr& downtime) { AddDowntimeHistory(downtime); });
+ Checkable::OnAcknowledgementSet.connect([](const Checkable::Ptr& checkable, const String& author, const String& comment,
+ AcknowledgementType type, bool notify, bool, double expiry, double, const MessageOrigin::Ptr&) {
+ DbEvents::AddAcknowledgementHistory(checkable, author, comment, type, notify, expiry);
+ });
+
+ Checkable::OnNotificationSentToAllUsers.connect([](const Notification::Ptr& notification, const Checkable::Ptr& checkable,
+ const std::set<User::Ptr>& users, const NotificationType& type, const CheckResult::Ptr& cr, const String& author,
+ const String& text, const MessageOrigin::Ptr&) {
+ DbEvents::AddNotificationHistory(notification, checkable, users, type, cr, author, text);
+ });
+
+ Checkable::OnStateChange.connect([](const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, StateType type, const MessageOrigin::Ptr&) {
+ DbEvents::AddStateChangeHistory(checkable, cr, type);
+ });
+
+ Checkable::OnNewCheckResult.connect([](const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, const MessageOrigin::Ptr&) {
+ DbEvents::AddCheckResultLogHistory(checkable, cr);
+ });
+ Checkable::OnNotificationSentToUser.connect([](const Notification::Ptr& notification, const Checkable::Ptr& checkable,
+ const User::Ptr& users, const NotificationType& type, const CheckResult::Ptr& cr, const String& author, const String& text,
+ const String&, const MessageOrigin::Ptr&) {
+ DbEvents::AddNotificationSentLogHistory(notification, checkable, users, type, cr, author, text);
+ });
+ Checkable::OnFlappingChanged.connect([](const Checkable::Ptr& checkable, const Value&) {
+ DbEvents::AddFlappingChangedLogHistory(checkable);
+ });
+ Checkable::OnEnableFlappingChanged.connect([](const Checkable::Ptr& checkable, const Value&) {
+ DbEvents::AddEnableFlappingChangedLogHistory(checkable);
+ });
+ Downtime::OnDowntimeTriggered.connect([](const Downtime::Ptr& downtime) { DbEvents::AddTriggerDowntimeLogHistory(downtime); });
+ Downtime::OnDowntimeRemoved.connect([](const Downtime::Ptr& downtime) { DbEvents::AddRemoveDowntimeLogHistory(downtime); });
+
+ Checkable::OnFlappingChanged.connect([](const Checkable::Ptr& checkable, const Value&) { DbEvents::AddFlappingChangedHistory(checkable); });
+ Checkable::OnEnableFlappingChanged.connect([](const Checkable::Ptr& checkable, const Value&) { DbEvents::AddEnableFlappingChangedHistory(checkable); });
+ Checkable::OnNewCheckResult.connect([](const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, const MessageOrigin::Ptr&) {
+ DbEvents::AddCheckableCheckHistory(checkable, cr);
+ });
+
+ Checkable::OnEventCommandExecuted.connect([](const Checkable::Ptr& checkable) { DbEvents::AddEventHandlerHistory(checkable); });
+
+ ExternalCommandProcessor::OnNewExternalCommand.connect([](double time, const String& command, const std::vector<String>& arguments) {
+ DbEvents::AddExternalCommandHistory(time, command, arguments);
+ });
+}
+
+/* check events */
+void DbEvents::NextCheckUpdatedHandler(const Checkable::Ptr& checkable)
+{
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ DbQuery query1;
+ query1.WhereCriteria = new Dictionary();
+
+ if (service) {
+ query1.Table = "servicestatus";
+ query1.WhereCriteria->Set("service_object_id", service);
+ } else {
+ query1.Table = "hoststatus";
+ query1.WhereCriteria->Set("host_object_id", host);
+ }
+
+ query1.Type = DbQueryUpdate;
+ query1.Category = DbCatState;
+ query1.StatusUpdate = true;
+ query1.Object = DbObject::GetOrCreateByObject(checkable);
+
+ query1.Fields = new Dictionary({
+ { "next_check", DbValue::FromTimestamp(checkable->GetNextCheck()) }
+ });
+
+ DbObject::OnQuery(query1);
+}
+
+void DbEvents::FlappingChangedHandler(const Checkable::Ptr& checkable)
+{
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ DbQuery query1;
+ query1.WhereCriteria = new Dictionary();
+
+ if (service) {
+ query1.Table = "servicestatus";
+ query1.WhereCriteria->Set("service_object_id", service);
+ } else {
+ query1.Table = "hoststatus";
+ query1.WhereCriteria->Set("host_object_id", host);
+ }
+
+ query1.Type = DbQueryUpdate;
+ query1.Category = DbCatState;
+ query1.StatusUpdate = true;
+ query1.Object = DbObject::GetOrCreateByObject(checkable);
+
+ query1.Fields = new Dictionary({
+ { "is_flapping", checkable->IsFlapping() },
+ { "percent_state_change", checkable->GetFlappingCurrent() }
+ });
+
+ query1.WhereCriteria->Set("instance_id", 0); /* DbConnection class fills in real ID */
+
+ DbObject::OnQuery(query1);
+}
+
+void DbEvents::LastNotificationChangedHandler(const Notification::Ptr& notification, const Checkable::Ptr& checkable)
+{
+ std::pair<unsigned long, unsigned long> now_bag = ConvertTimestamp(Utility::GetTime());
+ std::pair<unsigned long, unsigned long> timeBag = ConvertTimestamp(notification->GetNextNotification());
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ DbQuery query1;
+ query1.WhereCriteria = new Dictionary();
+
+ if (service) {
+ query1.Table = "servicestatus";
+ query1.WhereCriteria->Set("service_object_id", service);
+ } else {
+ query1.Table = "hoststatus";
+ query1.WhereCriteria->Set("host_object_id", host);
+ }
+
+ query1.Type = DbQueryUpdate;
+ query1.Category = DbCatState;
+ query1.StatusUpdate = true;
+ query1.Object = DbObject::GetOrCreateByObject(checkable);
+
+ query1.Fields = new Dictionary({
+ { "last_notification", DbValue::FromTimestamp(now_bag.first) },
+ { "next_notification", DbValue::FromTimestamp(timeBag.first) },
+ { "current_notification_number", notification->GetNotificationNumber() }
+ });
+
+ query1.WhereCriteria->Set("instance_id", 0); /* DbConnection class fills in real ID */
+
+ DbObject::OnQuery(query1);
+}
+
+void DbEvents::ReachabilityChangedHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, std::set<Checkable::Ptr> children)
+{
+ int is_reachable = 0;
+
+ if (cr->GetState() == ServiceOK)
+ is_reachable = 1;
+
+ for (const Checkable::Ptr& child : children) {
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(child);
+
+ DbQuery query1;
+ query1.WhereCriteria = new Dictionary();
+
+ if (service) {
+ query1.Table = "servicestatus";
+ query1.WhereCriteria->Set("service_object_id", service);
+ } else {
+ query1.Table = "hoststatus";
+ query1.WhereCriteria->Set("host_object_id", host);
+ }
+
+ query1.Type = DbQueryUpdate;
+ query1.Category = DbCatState;
+ query1.StatusUpdate = true;
+ query1.Object = DbObject::GetOrCreateByObject(child);
+
+ query1.Fields = new Dictionary({
+ { "is_reachable", is_reachable }
+ });
+
+ query1.WhereCriteria->Set("instance_id", 0); /* DbConnection class fills in real ID */
+
+ DbObject::OnQuery(query1);
+ }
+}
+
+/* enable changed events */
+void DbEvents::EnableActiveChecksChangedHandler(const Checkable::Ptr& checkable)
+{
+ EnableChangedHandlerInternal(checkable, "active_checks_enabled", checkable->GetEnableActiveChecks());
+}
+
+void DbEvents::EnablePassiveChecksChangedHandler(const Checkable::Ptr& checkable)
+{
+ EnableChangedHandlerInternal(checkable, "passive_checks_enabled", checkable->GetEnablePassiveChecks());
+}
+
+void DbEvents::EnableNotificationsChangedHandler(const Checkable::Ptr& checkable)
+{
+ EnableChangedHandlerInternal(checkable, "notifications_enabled", checkable->GetEnableNotifications());
+}
+
+void DbEvents::EnablePerfdataChangedHandler(const Checkable::Ptr& checkable)
+{
+ EnableChangedHandlerInternal(checkable, "process_performance_data", checkable->GetEnablePerfdata());
+}
+
+void DbEvents::EnableFlappingChangedHandler(const Checkable::Ptr& checkable)
+{
+ EnableChangedHandlerInternal(checkable, "flap_detection_enabled", checkable->GetEnableFlapping());
+}
+
+void DbEvents::EnableChangedHandlerInternal(const Checkable::Ptr& checkable, const String& fieldName, bool enabled)
+{
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ DbQuery query1;
+ query1.WhereCriteria = new Dictionary();
+
+ if (service) {
+ query1.Table = "servicestatus";
+ query1.WhereCriteria->Set("service_object_id", service);
+ } else {
+ query1.Table = "hoststatus";
+ query1.WhereCriteria->Set("host_object_id", host);
+ }
+
+ query1.Type = DbQueryUpdate;
+ query1.Category = DbCatState;
+ query1.StatusUpdate = true;
+ query1.Object = DbObject::GetOrCreateByObject(checkable);
+
+ query1.Fields = new Dictionary({
+ { fieldName, enabled }
+ });
+
+ query1.WhereCriteria->Set("instance_id", 0); /* DbConnection class fills in real ID */
+
+ DbObject::OnQuery(query1);
+}
+
+
+/* comments */
+void DbEvents::AddComments(const Checkable::Ptr& checkable)
+{
+ std::set<Comment::Ptr> comments = checkable->GetComments();
+
+ std::vector<DbQuery> queries;
+
+ for (const Comment::Ptr& comment : comments) {
+ AddCommentInternal(queries, comment, false);
+ }
+
+ DbObject::OnMultipleQueries(queries);
+}
+
+void DbEvents::AddComment(const Comment::Ptr& comment)
+{
+ std::vector<DbQuery> queries;
+ AddCommentInternal(queries, comment, false);
+ DbObject::OnMultipleQueries(queries);
+}
+
+void DbEvents::AddCommentHistory(const Comment::Ptr& comment)
+{
+ std::vector<DbQuery> queries;
+ AddCommentInternal(queries, comment, true);
+ DbObject::OnMultipleQueries(queries);
+}
+
+void DbEvents::AddCommentInternal(std::vector<DbQuery>& queries, const Comment::Ptr& comment, bool historical)
+{
+ Checkable::Ptr checkable = comment->GetCheckable();
+
+ std::pair<unsigned long, unsigned long> timeBag = ConvertTimestamp(comment->GetEntryTime());
+
+ Dictionary::Ptr fields1 = new Dictionary();
+ fields1->Set("entry_time", DbValue::FromTimestamp(timeBag.first));
+ fields1->Set("entry_time_usec", timeBag.second);
+ fields1->Set("entry_type", comment->GetEntryType());
+ fields1->Set("object_id", checkable);
+
+ int commentType = 0;
+
+ if (checkable->GetReflectionType() == Host::TypeInstance)
+ commentType = 2;
+ else if (checkable->GetReflectionType() == Service::TypeInstance)
+ commentType = 1;
+ else {
+ return;
+ }
+
+ fields1->Set("comment_type", commentType);
+ fields1->Set("internal_comment_id", comment->GetLegacyId());
+ fields1->Set("name", comment->GetName());
+ fields1->Set("comment_time", DbValue::FromTimestamp(timeBag.first)); /* same as entry_time */
+ fields1->Set("author_name", comment->GetAuthor());
+ fields1->Set("comment_data", comment->GetText());
+ fields1->Set("is_persistent", comment->GetPersistent());
+ fields1->Set("comment_source", 1); /* external */
+ fields1->Set("expires", (comment->GetExpireTime() > 0));
+ fields1->Set("expiration_time", DbValue::FromTimestamp(comment->GetExpireTime()));
+ fields1->Set("instance_id", 0); /* DbConnection class fills in real ID */
+
+ Endpoint::Ptr endpoint = Endpoint::GetByName(IcingaApplication::GetInstance()->GetNodeName());
+
+ if (endpoint)
+ fields1->Set("endpoint_object_id", endpoint);
+
+ DbQuery query1;
+
+ if (!historical) {
+ query1.Table = "comments";
+ query1.Type = DbQueryInsert | DbQueryUpdate;
+
+ fields1->Set("session_token", 0); /* DbConnection class fills in real ID */
+
+ query1.WhereCriteria = new Dictionary({
+ { "object_id", checkable },
+ { "name", comment->GetName() },
+ { "entry_time", DbValue::FromTimestamp(timeBag.first) }
+ });
+ } else {
+ query1.Table = "commenthistory";
+ query1.Type = DbQueryInsert;
+ }
+
+ query1.Category = DbCatComment;
+ query1.Fields = fields1;
+ queries.emplace_back(std::move(query1));
+}
+
+void DbEvents::RemoveComment(const Comment::Ptr& comment)
+{
+ std::vector<DbQuery> queries;
+ RemoveCommentInternal(queries, comment);
+ DbObject::OnMultipleQueries(queries);
+}
+
+void DbEvents::RemoveCommentInternal(std::vector<DbQuery>& queries, const Comment::Ptr& comment)
+{
+ Checkable::Ptr checkable = comment->GetCheckable();
+
+ std::pair<unsigned long, unsigned long> timeBag = ConvertTimestamp(comment->GetEntryTime());
+
+ /* Status */
+ DbQuery query1;
+ query1.Table = "comments";
+ query1.Type = DbQueryDelete;
+ query1.Category = DbCatComment;
+
+ query1.WhereCriteria = new Dictionary({
+ { "object_id", checkable },
+ { "entry_time", DbValue::FromTimestamp(timeBag.first) },
+ { "name", comment->GetName() }
+ });
+
+ queries.emplace_back(std::move(query1));
+
+ /* History - update deletion time for service/host */
+ std::pair<unsigned long, unsigned long> timeBagNow = ConvertTimestamp(Utility::GetTime());
+
+ DbQuery query2;
+ query2.Table = "commenthistory";
+ query2.Type = DbQueryUpdate;
+ query2.Category = DbCatComment;
+
+ query2.Fields = new Dictionary({
+ { "deletion_time", DbValue::FromTimestamp(timeBagNow.first) },
+ { "deletion_time_usec", timeBagNow.second }
+ });
+
+ query2.WhereCriteria = new Dictionary({
+ { "object_id", checkable },
+ { "entry_time", DbValue::FromTimestamp(timeBag.first) },
+ { "name", comment->GetName() }
+ });
+
+ queries.emplace_back(std::move(query2));
+}
+
+/* downtimes */
+void DbEvents::AddDowntimes(const Checkable::Ptr& checkable)
+{
+ std::set<Downtime::Ptr> downtimes = checkable->GetDowntimes();
+
+ std::vector<DbQuery> queries;
+
+ for (const Downtime::Ptr& downtime : downtimes) {
+ AddDowntimeInternal(queries, downtime, false);
+ }
+
+ DbObject::OnMultipleQueries(queries);
+}
+
+void DbEvents::AddDowntime(const Downtime::Ptr& downtime)
+{
+ std::vector<DbQuery> queries;
+ AddDowntimeInternal(queries, downtime, false);
+ DbObject::OnMultipleQueries(queries);
+}
+
+void DbEvents::AddDowntimeHistory(const Downtime::Ptr& downtime)
+{
+ std::vector<DbQuery> queries;
+ AddDowntimeInternal(queries, downtime, true);
+ DbObject::OnMultipleQueries(queries);
+}
+
+void DbEvents::AddDowntimeInternal(std::vector<DbQuery>& queries, const Downtime::Ptr& downtime, bool historical)
+{
+ Checkable::Ptr checkable = downtime->GetCheckable();
+
+ Dictionary::Ptr fields1 = new Dictionary();
+ fields1->Set("entry_time", DbValue::FromTimestamp(downtime->GetEntryTime()));
+ fields1->Set("object_id", checkable);
+
+ int downtimeType = 0;
+
+ if (checkable->GetReflectionType() == Host::TypeInstance)
+ downtimeType = 2;
+ else if (checkable->GetReflectionType() == Service::TypeInstance)
+ downtimeType = 1;
+ else {
+ return;
+ }
+
+ fields1->Set("downtime_type", downtimeType);
+ fields1->Set("internal_downtime_id", downtime->GetLegacyId());
+ fields1->Set("author_name", downtime->GetAuthor());
+ fields1->Set("comment_data", downtime->GetComment());
+ fields1->Set("triggered_by_id", Downtime::GetByName(downtime->GetTriggeredBy()));
+ fields1->Set("is_fixed", downtime->GetFixed());
+ fields1->Set("duration", downtime->GetDuration());
+ fields1->Set("scheduled_start_time", DbValue::FromTimestamp(downtime->GetStartTime()));
+ fields1->Set("scheduled_end_time", DbValue::FromTimestamp(downtime->GetEndTime()));
+ fields1->Set("name", downtime->GetName());
+
+ /* flexible downtimes are started at trigger time */
+ if (downtime->GetFixed()) {
+ std::pair<unsigned long, unsigned long> timeBag = ConvertTimestamp(downtime->GetStartTime());
+
+ fields1->Set("actual_start_time", DbValue::FromTimestamp(timeBag.first));
+ fields1->Set("actual_start_time_usec", timeBag.second);
+ fields1->Set("was_started", ((downtime->GetStartTime() <= Utility::GetTime()) ? 1 : 0));
+ }
+
+ fields1->Set("is_in_effect", downtime->IsInEffect());
+ fields1->Set("trigger_time", DbValue::FromTimestamp(downtime->GetTriggerTime()));
+ fields1->Set("instance_id", 0); /* DbConnection class fills in real ID */
+
+ Endpoint::Ptr endpoint = Endpoint::GetByName(IcingaApplication::GetInstance()->GetNodeName());
+
+ if (endpoint)
+ fields1->Set("endpoint_object_id", endpoint);
+
+ DbQuery query1;
+
+ if (!historical) {
+ query1.Table = "scheduleddowntime";
+ query1.Type = DbQueryInsert | DbQueryUpdate;
+
+ fields1->Set("session_token", 0); /* DbConnection class fills in real ID */
+
+ query1.WhereCriteria = new Dictionary({
+ { "object_id", checkable },
+ { "name", downtime->GetName() },
+ { "entry_time", DbValue::FromTimestamp(downtime->GetEntryTime()) }
+ });
+ } else {
+ query1.Table = "downtimehistory";
+ query1.Type = DbQueryInsert;
+ }
+
+ query1.Category = DbCatDowntime;
+ query1.Fields = fields1;
+ queries.emplace_back(std::move(query1));
+
+ /* host/service status */
+ if (!historical) {
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ DbQuery query2;
+ query2.WhereCriteria = new Dictionary();
+
+ if (service) {
+ query2.Table = "servicestatus";
+ query2.WhereCriteria->Set("service_object_id", service);
+ } else {
+ query2.Table = "hoststatus";
+ query2.WhereCriteria->Set("host_object_id", host);
+ }
+
+ query2.Type = DbQueryUpdate;
+ query2.Category = DbCatState;
+ query2.StatusUpdate = true;
+ query2.Object = DbObject::GetOrCreateByObject(checkable);
+
+ Dictionary::Ptr fields2 = new Dictionary();
+ fields2->Set("scheduled_downtime_depth", checkable->GetDowntimeDepth());
+
+ query2.Fields = fields2;
+ query2.WhereCriteria->Set("instance_id", 0); /* DbConnection class fills in real ID */
+
+ queries.emplace_back(std::move(query2));
+ }
+}
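+
+/* Sketch of the generated queries: a live downtime (historical=false) becomes
+ * an insert-or-update on "scheduleddowntime" keyed by (object_id, name,
+ * entry_time) plus an update of scheduled_downtime_depth in the matching
+ * hoststatus/servicestatus row; with historical=true only a plain insert into
+ * "downtimehistory" is generated and the status tables are left untouched.
+ */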
+
+void DbEvents::RemoveDowntime(const Downtime::Ptr& downtime)
+{
+ std::vector<DbQuery> queries;
+ RemoveDowntimeInternal(queries, downtime);
+ DbObject::OnMultipleQueries(queries);
+}
+
+void DbEvents::RemoveDowntimeInternal(std::vector<DbQuery>& queries, const Downtime::Ptr& downtime)
+{
+ Checkable::Ptr checkable = downtime->GetCheckable();
+
+ /* Status */
+ DbQuery query1;
+ query1.Table = "scheduleddowntime";
+ query1.Type = DbQueryDelete;
+ query1.Category = DbCatDowntime;
+ query1.WhereCriteria = new Dictionary();
+
+ query1.WhereCriteria->Set("object_id", checkable);
+ query1.WhereCriteria->Set("entry_time", DbValue::FromTimestamp(downtime->GetEntryTime()));
+ query1.WhereCriteria->Set("instance_id", 0); /* DbConnection class fills in real ID */
+ query1.WhereCriteria->Set("scheduled_start_time", DbValue::FromTimestamp(downtime->GetStartTime()));
+ query1.WhereCriteria->Set("scheduled_end_time", DbValue::FromTimestamp(downtime->GetEndTime()));
+ query1.WhereCriteria->Set("name", downtime->GetName());
+ queries.emplace_back(std::move(query1));
+
+	/* History - update actual_end_time and was_cancelled for the service (or host, as applicable) */
+ std::pair<unsigned long, unsigned long> timeBag = ConvertTimestamp(Utility::GetTime());
+
+ DbQuery query3;
+ query3.Table = "downtimehistory";
+ query3.Type = DbQueryUpdate;
+ query3.Category = DbCatDowntime;
+
+ Dictionary::Ptr fields3 = new Dictionary();
+ fields3->Set("was_cancelled", downtime->GetWasCancelled() ? 1 : 0);
+
+ if (downtime->GetFixed() || (!downtime->GetFixed() && downtime->GetTriggerTime() > 0)) {
+ fields3->Set("actual_end_time", DbValue::FromTimestamp(timeBag.first));
+ fields3->Set("actual_end_time_usec", timeBag.second);
+ }
+
+ fields3->Set("is_in_effect", 0);
+ query3.Fields = fields3;
+
+ query3.WhereCriteria = new Dictionary({
+ { "object_id", checkable },
+ { "entry_time", DbValue::FromTimestamp(downtime->GetEntryTime()) },
+ { "instance_id", 0 }, /* DbConnection class fills in real ID */
+ { "scheduled_start_time", DbValue::FromTimestamp(downtime->GetStartTime()) },
+ { "scheduled_end_time", DbValue::FromTimestamp(downtime->GetEndTime()) },
+ { "name", downtime->GetName() }
+ });
+
+ queries.emplace_back(std::move(query3));
+
+ /* host/service status */
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ DbQuery query4;
+ query4.WhereCriteria = new Dictionary();
+
+ if (service) {
+ query4.Table = "servicestatus";
+ query4.WhereCriteria->Set("service_object_id", service);
+ } else {
+ query4.Table = "hoststatus";
+ query4.WhereCriteria->Set("host_object_id", host);
+ }
+
+ query4.Type = DbQueryUpdate;
+ query4.Category = DbCatState;
+ query4.StatusUpdate = true;
+ query4.Object = DbObject::GetOrCreateByObject(checkable);
+
+ Dictionary::Ptr fields4 = new Dictionary();
+ fields4->Set("scheduled_downtime_depth", checkable->GetDowntimeDepth());
+
+ query4.Fields = fields4;
+ query4.WhereCriteria->Set("instance_id", 0); /* DbConnection class fills in real ID */
+
+ queries.emplace_back(std::move(query4));
+}
+
+void DbEvents::TriggerDowntime(const Downtime::Ptr& downtime)
+{
+ Checkable::Ptr checkable = downtime->GetCheckable();
+
+ std::pair<unsigned long, unsigned long> timeBag = ConvertTimestamp(Utility::GetTime());
+
+ /* Status */
+ DbQuery query1;
+ query1.Table = "scheduleddowntime";
+ query1.Type = DbQueryUpdate;
+ query1.Category = DbCatDowntime;
+
+ query1.Fields = new Dictionary({
+ { "was_started", 1 },
+ { "actual_start_time", DbValue::FromTimestamp(timeBag.first) },
+ { "actual_start_time_usec", timeBag.second },
+ { "is_in_effect", (downtime->IsInEffect() ? 1 : 0) },
+ { "trigger_time", DbValue::FromTimestamp(downtime->GetTriggerTime()) },
+ { "instance_id", 0 } /* DbConnection class fills in real ID */
+ });
+
+ query1.WhereCriteria = new Dictionary({
+ { "object_id", checkable },
+ { "entry_time", DbValue::FromTimestamp(downtime->GetEntryTime()) },
+ { "instance_id", 0 }, /* DbConnection class fills in real ID */
+ { "scheduled_start_time", DbValue::FromTimestamp(downtime->GetStartTime()) },
+ { "scheduled_end_time", DbValue::FromTimestamp(downtime->GetEndTime()) },
+ { "name", downtime->GetName() }
+ });
+
+ DbObject::OnQuery(query1);
+
+	/* History - mark the downtime as started for the service (or host, as applicable) */
+ DbQuery query3;
+ query3.Table = "downtimehistory";
+ query3.Type = DbQueryUpdate;
+ query3.Category = DbCatDowntime;
+
+ query3.Fields = new Dictionary({
+ { "was_started", 1 },
+ { "is_in_effect", 1 },
+ { "actual_start_time", DbValue::FromTimestamp(timeBag.first) },
+ { "actual_start_time_usec", timeBag.second },
+ { "trigger_time", DbValue::FromTimestamp(downtime->GetTriggerTime()) }
+ });
+
+ query3.WhereCriteria = query1.WhereCriteria;
+
+ DbObject::OnQuery(query3);
+
+ /* host/service status */
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ DbQuery query4;
+ query4.WhereCriteria = new Dictionary();
+
+ if (service) {
+ query4.Table = "servicestatus";
+ query4.WhereCriteria->Set("service_object_id", service);
+ } else {
+ query4.Table = "hoststatus";
+ query4.WhereCriteria->Set("host_object_id", host);
+ }
+
+ query4.Type = DbQueryUpdate;
+ query4.Category = DbCatState;
+ query4.StatusUpdate = true;
+ query4.Object = DbObject::GetOrCreateByObject(checkable);
+
+ query4.Fields = new Dictionary({
+ { "scheduled_downtime_depth", checkable->GetDowntimeDepth() }
+ });
+ query4.WhereCriteria->Set("instance_id", 0); /* DbConnection class fills in real ID */
+
+ DbObject::OnQuery(query4);
+}
+
+/* acknowledgements */
+void DbEvents::AddAcknowledgementHistory(const Checkable::Ptr& checkable, const String& author, const String& comment,
+ AcknowledgementType type, bool notify, double expiry)
+{
+ std::pair<unsigned long, unsigned long> timeBag = ConvertTimestamp(Utility::GetTime());
+
+ DbQuery query1;
+ query1.Table = "acknowledgements";
+ query1.Type = DbQueryInsert;
+ query1.Category = DbCatAcknowledgement;
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr fields1 = new Dictionary();
+
+ fields1->Set("entry_time", DbValue::FromTimestamp(timeBag.first));
+ fields1->Set("entry_time_usec", timeBag.second);
+ fields1->Set("acknowledgement_type", type);
+ fields1->Set("object_id", checkable);
+ fields1->Set("author_name", author);
+ fields1->Set("comment_data", comment);
+ fields1->Set("persistent_comment", 1);
+ fields1->Set("notify_contacts", notify);
+ fields1->Set("is_sticky", type == AcknowledgementSticky);
+ fields1->Set("end_time", DbValue::FromTimestamp(expiry));
+ fields1->Set("instance_id", 0); /* DbConnection class fills in real ID */
+
+ if (service)
+ fields1->Set("state", service->GetState());
+ else
+ fields1->Set("state", GetHostState(host));
+
+ Endpoint::Ptr endpoint = Endpoint::GetByName(IcingaApplication::GetInstance()->GetNodeName());
+
+ if (endpoint)
+ fields1->Set("endpoint_object_id", endpoint);
+
+ query1.Fields = fields1;
+ DbObject::OnQuery(query1);
+}
+
+void DbEvents::AddAcknowledgement(const Checkable::Ptr& checkable, AcknowledgementType type)
+{
+ AddAcknowledgementInternal(checkable, type, true);
+}
+
+void DbEvents::RemoveAcknowledgement(const Checkable::Ptr& checkable)
+{
+ AddAcknowledgementInternal(checkable, AcknowledgementNone, false);
+}
+
+void DbEvents::AddAcknowledgementInternal(const Checkable::Ptr& checkable, AcknowledgementType type, bool add)
+{
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ DbQuery query1;
+ query1.WhereCriteria = new Dictionary();
+
+ if (service) {
+ query1.Table = "servicestatus";
+ query1.WhereCriteria->Set("service_object_id", service);
+ } else {
+ query1.Table = "hoststatus";
+ query1.WhereCriteria->Set("host_object_id", host);
+ }
+
+ query1.Type = DbQueryUpdate;
+ query1.Category = DbCatState;
+ query1.StatusUpdate = true;
+ query1.Object = DbObject::GetOrCreateByObject(checkable);
+
+ query1.Fields = new Dictionary({
+ { "acknowledgement_type", type },
+ { "problem_has_been_acknowledged", add ? 1 : 0 }
+ });
+
+ query1.WhereCriteria->Set("instance_id", 0); /* DbConnection class fills in real ID */
+
+ DbObject::OnQuery(query1);
+}
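+
+/* For illustration only: acknowledging a sticky service problem yields a
+ * status update conceptually equivalent to
+ *
+ *   UPDATE <prefix>servicestatus
+ *   SET acknowledgement_type = 2, problem_has_been_acknowledged = 1
+ *   WHERE service_object_id = <service id> AND instance_id = <instance id>;
+ *
+ * (AcknowledgementSticky maps to 2; the concrete SQL, table prefix and IDs
+ * are filled in by the configured DbConnection backend.)
+ */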
+
+/* notifications */
+void DbEvents::AddNotificationHistory(const Notification::Ptr& notification, const Checkable::Ptr& checkable, const std::set<User::Ptr>& users, NotificationType type,
+ const CheckResult::Ptr& cr, const String& author, const String& text)
+{
+	/* NotificationInsertID has to be tracked per IDO instance, therefore the OnQuery and OnMultipleQueries signals
+	 * cannot be called directly as all IDO instances would insert rows with the same ID, which is (most likely) only
+	 * correct in one database. Instead, pass a lambda which generates the queries with a new DbValue for
+	 * NotificationInsertID for each IDO instance.
+	 */
+ DbObject::OnMakeQueries([&checkable, &users, &type, &cr](const DbObject::QueryCallbacks& callbacks) {
+ /* start and end happen at the same time */
+ std::pair<unsigned long, unsigned long> timeBag = ConvertTimestamp(Utility::GetTime());
+
+ DbQuery query1;
+ query1.Table = "notifications";
+ query1.Type = DbQueryInsert;
+ query1.Category = DbCatNotification;
+ query1.NotificationInsertID = new DbValue(DbValueObjectInsertID, -1);
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr fields1 = new Dictionary();
+ fields1->Set("notification_type", 1); /* service */
+ fields1->Set("notification_reason", MapNotificationReasonType(type));
+ fields1->Set("object_id", checkable);
+ fields1->Set("start_time", DbValue::FromTimestamp(timeBag.first));
+ fields1->Set("start_time_usec", timeBag.second);
+ fields1->Set("end_time", DbValue::FromTimestamp(timeBag.first));
+ fields1->Set("end_time_usec", timeBag.second);
+
+ if (service)
+ fields1->Set("state", service->GetState());
+ else
+ fields1->Set("state", GetHostState(host));
+
+ if (cr) {
+ fields1->Set("output", CompatUtility::GetCheckResultOutput(cr));
+ fields1->Set("long_output", CompatUtility::GetCheckResultLongOutput(cr));
+ }
+
+ fields1->Set("escalated", 0);
+ fields1->Set("contacts_notified", static_cast<long>(users.size()));
+ fields1->Set("instance_id", 0); /* DbConnection class fills in real ID */
+
+ Endpoint::Ptr endpoint = Endpoint::GetByName(IcingaApplication::GetInstance()->GetNodeName());
+
+ if (endpoint)
+ fields1->Set("endpoint_object_id", endpoint);
+
+ query1.Fields = fields1;
+ callbacks.Query(query1);
+
+ std::vector<DbQuery> queries;
+
+ for (const User::Ptr& user : users) {
+ DbQuery query2;
+ query2.Table = "contactnotifications";
+ query2.Type = DbQueryInsert;
+ query2.Category = DbCatNotification;
+
+ query2.Fields = new Dictionary({
+ { "contact_object_id", user },
+ { "start_time", DbValue::FromTimestamp(timeBag.first) },
+ { "start_time_usec", timeBag.second },
+ { "end_time", DbValue::FromTimestamp(timeBag.first) },
+ { "end_time_usec", timeBag.second },
+ { "notification_id", query1.NotificationInsertID },
+ { "instance_id", 0 } /* DbConnection class fills in real ID */
+ });
+
+ queries.emplace_back(std::move(query2));
+ }
+
+ callbacks.MultipleQueries(queries);
+ });
+}
+
+/* statehistory */
+void DbEvents::AddStateChangeHistory(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, StateType type)
+{
+ double ts = cr->GetExecutionEnd();
+ std::pair<unsigned long, unsigned long> timeBag = ConvertTimestamp(ts);
+
+ DbQuery query1;
+ query1.Table = "statehistory";
+ query1.Type = DbQueryInsert;
+ query1.Category = DbCatStateHistory;
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr fields1 = new Dictionary();
+ fields1->Set("state_time", DbValue::FromTimestamp(timeBag.first));
+ fields1->Set("state_time_usec", timeBag.second);
+ fields1->Set("object_id", checkable);
+ fields1->Set("state_change", 1); /* service */
+ fields1->Set("state_type", checkable->GetStateType());
+ fields1->Set("current_check_attempt", checkable->GetCheckAttempt());
+ fields1->Set("max_check_attempts", checkable->GetMaxCheckAttempts());
+
+ if (service) {
+ fields1->Set("state", service->GetState());
+ fields1->Set("last_state", service->GetLastState());
+ fields1->Set("last_hard_state", service->GetLastHardState());
+ } else {
+ fields1->Set("state", GetHostState(host));
+ fields1->Set("last_state", host->GetLastState());
+ fields1->Set("last_hard_state", host->GetLastHardState());
+ }
+
+ if (cr) {
+ fields1->Set("output", CompatUtility::GetCheckResultOutput(cr));
+ fields1->Set("long_output", CompatUtility::GetCheckResultLongOutput(cr));
+ fields1->Set("check_source", cr->GetCheckSource());
+ }
+
+ fields1->Set("instance_id", 0); /* DbConnection class fills in real ID */
+
+ Endpoint::Ptr endpoint = Endpoint::GetByName(IcingaApplication::GetInstance()->GetNodeName());
+
+ if (endpoint)
+ fields1->Set("endpoint_object_id", endpoint);
+
+ query1.Fields = fields1;
+ DbObject::OnQuery(query1);
+}
+
+/* logentries */
+void DbEvents::AddCheckResultLogHistory(const Checkable::Ptr& checkable, const CheckResult::Ptr &cr)
+{
+ if (!cr)
+ return;
+
+ Dictionary::Ptr varsBefore = cr->GetVarsBefore();
+ Dictionary::Ptr varsAfter = cr->GetVarsAfter();
+
+ if (varsBefore && varsAfter) {
+ if (varsBefore->Get("state") == varsAfter->Get("state") &&
+ varsBefore->Get("state_type") == varsAfter->Get("state_type") &&
+ varsBefore->Get("attempt") == varsAfter->Get("attempt") &&
+ varsBefore->Get("reachable") == varsAfter->Get("reachable"))
+ return; /* Nothing changed, ignore this checkresult. */
+ }
+
+ LogEntryType type;
+ String output = CompatUtility::GetCheckResultOutput(cr);
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ std::ostringstream msgbuf;
+
+ if (service) {
+ msgbuf << "SERVICE ALERT: "
+ << host->GetName() << ";"
+ << service->GetShortName() << ";"
+ << Service::StateToString(service->GetState()) << ";"
+ << Service::StateTypeToString(service->GetStateType()) << ";"
+ << service->GetCheckAttempt() << ";"
+ << output << ""
+ << "";
+
+ switch (service->GetState()) {
+ case ServiceOK:
+ type = LogEntryTypeServiceOk;
+ break;
+ case ServiceUnknown:
+ type = LogEntryTypeServiceUnknown;
+ break;
+ case ServiceWarning:
+ type = LogEntryTypeServiceWarning;
+ break;
+ case ServiceCritical:
+ type = LogEntryTypeServiceCritical;
+ break;
+ default:
+ Log(LogCritical, "DbEvents")
+ << "Unknown service state: " << service->GetState();
+ return;
+ }
+ } else {
+ msgbuf << "HOST ALERT: "
+ << host->GetName() << ";"
+ << GetHostStateString(host) << ";"
+ << Host::StateTypeToString(host->GetStateType()) << ";"
+ << host->GetCheckAttempt() << ";"
+ << output << ""
+ << "";
+
+ switch (host->GetState()) {
+ case HostUp:
+ type = LogEntryTypeHostUp;
+ break;
+ case HostDown:
+ type = LogEntryTypeHostDown;
+ break;
+ default:
+ Log(LogCritical, "DbEvents")
+ << "Unknown host state: " << host->GetState();
+ return;
+ }
+
+ if (!host->IsReachable())
+ type = LogEntryTypeHostUnreachable;
+ }
+
+ AddLogHistory(checkable, msgbuf.str(), type);
+}
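+
+/* For example, a hard CRITICAL result for a hypothetical service "http" on
+ * host "web01" at the third check attempt is logged as
+ *
+ *   SERVICE ALERT: web01;http;CRITICAL;HARD;3;Connection refused
+ *
+ * and stored via AddLogHistory() with type LogEntryTypeServiceCritical.
+ */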
+
+void DbEvents::AddTriggerDowntimeLogHistory(const Downtime::Ptr& downtime)
+{
+ Checkable::Ptr checkable = downtime->GetCheckable();
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ std::ostringstream msgbuf;
+
+ if (service) {
+ msgbuf << "SERVICE DOWNTIME ALERT: "
+ << host->GetName() << ";"
+ << service->GetShortName() << ";"
+ << "STARTED" << "; "
+ << "Service has entered a period of scheduled downtime."
+ << "";
+ } else {
+ msgbuf << "HOST DOWNTIME ALERT: "
+ << host->GetName() << ";"
+ << "STARTED" << "; "
+ << "Service has entered a period of scheduled downtime."
+ << "";
+ }
+
+ AddLogHistory(checkable, msgbuf.str(), LogEntryTypeInfoMessage);
+}
+
+void DbEvents::AddRemoveDowntimeLogHistory(const Downtime::Ptr& downtime)
+{
+ Checkable::Ptr checkable = downtime->GetCheckable();
+
+ String downtimeOutput;
+ String downtimeStateStr;
+
+ if (downtime->GetWasCancelled()) {
+ downtimeOutput = "Scheduled downtime for service has been cancelled.";
+ downtimeStateStr = "CANCELLED";
+ } else {
+ downtimeOutput = "Service has exited from a period of scheduled downtime.";
+ downtimeStateStr = "STOPPED";
+ }
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ std::ostringstream msgbuf;
+
+ if (service) {
+ msgbuf << "SERVICE DOWNTIME ALERT: "
+ << host->GetName() << ";"
+ << service->GetShortName() << ";"
+ << downtimeStateStr << "; "
+ << downtimeOutput
+ << "";
+ } else {
+ msgbuf << "HOST DOWNTIME ALERT: "
+ << host->GetName() << ";"
+ << downtimeStateStr << "; "
+ << downtimeOutput
+ << "";
+ }
+
+ AddLogHistory(checkable, msgbuf.str(), LogEntryTypeInfoMessage);
+}
+
+void DbEvents::AddNotificationSentLogHistory(const Notification::Ptr& notification, const Checkable::Ptr& checkable, const User::Ptr& user,
+ NotificationType notification_type, const CheckResult::Ptr& cr,
+ const String& author, const String& comment_text)
+{
+ CheckCommand::Ptr commandObj = checkable->GetCheckCommand();
+
+ String checkCommandName;
+
+ if (commandObj)
+ checkCommandName = commandObj->GetName();
+
+ String notificationTypeStr = Notification::NotificationTypeToStringCompat(notification_type); //TODO: Change that to our own types.
+
+ String author_comment = "";
+ if (notification_type == NotificationCustom || notification_type == NotificationAcknowledgement) {
+ author_comment = ";" + author + ";" + comment_text;
+ }
+
+ if (!cr)
+ return;
+
+ String output = CompatUtility::GetCheckResultOutput(cr);
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ std::ostringstream msgbuf;
+
+ if (service) {
+ msgbuf << "SERVICE NOTIFICATION: "
+ << user->GetName() << ";"
+ << host->GetName() << ";"
+ << service->GetShortName() << ";"
+ << notificationTypeStr << " "
+ << "(" << Service::StateToString(service->GetState()) << ");"
+ << checkCommandName << ";"
+ << output << author_comment
+ << "";
+ } else {
+ msgbuf << "HOST NOTIFICATION: "
+ << user->GetName() << ";"
+ << host->GetName() << ";"
+ << notificationTypeStr << " "
+ << "(" << Host::StateToString(host->GetState()) << ");"
+ << checkCommandName << ";"
+ << output << author_comment
+ << "";
+ }
+
+ AddLogHistory(checkable, msgbuf.str(), LogEntryTypeHostNotification);
+}
+
+void DbEvents::AddFlappingChangedLogHistory(const Checkable::Ptr& checkable)
+{
+ String flappingStateStr;
+ String flappingOutput;
+
+ if (checkable->IsFlapping()) {
+ flappingOutput = "Service appears to have started flapping (" + Convert::ToString(checkable->GetFlappingCurrent()) + "% change >= " + Convert::ToString(checkable->GetFlappingThresholdHigh()) + "% threshold)";
+ flappingStateStr = "STARTED";
+ } else {
+ flappingOutput = "Service appears to have stopped flapping (" + Convert::ToString(checkable->GetFlappingCurrent()) + "% change < " + Convert::ToString(checkable->GetFlappingThresholdLow()) + "% threshold)";
+ flappingStateStr = "STOPPED";
+ }
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ std::ostringstream msgbuf;
+
+ if (service) {
+ msgbuf << "SERVICE FLAPPING ALERT: "
+ << host->GetName() << ";"
+ << service->GetShortName() << ";"
+ << flappingStateStr << "; "
+ << flappingOutput
+ << "";
+ } else {
+ msgbuf << "HOST FLAPPING ALERT: "
+ << host->GetName() << ";"
+ << flappingStateStr << "; "
+ << flappingOutput
+ << "";
+ }
+
+ AddLogHistory(checkable, msgbuf.str(), LogEntryTypeInfoMessage);
+}
+
+void DbEvents::AddEnableFlappingChangedLogHistory(const Checkable::Ptr& checkable)
+{
+ if (!checkable->GetEnableFlapping())
+ return;
+
+ String flappingOutput = "Flap detection has been disabled";
+ String flappingStateStr = "DISABLED";
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ std::ostringstream msgbuf;
+
+ if (service) {
+ msgbuf << "SERVICE FLAPPING ALERT: "
+ << host->GetName() << ";"
+ << service->GetShortName() << ";"
+ << flappingStateStr << "; "
+ << flappingOutput
+ << "";
+ } else {
+ msgbuf << "HOST FLAPPING ALERT: "
+ << host->GetName() << ";"
+ << flappingStateStr << "; "
+ << flappingOutput
+ << "";
+ }
+
+ AddLogHistory(checkable, msgbuf.str(), LogEntryTypeInfoMessage);
+}
+
+void DbEvents::AddLogHistory(const Checkable::Ptr& checkable, const String& buffer, LogEntryType type)
+{
+ std::pair<unsigned long, unsigned long> timeBag = ConvertTimestamp(Utility::GetTime());
+
+ DbQuery query1;
+ query1.Table = "logentries";
+ query1.Type = DbQueryInsert;
+ query1.Category = DbCatLog;
+
+ Dictionary::Ptr fields1 = new Dictionary();
+
+ fields1->Set("logentry_time", DbValue::FromTimestamp(timeBag.first));
+ fields1->Set("entry_time", DbValue::FromTimestamp(timeBag.first));
+ fields1->Set("entry_time_usec", timeBag.second);
+ fields1->Set("object_id", checkable);
+ fields1->Set("logentry_type", type);
+ fields1->Set("logentry_data", buffer);
+
+ fields1->Set("instance_id", 0); /* DbConnection class fills in real ID */
+
+ Endpoint::Ptr endpoint = Endpoint::GetByName(IcingaApplication::GetInstance()->GetNodeName());
+
+ if (endpoint)
+ fields1->Set("endpoint_object_id", endpoint);
+
+ query1.Fields = fields1;
+ DbObject::OnQuery(query1);
+}
+
+/* flappinghistory */
+void DbEvents::AddFlappingChangedHistory(const Checkable::Ptr& checkable)
+{
+ std::pair<unsigned long, unsigned long> timeBag = ConvertTimestamp(Utility::GetTime());
+
+ DbQuery query1;
+ query1.Table = "flappinghistory";
+ query1.Type = DbQueryInsert;
+ query1.Category = DbCatFlapping;
+
+ Dictionary::Ptr fields1 = new Dictionary();
+
+ fields1->Set("event_time", DbValue::FromTimestamp(timeBag.first));
+ fields1->Set("event_time_usec", timeBag.second);
+
+ if (checkable->IsFlapping())
+ fields1->Set("event_type", 1000);
+ else {
+ fields1->Set("event_type", 1001);
+ fields1->Set("reason_type", 1);
+ }
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ fields1->Set("flapping_type", service ? 1 : 0);
+ fields1->Set("object_id", checkable);
+ fields1->Set("percent_state_change", checkable->GetFlappingCurrent());
+ fields1->Set("low_threshold", checkable->GetFlappingThresholdLow());
+ fields1->Set("high_threshold", checkable->GetFlappingThresholdHigh());
+
+ fields1->Set("instance_id", 0); /* DbConnection class fills in real ID */
+
+ Endpoint::Ptr endpoint = Endpoint::GetByName(IcingaApplication::GetInstance()->GetNodeName());
+
+ if (endpoint)
+ fields1->Set("endpoint_object_id", endpoint);
+
+ query1.Fields = fields1;
+ DbObject::OnQuery(query1);
+}
+
+void DbEvents::AddEnableFlappingChangedHistory(const Checkable::Ptr& checkable)
+{
+ if (!checkable->GetEnableFlapping())
+ return;
+
+ std::pair<unsigned long, unsigned long> timeBag = ConvertTimestamp(Utility::GetTime());
+
+ DbQuery query1;
+ query1.Table = "flappinghistory";
+ query1.Type = DbQueryInsert;
+ query1.Category = DbCatFlapping;
+
+ Dictionary::Ptr fields1 = new Dictionary();
+
+ fields1->Set("event_time", DbValue::FromTimestamp(timeBag.first));
+ fields1->Set("event_time_usec", timeBag.second);
+
+ fields1->Set("event_type", 1001);
+ fields1->Set("reason_type", 2);
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ fields1->Set("flapping_type", service ? 1 : 0);
+ fields1->Set("object_id", checkable);
+ fields1->Set("percent_state_change", checkable->GetFlappingCurrent());
+ fields1->Set("low_threshold", checkable->GetFlappingThresholdLow());
+ fields1->Set("high_threshold", checkable->GetFlappingThresholdHigh());
+ fields1->Set("instance_id", 0); /* DbConnection class fills in real ID */
+
+ Endpoint::Ptr endpoint = Endpoint::GetByName(IcingaApplication::GetInstance()->GetNodeName());
+
+ if (endpoint)
+ fields1->Set("endpoint_object_id", endpoint);
+
+ query1.Fields = fields1;
+ DbObject::OnQuery(query1);
+}
+
+/* servicechecks */
+void DbEvents::AddCheckableCheckHistory(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr)
+{
+ if (!cr)
+ return;
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ DbQuery query1;
+ query1.Table = service ? "servicechecks" : "hostchecks";
+ query1.Type = DbQueryInsert;
+ query1.Category = DbCatCheck;
+
+ Dictionary::Ptr fields1 = new Dictionary();
+ fields1->Set("check_type", !checkable->GetEnableActiveChecks()); /* 0 .. active, 1 .. passive */
+ fields1->Set("current_check_attempt", checkable->GetCheckAttempt());
+ fields1->Set("max_check_attempts", checkable->GetMaxCheckAttempts());
+ fields1->Set("state_type", checkable->GetStateType());
+
+ double start = cr->GetExecutionStart();
+ double end = cr->GetExecutionEnd();
+ double executionTime = cr->CalculateExecutionTime();
+
+ std::pair<unsigned long, unsigned long> timeBagStart = ConvertTimestamp(start);
+ std::pair<unsigned long, unsigned long> timeBagEnd = ConvertTimestamp(end);
+
+ fields1->Set("start_time", DbValue::FromTimestamp(timeBagStart.first));
+ fields1->Set("start_time_usec", timeBagStart.second);
+ fields1->Set("end_time", DbValue::FromTimestamp(timeBagEnd.first));
+ fields1->Set("end_time_usec", timeBagEnd.second);
+ fields1->Set("command_object_id", checkable->GetCheckCommand());
+ fields1->Set("execution_time", executionTime);
+ fields1->Set("latency", cr->CalculateLatency());
+ fields1->Set("return_code", cr->GetExitStatus());
+ fields1->Set("perfdata", PluginUtility::FormatPerfdata(cr->GetPerformanceData()));
+
+ fields1->Set("output", CompatUtility::GetCheckResultOutput(cr));
+ fields1->Set("long_output", CompatUtility::GetCheckResultLongOutput(cr));
+ fields1->Set("command_line", CompatUtility::GetCommandLine(checkable->GetCheckCommand()));
+ fields1->Set("instance_id", 0); /* DbConnection class fills in real ID */
+
+ if (service) {
+ fields1->Set("service_object_id", service);
+ fields1->Set("state", service->GetState());
+ } else {
+ fields1->Set("host_object_id", host);
+ fields1->Set("state", GetHostState(host));
+ }
+
+ Endpoint::Ptr endpoint = Endpoint::GetByName(IcingaApplication::GetInstance()->GetNodeName());
+
+ if (endpoint)
+ fields1->Set("endpoint_object_id", endpoint);
+
+ query1.Fields = fields1;
+ DbObject::OnQuery(query1);
+}
+
+/* eventhandlers */
+void DbEvents::AddEventHandlerHistory(const Checkable::Ptr& checkable)
+{
+ DbQuery query1;
+ query1.Table = "eventhandlers";
+ query1.Type = DbQueryInsert;
+ query1.Category = DbCatEventHandler;
+
+ Dictionary::Ptr fields1 = new Dictionary();
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ fields1->Set("object_id", checkable);
+ fields1->Set("state_type", checkable->GetStateType());
+ fields1->Set("command_object_id", checkable->GetEventCommand());
+ fields1->Set("instance_id", 0); /* DbConnection class fills in real ID */
+
+ if (service) {
+ fields1->Set("state", service->GetState());
+ fields1->Set("eventhandler_type", 1);
+ } else {
+ fields1->Set("state", GetHostState(host));
+ fields1->Set("eventhandler_type", 0);
+ }
+
+ std::pair<unsigned long, unsigned long> timeBag = ConvertTimestamp(Utility::GetTime());
+
+ fields1->Set("start_time", DbValue::FromTimestamp(timeBag.first));
+ fields1->Set("start_time_usec", timeBag.second);
+ fields1->Set("end_time", DbValue::FromTimestamp(timeBag.first));
+ fields1->Set("end_time_usec", timeBag.second);
+
+ Endpoint::Ptr endpoint = Endpoint::GetByName(IcingaApplication::GetInstance()->GetNodeName());
+
+ if (endpoint)
+ fields1->Set("endpoint_object_id", endpoint);
+
+ query1.Fields = fields1;
+ DbObject::OnQuery(query1);
+}
+
+/* externalcommands */
+void DbEvents::AddExternalCommandHistory(double time, const String& command, const std::vector<String>& arguments)
+{
+ DbQuery query1;
+ query1.Table = "externalcommands";
+ query1.Type = DbQueryInsert;
+ query1.Category = DbCatExternalCommand;
+
+ Dictionary::Ptr fields1 = new Dictionary();
+
+ fields1->Set("entry_time", DbValue::FromTimestamp(time));
+ fields1->Set("command_type", MapExternalCommandType(command));
+ fields1->Set("command_name", command);
+ fields1->Set("command_args", boost::algorithm::join(arguments, ";"));
+ fields1->Set("instance_id", 0); /* DbConnection class fills in real ID */
+
+ Endpoint::Ptr endpoint = Endpoint::GetByName(IcingaApplication::GetInstance()->GetNodeName());
+
+ if (endpoint)
+ fields1->Set("endpoint_object_id", endpoint);
+
+ query1.Fields = fields1;
+ DbObject::OnQuery(query1);
+}
+
+int DbEvents::GetHostState(const Host::Ptr& host)
+{
+ int currentState = host->GetState();
+
+ if (currentState != HostUp && !host->IsReachable())
+ currentState = 2; /* hardcoded compat state */
+
+ return currentState;
+}
+
+String DbEvents::GetHostStateString(const Host::Ptr& host)
+{
+ if (host->GetState() != HostUp && !host->IsReachable())
+ return "UNREACHABLE"; /* hardcoded compat state */
+
+ return Host::StateToString(host->GetState());
+}
+
+std::pair<unsigned long, unsigned long> DbEvents::ConvertTimestamp(double time)
+{
+ unsigned long time_sec = static_cast<long>(time);
+ unsigned long time_usec = (time - time_sec) * 1000 * 1000;
+
+ return std::make_pair(time_sec, time_usec);
+}
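+
+/* Example: ConvertTimestamp(1500000000.25) yields { 1500000000, 250000 },
+ * i.e. whole seconds plus the fractional part expressed in microseconds
+ * (subject to the usual floating-point rounding).
+ */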
+
+int DbEvents::MapNotificationReasonType(NotificationType type)
+{
+ switch (type) {
+ case NotificationDowntimeStart:
+ return 5;
+ case NotificationDowntimeEnd:
+ return 6;
+ case NotificationDowntimeRemoved:
+ return 7;
+ case NotificationCustom:
+ return 8;
+ case NotificationAcknowledgement:
+ return 1;
+ case NotificationProblem:
+ return 0;
+ case NotificationRecovery:
+ return 0;
+ case NotificationFlappingStart:
+ return 2;
+ case NotificationFlappingEnd:
+ return 3;
+ default:
+ return 0;
+ }
+}
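+
+/* For example, NotificationAcknowledgement maps to reason type 1 and
+ * NotificationFlappingStart to 2, while both NotificationProblem and
+ * NotificationRecovery map to the default reason type 0.
+ */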
+
+int DbEvents::MapExternalCommandType(const String& name)
+{
+ if (name == "NONE")
+ return 0;
+ if (name == "ADD_HOST_COMMENT")
+ return 1;
+ if (name == "DEL_HOST_COMMENT")
+ return 2;
+ if (name == "ADD_SVC_COMMENT")
+ return 3;
+ if (name == "DEL_SVC_COMMENT")
+ return 4;
+ if (name == "ENABLE_SVC_CHECK")
+ return 5;
+ if (name == "DISABLE_SVC_CHECK")
+ return 6;
+ if (name == "SCHEDULE_SVC_CHECK")
+ return 7;
+ if (name == "DELAY_SVC_NOTIFICATION")
+ return 9;
+ if (name == "DELAY_HOST_NOTIFICATION")
+ return 10;
+ if (name == "DISABLE_NOTIFICATIONS")
+ return 11;
+ if (name == "ENABLE_NOTIFICATIONS")
+ return 12;
+ if (name == "RESTART_PROCESS")
+ return 13;
+ if (name == "SHUTDOWN_PROCESS")
+ return 14;
+ if (name == "ENABLE_HOST_SVC_CHECKS")
+ return 15;
+ if (name == "DISABLE_HOST_SVC_CHECKS")
+ return 16;
+ if (name == "SCHEDULE_HOST_SVC_CHECKS")
+ return 17;
+ if (name == "DELAY_HOST_SVC_NOTIFICATIONS")
+ return 19;
+ if (name == "DEL_ALL_HOST_COMMENTS")
+ return 20;
+ if (name == "DEL_ALL_SVC_COMMENTS")
+ return 21;
+ if (name == "ENABLE_SVC_NOTIFICATIONS")
+ return 22;
+ if (name == "DISABLE_SVC_NOTIFICATIONS")
+ return 23;
+ if (name == "ENABLE_HOST_NOTIFICATIONS")
+ return 24;
+ if (name == "DISABLE_HOST_NOTIFICATIONS")
+ return 25;
+ if (name == "ENABLE_ALL_NOTIFICATIONS_BEYOND_HOST")
+ return 26;
+ if (name == "DISABLE_ALL_NOTIFICATIONS_BEYOND_HOST")
+ return 27;
+ if (name == "ENABLE_HOST_SVC_NOTIFICATIONS")
+ return 28;
+ if (name == "DISABLE_HOST_SVC_NOTIFICATIONS")
+ return 29;
+ if (name == "PROCESS_SERVICE_CHECK_RESULT")
+ return 30;
+ if (name == "SAVE_STATE_INFORMATION")
+ return 31;
+ if (name == "READ_STATE_INFORMATION")
+ return 32;
+ if (name == "ACKNOWLEDGE_HOST_PROBLEM")
+ return 33;
+ if (name == "ACKNOWLEDGE_SVC_PROBLEM")
+ return 34;
+ if (name == "START_EXECUTING_SVC_CHECKS")
+ return 35;
+ if (name == "STOP_EXECUTING_SVC_CHECKS")
+ return 36;
+ if (name == "START_ACCEPTING_PASSIVE_SVC_CHECKS")
+ return 37;
+ if (name == "STOP_ACCEPTING_PASSIVE_SVC_CHECKS")
+ return 38;
+ if (name == "ENABLE_PASSIVE_SVC_CHECKS")
+ return 39;
+ if (name == "DISABLE_PASSIVE_SVC_CHECKS")
+ return 40;
+ if (name == "ENABLE_EVENT_HANDLERS")
+ return 41;
+ if (name == "DISABLE_EVENT_HANDLERS")
+ return 42;
+ if (name == "ENABLE_HOST_EVENT_HANDLER")
+ return 43;
+ if (name == "DISABLE_HOST_EVENT_HANDLER")
+ return 44;
+ if (name == "ENABLE_SVC_EVENT_HANDLER")
+ return 45;
+ if (name == "DISABLE_SVC_EVENT_HANDLER")
+ return 46;
+ if (name == "ENABLE_HOST_CHECK")
+ return 47;
+ if (name == "DISABLE_HOST_CHECK")
+ return 48;
+ if (name == "START_OBSESSING_OVER_SVC_CHECKS")
+ return 49;
+ if (name == "STOP_OBSESSING_OVER_SVC_CHECKS")
+ return 50;
+ if (name == "REMOVE_HOST_ACKNOWLEDGEMENT")
+ return 51;
+ if (name == "REMOVE_SVC_ACKNOWLEDGEMENT")
+ return 52;
+ if (name == "SCHEDULE_FORCED_HOST_SVC_CHECKS")
+ return 53;
+ if (name == "SCHEDULE_FORCED_SVC_CHECK")
+ return 54;
+ if (name == "SCHEDULE_HOST_DOWNTIME")
+ return 55;
+ if (name == "SCHEDULE_SVC_DOWNTIME")
+ return 56;
+ if (name == "ENABLE_HOST_FLAP_DETECTION")
+ return 57;
+ if (name == "DISABLE_HOST_FLAP_DETECTION")
+ return 58;
+ if (name == "ENABLE_SVC_FLAP_DETECTION")
+ return 59;
+ if (name == "DISABLE_SVC_FLAP_DETECTION")
+ return 60;
+ if (name == "ENABLE_FLAP_DETECTION")
+ return 61;
+ if (name == "DISABLE_FLAP_DETECTION")
+ return 62;
+ if (name == "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS")
+ return 63;
+ if (name == "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS")
+ return 64;
+ if (name == "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS")
+ return 65;
+ if (name == "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS")
+ return 66;
+ if (name == "ENABLE_HOSTGROUP_SVC_CHECKS")
+ return 67;
+ if (name == "DISABLE_HOSTGROUP_SVC_CHECKS")
+ return 68;
+ if (name == "CANCEL_HOST_DOWNTIME")
+ return 69;
+ if (name == "CANCEL_SVC_DOWNTIME")
+ return 70;
+ if (name == "CANCEL_ACTIVE_HOST_DOWNTIME")
+ return 71;
+ if (name == "CANCEL_PENDING_HOST_DOWNTIME")
+ return 72;
+ if (name == "CANCEL_ACTIVE_SVC_DOWNTIME")
+ return 73;
+ if (name == "CANCEL_PENDING_SVC_DOWNTIME")
+ return 74;
+ if (name == "CANCEL_ACTIVE_HOST_SVC_DOWNTIME")
+ return 75;
+ if (name == "CANCEL_PENDING_HOST_SVC_DOWNTIME")
+ return 76;
+ if (name == "FLUSH_PENDING_COMMANDS")
+ return 77;
+ if (name == "DEL_HOST_DOWNTIME")
+ return 78;
+ if (name == "DEL_SVC_DOWNTIME")
+ return 79;
+ if (name == "ENABLE_FAILURE_PREDICTION")
+ return 80;
+ if (name == "DISABLE_FAILURE_PREDICTION")
+ return 81;
+ if (name == "ENABLE_PERFORMANCE_DATA")
+ return 82;
+ if (name == "DISABLE_PERFORMANCE_DATA")
+ return 83;
+ if (name == "SCHEDULE_HOSTGROUP_HOST_DOWNTIME")
+ return 84;
+ if (name == "SCHEDULE_HOSTGROUP_SVC_DOWNTIME")
+ return 85;
+ if (name == "SCHEDULE_HOST_SVC_DOWNTIME")
+ return 86;
+ if (name == "PROCESS_HOST_CHECK_RESULT")
+ return 87;
+ if (name == "START_EXECUTING_HOST_CHECKS")
+ return 88;
+ if (name == "STOP_EXECUTING_HOST_CHECKS")
+ return 89;
+ if (name == "START_ACCEPTING_PASSIVE_HOST_CHECKS")
+ return 90;
+ if (name == "STOP_ACCEPTING_PASSIVE_HOST_CHECKS")
+ return 91;
+ if (name == "ENABLE_PASSIVE_HOST_CHECKS")
+ return 92;
+ if (name == "DISABLE_PASSIVE_HOST_CHECKS")
+ return 93;
+ if (name == "START_OBSESSING_OVER_HOST_CHECKS")
+ return 94;
+ if (name == "STOP_OBSESSING_OVER_HOST_CHECKS")
+ return 95;
+ if (name == "SCHEDULE_HOST_CHECK")
+ return 96;
+ if (name == "SCHEDULE_FORCED_HOST_CHECK")
+ return 98;
+ if (name == "START_OBSESSING_OVER_SVC")
+ return 99;
+ if (name == "STOP_OBSESSING_OVER_SVC")
+ return 100;
+ if (name == "START_OBSESSING_OVER_HOST")
+ return 101;
+ if (name == "STOP_OBSESSING_OVER_HOST")
+ return 102;
+ if (name == "ENABLE_HOSTGROUP_HOST_CHECKS")
+ return 103;
+ if (name == "DISABLE_HOSTGROUP_HOST_CHECKS")
+ return 104;
+ if (name == "ENABLE_HOSTGROUP_PASSIVE_SVC_CHECKS")
+ return 105;
+ if (name == "DISABLE_HOSTGROUP_PASSIVE_SVC_CHECKS")
+ return 106;
+ if (name == "ENABLE_HOSTGROUP_PASSIVE_HOST_CHECKS")
+ return 107;
+ if (name == "DISABLE_HOSTGROUP_PASSIVE_HOST_CHECKS")
+ return 108;
+ if (name == "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS")
+ return 109;
+ if (name == "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS")
+ return 110;
+ if (name == "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS")
+ return 111;
+ if (name == "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS")
+ return 112;
+ if (name == "ENABLE_SERVICEGROUP_SVC_CHECKS")
+ return 113;
+ if (name == "DISABLE_SERVICEGROUP_SVC_CHECKS")
+ return 114;
+ if (name == "ENABLE_SERVICEGROUP_HOST_CHECKS")
+ return 115;
+ if (name == "DISABLE_SERVICEGROUP_HOST_CHECKS")
+ return 116;
+ if (name == "ENABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS")
+ return 117;
+ if (name == "DISABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS")
+ return 118;
+ if (name == "ENABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS")
+ return 119;
+ if (name == "DISABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS")
+ return 120;
+ if (name == "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME")
+ return 121;
+ if (name == "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME")
+ return 122;
+ if (name == "CHANGE_GLOBAL_HOST_EVENT_HANDLER")
+ return 123;
+ if (name == "CHANGE_GLOBAL_SVC_EVENT_HANDLER")
+ return 124;
+ if (name == "CHANGE_HOST_EVENT_HANDLER")
+ return 125;
+ if (name == "CHANGE_SVC_EVENT_HANDLER")
+ return 126;
+ if (name == "CHANGE_HOST_CHECK_COMMAND")
+ return 127;
+ if (name == "CHANGE_SVC_CHECK_COMMAND")
+ return 128;
+ if (name == "CHANGE_NORMAL_HOST_CHECK_INTERVAL")
+ return 129;
+ if (name == "CHANGE_NORMAL_SVC_CHECK_INTERVAL")
+ return 130;
+ if (name == "CHANGE_RETRY_SVC_CHECK_INTERVAL")
+ return 131;
+ if (name == "CHANGE_MAX_HOST_CHECK_ATTEMPTS")
+ return 132;
+ if (name == "CHANGE_MAX_SVC_CHECK_ATTEMPTS")
+ return 133;
+ if (name == "SCHEDULE_AND_PROPAGATE_TRIGGERED_HOST_DOWNTIME")
+ return 134;
+ if (name == "ENABLE_HOST_AND_CHILD_NOTIFICATIONS")
+ return 135;
+ if (name == "DISABLE_HOST_AND_CHILD_NOTIFICATIONS")
+ return 136;
+ if (name == "SCHEDULE_AND_PROPAGATE_HOST_DOWNTIME")
+ return 137;
+ if (name == "ENABLE_SERVICE_FRESHNESS_CHECKS")
+ return 138;
+ if (name == "DISABLE_SERVICE_FRESHNESS_CHECKS")
+ return 139;
+ if (name == "ENABLE_HOST_FRESHNESS_CHECKS")
+ return 140;
+ if (name == "DISABLE_HOST_FRESHNESS_CHECKS")
+ return 141;
+ if (name == "SET_HOST_NOTIFICATION_NUMBER")
+ return 142;
+ if (name == "SET_SVC_NOTIFICATION_NUMBER")
+ return 143;
+ if (name == "CHANGE_HOST_CHECK_TIMEPERIOD")
+ return 144;
+ if (name == "CHANGE_SVC_CHECK_TIMEPERIOD")
+ return 145;
+ if (name == "PROCESS_FILE")
+ return 146;
+ if (name == "CHANGE_CUSTOM_HOST_VAR")
+ return 147;
+ if (name == "CHANGE_CUSTOM_SVC_VAR")
+ return 148;
+ if (name == "CHANGE_CUSTOM_CONTACT_VAR")
+ return 149;
+ if (name == "ENABLE_CONTACT_HOST_NOTIFICATIONS")
+ return 150;
+ if (name == "DISABLE_CONTACT_HOST_NOTIFICATIONS")
+ return 151;
+ if (name == "ENABLE_CONTACT_SVC_NOTIFICATIONS")
+ return 152;
+ if (name == "DISABLE_CONTACT_SVC_NOTIFICATIONS")
+ return 153;
+ if (name == "ENABLE_CONTACTGROUP_HOST_NOTIFICATIONS")
+ return 154;
+ if (name == "DISABLE_CONTACTGROUP_HOST_NOTIFICATIONS")
+ return 155;
+ if (name == "ENABLE_CONTACTGROUP_SVC_NOTIFICATIONS")
+ return 156;
+ if (name == "DISABLE_CONTACTGROUP_SVC_NOTIFICATIONS")
+ return 157;
+ if (name == "CHANGE_RETRY_HOST_CHECK_INTERVAL")
+ return 158;
+ if (name == "SEND_CUSTOM_HOST_NOTIFICATION")
+ return 159;
+ if (name == "SEND_CUSTOM_SVC_NOTIFICATION")
+ return 160;
+ if (name == "CHANGE_HOST_NOTIFICATION_TIMEPERIOD")
+ return 161;
+ if (name == "CHANGE_SVC_NOTIFICATION_TIMEPERIOD")
+ return 162;
+ if (name == "CHANGE_CONTACT_HOST_NOTIFICATION_TIMEPERIOD")
+ return 163;
+ if (name == "CHANGE_CONTACT_SVC_NOTIFICATION_TIMEPERIOD")
+ return 164;
+ if (name == "CHANGE_HOST_MODATTR")
+ return 165;
+ if (name == "CHANGE_SVC_MODATTR")
+ return 166;
+ if (name == "CHANGE_CONTACT_MODATTR")
+ return 167;
+ if (name == "CHANGE_CONTACT_MODHATTR")
+ return 168;
+ if (name == "CHANGE_CONTACT_MODSATTR")
+ return 169;
+ if (name == "SYNC_STATE_INFORMATION")
+ return 170;
+ if (name == "DEL_DOWNTIME_BY_HOST_NAME")
+ return 171;
+ if (name == "DEL_DOWNTIME_BY_HOSTGROUP_NAME")
+ return 172;
+ if (name == "DEL_DOWNTIME_BY_START_TIME_COMMENT")
+ return 173;
+ if (name == "ACKNOWLEDGE_HOST_PROBLEM_EXPIRE")
+ return 174;
+ if (name == "ACKNOWLEDGE_SVC_PROBLEM_EXPIRE")
+ return 175;
+ if (name == "DISABLE_NOTIFICATIONS_EXPIRE_TIME")
+ return 176;
+ if (name == "CUSTOM_COMMAND")
+ return 999;
+
+ return 0;
+}
diff --git a/lib/db_ido/dbevents.hpp b/lib/db_ido/dbevents.hpp
new file mode 100644
index 0000000..858f3b3
--- /dev/null
+++ b/lib/db_ido/dbevents.hpp
@@ -0,0 +1,128 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef DBEVENTS_H
+#define DBEVENTS_H
+
+#include "db_ido/dbobject.hpp"
+#include "base/configobject.hpp"
+#include "icinga/service.hpp"
+
+namespace icinga
+{
+
+enum LogEntryType
+{
+ LogEntryTypeRuntimeError = 1,
+ LogEntryTypeRuntimeWarning = 2,
+ LogEntryTypeVerificationError = 4,
+ LogEntryTypeVerificationWarning = 8,
+ LogEntryTypeConfigError = 16,
+ LogEntryTypeConfigWarning = 32,
+ LogEntryTypeProcessInfo = 64,
+ LogEntryTypeEventHandler = 128,
+ LogEntryTypeExternalCommand = 512,
+ LogEntryTypeHostUp = 1024,
+ LogEntryTypeHostDown = 2048,
+ LogEntryTypeHostUnreachable = 4096,
+ LogEntryTypeServiceOk = 8192,
+ LogEntryTypeServiceUnknown = 16384,
+ LogEntryTypeServiceWarning = 32768,
+ LogEntryTypeServiceCritical = 65536,
+ LogEntryTypePassiveCheck = 1231072,
+ LogEntryTypeInfoMessage = 262144,
+ LogEntryTypeHostNotification = 524288,
+ LogEntryTypeServiceNotification = 1048576
+};
+
+/**
+ * IDO events
+ *
+ * @ingroup ido
+ */
+class DbEvents
+{
+public:
+ static void StaticInitialize();
+
+ static void AddComments(const Checkable::Ptr& checkable);
+
+ static void AddDowntimes(const Checkable::Ptr& checkable);
+ static void RemoveDowntimes(const Checkable::Ptr& checkable);
+
+ static void AddLogHistory(const Checkable::Ptr& checkable, const String& buffer, LogEntryType type);
+
+ /* Status */
+ static void NextCheckUpdatedHandler(const Checkable::Ptr& checkable);
+ static void FlappingChangedHandler(const Checkable::Ptr& checkable);
+ static void LastNotificationChangedHandler(const Notification::Ptr& notification, const Checkable::Ptr& checkable);
+
+ static void EnableActiveChecksChangedHandler(const Checkable::Ptr& checkable);
+ static void EnablePassiveChecksChangedHandler(const Checkable::Ptr& checkable);
+ static void EnableNotificationsChangedHandler(const Checkable::Ptr& checkable);
+ static void EnablePerfdataChangedHandler(const Checkable::Ptr& checkable);
+ static void EnableFlappingChangedHandler(const Checkable::Ptr& checkable);
+
+ static void AddComment(const Comment::Ptr& comment);
+ static void RemoveComment(const Comment::Ptr& comment);
+
+ static void AddDowntime(const Downtime::Ptr& downtime);
+ static void RemoveDowntime(const Downtime::Ptr& downtime);
+ static void TriggerDowntime(const Downtime::Ptr& downtime);
+
+ static void AddAcknowledgement(const Checkable::Ptr& checkable, AcknowledgementType type);
+ static void RemoveAcknowledgement(const Checkable::Ptr& checkable);
+ static void AddAcknowledgementInternal(const Checkable::Ptr& checkable, AcknowledgementType type, bool add);
+
+ static void ReachabilityChangedHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, std::set<Checkable::Ptr> children);
+
+ /* comment, downtime, acknowledgement history */
+ static void AddCommentHistory(const Comment::Ptr& comment);
+ static void AddDowntimeHistory(const Downtime::Ptr& downtime);
+ static void AddAcknowledgementHistory(const Checkable::Ptr& checkable, const String& author, const String& comment,
+ AcknowledgementType type, bool notify, double expiry);
+
+ /* notification & contactnotification history */
+ static void AddNotificationHistory(const Notification::Ptr& notification, const Checkable::Ptr& checkable,
+ const std::set<User::Ptr>& users, NotificationType type, const CheckResult::Ptr& cr, const String& author,
+ const String& text);
+
+ /* statehistory */
+ static void AddStateChangeHistory(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, StateType type);
+
+ /* logentries */
+ static void AddCheckResultLogHistory(const Checkable::Ptr& checkable, const CheckResult::Ptr &cr);
+ static void AddTriggerDowntimeLogHistory(const Downtime::Ptr& downtime);
+ static void AddRemoveDowntimeLogHistory(const Downtime::Ptr& downtime);
+ static void AddNotificationSentLogHistory(const Notification::Ptr& notification, const Checkable::Ptr& checkable,
+ const User::Ptr& user, NotificationType notification_type, const CheckResult::Ptr& cr, const String& author,
+ const String& comment_text);
+
+ static void AddFlappingChangedLogHistory(const Checkable::Ptr& checkable);
+ static void AddEnableFlappingChangedLogHistory(const Checkable::Ptr& checkable);
+
+ /* other history */
+ static void AddFlappingChangedHistory(const Checkable::Ptr& checkable);
+ static void AddEnableFlappingChangedHistory(const Checkable::Ptr& checkable);
+ static void AddCheckableCheckHistory(const Checkable::Ptr& checkable, const CheckResult::Ptr &cr);
+ static void AddEventHandlerHistory(const Checkable::Ptr& checkable);
+ static void AddExternalCommandHistory(double time, const String& command, const std::vector<String>& arguments);
+
+private:
+ DbEvents();
+
+ static void AddCommentInternal(std::vector<DbQuery>& queries, const Comment::Ptr& comment, bool historical);
+ static void RemoveCommentInternal(std::vector<DbQuery>& queries, const Comment::Ptr& comment);
+ static void AddDowntimeInternal(std::vector<DbQuery>& queries, const Downtime::Ptr& downtime, bool historical);
+ static void RemoveDowntimeInternal(std::vector<DbQuery>& queries, const Downtime::Ptr& downtime);
+ static void EnableChangedHandlerInternal(const Checkable::Ptr& checkable, const String& fieldName, bool enabled);
+
+ static int GetHostState(const Host::Ptr& host);
+ static String GetHostStateString(const Host::Ptr& host);
+ static std::pair<unsigned long, unsigned long> ConvertTimestamp(double time);
+ static int MapNotificationReasonType(NotificationType type);
+ static int MapExternalCommandType(const String& name);
+};
+
+}
+
+#endif /* DBEVENTS_H */
diff --git a/lib/db_ido/dbobject.cpp b/lib/db_ido/dbobject.cpp
new file mode 100644
index 0000000..406bf52
--- /dev/null
+++ b/lib/db_ido/dbobject.cpp
@@ -0,0 +1,430 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido/dbobject.hpp"
+#include "db_ido/dbtype.hpp"
+#include "db_ido/dbvalue.hpp"
+#include "icinga/customvarobject.hpp"
+#include "icinga/service.hpp"
+#include "icinga/compatutility.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/eventcommand.hpp"
+#include "icinga/notificationcommand.hpp"
+#include "remote/endpoint.hpp"
+#include "base/configobject.hpp"
+#include "base/configtype.hpp"
+#include "base/json.hpp"
+#include "base/serializer.hpp"
+#include "base/json.hpp"
+#include "base/convert.hpp"
+#include "base/objectlock.hpp"
+#include "base/utility.hpp"
+#include "base/initialize.hpp"
+#include "base/logger.hpp"
+
+using namespace icinga;
+
+boost::signals2::signal<void (const DbQuery&)> DbObject::OnQuery;
+boost::signals2::signal<void (const std::vector<DbQuery>&)> DbObject::OnMultipleQueries;
+boost::signals2::signal<void (const std::function<void (const DbObject::QueryCallbacks&)>&)> DbObject::OnMakeQueries;
+
+INITIALIZE_ONCE(&DbObject::StaticInitialize);
+
+DbObject::DbObject(intrusive_ptr<DbType> type, String name1, String name2)
+ : m_Name1(std::move(name1)), m_Name2(std::move(name2)), m_Type(std::move(type)), m_LastConfigUpdate(0), m_LastStatusUpdate(0)
+{ }
+
+void DbObject::StaticInitialize()
+{
+ /* triggered in ProcessCheckResult(), requires UpdateNextCheck() to be called before */
+ ConfigObject::OnStateChanged.connect([](const ConfigObject::Ptr& object) { StateChangedHandler(object); });
+ CustomVarObject::OnVarsChanged.connect([](const CustomVarObject::Ptr& customVar, const Value&) { VarsChangedHandler(customVar); });
+
+ /* triggered on create, update and delete objects */
+ ConfigObject::OnVersionChanged.connect([](const ConfigObject::Ptr& object, const Value&) { VersionChangedHandler(object); });
+}
+
+void DbObject::SetObject(const ConfigObject::Ptr& object)
+{
+ m_Object = object;
+}
+
+ConfigObject::Ptr DbObject::GetObject() const
+{
+ return m_Object;
+}
+
+String DbObject::GetName1() const
+{
+ return m_Name1;
+}
+
+String DbObject::GetName2() const
+{
+ return m_Name2;
+}
+
+DbType::Ptr DbObject::GetType() const
+{
+ return m_Type;
+}
+
+String DbObject::CalculateConfigHash(const Dictionary::Ptr& configFields) const
+{
+ Dictionary::Ptr configFieldsDup = configFields->ShallowClone();
+
+ {
+ ObjectLock olock(configFieldsDup);
+
+ for (const Dictionary::Pair& kv : configFieldsDup) {
+ if (kv.second.IsObjectType<ConfigObject>()) {
+ ConfigObject::Ptr obj = kv.second;
+ configFieldsDup->Set(kv.first, obj->GetName());
+ }
+ }
+ }
+
+ Array::Ptr data = new Array();
+ data->Add(configFieldsDup);
+
+ CustomVarObject::Ptr custom_var_object = dynamic_pointer_cast<CustomVarObject>(GetObject());
+
+ if (custom_var_object)
+ data->Add(custom_var_object->GetVars());
+
+ return HashValue(data);
+}
+
+String DbObject::HashValue(const Value& value)
+{
+ Value temp;
+
+ Type::Ptr type = value.GetReflectionType();
+
+ if (ConfigObject::TypeInstance->IsAssignableFrom(type))
+ temp = Serialize(value, FAConfig);
+ else
+ temp = value;
+
+ return SHA256(JsonEncode(temp));
+}
+
+void DbObject::SendConfigUpdateHeavy(const Dictionary::Ptr& configFields)
+{
+ /* update custom var config and status */
+ SendVarsConfigUpdateHeavy();
+
+ /* config attributes */
+ if (!configFields)
+ return;
+
+ ASSERT(configFields->Contains("config_hash"));
+
+ ConfigObject::Ptr object = GetObject();
+
+ DbQuery query;
+ query.Table = GetType()->GetTable() + "s";
+ query.Type = DbQueryInsert | DbQueryUpdate;
+ query.Category = DbCatConfig;
+ query.Fields = configFields;
+ query.Fields->Set(GetType()->GetIDColumn(), object);
+ query.Fields->Set("instance_id", 0); /* DbConnection class fills in real ID */
+ query.Fields->Set("config_type", 1);
+ query.WhereCriteria = new Dictionary({
+ { GetType()->GetIDColumn(), object }
+ });
+ query.Object = this;
+ query.ConfigUpdate = true;
+ OnQuery(query);
+
+ m_LastConfigUpdate = Utility::GetTime();
+
+ OnConfigUpdateHeavy();
+}
+
+void DbObject::SendConfigUpdateLight()
+{
+ OnConfigUpdateLight();
+}
+
+void DbObject::SendStatusUpdate()
+{
+ /* status attributes */
+ Dictionary::Ptr fields = GetStatusFields();
+
+ if (!fields)
+ return;
+
+ DbQuery query;
+ query.Table = GetType()->GetTable() + "status";
+ query.Type = DbQueryInsert | DbQueryUpdate;
+ query.Category = DbCatState;
+ query.Fields = fields;
+ query.Fields->Set(GetType()->GetIDColumn(), GetObject());
+
+ /* do not override endpoint_object_id for endpoints & zones */
+ if (query.Table != "endpointstatus" && query.Table != "zonestatus") {
+ String node = IcingaApplication::GetInstance()->GetNodeName();
+
+ Endpoint::Ptr endpoint = Endpoint::GetByName(node);
+ if (endpoint)
+ query.Fields->Set("endpoint_object_id", endpoint);
+ }
+
+ query.Fields->Set("instance_id", 0); /* DbConnection class fills in real ID */
+
+ query.Fields->Set("status_update_time", DbValue::FromTimestamp(Utility::GetTime()));
+ query.WhereCriteria = new Dictionary({
+ { GetType()->GetIDColumn(), GetObject() }
+ });
+ query.Object = this;
+ query.StatusUpdate = true;
+ OnQuery(query);
+
+ m_LastStatusUpdate = Utility::GetTime();
+
+ OnStatusUpdate();
+}
+
+void DbObject::SendVarsConfigUpdateHeavy()
+{
+ ConfigObject::Ptr obj = GetObject();
+
+ CustomVarObject::Ptr custom_var_object = dynamic_pointer_cast<CustomVarObject>(obj);
+
+ if (!custom_var_object)
+ return;
+
+ std::vector<DbQuery> queries;
+
+ DbQuery query1;
+ query1.Table = "customvariables";
+ query1.Type = DbQueryDelete;
+ query1.Category = DbCatConfig;
+ query1.WhereCriteria = new Dictionary({
+ { "object_id", obj }
+ });
+ queries.emplace_back(std::move(query1));
+
+ DbQuery query2;
+ query2.Table = "customvariablestatus";
+ query2.Type = DbQueryDelete;
+ query2.Category = DbCatConfig;
+ query2.WhereCriteria = new Dictionary({
+ { "object_id", obj }
+ });
+ queries.emplace_back(std::move(query2));
+
+ Dictionary::Ptr vars = custom_var_object->GetVars();
+
+ if (vars) {
+ ObjectLock olock (vars);
+
+ for (const Dictionary::Pair& kv : vars) {
+ if (kv.first.IsEmpty())
+ continue;
+
+ String value;
+ int is_json = 0;
+
+ if (kv.second.IsObjectType<Array>() || kv.second.IsObjectType<Dictionary>()) {
+ value = JsonEncode(kv.second);
+ is_json = 1;
+ } else
+ value = kv.second;
+
+ DbQuery query3;
+ query3.Table = "customvariables";
+ query3.Type = DbQueryInsert;
+ query3.Category = DbCatConfig;
+ query3.Fields = new Dictionary({
+ { "varname", kv.first },
+ { "varvalue", value },
+ { "is_json", is_json },
+ { "config_type", 1 },
+ { "object_id", obj },
+ { "instance_id", 0 } /* DbConnection class fills in real ID */
+ });
+ queries.emplace_back(std::move(query3));
+
+ DbQuery query4;
+ query4.Table = "customvariablestatus";
+ query4.Type = DbQueryInsert;
+ query4.Category = DbCatState;
+
+ query4.Fields = new Dictionary({
+ { "varname", kv.first },
+ { "varvalue", value },
+ { "is_json", is_json },
+ { "status_update_time", DbValue::FromTimestamp(Utility::GetTime()) },
+ { "object_id", obj },
+ { "instance_id", 0 } /* DbConnection class fills in real ID */
+ });
+
+ queries.emplace_back(std::move(query4));
+ }
+ }
+
+ OnMultipleQueries(queries);
+}
+
+void DbObject::SendVarsStatusUpdate()
+{
+ ConfigObject::Ptr obj = GetObject();
+
+ CustomVarObject::Ptr custom_var_object = dynamic_pointer_cast<CustomVarObject>(obj);
+
+ if (!custom_var_object)
+ return;
+
+ Dictionary::Ptr vars = custom_var_object->GetVars();
+
+ if (vars) {
+ std::vector<DbQuery> queries;
+ ObjectLock olock (vars);
+
+ for (const Dictionary::Pair& kv : vars) {
+ if (kv.first.IsEmpty())
+ continue;
+
+ String value;
+ int is_json = 0;
+
+ if (kv.second.IsObjectType<Array>() || kv.second.IsObjectType<Dictionary>()) {
+ value = JsonEncode(kv.second);
+ is_json = 1;
+ } else
+ value = kv.second;
+
+ DbQuery query;
+ query.Table = "customvariablestatus";
+ query.Type = DbQueryInsert | DbQueryUpdate;
+ query.Category = DbCatState;
+
+ query.Fields = new Dictionary({
+ { "varname", kv.first },
+ { "varvalue", value },
+ { "is_json", is_json },
+ { "status_update_time", DbValue::FromTimestamp(Utility::GetTime()) },
+ { "object_id", obj },
+ { "instance_id", 0 } /* DbConnection class fills in real ID */
+ });
+
+ query.WhereCriteria = new Dictionary({
+ { "object_id", obj },
+ { "varname", kv.first }
+ });
+
+ queries.emplace_back(std::move(query));
+ }
+
+ OnMultipleQueries(queries);
+ }
+}
+
+double DbObject::GetLastConfigUpdate() const
+{
+ return m_LastConfigUpdate;
+}
+
+double DbObject::GetLastStatusUpdate() const
+{
+ return m_LastStatusUpdate;
+}
+
+void DbObject::OnConfigUpdateHeavy()
+{
+ /* Default handler does nothing. */
+}
+
+void DbObject::OnConfigUpdateLight()
+{
+ /* Default handler does nothing. */
+}
+
+void DbObject::OnStatusUpdate()
+{
+ /* Default handler does nothing. */
+}
+
+DbObject::Ptr DbObject::GetOrCreateByObject(const ConfigObject::Ptr& object)
+{
+ std::unique_lock<std::mutex> lock(GetStaticMutex());
+
+ DbObject::Ptr dbobj = object->GetExtension("DbObject");
+
+ if (dbobj)
+ return dbobj;
+
+ DbType::Ptr dbtype = DbType::GetByName(object->GetReflectionType()->GetName());
+
+ if (!dbtype)
+ return nullptr;
+
+ Service::Ptr service;
+ String name1, name2;
+
+ service = dynamic_pointer_cast<Service>(object);
+
+ if (service) {
+ Host::Ptr host = service->GetHost();
+
+ name1 = host->GetName();
+ name2 = service->GetShortName();
+ } else {
+ if (object->GetReflectionType() == CheckCommand::TypeInstance ||
+ object->GetReflectionType() == EventCommand::TypeInstance ||
+ object->GetReflectionType() == NotificationCommand::TypeInstance) {
+ Command::Ptr command = dynamic_pointer_cast<Command>(object);
+ name1 = CompatUtility::GetCommandName(command);
+ }
+ else
+ name1 = object->GetName();
+ }
+
+ dbobj = dbtype->GetOrCreateObjectByName(name1, name2);
+
+ dbobj->SetObject(object);
+ object->SetExtension("DbObject", dbobj);
+
+ return dbobj;
+}
+
+void DbObject::StateChangedHandler(const ConfigObject::Ptr& object)
+{
+ DbObject::Ptr dbobj = GetOrCreateByObject(object);
+
+ if (!dbobj)
+ return;
+
+ dbobj->SendStatusUpdate();
+}
+
+void DbObject::VarsChangedHandler(const CustomVarObject::Ptr& object)
+{
+ DbObject::Ptr dbobj = GetOrCreateByObject(object);
+
+ if (!dbobj)
+ return;
+
+ dbobj->SendVarsStatusUpdate();
+}
+
+void DbObject::VersionChangedHandler(const ConfigObject::Ptr& object)
+{
+ DbObject::Ptr dbobj = DbObject::GetOrCreateByObject(object);
+
+ if (dbobj) {
+ Dictionary::Ptr configFields = dbobj->GetConfigFields();
+ String configHash = dbobj->CalculateConfigHash(configFields);
+ configFields->Set("config_hash", configHash);
+
+ dbobj->SendConfigUpdateHeavy(configFields);
+ dbobj->SendStatusUpdate();
+ }
+}
+
+std::mutex& DbObject::GetStaticMutex()
+{
+ static std::mutex mutex;
+ return mutex;
+}
diff --git a/lib/db_ido/dbobject.hpp b/lib/db_ido/dbobject.hpp
new file mode 100644
index 0000000..399b77d
--- /dev/null
+++ b/lib/db_ido/dbobject.hpp
@@ -0,0 +1,112 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef DBOBJECT_H
+#define DBOBJECT_H
+
+#include "db_ido/i2-db_ido.hpp"
+#include "db_ido/dbreference.hpp"
+#include "db_ido/dbquery.hpp"
+#include "db_ido/dbtype.hpp"
+#include "icinga/customvarobject.hpp"
+#include "base/configobject.hpp"
+
+namespace icinga
+{
+
+enum DbObjectUpdateType
+{
+ DbObjectCreated,
+ DbObjectRemoved
+};
+
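+/* The numeric values below correspond to the objecttype_id values used in the
+ * IDO database schema (icinga_objects table). */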
+enum DbObjectType
+{
+ DbObjectTypeHost = 1,
+ DbObjectTypeService = 2,
+ DbObjectTypeHostGroup = 3,
+ DbObjectTypeServiceGroup = 4,
+ DbObjectTypeHostEscalation = 5,
+ DbObjectTypeServiceEscalation = 6,
+ DbObjectTypeHostDependency = 7,
+ DbObjectTypeServiceDependency = 8,
+ DbObjectTypeTimePeriod = 9,
+ DbObjectTypeContact = 10,
+ DbObjectTypeContactGroup = 11,
+ DbObjectTypeCommand = 12,
+ DbObjectTypeEndpoint = 13,
+ DbObjectTypeZone = 14,
+};
+
+/**
+ * A database object.
+ *
+ * @ingroup ido
+ */
+class DbObject : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(DbObject);
+
+ static void StaticInitialize();
+
+ void SetObject(const ConfigObject::Ptr& object);
+ ConfigObject::Ptr GetObject() const;
+
+ String GetName1() const;
+ String GetName2() const;
+ intrusive_ptr<DbType> GetType() const;
+
+ virtual Dictionary::Ptr GetConfigFields() const = 0;
+ virtual Dictionary::Ptr GetStatusFields() const = 0;
+
+ static DbObject::Ptr GetOrCreateByObject(const ConfigObject::Ptr& object);
+
+ struct QueryCallbacks {
+ std::function<void(const DbQuery&)> Query;
+ std::function<void(const std::vector<DbQuery>&)> MultipleQueries;
+ };
+
+ static boost::signals2::signal<void (const DbQuery&)> OnQuery;
+ static boost::signals2::signal<void (const std::vector<DbQuery>&)> OnMultipleQueries;
+ static boost::signals2::signal<void (const std::function<void (const QueryCallbacks&)>&)> OnMakeQueries;
+
+ void SendConfigUpdateHeavy(const Dictionary::Ptr& configFields);
+ void SendConfigUpdateLight();
+ void SendStatusUpdate();
+ void SendVarsConfigUpdateHeavy();
+ void SendVarsStatusUpdate();
+
+ double GetLastConfigUpdate() const;
+ double GetLastStatusUpdate() const;
+
+ virtual String CalculateConfigHash(const Dictionary::Ptr& configFields) const;
+
+protected:
+ DbObject(intrusive_ptr<DbType> type, String name1, String name2);
+
+ virtual void OnConfigUpdateHeavy();
+ virtual void OnConfigUpdateLight();
+ virtual void OnStatusUpdate();
+
+ static String HashValue(const Value& value);
+
+private:
+ String m_Name1;
+ String m_Name2;
+ intrusive_ptr<DbType> m_Type;
+ ConfigObject::Ptr m_Object;
+ double m_LastConfigUpdate;
+ double m_LastStatusUpdate;
+
+ static void StateChangedHandler(const ConfigObject::Ptr& object);
+ static void VarsChangedHandler(const CustomVarObject::Ptr& object);
+ static void VersionChangedHandler(const ConfigObject::Ptr& object);
+
+ static std::mutex& GetStaticMutex();
+
+ friend class DbType;
+};
+
+}
+
+#endif /* DBOBJECT_H */
diff --git a/lib/db_ido/dbquery.cpp b/lib/db_ido/dbquery.cpp
new file mode 100644
index 0000000..1de2928
--- /dev/null
+++ b/lib/db_ido/dbquery.cpp
@@ -0,0 +1,52 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido/dbquery.hpp"
+#include "base/initialize.hpp"
+#include "base/scriptglobal.hpp"
+
+using namespace icinga;
+
+INITIALIZE_ONCE(&DbQuery::StaticInitialize);
+
+std::map<String, int> DbQuery::m_CategoryFilterMap;
+
+void DbQuery::StaticInitialize()
+{
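+ /* Expose the category constants to the configuration DSL (Icinga.DbCat*)
+  * so they can be referenced when filtering IDO categories. */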
+ ScriptGlobal::Set("Icinga.DbCatConfig", DbCatConfig);
+ ScriptGlobal::Set("Icinga.DbCatState", DbCatState);
+ ScriptGlobal::Set("Icinga.DbCatAcknowledgement", DbCatAcknowledgement);
+ ScriptGlobal::Set("Icinga.DbCatComment", DbCatComment);
+ ScriptGlobal::Set("Icinga.DbCatDowntime", DbCatDowntime);
+ ScriptGlobal::Set("Icinga.DbCatEventHandler", DbCatEventHandler);
+ ScriptGlobal::Set("Icinga.DbCatExternalCommand", DbCatExternalCommand);
+ ScriptGlobal::Set("Icinga.DbCatFlapping", DbCatFlapping);
+ ScriptGlobal::Set("Icinga.DbCatCheck", DbCatCheck);
+ ScriptGlobal::Set("Icinga.DbCatLog", DbCatLog);
+ ScriptGlobal::Set("Icinga.DbCatNotification", DbCatNotification);
+ ScriptGlobal::Set("Icinga.DbCatProgramStatus", DbCatProgramStatus);
+ ScriptGlobal::Set("Icinga.DbCatRetention", DbCatRetention);
+ ScriptGlobal::Set("Icinga.DbCatStateHistory", DbCatStateHistory);
+
+ ScriptGlobal::Set("Icinga.DbCatEverything", DbCatEverything);
+
+ m_CategoryFilterMap["DbCatConfig"] = DbCatConfig;
+ m_CategoryFilterMap["DbCatState"] = DbCatState;
+ m_CategoryFilterMap["DbCatAcknowledgement"] = DbCatAcknowledgement;
+ m_CategoryFilterMap["DbCatComment"] = DbCatComment;
+ m_CategoryFilterMap["DbCatDowntime"] = DbCatDowntime;
+ m_CategoryFilterMap["DbCatEventHandler"] = DbCatEventHandler;
+ m_CategoryFilterMap["DbCatExternalCommand"] = DbCatExternalCommand;
+ m_CategoryFilterMap["DbCatFlapping"] = DbCatFlapping;
+ m_CategoryFilterMap["DbCatCheck"] = DbCatCheck;
+ m_CategoryFilterMap["DbCatLog"] = DbCatLog;
+ m_CategoryFilterMap["DbCatNotification"] = DbCatNotification;
+ m_CategoryFilterMap["DbCatProgramStatus"] = DbCatProgramStatus;
+ m_CategoryFilterMap["DbCatRetention"] = DbCatRetention;
+ m_CategoryFilterMap["DbCatStateHistory"] = DbCatStateHistory;
+ m_CategoryFilterMap["DbCatEverything"] = DbCatEverything;
+}
+
+const std::map<String, int>& DbQuery::GetCategoryFilterMap()
+{
+ return m_CategoryFilterMap;
+}
diff --git a/lib/db_ido/dbquery.hpp b/lib/db_ido/dbquery.hpp
new file mode 100644
index 0000000..fecb2e3
--- /dev/null
+++ b/lib/db_ido/dbquery.hpp
@@ -0,0 +1,72 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef DBQUERY_H
+#define DBQUERY_H
+
+#include "db_ido/i2-db_ido.hpp"
+#include "db_ido/dbvalue.hpp"
+#include "icinga/customvarobject.hpp"
+#include "base/dictionary.hpp"
+#include "base/configobject.hpp"
+
+namespace icinga
+{
+
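+/* DbQuery::Type is a bit mask of these values; for example, DbQueryInsert | DbQueryUpdate
+ * lets the backend update the row matching WhereCriteria or insert it if it is missing. */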
+enum DbQueryType
+{
+ DbQueryInsert = 1,
+ DbQueryUpdate = 2,
+ DbQueryDelete = 4,
+ DbQueryNewTransaction = 8
+};
+
+enum DbQueryCategory
+{
+ DbCatInvalid = 0, // 0 is reserved; DbCatEverything needs all bits set (~0, i.e. -1)
+ DbCatEverything = ~0,
+
+ DbCatConfig = 1,
+ DbCatState = 2,
+ DbCatAcknowledgement = 4,
+ DbCatComment = 8,
+ DbCatDowntime = 16,
+ DbCatEventHandler = 32,
+ DbCatExternalCommand = 64,
+ DbCatFlapping = 128,
+ DbCatCheck = 256,
+ DbCatLog = 512,
+ DbCatNotification = 1024,
+ DbCatProgramStatus = 2048,
+ DbCatRetention = 4096,
+ DbCatStateHistory = 8192
+};
+
+class DbObject;
+
+struct DbQuery
+{
+ int Type{0};
+ DbQueryCategory Category{DbCatInvalid};
+ String Table;
+ String IdColumn;
+ Dictionary::Ptr Fields;
+ Dictionary::Ptr WhereCriteria;
+ intrusive_ptr<DbObject> Object;
+ DbValue::Ptr NotificationInsertID;
+ bool ConfigUpdate{false};
+ bool StatusUpdate{false};
+ WorkQueuePriority Priority{PriorityNormal};
+
+ static void StaticInitialize();
+
+ static const std::map<String, int>& GetCategoryFilterMap();
+
+private:
+ static std::map<String, int> m_CategoryFilterMap;
+};
+
+}
+
+#endif /* DBQUERY_H */
+
+#include "db_ido/dbobject.hpp"
diff --git a/lib/db_ido/dbreference.cpp b/lib/db_ido/dbreference.cpp
new file mode 100644
index 0000000..e8f13c0
--- /dev/null
+++ b/lib/db_ido/dbreference.cpp
@@ -0,0 +1,19 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "dbreference.hpp"
+
+using namespace icinga;
+
+DbReference::DbReference(long id)
+ : m_Id(id)
+{ }
+
+bool DbReference::IsValid() const
+{
+ return (m_Id != -1);
+}
+
+DbReference::operator long() const
+{
+ return m_Id;
+}
diff --git a/lib/db_ido/dbreference.hpp b/lib/db_ido/dbreference.hpp
new file mode 100644
index 0000000..70edf9a
--- /dev/null
+++ b/lib/db_ido/dbreference.hpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef DBREFERENCE_H
+#define DBREFERENCE_H
+
+#include "db_ido/i2-db_ido.hpp"
+
+namespace icinga
+{
+
+/**
+ * A database reference.
+ *
+ * @ingroup ido
+ */
+struct DbReference
+{
+public:
+ DbReference() = default;
+ DbReference(long id);
+
+ bool IsValid() const;
+ operator long() const;
+private:
+ long m_Id{-1};
+};
+
+}
+
+#endif /* DBREFERENCE_H */
diff --git a/lib/db_ido/dbtype.cpp b/lib/db_ido/dbtype.cpp
new file mode 100644
index 0000000..bc45dcb
--- /dev/null
+++ b/lib/db_ido/dbtype.cpp
@@ -0,0 +1,141 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido/dbtype.hpp"
+#include "db_ido/dbconnection.hpp"
+#include "base/objectlock.hpp"
+#include "base/debug.hpp"
+#include <boost/thread/once.hpp>
+
+using namespace icinga;
+
+DbType::DbType(String name, String table, long tid, String idcolumn, DbType::ObjectFactory factory)
+ : m_Name(std::move(name)), m_Table(std::move(table)), m_TypeID(tid), m_IDColumn(std::move(idcolumn)), m_ObjectFactory(std::move(factory))
+{ }
+
+String DbType::GetName() const
+{
+ return m_Name;
+}
+
+String DbType::GetTable() const
+{
+ return m_Table;
+}
+
+long DbType::GetTypeID() const
+{
+ return m_TypeID;
+}
+
+String DbType::GetIDColumn() const
+{
+ return m_IDColumn;
+}
+
+void DbType::RegisterType(const DbType::Ptr& type)
+{
+ std::unique_lock<std::mutex> lock(GetStaticMutex());
+ GetTypes()[type->GetName()] = type;
+}
+
+DbType::Ptr DbType::GetByName(const String& name)
+{
+ String typeName;
+
+ if (name == "CheckCommand" || name == "NotificationCommand" || name == "EventCommand")
+ typeName = "Command";
+ else
+ typeName = name;
+
+ std::unique_lock<std::mutex> lock(GetStaticMutex());
+ auto it = GetTypes().find(typeName);
+
+ if (it == GetTypes().end())
+ return nullptr;
+
+ return it->second;
+}
+
+DbType::Ptr DbType::GetByID(long tid)
+{
+ std::unique_lock<std::mutex> lock(GetStaticMutex());
+
+ for (const TypeMap::value_type& kv : GetTypes()) {
+ if (kv.second->GetTypeID() == tid)
+ return kv.second;
+ }
+
+ return nullptr;
+}
+
+DbObject::Ptr DbType::GetOrCreateObjectByName(const String& name1, const String& name2)
+{
+ ObjectLock olock(this);
+
+ auto it = m_Objects.find(std::make_pair(name1, name2));
+
+ if (it != m_Objects.end())
+ return it->second;
+
+ DbObject::Ptr dbobj = m_ObjectFactory(this, name1, name2);
+ m_Objects[std::make_pair(name1, name2)] = dbobj;
+
+ String objName = name1;
+
+ if (!name2.IsEmpty())
+ objName += "!" + name2;
+
+ String objType = m_Name;
+
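+ /* All command flavours share a single DbType. The prefix which
+  * CompatUtility::GetCommandName() adds to the object name identifies the
+  * original type and is stripped here so the underlying ConfigObject can be
+  * looked up under its real type and name. */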
+ if (m_TypeID == DbObjectTypeCommand) {
+ if (objName.SubStr(0, 6) == "check_") {
+ objType = "CheckCommand";
+ objName = objName.SubStr(6);
+ } else if (objName.SubStr(0, 13) == "notification_") {
+ objType = "NotificationCommand";
+ objName = objName.SubStr(13);
+ } else if (objName.SubStr(0, 6) == "event_") {
+ objType = "EventCommand";
+ objName = objName.SubStr(6);
+ }
+ }
+
+ dbobj->SetObject(ConfigObject::GetObject(objType, objName));
+
+ return dbobj;
+}
+
+std::mutex& DbType::GetStaticMutex()
+{
+ static std::mutex mutex;
+ return mutex;
+}
+
+/**
+ * Caller must hold static mutex.
+ */
+DbType::TypeMap& DbType::GetTypes()
+{
+ static DbType::TypeMap tm;
+ return tm;
+}
+
+std::set<DbType::Ptr> DbType::GetAllTypes()
+{
+ std::set<DbType::Ptr> result;
+
+ {
+ std::unique_lock<std::mutex> lock(GetStaticMutex());
+ for (const auto& kv : GetTypes()) {
+ result.insert(kv.second);
+ }
+ }
+
+ return result;
+}
+
+DbTypeRegistry *DbTypeRegistry::GetInstance()
+{
+ return Singleton<DbTypeRegistry>::GetInstance();
+}
+
diff --git a/lib/db_ido/dbtype.hpp b/lib/db_ido/dbtype.hpp
new file mode 100644
index 0000000..c8ebc45
--- /dev/null
+++ b/lib/db_ido/dbtype.hpp
@@ -0,0 +1,90 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef DBTYPE_H
+#define DBTYPE_H
+
+#include "db_ido/i2-db_ido.hpp"
+#include "base/object.hpp"
+#include "base/registry.hpp"
+#include "base/singleton.hpp"
+#include <set>
+
+namespace icinga
+{
+
+class DbObject;
+
+/**
+ * A database object type.
+ *
+ * @ingroup ido
+ */
+class DbType final : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(DbType);
+
+ typedef std::function<intrusive_ptr<DbObject> (const intrusive_ptr<DbType>&, const String&, const String&)> ObjectFactory;
+ typedef std::map<String, DbType::Ptr> TypeMap;
+ typedef std::map<std::pair<String, String>, intrusive_ptr<DbObject> > ObjectMap;
+
+ DbType(String name, String table, long tid, String idcolumn, ObjectFactory factory);
+
+ String GetName() const;
+ String GetTable() const;
+ long GetTypeID() const;
+ String GetIDColumn() const;
+
+ static void RegisterType(const DbType::Ptr& type);
+
+ static DbType::Ptr GetByName(const String& name);
+ static DbType::Ptr GetByID(long tid);
+
+ intrusive_ptr<DbObject> GetOrCreateObjectByName(const String& name1, const String& name2);
+
+ static std::set<DbType::Ptr> GetAllTypes();
+
+private:
+ String m_Name;
+ String m_Table;
+ long m_TypeID;
+ String m_IDColumn;
+ ObjectFactory m_ObjectFactory;
+
+ static std::mutex& GetStaticMutex();
+ static TypeMap& GetTypes();
+
+ ObjectMap m_Objects;
+};
+
+/**
+ * A registry for DbType objects.
+ *
+ * @ingroup ido
+ */
+class DbTypeRegistry : public Registry<DbTypeRegistry, DbType::Ptr>
+{
+public:
+ static DbTypeRegistry *GetInstance();
+};
+
+/**
+ * Factory function for DbObject-based classes.
+ *
+ * @ingroup ido
+ */
+template<typename T>
+intrusive_ptr<T> DbObjectFactory(const DbType::Ptr& type, const String& name1, const String& name2)
+{
+ return new T(type, name1, name2);
+}
+
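+/* Registers a DbType at static-initialization time, e.g.
+ * REGISTER_DBTYPE(Host, "host", DbObjectTypeHost, "host_object_id", HostDbObject); */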
+#define REGISTER_DBTYPE(name, table, tid, idcolumn, type) \
+ INITIALIZE_ONCE([]() { \
+ DbType::Ptr dbtype = new DbType(#name, table, tid, idcolumn, DbObjectFactory<type>); \
+ DbType::RegisterType(dbtype); \
+ })
+
+}
+
+#endif /* DBTYPE_H */
diff --git a/lib/db_ido/dbvalue.cpp b/lib/db_ido/dbvalue.cpp
new file mode 100644
index 0000000..e1e3e6c
--- /dev/null
+++ b/lib/db_ido/dbvalue.cpp
@@ -0,0 +1,69 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido/dbvalue.hpp"
+
+using namespace icinga;
+
+DbValue::DbValue(DbValueType type, Value value)
+ : m_Type(type), m_Value(std::move(value))
+{ }
+
+Value DbValue::FromTimestamp(const Value& ts)
+{
+ if (ts.IsEmpty() || ts == 0)
+ return Empty;
+
+ return new DbValue(DbValueTimestamp, ts);
+}
+
+Value DbValue::FromValue(const Value& value)
+{
+ return value;
+}
+
+Value DbValue::FromObjectInsertID(const Value& value)
+{
+ return new DbValue(DbValueObjectInsertID, value);
+}
+
+bool DbValue::IsTimestamp(const Value& value)
+{
+ if (!value.IsObjectType<DbValue>())
+ return false;
+
+ DbValue::Ptr dbv = value;
+ return dbv->GetType() == DbValueTimestamp;
+}
+
+bool DbValue::IsObjectInsertID(const Value& value)
+{
+ if (!value.IsObjectType<DbValue>())
+ return false;
+
+ DbValue::Ptr dbv = value;
+ return dbv->GetType() == DbValueObjectInsertID;
+}
+
+Value DbValue::ExtractValue(const Value& value)
+{
+ if (!value.IsObjectType<DbValue>())
+ return value;
+
+ DbValue::Ptr dbv = value;
+ return dbv->GetValue();
+}
+
+DbValueType DbValue::GetType() const
+{
+ return m_Type;
+}
+
+Value DbValue::GetValue() const
+{
+ return m_Value;
+}
+
+void DbValue::SetValue(const Value& value)
+{
+ m_Value = value;
+}
diff --git a/lib/db_ido/dbvalue.hpp b/lib/db_ido/dbvalue.hpp
new file mode 100644
index 0000000..cb59e3a
--- /dev/null
+++ b/lib/db_ido/dbvalue.hpp
@@ -0,0 +1,52 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef DBVALUE_H
+#define DBVALUE_H
+
+#include "db_ido/i2-db_ido.hpp"
+#include "base/object.hpp"
+#include "base/value.hpp"
+
+namespace icinga
+{
+
+enum DbValueType
+{
+ DbValueTimestamp,
+ DbValueObjectInsertID
+};
+
+/**
+ * A database value.
+ *
+ * @ingroup ido
+ */
+struct DbValue final : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(DbValue);
+
+ DbValue(DbValueType type, Value value);
+
+ static Value FromTimestamp(const Value& ts);
+ static Value FromValue(const Value& value);
+ static Value FromObjectInsertID(const Value& value);
+
+ static bool IsTimestamp(const Value& value);
+ static bool IsObjectInsertID(const Value& value);
+
+ static Value ExtractValue(const Value& value);
+
+ DbValueType GetType() const;
+
+ Value GetValue() const;
+ void SetValue(const Value& value);
+
+private:
+ DbValueType m_Type;
+ Value m_Value;
+};
+
+}
+
+#endif /* DBVALUE_H */
diff --git a/lib/db_ido/endpointdbobject.cpp b/lib/db_ido/endpointdbobject.cpp
new file mode 100644
index 0000000..ea16dd7
--- /dev/null
+++ b/lib/db_ido/endpointdbobject.cpp
@@ -0,0 +1,91 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido/endpointdbobject.hpp"
+#include "db_ido/dbtype.hpp"
+#include "db_ido/dbvalue.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "base/objectlock.hpp"
+#include "base/initialize.hpp"
+#include "base/configtype.hpp"
+#include "base/utility.hpp"
+#include "base/convert.hpp"
+#include "base/logger.hpp"
+
+using namespace icinga;
+
+REGISTER_DBTYPE(Endpoint, "endpoint", DbObjectTypeEndpoint, "endpoint_object_id", EndpointDbObject);
+
+INITIALIZE_ONCE(&EndpointDbObject::StaticInitialize);
+
+void EndpointDbObject::StaticInitialize()
+{
+ Endpoint::OnConnected.connect([](const Endpoint::Ptr& endpoint, const JsonRpcConnection::Ptr&) { EndpointDbObject::UpdateConnectedStatus(endpoint); });
+ Endpoint::OnDisconnected.connect([](const Endpoint::Ptr& endpoint, const JsonRpcConnection::Ptr&) { EndpointDbObject::UpdateConnectedStatus(endpoint); });
+}
+
+EndpointDbObject::EndpointDbObject(const DbType::Ptr& type, const String& name1, const String& name2)
+ : DbObject(type, name1, name2)
+{ }
+
+Dictionary::Ptr EndpointDbObject::GetConfigFields() const
+{
+ Endpoint::Ptr endpoint = static_pointer_cast<Endpoint>(GetObject());
+
+ return new Dictionary({
+ { "identity", endpoint->GetName() },
+ { "node", IcingaApplication::GetInstance()->GetNodeName() },
+ { "zone_object_id", endpoint->GetZone() }
+ });
+}
+
+Dictionary::Ptr EndpointDbObject::GetStatusFields() const
+{
+ Endpoint::Ptr endpoint = static_pointer_cast<Endpoint>(GetObject());
+
+ Log(LogDebug, "EndpointDbObject")
+ << "update status for endpoint '" << endpoint->GetName() << "'";
+
+ return new Dictionary({
+ { "identity", endpoint->GetName() },
+ { "node", IcingaApplication::GetInstance()->GetNodeName() },
+ { "zone_object_id", endpoint->GetZone() },
+ { "is_connected", EndpointIsConnected(endpoint) }
+ });
+}
+
+void EndpointDbObject::UpdateConnectedStatus(const Endpoint::Ptr& endpoint)
+{
+ bool connected = EndpointIsConnected(endpoint);
+
+ Log(LogDebug, "EndpointDbObject")
+ << "update is_connected=" << connected << " for endpoint '" << endpoint->GetName() << "'";
+
+ DbQuery query1;
+ query1.Table = "endpointstatus";
+ query1.Type = DbQueryUpdate;
+ query1.Category = DbCatState;
+
+ query1.Fields = new Dictionary({
+ { "is_connected", (connected ? 1 : 0) },
+ { "status_update_time", DbValue::FromTimestamp(Utility::GetTime()) }
+ });
+
+ query1.WhereCriteria = new Dictionary({
+ { "endpoint_object_id", endpoint },
+ { "instance_id", 0 } /* DbConnection class fills in real ID */
+ });
+
+ OnQuery(query1);
+}
+
+int EndpointDbObject::EndpointIsConnected(const Endpoint::Ptr& endpoint)
+{
+ unsigned int is_connected = endpoint->GetConnected() ? 1 : 0;
+
+ /* if identity is equal to node, fake is_connected */
+ if (endpoint->GetName() == IcingaApplication::GetInstance()->GetNodeName())
+ is_connected = 1;
+
+ return is_connected;
+}
diff --git a/lib/db_ido/endpointdbobject.hpp b/lib/db_ido/endpointdbobject.hpp
new file mode 100644
index 0000000..e4fba36
--- /dev/null
+++ b/lib/db_ido/endpointdbobject.hpp
@@ -0,0 +1,37 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef ENDPOINTDBOBJECT_H
+#define ENDPOINTDBOBJECT_H
+
+#include "db_ido/dbobject.hpp"
+#include "base/configobject.hpp"
+#include "remote/endpoint.hpp"
+
+namespace icinga
+{
+
+/**
+ * An Endpoint database object.
+ *
+ * @ingroup ido
+ */
+class EndpointDbObject final : public DbObject
+{
+public:
+ DECLARE_PTR_TYPEDEFS(EndpointDbObject);
+
+ EndpointDbObject(const intrusive_ptr<DbType>& type, const String& name1, const String& name2);
+
+ static void StaticInitialize();
+
+ Dictionary::Ptr GetConfigFields() const override;
+ Dictionary::Ptr GetStatusFields() const override;
+
+private:
+ static void UpdateConnectedStatus(const Endpoint::Ptr& endpoint);
+ static int EndpointIsConnected(const Endpoint::Ptr& endpoint);
+};
+
+}
+
+#endif /* ENDPOINTDBOBJECT_H */
diff --git a/lib/db_ido/hostdbobject.cpp b/lib/db_ido/hostdbobject.cpp
new file mode 100644
index 0000000..60d1a99
--- /dev/null
+++ b/lib/db_ido/hostdbobject.cpp
@@ -0,0 +1,423 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido/hostdbobject.hpp"
+#include "db_ido/hostgroupdbobject.hpp"
+#include "db_ido/dbtype.hpp"
+#include "db_ido/dbvalue.hpp"
+#include "db_ido/dbevents.hpp"
+#include "icinga/host.hpp"
+#include "icinga/service.hpp"
+#include "icinga/notification.hpp"
+#include "icinga/dependency.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/eventcommand.hpp"
+#include "icinga/compatutility.hpp"
+#include "icinga/pluginutility.hpp"
+#include "base/convert.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+#include "base/json.hpp"
+
+using namespace icinga;
+
+REGISTER_DBTYPE(Host, "host", DbObjectTypeHost, "host_object_id", HostDbObject);
+
+HostDbObject::HostDbObject(const DbType::Ptr& type, const String& name1, const String& name2)
+ : DbObject(type, name1, name2)
+{ }
+
+Dictionary::Ptr HostDbObject::GetConfigFields() const
+{
+ Host::Ptr host = static_pointer_cast<Host>(GetObject());
+
+ /* Compatibility fallback. */
+ String displayName = host->GetDisplayName();
+
+ unsigned long notificationStateFilter = CompatUtility::GetCheckableNotificationStateFilter(host);
+ unsigned long notificationTypeFilter = CompatUtility::GetCheckableNotificationTypeFilter(host);
+
+ return new Dictionary({
+ { "alias", !displayName.IsEmpty() ? displayName : host->GetName() },
+ { "display_name", displayName },
+ { "address", host->GetAddress() },
+ { "address6", host->GetAddress6() },
+ { "check_command_object_id", host->GetCheckCommand() },
+ { "eventhandler_command_object_id", host->GetEventCommand() },
+ { "check_timeperiod_object_id", host->GetCheckPeriod() },
+ { "check_interval", host->GetCheckInterval() / 60.0 },
+ { "retry_interval", host->GetRetryInterval() / 60.0 },
+ { "max_check_attempts", host->GetMaxCheckAttempts() },
+ { "flap_detection_enabled", host->GetEnableFlapping() },
+ { "low_flap_threshold", host->GetFlappingThresholdLow() },
+ { "high_flap_threshold", host->GetFlappingThresholdLow() },
+ { "process_performance_data", host->GetEnablePerfdata() },
+ { "freshness_checks_enabled", 1 },
+ { "freshness_threshold", Convert::ToLong(host->GetCheckInterval()) },
+ { "event_handler_enabled", host->GetEnableEventHandler() },
+ { "passive_checks_enabled", host->GetEnablePassiveChecks() },
+ { "active_checks_enabled", host->GetEnableActiveChecks() },
+ { "notifications_enabled", host->GetEnableNotifications() },
+ { "notes", host->GetNotes() },
+ { "notes_url", host->GetNotesUrl() },
+ { "action_url", host->GetActionUrl() },
+ { "icon_image", host->GetIconImage() },
+ { "icon_image_alt", host->GetIconImageAlt() },
+ { "notification_interval", CompatUtility::GetCheckableNotificationNotificationInterval(host) },
+ { "notify_on_down", (notificationStateFilter & (ServiceWarning | ServiceCritical)) ? 1 : 0 },
+ { "notify_on_unreachable", 1 }, /* We don't have this filter and state, and as such we don't filter such notifications. */
+ { "notify_on_recovery", (notificationTypeFilter & NotificationRecovery) ? 1 : 0 },
+ { "notify_on_flapping", (notificationTypeFilter & (NotificationFlappingStart | NotificationFlappingEnd)) ? 1 : 0 },
+ { "notify_on_downtime", (notificationTypeFilter & (NotificationDowntimeStart | NotificationDowntimeEnd | NotificationDowntimeRemoved)) ? 1 : 0 }
+ });
+}
+
+Dictionary::Ptr HostDbObject::GetStatusFields() const
+{
+ Dictionary::Ptr fields = new Dictionary();
+ Host::Ptr host = static_pointer_cast<Host>(GetObject());
+
+ CheckResult::Ptr cr = host->GetLastCheckResult();
+
+ if (cr) {
+ fields->Set("output", CompatUtility::GetCheckResultOutput(cr));
+ fields->Set("long_output", CompatUtility::GetCheckResultLongOutput(cr));
+ fields->Set("perfdata", PluginUtility::FormatPerfdata(cr->GetPerformanceData()));
+ fields->Set("check_source", cr->GetCheckSource());
+ fields->Set("latency", cr->CalculateLatency());
+ fields->Set("execution_time", cr->CalculateExecutionTime());
+ }
+
+ int currentState = host->GetState();
+
+ if (currentState != HostUp && !host->GetLastReachable())
+ currentState = 2; /* hardcoded compat state */
+
+ fields->Set("current_state", currentState);
+ fields->Set("has_been_checked", host->HasBeenChecked());
+ fields->Set("should_be_scheduled", host->GetEnableActiveChecks());
+ fields->Set("current_check_attempt", host->GetCheckAttempt());
+ fields->Set("max_check_attempts", host->GetMaxCheckAttempts());
+ fields->Set("last_check", DbValue::FromTimestamp(host->GetLastCheck()));
+ fields->Set("next_check", DbValue::FromTimestamp(host->GetNextCheck()));
+ fields->Set("check_type", !host->GetEnableActiveChecks()); /* 0 .. active, 1 .. passive */
+ fields->Set("last_state_change", DbValue::FromTimestamp(host->GetLastStateChange()));
+ fields->Set("last_hard_state_change", DbValue::FromTimestamp(host->GetLastHardStateChange()));
+ fields->Set("last_hard_state", host->GetLastHardState());
+ fields->Set("last_time_up", DbValue::FromTimestamp(host->GetLastStateUp()));
+ fields->Set("last_time_down", DbValue::FromTimestamp(host->GetLastStateDown()));
+ fields->Set("last_time_unreachable", DbValue::FromTimestamp(host->GetLastStateUnreachable()));
+ fields->Set("state_type", host->GetStateType());
+ fields->Set("notifications_enabled", host->GetEnableNotifications());
+ fields->Set("problem_has_been_acknowledged", host->GetAcknowledgement() != AcknowledgementNone);
+ fields->Set("acknowledgement_type", host->GetAcknowledgement());
+ fields->Set("passive_checks_enabled", host->GetEnablePassiveChecks());
+ fields->Set("active_checks_enabled", host->GetEnableActiveChecks());
+ fields->Set("event_handler_enabled", host->GetEnableEventHandler());
+ fields->Set("flap_detection_enabled", host->GetEnableFlapping());
+ fields->Set("is_flapping", host->IsFlapping());
+ fields->Set("percent_state_change", host->GetFlappingCurrent());
+ fields->Set("scheduled_downtime_depth", host->GetDowntimeDepth());
+ fields->Set("process_performance_data", host->GetEnablePerfdata());
+ fields->Set("normal_check_interval", host->GetCheckInterval() / 60.0);
+ fields->Set("retry_check_interval", host->GetRetryInterval() / 60.0);
+ fields->Set("check_timeperiod_object_id", host->GetCheckPeriod());
+ fields->Set("is_reachable", host->GetLastReachable());
+ fields->Set("original_attributes", JsonEncode(host->GetOriginalAttributes()));
+
+ fields->Set("current_notification_number", CompatUtility::GetCheckableNotificationNotificationNumber(host));
+ fields->Set("last_notification", DbValue::FromTimestamp(CompatUtility::GetCheckableNotificationLastNotification(host)));
+ fields->Set("next_notification", DbValue::FromTimestamp(CompatUtility::GetCheckableNotificationNextNotification(host)));
+
+ EventCommand::Ptr eventCommand = host->GetEventCommand();
+
+ if (eventCommand)
+ fields->Set("event_handler", eventCommand->GetName());
+
+ CheckCommand::Ptr checkCommand = host->GetCheckCommand();
+
+ if (checkCommand)
+ fields->Set("check_command", checkCommand->GetName());
+
+ return fields;
+}
+
+void HostDbObject::OnConfigUpdateHeavy()
+{
+ Host::Ptr host = static_pointer_cast<Host>(GetObject());
+
+ /* groups */
+ Array::Ptr groups = host->GetGroups();
+
+ std::vector<DbQuery> queries;
+
+ DbQuery query1;
+ query1.Table = DbType::GetByName("HostGroup")->GetTable() + "_members";
+ query1.Type = DbQueryDelete;
+ query1.Category = DbCatConfig;
+ query1.WhereCriteria = new Dictionary();
+ query1.WhereCriteria->Set("host_object_id", host);
+ queries.emplace_back(std::move(query1));
+
+ if (groups) {
+ ObjectLock olock(groups);
+ for (const String& groupName : groups) {
+ HostGroup::Ptr group = HostGroup::GetByName(groupName);
+
+ DbQuery query2;
+ query2.Table = DbType::GetByName("HostGroup")->GetTable() + "_members";
+ query2.Type = DbQueryInsert;
+ query2.Category = DbCatConfig;
+ query2.Fields = new Dictionary({
+ { "instance_id", 0 }, /* DbConnection class fills in real ID */
+ { "hostgroup_id", DbValue::FromObjectInsertID(group) },
+ { "host_object_id", host }
+ });
+ query2.WhereCriteria = new Dictionary({
+ { "instance_id", 0 }, /* DbConnection class fills in real ID */
+ { "hostgroup_id", DbValue::FromObjectInsertID(group) },
+ { "host_object_id", host }
+ });
+ queries.emplace_back(std::move(query2));
+ }
+ }
+
+ DbObject::OnMultipleQueries(queries);
+
+ queries.clear();
+
+ DbQuery query2;
+ query2.Table = GetType()->GetTable() + "_parenthosts";
+ query2.Type = DbQueryDelete;
+ query2.Category = DbCatConfig;
+ query2.WhereCriteria = new Dictionary({
+ { GetType()->GetTable() + "_id", DbValue::FromObjectInsertID(GetObject()) }
+ });
+ queries.emplace_back(std::move(query2));
+
+ /* parents */
+ for (const Checkable::Ptr& checkable : host->GetParents()) {
+ Host::Ptr parent = dynamic_pointer_cast<Host>(checkable);
+
+ if (!parent)
+ continue;
+
+ Log(LogDebug, "HostDbObject")
+ << "host parents: " << parent->GetName();
+
+ /* parents: host_id, parent_host_object_id */
+ DbQuery query1;
+ query1.Table = GetType()->GetTable() + "_parenthosts";
+ query1.Type = DbQueryInsert;
+ query1.Category = DbCatConfig;
+ query1.Fields = new Dictionary({
+ { GetType()->GetTable() + "_id", DbValue::FromObjectInsertID(GetObject()) },
+ { "parent_host_object_id", parent },
+ { "instance_id", 0 } /* DbConnection class fills in real ID */
+ });
+ queries.emplace_back(std::move(query1));
+ }
+
+ DbObject::OnMultipleQueries(queries);
+
+ /* host dependencies */
+ Log(LogDebug, "HostDbObject")
+ << "host dependencies for '" << host->GetName() << "'";
+
+ queries.clear();
+
+ DbQuery query3;
+ query3.Table = GetType()->GetTable() + "dependencies";
+ query3.Type = DbQueryDelete;
+ query3.Category = DbCatConfig;
+ query3.WhereCriteria = new Dictionary({
+ { "dependent_host_object_id", host }
+ });
+ queries.emplace_back(std::move(query3));
+
+ for (const Dependency::Ptr& dep : host->GetDependencies()) {
+ Checkable::Ptr parent = dep->GetParent();
+
+ if (!parent) {
+ Log(LogDebug, "HostDbObject")
+ << "Missing parent for dependency '" << dep->GetName() << "'.";
+ continue;
+ }
+
+ int stateFilter = dep->GetStateFilter();
+
+ Log(LogDebug, "HostDbObject")
+ << "parent host: " << parent->GetName();
+
+ DbQuery query2;
+ query2.Table = GetType()->GetTable() + "dependencies";
+ query2.Type = DbQueryInsert;
+ query2.Category = DbCatConfig;
+ query2.Fields = new Dictionary({
+ { "host_object_id", parent },
+ { "dependent_host_object_id", host },
+ { "inherits_parent", 1 },
+ { "timeperiod_object_id", dep->GetPeriod() },
+ { "fail_on_up", (stateFilter & StateFilterUp) ? 1 : 0 },
+ { "fail_on_down", (stateFilter & StateFilterDown) ? 1 : 0 },
+ { "instance_id", 0 } /* DbConnection class fills in real ID */
+ });
+ queries.emplace_back(std::move(query2));
+ }
+
+ DbObject::OnMultipleQueries(queries);
+
+ Log(LogDebug, "HostDbObject")
+ << "host contacts: " << host->GetName();
+
+ queries.clear();
+
+ DbQuery query4;
+ query4.Table = GetType()->GetTable() + "_contacts";
+ query4.Type = DbQueryDelete;
+ query4.Category = DbCatConfig;
+ query4.WhereCriteria = new Dictionary({
+ { "host_id", DbValue::FromObjectInsertID(host) }
+ });
+ queries.emplace_back(std::move(query4));
+
+ for (const User::Ptr& user : CompatUtility::GetCheckableNotificationUsers(host)) {
+ Log(LogDebug, "HostDbObject")
+ << "host contacts: " << user->GetName();
+
+ DbQuery query_contact;
+ query_contact.Table = GetType()->GetTable() + "_contacts";
+ query_contact.Type = DbQueryInsert;
+ query_contact.Category = DbCatConfig;
+ query_contact.Fields = new Dictionary({
+ { "host_id", DbValue::FromObjectInsertID(host) },
+ { "contact_object_id", user },
+ { "instance_id", 0 } /* DbConnection class fills in real ID */
+ });
+ queries.emplace_back(std::move(query_contact));
+ }
+
+ DbObject::OnMultipleQueries(queries);
+
+ Log(LogDebug, "HostDbObject")
+ << "host contactgroups: " << host->GetName();
+
+ queries.clear();
+
+ DbQuery query5;
+ query5.Table = GetType()->GetTable() + "_contactgroups";
+ query5.Type = DbQueryDelete;
+ query5.Category = DbCatConfig;
+ query5.WhereCriteria = new Dictionary({
+ { "host_id", DbValue::FromObjectInsertID(host) }
+ });
+ queries.emplace_back(std::move(query5));
+
+ for (const UserGroup::Ptr& usergroup : CompatUtility::GetCheckableNotificationUserGroups(host)) {
+ Log(LogDebug, "HostDbObject")
+ << "host contactgroups: " << usergroup->GetName();
+
+ DbQuery query_contact;
+ query_contact.Table = GetType()->GetTable() + "_contactgroups";
+ query_contact.Type = DbQueryInsert;
+ query_contact.Category = DbCatConfig;
+ query_contact.Fields = new Dictionary({
+ { "host_id", DbValue::FromObjectInsertID(host) },
+ { "contactgroup_object_id", usergroup },
+ { "instance_id", 0 } /* DbConnection class fills in real ID */
+ });
+ queries.emplace_back(std::move(query_contact));
+ }
+
+ DbObject::OnMultipleQueries(queries);
+
+ DoCommonConfigUpdate();
+}
+
+void HostDbObject::OnConfigUpdateLight()
+{
+ DoCommonConfigUpdate();
+}
+
+void HostDbObject::DoCommonConfigUpdate()
+{
+ Host::Ptr host = static_pointer_cast<Host>(GetObject());
+
+ /* update comments and downtimes on config change */
+ DbEvents::AddComments(host);
+ DbEvents::AddDowntimes(host);
+}
+
+String HostDbObject::CalculateConfigHash(const Dictionary::Ptr& configFields) const
+{
+ String hashData = DbObject::CalculateConfigHash(configFields);
+
+ Host::Ptr host = static_pointer_cast<Host>(GetObject());
+
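+ /* Related objects are sorted before hashing so that the resulting
+  * config hash does not depend on their enumeration order. */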
+ Array::Ptr groups = host->GetGroups();
+
+ if (groups) {
+ groups = groups->ShallowClone();
+ ObjectLock oLock (groups);
+ std::sort(groups->Begin(), groups->End());
+ hashData += DbObject::HashValue(groups);
+ }
+
+ ArrayData parents;
+
+ /* parents */
+ for (const Checkable::Ptr& checkable : host->GetParents()) {
+ Host::Ptr parent = dynamic_pointer_cast<Host>(checkable);
+
+ if (!parent)
+ continue;
+
+ parents.push_back(parent->GetName());
+ }
+
+ std::sort(parents.begin(), parents.end());
+
+ hashData += DbObject::HashValue(new Array(std::move(parents)));
+
+ ArrayData dependencies;
+
+ /* dependencies */
+ for (const Dependency::Ptr& dep : host->GetDependencies()) {
+ Checkable::Ptr parent = dep->GetParent();
+
+ if (!parent)
+ continue;
+
+ dependencies.push_back(new Array({
+ parent->GetName(),
+ dep->GetStateFilter(),
+ dep->GetPeriodRaw()
+ }));
+ }
+
+ std::sort(dependencies.begin(), dependencies.end());
+
+ hashData += DbObject::HashValue(new Array(std::move(dependencies)));
+
+ ArrayData users;
+
+ for (const User::Ptr& user : CompatUtility::GetCheckableNotificationUsers(host)) {
+ users.push_back(user->GetName());
+ }
+
+ std::sort(users.begin(), users.end());
+
+ hashData += DbObject::HashValue(new Array(std::move(users)));
+
+ ArrayData userGroups;
+
+ for (const UserGroup::Ptr& usergroup : CompatUtility::GetCheckableNotificationUserGroups(host)) {
+ userGroups.push_back(usergroup->GetName());
+ }
+
+ std::sort(userGroups.begin(), userGroups.end());
+
+ hashData += DbObject::HashValue(new Array(std::move(userGroups)));
+
+ return SHA256(hashData);
+}
diff --git a/lib/db_ido/hostdbobject.hpp b/lib/db_ido/hostdbobject.hpp
new file mode 100644
index 0000000..9fff10a
--- /dev/null
+++ b/lib/db_ido/hostdbobject.hpp
@@ -0,0 +1,38 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef HOSTDBOBJECT_H
+#define HOSTDBOBJECT_H
+
+#include "db_ido/dbobject.hpp"
+#include "base/configobject.hpp"
+
+namespace icinga
+{
+
+/**
+ * A Host database object.
+ *
+ * @ingroup ido
+ */
+class HostDbObject final : public DbObject
+{
+public:
+ DECLARE_PTR_TYPEDEFS(HostDbObject);
+
+ HostDbObject(const DbType::Ptr& type, const String& name1, const String& name2);
+
+ Dictionary::Ptr GetConfigFields() const override;
+ Dictionary::Ptr GetStatusFields() const override;
+
+ void OnConfigUpdateHeavy() override;
+ void OnConfigUpdateLight() override;
+
+ String CalculateConfigHash(const Dictionary::Ptr& configFields) const override;
+
+private:
+ void DoCommonConfigUpdate();
+};
+
+}
+
+#endif /* HOSTDBOBJECT_H */
diff --git a/lib/db_ido/hostgroupdbobject.cpp b/lib/db_ido/hostgroupdbobject.cpp
new file mode 100644
index 0000000..cef6aa2
--- /dev/null
+++ b/lib/db_ido/hostgroupdbobject.cpp
@@ -0,0 +1,33 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido/hostgroupdbobject.hpp"
+#include "db_ido/dbtype.hpp"
+#include "db_ido/dbvalue.hpp"
+#include "base/objectlock.hpp"
+#include "base/initialize.hpp"
+#include "base/configtype.hpp"
+
+using namespace icinga;
+
+REGISTER_DBTYPE(HostGroup, "hostgroup", DbObjectTypeHostGroup, "hostgroup_object_id", HostGroupDbObject);
+
+HostGroupDbObject::HostGroupDbObject(const DbType::Ptr& type, const String& name1, const String& name2)
+ : DbObject(type, name1, name2)
+{ }
+
+Dictionary::Ptr HostGroupDbObject::GetConfigFields() const
+{
+ HostGroup::Ptr group = static_pointer_cast<HostGroup>(GetObject());
+
+ return new Dictionary({
+ { "alias", group->GetDisplayName() },
+ { "notes", group->GetNotes() },
+ { "notes_url", group->GetNotesUrl() },
+ { "action_url", group->GetActionUrl() }
+ });
+}
+
+Dictionary::Ptr HostGroupDbObject::GetStatusFields() const
+{
+ return nullptr;
+}
diff --git a/lib/db_ido/hostgroupdbobject.hpp b/lib/db_ido/hostgroupdbobject.hpp
new file mode 100644
index 0000000..9c48f29
--- /dev/null
+++ b/lib/db_ido/hostgroupdbobject.hpp
@@ -0,0 +1,34 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef HOSTGROUPDBOBJECT_H
+#define HOSTGROUPDBOBJECT_H
+
+#include "db_ido/dbobject.hpp"
+#include "icinga/hostgroup.hpp"
+#include "base/configobject.hpp"
+
+namespace icinga
+{
+
+/**
+ * A HostGroup database object.
+ *
+ * @ingroup ido
+ */
+class HostGroupDbObject final : public DbObject
+{
+public:
+ DECLARE_PTR_TYPEDEFS(HostGroupDbObject);
+
+ HostGroupDbObject(const DbType::Ptr& type, const String& name1, const String& name2);
+
+ Dictionary::Ptr GetConfigFields() const override;
+ Dictionary::Ptr GetStatusFields() const override;
+
+private:
+ static void MembersChangedHandler(const HostGroup::Ptr& hgfilter);
+};
+
+}
+
+#endif /* HOSTGROUPDBOBJECT_H */
diff --git a/lib/db_ido/i2-db_ido.hpp b/lib/db_ido/i2-db_ido.hpp
new file mode 100644
index 0000000..1da9fdc
--- /dev/null
+++ b/lib/db_ido/i2-db_ido.hpp
@@ -0,0 +1,14 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef I2DB_IDO_H
+#define I2DB_IDO_H
+
+/**
+ * @defgroup db_ido IDO library
+ *
+ * The Icinga library implements database-agnostic IDO functionality.
+ */
+
+#include "base/i2-base.hpp"
+
+#endif /* I2DB_IDO_H */
diff --git a/lib/db_ido/idochecktask.cpp b/lib/db_ido/idochecktask.cpp
new file mode 100644
index 0000000..3b5856a
--- /dev/null
+++ b/lib/db_ido/idochecktask.cpp
@@ -0,0 +1,197 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido/idochecktask.hpp"
+#include "icinga/host.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/macroprocessor.hpp"
+#include "remote/apilistener.hpp"
+#include "remote/endpoint.hpp"
+#include "remote/zone.hpp"
+#include "base/function.hpp"
+#include "base/utility.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/configtype.hpp"
+#include "base/convert.hpp"
+#include <utility>
+
+using namespace icinga;
+
+REGISTER_FUNCTION_NONCONST(Internal, IdoCheck, &IdoCheckTask::ScriptFunc, "checkable:cr:resolvedMacros:useResolvedMacros");
+
+static void ReportIdoCheck(
+ const Checkable::Ptr& checkable, const CheckCommand::Ptr& commandObj,
+ const CheckResult::Ptr& cr, String output, ServiceState state = ServiceUnknown
+)
+{
+ if (Checkable::ExecuteCommandProcessFinishedHandler) {
+ double now = Utility::GetTime();
+ ProcessResult pr;
+ pr.PID = -1;
+ pr.Output = std::move(output);
+ pr.ExecutionStart = now;
+ pr.ExecutionEnd = now;
+ pr.ExitStatus = state;
+
+ Checkable::ExecuteCommandProcessFinishedHandler(commandObj->GetName(), pr);
+ } else {
+ cr->SetState(state);
+ cr->SetOutput(output);
+ checkable->ProcessCheckResult(cr);
+ }
+}
+
+void IdoCheckTask::ScriptFunc(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
+{
+ ServiceState state;
+ CheckCommand::Ptr commandObj = CheckCommand::ExecuteOverride ? CheckCommand::ExecuteOverride : checkable->GetCheckCommand();
+ Value raw_command = commandObj->GetCommandLine();
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ MacroProcessor::ResolverList resolvers;
+
+ if (MacroResolver::OverrideMacros)
+ resolvers.emplace_back("override", MacroResolver::OverrideMacros);
+
+ if (service)
+ resolvers.emplace_back("service", service);
+ resolvers.emplace_back("host", host);
+ resolvers.emplace_back("command", commandObj);
+
+ String idoType = MacroProcessor::ResolveMacros("$ido_type$", resolvers, checkable->GetLastCheckResult(),
+ nullptr, MacroProcessor::EscapeCallback(), resolvedMacros, useResolvedMacros);
+
+ String idoName = MacroProcessor::ResolveMacros("$ido_name$", resolvers, checkable->GetLastCheckResult(),
+ nullptr, MacroProcessor::EscapeCallback(), resolvedMacros, useResolvedMacros);
+
+ String missingQueriesWarning;
+ String missingQueriesCritical;
+ String missingPendingQueriesWarning;
+ String missingPendingQueriesCritical;
+
+ double queriesWarning = MacroProcessor::ResolveMacros("$ido_queries_warning$", resolvers, checkable->GetLastCheckResult(),
+ &missingQueriesWarning, MacroProcessor::EscapeCallback(), resolvedMacros, useResolvedMacros);
+
+ double queriesCritical = MacroProcessor::ResolveMacros("$ido_queries_critical$", resolvers, checkable->GetLastCheckResult(),
+ &missingQueriesCritical, MacroProcessor::EscapeCallback(), resolvedMacros, useResolvedMacros);
+
+ double pendingQueriesWarning = MacroProcessor::ResolveMacros("$ido_pending_queries_warning$", resolvers, checkable->GetLastCheckResult(),
+ &missingPendingQueriesWarning, MacroProcessor::EscapeCallback(), resolvedMacros, useResolvedMacros);
+
+ double pendingQueriesCritical = MacroProcessor::ResolveMacros("$ido_pending_queries_critical$", resolvers, checkable->GetLastCheckResult(),
+ &missingPendingQueriesCritical, MacroProcessor::EscapeCallback(), resolvedMacros, useResolvedMacros);
+
+ if (resolvedMacros && !useResolvedMacros)
+ return;
+
+ if (idoType.IsEmpty()) {
+ ReportIdoCheck(checkable, commandObj, cr, "Attribute 'ido_type' must be set.");
+ return;
+ }
+
+ if (idoName.IsEmpty()) {
+ ReportIdoCheck(checkable, commandObj, cr, "Attribute 'ido_name' must be set.");
+ return;
+ }
+
+ Type::Ptr type = Type::GetByName(idoType);
+
+ if (!type || !DbConnection::TypeInstance->IsAssignableFrom(type)) {
+ ReportIdoCheck(checkable, commandObj, cr, "DB IDO type '" + idoType + "' is invalid.");
+ return;
+ }
+
+ auto *dtype = dynamic_cast<ConfigType *>(type.get());
+ VERIFY(dtype);
+
+ DbConnection::Ptr conn = static_pointer_cast<DbConnection>(dtype->GetObject(idoName));
+
+ if (!conn) {
+ ReportIdoCheck(checkable, commandObj, cr, "DB IDO connection '" + idoName + "' does not exist.");
+ return;
+ }
+
+ double qps = conn->GetQueryCount(60) / 60.0;
+
+ if (conn->IsPaused()) {
+ ReportIdoCheck(checkable, commandObj, cr, "DB IDO connection is temporarily disabled on this cluster instance.", ServiceOK);
+ return;
+ }
+
+ double pendingQueries = conn->GetPendingQueryCount();
+
+ if (!conn->GetConnected()) {
+ if (conn->GetShouldConnect()) {
+ ReportIdoCheck(checkable, commandObj, cr, "Could not connect to the database server.", ServiceCritical);
+ } else {
+ ReportIdoCheck(
+ checkable, commandObj, cr,
+ "Not currently enabled: Another cluster instance is responsible for the IDO database.", ServiceOK
+ );
+ }
+ return;
+ }
+
+ /* Schema versions. */
+ String schema_version = conn->GetSchemaVersion();
+ std::ostringstream msgbuf;
+
+ if (Utility::CompareVersion(conn->GetLatestSchemaVersion(), schema_version) < 0) {
+ msgbuf << "Outdated schema version: '" << schema_version << "'. Latest version: '"
+ << conn->GetLatestSchemaVersion() << "'."
+ << " Queries per second: " << std::fixed << std::setprecision(3) << qps
+ << " Pending queries: " << std::fixed << std::setprecision(3) << pendingQueries << ".";
+
+ state = ServiceWarning;
+ } else {
+ msgbuf << "Connected to the database server (Schema version: '" << schema_version << "')."
+ << " Queries per second: " << std::fixed << std::setprecision(3) << qps
+ << " Pending queries: " << std::fixed << std::setprecision(3) << pendingQueries << ".";
+
+ state = ServiceOK;
+ }
+
+ if (conn->GetEnableHa()) {
+ double failoverTs = conn->GetLastFailover();
+
+ msgbuf << " Last failover: " << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", failoverTs) << ".";
+ }
+
+ /* Check whether the thresholds have been defined and match. */
+ if (missingQueriesCritical.IsEmpty() && qps < queriesCritical) {
+ msgbuf << " " << qps << " queries/s lower than critical threshold (" << queriesCritical << " queries/s).";
+
+ state = ServiceCritical;
+ } else if (missingQueriesWarning.IsEmpty() && qps < queriesWarning) {
+ msgbuf << " " << qps << " queries/s lower than warning threshold (" << queriesWarning << " queries/s).";
+
+ state = ServiceWarning;
+ }
+
+ if (missingPendingQueriesCritical.IsEmpty() && pendingQueries > pendingQueriesCritical) {
+ msgbuf << " " << pendingQueries << " pending queries greater than critical threshold ("
+ << pendingQueriesCritical << " queries).";
+
+ state = ServiceCritical;
+ } else if (missingPendingQueriesWarning.IsEmpty() && pendingQueries > pendingQueriesWarning) {
+ msgbuf << " " << pendingQueries << " pending queries greater than warning threshold ("
+ << pendingQueriesWarning << " queries).";
+
+ if (state == ServiceOK) {
+ state = ServiceWarning;
+ }
+ }
+
+ cr->SetPerformanceData(new Array({
+ { new PerfdataValue("queries", qps, false, "", queriesWarning, queriesCritical) },
+ { new PerfdataValue("queries_1min", conn->GetQueryCount(60)) },
+ { new PerfdataValue("queries_5mins", conn->GetQueryCount(5 * 60)) },
+ { new PerfdataValue("queries_15mins", conn->GetQueryCount(15 * 60)) },
+ { new PerfdataValue("pending_queries", pendingQueries, false, "", pendingQueriesWarning, pendingQueriesCritical) }
+ }));
+
+ ReportIdoCheck(checkable, commandObj, cr, msgbuf.str(), state);
+}
diff --git a/lib/db_ido/idochecktask.hpp b/lib/db_ido/idochecktask.hpp
new file mode 100644
index 0000000..5868c38
--- /dev/null
+++ b/lib/db_ido/idochecktask.hpp
@@ -0,0 +1,29 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef IDOCHECKTASK_H
+#define IDOCHECKTASK_H
+
+#include "db_ido/dbconnection.hpp"
+#include "icinga/checkable.hpp"
+
+namespace icinga
+{
+
+/**
+ * IDO check type.
+ *
+ * @ingroup db_ido
+ */
+class IdoCheckTask
+{
+public:
+ static void ScriptFunc(const Checkable::Ptr& service, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros);
+
+private:
+ IdoCheckTask();
+};
+
+}
+
+#endif /* IDOCHECKTASK_H */
diff --git a/lib/db_ido/servicedbobject.cpp b/lib/db_ido/servicedbobject.cpp
new file mode 100644
index 0000000..7f711df
--- /dev/null
+++ b/lib/db_ido/servicedbobject.cpp
@@ -0,0 +1,359 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido/servicedbobject.hpp"
+#include "db_ido/servicegroupdbobject.hpp"
+#include "db_ido/dbtype.hpp"
+#include "db_ido/dbvalue.hpp"
+#include "db_ido/dbevents.hpp"
+#include "icinga/notification.hpp"
+#include "icinga/dependency.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/eventcommand.hpp"
+#include "icinga/externalcommandprocessor.hpp"
+#include "icinga/compatutility.hpp"
+#include "icinga/pluginutility.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "remote/endpoint.hpp"
+#include "base/convert.hpp"
+#include "base/objectlock.hpp"
+#include "base/initialize.hpp"
+#include "base/configtype.hpp"
+#include "base/utility.hpp"
+#include "base/logger.hpp"
+#include "base/json.hpp"
+#include <boost/algorithm/string/join.hpp>
+
+using namespace icinga;
+
+REGISTER_DBTYPE(Service, "service", DbObjectTypeService, "service_object_id", ServiceDbObject);
+
+ServiceDbObject::ServiceDbObject(const DbType::Ptr& type, const String& name1, const String& name2)
+ : DbObject(type, name1, name2)
+{ }
+
+Dictionary::Ptr ServiceDbObject::GetConfigFields() const
+{
+ Service::Ptr service = static_pointer_cast<Service>(GetObject());
+ Host::Ptr host = service->GetHost();
+
+ unsigned long notificationStateFilter = CompatUtility::GetCheckableNotificationStateFilter(service);
+ unsigned long notificationTypeFilter = CompatUtility::GetCheckableNotificationTypeFilter(service);
+
+ return new Dictionary({
+ { "host_object_id", host },
+ { "display_name", service->GetDisplayName() },
+ { "check_command_object_id", service->GetCheckCommand() },
+ { "eventhandler_command_object_id", service->GetEventCommand() },
+ { "check_timeperiod_object_id", service->GetCheckPeriod() },
+ { "check_interval", service->GetCheckInterval() / 60.0 },
+ { "retry_interval", service->GetRetryInterval() / 60.0 },
+ { "max_check_attempts", service->GetMaxCheckAttempts() },
+ { "is_volatile", service->GetVolatile() },
+ { "flap_detection_enabled", service->GetEnableFlapping() },
+ { "low_flap_threshold", service->GetFlappingThresholdLow() },
+ { "high_flap_threshold", service->GetFlappingThresholdLow() },
+ { "process_performance_data", service->GetEnablePerfdata() },
+ { "freshness_checks_enabled", 1 },
+ { "freshness_threshold", Convert::ToLong(service->GetCheckInterval()) },
+ { "event_handler_enabled", service->GetEnableEventHandler() },
+ { "passive_checks_enabled", service->GetEnablePassiveChecks() },
+ { "active_checks_enabled", service->GetEnableActiveChecks() },
+ { "notifications_enabled", service->GetEnableNotifications() },
+ { "notes", service->GetNotes() },
+ { "notes_url", service->GetNotesUrl() },
+ { "action_url", service->GetActionUrl() },
+ { "icon_image", service->GetIconImage() },
+ { "icon_image_alt", service->GetIconImageAlt() },
+ { "notification_interval", CompatUtility::GetCheckableNotificationNotificationInterval(service) },
+ { "notify_on_warning", (notificationStateFilter & ServiceWarning) ? 1 : 0 },
+ { "notify_on_unknown", (notificationStateFilter & ServiceUnknown) ? 1 : 0 },
+ { "notify_on_critical", (notificationStateFilter & ServiceCritical) ? 1 : 0 },
+ { "notify_on_recovery", (notificationTypeFilter & NotificationRecovery) ? 1 : 0 },
+ { "notify_on_flapping", (notificationTypeFilter & (NotificationFlappingStart | NotificationFlappingEnd)) ? 1 : 0 },
+ { "notify_on_downtime", (notificationTypeFilter & (NotificationDowntimeStart | NotificationDowntimeEnd | NotificationDowntimeRemoved)) ? 1 : 0 }
+ });
+}
+
+Dictionary::Ptr ServiceDbObject::GetStatusFields() const
+{
+ Dictionary::Ptr fields = new Dictionary();
+ Service::Ptr service = static_pointer_cast<Service>(GetObject());
+ CheckResult::Ptr cr = service->GetLastCheckResult();
+
+ if (cr) {
+ fields->Set("output", CompatUtility::GetCheckResultOutput(cr));
+ fields->Set("long_output", CompatUtility::GetCheckResultLongOutput(cr));
+ fields->Set("perfdata", PluginUtility::FormatPerfdata(cr->GetPerformanceData()));
+ fields->Set("check_source", cr->GetCheckSource());
+ fields->Set("latency", cr->CalculateLatency());
+ fields->Set("execution_time", cr->CalculateExecutionTime());
+ }
+
+ fields->Set("current_state", service->GetState());
+ fields->Set("has_been_checked", service->HasBeenChecked());
+ fields->Set("should_be_scheduled", service->GetEnableActiveChecks());
+ fields->Set("current_check_attempt", service->GetCheckAttempt());
+ fields->Set("max_check_attempts", service->GetMaxCheckAttempts());
+ fields->Set("last_check", DbValue::FromTimestamp(service->GetLastCheck()));
+ fields->Set("next_check", DbValue::FromTimestamp(service->GetNextCheck()));
+ fields->Set("check_type", !service->GetEnableActiveChecks()); /* 0 .. active, 1 .. passive */
+ fields->Set("last_state_change", DbValue::FromTimestamp(service->GetLastStateChange()));
+ fields->Set("last_hard_state_change", DbValue::FromTimestamp(service->GetLastHardStateChange()));
+ fields->Set("last_hard_state", service->GetLastHardState());
+ fields->Set("last_time_ok", DbValue::FromTimestamp(service->GetLastStateOK()));
+ fields->Set("last_time_warning", DbValue::FromTimestamp(service->GetLastStateWarning()));
+ fields->Set("last_time_critical", DbValue::FromTimestamp(service->GetLastStateCritical()));
+ fields->Set("last_time_unknown", DbValue::FromTimestamp(service->GetLastStateUnknown()));
+ fields->Set("state_type", service->GetStateType());
+ fields->Set("notifications_enabled", service->GetEnableNotifications());
+ fields->Set("problem_has_been_acknowledged", service->GetAcknowledgement() != AcknowledgementNone);
+ fields->Set("acknowledgement_type", service->GetAcknowledgement());
+ fields->Set("passive_checks_enabled", service->GetEnablePassiveChecks());
+ fields->Set("active_checks_enabled", service->GetEnableActiveChecks());
+ fields->Set("event_handler_enabled", service->GetEnableEventHandler());
+ fields->Set("flap_detection_enabled", service->GetEnableFlapping());
+ fields->Set("is_flapping", service->IsFlapping());
+ fields->Set("percent_state_change", service->GetFlappingCurrent());
+ fields->Set("scheduled_downtime_depth", service->GetDowntimeDepth());
+ fields->Set("process_performance_data", service->GetEnablePerfdata());
+ fields->Set("normal_check_interval", service->GetCheckInterval() / 60.0);
+ fields->Set("retry_check_interval", service->GetRetryInterval() / 60.0);
+ fields->Set("check_timeperiod_object_id", service->GetCheckPeriod());
+ fields->Set("is_reachable", service->GetLastReachable());
+ fields->Set("original_attributes", JsonEncode(service->GetOriginalAttributes()));
+
+ fields->Set("current_notification_number", CompatUtility::GetCheckableNotificationNotificationNumber(service));
+ fields->Set("last_notification", DbValue::FromTimestamp(CompatUtility::GetCheckableNotificationLastNotification(service)));
+ fields->Set("next_notification", DbValue::FromTimestamp(CompatUtility::GetCheckableNotificationNextNotification(service)));
+
+ EventCommand::Ptr eventCommand = service->GetEventCommand();
+
+ if (eventCommand)
+ fields->Set("event_handler", eventCommand->GetName());
+
+ CheckCommand::Ptr checkCommand = service->GetCheckCommand();
+
+ if (checkCommand)
+ fields->Set("check_command", checkCommand->GetName());
+
+ return fields;
+}
+
+void ServiceDbObject::OnConfigUpdateHeavy()
+{
+ Service::Ptr service = static_pointer_cast<Service>(GetObject());
+
+ /* groups */
+ Array::Ptr groups = service->GetGroups();
+
+ std::vector<DbQuery> queries;
+
+ DbQuery query1;
+ query1.Table = DbType::GetByName("ServiceGroup")->GetTable() + "_members";
+ query1.Type = DbQueryDelete;
+ query1.Category = DbCatConfig;
+ query1.WhereCriteria = new Dictionary({
+ { "service_object_id", service }
+ });
+ queries.emplace_back(std::move(query1));
+
+ if (groups) {
+ ObjectLock olock(groups);
+ for (const String& groupName : groups) {
+ ServiceGroup::Ptr group = ServiceGroup::GetByName(groupName);
+
+ DbQuery query2;
+ query2.Table = DbType::GetByName("ServiceGroup")->GetTable() + "_members";
+ query2.Type = DbQueryInsert;
+ query2.Category = DbCatConfig;
+ query2.Fields = new Dictionary({
+ { "instance_id", 0 }, /* DbConnection class fills in real ID */
+ { "servicegroup_id", DbValue::FromObjectInsertID(group) },
+ { "service_object_id", service }
+ });
+ query2.WhereCriteria = new Dictionary({
+ { "instance_id", 0 }, /* DbConnection class fills in real ID */
+ { "servicegroup_id", DbValue::FromObjectInsertID(group) },
+ { "service_object_id", service }
+ });
+ queries.emplace_back(std::move(query2));
+ }
+ }
+
+ DbObject::OnMultipleQueries(queries);
+
+ /* service dependencies */
+ queries.clear();
+
+ DbQuery query2;
+ query2.Table = GetType()->GetTable() + "dependencies";
+ query2.Type = DbQueryDelete;
+ query2.Category = DbCatConfig;
+ query2.WhereCriteria = new Dictionary({
+ { "dependent_service_object_id", service }
+ });
+ queries.emplace_back(std::move(query2));
+
+ for (const Dependency::Ptr& dep : service->GetDependencies()) {
+ Checkable::Ptr parent = dep->GetParent();
+
+ if (!parent) {
+ Log(LogDebug, "ServiceDbObject")
+ << "Missing parent for dependency '" << dep->GetName() << "'.";
+ continue;
+ }
+
+ Log(LogDebug, "ServiceDbObject")
+ << "service parents: " << parent->GetName();
+
+ int stateFilter = dep->GetStateFilter();
+
+ /* service dependencies */
+ DbQuery query1;
+ query1.Table = GetType()->GetTable() + "dependencies";
+ query1.Type = DbQueryInsert;
+ query1.Category = DbCatConfig;
+ query1.Fields = new Dictionary({
+ { "service_object_id", parent },
+ { "dependent_service_object_id", service },
+ { "inherits_parent", 1 },
+ { "timeperiod_object_id", dep->GetPeriod() },
+ { "fail_on_ok", (stateFilter & StateFilterOK) ? 1 : 0 },
+ { "fail_on_warning", (stateFilter & StateFilterWarning) ? 1 : 0 },
+ { "fail_on_critical", (stateFilter & StateFilterCritical) ? 1 : 0 },
+ { "fail_on_unknown", (stateFilter & StateFilterUnknown) ? 1 : 0 },
+ { "instance_id", 0 } /* DbConnection class fills in real ID */
+ });
+ queries.emplace_back(std::move(query1));
+ }
+
+ DbObject::OnMultipleQueries(queries);
+
+ /* service contacts, contactgroups */
+ queries.clear();
+
+ DbQuery query3;
+ query3.Table = GetType()->GetTable() + "_contacts";
+ query3.Type = DbQueryDelete;
+ query3.Category = DbCatConfig;
+ query3.WhereCriteria = new Dictionary({
+ { "service_id", DbValue::FromObjectInsertID(service) }
+ });
+ queries.emplace_back(std::move(query3));
+
+ for (const User::Ptr& user : CompatUtility::GetCheckableNotificationUsers(service)) {
+ DbQuery query_contact;
+ query_contact.Table = GetType()->GetTable() + "_contacts";
+ query_contact.Type = DbQueryInsert;
+ query_contact.Category = DbCatConfig;
+ query_contact.Fields = new Dictionary({
+ { "service_id", DbValue::FromObjectInsertID(service) },
+ { "contact_object_id", user },
+ { "instance_id", 0 } /* DbConnection class fills in real ID */
+
+ });
+ queries.emplace_back(std::move(query_contact));
+ }
+
+ DbObject::OnMultipleQueries(queries);
+
+ queries.clear();
+
+ DbQuery query4;
+ query4.Table = GetType()->GetTable() + "_contactgroups";
+ query4.Type = DbQueryDelete;
+ query4.Category = DbCatConfig;
+ query4.WhereCriteria = new Dictionary({
+ { "service_id", DbValue::FromObjectInsertID(service) }
+ });
+ queries.emplace_back(std::move(query4));
+
+ for (const UserGroup::Ptr& usergroup : CompatUtility::GetCheckableNotificationUserGroups(service)) {
+ DbQuery query_contact;
+ query_contact.Table = GetType()->GetTable() + "_contactgroups";
+ query_contact.Type = DbQueryInsert;
+ query_contact.Category = DbCatConfig;
+ query_contact.Fields = new Dictionary({
+ { "service_id", DbValue::FromObjectInsertID(service) },
+ { "contactgroup_object_id", usergroup },
+ { "instance_id", 0 } /* DbConnection class fills in real ID */
+ });
+ queries.emplace_back(std::move(query_contact));
+ }
+
+ DbObject::OnMultipleQueries(queries);
+
+ DoCommonConfigUpdate();
+}
+
+void ServiceDbObject::OnConfigUpdateLight()
+{
+ DoCommonConfigUpdate();
+}
+
+void ServiceDbObject::DoCommonConfigUpdate()
+{
+ Service::Ptr service = static_pointer_cast<Service>(GetObject());
+
+ /* update comments and downtimes on config change */
+ DbEvents::AddComments(service);
+ DbEvents::AddDowntimes(service);
+}
+
+String ServiceDbObject::CalculateConfigHash(const Dictionary::Ptr& configFields) const
+{
+ String hashData = DbObject::CalculateConfigHash(configFields);
+
+ Service::Ptr service = static_pointer_cast<Service>(GetObject());
+
+ Array::Ptr groups = service->GetGroups();
+
+ if (groups) {
+ groups = groups->ShallowClone();
+ ObjectLock oLock (groups);
+ std::sort(groups->Begin(), groups->End());
+ hashData += DbObject::HashValue(groups);
+ }
+
+ ArrayData dependencies;
+
+ /* dependencies */
+ for (const Dependency::Ptr& dep : service->GetDependencies()) {
+ Checkable::Ptr parent = dep->GetParent();
+
+ if (!parent)
+ continue;
+
+ dependencies.push_back(new Array({
+ parent->GetName(),
+ dep->GetStateFilter(),
+ dep->GetPeriodRaw()
+ }));
+ }
+
+ std::sort(dependencies.begin(), dependencies.end());
+
+ hashData += DbObject::HashValue(new Array(std::move(dependencies)));
+
+ ArrayData users;
+
+ for (const User::Ptr& user : CompatUtility::GetCheckableNotificationUsers(service)) {
+ users.push_back(user->GetName());
+ }
+
+ std::sort(users.begin(), users.end());
+
+ hashData += DbObject::HashValue(new Array(std::move(users)));
+
+ ArrayData userGroups;
+
+ for (const UserGroup::Ptr& usergroup : CompatUtility::GetCheckableNotificationUserGroups(service)) {
+ userGroups.push_back(usergroup->GetName());
+ }
+
+ std::sort(userGroups.begin(), userGroups.end());
+
+ hashData += DbObject::HashValue(new Array(std::move(userGroups)));
+
+ return SHA256(hashData);
+}
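
CalculateConfigHash above only mixes groups, dependencies, users and user groups into the digest after sorting them, so the resulting SHA256 stays stable when only the iteration order of those collections changes. A standalone sketch of the same idea, using std::hash over a joined string instead of the real SHA256/HashValue helpers:

#include <algorithm>
#include <cstdio>
#include <functional>
#include <string>
#include <vector>

// Order-independent digest: sort the members before feeding them to the hash,
// so {"web", "db"} and {"db", "web"} produce the same value.
std::size_t StableHash(std::vector<std::string> members)
{
	std::sort(members.begin(), members.end());

	std::string joined;
	for (const auto& m : members)
		joined += m + '\n';

	return std::hash<std::string>{}(joined);
}

int main()
{
	std::printf("%zu\n", StableHash({ "web", "db" }));
	std::printf("%zu\n", StableHash({ "db", "web" })); // same digest
}
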
diff --git a/lib/db_ido/servicedbobject.hpp b/lib/db_ido/servicedbobject.hpp
new file mode 100644
index 0000000..19824be
--- /dev/null
+++ b/lib/db_ido/servicedbobject.hpp
@@ -0,0 +1,41 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef SERVICEDBOBJECT_H
+#define SERVICEDBOBJECT_H
+
+#include "db_ido/dbobject.hpp"
+#include "base/configobject.hpp"
+#include "icinga/service.hpp"
+
+namespace icinga
+{
+
+/**
+ * A Service database object.
+ *
+ * @ingroup ido
+ */
+class ServiceDbObject final : public DbObject
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ServiceDbObject);
+
+ ServiceDbObject(const DbType::Ptr& type, const String& name1, const String& name2);
+
+ static void StaticInitialize();
+
+ Dictionary::Ptr GetConfigFields() const override;
+ Dictionary::Ptr GetStatusFields() const override;
+
+ void OnConfigUpdateHeavy() override;
+ void OnConfigUpdateLight() override;
+
+ String CalculateConfigHash(const Dictionary::Ptr& configFields) const override;
+
+private:
+ void DoCommonConfigUpdate();
+};
+
+}
+
+#endif /* SERVICEDBOBJECT_H */
diff --git a/lib/db_ido/servicegroupdbobject.cpp b/lib/db_ido/servicegroupdbobject.cpp
new file mode 100644
index 0000000..ea4d40c
--- /dev/null
+++ b/lib/db_ido/servicegroupdbobject.cpp
@@ -0,0 +1,32 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido/servicegroupdbobject.hpp"
+#include "db_ido/dbtype.hpp"
+#include "db_ido/dbvalue.hpp"
+#include "base/objectlock.hpp"
+#include "base/initialize.hpp"
+
+using namespace icinga;
+
+REGISTER_DBTYPE(ServiceGroup, "servicegroup", DbObjectTypeServiceGroup, "servicegroup_object_id", ServiceGroupDbObject);
+
+ServiceGroupDbObject::ServiceGroupDbObject(const DbType::Ptr& type, const String& name1, const String& name2)
+ : DbObject(type, name1, name2)
+{ }
+
+Dictionary::Ptr ServiceGroupDbObject::GetConfigFields() const
+{
+ ServiceGroup::Ptr group = static_pointer_cast<ServiceGroup>(GetObject());
+
+ return new Dictionary({
+ { "alias", group->GetDisplayName() },
+ { "notes", group->GetNotes() },
+ { "notes_url", group->GetNotesUrl() },
+ { "action_url", group->GetActionUrl() }
+ });
+}
+
+Dictionary::Ptr ServiceGroupDbObject::GetStatusFields() const
+{
+ return nullptr;
+}
diff --git a/lib/db_ido/servicegroupdbobject.hpp b/lib/db_ido/servicegroupdbobject.hpp
new file mode 100644
index 0000000..7f0d6c1
--- /dev/null
+++ b/lib/db_ido/servicegroupdbobject.hpp
@@ -0,0 +1,31 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef SERVICEGROUPDBOBJECT_H
+#define SERVICEGROUPDBOBJECT_H
+
+#include "db_ido/dbobject.hpp"
+#include "icinga/servicegroup.hpp"
+#include "base/configobject.hpp"
+
+namespace icinga
+{
+
+/**
+ * A ServiceGroup database object.
+ *
+ * @ingroup ido
+ */
+class ServiceGroupDbObject final : public DbObject
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ServiceGroupDbObject);
+
+ ServiceGroupDbObject(const DbType::Ptr& type, const String& name1, const String& name2);
+
+ Dictionary::Ptr GetConfigFields() const override;
+ Dictionary::Ptr GetStatusFields() const override;
+};
+
+}
+
+#endif /* SERVICEGROUPDBOBJECT_H */
diff --git a/lib/db_ido/timeperioddbobject.cpp b/lib/db_ido/timeperioddbobject.cpp
new file mode 100644
index 0000000..98997f5
--- /dev/null
+++ b/lib/db_ido/timeperioddbobject.cpp
@@ -0,0 +1,85 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido/timeperioddbobject.hpp"
+#include "db_ido/dbtype.hpp"
+#include "db_ido/dbvalue.hpp"
+#include "icinga/timeperiod.hpp"
+#include "icinga/legacytimeperiod.hpp"
+#include "base/utility.hpp"
+#include "base/exception.hpp"
+#include "base/objectlock.hpp"
+
+using namespace icinga;
+
+REGISTER_DBTYPE(TimePeriod, "timeperiod", DbObjectTypeTimePeriod, "timeperiod_object_id", TimePeriodDbObject);
+
+TimePeriodDbObject::TimePeriodDbObject(const DbType::Ptr& type, const String& name1, const String& name2)
+ : DbObject(type, name1, name2)
+{ }
+
+Dictionary::Ptr TimePeriodDbObject::GetConfigFields() const
+{
+ TimePeriod::Ptr tp = static_pointer_cast<TimePeriod>(GetObject());
+
+ return new Dictionary({
+ { "alias", tp->GetDisplayName() }
+ });
+}
+
+Dictionary::Ptr TimePeriodDbObject::GetStatusFields() const
+{
+ return Empty;
+}
+
+void TimePeriodDbObject::OnConfigUpdateHeavy()
+{
+ TimePeriod::Ptr tp = static_pointer_cast<TimePeriod>(GetObject());
+
+ DbQuery query_del1;
+ query_del1.Table = GetType()->GetTable() + "_timeranges";
+ query_del1.Type = DbQueryDelete;
+ query_del1.Category = DbCatConfig;
+ query_del1.WhereCriteria = new Dictionary({
+ { "timeperiod_id", DbValue::FromObjectInsertID(tp) }
+ });
+ OnQuery(query_del1);
+
+ Dictionary::Ptr ranges = tp->GetRanges();
+
+ if (!ranges)
+ return;
+
+ time_t refts = Utility::GetTime();
+ ObjectLock olock(ranges);
+ for (const Dictionary::Pair& kv : ranges) {
+ int wday = LegacyTimePeriod::WeekdayFromString(kv.first);
+
+ if (wday == -1)
+ continue;
+
+ tm reference = Utility::LocalTime(refts);
+
+ Array::Ptr segments = new Array();
+ LegacyTimePeriod::ProcessTimeRanges(kv.second, &reference, segments);
+
+ ObjectLock olock(segments);
+ for (const Value& vsegment : segments) {
+ Dictionary::Ptr segment = vsegment;
+ int begin = segment->Get("begin");
+ int end = segment->Get("end");
+
+ DbQuery query;
+ query.Table = GetType()->GetTable() + "_timeranges";
+ query.Type = DbQueryInsert;
+ query.Category = DbCatConfig;
+ query.Fields = new Dictionary({
+ { "instance_id", 0 }, /* DbConnection class fills in real ID */
+ { "timeperiod_id", DbValue::FromObjectInsertID(tp) },
+ { "day", wday },
+ { "start_sec", begin % 86400 },
+ { "end_sec", end % 86400 }
+ });
+ OnQuery(query);
+ }
+ }
+}
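
OnConfigUpdateHeavy above expands every weekday range of a TimePeriod into one timeperiod_timeranges row holding seconds-of-day offsets (begin % 86400, end % 86400). A simplified standalone sketch of that conversion for a single range, taking already-split hour/minute values instead of running the real LegacyTimePeriod parser:

#include <cstdio>
#include <utility>

// Convert an "HH:MM-HH:MM" style range into (start_sec, end_sec) offsets within a day,
// mirroring the begin % 86400 / end % 86400 columns written above.
std::pair<int, int> RangeToSeconds(int fromHour, int fromMin, int toHour, int toMin)
{
	int begin = fromHour * 3600 + fromMin * 60;
	int end = toHour * 3600 + toMin * 60;
	return { begin % 86400, end % 86400 };
}

int main()
{
	auto r = RangeToSeconds(9, 0, 17, 30); // "09:00-17:30"
	std::printf("start_sec=%d end_sec=%d\n", r.first, r.second); // 32400 63000
}
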
diff --git a/lib/db_ido/timeperioddbobject.hpp b/lib/db_ido/timeperioddbobject.hpp
new file mode 100644
index 0000000..e3cc13c
--- /dev/null
+++ b/lib/db_ido/timeperioddbobject.hpp
@@ -0,0 +1,33 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef TIMEPERIODDBOBJECT_H
+#define TIMEPERIODDBOBJECT_H
+
+#include "db_ido/dbobject.hpp"
+#include "base/configobject.hpp"
+
+namespace icinga
+{
+
+/**
+ * A TimePeriod database object.
+ *
+ * @ingroup ido
+ */
+class TimePeriodDbObject final : public DbObject
+{
+public:
+ DECLARE_PTR_TYPEDEFS(TimePeriodDbObject);
+
+ TimePeriodDbObject(const DbType::Ptr& type, const String& name1, const String& name2);
+
+protected:
+ Dictionary::Ptr GetConfigFields() const override;
+ Dictionary::Ptr GetStatusFields() const override;
+
+ void OnConfigUpdateHeavy() override;
+};
+
+}
+
+#endif /* TIMEPERIODDBOBJECT_H */
diff --git a/lib/db_ido/userdbobject.cpp b/lib/db_ido/userdbobject.cpp
new file mode 100644
index 0000000..439b8fb
--- /dev/null
+++ b/lib/db_ido/userdbobject.cpp
@@ -0,0 +1,161 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido/userdbobject.hpp"
+#include "db_ido/usergroupdbobject.hpp"
+#include "db_ido/dbtype.hpp"
+#include "db_ido/dbvalue.hpp"
+#include "icinga/user.hpp"
+#include "icinga/notification.hpp"
+#include "base/convert.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+
+using namespace icinga;
+
+REGISTER_DBTYPE(User, "contact", DbObjectTypeContact, "contact_object_id", UserDbObject);
+
+UserDbObject::UserDbObject(const DbType::Ptr& type, const String& name1, const String& name2)
+ : DbObject(type, name1, name2)
+{ }
+
+Dictionary::Ptr UserDbObject::GetConfigFields() const
+{
+ User::Ptr user = static_pointer_cast<User>(GetObject());
+
+ int typeFilter = user->GetTypeFilter();
+ int stateFilter = user->GetStateFilter();
+
+ return new Dictionary({
+ { "alias", user->GetDisplayName() },
+ { "email_address", user->GetEmail() },
+ { "pager_address", user->GetPager() },
+ { "host_timeperiod_object_id", user->GetPeriod() },
+ { "service_timeperiod_object_id", user->GetPeriod() },
+ { "host_notifications_enabled", user->GetEnableNotifications() },
+ { "service_notifications_enabled", user->GetEnableNotifications() },
+ { "can_submit_commands", 1 },
+ { "notify_service_recovery", (typeFilter & NotificationRecovery) ? 1 : 0 },
+ { "notify_service_warning", (stateFilter & StateFilterWarning) ? 1 : 0 },
+ { "notify_service_unknown", (stateFilter & StateFilterUnknown) ? 1 : 0 },
+ { "notify_service_critical", (stateFilter & StateFilterCritical) ? 1 : 0 },
+ { "notify_service_flapping", (typeFilter & (NotificationFlappingStart | NotificationFlappingEnd)) ? 1 : 0 },
+ { "notify_service_downtime", (typeFilter & (NotificationDowntimeStart | NotificationDowntimeEnd | NotificationDowntimeRemoved)) ? 1 : 0 },
+ { "notify_host_recovery", (typeFilter & NotificationRecovery) ? 1 : 0 },
+ { "notify_host_down", (stateFilter & StateFilterDown) ? 1 : 0 },
+ { "notify_host_flapping", (typeFilter & (NotificationFlappingStart | NotificationFlappingEnd)) ? 1 : 0 },
+ { "notify_host_downtime", (typeFilter & (NotificationDowntimeStart | NotificationDowntimeEnd | NotificationDowntimeRemoved)) ? 1 : 0 }
+ });
+}
+
+Dictionary::Ptr UserDbObject::GetStatusFields() const
+{
+ User::Ptr user = static_pointer_cast<User>(GetObject());
+
+ return new Dictionary({
+ { "host_notifications_enabled", user->GetEnableNotifications() },
+ { "service_notifications_enabled", user->GetEnableNotifications() },
+ { "last_host_notification", DbValue::FromTimestamp(user->GetLastNotification()) },
+ { "last_service_notification", DbValue::FromTimestamp(user->GetLastNotification()) }
+ });
+}
+
+void UserDbObject::OnConfigUpdateHeavy()
+{
+ User::Ptr user = static_pointer_cast<User>(GetObject());
+
+ /* groups */
+ Array::Ptr groups = user->GetGroups();
+
+ std::vector<DbQuery> queries;
+
+ DbQuery query1;
+ query1.Table = DbType::GetByName("UserGroup")->GetTable() + "_members";
+ query1.Type = DbQueryDelete;
+ query1.Category = DbCatConfig;
+ query1.WhereCriteria = new Dictionary({
+ { "contact_object_id", user }
+ });
+ queries.emplace_back(std::move(query1));
+
+ if (groups) {
+ ObjectLock olock(groups);
+ for (const String& groupName : groups) {
+ UserGroup::Ptr group = UserGroup::GetByName(groupName);
+
+ DbQuery query2;
+ query2.Table = DbType::GetByName("UserGroup")->GetTable() + "_members";
+ query2.Type = DbQueryInsert | DbQueryUpdate;
+ query2.Category = DbCatConfig;
+ query2.Fields = new Dictionary({
+ { "instance_id", 0 }, /* DbConnection class fills in real ID */
+ { "contactgroup_id", DbValue::FromObjectInsertID(group) },
+ { "contact_object_id", user }
+ });
+ query2.WhereCriteria = new Dictionary({
+ { "instance_id", 0 }, /* DbConnection class fills in real ID */
+ { "contactgroup_id", DbValue::FromObjectInsertID(group) },
+ { "contact_object_id", user }
+ });
+ queries.emplace_back(std::move(query2));
+ }
+ }
+
+ DbObject::OnMultipleQueries(queries);
+
+ queries.clear();
+
+ DbQuery query2;
+ query2.Table = "contact_addresses";
+ query2.Type = DbQueryDelete;
+ query2.Category = DbCatConfig;
+ query2.WhereCriteria = new Dictionary({
+ { "contact_id", DbValue::FromObjectInsertID(user) }
+ });
+ queries.emplace_back(std::move(query2));
+
+ Dictionary::Ptr vars = user->GetVars();
+
+ if (vars) { /* This is sparta. */
+ for (int i = 1; i <= 6; i++) {
+ String key = "address" + Convert::ToString(i);
+
+ if (!vars->Contains(key))
+ continue;
+
+ String val = vars->Get(key);
+
+ DbQuery query;
+ query.Type = DbQueryInsert;
+ query.Table = "contact_addresses";
+ query.Category = DbCatConfig;
+ query.Fields = new Dictionary({
+ { "contact_id", DbValue::FromObjectInsertID(user) },
+ { "address_number", i },
+ { "address", val },
+ { "instance_id", 0 } /* DbConnection class fills in real ID */
+
+ });
+ queries.emplace_back(std::move(query));
+ }
+ }
+
+ DbObject::OnMultipleQueries(queries);
+}
+
+String UserDbObject::CalculateConfigHash(const Dictionary::Ptr& configFields) const
+{
+ String hashData = DbObject::CalculateConfigHash(configFields);
+
+ User::Ptr user = static_pointer_cast<User>(GetObject());
+
+ Array::Ptr groups = user->GetGroups();
+
+ if (groups) {
+ groups = groups->ShallowClone();
+ ObjectLock oLock (groups);
+ std::sort(groups->Begin(), groups->End());
+ hashData += DbObject::HashValue(groups);
+ }
+
+ return SHA256(hashData);
+}
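
OnConfigUpdateHeavy above maps the custom variables address1 through address6 of a User onto contact_addresses rows, one per populated slot. A minimal sketch of that lookup, using a plain std::map as a stand-in for the user's vars dictionary:

#include <cstdio>
#include <map>
#include <string>

int main()
{
	// Stand-in for user->GetVars(); only two of the six slots are set.
	std::map<std::string, std::string> vars = {
		{ "address1", "+49 1234 567" },
		{ "address3", "room 42" }
	};

	// Same convention as above: probe address1..address6 and emit one row per hit.
	for (int i = 1; i <= 6; i++) {
		std::string key = "address" + std::to_string(i);
		auto it = vars.find(key);

		if (it == vars.end())
			continue;

		std::printf("contact_addresses: address_number=%d address=%s\n", i, it->second.c_str());
	}
}
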
diff --git a/lib/db_ido/userdbobject.hpp b/lib/db_ido/userdbobject.hpp
new file mode 100644
index 0000000..e0f36c5
--- /dev/null
+++ b/lib/db_ido/userdbobject.hpp
@@ -0,0 +1,35 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef USERDBOBJECT_H
+#define USERDBOBJECT_H
+
+#include "db_ido/dbobject.hpp"
+#include "base/configobject.hpp"
+
+namespace icinga
+{
+
+/**
+ * A User database object.
+ *
+ * @ingroup ido
+ */
+class UserDbObject final : public DbObject
+{
+public:
+ DECLARE_PTR_TYPEDEFS(UserDbObject);
+
+ UserDbObject(const DbType::Ptr& type, const String& name1, const String& name2);
+
+protected:
+ Dictionary::Ptr GetConfigFields() const override;
+ Dictionary::Ptr GetStatusFields() const override;
+
+ void OnConfigUpdateHeavy() override;
+
+ String CalculateConfigHash(const Dictionary::Ptr& configFields) const override;
+};
+
+}
+
+#endif /* USERDBOBJECT_H */
diff --git a/lib/db_ido/usergroupdbobject.cpp b/lib/db_ido/usergroupdbobject.cpp
new file mode 100644
index 0000000..23b3581
--- /dev/null
+++ b/lib/db_ido/usergroupdbobject.cpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido/usergroupdbobject.hpp"
+#include "db_ido/dbtype.hpp"
+#include "db_ido/dbvalue.hpp"
+#include "base/objectlock.hpp"
+#include "base/initialize.hpp"
+#include "base/configtype.hpp"
+
+using namespace icinga;
+
+REGISTER_DBTYPE(UserGroup, "contactgroup", DbObjectTypeContactGroup, "contactgroup_object_id", UserGroupDbObject);
+
+UserGroupDbObject::UserGroupDbObject(const DbType::Ptr& type, const String& name1, const String& name2)
+ : DbObject(type, name1, name2)
+{ }
+
+Dictionary::Ptr UserGroupDbObject::GetConfigFields() const
+{
+ UserGroup::Ptr group = static_pointer_cast<UserGroup>(GetObject());
+
+ return new Dictionary({
+ { "alias", group->GetDisplayName() }
+ });
+}
+
+Dictionary::Ptr UserGroupDbObject::GetStatusFields() const
+{
+ return nullptr;
+}
diff --git a/lib/db_ido/usergroupdbobject.hpp b/lib/db_ido/usergroupdbobject.hpp
new file mode 100644
index 0000000..9469823
--- /dev/null
+++ b/lib/db_ido/usergroupdbobject.hpp
@@ -0,0 +1,31 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef USERGROUPDBOBJECT_H
+#define USERGROUPDBOBJECT_H
+
+#include "db_ido/dbobject.hpp"
+#include "icinga/usergroup.hpp"
+#include "base/configobject.hpp"
+
+namespace icinga
+{
+
+/**
+ * A UserGroup database object.
+ *
+ * @ingroup ido
+ */
+class UserGroupDbObject final : public DbObject
+{
+public:
+ DECLARE_PTR_TYPEDEFS(UserGroupDbObject);
+
+ UserGroupDbObject(const DbType::Ptr& type, const String& name1, const String& name2);
+
+ Dictionary::Ptr GetConfigFields() const override;
+ Dictionary::Ptr GetStatusFields() const override;
+};
+
+}
+
+#endif /* USERGROUPDBOBJECT_H */
diff --git a/lib/db_ido/zonedbobject.cpp b/lib/db_ido/zonedbobject.cpp
new file mode 100644
index 0000000..b8ad0c1
--- /dev/null
+++ b/lib/db_ido/zonedbobject.cpp
@@ -0,0 +1,38 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido/zonedbobject.hpp"
+#include "db_ido/dbtype.hpp"
+#include "db_ido/dbvalue.hpp"
+#include "base/logger.hpp"
+
+using namespace icinga;
+
+REGISTER_DBTYPE(Zone, "zone", DbObjectTypeZone, "zone_object_id", ZoneDbObject);
+
+ZoneDbObject::ZoneDbObject(const DbType::Ptr& type, const String& name1, const String& name2)
+ : DbObject(type, name1, name2)
+{ }
+
+Dictionary::Ptr ZoneDbObject::GetConfigFields() const
+{
+ Zone::Ptr zone = static_pointer_cast<Zone>(GetObject());
+
+ return new Dictionary({
+ { "is_global", zone->IsGlobal() ? 1 : 0 },
+ { "parent_zone_object_id", zone->GetParent() }
+
+ });
+}
+
+Dictionary::Ptr ZoneDbObject::GetStatusFields() const
+{
+ Zone::Ptr zone = static_pointer_cast<Zone>(GetObject());
+
+ Log(LogDebug, "ZoneDbObject")
+ << "update status for zone '" << zone->GetName() << "'";
+
+ return new Dictionary({
+ { "parent_zone_object_id", zone->GetParent() }
+ });
+}
diff --git a/lib/db_ido/zonedbobject.hpp b/lib/db_ido/zonedbobject.hpp
new file mode 100644
index 0000000..3901c81
--- /dev/null
+++ b/lib/db_ido/zonedbobject.hpp
@@ -0,0 +1,31 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef ZONEDBOBJECT_H
+#define ZONEDBOBJECT_H
+
+#include "db_ido/dbobject.hpp"
+#include "base/configobject.hpp"
+#include "remote/zone.hpp"
+
+namespace icinga
+{
+
+/**
+ * A Zone database object.
+ *
+ * @ingroup ido
+ */
+class ZoneDbObject final : public DbObject
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ZoneDbObject);
+
+ ZoneDbObject(const intrusive_ptr<DbType>& type, const String& name1, const String& name2);
+
+ Dictionary::Ptr GetConfigFields() const override;
+ Dictionary::Ptr GetStatusFields() const override;
+};
+
+}
+
+#endif /* ZONEDBOBJECT_H */
diff --git a/lib/db_ido_mysql/CMakeLists.txt b/lib/db_ido_mysql/CMakeLists.txt
new file mode 100644
index 0000000..70cb90d
--- /dev/null
+++ b/lib/db_ido_mysql/CMakeLists.txt
@@ -0,0 +1,41 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+mkclass_target(idomysqlconnection.ti idomysqlconnection-ti.cpp idomysqlconnection-ti.hpp)
+
+set(db_ido_mysql_SOURCES
+ idomysqlconnection.cpp idomysqlconnection.hpp idomysqlconnection-ti.hpp
+)
+
+if(ICINGA2_UNITY_BUILD)
+ mkunity_target(db_ido_mysql db_ido_mysql db_ido_mysql_SOURCES)
+endif()
+
+add_library(db_ido_mysql OBJECT ${db_ido_mysql_SOURCES})
+
+include_directories(${MYSQL_INCLUDE_DIR})
+
+add_dependencies(db_ido_mysql base config icinga db_ido)
+
+set_target_properties (
+ db_ido_mysql PROPERTIES
+ FOLDER Components
+)
+
+install_if_not_exists(
+ ${PROJECT_SOURCE_DIR}/etc/icinga2/features-available/ido-mysql.conf
+ ${ICINGA2_CONFIGDIR}/features-available
+)
+
+install(
+ DIRECTORY schema
+ DESTINATION ${CMAKE_INSTALL_DATADIR}/icinga2-ido-mysql
+ FILES_MATCHING PATTERN "*.sql"
+)
+
+install(
+ DIRECTORY schema/upgrade
+ DESTINATION ${CMAKE_INSTALL_DATADIR}/icinga2-ido-mysql/schema
+ FILES_MATCHING PATTERN "*.sql"
+)
+
+set(CPACK_NSIS_EXTRA_INSTALL_COMMANDS "${CPACK_NSIS_EXTRA_INSTALL_COMMANDS}" PARENT_SCOPE)
diff --git a/lib/db_ido_mysql/idomysqlconnection.cpp b/lib/db_ido_mysql/idomysqlconnection.cpp
new file mode 100644
index 0000000..aacb7d7
--- /dev/null
+++ b/lib/db_ido_mysql/idomysqlconnection.cpp
@@ -0,0 +1,1269 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido_mysql/idomysqlconnection.hpp"
+#include "db_ido_mysql/idomysqlconnection-ti.cpp"
+#include "db_ido/dbtype.hpp"
+#include "db_ido/dbvalue.hpp"
+#include "base/logger.hpp"
+#include "base/objectlock.hpp"
+#include "base/convert.hpp"
+#include "base/utility.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/application.hpp"
+#include "base/configtype.hpp"
+#include "base/exception.hpp"
+#include "base/statsfunction.hpp"
+#include "base/defer.hpp"
+#include <utility>
+
+using namespace icinga;
+
+REGISTER_TYPE(IdoMysqlConnection);
+REGISTER_STATSFUNCTION(IdoMysqlConnection, &IdoMysqlConnection::StatsFunc);
+
+const char * IdoMysqlConnection::GetLatestSchemaVersion() const noexcept
+{
+ return "1.15.1";
+}
+
+const char * IdoMysqlConnection::GetCompatSchemaVersion() const noexcept
+{
+ return "1.14.3";
+}
+
+void IdoMysqlConnection::OnConfigLoaded()
+{
+ ObjectImpl<IdoMysqlConnection>::OnConfigLoaded();
+
+ m_QueryQueue.SetName("IdoMysqlConnection, " + GetName());
+
+ Library shimLibrary{"mysql_shim"};
+
+ auto create_mysql_shim = shimLibrary.GetSymbolAddress<create_mysql_shim_ptr>("create_mysql_shim");
+
+ m_Mysql.reset(create_mysql_shim());
+
+ std::swap(m_Library, shimLibrary);
+}
+
+void IdoMysqlConnection::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata)
+{
+ DictionaryData nodes;
+
+ for (const IdoMysqlConnection::Ptr& idomysqlconnection : ConfigType::GetObjectsByType<IdoMysqlConnection>()) {
+ size_t queryQueueItems = idomysqlconnection->m_QueryQueue.GetLength();
+ double queryQueueItemRate = idomysqlconnection->m_QueryQueue.GetTaskCount(60) / 60.0;
+
+ nodes.emplace_back(idomysqlconnection->GetName(), new Dictionary({
+ { "version", idomysqlconnection->GetSchemaVersion() },
+ { "instance_name", idomysqlconnection->GetInstanceName() },
+ { "connected", idomysqlconnection->GetConnected() },
+ { "query_queue_items", queryQueueItems },
+ { "query_queue_item_rate", queryQueueItemRate }
+ }));
+
+ perfdata->Add(new PerfdataValue("idomysqlconnection_" + idomysqlconnection->GetName() + "_queries_rate", idomysqlconnection->GetQueryCount(60) / 60.0));
+ perfdata->Add(new PerfdataValue("idomysqlconnection_" + idomysqlconnection->GetName() + "_queries_1min", idomysqlconnection->GetQueryCount(60)));
+ perfdata->Add(new PerfdataValue("idomysqlconnection_" + idomysqlconnection->GetName() + "_queries_5mins", idomysqlconnection->GetQueryCount(5 * 60)));
+ perfdata->Add(new PerfdataValue("idomysqlconnection_" + idomysqlconnection->GetName() + "_queries_15mins", idomysqlconnection->GetQueryCount(15 * 60)));
+ perfdata->Add(new PerfdataValue("idomysqlconnection_" + idomysqlconnection->GetName() + "_query_queue_items", queryQueueItems));
+ perfdata->Add(new PerfdataValue("idomysqlconnection_" + idomysqlconnection->GetName() + "_query_queue_item_rate", queryQueueItemRate));
+ }
+
+ status->Set("idomysqlconnection", new Dictionary(std::move(nodes)));
+}
+
+void IdoMysqlConnection::Resume()
+{
+ Log(LogInformation, "IdoMysqlConnection")
+ << "'" << GetName() << "' resumed.";
+
+ SetConnected(false);
+
+ m_QueryQueue.SetExceptionCallback([this](boost::exception_ptr exp) { ExceptionHandler(std::move(exp)); });
+
+ /* Immediately try to connect on Resume() without timer. */
+ m_QueryQueue.Enqueue([this]() { Reconnect(); }, PriorityImmediate);
+
+ m_TxTimer = Timer::Create();
+ m_TxTimer->SetInterval(1);
+ m_TxTimer->OnTimerExpired.connect([this](const Timer * const&) { NewTransaction(); });
+ m_TxTimer->Start();
+
+ m_ReconnectTimer = Timer::Create();
+ m_ReconnectTimer->SetInterval(10);
+ m_ReconnectTimer->OnTimerExpired.connect([this](const Timer * const&){ ReconnectTimerHandler(); });
+ m_ReconnectTimer->Start();
+
+ /* Start with queries after connect. */
+ DbConnection::Resume();
+
+ ASSERT(m_Mysql->thread_safe());
+}
+
+void IdoMysqlConnection::Pause()
+{
+ Log(LogDebug, "IdoMysqlConnection")
+ << "Attempting to pause '" << GetName() << "'.";
+
+ DbConnection::Pause();
+
+ m_ReconnectTimer->Stop(true);
+ m_TxTimer->Stop(true);
+
+#ifdef I2_DEBUG /* I2_DEBUG */
+ Log(LogDebug, "IdoMysqlConnection")
+ << "Rescheduling disconnect task.";
+#endif /* I2_DEBUG */
+
+ Log(LogInformation, "IdoMysqlConnection")
+ << "'" << GetName() << "' paused.";
+}
+
+void IdoMysqlConnection::ExceptionHandler(boost::exception_ptr exp)
+{
+ Log(LogCritical, "IdoMysqlConnection", "Exception during database operation: Verify that your database is operational!");
+
+ Log(LogDebug, "IdoMysqlConnection")
+ << "Exception during database operation: " << DiagnosticInformation(std::move(exp));
+
+ if (GetConnected()) {
+ m_Mysql->close(&m_Connection);
+
+ SetConnected(false);
+ }
+}
+
+void IdoMysqlConnection::AssertOnWorkQueue()
+{
+ ASSERT(m_QueryQueue.IsWorkerThread());
+}
+
+void IdoMysqlConnection::Disconnect()
+{
+ AssertOnWorkQueue();
+
+ if (!GetConnected())
+ return;
+
+ Query("COMMIT");
+ m_Mysql->close(&m_Connection);
+
+ SetConnected(false);
+
+ Log(LogInformation, "IdoMysqlConnection")
+ << "Disconnected from '" << GetName() << "' database '" << GetDatabase() << "'.";
+}
+
+void IdoMysqlConnection::NewTransaction()
+{
+ if (IsPaused() && GetPauseCalled())
+ return;
+
+#ifdef I2_DEBUG /* I2_DEBUG */
+ Log(LogDebug, "IdoMysqlConnection")
+ << "Scheduling new transaction and finishing async queries.";
+#endif /* I2_DEBUG */
+
+ m_QueryQueue.Enqueue([this]() { InternalNewTransaction(); }, PriorityHigh);
+}
+
+void IdoMysqlConnection::InternalNewTransaction()
+{
+ AssertOnWorkQueue();
+
+ if (!GetConnected())
+ return;
+
+ IncreasePendingQueries(2);
+
+ AsyncQuery("COMMIT");
+ AsyncQuery("BEGIN");
+
+ FinishAsyncQueries();
+}
+
+void IdoMysqlConnection::ReconnectTimerHandler()
+{
+#ifdef I2_DEBUG /* I2_DEBUG */
+ Log(LogDebug, "IdoMysqlConnection")
+ << "Scheduling reconnect task.";
+#endif /* I2_DEBUG */
+
+ /* Only allow Reconnect events with high priority. */
+ m_QueryQueue.Enqueue([this]() { Reconnect(); }, PriorityImmediate);
+}
+
+void IdoMysqlConnection::Reconnect()
+{
+ AssertOnWorkQueue();
+
+ if (!IsActive())
+ return;
+
+ CONTEXT("Reconnecting to MySQL IDO database '" << GetName() << "'");
+
+ double startTime = Utility::GetTime();
+
+ SetShouldConnect(true);
+
+ bool reconnect = false;
+
+ /* Ensure to close old connections first. */
+ if (GetConnected()) {
+ /* Check if we're really still connected */
+ if (m_Mysql->ping(&m_Connection) == 0)
+ return;
+
+ m_Mysql->close(&m_Connection);
+ SetConnected(false);
+ reconnect = true;
+ }
+
+ Log(LogDebug, "IdoMysqlConnection")
+ << "Reconnect: Clearing ID cache.";
+
+ ClearIDCache();
+
+ String ihost, isocket_path, iuser, ipasswd, idb;
+ String isslKey, isslCert, isslCa, isslCaPath, isslCipher;
+ const char *host, *socket_path, *user , *passwd, *db;
+ const char *sslKey, *sslCert, *sslCa, *sslCaPath, *sslCipher;
+ bool enableSsl;
+ long port;
+
+ ihost = GetHost();
+ isocket_path = GetSocketPath();
+ iuser = GetUser();
+ ipasswd = GetPassword();
+ idb = GetDatabase();
+
+ enableSsl = GetEnableSsl();
+ isslKey = GetSslKey();
+ isslCert = GetSslCert();
+ isslCa = GetSslCa();
+ isslCaPath = GetSslCapath();
+ isslCipher = GetSslCipher();
+
+ host = (!ihost.IsEmpty()) ? ihost.CStr() : nullptr;
+ port = GetPort();
+ socket_path = (!isocket_path.IsEmpty()) ? isocket_path.CStr() : nullptr;
+ user = (!iuser.IsEmpty()) ? iuser.CStr() : nullptr;
+ passwd = (!ipasswd.IsEmpty()) ? ipasswd.CStr() : nullptr;
+ db = (!idb.IsEmpty()) ? idb.CStr() : nullptr;
+
+ sslKey = (!isslKey.IsEmpty()) ? isslKey.CStr() : nullptr;
+ sslCert = (!isslCert.IsEmpty()) ? isslCert.CStr() : nullptr;
+ sslCa = (!isslCa.IsEmpty()) ? isslCa.CStr() : nullptr;
+ sslCaPath = (!isslCaPath.IsEmpty()) ? isslCaPath.CStr() : nullptr;
+ sslCipher = (!isslCipher.IsEmpty()) ? isslCipher.CStr() : nullptr;
+
+ /* connection */
+ if (!m_Mysql->init(&m_Connection)) {
+ Log(LogCritical, "IdoMysqlConnection")
+ << "mysql_init() failed: out of memory";
+
+ BOOST_THROW_EXCEPTION(std::bad_alloc());
+ }
+
+ /* Read "latin1" (here, in the schema and in Icinga Web) as "bytes".
+ Icinga 2 and Icinga Web use byte-strings everywhere and every byte-string is a valid latin1 string.
+ This way the (actually mostly UTF-8) bytes are transferred end-to-end as-is. */
+ m_Mysql->options(&m_Connection, MYSQL_SET_CHARSET_NAME, "latin1");
+
+ if (enableSsl)
+ m_Mysql->ssl_set(&m_Connection, sslKey, sslCert, sslCa, sslCaPath, sslCipher);
+
+ if (!m_Mysql->real_connect(&m_Connection, host, user, passwd, db, port, socket_path, CLIENT_FOUND_ROWS | CLIENT_MULTI_STATEMENTS)) {
+ Log(LogCritical, "IdoMysqlConnection")
+ << "Connection to database '" << db << "' with user '" << user << "' on '" << host << ":" << port
+ << "' " << (enableSsl ? "(SSL enabled) " : "") << "failed: \"" << m_Mysql->error(&m_Connection) << "\"";
+
+ BOOST_THROW_EXCEPTION(std::runtime_error(m_Mysql->error(&m_Connection)));
+ }
+
+ Log(LogNotice, "IdoMysqlConnection")
+ << "Reconnect: '" << GetName() << "' is now connected to database '" << GetDatabase() << "'.";
+
+ SetConnected(true);
+
+ IdoMysqlResult result = Query("SELECT @@global.max_allowed_packet AS max_allowed_packet");
+
+ Dictionary::Ptr row = FetchRow(result);
+
+ if (row)
+ m_MaxPacketSize = row->Get("max_allowed_packet");
+ else
+ m_MaxPacketSize = 64 * 1024;
+
+ DiscardRows(result);
+
+ String dbVersionName = "idoutils";
+ result = Query("SELECT version FROM " + GetTablePrefix() + "dbversion WHERE name='" + Escape(dbVersionName) + "'");
+
+ row = FetchRow(result);
+
+ if (!row) {
+ m_Mysql->close(&m_Connection);
+ SetConnected(false);
+
+ Log(LogCritical, "IdoMysqlConnection", "Schema does not provide any valid version! Verify your schema installation.");
+
+ BOOST_THROW_EXCEPTION(std::runtime_error("Invalid schema."));
+ }
+
+ DiscardRows(result);
+
+ String version = row->Get("version");
+
+ SetSchemaVersion(version);
+
+ if (Utility::CompareVersion(GetCompatSchemaVersion(), version) < 0) {
+ m_Mysql->close(&m_Connection);
+ SetConnected(false);
+
+ Log(LogCritical, "IdoMysqlConnection")
+ << "Schema version '" << version << "' does not match the required version '"
+ << GetCompatSchemaVersion() << "' (or newer)! Please check the upgrade documentation at "
+ << "https://icinga.com/docs/icinga2/latest/doc/16-upgrading-icinga-2/#upgrading-mysql-db";
+
+ BOOST_THROW_EXCEPTION(std::runtime_error("Schema version mismatch."));
+ }
+
+ String instanceName = GetInstanceName();
+
+ result = Query("SELECT instance_id FROM " + GetTablePrefix() + "instances WHERE instance_name = '" + Escape(instanceName) + "'");
+ row = FetchRow(result);
+
+ if (!row) {
+ Query("INSERT INTO " + GetTablePrefix() + "instances (instance_name, instance_description) VALUES ('" + Escape(instanceName) + "', '" + Escape(GetInstanceDescription()) + "')");
+ m_InstanceID = GetLastInsertID();
+ } else {
+ m_InstanceID = DbReference(row->Get("instance_id"));
+ }
+
+ DiscardRows(result);
+
+ Endpoint::Ptr my_endpoint = Endpoint::GetLocalEndpoint();
+
+ /* we have an endpoint in a cluster setup, so decide if we can proceed here */
+ if (my_endpoint && GetHAMode() == HARunOnce) {
+ /* get the current endpoint writing to programstatus table */
+ result = Query("SELECT UNIX_TIMESTAMP(status_update_time) AS status_update_time, endpoint_name FROM " +
+ GetTablePrefix() + "programstatus WHERE instance_id = " + Convert::ToString(m_InstanceID));
+ row = FetchRow(result);
+ DiscardRows(result);
+
+ String endpoint_name;
+
+ if (row)
+ endpoint_name = row->Get("endpoint_name");
+ else
+ Log(LogNotice, "IdoMysqlConnection", "Empty program status table");
+
+ /* if we did not write into the database earlier, another instance is active */
+ if (endpoint_name != my_endpoint->GetName()) {
+ double status_update_time;
+
+ if (row)
+ status_update_time = row->Get("status_update_time");
+ else
+ status_update_time = 0;
+
+ double now = Utility::GetTime();
+
+ double status_update_age = now - status_update_time;
+ double failoverTimeout = GetFailoverTimeout();
+
+ if (status_update_age < failoverTimeout) {
+ Log(LogInformation, "IdoMysqlConnection")
+ << "Last update by endpoint '" << endpoint_name << "' was "
+ << status_update_age << "s ago (< failover timeout of " << failoverTimeout << "s). Retrying.";
+
+ m_Mysql->close(&m_Connection);
+ SetConnected(false);
+ SetShouldConnect(false);
+
+ return;
+ }
+
+ /* activate the IDO only, if we're authoritative in this zone */
+ if (IsPaused()) {
+ Log(LogNotice, "IdoMysqlConnection")
+ << "Local endpoint '" << my_endpoint->GetName() << "' is not authoritative, bailing out.";
+
+ m_Mysql->close(&m_Connection);
+ SetConnected(false);
+
+ return;
+ }
+
+ SetLastFailover(now);
+
+ Log(LogInformation, "IdoMysqlConnection")
+ << "Last update by endpoint '" << endpoint_name << "' was "
+ << status_update_age << "s ago. Taking over '" << GetName() << "' in HA zone '" << Zone::GetLocalZone()->GetName() << "'.";
+ }
+
+ Log(LogNotice, "IdoMysqlConnection", "Enabling IDO connection in HA zone.");
+ }
+
+ Log(LogInformation, "IdoMysqlConnection")
+ << "MySQL IDO instance id: " << static_cast<long>(m_InstanceID) << " (schema version: '" + version + "')";
+
+ /* set session time zone to utc */
+ Query("SET SESSION TIME_ZONE='+00:00'");
+
+ Query("SET SESSION SQL_MODE='NO_AUTO_VALUE_ON_ZERO'");
+
+ Query("BEGIN");
+
+ /* update programstatus table */
+ UpdateProgramStatus();
+
+ /* record connection */
+ Query("INSERT INTO " + GetTablePrefix() + "conninfo " +
+ "(instance_id, connect_time, last_checkin_time, agent_name, agent_version, connect_type, data_start_time) VALUES ("
+ + Convert::ToString(static_cast<long>(m_InstanceID)) + ", NOW(), NOW(), 'icinga2 db_ido_mysql', '" + Escape(Application::GetAppVersion())
+ + "', '" + (reconnect ? "RECONNECT" : "INITIAL") + "', NOW())");
+
+ /* clear config tables for the initial config dump */
+ PrepareDatabase();
+
+ std::ostringstream q1buf;
+ q1buf << "SELECT object_id, objecttype_id, name1, name2, is_active FROM " + GetTablePrefix() + "objects WHERE instance_id = " << static_cast<long>(m_InstanceID);
+ result = Query(q1buf.str());
+
+ std::vector<DbObject::Ptr> activeDbObjs;
+
+ while ((row = FetchRow(result))) {
+ DbType::Ptr dbtype = DbType::GetByID(row->Get("objecttype_id"));
+
+ if (!dbtype)
+ continue;
+
+ DbObject::Ptr dbobj = dbtype->GetOrCreateObjectByName(row->Get("name1"), row->Get("name2"));
+ SetObjectID(dbobj, DbReference(row->Get("object_id")));
+ bool active = row->Get("is_active");
+ SetObjectActive(dbobj, active);
+
+ if (active)
+ activeDbObjs.emplace_back(std::move(dbobj));
+ }
+
+ SetIDCacheValid(true);
+
+ EnableActiveChangedHandler();
+
+ for (const DbObject::Ptr& dbobj : activeDbObjs) {
+ if (dbobj->GetObject())
+ continue;
+
+ Log(LogNotice, "IdoMysqlConnection")
+ << "Deactivate deleted object name1: '" << dbobj->GetName1()
+ << "' name2: '" << dbobj->GetName2() + "'.";
+ DeactivateObject(dbobj);
+ }
+
+ UpdateAllObjects();
+
+#ifdef I2_DEBUG /* I2_DEBUG */
+ Log(LogDebug, "IdoMysqlConnection")
+ << "Scheduling session table clear and finish connect task.";
+#endif /* I2_DEBUG */
+
+ m_QueryQueue.Enqueue([this]() { ClearTablesBySession(); }, PriorityNormal);
+
+ m_QueryQueue.Enqueue([this, startTime]() { FinishConnect(startTime); }, PriorityNormal);
+}
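
The HARunOnce branch in Reconnect() above decides whether this endpoint may take over the IDO connection: if another endpoint updated programstatus less than failover_timeout seconds ago it backs off and retries, otherwise it records a failover and proceeds. A condensed standalone sketch of that decision, with the programstatus lookup replaced by plain parameters (the real code additionally bails out when the local endpoint is not authoritative in its zone, which this sketch omits):

#include <cstdio>
#include <string>

// Decide whether the local endpoint should take over IDO writes,
// mirroring the programstatus age check above. Returns true to take over.
bool ShouldTakeOver(const std::string& lastWriter, const std::string& localEndpoint,
	double lastUpdate, double now, double failoverTimeout)
{
	if (lastWriter == localEndpoint)
		return true; // we already own the connection

	double age = now - lastUpdate;

	if (age < failoverTimeout)
		return false; // the other endpoint is still fresh, retry later

	return true; // stale update, take over
}

int main()
{
	std::printf("%d\n", ShouldTakeOver("node-b", "node-a", 1000.0, 1020.0, 60.0)); // 0: back off
	std::printf("%d\n", ShouldTakeOver("node-b", "node-a", 1000.0, 1100.0, 60.0)); // 1: take over
}
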
+
+void IdoMysqlConnection::FinishConnect(double startTime)
+{
+ AssertOnWorkQueue();
+
+ if (!GetConnected() || IsPaused())
+ return;
+
+ FinishAsyncQueries();
+
+ Log(LogInformation, "IdoMysqlConnection")
+ << "Finished reconnecting to '" << GetName() << "' database '" << GetDatabase() << "' in "
+ << std::setw(2) << Utility::GetTime() - startTime << " second(s).";
+
+ Query("COMMIT");
+ Query("BEGIN");
+}
+
+void IdoMysqlConnection::ClearTablesBySession()
+{
+ /* delete all comments and downtimes without current session token */
+ ClearTableBySession("comments");
+ ClearTableBySession("scheduleddowntime");
+}
+
+void IdoMysqlConnection::ClearTableBySession(const String& table)
+{
+ Query("DELETE FROM " + GetTablePrefix() + table + " WHERE instance_id = " +
+ Convert::ToString(static_cast<long>(m_InstanceID)) + " AND session_token <> " +
+ Convert::ToString(GetSessionToken()));
+}
+
+void IdoMysqlConnection::AsyncQuery(const String& query, const std::function<void (const IdoMysqlResult&)>& callback)
+{
+ AssertOnWorkQueue();
+
+ IdoAsyncQuery aq;
+ aq.Query = query;
+ /* XXX: Important: The callback must not immediately execute a query, but enqueue it!
+ * See https://github.com/Icinga/icinga2/issues/4603 for details.
+ */
+ aq.Callback = callback;
+ m_AsyncQueries.emplace_back(std::move(aq));
+}
+
+void IdoMysqlConnection::FinishAsyncQueries()
+{
+ std::vector<IdoAsyncQuery> queries;
+ m_AsyncQueries.swap(queries);
+
+ std::vector<IdoAsyncQuery>::size_type offset = 0;
+
+ // This will be executed if there is a problem with executing the queries,
+ // at which point this function throws an exception and the queries should
+ // not be listed as still pending in the queue.
+ Defer decreaseQueries ([this, &offset, &queries]() {
+ auto lostQueries = queries.size() - offset;
+
+ if (lostQueries > 0) {
+ DecreasePendingQueries(lostQueries);
+ }
+ });
+
+ while (offset < queries.size()) {
+ std::ostringstream querybuf;
+
+ std::vector<IdoAsyncQuery>::size_type count = 0;
+ size_t num_bytes = 0;
+
+ Defer decreaseQueries ([this, &offset, &count]() {
+ offset += count;
+ DecreasePendingQueries(count);
+ m_UncommittedAsyncQueries += count;
+ });
+
+ for (std::vector<IdoAsyncQuery>::size_type i = offset; i < queries.size(); i++) {
+ const IdoAsyncQuery& aq = queries[i];
+
+ size_t size_query = aq.Query.GetLength() + 1;
+
+ if (count > 0) {
+ if (num_bytes + size_query > m_MaxPacketSize - 512)
+ break;
+
+ querybuf << ";";
+ }
+
+ IncreaseQueryCount();
+ count++;
+
+ Log(LogDebug, "IdoMysqlConnection")
+ << "Query: " << aq.Query;
+
+ querybuf << aq.Query;
+ num_bytes += size_query;
+ }
+
+ String query = querybuf.str();
+
+ if (m_Mysql->query(&m_Connection, query.CStr()) != 0) {
+ std::ostringstream msgbuf;
+ String message = m_Mysql->error(&m_Connection);
+ msgbuf << "Error \"" << message << "\" when executing query \"" << query << "\"";
+ Log(LogCritical, "IdoMysqlConnection", msgbuf.str());
+
+ BOOST_THROW_EXCEPTION(
+ database_error()
+ << errinfo_message(m_Mysql->error(&m_Connection))
+ << errinfo_database_query(query)
+ );
+ }
+
+ for (std::vector<IdoAsyncQuery>::size_type i = offset; i < offset + count; i++) {
+ const IdoAsyncQuery& aq = queries[i];
+
+ MYSQL_RES *result = m_Mysql->store_result(&m_Connection);
+
+ m_AffectedRows = m_Mysql->affected_rows(&m_Connection);
+
+ IdoMysqlResult iresult;
+
+ if (!result) {
+ if (m_Mysql->field_count(&m_Connection) > 0) {
+ std::ostringstream msgbuf;
+ String message = m_Mysql->error(&m_Connection);
+ msgbuf << "Error \"" << message << "\" when executing query \"" << aq.Query << "\"";
+ Log(LogCritical, "IdoMysqlConnection", msgbuf.str());
+
+ BOOST_THROW_EXCEPTION(
+ database_error()
+ << errinfo_message(m_Mysql->error(&m_Connection))
+ << errinfo_database_query(query)
+ );
+ }
+ } else
+ iresult = IdoMysqlResult(result, [this](MYSQL_RES* result) { m_Mysql->free_result(result); });
+
+ if (aq.Callback)
+ aq.Callback(iresult);
+
+ if (m_Mysql->next_result(&m_Connection) > 0) {
+ std::ostringstream msgbuf;
+ String message = m_Mysql->error(&m_Connection);
+ msgbuf << "Error \"" << message << "\" when executing query \"" << query << "\"";
+ Log(LogCritical, "IdoMysqlConnection", msgbuf.str());
+
+ BOOST_THROW_EXCEPTION(
+ database_error()
+ << errinfo_message(m_Mysql->error(&m_Connection))
+ << errinfo_database_query(query)
+ );
+ }
+ }
+ }
+
+ if (m_UncommittedAsyncQueries > 25000) {
+ m_UncommittedAsyncQueries = 0;
+
+ Query("COMMIT");
+ Query("BEGIN");
+ }
+}
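
FinishAsyncQueries above concatenates as many pending statements as fit below max_allowed_packet (minus a 512-byte safety margin) into one multi-statement round trip. A simplified sketch of that packing loop over plain strings, without the MySQL calls or the pending-query accounting:

#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

// Pack queries into batches whose combined size stays below the packet limit,
// keeping the same headroom as the code above (maxPacket - 512).
std::vector<std::string> PackBatches(const std::vector<std::string>& queries, std::size_t maxPacket)
{
	std::vector<std::string> batches;
	std::string current;

	for (const auto& q : queries) {
		std::size_t needed = q.size() + 1; // +1 for the ';' separator

		if (!current.empty() && current.size() + needed > maxPacket - 512) {
			batches.push_back(current);
			current.clear();
		}

		if (!current.empty())
			current += ';';

		current += q;
	}

	if (!current.empty())
		batches.push_back(current);

	return batches;
}

int main()
{
	auto batches = PackBatches({ "BEGIN", std::string(700, 'x'), "COMMIT" }, 1024);
	std::printf("%zu batch(es)\n", batches.size()); // 3 batch(es)
}
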
+
+IdoMysqlResult IdoMysqlConnection::Query(const String& query)
+{
+ AssertOnWorkQueue();
+
+ IncreasePendingQueries(1);
+ Defer decreaseQueries ([this]() { DecreasePendingQueries(1); });
+
+ /* finish all async queries to maintain the right order for queries */
+ FinishAsyncQueries();
+
+ Log(LogDebug, "IdoMysqlConnection")
+ << "Query: " << query;
+
+ IncreaseQueryCount();
+
+ if (m_Mysql->query(&m_Connection, query.CStr()) != 0) {
+ std::ostringstream msgbuf;
+ String message = m_Mysql->error(&m_Connection);
+ msgbuf << "Error \"" << message << "\" when executing query \"" << query << "\"";
+ Log(LogCritical, "IdoMysqlConnection", msgbuf.str());
+
+ BOOST_THROW_EXCEPTION(
+ database_error()
+ << errinfo_message(m_Mysql->error(&m_Connection))
+ << errinfo_database_query(query)
+ );
+ }
+
+ MYSQL_RES *result = m_Mysql->store_result(&m_Connection);
+
+ m_AffectedRows = m_Mysql->affected_rows(&m_Connection);
+
+ if (!result) {
+ if (m_Mysql->field_count(&m_Connection) > 0) {
+ std::ostringstream msgbuf;
+ String message = m_Mysql->error(&m_Connection);
+ msgbuf << "Error \"" << message << "\" when executing query \"" << query << "\"";
+ Log(LogCritical, "IdoMysqlConnection", msgbuf.str());
+
+ BOOST_THROW_EXCEPTION(
+ database_error()
+ << errinfo_message(m_Mysql->error(&m_Connection))
+ << errinfo_database_query(query)
+ );
+ }
+
+ return IdoMysqlResult();
+ }
+
+ return IdoMysqlResult(result, [this](MYSQL_RES* result) { m_Mysql->free_result(result); });
+}
+
+DbReference IdoMysqlConnection::GetLastInsertID()
+{
+ AssertOnWorkQueue();
+
+ return {static_cast<long>(m_Mysql->insert_id(&m_Connection))};
+}
+
+int IdoMysqlConnection::GetAffectedRows()
+{
+ AssertOnWorkQueue();
+
+ return m_AffectedRows;
+}
+
+String IdoMysqlConnection::Escape(const String& s)
+{
+ AssertOnWorkQueue();
+
+ String utf8s = Utility::ValidateUTF8(s);
+
+ size_t length = utf8s.GetLength();
+ auto *to = new char[utf8s.GetLength() * 2 + 1];
+
+ m_Mysql->real_escape_string(&m_Connection, to, utf8s.CStr(), length);
+
+ String result = String(to);
+
+ delete [] to;
+
+ return result;
+}
+
+Dictionary::Ptr IdoMysqlConnection::FetchRow(const IdoMysqlResult& result)
+{
+ AssertOnWorkQueue();
+
+ MYSQL_ROW row;
+ MYSQL_FIELD *field;
+ unsigned long *lengths, i;
+
+ row = m_Mysql->fetch_row(result.get());
+
+ if (!row)
+ return nullptr;
+
+ lengths = m_Mysql->fetch_lengths(result.get());
+
+ if (!lengths)
+ return nullptr;
+
+ Dictionary::Ptr dict = new Dictionary();
+
+ m_Mysql->field_seek(result.get(), 0);
+ for (field = m_Mysql->fetch_field(result.get()), i = 0; field; field = m_Mysql->fetch_field(result.get()), i++)
+ dict->Set(field->name, String(row[i], row[i] + lengths[i]));
+
+ return dict;
+}
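
FetchRow above walks the result's field metadata in parallel with the row data and uses fetch_lengths so values are copied length-delimited rather than as NUL-terminated C strings. A standalone sketch of that pairing, with plain arrays standing in for MYSQL_ROW and the lengths buffer:

#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Pair column names with length-delimited values, as FetchRow does via
// fetch_field()/fetch_lengths(); embedded NUL bytes survive the copy.
std::map<std::string, std::string> RowToMap(const std::vector<std::string>& names,
	const std::vector<const char*>& row, const std::vector<unsigned long>& lengths)
{
	std::map<std::string, std::string> dict;

	for (std::size_t i = 0; i < names.size(); i++)
		dict[names[i]] = std::string(row[i], row[i] + lengths[i]);

	return dict;
}

int main()
{
	const char v0[] = "icinga";
	const char v1[] = { 'a', '\0', 'b' }; // value with an embedded NUL byte

	auto dict = RowToMap({ "name1", "blob" }, { v0, v1 }, { 6, 3 });
	std::printf("name1=%s blob_len=%zu\n", dict["name1"].c_str(), dict["blob"].size()); // blob_len=3
}
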
+
+void IdoMysqlConnection::DiscardRows(const IdoMysqlResult& result)
+{
+ Dictionary::Ptr row;
+
+ while ((row = FetchRow(result)))
+ ; /* empty loop body */
+}
+
+void IdoMysqlConnection::ActivateObject(const DbObject::Ptr& dbobj)
+{
+ if (IsPaused())
+ return;
+
+#ifdef I2_DEBUG /* I2_DEBUG */
+ Log(LogDebug, "IdoMysqlConnection")
+ << "Scheduling object activation task for '" << dbobj->GetName1() << "!" << dbobj->GetName2() << "'.";
+#endif /* I2_DEBUG */
+
+ m_QueryQueue.Enqueue([this, dbobj]() { InternalActivateObject(dbobj); }, PriorityNormal);
+}
+
+void IdoMysqlConnection::InternalActivateObject(const DbObject::Ptr& dbobj)
+{
+ AssertOnWorkQueue();
+
+ if (IsPaused())
+ return;
+
+ if (!GetConnected())
+ return;
+
+ DbReference dbref = GetObjectID(dbobj);
+ std::ostringstream qbuf;
+
+ if (!dbref.IsValid()) {
+ if (!dbobj->GetName2().IsEmpty()) {
+ qbuf << "INSERT INTO " + GetTablePrefix() + "objects (instance_id, objecttype_id, name1, name2, is_active) VALUES ("
+ << static_cast<long>(m_InstanceID) << ", " << dbobj->GetType()->GetTypeID() << ", "
+ << "'" << Escape(dbobj->GetName1()) << "', '" << Escape(dbobj->GetName2()) << "', 1)";
+ } else {
+ qbuf << "INSERT INTO " + GetTablePrefix() + "objects (instance_id, objecttype_id, name1, is_active) VALUES ("
+ << static_cast<long>(m_InstanceID) << ", " << dbobj->GetType()->GetTypeID() << ", "
+ << "'" << Escape(dbobj->GetName1()) << "', 1)";
+ }
+
+ Query(qbuf.str());
+ SetObjectID(dbobj, GetLastInsertID());
+ } else {
+ qbuf << "UPDATE " + GetTablePrefix() + "objects SET is_active = 1 WHERE object_id = " << static_cast<long>(dbref);
+ IncreasePendingQueries(1);
+ AsyncQuery(qbuf.str());
+ }
+}
+
+void IdoMysqlConnection::DeactivateObject(const DbObject::Ptr& dbobj)
+{
+ if (IsPaused())
+ return;
+
+#ifdef I2_DEBUG /* I2_DEBUG */
+ Log(LogDebug, "IdoMysqlConnection")
+ << "Scheduling object deactivation task for '" << dbobj->GetName1() << "!" << dbobj->GetName2() << "'.";
+#endif /* I2_DEBUG */
+
+ m_QueryQueue.Enqueue([this, dbobj]() { InternalDeactivateObject(dbobj); }, PriorityNormal);
+}
+
+void IdoMysqlConnection::InternalDeactivateObject(const DbObject::Ptr& dbobj)
+{
+ AssertOnWorkQueue();
+
+ if (IsPaused())
+ return;
+
+ if (!GetConnected())
+ return;
+
+ DbReference dbref = GetObjectID(dbobj);
+
+ if (!dbref.IsValid())
+ return;
+
+ std::ostringstream qbuf;
+ qbuf << "UPDATE " + GetTablePrefix() + "objects SET is_active = 0 WHERE object_id = " << static_cast<long>(dbref);
+ IncreasePendingQueries(1);
+ AsyncQuery(qbuf.str());
+
+ /* Note that we're _NOT_ clearing the db refs via SetReference/SetConfigUpdate/SetStatusUpdate
+ * because the object is still in the database. */
+
+ SetObjectActive(dbobj, false);
+}
+
+bool IdoMysqlConnection::FieldToEscapedString(const String& key, const Value& value, Value *result)
+{
+ if (key == "instance_id") {
+ *result = static_cast<long>(m_InstanceID);
+ return true;
+ } else if (key == "session_token") {
+ *result = GetSessionToken();
+ return true;
+ }
+
+ Value rawvalue = DbValue::ExtractValue(value);
+
+ if (rawvalue.GetType() == ValueEmpty) {
+ *result = "NULL";
+ } else if (rawvalue.IsObjectType<ConfigObject>()) {
+ DbObject::Ptr dbobjcol = DbObject::GetOrCreateByObject(rawvalue);
+
+ if (!dbobjcol) {
+ *result = 0;
+ return true;
+ }
+
+ if (!IsIDCacheValid())
+ return false;
+
+ DbReference dbrefcol;
+
+ if (DbValue::IsObjectInsertID(value)) {
+ dbrefcol = GetInsertID(dbobjcol);
+
+ if (!dbrefcol.IsValid())
+ return false;
+ } else {
+ dbrefcol = GetObjectID(dbobjcol);
+
+ if (!dbrefcol.IsValid()) {
+ InternalActivateObject(dbobjcol);
+
+ dbrefcol = GetObjectID(dbobjcol);
+
+ if (!dbrefcol.IsValid())
+ return false;
+ }
+ }
+
+ *result = static_cast<long>(dbrefcol);
+ } else if (DbValue::IsTimestamp(value)) {
+ long ts = rawvalue;
+ std::ostringstream msgbuf;
+ msgbuf << "FROM_UNIXTIME(" << ts << ")";
+ *result = Value(msgbuf.str());
+ } else if (DbValue::IsObjectInsertID(value)) {
+ auto id = static_cast<long>(rawvalue);
+
+ if (id <= 0)
+ return false;
+
+ *result = id;
+ return true;
+ } else {
+ Value fvalue;
+
+ if (rawvalue.IsBoolean())
+ fvalue = Convert::ToLong(rawvalue);
+ else
+ fvalue = rawvalue;
+
+ *result = "'" + Escape(fvalue) + "'";
+ }
+
+ return true;
+}
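
FieldToEscapedString above turns an abstract field value into a SQL literal: empty values become NULL, timestamps become FROM_UNIXTIME(...), object references are replaced by their numeric IDs, and everything else is quoted after escaping. A reduced standalone sketch of those rules, with a naive quote-doubling escape standing in for mysql_real_escape_string and the object-reference case left out:

#include <cstdio>
#include <string>

// Naive stand-in for the real escaping; only doubles single quotes.
std::string EscapeNaive(const std::string& s)
{
	std::string out;
	for (char c : s) {
		if (c == '\'')
			out += '\'';
		out += c;
	}
	return out;
}

// Reduced literal rules: empty -> NULL, timestamp -> FROM_UNIXTIME(...), else quoted string.
std::string ToSqlLiteral(const std::string& value, bool isTimestamp)
{
	if (value.empty())
		return "NULL";

	if (isTimestamp)
		return "FROM_UNIXTIME(" + value + ")";

	return "'" + EscapeNaive(value) + "'";
}

int main()
{
	std::printf("%s\n", ToSqlLiteral("", false).c_str());          // NULL
	std::printf("%s\n", ToSqlLiteral("1700000000", true).c_str()); // FROM_UNIXTIME(1700000000)
	std::printf("%s\n", ToSqlLiteral("it's fine", false).c_str()); // 'it''s fine'
}
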
+
+void IdoMysqlConnection::ExecuteQuery(const DbQuery& query)
+{
+ if (IsPaused() && GetPauseCalled())
+ return;
+
+ ASSERT(query.Category != DbCatInvalid);
+
+#ifdef I2_DEBUG /* I2_DEBUG */
+ Log(LogDebug, "IdoMysqlConnection")
+ << "Scheduling execute query task, type " << query.Type << ", table '" << query.Table << "'.";
+#endif /* I2_DEBUG */
+
+ IncreasePendingQueries(1);
+ m_QueryQueue.Enqueue([this, query]() { InternalExecuteQuery(query, -1); }, query.Priority, true);
+}
+
+void IdoMysqlConnection::ExecuteMultipleQueries(const std::vector<DbQuery>& queries)
+{
+ if (IsPaused())
+ return;
+
+ if (queries.empty())
+ return;
+
+#ifdef I2_DEBUG /* I2_DEBUG */
+ Log(LogDebug, "IdoMysqlConnection")
+ << "Scheduling multiple execute query task, type " << queries[0].Type << ", table '" << queries[0].Table << "'.";
+#endif /* I2_DEBUG */
+
+ IncreasePendingQueries(queries.size());
+ m_QueryQueue.Enqueue([this, queries]() { InternalExecuteMultipleQueries(queries); }, queries[0].Priority, true);
+}
+
+bool IdoMysqlConnection::CanExecuteQuery(const DbQuery& query)
+{
+ if (query.Object && !IsIDCacheValid())
+ return false;
+
+ if (query.WhereCriteria) {
+ ObjectLock olock(query.WhereCriteria);
+ Value value;
+
+ for (const Dictionary::Pair& kv : query.WhereCriteria) {
+ if (!FieldToEscapedString(kv.first, kv.second, &value))
+ return false;
+ }
+ }
+
+ if (query.Fields) {
+ ObjectLock olock(query.Fields);
+
+ for (const Dictionary::Pair& kv : query.Fields) {
+ Value value;
+
+ if (!FieldToEscapedString(kv.first, kv.second, &value))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void IdoMysqlConnection::InternalExecuteMultipleQueries(const std::vector<DbQuery>& queries)
+{
+ AssertOnWorkQueue();
+
+ if (IsPaused()) {
+ DecreasePendingQueries(queries.size());
+ return;
+ }
+
+ if (!GetConnected()) {
+ DecreasePendingQueries(queries.size());
+ return;
+ }
+
+ for (const DbQuery& query : queries) {
+ ASSERT(query.Type == DbQueryNewTransaction || query.Category != DbCatInvalid);
+
+ if (!CanExecuteQuery(query)) {
+
+#ifdef I2_DEBUG /* I2_DEBUG */
+ Log(LogDebug, "IdoMysqlConnection")
+ << "Scheduling multiple execute query task again: Cannot execute query now. Type '"
+ << query.Type << "', table '" << query.Table << "', queue size: '" << GetPendingQueryCount() << "'.";
+#endif /* I2_DEBUG */
+
+ m_QueryQueue.Enqueue([this, queries]() { InternalExecuteMultipleQueries(queries); }, query.Priority);
+ return;
+ }
+ }
+
+ for (const DbQuery& query : queries) {
+ InternalExecuteQuery(query);
+ }
+}
+
+void IdoMysqlConnection::InternalExecuteQuery(const DbQuery& query, int typeOverride)
+{
+ AssertOnWorkQueue();
+
+ if (IsPaused() && GetPauseCalled()) {
+ DecreasePendingQueries(1);
+ return;
+ }
+
+ if (!GetConnected()) {
+ DecreasePendingQueries(1);
+ return;
+ }
+
+ if (query.Type == DbQueryNewTransaction) {
+ DecreasePendingQueries(1);
+ InternalNewTransaction();
+ return;
+ }
+
+ /* check whether we're allowed to execute the query first */
+ if (GetCategoryFilter() != DbCatEverything && (query.Category & GetCategoryFilter()) == 0) {
+ DecreasePendingQueries(1);
+ return;
+ }
+
+ if (query.Object && query.Object->GetObject()->GetExtension("agent_check").ToBool()) {
+ DecreasePendingQueries(1);
+ return;
+ }
+
+ /* check if there are missing object/insert ids and re-enqueue the query */
+ if (!CanExecuteQuery(query)) {
+
+#ifdef I2_DEBUG /* I2_DEBUG */
+ Log(LogDebug, "IdoMysqlConnection")
+ << "Scheduling execute query task again: Cannot execute query now. Type '"
+ << typeOverride << "', table '" << query.Table << "', queue size: '" << GetPendingQueryCount() << "'.";
+#endif /* I2_DEBUG */
+
+ m_QueryQueue.Enqueue([this, query, typeOverride]() { InternalExecuteQuery(query, typeOverride); }, query.Priority);
+ return;
+ }
+
+ std::ostringstream qbuf, where;
+ int type;
+
+ if (query.WhereCriteria) {
+ where << " WHERE ";
+
+ ObjectLock olock(query.WhereCriteria);
+ Value value;
+ bool first = true;
+
+ for (const Dictionary::Pair& kv : query.WhereCriteria) {
+ if (!FieldToEscapedString(kv.first, kv.second, &value)) {
+
+#ifdef I2_DEBUG /* I2_DEBUG */
+ Log(LogDebug, "IdoMysqlConnection")
+ << "Scheduling execute query task again: Cannot execute query now. Type '"
+ << typeOverride << "', table '" << query.Table << "', queue size: '" << GetPendingQueryCount() << "'.";
+#endif /* I2_DEBUG */
+
+ m_QueryQueue.Enqueue([this, query]() { InternalExecuteQuery(query, -1); }, query.Priority);
+ return;
+ }
+
+ if (!first)
+ where << " AND ";
+
+ where << kv.first << " = " << value;
+
+ if (first)
+ first = false;
+ }
+ }
+
+ type = (typeOverride != -1) ? typeOverride : query.Type;
+
+ bool upsert = false;
+
+ if ((type & DbQueryInsert) && (type & DbQueryUpdate)) {
+ bool hasid = false;
+
+ if (query.Object) {
+ if (query.ConfigUpdate)
+ hasid = GetConfigUpdate(query.Object);
+ else if (query.StatusUpdate)
+ hasid = GetStatusUpdate(query.Object);
+ }
+
+ if (!hasid)
+ upsert = true;
+
+ type = DbQueryUpdate;
+ }
+
+ if ((type & DbQueryInsert) && (type & DbQueryDelete)) {
+ std::ostringstream qdel;
+ qdel << "DELETE FROM " << GetTablePrefix() << query.Table << where.str();
+ IncreasePendingQueries(1);
+ AsyncQuery(qdel.str());
+
+ type = DbQueryInsert;
+ }
+
+ switch (type) {
+ case DbQueryInsert:
+ qbuf << "INSERT INTO " << GetTablePrefix() << query.Table;
+ break;
+ case DbQueryUpdate:
+ qbuf << "UPDATE " << GetTablePrefix() << query.Table << " SET";
+ break;
+ case DbQueryDelete:
+ qbuf << "DELETE FROM " << GetTablePrefix() << query.Table;
+ break;
+ default:
+ VERIFY(!"Invalid query type.");
+ }
+
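+	/* Build the column/value lists (INSERT) or the SET clause (UPDATE) from the query's fields. */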
+ if (type == DbQueryInsert || type == DbQueryUpdate) {
+ std::ostringstream colbuf, valbuf;
+
+ if (type == DbQueryUpdate && query.Fields->GetLength() == 0)
+ return;
+
+ ObjectLock olock(query.Fields);
+
+ bool first = true;
+ for (const Dictionary::Pair& kv : query.Fields) {
+ Value value;
+
+ if (!FieldToEscapedString(kv.first, kv.second, &value)) {
+
+#ifdef I2_DEBUG /* I2_DEBUG */
+ Log(LogDebug, "IdoMysqlConnection")
+ << "Scheduling execute query task again: Cannot extract required INSERT/UPDATE fields, key '"
+ << kv.first << "', val '" << kv.second << "', type " << typeOverride << ", table '" << query.Table << "'.";
+#endif /* I2_DEBUG */
+
+ m_QueryQueue.Enqueue([this, query]() { InternalExecuteQuery(query, -1); }, query.Priority);
+ return;
+ }
+
+ if (type == DbQueryInsert) {
+ if (!first) {
+ colbuf << ", ";
+ valbuf << ", ";
+ }
+
+ colbuf << kv.first;
+ valbuf << value;
+ } else {
+ if (!first)
+ qbuf << ", ";
+
+ qbuf << " " << kv.first << " = " << value;
+ }
+
+ if (first)
+ first = false;
+ }
+
+ if (type == DbQueryInsert)
+ qbuf << " (" << colbuf.str() << ") VALUES (" << valbuf.str() << ")";
+ }
+
+ if (type != DbQueryInsert)
+ qbuf << where.str();
+
+ AsyncQuery(qbuf.str(), [this, query, type, upsert](const IdoMysqlResult&) { FinishExecuteQuery(query, type, upsert); });
+}
+
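+/*
+ * Completion handler for InternalExecuteQuery(). If an upsert UPDATE did not
+ * affect any rows, the query is rescheduled as DELETE + INSERT. For INSERT
+ * queries the last insert ID is recorded for config/status updates and for
+ * pending notification IDs.
+ */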
+void IdoMysqlConnection::FinishExecuteQuery(const DbQuery& query, int type, bool upsert)
+{
+ if (upsert && GetAffectedRows() == 0) {
+
+#ifdef I2_DEBUG /* I2_DEBUG */
+ Log(LogDebug, "IdoMysqlConnection")
+ << "Rescheduling DELETE/INSERT query: Upsert UPDATE did not affect rows, type " << type << ", table '" << query.Table << "'.";
+#endif /* I2_DEBUG */
+
+ IncreasePendingQueries(1);
+ m_QueryQueue.Enqueue([this, query]() { InternalExecuteQuery(query, DbQueryDelete | DbQueryInsert); }, query.Priority);
+
+ return;
+ }
+
+ if (type == DbQueryInsert && query.Object) {
+ if (query.ConfigUpdate) {
+ SetInsertID(query.Object, GetLastInsertID());
+ SetConfigUpdate(query.Object, true);
+ } else if (query.StatusUpdate)
+ SetStatusUpdate(query.Object, true);
+ }
+
+ if (type == DbQueryInsert && query.Table == "notifications" && query.NotificationInsertID)
+ query.NotificationInsertID->SetValue(static_cast<long>(GetLastInsertID()));
+}
+
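+/*
+ * Schedules a low-priority cleanup query which deletes rows from the given
+ * table whose time column is older than max_age (a UNIX timestamp).
+ */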
+void IdoMysqlConnection::CleanUpExecuteQuery(const String& table, const String& time_column, double max_age)
+{
+ if (IsPaused())
+ return;
+
+#ifdef I2_DEBUG /* I2_DEBUG */
+ Log(LogDebug, "IdoMysqlConnection")
+ << "Rescheduling cleanup query for table '" << table << "' and column '"
+ << time_column << "'. max_age is set to '" << max_age << "'.";
+#endif /* I2_DEBUG */
+
+ IncreasePendingQueries(1);
+ m_QueryQueue.Enqueue([this, table, time_column, max_age]() { InternalCleanUpExecuteQuery(table, time_column, max_age); }, PriorityLow, true);
+}
+
+void IdoMysqlConnection::InternalCleanUpExecuteQuery(const String& table, const String& time_column, double max_age)
+{
+ AssertOnWorkQueue();
+
+ if (IsPaused()) {
+ DecreasePendingQueries(1);
+ return;
+ }
+
+ if (!GetConnected()) {
+ DecreasePendingQueries(1);
+ return;
+ }
+
+ AsyncQuery("DELETE FROM " + GetTablePrefix() + table + " WHERE instance_id = " +
+ Convert::ToString(static_cast<long>(m_InstanceID)) + " AND " + time_column +
+ " < FROM_UNIXTIME(" + Convert::ToString(static_cast<long>(max_age)) + ")");
+}
+
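+/*
+ * Preloads the insert ID and config hash for all objects of the given type so
+ * that subsequent queries can reference the existing rows.
+ */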
+void IdoMysqlConnection::FillIDCache(const DbType::Ptr& type)
+{
+ String query = "SELECT " + type->GetIDColumn() + " AS object_id, " + type->GetTable() + "_id, config_hash FROM " + GetTablePrefix() + type->GetTable() + "s";
+ IdoMysqlResult result = Query(query);
+
+ Dictionary::Ptr row;
+
+ while ((row = FetchRow(result))) {
+ DbReference dbref(row->Get("object_id"));
+ SetInsertID(type, dbref, DbReference(row->Get(type->GetTable() + "_id")));
+ SetConfigHash(type, dbref, row->Get("config_hash"));
+ }
+}
+
+int IdoMysqlConnection::GetPendingQueryCount() const
+{
+ return m_QueryQueue.GetLength();
+}
diff --git a/lib/db_ido_mysql/idomysqlconnection.hpp b/lib/db_ido_mysql/idomysqlconnection.hpp
new file mode 100644
index 0000000..5a5c120
--- /dev/null
+++ b/lib/db_ido_mysql/idomysqlconnection.hpp
@@ -0,0 +1,114 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef IDOMYSQLCONNECTION_H
+#define IDOMYSQLCONNECTION_H
+
+#include "db_ido_mysql/idomysqlconnection-ti.hpp"
+#include "mysql_shim/mysqlinterface.hpp"
+#include "base/array.hpp"
+#include "base/timer.hpp"
+#include "base/workqueue.hpp"
+#include "base/library.hpp"
+#include <cstdint>
+
+namespace icinga
+{
+
+typedef std::shared_ptr<MYSQL_RES> IdoMysqlResult;
+
+typedef std::function<void (const IdoMysqlResult&)> IdoAsyncCallback;
+
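+/* A queued asynchronous query together with the callback invoked with its result set. */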
+struct IdoAsyncQuery
+{
+ String Query;
+ IdoAsyncCallback Callback;
+};
+
+/**
+ * An IDO MySQL database connection.
+ *
+ * @ingroup ido
+ */
+class IdoMysqlConnection final : public ObjectImpl<IdoMysqlConnection>
+{
+public:
+ DECLARE_OBJECT(IdoMysqlConnection);
+ DECLARE_OBJECTNAME(IdoMysqlConnection);
+
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+
+ const char * GetLatestSchemaVersion() const noexcept override;
+ const char * GetCompatSchemaVersion() const noexcept override;
+
+ int GetPendingQueryCount() const override;
+
+protected:
+ void OnConfigLoaded() override;
+ void Resume() override;
+ void Pause() override;
+
+ void ActivateObject(const DbObject::Ptr& dbobj) override;
+ void DeactivateObject(const DbObject::Ptr& dbobj) override;
+ void ExecuteQuery(const DbQuery& query) override;
+ void ExecuteMultipleQueries(const std::vector<DbQuery>& queries) override;
+ void CleanUpExecuteQuery(const String& table, const String& time_key, double time_value) override;
+ void FillIDCache(const DbType::Ptr& type) override;
+ void NewTransaction() override;
+ void Disconnect() override;
+
+private:
+ DbReference m_InstanceID;
+
+ Library m_Library;
+ std::unique_ptr<MysqlInterface, MysqlInterfaceDeleter> m_Mysql;
+
+ MYSQL m_Connection;
+ int m_AffectedRows;
+ unsigned int m_MaxPacketSize;
+
+ std::vector<IdoAsyncQuery> m_AsyncQueries;
+ uint_fast32_t m_UncommittedAsyncQueries = 0;
+
+ Timer::Ptr m_ReconnectTimer;
+ Timer::Ptr m_TxTimer;
+
+ IdoMysqlResult Query(const String& query);
+ DbReference GetLastInsertID();
+ int GetAffectedRows();
+ String Escape(const String& s);
+ Dictionary::Ptr FetchRow(const IdoMysqlResult& result);
+ void DiscardRows(const IdoMysqlResult& result);
+
+ void AsyncQuery(const String& query, const IdoAsyncCallback& callback = IdoAsyncCallback());
+ void FinishAsyncQueries();
+
+ bool FieldToEscapedString(const String& key, const Value& value, Value *result);
+ void InternalActivateObject(const DbObject::Ptr& dbobj);
+ void InternalDeactivateObject(const DbObject::Ptr& dbobj);
+
+ void Reconnect();
+
+ void AssertOnWorkQueue();
+
+ void ReconnectTimerHandler();
+
+ bool CanExecuteQuery(const DbQuery& query);
+
+ void InternalExecuteQuery(const DbQuery& query, int typeOverride = -1);
+ void InternalExecuteMultipleQueries(const std::vector<DbQuery>& queries);
+
+ void FinishExecuteQuery(const DbQuery& query, int type, bool upsert);
+ void InternalCleanUpExecuteQuery(const String& table, const String& time_key, double time_value);
+ void InternalNewTransaction();
+
+ void ClearTableBySession(const String& table);
+ void ClearTablesBySession();
+
+ void ExceptionHandler(boost::exception_ptr exp);
+
+ void FinishConnect(double startTime);
+};
+
+}
+
+#endif /* IDOMYSQLCONNECTION_H */
diff --git a/lib/db_ido_mysql/idomysqlconnection.ti b/lib/db_ido_mysql/idomysqlconnection.ti
new file mode 100644
index 0000000..681148f
--- /dev/null
+++ b/lib/db_ido_mysql/idomysqlconnection.ti
@@ -0,0 +1,42 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido/dbconnection.hpp"
+
+library db_ido_mysql;
+
+namespace icinga
+{
+
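+/*
+ * Illustrative example (not part of the type definition): in the Icinga 2
+ * configuration these attributes are typically set on an object such as
+ *
+ *   object IdoMysqlConnection "ido-mysql" {
+ *     host = "localhost"
+ *     port = 3306
+ *     user = "icinga"
+ *     password = "icinga"
+ *     database = "icinga"
+ *   }
+ */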
+class IdoMysqlConnection : DbConnection
+{
+ activation_priority 100;
+
+ [config] String host {
+ default {{{ return "localhost"; }}}
+ };
+ [config] int port {
+ default {{{ return 3306; }}}
+ };
+ [config] String socket_path;
+ [config] String user {
+ default {{{ return "icinga"; }}}
+ };
+ [config, no_user_view, no_user_modify] String password {
+ default {{{ return "icinga"; }}}
+ };
+ [config] String database {
+ default {{{ return "icinga"; }}}
+ };
+ [config] bool enable_ssl;
+ [config] String ssl_key;
+ [config] String ssl_cert;
+ [config] String ssl_ca;
+ [config] String ssl_capath;
+ [config] String ssl_cipher;
+ [config] String instance_name {
+ default {{{ return "default"; }}}
+ };
+ [config] String instance_description;
+};
+
+}
diff --git a/lib/db_ido_mysql/schema/mysql.sql b/lib/db_ido_mysql/schema/mysql.sql
new file mode 100644
index 0000000..020ba3c
--- /dev/null
+++ b/lib/db_ido_mysql/schema/mysql.sql
@@ -0,0 +1,1666 @@
+-- --------------------------------------------------------
+-- mysql.sql
+-- DB definition for IDO MySQL
+--
+-- Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+--
+-- --------------------------------------------------------
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+
+--
+-- Database: icinga
+--
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_acknowledgements
+--
+
+CREATE TABLE IF NOT EXISTS icinga_acknowledgements (
+ acknowledgement_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ entry_time timestamp NULL,
+ entry_time_usec int default 0,
+ acknowledgement_type smallint default 0,
+ object_id bigint unsigned default 0,
+ state smallint default 0,
+ author_name varchar(64) character set latin1 default '',
+ comment_data TEXT character set latin1,
+ is_sticky smallint default 0,
+ persistent_comment smallint default 0,
+ notify_contacts smallint default 0,
+ end_time timestamp NULL,
+ PRIMARY KEY (acknowledgement_id)
+) ENGINE=InnoDB COMMENT='Current and historical host and service acknowledgements';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_commands
+--
+
+CREATE TABLE IF NOT EXISTS icinga_commands (
+ command_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ config_type smallint default 0,
+ object_id bigint unsigned default 0,
+ command_line TEXT character set latin1,
+ config_hash varchar(64) DEFAULT NULL,
+ PRIMARY KEY (command_id),
+ UNIQUE KEY instance_id (instance_id,object_id,config_type)
+) ENGINE=InnoDB COMMENT='Command definitions';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_commenthistory
+--
+
+CREATE TABLE IF NOT EXISTS icinga_commenthistory (
+ commenthistory_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ entry_time timestamp NULL,
+ entry_time_usec int default 0,
+ comment_type smallint default 0,
+ entry_type smallint default 0,
+ object_id bigint unsigned default 0,
+ comment_time timestamp NULL,
+ internal_comment_id bigint unsigned default 0,
+ author_name varchar(64) character set latin1 default '',
+ comment_data TEXT character set latin1,
+ is_persistent smallint default 0,
+ comment_source smallint default 0,
+ expires smallint default 0,
+ expiration_time timestamp NULL,
+ deletion_time timestamp NULL,
+ deletion_time_usec int default 0,
+ name TEXT character set latin1 default NULL,
+ PRIMARY KEY (commenthistory_id)
+) ENGINE=InnoDB COMMENT='Historical host and service comments';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_comments
+--
+
+CREATE TABLE IF NOT EXISTS icinga_comments (
+ comment_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ entry_time timestamp NULL,
+ entry_time_usec int default 0,
+ comment_type smallint default 0,
+ entry_type smallint default 0,
+ object_id bigint unsigned default 0,
+ comment_time timestamp NULL,
+ internal_comment_id bigint unsigned default 0,
+ author_name varchar(64) character set latin1 default '',
+ comment_data TEXT character set latin1,
+ is_persistent smallint default 0,
+ comment_source smallint default 0,
+ expires smallint default 0,
+ expiration_time timestamp NULL,
+ name TEXT character set latin1 default NULL,
+ session_token int default NULL,
+ PRIMARY KEY (comment_id)
+) ENGINE=InnoDB COMMENT='User comments on Icinga objects';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_configfiles
+--
+
+CREATE TABLE IF NOT EXISTS icinga_configfiles (
+ configfile_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ configfile_type smallint default 0,
+ configfile_path varchar(255) character set latin1 default '',
+ PRIMARY KEY (configfile_id),
+ UNIQUE KEY instance_id (instance_id,configfile_type,configfile_path)
+) ENGINE=InnoDB COMMENT='Configuration files';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_configfilevariables
+--
+
+CREATE TABLE IF NOT EXISTS icinga_configfilevariables (
+ configfilevariable_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ configfile_id bigint unsigned default 0,
+ varname varchar(64) character set latin1 default '',
+ varvalue TEXT character set latin1,
+ PRIMARY KEY (configfilevariable_id)
+) ENGINE=InnoDB COMMENT='Configuration file variables';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_conninfo
+--
+
+CREATE TABLE IF NOT EXISTS icinga_conninfo (
+ conninfo_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ agent_name varchar(32) character set latin1 default '',
+ agent_version varchar(32) character set latin1 default '',
+ disposition varchar(32) character set latin1 default '',
+ connect_source varchar(32) character set latin1 default '',
+ connect_type varchar(32) character set latin1 default '',
+ connect_time timestamp NULL,
+ disconnect_time timestamp NULL,
+ last_checkin_time timestamp NULL,
+ data_start_time timestamp NULL,
+ data_end_time timestamp NULL,
+ bytes_processed bigint unsigned default '0',
+ lines_processed bigint unsigned default '0',
+ entries_processed bigint unsigned default '0',
+ PRIMARY KEY (conninfo_id)
+) ENGINE=InnoDB COMMENT='IDO2DB daemon connection information';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_contactgroups
+--
+
+CREATE TABLE IF NOT EXISTS icinga_contactgroups (
+ contactgroup_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ config_type smallint default 0,
+ contactgroup_object_id bigint unsigned default 0,
+ alias varchar(255) character set latin1 default '',
+ config_hash varchar(64) DEFAULT NULL,
+ PRIMARY KEY (contactgroup_id),
+ UNIQUE KEY instance_id (instance_id,config_type,contactgroup_object_id)
+) ENGINE=InnoDB COMMENT='Contactgroup definitions';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_contactgroup_members
+--
+
+CREATE TABLE IF NOT EXISTS icinga_contactgroup_members (
+ contactgroup_member_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ contactgroup_id bigint unsigned default 0,
+ contact_object_id bigint unsigned default 0,
+ PRIMARY KEY (contactgroup_member_id)
+) ENGINE=InnoDB COMMENT='Contactgroup members';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_contactnotificationmethods
+--
+
+CREATE TABLE IF NOT EXISTS icinga_contactnotificationmethods (
+ contactnotificationmethod_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ contactnotification_id bigint unsigned default 0,
+ start_time timestamp NULL,
+ start_time_usec int default 0,
+ end_time timestamp NULL,
+ end_time_usec int default 0,
+ command_object_id bigint unsigned default 0,
+ command_args TEXT character set latin1,
+ PRIMARY KEY (contactnotificationmethod_id),
+ UNIQUE KEY instance_id (instance_id,contactnotification_id,start_time,start_time_usec)
+) ENGINE=InnoDB COMMENT='Historical record of contact notification methods';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_contactnotifications
+--
+
+CREATE TABLE IF NOT EXISTS icinga_contactnotifications (
+ contactnotification_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ notification_id bigint unsigned default 0,
+ contact_object_id bigint unsigned default 0,
+ start_time timestamp NULL,
+ start_time_usec int default 0,
+ end_time timestamp NULL,
+ end_time_usec int default 0,
+ PRIMARY KEY (contactnotification_id),
+ UNIQUE KEY instance_id (instance_id,contact_object_id,start_time,start_time_usec)
+) ENGINE=InnoDB COMMENT='Historical record of contact notifications';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_contacts
+--
+
+CREATE TABLE IF NOT EXISTS icinga_contacts (
+ contact_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ config_type smallint default 0,
+ contact_object_id bigint unsigned default 0,
+ alias varchar(255) character set latin1 default '',
+ email_address varchar(255) character set latin1 default '',
+ pager_address varchar(64) character set latin1 default '',
+ host_timeperiod_object_id bigint unsigned default 0,
+ service_timeperiod_object_id bigint unsigned default 0,
+ host_notifications_enabled smallint default 0,
+ service_notifications_enabled smallint default 0,
+ can_submit_commands smallint default 0,
+ notify_service_recovery smallint default 0,
+ notify_service_warning smallint default 0,
+ notify_service_unknown smallint default 0,
+ notify_service_critical smallint default 0,
+ notify_service_flapping smallint default 0,
+ notify_service_downtime smallint default 0,
+ notify_host_recovery smallint default 0,
+ notify_host_down smallint default 0,
+ notify_host_unreachable smallint default 0,
+ notify_host_flapping smallint default 0,
+ notify_host_downtime smallint default 0,
+ config_hash varchar(64) DEFAULT NULL,
+ PRIMARY KEY (contact_id),
+ UNIQUE KEY instance_id (instance_id,config_type,contact_object_id)
+) ENGINE=InnoDB COMMENT='Contact definitions';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_contactstatus
+--
+
+CREATE TABLE IF NOT EXISTS icinga_contactstatus (
+ contactstatus_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ contact_object_id bigint unsigned default 0,
+ status_update_time timestamp NULL,
+ host_notifications_enabled smallint default 0,
+ service_notifications_enabled smallint default 0,
+ last_host_notification timestamp NULL,
+ last_service_notification timestamp NULL,
+ modified_attributes int default 0,
+ modified_host_attributes int default 0,
+ modified_service_attributes int default 0,
+ PRIMARY KEY (contactstatus_id),
+ UNIQUE KEY contact_object_id (contact_object_id)
+) ENGINE=InnoDB COMMENT='Contact status';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_contact_addresses
+--
+
+CREATE TABLE IF NOT EXISTS icinga_contact_addresses (
+ contact_address_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ contact_id bigint unsigned default 0,
+ address_number smallint default 0,
+ address varchar(255) character set latin1 default '',
+ PRIMARY KEY (contact_address_id),
+ UNIQUE KEY contact_id (contact_id,address_number)
+) ENGINE=InnoDB COMMENT='Contact addresses';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_contact_notificationcommands
+--
+
+CREATE TABLE IF NOT EXISTS icinga_contact_notificationcommands (
+ contact_notificationcommand_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ contact_id bigint unsigned default 0,
+ notification_type smallint default 0,
+ command_object_id bigint unsigned default 0,
+ command_args varchar(255) character set latin1 default '',
+ PRIMARY KEY (contact_notificationcommand_id),
+ UNIQUE KEY contact_id (contact_id,notification_type,command_object_id,command_args)
+) ENGINE=InnoDB COMMENT='Contact host and service notification commands';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_customvariables
+--
+
+CREATE TABLE IF NOT EXISTS icinga_customvariables (
+ customvariable_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ object_id bigint unsigned default 0,
+ config_type smallint default 0,
+ has_been_modified smallint default 0,
+ varname varchar(255) character set latin1 collate latin1_general_cs default NULL,
+ varvalue TEXT character set latin1,
+ is_json smallint default 0,
+ PRIMARY KEY (customvariable_id),
+ UNIQUE KEY object_id_2 (object_id,config_type,varname),
+ KEY varname (varname)
+) ENGINE=InnoDB COMMENT='Custom variables';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_customvariablestatus
+--
+
+CREATE TABLE IF NOT EXISTS icinga_customvariablestatus (
+ customvariablestatus_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ object_id bigint unsigned default 0,
+ status_update_time timestamp NULL,
+ has_been_modified smallint default 0,
+ varname varchar(255) character set latin1 collate latin1_general_cs default NULL,
+ varvalue TEXT character set latin1,
+ is_json smallint default 0,
+ PRIMARY KEY (customvariablestatus_id),
+ UNIQUE KEY object_id_2 (object_id,varname),
+ KEY varname (varname)
+) ENGINE=InnoDB COMMENT='Custom variable status information';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_dbversion
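+-- Icinga 2 compares the version stored here with the schema version it requires on connect.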
+--
+
+CREATE TABLE IF NOT EXISTS icinga_dbversion (
+ dbversion_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ name varchar(10) character set latin1 default '',
+ version varchar(10) character set latin1 default '',
+ create_time timestamp NULL,
+ modify_time timestamp NULL,
+ PRIMARY KEY (dbversion_id),
+ UNIQUE KEY dbversion (name)
+) ENGINE=InnoDB;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_downtimehistory
+--
+
+CREATE TABLE IF NOT EXISTS icinga_downtimehistory (
+ downtimehistory_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ downtime_type smallint default 0,
+ object_id bigint unsigned default 0,
+ entry_time timestamp NULL,
+ author_name varchar(64) character set latin1 default '',
+ comment_data TEXT character set latin1,
+ internal_downtime_id bigint unsigned default 0,
+ triggered_by_id bigint unsigned default 0,
+ is_fixed smallint default 0,
+ duration bigint(20) default 0,
+ scheduled_start_time timestamp NULL,
+ scheduled_end_time timestamp NULL,
+ was_started smallint default 0,
+ actual_start_time timestamp NULL,
+ actual_start_time_usec int default 0,
+ actual_end_time timestamp NULL,
+ actual_end_time_usec int default 0,
+ was_cancelled smallint default 0,
+ is_in_effect smallint default 0,
+ trigger_time timestamp NULL,
+ name TEXT character set latin1 default NULL,
+ PRIMARY KEY (downtimehistory_id)
+) ENGINE=InnoDB COMMENT='Historical scheduled host and service downtime';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_eventhandlers
+--
+
+CREATE TABLE IF NOT EXISTS icinga_eventhandlers (
+ eventhandler_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ eventhandler_type smallint default 0,
+ object_id bigint unsigned default 0,
+ state smallint default 0,
+ state_type smallint default 0,
+ start_time timestamp NULL,
+ start_time_usec int default 0,
+ end_time timestamp NULL,
+ end_time_usec int default 0,
+ command_object_id bigint unsigned default 0,
+ command_args TEXT character set latin1,
+ command_line TEXT character set latin1,
+ timeout smallint default 0,
+ early_timeout smallint default 0,
+ execution_time double default '0',
+ return_code smallint default 0,
+ output TEXT character set latin1,
+ long_output TEXT,
+ PRIMARY KEY (eventhandler_id),
+ UNIQUE KEY instance_id (instance_id,object_id,start_time,start_time_usec)
+) ENGINE=InnoDB COMMENT='Historical host and service event handlers';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_externalcommands
+--
+
+CREATE TABLE IF NOT EXISTS icinga_externalcommands (
+ externalcommand_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ entry_time timestamp NULL,
+ command_type smallint default 0,
+ command_name varchar(128) character set latin1 default '',
+ command_args TEXT character set latin1,
+ PRIMARY KEY (externalcommand_id)
+) ENGINE=InnoDB COMMENT='Historical record of processed external commands';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_flappinghistory
+--
+
+CREATE TABLE IF NOT EXISTS icinga_flappinghistory (
+ flappinghistory_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ event_time timestamp NULL,
+ event_time_usec int default 0,
+ event_type smallint default 0,
+ reason_type smallint default 0,
+ flapping_type smallint default 0,
+ object_id bigint unsigned default 0,
+ percent_state_change double default '0',
+ low_threshold double default '0',
+ high_threshold double default '0',
+ comment_time timestamp NULL,
+ internal_comment_id bigint unsigned default 0,
+ PRIMARY KEY (flappinghistory_id)
+) ENGINE=InnoDB COMMENT='Current and historical record of host and service flapping';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_hostchecks
+--
+
+CREATE TABLE IF NOT EXISTS icinga_hostchecks (
+ hostcheck_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ host_object_id bigint unsigned default 0,
+ check_type smallint default 0,
+ is_raw_check smallint default 0,
+ current_check_attempt smallint default 0,
+ max_check_attempts smallint default 0,
+ state smallint default 0,
+ state_type smallint default 0,
+ start_time timestamp NULL,
+ start_time_usec int default 0,
+ end_time timestamp NULL,
+ end_time_usec int default 0,
+ command_object_id bigint unsigned default 0,
+ command_args TEXT character set latin1,
+ command_line TEXT character set latin1,
+ timeout smallint default 0,
+ early_timeout smallint default 0,
+ execution_time double default '0',
+ latency double default '0',
+ return_code smallint default 0,
+ output TEXT character set latin1,
+ long_output TEXT,
+ perfdata TEXT character set latin1,
+ PRIMARY KEY (hostcheck_id)
+) ENGINE=InnoDB COMMENT='Historical host checks';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_hostdependencies
+--
+
+CREATE TABLE IF NOT EXISTS icinga_hostdependencies (
+ hostdependency_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ config_type smallint default 0,
+ host_object_id bigint unsigned default 0,
+ dependent_host_object_id bigint unsigned default 0,
+ dependency_type smallint default 0,
+ inherits_parent smallint default 0,
+ timeperiod_object_id bigint unsigned default 0,
+ fail_on_up smallint default 0,
+ fail_on_down smallint default 0,
+ fail_on_unreachable smallint default 0,
+ PRIMARY KEY (hostdependency_id),
+ KEY instance_id (instance_id,config_type,host_object_id,dependent_host_object_id,dependency_type,inherits_parent,fail_on_up,fail_on_down,fail_on_unreachable)
+) ENGINE=InnoDB COMMENT='Host dependency definitions';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_hostescalations
+--
+
+CREATE TABLE IF NOT EXISTS icinga_hostescalations (
+ hostescalation_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ config_type smallint default 0,
+ host_object_id bigint unsigned default 0,
+ timeperiod_object_id bigint unsigned default 0,
+ first_notification smallint default 0,
+ last_notification smallint default 0,
+ notification_interval double default '0',
+ escalate_on_recovery smallint default 0,
+ escalate_on_down smallint default 0,
+ escalate_on_unreachable smallint default 0,
+ PRIMARY KEY (hostescalation_id),
+ UNIQUE KEY instance_id (instance_id,config_type,host_object_id,timeperiod_object_id,first_notification,last_notification)
+) ENGINE=InnoDB COMMENT='Host escalation definitions';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_hostescalation_contactgroups
+--
+
+CREATE TABLE IF NOT EXISTS icinga_hostescalation_contactgroups (
+ hostescalation_contactgroup_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ hostescalation_id bigint unsigned default 0,
+ contactgroup_object_id bigint unsigned default 0,
+ PRIMARY KEY (hostescalation_contactgroup_id),
+ UNIQUE KEY instance_id (hostescalation_id,contactgroup_object_id)
+) ENGINE=InnoDB COMMENT='Host escalation contact groups';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_hostescalation_contacts
+--
+
+CREATE TABLE IF NOT EXISTS icinga_hostescalation_contacts (
+ hostescalation_contact_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ hostescalation_id bigint unsigned default 0,
+ contact_object_id bigint unsigned default 0,
+ PRIMARY KEY (hostescalation_contact_id),
+ UNIQUE KEY instance_id (instance_id,hostescalation_id,contact_object_id)
+) ENGINE=InnoDB COMMENT='Host escalation contacts';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_hostgroups
+--
+
+CREATE TABLE IF NOT EXISTS icinga_hostgroups (
+ hostgroup_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ config_type smallint default 0,
+ hostgroup_object_id bigint unsigned default 0,
+ alias varchar(255) character set latin1 default '',
+ notes TEXT character set latin1 default NULL,
+ notes_url TEXT character set latin1 default NULL,
+ action_url TEXT character set latin1 default NULL,
+ config_hash varchar(64) DEFAULT NULL,
+ PRIMARY KEY (hostgroup_id),
+ UNIQUE KEY instance_id (instance_id,hostgroup_object_id)
+) ENGINE=InnoDB COMMENT='Hostgroup definitions';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_hostgroup_members
+--
+
+CREATE TABLE IF NOT EXISTS icinga_hostgroup_members (
+ hostgroup_member_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ hostgroup_id bigint unsigned default 0,
+ host_object_id bigint unsigned default 0,
+ PRIMARY KEY (hostgroup_member_id)
+) ENGINE=InnoDB COMMENT='Hostgroup members';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_hosts
+--
+
+CREATE TABLE IF NOT EXISTS icinga_hosts (
+ host_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ config_type smallint default 0,
+ host_object_id bigint unsigned default 0,
+ alias varchar(255) character set latin1 default '',
+ display_name varchar(255) character set latin1 collate latin1_general_cs default '',
+ address varchar(128) character set latin1 default '',
+ address6 varchar(128) character set latin1 default '',
+ check_command_object_id bigint unsigned default 0,
+ check_command_args TEXT character set latin1,
+ eventhandler_command_object_id bigint unsigned default 0,
+ eventhandler_command_args TEXT character set latin1,
+ notification_timeperiod_object_id bigint unsigned default 0,
+ check_timeperiod_object_id bigint unsigned default 0,
+ failure_prediction_options varchar(128) character set latin1 default '',
+ check_interval double default '0',
+ retry_interval double default '0',
+ max_check_attempts smallint default 0,
+ first_notification_delay double default '0',
+ notification_interval double default '0',
+ notify_on_down smallint default 0,
+ notify_on_unreachable smallint default 0,
+ notify_on_recovery smallint default 0,
+ notify_on_flapping smallint default 0,
+ notify_on_downtime smallint default 0,
+ stalk_on_up smallint default 0,
+ stalk_on_down smallint default 0,
+ stalk_on_unreachable smallint default 0,
+ flap_detection_enabled smallint default 0,
+ flap_detection_on_up smallint default 0,
+ flap_detection_on_down smallint default 0,
+ flap_detection_on_unreachable smallint default 0,
+ low_flap_threshold double default '0',
+ high_flap_threshold double default '0',
+ process_performance_data smallint default 0,
+ freshness_checks_enabled smallint default 0,
+ freshness_threshold int default 0,
+ passive_checks_enabled smallint default 0,
+ event_handler_enabled smallint default 0,
+ active_checks_enabled smallint default 0,
+ retain_status_information smallint default 0,
+ retain_nonstatus_information smallint default 0,
+ notifications_enabled smallint default 0,
+ obsess_over_host smallint default 0,
+ failure_prediction_enabled smallint default 0,
+ notes TEXT character set latin1,
+ notes_url TEXT character set latin1,
+ action_url TEXT character set latin1,
+ icon_image TEXT character set latin1,
+ icon_image_alt TEXT character set latin1,
+ vrml_image TEXT character set latin1,
+ statusmap_image TEXT character set latin1,
+ have_2d_coords smallint default 0,
+ x_2d smallint default 0,
+ y_2d smallint default 0,
+ have_3d_coords smallint default 0,
+ x_3d double default '0',
+ y_3d double default '0',
+ z_3d double default '0',
+ config_hash varchar(64) DEFAULT NULL,
+ PRIMARY KEY (host_id),
+ UNIQUE KEY instance_id (instance_id,config_type,host_object_id)
+) ENGINE=InnoDB COMMENT='Host definitions';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_hoststatus
+--
+
+CREATE TABLE IF NOT EXISTS icinga_hoststatus (
+ hoststatus_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ host_object_id bigint unsigned default 0,
+ status_update_time timestamp NULL,
+ output TEXT character set latin1,
+ long_output TEXT,
+ perfdata TEXT character set latin1,
+ check_source varchar(255) character set latin1 default '',
+ current_state smallint default 0,
+ has_been_checked smallint default 0,
+ should_be_scheduled smallint default 0,
+ current_check_attempt smallint default 0,
+ max_check_attempts smallint default 0,
+ last_check timestamp NULL,
+ next_check timestamp NULL,
+ check_type smallint default 0,
+ last_state_change timestamp NULL,
+ last_hard_state_change timestamp NULL,
+ last_hard_state smallint default 0,
+ last_time_up timestamp NULL,
+ last_time_down timestamp NULL,
+ last_time_unreachable timestamp NULL,
+ state_type smallint default 0,
+ last_notification timestamp NULL,
+ next_notification timestamp NULL,
+ no_more_notifications smallint default 0,
+ notifications_enabled smallint default 0,
+ problem_has_been_acknowledged smallint default 0,
+ acknowledgement_type smallint default 0,
+ current_notification_number int unsigned default 0,
+ passive_checks_enabled smallint default 0,
+ active_checks_enabled smallint default 0,
+ event_handler_enabled smallint default 0,
+ flap_detection_enabled smallint default 0,
+ is_flapping smallint default 0,
+ percent_state_change double default '0',
+ latency double default '0',
+ execution_time double default '0',
+ scheduled_downtime_depth smallint default 0,
+ failure_prediction_enabled smallint default 0,
+ process_performance_data smallint default 0,
+ obsess_over_host smallint default 0,
+ modified_host_attributes int default 0,
+ original_attributes TEXT character set latin1 default NULL,
+ event_handler TEXT character set latin1,
+ check_command TEXT character set latin1,
+ normal_check_interval double default '0',
+ retry_check_interval double default '0',
+ check_timeperiod_object_id bigint unsigned default 0,
+ is_reachable smallint default 0,
+ PRIMARY KEY (hoststatus_id),
+ UNIQUE KEY object_id (host_object_id)
+) ENGINE=InnoDB COMMENT='Current host status information';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_host_contactgroups
+--
+
+CREATE TABLE IF NOT EXISTS icinga_host_contactgroups (
+ host_contactgroup_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ host_id bigint unsigned default 0,
+ contactgroup_object_id bigint unsigned default 0,
+ PRIMARY KEY (host_contactgroup_id)
+) ENGINE=InnoDB COMMENT='Host contact groups';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_host_contacts
+--
+
+CREATE TABLE IF NOT EXISTS icinga_host_contacts (
+ host_contact_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ host_id bigint unsigned default 0,
+ contact_object_id bigint unsigned default 0,
+ PRIMARY KEY (host_contact_id)
+) ENGINE=InnoDB COMMENT='Host contacts';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_host_parenthosts
+--
+
+CREATE TABLE IF NOT EXISTS icinga_host_parenthosts (
+ host_parenthost_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ host_id bigint unsigned default 0,
+ parent_host_object_id bigint unsigned default 0,
+ PRIMARY KEY (host_parenthost_id)
+) ENGINE=InnoDB COMMENT='Parent hosts';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_instances
+--
+
+CREATE TABLE IF NOT EXISTS icinga_instances (
+ instance_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_name varchar(64) character set latin1 default '',
+ instance_description varchar(128) character set latin1 default '',
+ PRIMARY KEY (instance_id)
+) ENGINE=InnoDB COMMENT='Location names of various Icinga installations';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_logentries
+--
+
+CREATE TABLE IF NOT EXISTS icinga_logentries (
+ logentry_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ logentry_time timestamp NULL,
+ entry_time timestamp NULL,
+ entry_time_usec int default 0,
+ logentry_type int default 0,
+ logentry_data TEXT character set latin1,
+ realtime_data smallint default 0,
+ inferred_data_extracted smallint default 0,
+ object_id bigint unsigned default NULL,
+ PRIMARY KEY (logentry_id)
+) ENGINE=InnoDB COMMENT='Historical record of log entries';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_notifications
+--
+
+CREATE TABLE IF NOT EXISTS icinga_notifications (
+ notification_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ notification_type smallint default 0,
+ notification_reason smallint default 0,
+ object_id bigint unsigned default 0,
+ start_time timestamp NULL,
+ start_time_usec int default 0,
+ end_time timestamp NULL,
+ end_time_usec int default 0,
+ state smallint default 0,
+ output TEXT character set latin1,
+ long_output TEXT,
+ escalated smallint default 0,
+ contacts_notified smallint default 0,
+ PRIMARY KEY (notification_id),
+ UNIQUE KEY instance_id (instance_id,object_id,start_time,start_time_usec)
+) ENGINE=InnoDB COMMENT='Historical record of host and service notifications';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_objects
+--
+
+CREATE TABLE IF NOT EXISTS icinga_objects (
+ object_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ objecttype_id bigint unsigned default 0,
+ name1 varchar(255) character set latin1 collate latin1_general_cs default '',
+ name2 varchar(255) character set latin1 collate latin1_general_cs default NULL,
+ is_active smallint default 0,
+ PRIMARY KEY (object_id),
+ KEY objecttype_id (objecttype_id,name1,name2)
+) ENGINE=InnoDB COMMENT='Current and historical objects of all kinds';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_processevents
+--
+
+CREATE TABLE IF NOT EXISTS icinga_processevents (
+ processevent_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ event_type smallint default 0,
+ event_time timestamp NULL,
+ event_time_usec int default 0,
+ process_id bigint unsigned default 0,
+ program_name varchar(16) character set latin1 default '',
+ program_version varchar(20) character set latin1 default '',
+ program_date varchar(10) character set latin1 default '',
+ PRIMARY KEY (processevent_id)
+) ENGINE=InnoDB COMMENT='Historical Icinga process events';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_programstatus
+--
+
+CREATE TABLE IF NOT EXISTS icinga_programstatus (
+ programstatus_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ program_version varchar(64) character set latin1 collate latin1_general_cs default NULL,
+ status_update_time timestamp NULL,
+ program_start_time timestamp NULL,
+ program_end_time timestamp NULL,
+ endpoint_name varchar(255) character set latin1 collate latin1_general_cs default NULL,
+ is_currently_running smallint default 0,
+ process_id bigint unsigned default 0,
+ daemon_mode smallint default 0,
+ last_command_check timestamp NULL,
+ last_log_rotation timestamp NULL,
+ notifications_enabled smallint default 0,
+ disable_notif_expire_time timestamp NULL,
+ active_service_checks_enabled smallint default 0,
+ passive_service_checks_enabled smallint default 0,
+ active_host_checks_enabled smallint default 0,
+ passive_host_checks_enabled smallint default 0,
+ event_handlers_enabled smallint default 0,
+ flap_detection_enabled smallint default 0,
+ failure_prediction_enabled smallint default 0,
+ process_performance_data smallint default 0,
+ obsess_over_hosts smallint default 0,
+ obsess_over_services smallint default 0,
+ modified_host_attributes int default 0,
+ modified_service_attributes int default 0,
+ global_host_event_handler TEXT character set latin1,
+ global_service_event_handler TEXT character set latin1,
+ config_dump_in_progress smallint default 0,
+ PRIMARY KEY (programstatus_id),
+ UNIQUE KEY instance_id (instance_id)
+) ENGINE=InnoDB COMMENT='Current program status information';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_runtimevariables
+--
+
+CREATE TABLE IF NOT EXISTS icinga_runtimevariables (
+ runtimevariable_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ varname varchar(64) character set latin1 default '',
+ varvalue TEXT character set latin1,
+ PRIMARY KEY (runtimevariable_id)
+) ENGINE=InnoDB COMMENT='Runtime variables from the Icinga daemon';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_scheduleddowntime
+--
+
+CREATE TABLE IF NOT EXISTS icinga_scheduleddowntime (
+ scheduleddowntime_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ downtime_type smallint default 0,
+ object_id bigint unsigned default 0,
+ entry_time timestamp NULL,
+ author_name varchar(64) character set latin1 default '',
+ comment_data TEXT character set latin1,
+ internal_downtime_id bigint unsigned default 0,
+ triggered_by_id bigint unsigned default 0,
+ is_fixed smallint default 0,
+ duration bigint(20) default 0,
+ scheduled_start_time timestamp NULL,
+ scheduled_end_time timestamp NULL,
+ was_started smallint default 0,
+ actual_start_time timestamp NULL,
+ actual_start_time_usec int default 0,
+ is_in_effect smallint default 0,
+ trigger_time timestamp NULL,
+ name TEXT character set latin1 default NULL,
+ session_token int default NULL,
+ PRIMARY KEY (scheduleddowntime_id)
+) ENGINE=InnoDB COMMENT='Current scheduled host and service downtime';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_servicechecks
+--
+
+CREATE TABLE IF NOT EXISTS icinga_servicechecks (
+ servicecheck_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ service_object_id bigint unsigned default 0,
+ check_type smallint default 0,
+ current_check_attempt smallint default 0,
+ max_check_attempts smallint default 0,
+ state smallint default 0,
+ state_type smallint default 0,
+ start_time timestamp NULL,
+ start_time_usec int default 0,
+ end_time timestamp NULL,
+ end_time_usec int default 0,
+ command_object_id bigint unsigned default 0,
+ command_args TEXT character set latin1,
+ command_line TEXT character set latin1,
+ timeout smallint default 0,
+ early_timeout smallint default 0,
+ execution_time double default '0',
+ latency double default '0',
+ return_code smallint default 0,
+ output TEXT character set latin1,
+ long_output TEXT,
+ perfdata TEXT character set latin1,
+ PRIMARY KEY (servicecheck_id)
+) ENGINE=InnoDB COMMENT='Historical service checks';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_servicedependencies
+--
+
+CREATE TABLE IF NOT EXISTS icinga_servicedependencies (
+ servicedependency_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ config_type smallint default 0,
+ service_object_id bigint unsigned default 0,
+ dependent_service_object_id bigint unsigned default 0,
+ dependency_type smallint default 0,
+ inherits_parent smallint default 0,
+ timeperiod_object_id bigint unsigned default 0,
+ fail_on_ok smallint default 0,
+ fail_on_warning smallint default 0,
+ fail_on_unknown smallint default 0,
+ fail_on_critical smallint default 0,
+ PRIMARY KEY (servicedependency_id),
+ KEY instance_id (instance_id,config_type,service_object_id,dependent_service_object_id,dependency_type,inherits_parent,fail_on_ok,fail_on_warning,fail_on_unknown,fail_on_critical)
+) ENGINE=InnoDB COMMENT='Service dependency definitions';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_serviceescalations
+--
+
+CREATE TABLE IF NOT EXISTS icinga_serviceescalations (
+ serviceescalation_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ config_type smallint default 0,
+ service_object_id bigint unsigned default 0,
+ timeperiod_object_id bigint unsigned default 0,
+ first_notification smallint default 0,
+ last_notification smallint default 0,
+ notification_interval double default '0',
+ escalate_on_recovery smallint default 0,
+ escalate_on_warning smallint default 0,
+ escalate_on_unknown smallint default 0,
+ escalate_on_critical smallint default 0,
+ PRIMARY KEY (serviceescalation_id),
+ UNIQUE KEY instance_id (instance_id,config_type,service_object_id,timeperiod_object_id,first_notification,last_notification)
+) ENGINE=InnoDB COMMENT='Service escalation definitions';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_serviceescalation_contactgroups
+--
+
+CREATE TABLE IF NOT EXISTS icinga_serviceescalation_contactgroups (
+ serviceescalation_contactgroup_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ serviceescalation_id bigint unsigned default 0,
+ contactgroup_object_id bigint unsigned default 0,
+ PRIMARY KEY (serviceescalation_contactgroup_id),
+ UNIQUE KEY instance_id (serviceescalation_id,contactgroup_object_id)
+) ENGINE=InnoDB COMMENT='Service escalation contact groups';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_serviceescalation_contacts
+--
+
+CREATE TABLE IF NOT EXISTS icinga_serviceescalation_contacts (
+ serviceescalation_contact_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ serviceescalation_id bigint unsigned default 0,
+ contact_object_id bigint unsigned default 0,
+ PRIMARY KEY (serviceescalation_contact_id),
+ UNIQUE KEY instance_id (instance_id,serviceescalation_id,contact_object_id)
+) ENGINE=InnoDB COMMENT='Service escalation contacts';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_servicegroups
+--
+
+CREATE TABLE IF NOT EXISTS icinga_servicegroups (
+ servicegroup_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ config_type smallint default 0,
+ servicegroup_object_id bigint unsigned default 0,
+ alias varchar(255) character set latin1 default '',
+ notes TEXT character set latin1 default NULL,
+ notes_url TEXT character set latin1 default NULL,
+ action_url TEXT character set latin1 default NULL,
+ config_hash varchar(64) DEFAULT NULL,
+ PRIMARY KEY (servicegroup_id),
+ UNIQUE KEY instance_id (instance_id,config_type,servicegroup_object_id)
+) ENGINE=InnoDB COMMENT='Servicegroup definitions';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_servicegroup_members
+--
+
+CREATE TABLE IF NOT EXISTS icinga_servicegroup_members (
+ servicegroup_member_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ servicegroup_id bigint unsigned default 0,
+ service_object_id bigint unsigned default 0,
+ PRIMARY KEY (servicegroup_member_id)
+) ENGINE=InnoDB COMMENT='Servicegroup members';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_services
+--
+
+CREATE TABLE IF NOT EXISTS icinga_services (
+ service_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ config_type smallint default 0,
+ host_object_id bigint unsigned default 0,
+ service_object_id bigint unsigned default 0,
+ display_name varchar(255) character set latin1 collate latin1_general_cs default '',
+ check_command_object_id bigint unsigned default 0,
+ check_command_args TEXT character set latin1,
+ eventhandler_command_object_id bigint unsigned default 0,
+ eventhandler_command_args TEXT character set latin1,
+ notification_timeperiod_object_id bigint unsigned default 0,
+ check_timeperiod_object_id bigint unsigned default 0,
+ failure_prediction_options varchar(64) character set latin1 default '',
+ check_interval double default '0',
+ retry_interval double default '0',
+ max_check_attempts smallint default 0,
+ first_notification_delay double default '0',
+ notification_interval double default '0',
+ notify_on_warning smallint default 0,
+ notify_on_unknown smallint default 0,
+ notify_on_critical smallint default 0,
+ notify_on_recovery smallint default 0,
+ notify_on_flapping smallint default 0,
+ notify_on_downtime smallint default 0,
+ stalk_on_ok smallint default 0,
+ stalk_on_warning smallint default 0,
+ stalk_on_unknown smallint default 0,
+ stalk_on_critical smallint default 0,
+ is_volatile smallint default 0,
+ flap_detection_enabled smallint default 0,
+ flap_detection_on_ok smallint default 0,
+ flap_detection_on_warning smallint default 0,
+ flap_detection_on_unknown smallint default 0,
+ flap_detection_on_critical smallint default 0,
+ low_flap_threshold double default '0',
+ high_flap_threshold double default '0',
+ process_performance_data smallint default 0,
+ freshness_checks_enabled smallint default 0,
+ freshness_threshold int default 0,
+ passive_checks_enabled smallint default 0,
+ event_handler_enabled smallint default 0,
+ active_checks_enabled smallint default 0,
+ retain_status_information smallint default 0,
+ retain_nonstatus_information smallint default 0,
+ notifications_enabled smallint default 0,
+ obsess_over_service smallint default 0,
+ failure_prediction_enabled smallint default 0,
+ notes TEXT character set latin1,
+ notes_url TEXT character set latin1,
+ action_url TEXT character set latin1,
+ icon_image TEXT character set latin1,
+ icon_image_alt TEXT character set latin1,
+ config_hash varchar(64) DEFAULT NULL,
+ PRIMARY KEY (service_id),
+ UNIQUE KEY instance_id (instance_id,config_type,service_object_id)
+) ENGINE=InnoDB COMMENT='Service definitions';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_servicestatus
+--
+
+CREATE TABLE IF NOT EXISTS icinga_servicestatus (
+ servicestatus_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ service_object_id bigint unsigned default 0,
+ status_update_time timestamp NULL,
+ output TEXT character set latin1,
+ long_output TEXT,
+ perfdata TEXT character set latin1,
+ check_source varchar(255) character set latin1 default '',
+ current_state smallint default 0,
+ has_been_checked smallint default 0,
+ should_be_scheduled smallint default 0,
+ current_check_attempt smallint default 0,
+ max_check_attempts smallint default 0,
+ last_check timestamp NULL,
+ next_check timestamp NULL,
+ check_type smallint default 0,
+ last_state_change timestamp NULL,
+ last_hard_state_change timestamp NULL,
+ last_hard_state smallint default 0,
+ last_time_ok timestamp NULL,
+ last_time_warning timestamp NULL,
+ last_time_unknown timestamp NULL,
+ last_time_critical timestamp NULL,
+ state_type smallint default 0,
+ last_notification timestamp NULL,
+ next_notification timestamp NULL,
+ no_more_notifications smallint default 0,
+ notifications_enabled smallint default 0,
+ problem_has_been_acknowledged smallint default 0,
+ acknowledgement_type smallint default 0,
+ current_notification_number int unsigned default 0,
+ passive_checks_enabled smallint default 0,
+ active_checks_enabled smallint default 0,
+ event_handler_enabled smallint default 0,
+ flap_detection_enabled smallint default 0,
+ is_flapping smallint default 0,
+ percent_state_change double default '0',
+ latency double default '0',
+ execution_time double default '0',
+ scheduled_downtime_depth smallint default 0,
+ failure_prediction_enabled smallint default 0,
+ process_performance_data smallint default 0,
+ obsess_over_service smallint default 0,
+ modified_service_attributes int default 0,
+ original_attributes TEXT character set latin1 default NULL,
+ event_handler TEXT character set latin1,
+ check_command TEXT character set latin1,
+ normal_check_interval double default '0',
+ retry_check_interval double default '0',
+ check_timeperiod_object_id bigint unsigned default 0,
+ is_reachable smallint default 0,
+ PRIMARY KEY (servicestatus_id),
+ UNIQUE KEY object_id (service_object_id)
+) ENGINE=InnoDB COMMENT='Current service status information';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_service_contactgroups
+--
+
+CREATE TABLE IF NOT EXISTS icinga_service_contactgroups (
+ service_contactgroup_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ service_id bigint unsigned default 0,
+ contactgroup_object_id bigint unsigned default 0,
+ PRIMARY KEY (service_contactgroup_id)
+) ENGINE=InnoDB COMMENT='Service contact groups';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_service_contacts
+--
+
+CREATE TABLE IF NOT EXISTS icinga_service_contacts (
+ service_contact_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ service_id bigint unsigned default 0,
+ contact_object_id bigint unsigned default 0,
+ PRIMARY KEY (service_contact_id)
+) ENGINE=InnoDB COMMENT='Service contacts';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_statehistory
+--
+
+CREATE TABLE IF NOT EXISTS icinga_statehistory (
+ statehistory_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ state_time timestamp NULL,
+ state_time_usec int default 0,
+ object_id bigint unsigned default 0,
+ state_change smallint default 0,
+ state smallint default 0,
+ state_type smallint default 0,
+ current_check_attempt smallint default 0,
+ max_check_attempts smallint default 0,
+ last_state smallint default 0,
+ last_hard_state smallint default 0,
+ output TEXT character set latin1,
+ long_output TEXT,
+ check_source varchar(255) character set latin1 default NULL,
+ PRIMARY KEY (statehistory_id)
+) ENGINE=InnoDB COMMENT='Historical host and service state changes';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_systemcommands
+--
+
+CREATE TABLE IF NOT EXISTS icinga_systemcommands (
+ systemcommand_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ start_time timestamp NULL,
+ start_time_usec int default 0,
+ end_time timestamp NULL,
+ end_time_usec int default 0,
+ command_line TEXT character set latin1,
+ timeout smallint default 0,
+ early_timeout smallint default 0,
+ execution_time double default '0',
+ return_code smallint default 0,
+ output TEXT character set latin1,
+ long_output TEXT,
+ PRIMARY KEY (systemcommand_id),
+ UNIQUE KEY instance_id (instance_id,start_time,start_time_usec)
+) ENGINE=InnoDB COMMENT='Historical system commands that are executed';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_timeperiods
+--
+
+CREATE TABLE IF NOT EXISTS icinga_timeperiods (
+ timeperiod_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ config_type smallint default 0,
+ timeperiod_object_id bigint unsigned default 0,
+ alias varchar(255) character set latin1 default '',
+ config_hash varchar(64) DEFAULT NULL,
+ PRIMARY KEY (timeperiod_id),
+ UNIQUE KEY instance_id (instance_id,config_type,timeperiod_object_id)
+) ENGINE=InnoDB COMMENT='Timeperiod definitions';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_timeperiod_timeranges
+--
+
+CREATE TABLE IF NOT EXISTS icinga_timeperiod_timeranges (
+ timeperiod_timerange_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ timeperiod_id bigint unsigned default 0,
+ day smallint default 0,
+ start_sec int default 0,
+ end_sec int default 0,
+ PRIMARY KEY (timeperiod_timerange_id)
+) ENGINE=InnoDB COMMENT='Timeperiod time ranges';
+
+
+-- --------------------------------------------------------
+-- Icinga 2 specific schema extensions
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_endpoints
+--
+
+CREATE TABLE IF NOT EXISTS icinga_endpoints (
+ endpoint_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ endpoint_object_id bigint(20) unsigned DEFAULT '0',
+ zone_object_id bigint(20) unsigned DEFAULT '0',
+ config_type smallint(6) DEFAULT '0',
+ identity varchar(255) DEFAULT NULL,
+ node varchar(255) DEFAULT NULL,
+ config_hash varchar(64) DEFAULT NULL,
+ PRIMARY KEY (endpoint_id)
+) ENGINE=InnoDB COMMENT='Endpoint configuration';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_endpointstatus
+--
+
+CREATE TABLE IF NOT EXISTS icinga_endpointstatus (
+ endpointstatus_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ endpoint_object_id bigint(20) unsigned DEFAULT '0',
+ zone_object_id bigint(20) unsigned DEFAULT '0',
+ status_update_time timestamp NULL,
+ identity varchar(255) DEFAULT NULL,
+ node varchar(255) DEFAULT NULL,
+ is_connected smallint(6),
+ PRIMARY KEY (endpointstatus_id)
+) ENGINE=InnoDB COMMENT='Endpoint status';
+
+--
+-- Table structure for table icinga_zones
+--
+
+CREATE TABLE IF NOT EXISTS icinga_zones (
+ zone_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ zone_object_id bigint(20) unsigned DEFAULT '0',
+ config_type smallint(6) DEFAULT '0',
+ parent_zone_object_id bigint(20) unsigned DEFAULT '0',
+ is_global smallint(6),
+ config_hash varchar(64) DEFAULT NULL,
+ PRIMARY KEY (zone_id)
+) ENGINE=InnoDB COMMENT='Zone configuration';
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_zonestatus
+--
+
+CREATE TABLE IF NOT EXISTS icinga_zonestatus (
+ zonestatus_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ zone_object_id bigint(20) unsigned DEFAULT '0',
+ status_update_time timestamp NULL,
+ parent_zone_object_id bigint(20) unsigned DEFAULT '0',
+ PRIMARY KEY (zonestatus_id)
+) ENGINE=InnoDB COMMENT='Zone status';
+
+
+
+
+ALTER TABLE icinga_servicestatus ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_hoststatus ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_contactstatus ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_programstatus ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_comments ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_scheduleddowntime ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_runtimevariables ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_customvariablestatus ADD COLUMN endpoint_object_id bigint default NULL;
+
+ALTER TABLE icinga_acknowledgements ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_commenthistory ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_contactnotifications ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_downtimehistory ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_eventhandlers ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_externalcommands ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_flappinghistory ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_hostchecks ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_logentries ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_notifications ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_processevents ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_servicechecks ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_statehistory ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_systemcommands ADD COLUMN endpoint_object_id bigint default NULL;
+
+-- -----------------------------------------
+-- add index (delete)
+-- -----------------------------------------
+
+-- for periodic delete
+-- instance_id and
+-- SYSTEMCOMMANDS, SERVICECHECKS, HOSTCHECKS, EVENTHANDLERS => start_time
+-- EXTERNALCOMMANDS => entry_time
+
+-- instance_id
+CREATE INDEX servicechecks_i_id_idx on icinga_servicechecks(instance_id);
+CREATE INDEX hostchecks_i_id_idx on icinga_hostchecks(instance_id);
+CREATE INDEX externalcommands_i_id_idx on icinga_externalcommands(instance_id);
+
+-- time
+CREATE INDEX systemcommands_time_id_idx on icinga_systemcommands(start_time);
+CREATE INDEX servicechecks_time_id_idx on icinga_servicechecks(start_time);
+CREATE INDEX hostchecks_time_id_idx on icinga_hostchecks(start_time);
+CREATE INDEX eventhandlers_time_id_idx on icinga_eventhandlers(start_time);
+CREATE INDEX externalcommands_time_id_idx on icinga_externalcommands(entry_time);
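+
+-- A minimal sketch of the periodic-cleanup deletes these indexes serve
+-- (instance id and cutoff time below are illustrative values only):
+--   DELETE FROM icinga_servicechecks
+--     WHERE instance_id = 1 AND start_time < FROM_UNIXTIME(1700000000);
+--   DELETE FROM icinga_externalcommands
+--     WHERE instance_id = 1 AND entry_time < FROM_UNIXTIME(1700000000);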
+
+
+-- for starting cleanup - referenced in dbhandler.c:882
+-- instance_id only
+
+-- realtime data
+CREATE INDEX hoststatus_i_id_idx on icinga_hoststatus(instance_id);
+CREATE INDEX servicestatus_i_id_idx on icinga_servicestatus(instance_id);
+CREATE INDEX contactstatus_i_id_idx on icinga_contactstatus(instance_id);
+CREATE INDEX customvariablestatus_i_id_idx on icinga_customvariablestatus(instance_id);
+
+-- config data
+CREATE INDEX configfilevariables_i_id_idx on icinga_configfilevariables(instance_id);
+CREATE INDEX customvariables_i_id_idx on icinga_customvariables(instance_id);
+CREATE INDEX timeperiod_timeranges_i_id_idx on icinga_timeperiod_timeranges(instance_id);
+CREATE INDEX contactgroup_members_i_id_idx on icinga_contactgroup_members(instance_id);
+CREATE INDEX hostgroup_members_i_id_idx on icinga_hostgroup_members(instance_id);
+CREATE INDEX servicegroup_members_i_id_idx on icinga_servicegroup_members(instance_id);
+CREATE INDEX contact_addresses_i_id_idx on icinga_contact_addresses(instance_id);
+CREATE INDEX contact_notifcommands_i_id_idx on icinga_contact_notificationcommands(instance_id);
+CREATE INDEX host_parenthosts_i_id_idx on icinga_host_parenthosts(instance_id);
+CREATE INDEX host_contacts_i_id_idx on icinga_host_contacts(instance_id);
+CREATE INDEX service_contacts_i_id_idx on icinga_service_contacts(instance_id);
+CREATE INDEX service_contactgroups_i_id_idx on icinga_service_contactgroups(instance_id);
+CREATE INDEX host_contactgroups_i_id_idx on icinga_host_contactgroups(instance_id);
+CREATE INDEX hostesc_cgroups_i_id_idx on icinga_hostescalation_contactgroups(instance_id);
+CREATE INDEX serviceesc_cgroups_i_id_idx on icinga_serviceescalation_contactgroups(instance_id);
+
+-- -----------------------------------------
+-- more index stuff (WHERE clauses)
+-- -----------------------------------------
+
+-- hosts
+CREATE INDEX hosts_host_object_id_idx on icinga_hosts(host_object_id);
+
+-- hoststatus
+CREATE INDEX hoststatus_stat_upd_time_idx on icinga_hoststatus(status_update_time);
+CREATE INDEX hoststatus_current_state_idx on icinga_hoststatus(current_state);
+CREATE INDEX hoststatus_check_type_idx on icinga_hoststatus(check_type);
+CREATE INDEX hoststatus_state_type_idx on icinga_hoststatus(state_type);
+CREATE INDEX hoststatus_last_state_chg_idx on icinga_hoststatus(last_state_change);
+CREATE INDEX hoststatus_notif_enabled_idx on icinga_hoststatus(notifications_enabled);
+CREATE INDEX hoststatus_problem_ack_idx on icinga_hoststatus(problem_has_been_acknowledged);
+CREATE INDEX hoststatus_act_chks_en_idx on icinga_hoststatus(active_checks_enabled);
+CREATE INDEX hoststatus_pas_chks_en_idx on icinga_hoststatus(passive_checks_enabled);
+CREATE INDEX hoststatus_event_hdl_en_idx on icinga_hoststatus(event_handler_enabled);
+CREATE INDEX hoststatus_flap_det_en_idx on icinga_hoststatus(flap_detection_enabled);
+CREATE INDEX hoststatus_is_flapping_idx on icinga_hoststatus(is_flapping);
+CREATE INDEX hoststatus_p_state_chg_idx on icinga_hoststatus(percent_state_change);
+CREATE INDEX hoststatus_latency_idx on icinga_hoststatus(latency);
+CREATE INDEX hoststatus_ex_time_idx on icinga_hoststatus(execution_time);
+CREATE INDEX hoststatus_sch_downt_d_idx on icinga_hoststatus(scheduled_downtime_depth);
+
+-- services
+CREATE INDEX services_host_object_id_idx on icinga_services(host_object_id);
+
+-- servicestatus
+CREATE INDEX srvcstatus_stat_upd_time_idx on icinga_servicestatus(status_update_time);
+CREATE INDEX srvcstatus_current_state_idx on icinga_servicestatus(current_state);
+CREATE INDEX srvcstatus_check_type_idx on icinga_servicestatus(check_type);
+CREATE INDEX srvcstatus_state_type_idx on icinga_servicestatus(state_type);
+CREATE INDEX srvcstatus_last_state_chg_idx on icinga_servicestatus(last_state_change);
+CREATE INDEX srvcstatus_notif_enabled_idx on icinga_servicestatus(notifications_enabled);
+CREATE INDEX srvcstatus_problem_ack_idx on icinga_servicestatus(problem_has_been_acknowledged);
+CREATE INDEX srvcstatus_act_chks_en_idx on icinga_servicestatus(active_checks_enabled);
+CREATE INDEX srvcstatus_pas_chks_en_idx on icinga_servicestatus(passive_checks_enabled);
+CREATE INDEX srvcstatus_event_hdl_en_idx on icinga_servicestatus(event_handler_enabled);
+CREATE INDEX srvcstatus_flap_det_en_idx on icinga_servicestatus(flap_detection_enabled);
+CREATE INDEX srvcstatus_is_flapping_idx on icinga_servicestatus(is_flapping);
+CREATE INDEX srvcstatus_p_state_chg_idx on icinga_servicestatus(percent_state_change);
+CREATE INDEX srvcstatus_latency_idx on icinga_servicestatus(latency);
+CREATE INDEX srvcstatus_ex_time_idx on icinga_servicestatus(execution_time);
+CREATE INDEX srvcstatus_sch_downt_d_idx on icinga_servicestatus(scheduled_downtime_depth);
+
+-- hostchecks
+CREATE INDEX hostchks_h_obj_id_idx on icinga_hostchecks(host_object_id);
+
+-- servicechecks
+CREATE INDEX servicechks_s_obj_id_idx on icinga_servicechecks(service_object_id);
+
+-- objects
+CREATE INDEX objects_name1_idx ON icinga_objects(name1);
+CREATE INDEX objects_name2_idx ON icinga_objects(name2);
+CREATE INDEX objects_inst_id_idx ON icinga_objects(instance_id);
+
+-- instances
+-- CREATE INDEX instances_name_idx on icinga_instances(instance_name);
+
+-- logentries
+-- CREATE INDEX loge_instance_id_idx on icinga_logentries(instance_id);
+-- #236
+CREATE INDEX loge_time_idx on icinga_logentries(logentry_time);
+-- CREATE INDEX loge_data_idx on icinga_logentries(logentry_data);
+CREATE INDEX loge_inst_id_time_idx on icinga_logentries (instance_id ASC, logentry_time DESC);
+
+-- commenthistory
+-- CREATE INDEX c_hist_instance_id_idx on icinga_logentries(instance_id);
+-- CREATE INDEX c_hist_c_time_idx on icinga_logentries(comment_time);
+-- CREATE INDEX c_hist_i_c_id_idx on icinga_logentries(internal_comment_id);
+
+-- downtimehistory
+-- CREATE INDEX d_t_hist_nstance_id_idx on icinga_downtimehistory(instance_id);
+-- CREATE INDEX d_t_hist_type_idx on icinga_downtimehistory(downtime_type);
+-- CREATE INDEX d_t_hist_object_id_idx on icinga_downtimehistory(object_id);
+-- CREATE INDEX d_t_hist_entry_time_idx on icinga_downtimehistory(entry_time);
+-- CREATE INDEX d_t_hist_sched_start_idx on icinga_downtimehistory(scheduled_start_time);
+-- CREATE INDEX d_t_hist_sched_end_idx on icinga_downtimehistory(scheduled_end_time);
+
+-- scheduleddowntime
+-- CREATE INDEX sched_d_t_downtime_type_idx on icinga_scheduleddowntime(downtime_type);
+-- CREATE INDEX sched_d_t_object_id_idx on icinga_scheduleddowntime(object_id);
+-- CREATE INDEX sched_d_t_entry_time_idx on icinga_scheduleddowntime(entry_time);
+-- CREATE INDEX sched_d_t_start_time_idx on icinga_scheduleddowntime(scheduled_start_time);
+-- CREATE INDEX sched_d_t_end_time_idx on icinga_scheduleddowntime(scheduled_end_time);
+
+-- statehistory
+CREATE INDEX statehist_i_id_o_id_s_ty_s_ti on icinga_statehistory(instance_id, object_id, state_type, state_time);
+-- #2274
+create index statehist_state_idx on icinga_statehistory(object_id,state);
+
+
+-- Icinga Web Notifications
+CREATE INDEX notification_idx ON icinga_notifications(notification_type, object_id, start_time);
+CREATE INDEX notification_object_id_idx ON icinga_notifications(object_id);
+CREATE INDEX contact_notification_idx ON icinga_contactnotifications(notification_id, contact_object_id);
+CREATE INDEX contacts_object_id_idx ON icinga_contacts(contact_object_id);
+CREATE INDEX contact_notif_meth_notif_idx ON icinga_contactnotificationmethods(contactnotification_id, command_object_id);
+CREATE INDEX command_object_idx ON icinga_commands(object_id);
+CREATE INDEX services_combined_object_idx ON icinga_services(service_object_id, host_object_id);
+
+
+-- #2618
+CREATE INDEX cntgrpmbrs_cgid_coid ON icinga_contactgroup_members (contactgroup_id,contact_object_id);
+CREATE INDEX hstgrpmbrs_hgid_hoid ON icinga_hostgroup_members (hostgroup_id,host_object_id);
+CREATE INDEX hstcntgrps_hid_cgoid ON icinga_host_contactgroups (host_id,contactgroup_object_id);
+CREATE INDEX hstprnthsts_hid_phoid ON icinga_host_parenthosts (host_id,parent_host_object_id);
+CREATE INDEX runtimevars_iid_varn ON icinga_runtimevariables (instance_id,varname);
+CREATE INDEX sgmbrs_sgid_soid ON icinga_servicegroup_members (servicegroup_id,service_object_id);
+CREATE INDEX scgrps_sid_cgoid ON icinga_service_contactgroups (service_id,contactgroup_object_id);
+CREATE INDEX tperiod_tid_d_ss_es ON icinga_timeperiod_timeranges (timeperiod_id,day,start_sec,end_sec);
+
+-- #3649
+CREATE INDEX sla_idx_sthist ON icinga_statehistory (object_id, state_time DESC);
+CREATE INDEX sla_idx_dohist ON icinga_downtimehistory (object_id, actual_start_time, actual_end_time);
+CREATE INDEX sla_idx_obj ON icinga_objects (objecttype_id, is_active, name1);
+
+-- #4985
+CREATE INDEX commenthistory_delete_idx ON icinga_commenthistory (instance_id, comment_time, internal_comment_id);
+
+-- #10066
+CREATE INDEX idx_endpoints_object_id on icinga_endpoints(endpoint_object_id);
+CREATE INDEX idx_endpointstatus_object_id on icinga_endpointstatus(endpoint_object_id);
+
+CREATE INDEX idx_endpoints_zone_object_id on icinga_endpoints(zone_object_id);
+CREATE INDEX idx_endpointstatus_zone_object_id on icinga_endpointstatus(zone_object_id);
+
+CREATE INDEX idx_zones_object_id on icinga_zones(zone_object_id);
+CREATE INDEX idx_zonestatus_object_id on icinga_zonestatus(zone_object_id);
+
+CREATE INDEX idx_zones_parent_object_id on icinga_zones(parent_zone_object_id);
+CREATE INDEX idx_zonestatus_parent_object_id on icinga_zonestatus(parent_zone_object_id);
+
+-- #12210
+CREATE INDEX idx_comments_session_del ON icinga_comments (instance_id, session_token);
+CREATE INDEX idx_downtimes_session_del ON icinga_scheduleddowntime (instance_id, session_token);
+
+-- #12107
+CREATE INDEX idx_statehistory_cleanup on icinga_statehistory(instance_id, state_time);
+
+-- #12435
+CREATE INDEX idx_contactgroup_members_object_id on icinga_contactgroup_members(contact_object_id);
+CREATE INDEX idx_hostgroup_members_object_id on icinga_hostgroup_members(host_object_id);
+CREATE INDEX idx_servicegroup_members_object_id on icinga_servicegroup_members(service_object_id);
+CREATE INDEX idx_servicedependencies_dependent_service_object_id on icinga_servicedependencies(dependent_service_object_id);
+CREATE INDEX idx_hostdependencies_dependent_host_object_id on icinga_hostdependencies(dependent_host_object_id);
+CREATE INDEX idx_service_contacts_service_id on icinga_service_contacts(service_id);
+CREATE INDEX idx_host_contacts_host_id on icinga_host_contacts(host_id);
+
+-- #5458
+create index idx_downtimehistory_remove on icinga_downtimehistory (object_id, entry_time, scheduled_start_time, scheduled_end_time);
+create index idx_scheduleddowntime_remove on icinga_scheduleddowntime (object_id, entry_time, scheduled_start_time, scheduled_end_time);
+
+-- #5492
+CREATE INDEX idx_commenthistory_remove ON icinga_commenthistory (object_id, entry_time);
+CREATE INDEX idx_comments_remove ON icinga_comments (object_id, entry_time);
+
+-- -----------------------------------------
+-- set dbversion
+-- -----------------------------------------
+INSERT INTO icinga_dbversion (name, version, create_time, modify_time) VALUES ('idoutils', '1.15.1', NOW(), NOW())
+ON DUPLICATE KEY UPDATE version='1.15.1', modify_time=NOW();
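+
+-- A quick way to verify the import is to read back the version row written
+-- above (sketch only):
+--   SELECT version FROM icinga_dbversion WHERE name = 'idoutils';  -- expect '1.15.1'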
+
+
diff --git a/lib/db_ido_mysql/schema/upgrade/2.0.2.sql b/lib/db_ido_mysql/schema/upgrade/2.0.2.sql
new file mode 100644
index 0000000..c622ae9
--- /dev/null
+++ b/lib/db_ido_mysql/schema/upgrade/2.0.2.sql
@@ -0,0 +1,20 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.0.2
+--
+-- -----------------------------------------
+-- Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+UPDATE icinga_objects SET name2 = NULL WHERE name2 = '';
+
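+-- latin1_general_cs is a case-sensitive collation, so custom variable names
+-- (e.g. 'Location' vs. 'location') compare case-sensitively after this change.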
+ALTER TABLE `icinga_customvariables` MODIFY COLUMN `varname` varchar(255) character set latin1 collate latin1_general_cs default NULL;
+ALTER TABLE `icinga_customvariablestatus` MODIFY COLUMN `varname` varchar(255) character set latin1 collate latin1_general_cs default NULL;
+
+-- -----------------------------------------
+-- update dbversion
+-- -----------------------------------------
+
+INSERT INTO icinga_dbversion (name, version, create_time, modify_time) VALUES ('idoutils', '1.11.6', NOW(), NOW()) ON DUPLICATE KEY UPDATE version='1.11.6', modify_time=NOW();
+
diff --git a/lib/db_ido_mysql/schema/upgrade/2.1.0.sql b/lib/db_ido_mysql/schema/upgrade/2.1.0.sql
new file mode 100644
index 0000000..7bbed72
--- /dev/null
+++ b/lib/db_ido_mysql/schema/upgrade/2.1.0.sql
@@ -0,0 +1,17 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.1.0
+--
+-- -----------------------------------------
+-- Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+ALTER TABLE `icinga_programstatus` ADD COLUMN `endpoint_name` varchar(255) character set latin1 collate latin1_general_cs default NULL;
+
+-- -----------------------------------------
+-- update dbversion
+-- -----------------------------------------
+
+INSERT INTO icinga_dbversion (name, version, create_time, modify_time) VALUES ('idoutils', '1.11.7', NOW(), NOW()) ON DUPLICATE KEY UPDATE version='1.11.7', modify_time=NOW();
+
diff --git a/lib/db_ido_mysql/schema/upgrade/2.11.0.sql b/lib/db_ido_mysql/schema/upgrade/2.11.0.sql
new file mode 100644
index 0000000..bafa93f
--- /dev/null
+++ b/lib/db_ido_mysql/schema/upgrade/2.11.0.sql
@@ -0,0 +1,89 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.11.0
+--
+-- -----------------------------------------
+-- Copyright (c) 2019 Icinga Development Team (https://icinga.com/)
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+SET SQL_MODE="NO_AUTO_VALUE_ON_ZERO";
+
+-- --------------------------------------------------------
+-- Helper functions and procedures for DROP INDEX IF EXISTS
+-- --------------------------------------------------------
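+--
+-- MySQL has no native DROP INDEX IF EXISTS, so the helper below first checks
+-- information_schema.statistics and only then runs the prepared
+-- ALTER TABLE ... DROP INDEX statement.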
+
+DELIMITER //
+DROP FUNCTION IF EXISTS ido_index_exists //
+CREATE FUNCTION ido_index_exists(
+ f_table_name varchar(64),
+ f_index_name varchar(64)
+)
+ RETURNS BOOL
+ DETERMINISTIC
+ READS SQL DATA
+ BEGIN
+ DECLARE index_exists BOOL DEFAULT FALSE;
+ SELECT EXISTS (
+ SELECT 1
+ FROM information_schema.statistics
+ WHERE table_schema = SCHEMA()
+ AND table_name = f_table_name
+ AND index_name = f_index_name
+ ) INTO index_exists;
+ RETURN index_exists;
+ END //
+
+DROP PROCEDURE IF EXISTS ido_drop_index_if_exists //
+CREATE PROCEDURE ido_drop_index_if_exists (
+ IN p_table_name varchar(64),
+ IN p_index_name varchar(64)
+)
+ DETERMINISTIC
+ MODIFIES SQL DATA
+ BEGIN
+ IF ido_index_exists(p_table_name, p_index_name)
+ THEN
+ SET @ido_drop_index_sql = CONCAT('ALTER TABLE `', SCHEMA(), '`.`', p_table_name, '` DROP INDEX `', p_index_name, '`');
+ PREPARE stmt FROM @ido_drop_index_sql;
+ EXECUTE stmt;
+ DEALLOCATE PREPARE stmt;
+ SET @ido_drop_index_sql = NULL;
+ END IF;
+ END //
+DELIMITER ;
+
+CALL ido_drop_index_if_exists('icinga_commands', 'commands_i_id_idx');
+CALL ido_drop_index_if_exists('icinga_comments', 'idx_comments_object_id');
+CALL ido_drop_index_if_exists('icinga_comments', 'comments_i_id_idx');
+CALL ido_drop_index_if_exists('icinga_configfiles', 'configfiles_i_id_idx');
+CALL ido_drop_index_if_exists('icinga_contactgroups', 'contactgroups_i_id_idx');
+CALL ido_drop_index_if_exists('icinga_contacts', 'contacts_i_id_idx');
+CALL ido_drop_index_if_exists('icinga_customvariables', 'idx_customvariables_object_id');
+CALL ido_drop_index_if_exists('icinga_eventhandlers', 'eventhandlers_i_id_idx');
+CALL ido_drop_index_if_exists('icinga_hostdependencies', 'hostdependencies_i_id_idx');
+CALL ido_drop_index_if_exists('icinga_hostescalations', 'hostesc_i_id_idx');
+CALL ido_drop_index_if_exists('icinga_hostescalation_contacts', 'hostesc_contacts_i_id_idx');
+CALL ido_drop_index_if_exists('icinga_hostgroups', 'hostgroups_i_id_idx');
+CALL ido_drop_index_if_exists('icinga_hosts', 'host_object_id');
+CALL ido_drop_index_if_exists('icinga_hosts', 'hosts_i_id_idx');
+CALL ido_drop_index_if_exists('icinga_objects', 'objects_objtype_id_idx');
+CALL ido_drop_index_if_exists('icinga_programstatus', 'programstatus_i_id_idx');
+CALL ido_drop_index_if_exists('icinga_runtimevariables', 'runtimevariables_i_id_idx');
+CALL ido_drop_index_if_exists('icinga_scheduleddowntime', 'scheduleddowntime_i_id_idx');
+CALL ido_drop_index_if_exists('icinga_scheduleddowntime', 'idx_scheduleddowntime_object_id');
+CALL ido_drop_index_if_exists('icinga_serviceescalations', 'serviceesc_i_id_idx');
+CALL ido_drop_index_if_exists('icinga_serviceescalation_contacts', 'serviceesc_contacts_i_id_idx');
+CALL ido_drop_index_if_exists('icinga_servicegroups', 'servicegroups_i_id_idx');
+CALL ido_drop_index_if_exists('icinga_services', 'services_i_id_idx');
+CALL ido_drop_index_if_exists('icinga_services', 'service_object_id');
+CALL ido_drop_index_if_exists('icinga_systemcommands', 'systemcommands_i_id_idx');
+CALL ido_drop_index_if_exists('icinga_timeperiods', 'timeperiods_i_id_idx');
+
+DROP FUNCTION ido_index_exists;
+DROP PROCEDURE ido_drop_index_if_exists;
+
+-- -----------------------------------------
+-- set dbversion
+-- -----------------------------------------
+INSERT INTO icinga_dbversion (name, version, create_time, modify_time) VALUES ('idoutils', '1.15.0', NOW(), NOW()) ON DUPLICATE KEY UPDATE version='1.15.0', modify_time=NOW();
diff --git a/lib/db_ido_mysql/schema/upgrade/2.12.7.sql b/lib/db_ido_mysql/schema/upgrade/2.12.7.sql
new file mode 100644
index 0000000..6319b37
--- /dev/null
+++ b/lib/db_ido_mysql/schema/upgrade/2.12.7.sql
@@ -0,0 +1,15 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.12.7
+--
+-- -----------------------------------------
+-- Icinga 2 | (c) 2021 Icinga GmbH | GPLv2+
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+SET SQL_MODE="NO_AUTO_VALUE_ON_ZERO";
+
+-- -------------
+-- set dbversion
+-- -------------
+INSERT INTO icinga_dbversion (name, version, create_time, modify_time) VALUES ('idoutils', '1.15.0', NOW(), NOW()) ON DUPLICATE KEY UPDATE version='1.15.0', modify_time=NOW();
diff --git a/lib/db_ido_mysql/schema/upgrade/2.13.0.sql b/lib/db_ido_mysql/schema/upgrade/2.13.0.sql
new file mode 100644
index 0000000..462be6f
--- /dev/null
+++ b/lib/db_ido_mysql/schema/upgrade/2.13.0.sql
@@ -0,0 +1,23 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.13.0
+--
+-- -----------------------------------------
+-- Icinga 2 | (c) 2021 Icinga GmbH | GPLv2+
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+SET SQL_MODE="NO_AUTO_VALUE_ON_ZERO";
+
+-- ----------------------------------------
+-- #7472 Support hosts with >128 characters
+-- ----------------------------------------
+
+ALTER TABLE icinga_objects
+ MODIFY COLUMN name1 varchar(255) character set latin1 collate latin1_general_cs default '',
+ MODIFY COLUMN name2 varchar(255) character set latin1 collate latin1_general_cs default NULL;
+
+-- -------------
+-- set dbversion
+-- -------------
+INSERT INTO icinga_dbversion (name, version, create_time, modify_time) VALUES ('idoutils', '1.15.1', NOW(), NOW()) ON DUPLICATE KEY UPDATE version='1.15.1', modify_time=NOW();
diff --git a/lib/db_ido_mysql/schema/upgrade/2.13.3.sql b/lib/db_ido_mysql/schema/upgrade/2.13.3.sql
new file mode 100644
index 0000000..577eb0a
--- /dev/null
+++ b/lib/db_ido_mysql/schema/upgrade/2.13.3.sql
@@ -0,0 +1,15 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.13.3
+--
+-- -----------------------------------------
+-- Icinga 2 | (c) 2021 Icinga GmbH | GPLv2+
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+SET SQL_MODE="NO_AUTO_VALUE_ON_ZERO";
+
+-- -------------
+-- set dbversion
+-- -------------
+INSERT INTO icinga_dbversion (name, version, create_time, modify_time) VALUES ('idoutils', '1.15.1', NOW(), NOW()) ON DUPLICATE KEY UPDATE version='1.15.1', modify_time=NOW();
diff --git a/lib/db_ido_mysql/schema/upgrade/2.2.0.sql b/lib/db_ido_mysql/schema/upgrade/2.2.0.sql
new file mode 100644
index 0000000..22a6115
--- /dev/null
+++ b/lib/db_ido_mysql/schema/upgrade/2.2.0.sql
@@ -0,0 +1,23 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.2.0
+--
+-- -----------------------------------------
+-- Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+ALTER TABLE `icinga_programstatus` ADD COLUMN `program_version` varchar(64) character set latin1 collate latin1_general_cs default NULL;
+
+ALTER TABLE icinga_contacts MODIFY alias TEXT character set latin1;
+ALTER TABLE icinga_hosts MODIFY alias TEXT character set latin1;
+
+ALTER TABLE icinga_customvariables ADD COLUMN is_json smallint default 0;
+ALTER TABLE icinga_customvariablestatus ADD COLUMN is_json smallint default 0;
+
+-- -----------------------------------------
+-- update dbversion
+-- -----------------------------------------
+
+INSERT INTO icinga_dbversion (name, version, create_time, modify_time) VALUES ('idoutils', '1.12.0', NOW(), NOW()) ON DUPLICATE KEY UPDATE version='1.12.0', modify_time=NOW();
+
diff --git a/lib/db_ido_mysql/schema/upgrade/2.3.0.sql b/lib/db_ido_mysql/schema/upgrade/2.3.0.sql
new file mode 100644
index 0000000..f2fe463
--- /dev/null
+++ b/lib/db_ido_mysql/schema/upgrade/2.3.0.sql
@@ -0,0 +1,26 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.3.0
+--
+-- -----------------------------------------
+-- Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+-- -----------------------------------------
+-- #7765 drop unique constraint
+-- -----------------------------------------
+
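+-- The composite UNIQUE KEY named instance_id is dropped from both dependency
+-- tables and re-created as a plain (non-unique) KEY over the listed columns.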
+ALTER TABLE icinga_servicedependencies DROP KEY instance_id;
+ALTER TABLE icinga_hostdependencies DROP KEY instance_id;
+
+ALTER TABLE icinga_servicedependencies ADD KEY instance_id (instance_id,config_type,service_object_id,dependent_service_object_id,dependency_type,inherits_parent,fail_on_ok,fail_on_warning,fail_on_unknown,fail_on_critical);
+ALTER TABLE icinga_hostdependencies ADD KEY instance_id (instance_id,config_type,host_object_id,dependent_host_object_id,dependency_type,inherits_parent,fail_on_up,fail_on_down,fail_on_unreachable);
+
+
+-- -----------------------------------------
+-- update dbversion
+-- -----------------------------------------
+
+INSERT INTO icinga_dbversion (name, version, create_time, modify_time) VALUES ('idoutils', '1.13.0', NOW(), NOW()) ON DUPLICATE KEY UPDATE version='1.13.0', modify_time=NOW();
+
diff --git a/lib/db_ido_mysql/schema/upgrade/2.4.0.sql b/lib/db_ido_mysql/schema/upgrade/2.4.0.sql
new file mode 100644
index 0000000..f6803f7
--- /dev/null
+++ b/lib/db_ido_mysql/schema/upgrade/2.4.0.sql
@@ -0,0 +1,75 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.4.0
+--
+-- -----------------------------------------
+-- Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+-- -----------------------------------------
+-- #9286 - zone tables
+-- -----------------------------------------
+
+ALTER TABLE icinga_endpoints ADD COLUMN zone_object_id bigint(20) unsigned DEFAULT '0';
+ALTER TABLE icinga_endpointstatus ADD COLUMN zone_object_id bigint(20) unsigned DEFAULT '0';
+
+CREATE TABLE IF NOT EXISTS icinga_zones (
+ zone_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ zone_object_id bigint(20) unsigned DEFAULT '0',
+ config_type smallint(6) DEFAULT '0',
+ parent_zone_object_id bigint(20) unsigned DEFAULT '0',
+ is_global smallint(6),
+ PRIMARY KEY (zone_id)
+) ENGINE=InnoDB COMMENT='Zone configuration';
+
+CREATE TABLE IF NOT EXISTS icinga_zonestatus (
+ zonestatus_id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+ instance_id bigint unsigned default 0,
+ zone_object_id bigint(20) unsigned DEFAULT '0',
+ status_update_time timestamp NOT NULL,
+ parent_zone_object_id bigint(20) unsigned DEFAULT '0',
+ PRIMARY KEY (zonestatus_id)
+) ENGINE=InnoDB COMMENT='Zone status';
+
+
+-- -----------------------------------------
+-- #9576 - freshness_threshold
+-- -----------------------------------------
+
+ALTER TABLE icinga_services MODIFY freshness_threshold int;
+ALTER TABLE icinga_hosts MODIFY freshness_threshold int;
+
+-- -----------------------------------------
+-- #10392 - original attributes
+-- -----------------------------------------
+
+ALTER TABLE icinga_servicestatus ADD COLUMN original_attributes TEXT character set latin1 default NULL;
+ALTER TABLE icinga_hoststatus ADD COLUMN original_attributes TEXT character set latin1 default NULL;
+
+-- -----------------------------------------
+-- #10436 deleted custom vars
+-- -----------------------------------------
+
+ALTER TABLE icinga_customvariables ADD COLUMN session_token int default NULL;
+ALTER TABLE icinga_customvariablestatus ADD COLUMN session_token int default NULL;
+
+CREATE INDEX cv_session_del_idx ON icinga_customvariables (session_token);
+CREATE INDEX cvs_session_del_idx ON icinga_customvariablestatus (session_token);
+
+-- -----------------------------------------
+-- #10431 comment/downtime name
+-- -----------------------------------------
+
+ALTER TABLE icinga_comments ADD COLUMN name TEXT character set latin1 default NULL;
+ALTER TABLE icinga_commenthistory ADD COLUMN name TEXT character set latin1 default NULL;
+
+ALTER TABLE icinga_scheduleddowntime ADD COLUMN name TEXT character set latin1 default NULL;
+ALTER TABLE icinga_downtimehistory ADD COLUMN name TEXT character set latin1 default NULL;
+
+-- -----------------------------------------
+-- update dbversion
+-- -----------------------------------------
+
+INSERT INTO icinga_dbversion (name, version, create_time, modify_time) VALUES ('idoutils', '1.14.0', NOW(), NOW()) ON DUPLICATE KEY UPDATE version='1.14.0', modify_time=NOW();
diff --git a/lib/db_ido_mysql/schema/upgrade/2.5.0.sql b/lib/db_ido_mysql/schema/upgrade/2.5.0.sql
new file mode 100644
index 0000000..d5714a0
--- /dev/null
+++ b/lib/db_ido_mysql/schema/upgrade/2.5.0.sql
@@ -0,0 +1,103 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.5.0
+--
+-- -----------------------------------------
+-- Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+SET SQL_MODE="NO_AUTO_VALUE_ON_ZERO";
+
+-- -----------------------------------------
+-- #10069 IDO: check_source should not be a TEXT field
+-- -----------------------------------------
+
+ALTER TABLE icinga_hoststatus MODIFY COLUMN check_source varchar(255) character set latin1 default '';
+ALTER TABLE icinga_servicestatus MODIFY COLUMN check_source varchar(255) character set latin1 default '';
+
+-- -----------------------------------------
+-- #10070
+-- -----------------------------------------
+
+CREATE INDEX idx_comments_object_id on icinga_comments(object_id);
+CREATE INDEX idx_scheduleddowntime_object_id on icinga_scheduleddowntime(object_id);
+
+-- -----------------------------------------
+-- #11962
+-- -----------------------------------------
+
+ALTER TABLE icinga_hoststatus MODIFY COLUMN current_notification_number int unsigned default 0;
+ALTER TABLE icinga_servicestatus MODIFY COLUMN current_notification_number int unsigned default 0;
+
+-- -----------------------------------------
+-- #10061
+-- -----------------------------------------
+
+ALTER TABLE icinga_contactgroups MODIFY COLUMN alias varchar(255) character set latin1 default '';
+ALTER TABLE icinga_contacts MODIFY COLUMN alias varchar(255) character set latin1 default '';
+ALTER TABLE icinga_hostgroups MODIFY COLUMN alias varchar(255) character set latin1 default '';
+ALTER TABLE icinga_hosts MODIFY COLUMN alias varchar(255) character set latin1 default '';
+ALTER TABLE icinga_servicegroups MODIFY COLUMN alias varchar(255) character set latin1 default '';
+ALTER TABLE icinga_timeperiods MODIFY COLUMN alias varchar(255) character set latin1 default '';
+
+-- -----------------------------------------
+-- #10066
+-- -----------------------------------------
+
+CREATE INDEX idx_endpoints_object_id on icinga_endpoints(endpoint_object_id);
+CREATE INDEX idx_endpointstatus_object_id on icinga_endpointstatus(endpoint_object_id);
+
+CREATE INDEX idx_endpoints_zone_object_id on icinga_endpoints(zone_object_id);
+CREATE INDEX idx_endpointstatus_zone_object_id on icinga_endpointstatus(zone_object_id);
+
+CREATE INDEX idx_zones_object_id on icinga_zones(zone_object_id);
+CREATE INDEX idx_zonestatus_object_id on icinga_zonestatus(zone_object_id);
+
+CREATE INDEX idx_zones_parent_object_id on icinga_zones(parent_zone_object_id);
+CREATE INDEX idx_zonestatus_parent_object_id on icinga_zonestatus(parent_zone_object_id);
+
+-- -----------------------------------------
+-- #12107
+-- -----------------------------------------
+CREATE INDEX idx_statehistory_cleanup on icinga_statehistory(instance_id, state_time);
+
+-- -----------------------------------------
+-- #12258
+-- -----------------------------------------
+ALTER TABLE icinga_comments ADD COLUMN session_token INTEGER default NULL;
+ALTER TABLE icinga_scheduleddowntime ADD COLUMN session_token INTEGER default NULL;
+
+CREATE INDEX idx_comments_session_del ON icinga_comments (instance_id, session_token);
+CREATE INDEX idx_downtimes_session_del ON icinga_scheduleddowntime (instance_id, session_token);
+
+-- -----------------------------------------
+-- #12435
+-- -----------------------------------------
+ALTER TABLE icinga_commands ADD config_hash VARCHAR(64) DEFAULT NULL;
+ALTER TABLE icinga_contactgroups ADD config_hash VARCHAR(64) DEFAULT NULL;
+ALTER TABLE icinga_contacts ADD config_hash VARCHAR(64) DEFAULT NULL;
+ALTER TABLE icinga_hostgroups ADD config_hash VARCHAR(64) DEFAULT NULL;
+ALTER TABLE icinga_hosts ADD config_hash VARCHAR(64) DEFAULT NULL;
+ALTER TABLE icinga_servicegroups ADD config_hash VARCHAR(64) DEFAULT NULL;
+ALTER TABLE icinga_services ADD config_hash VARCHAR(64) DEFAULT NULL;
+ALTER TABLE icinga_timeperiods ADD config_hash VARCHAR(64) DEFAULT NULL;
+ALTER TABLE icinga_endpoints ADD config_hash VARCHAR(64) DEFAULT NULL;
+ALTER TABLE icinga_zones ADD config_hash VARCHAR(64) DEFAULT NULL;
+
+ALTER TABLE icinga_customvariables DROP session_token;
+ALTER TABLE icinga_customvariablestatus DROP session_token;
+
+CREATE INDEX idx_customvariables_object_id on icinga_customvariables(object_id);
+CREATE INDEX idx_contactgroup_members_object_id on icinga_contactgroup_members(contact_object_id);
+CREATE INDEX idx_hostgroup_members_object_id on icinga_hostgroup_members(host_object_id);
+CREATE INDEX idx_servicegroup_members_object_id on icinga_servicegroup_members(service_object_id);
+CREATE INDEX idx_servicedependencies_dependent_service_object_id on icinga_servicedependencies(dependent_service_object_id);
+CREATE INDEX idx_hostdependencies_dependent_host_object_id on icinga_hostdependencies(dependent_host_object_id);
+CREATE INDEX idx_service_contacts_service_id on icinga_service_contacts(service_id);
+CREATE INDEX idx_host_contacts_host_id on icinga_host_contacts(host_id);
+
+-- -----------------------------------------
+-- set dbversion
+-- -----------------------------------------
+INSERT INTO icinga_dbversion (name, version, create_time, modify_time) VALUES ('idoutils', '1.14.1', NOW(), NOW()) ON DUPLICATE KEY UPDATE version='1.14.1', modify_time=NOW();
diff --git a/lib/db_ido_mysql/schema/upgrade/2.6.0.sql b/lib/db_ido_mysql/schema/upgrade/2.6.0.sql
new file mode 100644
index 0000000..33dd780
--- /dev/null
+++ b/lib/db_ido_mysql/schema/upgrade/2.6.0.sql
@@ -0,0 +1,151 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.6.0
+--
+-- -----------------------------------------
+-- Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+-- -----------------------------------------
+-- #10502 IDO: Support NO_ZERO_DATE and NO_ZERO_IN_DATE SQL modes
+-- -----------------------------------------
+
+ALTER TABLE icinga_acknowledgements
+ MODIFY COLUMN entry_time timestamp NULL,
+ MODIFY COLUMN end_time timestamp NULL;
+
+ALTER TABLE icinga_commenthistory
+ MODIFY COLUMN entry_time timestamp NULL,
+ MODIFY COLUMN comment_time timestamp NULL,
+ MODIFY COLUMN expiration_time timestamp NULL,
+ MODIFY COLUMN deletion_time timestamp NULL;
+
+ALTER TABLE icinga_comments
+ MODIFY COLUMN entry_time timestamp NULL,
+ MODIFY COLUMN comment_time timestamp NULL,
+ MODIFY COLUMN expiration_time timestamp NULL;
+
+ALTER TABLE icinga_conninfo
+ MODIFY COLUMN connect_time timestamp NULL,
+ MODIFY COLUMN disconnect_time timestamp NULL,
+ MODIFY COLUMN last_checkin_time timestamp NULL,
+ MODIFY COLUMN data_start_time timestamp NULL,
+ MODIFY COLUMN data_end_time timestamp NULL;
+
+ALTER TABLE icinga_contactnotificationmethods
+ MODIFY COLUMN start_time timestamp NULL,
+ MODIFY COLUMN end_time timestamp NULL;
+
+ALTER TABLE icinga_contactnotifications
+ MODIFY COLUMN start_time timestamp NULL,
+ MODIFY COLUMN end_time timestamp NULL;
+
+ALTER TABLE icinga_contactstatus
+ MODIFY COLUMN status_update_time timestamp NULL,
+ MODIFY COLUMN last_host_notification timestamp NULL,
+ MODIFY COLUMN last_service_notification timestamp NULL;
+
+ALTER TABLE icinga_customvariablestatus
+ MODIFY COLUMN status_update_time timestamp NULL;
+
+ALTER TABLE icinga_dbversion
+ MODIFY COLUMN create_time timestamp NULL,
+ MODIFY COLUMN modify_time timestamp NULL;
+
+ALTER TABLE icinga_downtimehistory
+ MODIFY COLUMN entry_time timestamp NULL,
+ MODIFY COLUMN scheduled_start_time timestamp NULL,
+ MODIFY COLUMN scheduled_end_time timestamp NULL,
+ MODIFY COLUMN actual_start_time timestamp NULL,
+ MODIFY COLUMN actual_end_time timestamp NULL,
+ MODIFY COLUMN trigger_time timestamp NULL;
+
+ALTER TABLE icinga_eventhandlers
+ MODIFY COLUMN start_time timestamp NULL,
+ MODIFY COLUMN end_time timestamp NULL;
+
+ALTER TABLE icinga_externalcommands
+ MODIFY COLUMN entry_time timestamp NULL;
+
+ALTER TABLE icinga_flappinghistory
+ MODIFY COLUMN event_time timestamp NULL,
+ MODIFY COLUMN comment_time timestamp NULL;
+
+ALTER TABLE icinga_hostchecks
+ MODIFY COLUMN start_time timestamp NULL,
+ MODIFY COLUMN end_time timestamp NULL;
+
+ALTER TABLE icinga_hoststatus
+ MODIFY COLUMN status_update_time timestamp NULL,
+ MODIFY COLUMN last_check timestamp NULL,
+ MODIFY COLUMN next_check timestamp NULL,
+ MODIFY COLUMN last_state_change timestamp NULL,
+ MODIFY COLUMN last_hard_state_change timestamp NULL,
+ MODIFY COLUMN last_time_up timestamp NULL,
+ MODIFY COLUMN last_time_down timestamp NULL,
+ MODIFY COLUMN last_time_unreachable timestamp NULL,
+ MODIFY COLUMN last_notification timestamp NULL,
+ MODIFY COLUMN next_notification timestamp NULL;
+
+ALTER TABLE icinga_logentries
+ MODIFY COLUMN logentry_time timestamp NULL,
+ MODIFY COLUMN entry_time timestamp NULL;
+
+ALTER TABLE icinga_notifications
+ MODIFY COLUMN start_time timestamp NULL,
+ MODIFY COLUMN end_time timestamp NULL;
+
+ALTER TABLE icinga_processevents
+ MODIFY COLUMN event_time timestamp NULL;
+
+ALTER TABLE icinga_programstatus
+ MODIFY COLUMN status_update_time timestamp NULL,
+ MODIFY COLUMN program_start_time timestamp NULL,
+ MODIFY COLUMN program_end_time timestamp NULL,
+ MODIFY COLUMN last_command_check timestamp NULL,
+ MODIFY COLUMN last_log_rotation timestamp NULL,
+ MODIFY COLUMN disable_notif_expire_time timestamp NULL;
+
+ALTER TABLE icinga_scheduleddowntime
+ MODIFY COLUMN entry_time timestamp NULL,
+ MODIFY COLUMN scheduled_start_time timestamp NULL,
+ MODIFY COLUMN scheduled_end_time timestamp NULL,
+ MODIFY COLUMN actual_start_time timestamp NULL,
+ MODIFY COLUMN trigger_time timestamp NULL;
+
+ALTER TABLE icinga_servicechecks
+ MODIFY COLUMN start_time timestamp NULL,
+ MODIFY COLUMN end_time timestamp NULL;
+
+ALTER TABLE icinga_servicestatus
+ MODIFY COLUMN status_update_time timestamp NULL,
+ MODIFY COLUMN last_check timestamp NULL,
+ MODIFY COLUMN next_check timestamp NULL,
+ MODIFY COLUMN last_state_change timestamp NULL,
+ MODIFY COLUMN last_hard_state_change timestamp NULL,
+ MODIFY COLUMN last_time_ok timestamp NULL,
+ MODIFY COLUMN last_time_warning timestamp NULL,
+ MODIFY COLUMN last_time_unknown timestamp NULL,
+ MODIFY COLUMN last_time_critical timestamp NULL,
+ MODIFY COLUMN last_notification timestamp NULL,
+ MODIFY COLUMN next_notification timestamp NULL;
+
+ALTER TABLE icinga_statehistory
+ MODIFY COLUMN state_time timestamp NULL;
+
+ALTER TABLE icinga_systemcommands
+ MODIFY COLUMN start_time timestamp NULL,
+ MODIFY COLUMN end_time timestamp NULL;
+
+ALTER TABLE icinga_endpointstatus
+ MODIFY COLUMN status_update_time timestamp NULL;
+
+ALTER TABLE icinga_zonestatus
+ MODIFY COLUMN status_update_time timestamp NULL;
+
+-- -----------------------------------------
+-- set dbversion
+-- -----------------------------------------
+INSERT INTO icinga_dbversion (name, version, create_time, modify_time) VALUES ('idoutils', '1.14.2', NOW(), NOW())
+ON DUPLICATE KEY UPDATE version='1.14.2', modify_time=NOW();
diff --git a/lib/db_ido_mysql/schema/upgrade/2.8.0.sql b/lib/db_ido_mysql/schema/upgrade/2.8.0.sql
new file mode 100644
index 0000000..8d511a7
--- /dev/null
+++ b/lib/db_ido_mysql/schema/upgrade/2.8.0.sql
@@ -0,0 +1,81 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.8.0
+--
+-- -----------------------------------------
+-- Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+SET SQL_MODE="NO_AUTO_VALUE_ON_ZERO";
+
+-- --------------------------------------------------------
+-- Helper functions and procedures for DROP INDEX IF EXISTS
+-- --------------------------------------------------------
+
+DELIMITER //
+DROP FUNCTION IF EXISTS ido_index_exists //
+CREATE FUNCTION ido_index_exists(
+ f_table_name varchar(64),
+ f_index_name varchar(64)
+)
+ RETURNS BOOL
+ DETERMINISTIC
+ READS SQL DATA
+ BEGIN
+ DECLARE index_exists BOOL DEFAULT FALSE;
+ SELECT EXISTS (
+ SELECT 1
+ FROM information_schema.statistics
+ WHERE table_schema = SCHEMA()
+ AND table_name = f_table_name
+ AND index_name = f_index_name
+ ) INTO index_exists;
+ RETURN index_exists;
+ END //
+
+DROP PROCEDURE IF EXISTS ido_drop_index_if_exists //
+CREATE PROCEDURE ido_drop_index_if_exists (
+ IN p_table_name varchar(64),
+ IN p_index_name varchar(64)
+)
+ DETERMINISTIC
+ MODIFIES SQL DATA
+ BEGIN
+ IF ido_index_exists(p_table_name, p_index_name)
+ THEN
+ SET @ido_drop_index_sql = CONCAT('ALTER TABLE `', SCHEMA(), '`.`', p_table_name, '` DROP INDEX `', p_index_name, '`');
+ PREPARE stmt FROM @ido_drop_index_sql;
+ EXECUTE stmt;
+ DEALLOCATE PREPARE stmt;
+ SET @ido_drop_index_sql = NULL;
+ END IF;
+ END //
+DELIMITER ;
+
+CALL ido_drop_index_if_exists('icinga_downtimehistory', 'instance_id');
+CALL ido_drop_index_if_exists('icinga_scheduleddowntime', 'instance_id');
+CALL ido_drop_index_if_exists('icinga_commenthistory', 'instance_id');
+CALL ido_drop_index_if_exists('icinga_comments', 'instance_id');
+
+DROP FUNCTION ido_index_exists;
+DROP PROCEDURE ido_drop_index_if_exists;
+
+-- -----------------------------------------
+-- #5458 IDO: Improve downtime removal/cancel
+-- -----------------------------------------
+
+CREATE INDEX idx_downtimehistory_remove ON icinga_downtimehistory (object_id, entry_time, scheduled_start_time, scheduled_end_time);
+CREATE INDEX idx_scheduleddowntime_remove ON icinga_scheduleddowntime (object_id, entry_time, scheduled_start_time, scheduled_end_time);
+
+-- -----------------------------------------
+-- #5492 IDO: Improve comment removal
+-- -----------------------------------------
+
+CREATE INDEX idx_commenthistory_remove ON icinga_commenthistory (object_id, entry_time);
+CREATE INDEX idx_comments_remove ON icinga_comments (object_id, entry_time);
+
+-- -----------------------------------------
+-- set dbversion
+-- -----------------------------------------
+INSERT INTO icinga_dbversion (name, version, create_time, modify_time) VALUES ('idoutils', '1.14.3', NOW(), NOW()) ON DUPLICATE KEY UPDATE version='1.14.3', modify_time=NOW();
diff --git a/lib/db_ido_mysql/schema/upgrade/2.8.1.sql b/lib/db_ido_mysql/schema/upgrade/2.8.1.sql
new file mode 100644
index 0000000..98f8511
--- /dev/null
+++ b/lib/db_ido_mysql/schema/upgrade/2.8.1.sql
@@ -0,0 +1,67 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.8.1 (fix for fresh 2.8.0 installation only)
+--
+-- -----------------------------------------
+-- Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+SET SQL_MODE="NO_AUTO_VALUE_ON_ZERO";
+
+-- --------------------------------------------------------
+-- Helper functions and procedures for DROP INDEX IF EXISTS
+-- --------------------------------------------------------
+
+DELIMITER //
+DROP FUNCTION IF EXISTS ido_index_exists //
+CREATE FUNCTION ido_index_exists(
+ f_table_name varchar(64),
+ f_index_name varchar(64)
+)
+ RETURNS BOOL
+ DETERMINISTIC
+ READS SQL DATA
+ BEGIN
+ DECLARE index_exists BOOL DEFAULT FALSE;
+ SELECT EXISTS (
+ SELECT 1
+ FROM information_schema.statistics
+ WHERE table_schema = SCHEMA()
+ AND table_name = f_table_name
+ AND index_name = f_index_name
+ ) INTO index_exists;
+ RETURN index_exists;
+ END //
+
+DROP PROCEDURE IF EXISTS ido_drop_index_if_exists //
+CREATE PROCEDURE ido_drop_index_if_exists (
+ IN p_table_name varchar(64),
+ IN p_index_name varchar(64)
+)
+ DETERMINISTIC
+ MODIFIES SQL DATA
+ BEGIN
+ IF ido_index_exists(p_table_name, p_index_name)
+ THEN
+ SET @ido_drop_index_sql = CONCAT('ALTER TABLE `', SCHEMA(), '`.`', p_table_name, '` DROP INDEX `', p_index_name, '`');
+ PREPARE stmt FROM @ido_drop_index_sql;
+ EXECUTE stmt;
+ DEALLOCATE PREPARE stmt;
+ SET @ido_drop_index_sql = NULL;
+ END IF;
+ END //
+DELIMITER ;
+
+CALL ido_drop_index_if_exists('icinga_downtimehistory', 'instance_id');
+CALL ido_drop_index_if_exists('icinga_scheduleddowntime', 'instance_id');
+CALL ido_drop_index_if_exists('icinga_commenthistory', 'instance_id');
+CALL ido_drop_index_if_exists('icinga_comments', 'instance_id');
+
+DROP FUNCTION ido_index_exists;
+DROP PROCEDURE ido_drop_index_if_exists;
+
+-- -----------------------------------------
+-- set dbversion (same as 2.8.0)
+-- -----------------------------------------
+INSERT INTO icinga_dbversion (name, version, create_time, modify_time) VALUES ('idoutils', '1.14.3', NOW(), NOW()) ON DUPLICATE KEY UPDATE version='1.14.3', modify_time=NOW();
diff --git a/lib/db_ido_pgsql/CMakeLists.txt b/lib/db_ido_pgsql/CMakeLists.txt
new file mode 100644
index 0000000..e081a62
--- /dev/null
+++ b/lib/db_ido_pgsql/CMakeLists.txt
@@ -0,0 +1,41 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+mkclass_target(idopgsqlconnection.ti idopgsqlconnection-ti.cpp idopgsqlconnection-ti.hpp)
+
+set(db_ido_pgsql_SOURCES
+ idopgsqlconnection.cpp idopgsqlconnection.hpp idopgsqlconnection-ti.hpp
+)
+
+if(ICINGA2_UNITY_BUILD)
+ mkunity_target(db_ido_pgsql db_ido_pgsql db_ido_pgsql_SOURCES)
+endif()
+
+add_library(db_ido_pgsql OBJECT ${db_ido_pgsql_SOURCES})
+
+include_directories(${PostgreSQL_INCLUDE_DIRS})
+
+add_dependencies(db_ido_pgsql base config icinga db_ido)
+
+set_target_properties (
+ db_ido_pgsql PROPERTIES
+ FOLDER Components
+)
+
+install_if_not_exists(
+ ${PROJECT_SOURCE_DIR}/etc/icinga2/features-available/ido-pgsql.conf
+ ${ICINGA2_CONFIGDIR}/features-available
+)
+
+install(
+ DIRECTORY schema
+ DESTINATION ${CMAKE_INSTALL_DATADIR}/icinga2-ido-pgsql
+ FILES_MATCHING PATTERN "*.sql"
+)
+
+install(
+ DIRECTORY schema/upgrade
+ DESTINATION ${CMAKE_INSTALL_DATADIR}/icinga2-ido-pgsql/schema
+ FILES_MATCHING PATTERN "*.sql"
+)
+
+set(CPACK_NSIS_EXTRA_INSTALL_COMMANDS "${CPACK_NSIS_EXTRA_INSTALL_COMMANDS}" PARENT_SCOPE)
diff --git a/lib/db_ido_pgsql/idopgsqlconnection.cpp b/lib/db_ido_pgsql/idopgsqlconnection.cpp
new file mode 100644
index 0000000..07e88e6
--- /dev/null
+++ b/lib/db_ido_pgsql/idopgsqlconnection.cpp
@@ -0,0 +1,1029 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido_pgsql/idopgsqlconnection.hpp"
+#include "db_ido_pgsql/idopgsqlconnection-ti.cpp"
+#include "db_ido/dbtype.hpp"
+#include "db_ido/dbvalue.hpp"
+#include "base/logger.hpp"
+#include "base/objectlock.hpp"
+#include "base/convert.hpp"
+#include "base/utility.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/application.hpp"
+#include "base/configtype.hpp"
+#include "base/exception.hpp"
+#include "base/context.hpp"
+#include "base/statsfunction.hpp"
+#include "base/defer.hpp"
+#include <utility>
+
+using namespace icinga;
+
+REGISTER_TYPE(IdoPgsqlConnection);
+
+REGISTER_STATSFUNCTION(IdoPgsqlConnection, &IdoPgsqlConnection::StatsFunc);
+
+const char * IdoPgsqlConnection::GetLatestSchemaVersion() const noexcept
+{
+ return "1.14.3";
+}
+
+const char * IdoPgsqlConnection::GetCompatSchemaVersion() const noexcept
+{
+ return "1.14.3";
+}
+
+IdoPgsqlConnection::IdoPgsqlConnection()
+{
+ m_QueryQueue.SetName("IdoPgsqlConnection, " + GetName());
+}
+
+void IdoPgsqlConnection::OnConfigLoaded()
+{
+ ObjectImpl<IdoPgsqlConnection>::OnConfigLoaded();
+
+ m_QueryQueue.SetName("IdoPgsqlConnection, " + GetName());
+
+ Library shimLibrary{"pgsql_shim"};
+
+ auto create_pgsql_shim = shimLibrary.GetSymbolAddress<create_pgsql_shim_ptr>("create_pgsql_shim");
+
+ m_Pgsql.reset(create_pgsql_shim());
+
+ std::swap(m_Library, shimLibrary);
+}
+
+void IdoPgsqlConnection::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata)
+{
+ DictionaryData nodes;
+
+ for (const IdoPgsqlConnection::Ptr& idopgsqlconnection : ConfigType::GetObjectsByType<IdoPgsqlConnection>()) {
+ size_t queryQueueItems = idopgsqlconnection->m_QueryQueue.GetLength();
+ double queryQueueItemRate = idopgsqlconnection->m_QueryQueue.GetTaskCount(60) / 60.0;
+
+ nodes.emplace_back(idopgsqlconnection->GetName(), new Dictionary({
+ { "version", idopgsqlconnection->GetSchemaVersion() },
+ { "instance_name", idopgsqlconnection->GetInstanceName() },
+ { "connected", idopgsqlconnection->GetConnected() },
+ { "query_queue_items", queryQueueItems },
+ { "query_queue_item_rate", queryQueueItemRate }
+ }));
+
+ perfdata->Add(new PerfdataValue("idopgsqlconnection_" + idopgsqlconnection->GetName() + "_queries_rate", idopgsqlconnection->GetQueryCount(60) / 60.0));
+ perfdata->Add(new PerfdataValue("idopgsqlconnection_" + idopgsqlconnection->GetName() + "_queries_1min", idopgsqlconnection->GetQueryCount(60)));
+ perfdata->Add(new PerfdataValue("idopgsqlconnection_" + idopgsqlconnection->GetName() + "_queries_5mins", idopgsqlconnection->GetQueryCount(5 * 60)));
+ perfdata->Add(new PerfdataValue("idopgsqlconnection_" + idopgsqlconnection->GetName() + "_queries_15mins", idopgsqlconnection->GetQueryCount(15 * 60)));
+ perfdata->Add(new PerfdataValue("idopgsqlconnection_" + idopgsqlconnection->GetName() + "_query_queue_items", queryQueueItems));
+ perfdata->Add(new PerfdataValue("idopgsqlconnection_" + idopgsqlconnection->GetName() + "_query_queue_item_rate", queryQueueItemRate));
+ }
+
+ status->Set("idopgsqlconnection", new Dictionary(std::move(nodes)));
+}
+
+void IdoPgsqlConnection::Resume()
+{
+ Log(LogInformation, "IdoPgsqlConnection")
+ << "'" << GetName() << "' resumed.";
+
+ SetConnected(false);
+
+ m_QueryQueue.SetExceptionCallback([this](boost::exception_ptr exp) { ExceptionHandler(std::move(exp)); });
+
+ /* Immediately try to connect on Resume() without timer. */
+ m_QueryQueue.Enqueue([this]() { Reconnect(); }, PriorityImmediate);
+
+ m_TxTimer = Timer::Create();
+ m_TxTimer->SetInterval(1);
+ m_TxTimer->OnTimerExpired.connect([this](const Timer * const&) { NewTransaction(); });
+ m_TxTimer->Start();
+
+ m_ReconnectTimer = Timer::Create();
+ m_ReconnectTimer->SetInterval(10);
+ m_ReconnectTimer->OnTimerExpired.connect([this](const Timer * const&) { ReconnectTimerHandler(); });
+ m_ReconnectTimer->Start();
+
+ /* Start with queries after connect. */
+ DbConnection::Resume();
+
+ ASSERT(m_Pgsql->isthreadsafe());
+}
+
+void IdoPgsqlConnection::Pause()
+{
+ DbConnection::Pause();
+
+ m_ReconnectTimer->Stop(true);
+ m_TxTimer->Stop(true);
+
+ Log(LogInformation, "IdoPgsqlConnection")
+ << "'" << GetName() << "' paused.";
+}
+
+void IdoPgsqlConnection::ExceptionHandler(boost::exception_ptr exp)
+{
+ Log(LogWarning, "IdoPgsqlConnection", "Exception during database operation: Verify that your database is operational!");
+
+ Log(LogDebug, "IdoPgsqlConnection")
+ << "Exception during database operation: " << DiagnosticInformation(std::move(exp));
+
+ if (GetConnected()) {
+ m_Pgsql->finish(m_Connection);
+ SetConnected(false);
+ }
+}
+
+void IdoPgsqlConnection::AssertOnWorkQueue()
+{
+ ASSERT(m_QueryQueue.IsWorkerThread());
+}
+
+void IdoPgsqlConnection::Disconnect()
+{
+ AssertOnWorkQueue();
+
+ if (!GetConnected())
+ return;
+
+ IncreasePendingQueries(1);
+ Query("COMMIT");
+
+ m_Pgsql->finish(m_Connection);
+ SetConnected(false);
+
+ Log(LogInformation, "IdoPgsqlConnection")
+ << "Disconnected from '" << GetName() << "' database '" << GetDatabase() << "'.";
+}
+
+void IdoPgsqlConnection::NewTransaction()
+{
+ if (IsPaused())
+ return;
+
+ m_QueryQueue.Enqueue([this]() { InternalNewTransaction(); }, PriorityNormal, true);
+}
+
+void IdoPgsqlConnection::InternalNewTransaction()
+{
+ AssertOnWorkQueue();
+
+ if (!GetConnected())
+ return;
+
+ IncreasePendingQueries(2);
+ Query("COMMIT");
+ Query("BEGIN");
+}
+
+void IdoPgsqlConnection::ReconnectTimerHandler()
+{
+ /* Only allow Reconnect events with high priority. */
+ m_QueryQueue.Enqueue([this]() { Reconnect(); }, PriorityHigh);
+}
+
+void IdoPgsqlConnection::Reconnect()
+{
+ AssertOnWorkQueue();
+
+ CONTEXT("Reconnecting to PostgreSQL IDO database '" << GetName() << "'");
+
+ double startTime = Utility::GetTime();
+
+ SetShouldConnect(true);
+
+ bool reconnect = false;
+
+ if (GetConnected()) {
+ /* Check if we're really still connected */
+ try {
+ IncreasePendingQueries(1);
+ Query("SELECT 1");
+ return;
+ } catch (const std::exception&) {
+ m_Pgsql->finish(m_Connection);
+ SetConnected(false);
+ reconnect = true;
+ }
+ }
+
+ ClearIDCache();
+
+ String host = GetHost();
+ String port = GetPort();
+ String user = GetUser();
+ String password = GetPassword();
+ String database = GetDatabase();
+
+ String sslMode = GetSslMode();
+ String sslKey = GetSslKey();
+ String sslCert = GetSslCert();
+ String sslCa = GetSslCa();
+
+ String conninfo;
+
+ if (!host.IsEmpty())
+ conninfo += " host=" + host;
+ if (!port.IsEmpty())
+ conninfo += " port=" + port;
+ if (!user.IsEmpty())
+ conninfo += " user=" + user;
+ if (!password.IsEmpty())
+ conninfo += " password=" + password;
+ if (!database.IsEmpty())
+ conninfo += " dbname=" + database;
+
+ if (!sslMode.IsEmpty())
+ conninfo += " sslmode=" + sslMode;
+ if (!sslKey.IsEmpty())
+ conninfo += " sslkey=" + sslKey;
+ if (!sslCert.IsEmpty())
+ conninfo += " sslcert=" + sslCert;
+ if (!sslCa.IsEmpty())
+ conninfo += " sslrootcert=" + sslCa;
+
+ /* connection */
+ m_Connection = m_Pgsql->connectdb(conninfo.CStr());
+
+ if (!m_Connection)
+ return;
+
+ if (m_Pgsql->status(m_Connection) != CONNECTION_OK) {
+ String message = m_Pgsql->errorMessage(m_Connection);
+ m_Pgsql->finish(m_Connection);
+ SetConnected(false);
+
+ Log(LogCritical, "IdoPgsqlConnection")
+ << "Connection to database '" << database << "' with user '" << user << "' on '" << host << ":" << port
+ << "' failed: \"" << message << "\"";
+
+ BOOST_THROW_EXCEPTION(std::runtime_error(message));
+ }
+
+ SetConnected(true);
+
+ IdoPgsqlResult result;
+
+ String dbVersionName = "idoutils";
+ IncreasePendingQueries(1);
+ result = Query("SELECT version FROM " + GetTablePrefix() + "dbversion WHERE name='" + Escape(dbVersionName) + "'");
+
+ Dictionary::Ptr row = FetchRow(result, 0);
+
+ if (!row) {
+ m_Pgsql->finish(m_Connection);
+ SetConnected(false);
+
+ Log(LogCritical, "IdoPgsqlConnection", "Schema does not provide any valid version! Verify your schema installation.");
+
+ BOOST_THROW_EXCEPTION(std::runtime_error("Invalid schema."));
+ }
+
+ String version = row->Get("version");
+
+ SetSchemaVersion(version);
+
+ if (Utility::CompareVersion(GetCompatSchemaVersion(), version) < 0) {
+ m_Pgsql->finish(m_Connection);
+ SetConnected(false);
+
+ Log(LogCritical, "IdoPgsqlConnection")
+ << "Schema version '" << version << "' does not match the required version '"
+ << GetCompatSchemaVersion() << "' (or newer)! Please check the upgrade documentation at "
+ << "https://icinga.com/docs/icinga2/latest/doc/16-upgrading-icinga-2/#upgrading-postgresql-db";
+
+ BOOST_THROW_EXCEPTION(std::runtime_error("Schema version mismatch."));
+ }
+
+ String instanceName = GetInstanceName();
+
+ IncreasePendingQueries(1);
+ result = Query("SELECT instance_id FROM " + GetTablePrefix() + "instances WHERE instance_name = '" + Escape(instanceName) + "'");
+ row = FetchRow(result, 0);
+
+ if (!row) {
+ IncreasePendingQueries(1);
+ Query("INSERT INTO " + GetTablePrefix() + "instances (instance_name, instance_description) VALUES ('" + Escape(instanceName) + "', '" + Escape(GetInstanceDescription()) + "')");
+ m_InstanceID = GetSequenceValue(GetTablePrefix() + "instances", "instance_id");
+ } else {
+ m_InstanceID = DbReference(row->Get("instance_id"));
+ }
+
+ Endpoint::Ptr my_endpoint = Endpoint::GetLocalEndpoint();
+
+ /* we have an endpoint in a cluster setup, so decide if we can proceed here */
+ if (my_endpoint && GetHAMode() == HARunOnce) {
+ /* get the current endpoint writing to programstatus table */
+ IncreasePendingQueries(1);
+ result = Query("SELECT UNIX_TIMESTAMP(status_update_time) AS status_update_time, endpoint_name FROM " +
+ GetTablePrefix() + "programstatus WHERE instance_id = " + Convert::ToString(m_InstanceID));
+ row = FetchRow(result, 0);
+
+ String endpoint_name;
+
+ if (row)
+ endpoint_name = row->Get("endpoint_name");
+ else
+ Log(LogNotice, "IdoPgsqlConnection", "Empty program status table");
+
+ /* if we did not write into the database earlier, another instance is active */
+ if (endpoint_name != my_endpoint->GetName()) {
+ double status_update_time;
+
+ if (row)
+ status_update_time = row->Get("status_update_time");
+ else
+ status_update_time = 0;
+
+ double now = Utility::GetTime();
+
+ double status_update_age = now - status_update_time;
+ double failoverTimeout = GetFailoverTimeout();
+
+ if (status_update_age < failoverTimeout) {
+ Log(LogInformation, "IdoPgsqlConnection")
+ << "Last update by endpoint '" << endpoint_name << "' was "
+ << status_update_age << "s ago (< failover timeout of " << failoverTimeout << "s). Retrying.";
+
+ m_Pgsql->finish(m_Connection);
+ SetConnected(false);
+ SetShouldConnect(false);
+
+ return;
+ }
+
+ /* activate the IDO only, if we're authoritative in this zone */
+ if (IsPaused()) {
+ Log(LogNotice, "IdoPgsqlConnection")
+ << "Local endpoint '" << my_endpoint->GetName() << "' is not authoritative, bailing out.";
+
+ m_Pgsql->finish(m_Connection);
+ SetConnected(false);
+
+ return;
+ }
+
+ SetLastFailover(now);
+
+ Log(LogInformation, "IdoPgsqlConnection")
+ << "Last update by endpoint '" << endpoint_name << "' was "
+ << status_update_age << "s ago. Taking over '" << GetName() << "' in HA zone '" << Zone::GetLocalZone()->GetName() << "'.";
+ }
+
+ Log(LogNotice, "IdoPgsqlConnection", "Enabling IDO connection.");
+ }
+
+ Log(LogInformation, "IdoPgsqlConnection")
+ << "PGSQL IDO instance id: " << static_cast<long>(m_InstanceID) << " (schema version: '" + version + "')"
+ << (!sslMode.IsEmpty() ? ", sslmode='" + sslMode + "'" : "");
+
+ IncreasePendingQueries(1);
+ Query("BEGIN");
+
+ /* update programstatus table */
+ UpdateProgramStatus();
+
+ /* record connection */
+ IncreasePendingQueries(1);
+ Query("INSERT INTO " + GetTablePrefix() + "conninfo " +
+ "(instance_id, connect_time, last_checkin_time, agent_name, agent_version, connect_type, data_start_time) VALUES ("
+ + Convert::ToString(static_cast<long>(m_InstanceID)) + ", NOW(), NOW(), 'icinga2 db_ido_pgsql', '" + Escape(Application::GetAppVersion())
+ + "', '" + (reconnect ? "RECONNECT" : "INITIAL") + "', NOW())");
+
+ /* clear config tables for the initial config dump */
+ PrepareDatabase();
+
+ std::ostringstream q1buf;
+ q1buf << "SELECT object_id, objecttype_id, name1, name2, is_active FROM " + GetTablePrefix() + "objects WHERE instance_id = " << static_cast<long>(m_InstanceID);
+ IncreasePendingQueries(1);
+ result = Query(q1buf.str());
+
+ std::vector<DbObject::Ptr> activeDbObjs;
+
+ int index = 0;
+ while ((row = FetchRow(result, index))) {
+ index++;
+
+ DbType::Ptr dbtype = DbType::GetByID(row->Get("objecttype_id"));
+
+ if (!dbtype)
+ continue;
+
+ DbObject::Ptr dbobj = dbtype->GetOrCreateObjectByName(row->Get("name1"), row->Get("name2"));
+ SetObjectID(dbobj, DbReference(row->Get("object_id")));
+ bool active = row->Get("is_active");
+ SetObjectActive(dbobj, active);
+
+ if (active)
+ activeDbObjs.push_back(dbobj);
+ }
+
+ SetIDCacheValid(true);
+
+ EnableActiveChangedHandler();
+
+ for (const DbObject::Ptr& dbobj : activeDbObjs) {
+ if (dbobj->GetObject())
+ continue;
+
+ Log(LogNotice, "IdoPgsqlConnection")
+ << "Deactivate deleted object name1: '" << dbobj->GetName1()
+ << "' name2: '" << dbobj->GetName2() + "'.";
+ DeactivateObject(dbobj);
+ }
+
+ UpdateAllObjects();
+
+ m_QueryQueue.Enqueue([this]() { ClearTablesBySession(); }, PriorityNormal);
+
+ m_QueryQueue.Enqueue([this, startTime]() { FinishConnect(startTime); }, PriorityNormal);
+}
+
+void IdoPgsqlConnection::FinishConnect(double startTime)
+{
+ AssertOnWorkQueue();
+
+ if (!GetConnected())
+ return;
+
+ Log(LogInformation, "IdoPgsqlConnection")
+ << "Finished reconnecting to '" << GetName() << "' database '" << GetDatabase() << "' in "
+ << std::setw(2) << Utility::GetTime() - startTime << " second(s).";
+
+ IncreasePendingQueries(2);
+ Query("COMMIT");
+ Query("BEGIN");
+}
+
+void IdoPgsqlConnection::ClearTablesBySession()
+{
+ /* delete all comments and downtimes without current session token */
+ ClearTableBySession("comments");
+ ClearTableBySession("scheduleddowntime");
+}
+
+void IdoPgsqlConnection::ClearTableBySession(const String& table)
+{
+ IncreasePendingQueries(1);
+ Query("DELETE FROM " + GetTablePrefix() + table + " WHERE instance_id = " +
+ Convert::ToString(static_cast<long>(m_InstanceID)) + " AND session_token <> " +
+ Convert::ToString(GetSessionToken()));
+}
+
+IdoPgsqlResult IdoPgsqlConnection::Query(const String& query)
+{
+ AssertOnWorkQueue();
+
+ Defer decreaseQueries ([this]() { DecreasePendingQueries(1); });
+
+ Log(LogDebug, "IdoPgsqlConnection")
+ << "Query: " << query;
+
+ IncreaseQueryCount();
+
+ PGresult *result = m_Pgsql->exec(m_Connection, query.CStr());
+
+ if (!result) {
+ String message = m_Pgsql->errorMessage(m_Connection);
+ Log(LogCritical, "IdoPgsqlConnection")
+ << "Error \"" << message << "\" when executing query \"" << query << "\"";
+
+ BOOST_THROW_EXCEPTION(
+ database_error()
+ << errinfo_message(message)
+ << errinfo_database_query(query)
+ );
+ }
+
+ char *rowCount = m_Pgsql->cmdTuples(result);
+ m_AffectedRows = atoi(rowCount);
+
+ if (m_Pgsql->resultStatus(result) == PGRES_COMMAND_OK) {
+ m_Pgsql->clear(result);
+ return IdoPgsqlResult();
+ }
+
+ if (m_Pgsql->resultStatus(result) != PGRES_TUPLES_OK) {
+ String message = m_Pgsql->resultErrorMessage(result);
+ m_Pgsql->clear(result);
+
+ Log(LogCritical, "IdoPgsqlConnection")
+ << "Error \"" << message << "\" when executing query \"" << query << "\"";
+
+ BOOST_THROW_EXCEPTION(
+ database_error()
+ << errinfo_message(message)
+ << errinfo_database_query(query)
+ );
+ }
+
+ return IdoPgsqlResult(result, [this](PGresult* result) { m_Pgsql->clear(result); });
+}
+
+DbReference IdoPgsqlConnection::GetSequenceValue(const String& table, const String& column)
+{
+ AssertOnWorkQueue();
+
+ IncreasePendingQueries(1);
+ IdoPgsqlResult result = Query("SELECT CURRVAL(pg_get_serial_sequence('" + Escape(table) + "', '" + Escape(column) + "')) AS id");
+
+ Dictionary::Ptr row = FetchRow(result, 0);
+
+ ASSERT(row);
+
+ Log(LogDebug, "IdoPgsqlConnection")
+ << "Sequence Value: " << row->Get("id");
+
+ return {Convert::ToLong(row->Get("id"))};
+}
+
+int IdoPgsqlConnection::GetAffectedRows()
+{
+ AssertOnWorkQueue();
+
+ return m_AffectedRows;
+}
+
+String IdoPgsqlConnection::Escape(const String& s)
+{
+ AssertOnWorkQueue();
+
+ String utf8s = Utility::ValidateUTF8(s);
+
+ size_t length = utf8s.GetLength();
+ auto *to = new char[utf8s.GetLength() * 2 + 1];
+
+ m_Pgsql->escapeStringConn(m_Connection, to, utf8s.CStr(), length, nullptr);
+
+ String result = String(to);
+
+ delete [] to;
+
+ return result;
+}
+
+Dictionary::Ptr IdoPgsqlConnection::FetchRow(const IdoPgsqlResult& result, int row)
+{
+ AssertOnWorkQueue();
+
+ if (row >= m_Pgsql->ntuples(result.get()))
+ return nullptr;
+
+ int columns = m_Pgsql->nfields(result.get());
+
+ DictionaryData dict;
+
+ for (int column = 0; column < columns; column++) {
+ Value value;
+
+ if (!m_Pgsql->getisnull(result.get(), row, column))
+ value = m_Pgsql->getvalue(result.get(), row, column);
+
+ dict.emplace_back(m_Pgsql->fname(result.get(), column), value);
+ }
+
+ return new Dictionary(std::move(dict));
+}
+
+void IdoPgsqlConnection::ActivateObject(const DbObject::Ptr& dbobj)
+{
+ if (IsPaused())
+ return;
+
+ m_QueryQueue.Enqueue([this, dbobj]() { InternalActivateObject(dbobj); }, PriorityNormal);
+}
+
+void IdoPgsqlConnection::InternalActivateObject(const DbObject::Ptr& dbobj)
+{
+ AssertOnWorkQueue();
+
+ if (!GetConnected())
+ return;
+
+ DbReference dbref = GetObjectID(dbobj);
+ std::ostringstream qbuf;
+
+ if (!dbref.IsValid()) {
+ if (!dbobj->GetName2().IsEmpty()) {
+ qbuf << "INSERT INTO " + GetTablePrefix() + "objects (instance_id, objecttype_id, name1, name2, is_active) VALUES ("
+ << static_cast<long>(m_InstanceID) << ", " << dbobj->GetType()->GetTypeID() << ", "
+ << "'" << Escape(dbobj->GetName1()) << "', '" << Escape(dbobj->GetName2()) << "', 1)";
+ } else {
+ qbuf << "INSERT INTO " + GetTablePrefix() + "objects (instance_id, objecttype_id, name1, is_active) VALUES ("
+ << static_cast<long>(m_InstanceID) << ", " << dbobj->GetType()->GetTypeID() << ", "
+ << "'" << Escape(dbobj->GetName1()) << "', 1)";
+ }
+
+ IncreasePendingQueries(1);
+ Query(qbuf.str());
+ SetObjectID(dbobj, GetSequenceValue(GetTablePrefix() + "objects", "object_id"));
+ } else {
+ qbuf << "UPDATE " + GetTablePrefix() + "objects SET is_active = 1 WHERE object_id = " << static_cast<long>(dbref);
+ IncreasePendingQueries(1);
+ Query(qbuf.str());
+ }
+}
+
+void IdoPgsqlConnection::DeactivateObject(const DbObject::Ptr& dbobj)
+{
+ if (IsPaused())
+ return;
+
+ m_QueryQueue.Enqueue([this, dbobj]() { InternalDeactivateObject(dbobj); }, PriorityNormal);
+}
+
+void IdoPgsqlConnection::InternalDeactivateObject(const DbObject::Ptr& dbobj)
+{
+ AssertOnWorkQueue();
+
+ if (!GetConnected())
+ return;
+
+ DbReference dbref = GetObjectID(dbobj);
+
+ if (!dbref.IsValid())
+ return;
+
+ std::ostringstream qbuf;
+ qbuf << "UPDATE " + GetTablePrefix() + "objects SET is_active = 0 WHERE object_id = " << static_cast<long>(dbref);
+ IncreasePendingQueries(1);
+ Query(qbuf.str());
+
+ /* Note that we're _NOT_ clearing the db refs via SetReference/SetConfigUpdate/SetStatusUpdate
+ * because the object is still in the database. */
+
+ SetObjectActive(dbobj, false);
+}
+
+bool IdoPgsqlConnection::FieldToEscapedString(const String& key, const Value& value, Value *result)
+{
+ if (key == "instance_id") {
+ *result = static_cast<long>(m_InstanceID);
+ return true;
+ } else if (key == "session_token") {
+ *result = GetSessionToken();
+ return true;
+ }
+
+ Value rawvalue = DbValue::ExtractValue(value);
+
+ if (rawvalue.GetType() == ValueEmpty) {
+ *result = "NULL";
+ } else if (rawvalue.IsObjectType<ConfigObject>()) {
+ DbObject::Ptr dbobjcol = DbObject::GetOrCreateByObject(rawvalue);
+
+ if (!dbobjcol) {
+ *result = 0;
+ return true;
+ }
+
+ if (!IsIDCacheValid())
+ return false;
+
+ DbReference dbrefcol;
+
+ if (DbValue::IsObjectInsertID(value)) {
+ dbrefcol = GetInsertID(dbobjcol);
+
+ if (!dbrefcol.IsValid())
+ return false;
+ } else {
+ dbrefcol = GetObjectID(dbobjcol);
+
+ if (!dbrefcol.IsValid()) {
+ InternalActivateObject(dbobjcol);
+
+ dbrefcol = GetObjectID(dbobjcol);
+
+ if (!dbrefcol.IsValid())
+ return false;
+ }
+ }
+
+ *result = static_cast<long>(dbrefcol);
+ } else if (DbValue::IsTimestamp(value)) {
+ long ts = rawvalue;
+ std::ostringstream msgbuf;
+ msgbuf << "TO_TIMESTAMP(" << ts << ") AT TIME ZONE 'UTC'";
+ *result = Value(msgbuf.str());
+ } else if (DbValue::IsObjectInsertID(value)) {
+ auto id = static_cast<long>(rawvalue);
+
+ if (id <= 0)
+ return false;
+
+ *result = id;
+ return true;
+ } else {
+ Value fvalue;
+
+ if (rawvalue.IsBoolean())
+ fvalue = Convert::ToLong(rawvalue);
+ else
+ fvalue = rawvalue;
+
+ *result = "'" + Escape(fvalue) + "'";
+ }
+
+ return true;
+}
+
+void IdoPgsqlConnection::ExecuteQuery(const DbQuery& query)
+{
+ if (IsPaused() && GetPauseCalled())
+ return;
+
+ ASSERT(query.Category != DbCatInvalid);
+
+ IncreasePendingQueries(1);
+ m_QueryQueue.Enqueue([this, query]() { InternalExecuteQuery(query, -1); }, query.Priority, true);
+}
+
+void IdoPgsqlConnection::ExecuteMultipleQueries(const std::vector<DbQuery>& queries)
+{
+ if (IsPaused())
+ return;
+
+ if (queries.empty())
+ return;
+
+ IncreasePendingQueries(queries.size());
+ m_QueryQueue.Enqueue([this, queries]() { InternalExecuteMultipleQueries(queries); }, queries[0].Priority, true);
+}
+
+bool IdoPgsqlConnection::CanExecuteQuery(const DbQuery& query)
+{
+ if (query.Object && !IsIDCacheValid())
+ return false;
+
+ if (query.WhereCriteria) {
+ ObjectLock olock(query.WhereCriteria);
+ Value value;
+
+ for (const Dictionary::Pair& kv : query.WhereCriteria) {
+ if (!FieldToEscapedString(kv.first, kv.second, &value))
+ return false;
+ }
+ }
+
+ if (query.Fields) {
+ ObjectLock olock(query.Fields);
+
+ for (const Dictionary::Pair& kv : query.Fields) {
+ Value value;
+
+ if (!FieldToEscapedString(kv.first, kv.second, &value))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void IdoPgsqlConnection::InternalExecuteMultipleQueries(const std::vector<DbQuery>& queries)
+{
+ AssertOnWorkQueue();
+
+ if (IsPaused()) {
+ DecreasePendingQueries(queries.size());
+ return;
+ }
+
+ if (!GetConnected()) {
+ DecreasePendingQueries(queries.size());
+ return;
+ }
+
+ for (const DbQuery& query : queries) {
+ ASSERT(query.Type == DbQueryNewTransaction || query.Category != DbCatInvalid);
+
+ if (!CanExecuteQuery(query)) {
+ m_QueryQueue.Enqueue([this, queries]() { InternalExecuteMultipleQueries(queries); }, query.Priority);
+ return;
+ }
+ }
+
+ for (const DbQuery& query : queries) {
+ InternalExecuteQuery(query);
+ }
+}
+
+void IdoPgsqlConnection::InternalExecuteQuery(const DbQuery& query, int typeOverride)
+{
+ AssertOnWorkQueue();
+
+ if (IsPaused() && GetPauseCalled()) {
+ DecreasePendingQueries(1);
+ return;
+ }
+
+ if (!GetConnected()) {
+ DecreasePendingQueries(1);
+ return;
+ }
+
+ if (query.Type == DbQueryNewTransaction) {
+ DecreasePendingQueries(1);
+ InternalNewTransaction();
+ return;
+ }
+
+ /* check whether we're allowed to execute the query first */
+ if (GetCategoryFilter() != DbCatEverything && (query.Category & GetCategoryFilter()) == 0) {
+ DecreasePendingQueries(1);
+ return;
+ }
+
+ if (query.Object && query.Object->GetObject()->GetExtension("agent_check").ToBool()) {
+ DecreasePendingQueries(1);
+ return;
+ }
+
+ /* check if there are missing object/insert ids and re-enqueue the query */
+ if (!CanExecuteQuery(query)) {
+ m_QueryQueue.Enqueue([this, query, typeOverride]() { InternalExecuteQuery(query, typeOverride); }, query.Priority);
+ return;
+ }
+
+ std::ostringstream qbuf, where;
+ int type;
+
+ if (query.WhereCriteria) {
+ where << " WHERE ";
+
+ ObjectLock olock(query.WhereCriteria);
+ Value value;
+ bool first = true;
+
+ for (const Dictionary::Pair& kv : query.WhereCriteria) {
+ if (!FieldToEscapedString(kv.first, kv.second, &value)) {
+ m_QueryQueue.Enqueue([this, query]() { InternalExecuteQuery(query, -1); }, query.Priority);
+ return;
+ }
+
+ if (!first)
+ where << " AND ";
+
+ where << kv.first << " = " << value;
+
+ if (first)
+ first = false;
+ }
+ }
+
+ type = (typeOverride != -1) ? typeOverride : query.Type;
+
+ bool upsert = false;
+
+ if ((type & DbQueryInsert) && (type & DbQueryUpdate)) {
+ bool hasid = false;
+
+ if (query.Object) {
+ if (query.ConfigUpdate)
+ hasid = GetConfigUpdate(query.Object);
+ else if (query.StatusUpdate)
+ hasid = GetStatusUpdate(query.Object);
+ }
+
+ if (!hasid)
+ upsert = true;
+
+ type = DbQueryUpdate;
+ }
+
+ if ((type & DbQueryInsert) && (type & DbQueryDelete)) {
+ std::ostringstream qdel;
+ qdel << "DELETE FROM " << GetTablePrefix() << query.Table << where.str();
+ IncreasePendingQueries(1);
+ Query(qdel.str());
+
+ type = DbQueryInsert;
+ }
+
+ switch (type) {
+ case DbQueryInsert:
+ qbuf << "INSERT INTO " << GetTablePrefix() << query.Table;
+ break;
+ case DbQueryUpdate:
+ qbuf << "UPDATE " << GetTablePrefix() << query.Table << " SET";
+ break;
+ case DbQueryDelete:
+ qbuf << "DELETE FROM " << GetTablePrefix() << query.Table;
+ break;
+ default:
+ VERIFY(!"Invalid query type.");
+ }
+
+ if (type == DbQueryInsert || type == DbQueryUpdate) {
+ std::ostringstream colbuf, valbuf;
+
+ if (type == DbQueryUpdate && query.Fields->GetLength() == 0)
+ return;
+
+ ObjectLock olock(query.Fields);
+
+ Value value;
+ bool first = true;
+ for (const Dictionary::Pair& kv : query.Fields) {
+ if (!FieldToEscapedString(kv.first, kv.second, &value)) {
+ m_QueryQueue.Enqueue([this, query]() { InternalExecuteQuery(query, -1); }, query.Priority);
+ return;
+ }
+
+ if (type == DbQueryInsert) {
+ if (!first) {
+ colbuf << ", ";
+ valbuf << ", ";
+ }
+
+ colbuf << kv.first;
+ valbuf << value;
+ } else {
+ if (!first)
+ qbuf << ", ";
+
+ qbuf << " " << kv.first << " = " << value;
+ }
+
+ if (first)
+ first = false;
+ }
+
+ if (type == DbQueryInsert)
+ qbuf << " (" << colbuf.str() << ") VALUES (" << valbuf.str() << ")";
+ }
+
+ if (type != DbQueryInsert)
+ qbuf << where.str();
+
+ Query(qbuf.str());
+
+ if (upsert && GetAffectedRows() == 0) {
+ IncreasePendingQueries(1);
+ InternalExecuteQuery(query, DbQueryDelete | DbQueryInsert);
+
+ return;
+ }
+
+ if (type == DbQueryInsert && query.Object) {
+ if (query.ConfigUpdate) {
+ String idField = query.IdColumn;
+
+ if (idField.IsEmpty())
+ idField = query.Table.SubStr(0, query.Table.GetLength() - 1) + "_id";
+
+ SetInsertID(query.Object, GetSequenceValue(GetTablePrefix() + query.Table, idField));
+
+ SetConfigUpdate(query.Object, true);
+ } else if (query.StatusUpdate)
+ SetStatusUpdate(query.Object, true);
+ }
+
+ if (type == DbQueryInsert && query.Table == "notifications" && query.NotificationInsertID) {
+ DbReference seqval = GetSequenceValue(GetTablePrefix() + query.Table, "notification_id");
+ query.NotificationInsertID->SetValue(static_cast<long>(seqval));
+ }
+}
+
+void IdoPgsqlConnection::CleanUpExecuteQuery(const String& table, const String& time_column, double max_age)
+{
+ if (IsPaused())
+ return;
+
+ IncreasePendingQueries(1);
+ m_QueryQueue.Enqueue([this, table, time_column, max_age]() { InternalCleanUpExecuteQuery(table, time_column, max_age); }, PriorityLow, true);
+}
+
+void IdoPgsqlConnection::InternalCleanUpExecuteQuery(const String& table, const String& time_column, double max_age)
+{
+ AssertOnWorkQueue();
+
+ if (!GetConnected()) {
+ DecreasePendingQueries(1);
+ return;
+ }
+
+ Query("DELETE FROM " + GetTablePrefix() + table + " WHERE instance_id = " +
+ Convert::ToString(static_cast<long>(m_InstanceID)) + " AND " + time_column +
+ " < TO_TIMESTAMP(" + Convert::ToString(static_cast<long>(max_age)) + ") AT TIME ZONE 'UTC'");
+}
+
+void IdoPgsqlConnection::FillIDCache(const DbType::Ptr& type)
+{
+ String query = "SELECT " + type->GetIDColumn() + " AS object_id, " + type->GetTable() + "_id, config_hash FROM " + GetTablePrefix() + type->GetTable() + "s";
+ IncreasePendingQueries(1);
+ IdoPgsqlResult result = Query(query);
+
+ Dictionary::Ptr row;
+
+ int index = 0;
+ while ((row = FetchRow(result, index))) {
+ index++;
+ DbReference dbref(row->Get("object_id"));
+ SetInsertID(type, dbref, DbReference(row->Get(type->GetTable() + "_id")));
+ SetConfigHash(type, dbref, row->Get("config_hash"));
+ }
+}
+
+int IdoPgsqlConnection::GetPendingQueryCount() const
+{
+ return m_QueryQueue.GetLength();
+}
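Note (not part of the patch): Reconnect() above assembles a libpq keyword/value connection string (host, port, user, password, dbname, plus the ssl* options) and only proceeds once status() reports CONNECTION_OK. The following is a minimal standalone sketch of that pattern, calling libpq directly instead of going through the pgsql_shim interface; the connection parameters are illustrative examples, not values taken from the patch.

    // Sketch only: plain libpq instead of the PgsqlInterface shim; parameters are examples.
    #include <libpq-fe.h>
    #include <cstdio>

    int main()
    {
        // Keyword/value string in the same shape Reconnect() builds from its config attributes.
        const char *conninfo = "host=localhost port=5432 user=icinga dbname=icinga sslmode=prefer";

        PGconn *conn = PQconnectdb(conninfo);

        if (!conn || PQstatus(conn) != CONNECTION_OK) {
            std::fprintf(stderr, "connection failed: %s\n",
                conn ? PQerrorMessage(conn) : "out of memory");
            if (conn)
                PQfinish(conn);
            return 1;
        }

        PQfinish(conn); // release the handle, as Disconnect()/ExceptionHandler() do via finish()
        return 0;
    }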
diff --git a/lib/db_ido_pgsql/idopgsqlconnection.hpp b/lib/db_ido_pgsql/idopgsqlconnection.hpp
new file mode 100644
index 0000000..dc06a93
--- /dev/null
+++ b/lib/db_ido_pgsql/idopgsqlconnection.hpp
@@ -0,0 +1,99 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef IDOPGSQLCONNECTION_H
+#define IDOPGSQLCONNECTION_H
+
+#include "db_ido_pgsql/idopgsqlconnection-ti.hpp"
+#include "pgsql_shim/pgsqlinterface.hpp"
+#include "base/array.hpp"
+#include "base/timer.hpp"
+#include "base/workqueue.hpp"
+#include "base/library.hpp"
+
+namespace icinga
+{
+
+typedef std::shared_ptr<PGresult> IdoPgsqlResult;
+
+/**
+ * An IDO pgSQL database connection.
+ *
+ * @ingroup ido
+ */
+class IdoPgsqlConnection final : public ObjectImpl<IdoPgsqlConnection>
+{
+public:
+ DECLARE_OBJECT(IdoPgsqlConnection);
+ DECLARE_OBJECTNAME(IdoPgsqlConnection);
+
+ IdoPgsqlConnection();
+
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+
+ const char * GetLatestSchemaVersion() const noexcept override;
+ const char * GetCompatSchemaVersion() const noexcept override;
+
+ int GetPendingQueryCount() const override;
+
+protected:
+ void OnConfigLoaded() override;
+ void Resume() override;
+ void Pause() override;
+
+ void ActivateObject(const DbObject::Ptr& dbobj) override;
+ void DeactivateObject(const DbObject::Ptr& dbobj) override;
+ void ExecuteQuery(const DbQuery& query) override;
+ void ExecuteMultipleQueries(const std::vector<DbQuery>& queries) override;
+ void CleanUpExecuteQuery(const String& table, const String& time_key, double time_value) override;
+ void FillIDCache(const DbType::Ptr& type) override;
+ void NewTransaction() override;
+ void Disconnect() override;
+
+private:
+ DbReference m_InstanceID;
+
+ Library m_Library;
+ std::unique_ptr<PgsqlInterface, PgsqlInterfaceDeleter> m_Pgsql;
+
+ PGconn *m_Connection;
+ int m_AffectedRows;
+
+ Timer::Ptr m_ReconnectTimer;
+ Timer::Ptr m_TxTimer;
+
+ IdoPgsqlResult Query(const String& query);
+ DbReference GetSequenceValue(const String& table, const String& column);
+ int GetAffectedRows();
+ String Escape(const String& s);
+ Dictionary::Ptr FetchRow(const IdoPgsqlResult& result, int row);
+
+ bool FieldToEscapedString(const String& key, const Value& value, Value *result);
+ void InternalActivateObject(const DbObject::Ptr& dbobj);
+ void InternalDeactivateObject(const DbObject::Ptr& dbobj);
+
+ void InternalNewTransaction();
+ void Reconnect();
+
+ void AssertOnWorkQueue();
+
+ void ReconnectTimerHandler();
+
+ void StatsLoggerTimerHandler();
+
+ bool CanExecuteQuery(const DbQuery& query);
+
+ void InternalExecuteQuery(const DbQuery& query, int typeOverride = -1);
+ void InternalExecuteMultipleQueries(const std::vector<DbQuery>& queries);
+ void InternalCleanUpExecuteQuery(const String& table, const String& time_key, double time_value);
+
+ void ClearTableBySession(const String& table);
+ void ClearTablesBySession();
+
+ void ExceptionHandler(boost::exception_ptr exp);
+
+ void FinishConnect(double startTime);
+};
+
+}
+
+#endif /* IDOPGSQLCONNECTION_H */
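Note (not part of the patch): the IdoPgsqlResult typedef above is a std::shared_ptr<PGresult>, and Query() attaches a deleter that calls the shim's clear(), so tuple results are released automatically when the last reference is dropped. A minimal sketch of the same idea, again assuming plain libpq rather than the shim:

    #include <libpq-fe.h>
    #include <memory>

    // Wrap a PGresult so PQclear() runs exactly once, when the last shared_ptr copy goes away.
    std::shared_ptr<PGresult> RunQuery(PGconn *conn, const char *sql)
    {
        PGresult *raw = PQexec(conn, sql);
        return std::shared_ptr<PGresult>(raw, [](PGresult *r) {
            if (r)
                PQclear(r);
        });
    }

    // Usage: auto result = RunQuery(conn, "SELECT 1");
    //        int rows = result ? PQntuples(result.get()) : 0;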
diff --git a/lib/db_ido_pgsql/idopgsqlconnection.ti b/lib/db_ido_pgsql/idopgsqlconnection.ti
new file mode 100644
index 0000000..bc4deff
--- /dev/null
+++ b/lib/db_ido_pgsql/idopgsqlconnection.ti
@@ -0,0 +1,39 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "db_ido/dbconnection.hpp"
+
+library db_ido_pgsql;
+
+namespace icinga
+{
+
+class IdoPgsqlConnection : DbConnection
+{
+ activation_priority 100;
+
+ [config] String host {
+ default {{{ return "localhost"; }}}
+ };
+ [config] String port {
+ default {{{ return "5432"; }}}
+ };
+ [config] String user {
+ default {{{ return "icinga"; }}}
+ };
+ [config, no_user_view, no_user_modify] String password {
+ default {{{ return "icinga"; }}}
+ };
+ [config] String database {
+ default {{{ return "icinga"; }}}
+ };
+ [config] String instance_name {
+ default {{{ return "default"; }}}
+ };
+ [config] String instance_description;
+ [config] String ssl_mode;
+ [config] String ssl_key;
+ [config] String ssl_cert;
+ [config] String ssl_ca;
+};
+
+}
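Note (not part of the patch): the .ti class above declares the configuration attributes (and their defaults) that the connection code reads via GetHost(), GetPort(), and so on. Purely as an illustration, an object definition using these attributes could look like the following; the object name and values are examples, not taken from the patch:

    object IdoPgsqlConnection "ido-pgsql" {
      host = "localhost"          // matches the default declared above
      port = "5432"
      user = "icinga"
      password = "icinga"
      database = "icinga"
      instance_name = "default"
      ssl_mode = "prefer"         // optional; empty by default
    }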
diff --git a/lib/db_ido_pgsql/schema/pgsql.sql b/lib/db_ido_pgsql/schema/pgsql.sql
new file mode 100644
index 0000000..242b6db
--- /dev/null
+++ b/lib/db_ido_pgsql/schema/pgsql.sql
@@ -0,0 +1,1733 @@
+-- --------------------------------------------------------
+-- pgsql.sql
+-- DB definition for IDO PostgreSQL
+--
+-- Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+--
+-- --------------------------------------------------------
+
+--
+-- Functions
+--
+
+DROP FUNCTION IF EXISTS from_unixtime(bigint);
+CREATE FUNCTION from_unixtime(bigint) RETURNS timestamp AS $$
+ SELECT to_timestamp($1) AT TIME ZONE 'UTC' AS result
+$$ LANGUAGE sql;
+
+DROP FUNCTION IF EXISTS unix_timestamp(timestamp WITH TIME ZONE);
+CREATE OR REPLACE FUNCTION unix_timestamp(timestamp) RETURNS bigint AS '
+ SELECT CAST(EXTRACT(EPOCH FROM $1) AS bigint) AS result;
+' LANGUAGE sql;
+
+
+-- -----------------------------------------
+-- set dbversion
+-- -----------------------------------------
+
+CREATE OR REPLACE FUNCTION updatedbversion(version_i TEXT) RETURNS void AS $$
+BEGIN
+ IF EXISTS( SELECT * FROM icinga_dbversion WHERE name='idoutils')
+ THEN
+ UPDATE icinga_dbversion
+ SET version=version_i, modify_time=NOW()
+ WHERE name='idoutils';
+ ELSE
+ INSERT INTO icinga_dbversion (dbversion_id, name, version, create_time, modify_time) VALUES ('1', 'idoutils', version_i, NOW(), NOW());
+ END IF;
+
+ RETURN;
+END;
+$$ LANGUAGE plpgsql;
+-- HINT: su - postgres; createlang plpgsql icinga;
+
+
+
+--
+-- Database: icinga
+--
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_acknowledgements
+--
+
+CREATE TABLE icinga_acknowledgements (
+ acknowledgement_id bigserial,
+ instance_id bigint default 0,
+ entry_time timestamp,
+ entry_time_usec INTEGER default 0,
+ acknowledgement_type INTEGER default 0,
+ object_id bigint default 0,
+ state INTEGER default 0,
+ author_name TEXT default '',
+ comment_data TEXT default '',
+ is_sticky INTEGER default 0,
+ persistent_comment INTEGER default 0,
+ notify_contacts INTEGER default 0,
+ end_time timestamp,
+ CONSTRAINT PK_acknowledgement_id PRIMARY KEY (acknowledgement_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_commands
+--
+
+CREATE TABLE icinga_commands (
+ command_id bigserial,
+ instance_id bigint default 0,
+ config_type INTEGER default 0,
+ object_id bigint default 0,
+ command_line TEXT default '',
+ config_hash varchar(64) DEFAULT NULL,
+ CONSTRAINT PK_command_id PRIMARY KEY (command_id) ,
+ CONSTRAINT UQ_commands UNIQUE (instance_id,object_id,config_type)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_commenthistory
+--
+
+CREATE TABLE icinga_commenthistory (
+ commenthistory_id bigserial,
+ instance_id bigint default 0,
+ entry_time timestamp,
+ entry_time_usec INTEGER default 0,
+ comment_type INTEGER default 0,
+ entry_type INTEGER default 0,
+ object_id bigint default 0,
+ comment_time timestamp,
+ internal_comment_id bigint default 0,
+ author_name TEXT default '',
+ comment_data TEXT default '',
+ is_persistent INTEGER default 0,
+ comment_source INTEGER default 0,
+ expires INTEGER default 0,
+ expiration_time timestamp,
+ deletion_time timestamp,
+ deletion_time_usec INTEGER default 0,
+ name TEXT default NULL,
+ CONSTRAINT PK_commenthistory_id PRIMARY KEY (commenthistory_id)
+);
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_comments
+--
+
+CREATE TABLE icinga_comments (
+ comment_id bigserial,
+ instance_id bigint default 0,
+ entry_time timestamp,
+ entry_time_usec INTEGER default 0,
+ comment_type INTEGER default 0,
+ entry_type INTEGER default 0,
+ object_id bigint default 0,
+ comment_time timestamp,
+ internal_comment_id bigint default 0,
+ author_name TEXT default '',
+ comment_data TEXT default '',
+ is_persistent INTEGER default 0,
+ comment_source INTEGER default 0,
+ expires INTEGER default 0,
+ expiration_time timestamp,
+ name TEXT default NULL,
+ session_token INTEGER default NULL,
+ CONSTRAINT PK_comment_id PRIMARY KEY (comment_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_configfiles
+--
+
+CREATE TABLE icinga_configfiles (
+ configfile_id bigserial,
+ instance_id bigint default 0,
+ configfile_type INTEGER default 0,
+ configfile_path TEXT default '',
+ CONSTRAINT PK_configfile_id PRIMARY KEY (configfile_id) ,
+ CONSTRAINT UQ_configfiles UNIQUE (instance_id,configfile_type,configfile_path)
+);
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_configfilevariables
+--
+
+CREATE TABLE icinga_configfilevariables (
+ configfilevariable_id bigserial,
+ instance_id bigint default 0,
+ configfile_id bigint default 0,
+ varname TEXT default '',
+ varvalue TEXT default '',
+ CONSTRAINT PK_configfilevariable_id PRIMARY KEY (configfilevariable_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_conninfo
+--
+
+CREATE TABLE icinga_conninfo (
+ conninfo_id bigserial,
+ instance_id bigint default 0,
+ agent_name TEXT default '',
+ agent_version TEXT default '',
+ disposition TEXT default '',
+ connect_source TEXT default '',
+ connect_type TEXT default '',
+ connect_time timestamp,
+ disconnect_time timestamp,
+ last_checkin_time timestamp,
+ data_start_time timestamp,
+ data_end_time timestamp,
+ bytes_processed bigint default 0,
+ lines_processed bigint default 0,
+ entries_processed bigint default 0,
+ CONSTRAINT PK_conninfo_id PRIMARY KEY (conninfo_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_contactgroups
+--
+
+CREATE TABLE icinga_contactgroups (
+ contactgroup_id bigserial,
+ instance_id bigint default 0,
+ config_type INTEGER default 0,
+ contactgroup_object_id bigint default 0,
+ alias TEXT default '',
+ config_hash varchar(64) DEFAULT NULL,
+ CONSTRAINT PK_contactgroup_id PRIMARY KEY (contactgroup_id) ,
+ CONSTRAINT UQ_contactgroups UNIQUE (instance_id,config_type,contactgroup_object_id)
+);
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_contactgroup_members
+--
+
+CREATE TABLE icinga_contactgroup_members (
+ contactgroup_member_id bigserial,
+ instance_id bigint default 0,
+ contactgroup_id bigint default 0,
+ contact_object_id bigint default 0,
+ session_token INTEGER default NULL,
+ CONSTRAINT PK_contactgroup_member_id PRIMARY KEY (contactgroup_member_id)
+);
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_contactnotificationmethods
+--
+
+CREATE TABLE icinga_contactnotificationmethods (
+ contactnotificationmethod_id bigserial,
+ instance_id bigint default 0,
+ contactnotification_id bigint default 0,
+ start_time timestamp,
+ start_time_usec INTEGER default 0,
+ end_time timestamp,
+ end_time_usec INTEGER default 0,
+ command_object_id bigint default 0,
+ command_args TEXT default '',
+ CONSTRAINT PK_contactnotificationmethod_id PRIMARY KEY (contactnotificationmethod_id) ,
+ CONSTRAINT UQ_contactnotificationmethods UNIQUE (instance_id,contactnotification_id,start_time,start_time_usec)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_contactnotifications
+--
+
+CREATE TABLE icinga_contactnotifications (
+ contactnotification_id bigserial,
+ instance_id bigint default 0,
+ notification_id bigint default 0,
+ contact_object_id bigint default 0,
+ start_time timestamp,
+ start_time_usec INTEGER default 0,
+ end_time timestamp,
+ end_time_usec INTEGER default 0,
+ CONSTRAINT PK_contactnotification_id PRIMARY KEY (contactnotification_id) ,
+ CONSTRAINT UQ_contactnotifications UNIQUE (instance_id,contact_object_id,start_time,start_time_usec)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_contacts
+--
+
+CREATE TABLE icinga_contacts (
+ contact_id bigserial,
+ instance_id bigint default 0,
+ config_type INTEGER default 0,
+ contact_object_id bigint default 0,
+ alias TEXT default '',
+ email_address TEXT default '',
+ pager_address TEXT default '',
+ host_timeperiod_object_id bigint default 0,
+ service_timeperiod_object_id bigint default 0,
+ host_notifications_enabled INTEGER default 0,
+ service_notifications_enabled INTEGER default 0,
+ can_submit_commands INTEGER default 0,
+ notify_service_recovery INTEGER default 0,
+ notify_service_warning INTEGER default 0,
+ notify_service_unknown INTEGER default 0,
+ notify_service_critical INTEGER default 0,
+ notify_service_flapping INTEGER default 0,
+ notify_service_downtime INTEGER default 0,
+ notify_host_recovery INTEGER default 0,
+ notify_host_down INTEGER default 0,
+ notify_host_unreachable INTEGER default 0,
+ notify_host_flapping INTEGER default 0,
+ notify_host_downtime INTEGER default 0,
+ config_hash varchar(64) DEFAULT NULL,
+ CONSTRAINT PK_contact_id PRIMARY KEY (contact_id) ,
+ CONSTRAINT UQ_contacts UNIQUE (instance_id,config_type,contact_object_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_contactstatus
+--
+
+CREATE TABLE icinga_contactstatus (
+ contactstatus_id bigserial,
+ instance_id bigint default 0,
+ contact_object_id bigint default 0,
+ status_update_time timestamp,
+ host_notifications_enabled INTEGER default 0,
+ service_notifications_enabled INTEGER default 0,
+ last_host_notification timestamp,
+ last_service_notification timestamp,
+ modified_attributes INTEGER default 0,
+ modified_host_attributes INTEGER default 0,
+ modified_service_attributes INTEGER default 0,
+ CONSTRAINT PK_contactstatus_id PRIMARY KEY (contactstatus_id) ,
+ CONSTRAINT UQ_contactstatus UNIQUE (contact_object_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_contact_addresses
+--
+
+CREATE TABLE icinga_contact_addresses (
+ contact_address_id bigserial,
+ instance_id bigint default 0,
+ contact_id bigint default 0,
+ address_number INTEGER default 0,
+ address TEXT default '',
+ CONSTRAINT PK_contact_address_id PRIMARY KEY (contact_address_id) ,
+ CONSTRAINT UQ_contact_addresses UNIQUE (contact_id,address_number)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_contact_notificationcommands
+--
+
+CREATE TABLE icinga_contact_notificationcommands (
+ contact_notificationcommand_id bigserial,
+ instance_id bigint default 0,
+ contact_id bigint default 0,
+ notification_type INTEGER default 0,
+ command_object_id bigint default 0,
+ command_args TEXT default '',
+ CONSTRAINT PK_contact_notificationcommand_id PRIMARY KEY (contact_notificationcommand_id) ,
+ CONSTRAINT UQ_contact_notificationcommands UNIQUE (contact_id,notification_type,command_object_id,command_args)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_customvariables
+--
+
+CREATE TABLE icinga_customvariables (
+ customvariable_id bigserial,
+ instance_id bigint default 0,
+ object_id bigint default 0,
+ config_type INTEGER default 0,
+ has_been_modified INTEGER default 0,
+ varname TEXT default '',
+ varvalue TEXT default '',
+ is_json INTEGER default 0,
+ session_token INTEGER default NULL,
+ CONSTRAINT PK_customvariable_id PRIMARY KEY (customvariable_id) ,
+ CONSTRAINT UQ_customvariables UNIQUE (object_id,config_type,varname)
+) ;
+CREATE INDEX icinga_customvariables_i ON icinga_customvariables(varname);
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_customvariablestatus
+--
+
+CREATE TABLE icinga_customvariablestatus (
+ customvariablestatus_id bigserial,
+ instance_id bigint default 0,
+ object_id bigint default 0,
+ status_update_time timestamp,
+ has_been_modified INTEGER default 0,
+ varname TEXT default '',
+ varvalue TEXT default '',
+ is_json INTEGER default 0,
+ session_token INTEGER default NULL,
+ CONSTRAINT PK_customvariablestatus_id PRIMARY KEY (customvariablestatus_id) ,
+ CONSTRAINT UQ_customvariablestatus UNIQUE (object_id,varname)
+) ;
+CREATE INDEX icinga_customvariablestatus_i ON icinga_customvariablestatus(varname);
+
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_dbversion
+--
+
+CREATE TABLE icinga_dbversion (
+ dbversion_id bigserial,
+ name TEXT default '',
+ version TEXT default '',
+ create_time timestamp,
+ modify_time timestamp,
+ CONSTRAINT PK_dbversion_id PRIMARY KEY (dbversion_id) ,
+ CONSTRAINT UQ_dbversion UNIQUE (name)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_downtimehistory
+--
+
+CREATE TABLE icinga_downtimehistory (
+ downtimehistory_id bigserial,
+ instance_id bigint default 0,
+ downtime_type INTEGER default 0,
+ object_id bigint default 0,
+ entry_time timestamp,
+ author_name TEXT default '',
+ comment_data TEXT default '',
+ internal_downtime_id bigint default 0,
+ triggered_by_id bigint default 0,
+ is_fixed INTEGER default 0,
+ duration BIGINT default 0,
+ scheduled_start_time timestamp,
+ scheduled_end_time timestamp,
+ was_started INTEGER default 0,
+ actual_start_time timestamp,
+ actual_start_time_usec INTEGER default 0,
+ actual_end_time timestamp,
+ actual_end_time_usec INTEGER default 0,
+ was_cancelled INTEGER default 0,
+ is_in_effect INTEGER default 0,
+ trigger_time timestamp,
+ name TEXT default NULL,
+ CONSTRAINT PK_downtimehistory_id PRIMARY KEY (downtimehistory_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_eventhandlers
+--
+
+CREATE TABLE icinga_eventhandlers (
+ eventhandler_id bigserial,
+ instance_id bigint default 0,
+ eventhandler_type INTEGER default 0,
+ object_id bigint default 0,
+ state INTEGER default 0,
+ state_type INTEGER default 0,
+ start_time timestamp,
+ start_time_usec INTEGER default 0,
+ end_time timestamp,
+ end_time_usec INTEGER default 0,
+ command_object_id bigint default 0,
+ command_args TEXT default '',
+ command_line TEXT default '',
+ timeout INTEGER default 0,
+ early_timeout INTEGER default 0,
+ execution_time double precision default 0,
+ return_code INTEGER default 0,
+ output TEXT default '',
+ long_output TEXT default '',
+ CONSTRAINT PK_eventhandler_id PRIMARY KEY (eventhandler_id) ,
+ CONSTRAINT UQ_eventhandlers UNIQUE (instance_id,object_id,start_time,start_time_usec)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_externalcommands
+--
+
+CREATE TABLE icinga_externalcommands (
+ externalcommand_id bigserial,
+ instance_id bigint default 0,
+ entry_time timestamp,
+ command_type INTEGER default 0,
+ command_name TEXT default '',
+ command_args TEXT default '',
+ CONSTRAINT PK_externalcommand_id PRIMARY KEY (externalcommand_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_flappinghistory
+--
+
+CREATE TABLE icinga_flappinghistory (
+ flappinghistory_id bigserial,
+ instance_id bigint default 0,
+ event_time timestamp,
+ event_time_usec INTEGER default 0,
+ event_type INTEGER default 0,
+ reason_type INTEGER default 0,
+ flapping_type INTEGER default 0,
+ object_id bigint default 0,
+ percent_state_change double precision default 0,
+ low_threshold double precision default 0,
+ high_threshold double precision default 0,
+ comment_time timestamp,
+ internal_comment_id bigint default 0,
+ CONSTRAINT PK_flappinghistory_id PRIMARY KEY (flappinghistory_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_hostchecks
+--
+
+CREATE TABLE icinga_hostchecks (
+ hostcheck_id bigserial,
+ instance_id bigint default 0,
+ host_object_id bigint default 0,
+ check_type INTEGER default 0,
+ is_raw_check INTEGER default 0,
+ current_check_attempt INTEGER default 0,
+ max_check_attempts INTEGER default 0,
+ state INTEGER default 0,
+ state_type INTEGER default 0,
+ start_time timestamp,
+ start_time_usec INTEGER default 0,
+ end_time timestamp,
+ end_time_usec INTEGER default 0,
+ command_object_id bigint default 0,
+ command_args TEXT default '',
+ command_line TEXT default '',
+ timeout INTEGER default 0,
+ early_timeout INTEGER default 0,
+ execution_time double precision default 0,
+ latency double precision default 0,
+ return_code INTEGER default 0,
+ output TEXT default '',
+ long_output TEXT default '',
+ perfdata TEXT default '',
+ CONSTRAINT PK_hostcheck_id PRIMARY KEY (hostcheck_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_hostdependencies
+--
+
+CREATE TABLE icinga_hostdependencies (
+ hostdependency_id bigserial,
+ instance_id bigint default 0,
+ config_type INTEGER default 0,
+ host_object_id bigint default 0,
+ dependent_host_object_id bigint default 0,
+ dependency_type INTEGER default 0,
+ inherits_parent INTEGER default 0,
+ timeperiod_object_id bigint default 0,
+ fail_on_up INTEGER default 0,
+ fail_on_down INTEGER default 0,
+ fail_on_unreachable INTEGER default 0,
+ CONSTRAINT PK_hostdependency_id PRIMARY KEY (hostdependency_id)
+) ;
+CREATE INDEX idx_hostdependencies ON icinga_hostdependencies(instance_id,config_type,host_object_id,dependent_host_object_id,dependency_type,inherits_parent,fail_on_up,fail_on_down,fail_on_unreachable);
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_hostescalations
+--
+
+CREATE TABLE icinga_hostescalations (
+ hostescalation_id bigserial,
+ instance_id bigint default 0,
+ config_type INTEGER default 0,
+ host_object_id bigint default 0,
+ timeperiod_object_id bigint default 0,
+ first_notification INTEGER default 0,
+ last_notification INTEGER default 0,
+ notification_interval double precision default 0,
+ escalate_on_recovery INTEGER default 0,
+ escalate_on_down INTEGER default 0,
+ escalate_on_unreachable INTEGER default 0,
+ CONSTRAINT PK_hostescalation_id PRIMARY KEY (hostescalation_id) ,
+ CONSTRAINT UQ_hostescalations UNIQUE (instance_id,config_type,host_object_id,timeperiod_object_id,first_notification,last_notification)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_hostescalation_contactgroups
+--
+
+CREATE TABLE icinga_hostescalation_contactgroups (
+ hostescalation_contactgroup_id bigserial,
+ instance_id bigint default 0,
+ hostescalation_id bigint default 0,
+ contactgroup_object_id bigint default 0,
+ CONSTRAINT PK_hostescalation_contactgroup_id PRIMARY KEY (hostescalation_contactgroup_id) ,
+ CONSTRAINT UQ_hostescalation_contactgroups UNIQUE (hostescalation_id,contactgroup_object_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_hostescalation_contacts
+--
+
+CREATE TABLE icinga_hostescalation_contacts (
+ hostescalation_contact_id bigserial,
+ instance_id bigint default 0,
+ hostescalation_id bigint default 0,
+ contact_object_id bigint default 0,
+ CONSTRAINT PK_hostescalation_contact_id PRIMARY KEY (hostescalation_contact_id) ,
+ CONSTRAINT UQ_hostescalation_contacts UNIQUE (instance_id,hostescalation_id,contact_object_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_hostgroups
+--
+
+CREATE TABLE icinga_hostgroups (
+ hostgroup_id bigserial,
+ instance_id bigint default 0,
+ config_type INTEGER default 0,
+ hostgroup_object_id bigint default 0,
+ alias TEXT default '',
+ notes TEXT default NULL,
+ notes_url TEXT default NULL,
+ action_url TEXT default NULL,
+ config_hash varchar(64) DEFAULT NULL,
+ CONSTRAINT PK_hostgroup_id PRIMARY KEY (hostgroup_id) ,
+ CONSTRAINT UQ_hostgroups UNIQUE (instance_id,hostgroup_object_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_hostgroup_members
+--
+
+CREATE TABLE icinga_hostgroup_members (
+ hostgroup_member_id bigserial,
+ instance_id bigint default 0,
+ hostgroup_id bigint default 0,
+ host_object_id bigint default 0,
+ session_token INTEGER default NULL,
+ CONSTRAINT PK_hostgroup_member_id PRIMARY KEY (hostgroup_member_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_hosts
+--
+
+CREATE TABLE icinga_hosts (
+ host_id bigserial,
+ instance_id bigint default 0,
+ config_type INTEGER default 0,
+ host_object_id bigint default 0,
+ alias TEXT default '',
+ display_name TEXT default '',
+ address TEXT default '',
+ address6 TEXT default '',
+ check_command_object_id bigint default 0,
+ check_command_args TEXT default '',
+ eventhandler_command_object_id bigint default 0,
+ eventhandler_command_args TEXT default '',
+ notification_timeperiod_object_id bigint default 0,
+ check_timeperiod_object_id bigint default 0,
+ failure_prediction_options TEXT default '',
+ check_interval double precision default 0,
+ retry_interval double precision default 0,
+ max_check_attempts INTEGER default 0,
+ first_notification_delay double precision default 0,
+ notification_interval double precision default 0,
+ notify_on_down INTEGER default 0,
+ notify_on_unreachable INTEGER default 0,
+ notify_on_recovery INTEGER default 0,
+ notify_on_flapping INTEGER default 0,
+ notify_on_downtime INTEGER default 0,
+ stalk_on_up INTEGER default 0,
+ stalk_on_down INTEGER default 0,
+ stalk_on_unreachable INTEGER default 0,
+ flap_detection_enabled INTEGER default 0,
+ flap_detection_on_up INTEGER default 0,
+ flap_detection_on_down INTEGER default 0,
+ flap_detection_on_unreachable INTEGER default 0,
+ low_flap_threshold double precision default 0,
+ high_flap_threshold double precision default 0,
+ process_performance_data INTEGER default 0,
+ freshness_checks_enabled INTEGER default 0,
+ freshness_threshold INTEGER default 0,
+ passive_checks_enabled INTEGER default 0,
+ event_handler_enabled INTEGER default 0,
+ active_checks_enabled INTEGER default 0,
+ retain_status_information INTEGER default 0,
+ retain_nonstatus_information INTEGER default 0,
+ notifications_enabled INTEGER default 0,
+ obsess_over_host INTEGER default 0,
+ failure_prediction_enabled INTEGER default 0,
+ notes TEXT default '',
+ notes_url TEXT default '',
+ action_url TEXT default '',
+ icon_image TEXT default '',
+ icon_image_alt TEXT default '',
+ vrml_image TEXT default '',
+ statusmap_image TEXT default '',
+ have_2d_coords INTEGER default 0,
+ x_2d INTEGER default 0,
+ y_2d INTEGER default 0,
+ have_3d_coords INTEGER default 0,
+ x_3d double precision default 0,
+ y_3d double precision default 0,
+ z_3d double precision default 0,
+ config_hash varchar(64) DEFAULT NULL,
+ CONSTRAINT PK_host_id PRIMARY KEY (host_id) ,
+ CONSTRAINT UQ_hosts UNIQUE (instance_id,config_type,host_object_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_hoststatus
+--
+
+CREATE TABLE icinga_hoststatus (
+ hoststatus_id bigserial,
+ instance_id bigint default 0,
+ host_object_id bigint default 0,
+ status_update_time timestamp,
+ output TEXT default '',
+ long_output TEXT default '',
+ perfdata TEXT default '',
+ check_source varchar(255) default '',
+ current_state INTEGER default 0,
+ has_been_checked INTEGER default 0,
+ should_be_scheduled INTEGER default 0,
+ current_check_attempt INTEGER default 0,
+ max_check_attempts INTEGER default 0,
+ last_check timestamp,
+ next_check timestamp,
+ check_type INTEGER default 0,
+ last_state_change timestamp,
+ last_hard_state_change timestamp,
+ last_hard_state INTEGER default 0,
+ last_time_up timestamp,
+ last_time_down timestamp,
+ last_time_unreachable timestamp,
+ state_type INTEGER default 0,
+ last_notification timestamp,
+ next_notification timestamp,
+ no_more_notifications INTEGER default 0,
+ notifications_enabled INTEGER default 0,
+ problem_has_been_acknowledged INTEGER default 0,
+ acknowledgement_type INTEGER default 0,
+ current_notification_number INTEGER default 0,
+ passive_checks_enabled INTEGER default 0,
+ active_checks_enabled INTEGER default 0,
+ event_handler_enabled INTEGER default 0,
+ flap_detection_enabled INTEGER default 0,
+ is_flapping INTEGER default 0,
+ percent_state_change double precision default 0,
+ latency double precision default 0,
+ execution_time double precision default 0,
+ scheduled_downtime_depth INTEGER default 0,
+ failure_prediction_enabled INTEGER default 0,
+ process_performance_data INTEGER default 0,
+ obsess_over_host INTEGER default 0,
+ modified_host_attributes INTEGER default 0,
+ original_attributes TEXT default NULL,
+ event_handler TEXT default '',
+ check_command TEXT default '',
+ normal_check_interval double precision default 0,
+ retry_check_interval double precision default 0,
+ check_timeperiod_object_id bigint default 0,
+ is_reachable INTEGER default 0,
+ CONSTRAINT PK_hoststatus_id PRIMARY KEY (hoststatus_id) ,
+ CONSTRAINT UQ_hoststatus UNIQUE (host_object_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_host_contactgroups
+--
+
+CREATE TABLE icinga_host_contactgroups (
+ host_contactgroup_id bigserial,
+ instance_id bigint default 0,
+ host_id bigint default 0,
+ contactgroup_object_id bigint default 0,
+ CONSTRAINT PK_host_contactgroup_id PRIMARY KEY (host_contactgroup_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_host_contacts
+--
+
+CREATE TABLE icinga_host_contacts (
+ host_contact_id bigserial,
+ instance_id bigint default 0,
+ host_id bigint default 0,
+ contact_object_id bigint default 0,
+ CONSTRAINT PK_host_contact_id PRIMARY KEY (host_contact_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_host_parenthosts
+--
+
+CREATE TABLE icinga_host_parenthosts (
+ host_parenthost_id bigserial,
+ instance_id bigint default 0,
+ host_id bigint default 0,
+ parent_host_object_id bigint default 0,
+ CONSTRAINT PK_host_parenthost_id PRIMARY KEY (host_parenthost_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_instances
+--
+
+CREATE TABLE icinga_instances (
+ instance_id bigserial,
+ instance_name TEXT default '',
+ instance_description TEXT default '',
+ CONSTRAINT PK_instance_id PRIMARY KEY (instance_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_logentries
+--
+
+CREATE TABLE icinga_logentries (
+ logentry_id bigserial,
+ instance_id bigint default 0,
+ logentry_time timestamp,
+ entry_time timestamp,
+ entry_time_usec INTEGER default 0,
+ logentry_type INTEGER default 0,
+ logentry_data TEXT default '',
+ realtime_data INTEGER default 0,
+ inferred_data_extracted INTEGER default 0,
+ object_id bigint default NULL,
+ CONSTRAINT PK_logentry_id PRIMARY KEY (logentry_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_notifications
+--
+
+CREATE TABLE icinga_notifications (
+ notification_id bigserial,
+ instance_id bigint default 0,
+ notification_type INTEGER default 0,
+ notification_reason INTEGER default 0,
+ object_id bigint default 0,
+ start_time timestamp,
+ start_time_usec INTEGER default 0,
+ end_time timestamp,
+ end_time_usec INTEGER default 0,
+ state INTEGER default 0,
+ output TEXT default '',
+ long_output TEXT default '',
+ escalated INTEGER default 0,
+ contacts_notified INTEGER default 0,
+ CONSTRAINT PK_notification_id PRIMARY KEY (notification_id) ,
+ CONSTRAINT UQ_notifications UNIQUE (instance_id,object_id,start_time,start_time_usec)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_objects
+--
+
+CREATE TABLE icinga_objects (
+ object_id bigserial,
+ instance_id bigint default 0,
+ objecttype_id bigint default 0,
+ name1 TEXT,
+ name2 TEXT,
+ is_active INTEGER default 0,
+ CONSTRAINT PK_object_id PRIMARY KEY (object_id)
+-- UNIQUE (objecttype_id,name1,name2)
+) ;
+CREATE INDEX icinga_objects_i ON icinga_objects(objecttype_id,name1,name2);
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_processevents
+--
+
+CREATE TABLE icinga_processevents (
+ processevent_id bigserial,
+ instance_id bigint default 0,
+ event_type INTEGER default 0,
+ event_time timestamp,
+ event_time_usec INTEGER default 0,
+ process_id bigint default 0,
+ program_name TEXT default '',
+ program_version TEXT default '',
+ program_date TEXT default '',
+ CONSTRAINT PK_processevent_id PRIMARY KEY (processevent_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_programstatus
+--
+
+CREATE TABLE icinga_programstatus (
+ programstatus_id bigserial,
+ instance_id bigint default 0,
+ program_version TEXT default NULL,
+ status_update_time timestamp,
+ program_start_time timestamp,
+ program_end_time timestamp,
+ is_currently_running INTEGER default 0,
+ endpoint_name TEXT default '',
+ process_id bigint default 0,
+ daemon_mode INTEGER default 0,
+ last_command_check timestamp,
+ last_log_rotation timestamp,
+ notifications_enabled INTEGER default 0,
+ disable_notif_expire_time timestamp,
+ active_service_checks_enabled INTEGER default 0,
+ passive_service_checks_enabled INTEGER default 0,
+ active_host_checks_enabled INTEGER default 0,
+ passive_host_checks_enabled INTEGER default 0,
+ event_handlers_enabled INTEGER default 0,
+ flap_detection_enabled INTEGER default 0,
+ failure_prediction_enabled INTEGER default 0,
+ process_performance_data INTEGER default 0,
+ obsess_over_hosts INTEGER default 0,
+ obsess_over_services INTEGER default 0,
+ modified_host_attributes INTEGER default 0,
+ modified_service_attributes INTEGER default 0,
+ global_host_event_handler TEXT default '',
+ global_service_event_handler TEXT default '',
+ config_dump_in_progress INTEGER default 0,
+ CONSTRAINT PK_programstatus_id PRIMARY KEY (programstatus_id) ,
+ CONSTRAINT UQ_programstatus UNIQUE (instance_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_runtimevariables
+--
+
+CREATE TABLE icinga_runtimevariables (
+ runtimevariable_id bigserial,
+ instance_id bigint default 0,
+ varname TEXT default '',
+ varvalue TEXT default '',
+ CONSTRAINT PK_runtimevariable_id PRIMARY KEY (runtimevariable_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_scheduleddowntime
+--
+
+CREATE TABLE icinga_scheduleddowntime (
+ scheduleddowntime_id bigserial,
+ instance_id bigint default 0,
+ downtime_type INTEGER default 0,
+ object_id bigint default 0,
+ entry_time timestamp,
+ author_name TEXT default '',
+ comment_data TEXT default '',
+ internal_downtime_id bigint default 0,
+ triggered_by_id bigint default 0,
+ is_fixed INTEGER default 0,
+ duration BIGINT default 0,
+ scheduled_start_time timestamp,
+ scheduled_end_time timestamp,
+ was_started INTEGER default 0,
+ actual_start_time timestamp,
+ actual_start_time_usec INTEGER default 0,
+ is_in_effect INTEGER default 0,
+ trigger_time timestamp,
+ name TEXT default NULL,
+ session_token INTEGER default NULL,
+ CONSTRAINT PK_scheduleddowntime_id PRIMARY KEY (scheduleddowntime_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_servicechecks
+--
+
+CREATE TABLE icinga_servicechecks (
+ servicecheck_id bigserial,
+ instance_id bigint default 0,
+ service_object_id bigint default 0,
+ check_type INTEGER default 0,
+ current_check_attempt INTEGER default 0,
+ max_check_attempts INTEGER default 0,
+ state INTEGER default 0,
+ state_type INTEGER default 0,
+ start_time timestamp,
+ start_time_usec INTEGER default 0,
+ end_time timestamp,
+ end_time_usec INTEGER default 0,
+ command_object_id bigint default 0,
+ command_args TEXT default '',
+ command_line TEXT default '',
+ timeout INTEGER default 0,
+ early_timeout INTEGER default 0,
+ execution_time double precision default 0,
+ latency double precision default 0,
+ return_code INTEGER default 0,
+ output TEXT default '',
+ long_output TEXT default '',
+ perfdata TEXT default '',
+ CONSTRAINT PK_servicecheck_id PRIMARY KEY (servicecheck_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_servicedependencies
+--
+
+CREATE TABLE icinga_servicedependencies (
+ servicedependency_id bigserial,
+ instance_id bigint default 0,
+ config_type INTEGER default 0,
+ service_object_id bigint default 0,
+ dependent_service_object_id bigint default 0,
+ dependency_type INTEGER default 0,
+ inherits_parent INTEGER default 0,
+ timeperiod_object_id bigint default 0,
+ fail_on_ok INTEGER default 0,
+ fail_on_warning INTEGER default 0,
+ fail_on_unknown INTEGER default 0,
+ fail_on_critical INTEGER default 0,
+ CONSTRAINT PK_servicedependency_id PRIMARY KEY (servicedependency_id)
+) ;
+CREATE INDEX idx_servicedependencies ON icinga_servicedependencies(instance_id,config_type,service_object_id,dependent_service_object_id,dependency_type,inherits_parent,fail_on_ok,fail_on_warning,fail_on_unknown,fail_on_critical);
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_serviceescalations
+--
+
+CREATE TABLE icinga_serviceescalations (
+ serviceescalation_id bigserial,
+ instance_id bigint default 0,
+ config_type INTEGER default 0,
+ service_object_id bigint default 0,
+ timeperiod_object_id bigint default 0,
+ first_notification INTEGER default 0,
+ last_notification INTEGER default 0,
+ notification_interval double precision default 0,
+ escalate_on_recovery INTEGER default 0,
+ escalate_on_warning INTEGER default 0,
+ escalate_on_unknown INTEGER default 0,
+ escalate_on_critical INTEGER default 0,
+ CONSTRAINT PK_serviceescalation_id PRIMARY KEY (serviceescalation_id) ,
+ CONSTRAINT UQ_serviceescalations UNIQUE (instance_id,config_type,service_object_id,timeperiod_object_id,first_notification,last_notification)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_serviceescalation_contactgroups
+--
+
+CREATE TABLE icinga_serviceescalation_contactgroups (
+ serviceescalation_contactgroup_id bigserial,
+ instance_id bigint default 0,
+ serviceescalation_id bigint default 0,
+ contactgroup_object_id bigint default 0,
+ CONSTRAINT PK_serviceescalation_contactgroup_id PRIMARY KEY (serviceescalation_contactgroup_id) ,
+ CONSTRAINT UQ_serviceescalation_contactgro UNIQUE (serviceescalation_id,contactgroup_object_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_serviceescalation_contacts
+--
+
+CREATE TABLE icinga_serviceescalation_contacts (
+ serviceescalation_contact_id bigserial,
+ instance_id bigint default 0,
+ serviceescalation_id bigint default 0,
+ contact_object_id bigint default 0,
+ CONSTRAINT PK_serviceescalation_contact_id PRIMARY KEY (serviceescalation_contact_id) ,
+ CONSTRAINT UQ_serviceescalation_contacts UNIQUE (instance_id,serviceescalation_id,contact_object_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_servicegroups
+--
+
+CREATE TABLE icinga_servicegroups (
+ servicegroup_id bigserial,
+ instance_id bigint default 0,
+ config_type INTEGER default 0,
+ servicegroup_object_id bigint default 0,
+ alias TEXT default '',
+ notes TEXT default NULL,
+ notes_url TEXT default NULL,
+ action_url TEXT default NULL,
+ config_hash varchar(64) DEFAULT NULL,
+ CONSTRAINT PK_servicegroup_id PRIMARY KEY (servicegroup_id) ,
+ CONSTRAINT UQ_servicegroups UNIQUE (instance_id,config_type,servicegroup_object_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_servicegroup_members
+--
+
+CREATE TABLE icinga_servicegroup_members (
+ servicegroup_member_id bigserial,
+ instance_id bigint default 0,
+ servicegroup_id bigint default 0,
+ service_object_id bigint default 0,
+ session_token INTEGER default NULL,
+ CONSTRAINT PK_servicegroup_member_id PRIMARY KEY (servicegroup_member_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_services
+--
+
+CREATE TABLE icinga_services (
+ service_id bigserial,
+ instance_id bigint default 0,
+ config_type INTEGER default 0,
+ host_object_id bigint default 0,
+ service_object_id bigint default 0,
+ display_name TEXT default '',
+ check_command_object_id bigint default 0,
+ check_command_args TEXT default '',
+ eventhandler_command_object_id bigint default 0,
+ eventhandler_command_args TEXT default '',
+ notification_timeperiod_object_id bigint default 0,
+ check_timeperiod_object_id bigint default 0,
+ failure_prediction_options TEXT default '',
+ check_interval double precision default 0,
+ retry_interval double precision default 0,
+ max_check_attempts INTEGER default 0,
+ first_notification_delay double precision default 0,
+ notification_interval double precision default 0,
+ notify_on_warning INTEGER default 0,
+ notify_on_unknown INTEGER default 0,
+ notify_on_critical INTEGER default 0,
+ notify_on_recovery INTEGER default 0,
+ notify_on_flapping INTEGER default 0,
+ notify_on_downtime INTEGER default 0,
+ stalk_on_ok INTEGER default 0,
+ stalk_on_warning INTEGER default 0,
+ stalk_on_unknown INTEGER default 0,
+ stalk_on_critical INTEGER default 0,
+ is_volatile INTEGER default 0,
+ flap_detection_enabled INTEGER default 0,
+ flap_detection_on_ok INTEGER default 0,
+ flap_detection_on_warning INTEGER default 0,
+ flap_detection_on_unknown INTEGER default 0,
+ flap_detection_on_critical INTEGER default 0,
+ low_flap_threshold double precision default 0,
+ high_flap_threshold double precision default 0,
+ process_performance_data INTEGER default 0,
+ freshness_checks_enabled INTEGER default 0,
+ freshness_threshold INTEGER default 0,
+ passive_checks_enabled INTEGER default 0,
+ event_handler_enabled INTEGER default 0,
+ active_checks_enabled INTEGER default 0,
+ retain_status_information INTEGER default 0,
+ retain_nonstatus_information INTEGER default 0,
+ notifications_enabled INTEGER default 0,
+ obsess_over_service INTEGER default 0,
+ failure_prediction_enabled INTEGER default 0,
+ notes TEXT default '',
+ notes_url TEXT default '',
+ action_url TEXT default '',
+ icon_image TEXT default '',
+ icon_image_alt TEXT default '',
+ config_hash varchar(64) DEFAULT NULL,
+ CONSTRAINT PK_service_id PRIMARY KEY (service_id) ,
+ CONSTRAINT UQ_services UNIQUE (instance_id,config_type,service_object_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_servicestatus
+--
+
+CREATE TABLE icinga_servicestatus (
+ servicestatus_id bigserial,
+ instance_id bigint default 0,
+ service_object_id bigint default 0,
+ status_update_time timestamp,
+ output TEXT default '',
+ long_output TEXT default '',
+ perfdata TEXT default '',
+ check_source varchar(255) default '',
+ current_state INTEGER default 0,
+ has_been_checked INTEGER default 0,
+ should_be_scheduled INTEGER default 0,
+ current_check_attempt INTEGER default 0,
+ max_check_attempts INTEGER default 0,
+ last_check timestamp,
+ next_check timestamp,
+ check_type INTEGER default 0,
+ last_state_change timestamp,
+ last_hard_state_change timestamp,
+ last_hard_state INTEGER default 0,
+ last_time_ok timestamp,
+ last_time_warning timestamp,
+ last_time_unknown timestamp,
+ last_time_critical timestamp,
+ state_type INTEGER default 0,
+ last_notification timestamp,
+ next_notification timestamp,
+ no_more_notifications INTEGER default 0,
+ notifications_enabled INTEGER default 0,
+ problem_has_been_acknowledged INTEGER default 0,
+ acknowledgement_type INTEGER default 0,
+ current_notification_number INTEGER default 0,
+ passive_checks_enabled INTEGER default 0,
+ active_checks_enabled INTEGER default 0,
+ event_handler_enabled INTEGER default 0,
+ flap_detection_enabled INTEGER default 0,
+ is_flapping INTEGER default 0,
+ percent_state_change double precision default 0,
+ latency double precision default 0,
+ execution_time double precision default 0,
+ scheduled_downtime_depth INTEGER default 0,
+ failure_prediction_enabled INTEGER default 0,
+ process_performance_data INTEGER default 0,
+ obsess_over_service INTEGER default 0,
+ modified_service_attributes INTEGER default 0,
+ original_attributes TEXT default NULL,
+ event_handler TEXT default '',
+ check_command TEXT default '',
+ normal_check_interval double precision default 0,
+ retry_check_interval double precision default 0,
+ check_timeperiod_object_id bigint default 0,
+ is_reachable INTEGER default 0,
+ CONSTRAINT PK_servicestatus_id PRIMARY KEY (servicestatus_id) ,
+ CONSTRAINT UQ_servicestatus UNIQUE (service_object_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_service_contactgroups
+--
+
+CREATE TABLE icinga_service_contactgroups (
+ service_contactgroup_id bigserial,
+ instance_id bigint default 0,
+ service_id bigint default 0,
+ contactgroup_object_id bigint default 0,
+ CONSTRAINT PK_service_contactgroup_id PRIMARY KEY (service_contactgroup_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_service_contacts
+--
+
+CREATE TABLE icinga_service_contacts (
+ service_contact_id bigserial,
+ instance_id bigint default 0,
+ service_id bigint default 0,
+ contact_object_id bigint default 0,
+ CONSTRAINT PK_service_contact_id PRIMARY KEY (service_contact_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_statehistory
+--
+
+CREATE TABLE icinga_statehistory (
+ statehistory_id bigserial,
+ instance_id bigint default 0,
+ state_time timestamp,
+ state_time_usec INTEGER default 0,
+ object_id bigint default 0,
+ state_change INTEGER default 0,
+ state INTEGER default 0,
+ state_type INTEGER default 0,
+ current_check_attempt INTEGER default 0,
+ max_check_attempts INTEGER default 0,
+ last_state INTEGER default '-1',
+ last_hard_state INTEGER default '-1',
+ output TEXT default '',
+ long_output TEXT default '',
+ check_source varchar(255) default '',
+ CONSTRAINT PK_statehistory_id PRIMARY KEY (statehistory_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_systemcommands
+--
+
+CREATE TABLE icinga_systemcommands (
+ systemcommand_id bigserial,
+ instance_id bigint default 0,
+ start_time timestamp,
+ start_time_usec INTEGER default 0,
+ end_time timestamp,
+ end_time_usec INTEGER default 0,
+ command_line TEXT default '',
+ timeout INTEGER default 0,
+ early_timeout INTEGER default 0,
+ execution_time double precision default 0,
+ return_code INTEGER default 0,
+ output TEXT default '',
+ long_output TEXT default '',
+ CONSTRAINT PK_systemcommand_id PRIMARY KEY (systemcommand_id) ,
+ CONSTRAINT UQ_systemcommands UNIQUE (instance_id,start_time,start_time_usec)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_timeperiods
+--
+
+CREATE TABLE icinga_timeperiods (
+ timeperiod_id bigserial,
+ instance_id bigint default 0,
+ config_type INTEGER default 0,
+ timeperiod_object_id bigint default 0,
+ alias TEXT default '',
+ config_hash varchar(64) DEFAULT NULL,
+ CONSTRAINT PK_timeperiod_id PRIMARY KEY (timeperiod_id) ,
+ CONSTRAINT UQ_timeperiods UNIQUE (instance_id,config_type,timeperiod_object_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_timeperiod_timeranges
+--
+
+CREATE TABLE icinga_timeperiod_timeranges (
+ timeperiod_timerange_id bigserial,
+ instance_id bigint default 0,
+ timeperiod_id bigint default 0,
+ day INTEGER default 0,
+ start_sec INTEGER default 0,
+ end_sec INTEGER default 0,
+ CONSTRAINT PK_timeperiod_timerange_id PRIMARY KEY (timeperiod_timerange_id)
+) ;
+
+
+-- --------------------------------------------------------
+-- Icinga 2 specific schema extensions
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_endpoints
+--
+
+CREATE TABLE icinga_endpoints (
+ endpoint_id bigserial,
+ instance_id bigint default 0,
+ endpoint_object_id bigint default 0,
+ zone_object_id bigint default 0,
+ config_type integer default 0,
+ identity text DEFAULT NULL,
+ node text DEFAULT NULL,
+ config_hash varchar(64) DEFAULT NULL,
+ CONSTRAINT PK_endpoint_id PRIMARY KEY (endpoint_id) ,
+ CONSTRAINT UQ_endpoints UNIQUE (instance_id,config_type,endpoint_object_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_endpointstatus
+--
+
+CREATE TABLE icinga_endpointstatus (
+ endpointstatus_id bigserial,
+ instance_id bigint default 0,
+ endpoint_object_id bigint default 0,
+ zone_object_id bigint default 0,
+ status_update_time timestamp,
+ identity text DEFAULT NULL,
+ node text DEFAULT NULL,
+ is_connected integer default 0,
+ CONSTRAINT PK_endpointstatus_id PRIMARY KEY (endpointstatus_id) ,
+ CONSTRAINT UQ_endpointstatus UNIQUE (endpoint_object_id)
+) ;
+
+--
+-- Table structure for table icinga_zones
+--
+
+CREATE TABLE icinga_zones (
+ zone_id bigserial,
+ instance_id bigint default 0,
+ zone_object_id bigint default 0,
+ parent_zone_object_id bigint default 0,
+ config_type integer default 0,
+ is_global integer default 0,
+ config_hash varchar(64) DEFAULT NULL,
+ CONSTRAINT PK_zone_id PRIMARY KEY (zone_id) ,
+ CONSTRAINT UQ_zones UNIQUE (instance_id,config_type,zone_object_id)
+) ;
+
+-- --------------------------------------------------------
+
+--
+-- Table structure for table icinga_zonestatus
+--
+
+CREATE TABLE icinga_zonestatus (
+ zonestatus_id bigserial,
+ instance_id bigint default 0,
+ zone_object_id bigint default 0,
+ parent_zone_object_id bigint default 0,
+ status_update_time timestamp,
+ CONSTRAINT PK_zonestatus_id PRIMARY KEY (zonestatus_id) ,
+ CONSTRAINT UQ_zonestatus UNIQUE (zone_object_id)
+) ;
+
+
+ALTER TABLE icinga_servicestatus ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_hoststatus ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_contactstatus ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_programstatus ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_comments ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_scheduleddowntime ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_runtimevariables ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_customvariablestatus ADD COLUMN endpoint_object_id bigint default NULL;
+
+ALTER TABLE icinga_acknowledgements ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_commenthistory ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_contactnotifications ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_downtimehistory ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_eventhandlers ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_externalcommands ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_flappinghistory ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_hostchecks ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_logentries ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_notifications ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_processevents ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_servicechecks ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_statehistory ADD COLUMN endpoint_object_id bigint default NULL;
+ALTER TABLE icinga_systemcommands ADD COLUMN endpoint_object_id bigint default NULL;
+
+
+-- -----------------------------------------
+-- add index (delete)
+-- -----------------------------------------
+
+-- for periodic delete
+-- instance_id and
+-- TIMEDEVENTS => scheduled_time
+-- SYSTEMCOMMANDS, SERVICECHECKS, HOSTCHECKS, EVENTHANDLERS => start_time
+-- EXTERNALCOMMANDS => entry_time
+
+-- instance_id
+CREATE INDEX systemcommands_i_id_idx on icinga_systemcommands(instance_id);
+CREATE INDEX servicechecks_i_id_idx on icinga_servicechecks(instance_id);
+CREATE INDEX hostchecks_i_id_idx on icinga_hostchecks(instance_id);
+CREATE INDEX eventhandlers_i_id_idx on icinga_eventhandlers(instance_id);
+CREATE INDEX externalcommands_i_id_idx on icinga_externalcommands(instance_id);
+
+-- time
+CREATE INDEX systemcommands_time_id_idx on icinga_systemcommands(start_time);
+CREATE INDEX servicechecks_time_id_idx on icinga_servicechecks(start_time);
+CREATE INDEX hostchecks_time_id_idx on icinga_hostchecks(start_time);
+CREATE INDEX eventhandlers_time_id_idx on icinga_eventhandlers(start_time);
+CREATE INDEX externalcommands_time_id_idx on icinga_externalcommands(entry_time);
+
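The comment block above spells out which columns the housekeeping deletes filter on; a minimal sketch of such a cleanup statement (illustrative only, with a hypothetical instance id and cut-off time, not part of the schema file) shows the shape of query these per-column indexes are meant to serve:

-- illustrative periodic-cleanup delete; the (instance_id) and (start_time) indexes above
-- let PostgreSQL resolve both predicates without scanning the whole table
DELETE FROM icinga_servicechecks
 WHERE instance_id = 1
   AND start_time < '2024-01-01 00:00:00';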
+
+-- for starting cleanup - referenced in dbhandler.c:882
+-- instance_id only
+
+-- realtime data
+CREATE INDEX programstatus_i_id_idx on icinga_programstatus(instance_id);
+CREATE INDEX hoststatus_i_id_idx on icinga_hoststatus(instance_id);
+CREATE INDEX servicestatus_i_id_idx on icinga_servicestatus(instance_id);
+CREATE INDEX contactstatus_i_id_idx on icinga_contactstatus(instance_id);
+CREATE INDEX comments_i_id_idx on icinga_comments(instance_id);
+CREATE INDEX scheduleddowntime_i_id_idx on icinga_scheduleddowntime(instance_id);
+CREATE INDEX runtimevariables_i_id_idx on icinga_runtimevariables(instance_id);
+CREATE INDEX customvariablestatus_i_id_idx on icinga_customvariablestatus(instance_id);
+
+-- config data
+CREATE INDEX configfiles_i_id_idx on icinga_configfiles(instance_id);
+CREATE INDEX configfilevariables_i_id_idx on icinga_configfilevariables(instance_id);
+CREATE INDEX customvariables_i_id_idx on icinga_customvariables(instance_id);
+CREATE INDEX commands_i_id_idx on icinga_commands(instance_id);
+CREATE INDEX timeperiods_i_id_idx on icinga_timeperiods(instance_id);
+CREATE INDEX timeperiod_timeranges_i_id_idx on icinga_timeperiod_timeranges(instance_id);
+CREATE INDEX contactgroups_i_id_idx on icinga_contactgroups(instance_id);
+CREATE INDEX contactgroup_members_i_id_idx on icinga_contactgroup_members(instance_id);
+CREATE INDEX hostgroups_i_id_idx on icinga_hostgroups(instance_id);
+CREATE INDEX hostgroup_members_i_id_idx on icinga_hostgroup_members(instance_id);
+CREATE INDEX servicegroups_i_id_idx on icinga_servicegroups(instance_id);
+CREATE INDEX servicegroup_members_i_id_idx on icinga_servicegroup_members(instance_id);
+CREATE INDEX hostesc_i_id_idx on icinga_hostescalations(instance_id);
+CREATE INDEX hostesc_contacts_i_id_idx on icinga_hostescalation_contacts(instance_id);
+CREATE INDEX serviceesc_i_id_idx on icinga_serviceescalations(instance_id);
+CREATE INDEX serviceesc_contacts_i_id_idx on icinga_serviceescalation_contacts(instance_id);
+CREATE INDEX hostdependencies_i_id_idx on icinga_hostdependencies(instance_id);
+CREATE INDEX contacts_i_id_idx on icinga_contacts(instance_id);
+CREATE INDEX contact_addresses_i_id_idx on icinga_contact_addresses(instance_id);
+CREATE INDEX contact_notifcommands_i_id_idx on icinga_contact_notificationcommands(instance_id);
+CREATE INDEX hosts_i_id_idx on icinga_hosts(instance_id);
+CREATE INDEX host_parenthosts_i_id_idx on icinga_host_parenthosts(instance_id);
+CREATE INDEX host_contacts_i_id_idx on icinga_host_contacts(instance_id);
+CREATE INDEX services_i_id_idx on icinga_services(instance_id);
+CREATE INDEX service_contacts_i_id_idx on icinga_service_contacts(instance_id);
+CREATE INDEX service_contactgroups_i_id_idx on icinga_service_contactgroups(instance_id);
+CREATE INDEX host_contactgroups_i_id_idx on icinga_host_contactgroups(instance_id);
+CREATE INDEX hostesc_cgroups_i_id_idx on icinga_hostescalation_contactgroups(instance_id);
+CREATE INDEX serviceesc_cgroups_i_id_idx on icinga_serviceescalation_contactgroups(instance_id);
+
+-- -----------------------------------------
+-- more index stuff (WHERE clauses)
+-- -----------------------------------------
+
+-- hosts
+CREATE INDEX hosts_host_object_id_idx on icinga_hosts(host_object_id);
+
+-- hoststatus
+CREATE INDEX hoststatus_stat_upd_time_idx on icinga_hoststatus(status_update_time);
+CREATE INDEX hoststatus_current_state_idx on icinga_hoststatus(current_state);
+CREATE INDEX hoststatus_check_type_idx on icinga_hoststatus(check_type);
+CREATE INDEX hoststatus_state_type_idx on icinga_hoststatus(state_type);
+CREATE INDEX hoststatus_last_state_chg_idx on icinga_hoststatus(last_state_change);
+CREATE INDEX hoststatus_notif_enabled_idx on icinga_hoststatus(notifications_enabled);
+CREATE INDEX hoststatus_problem_ack_idx on icinga_hoststatus(problem_has_been_acknowledged);
+CREATE INDEX hoststatus_act_chks_en_idx on icinga_hoststatus(active_checks_enabled);
+CREATE INDEX hoststatus_pas_chks_en_idx on icinga_hoststatus(passive_checks_enabled);
+CREATE INDEX hoststatus_event_hdl_en_idx on icinga_hoststatus(event_handler_enabled);
+CREATE INDEX hoststatus_flap_det_en_idx on icinga_hoststatus(flap_detection_enabled);
+CREATE INDEX hoststatus_is_flapping_idx on icinga_hoststatus(is_flapping);
+CREATE INDEX hoststatus_p_state_chg_idx on icinga_hoststatus(percent_state_change);
+CREATE INDEX hoststatus_latency_idx on icinga_hoststatus(latency);
+CREATE INDEX hoststatus_ex_time_idx on icinga_hoststatus(execution_time);
+CREATE INDEX hoststatus_sch_downt_d_idx on icinga_hoststatus(scheduled_downtime_depth);
+
+-- services
+CREATE INDEX services_host_object_id_idx on icinga_services(host_object_id);
+
+-- servicestatus
+CREATE INDEX srvcstatus_stat_upd_time_idx on icinga_servicestatus(status_update_time);
+CREATE INDEX srvcstatus_current_state_idx on icinga_servicestatus(current_state);
+CREATE INDEX srvcstatus_check_type_idx on icinga_servicestatus(check_type);
+CREATE INDEX srvcstatus_state_type_idx on icinga_servicestatus(state_type);
+CREATE INDEX srvcstatus_last_state_chg_idx on icinga_servicestatus(last_state_change);
+CREATE INDEX srvcstatus_notif_enabled_idx on icinga_servicestatus(notifications_enabled);
+CREATE INDEX srvcstatus_problem_ack_idx on icinga_servicestatus(problem_has_been_acknowledged);
+CREATE INDEX srvcstatus_act_chks_en_idx on icinga_servicestatus(active_checks_enabled);
+CREATE INDEX srvcstatus_pas_chks_en_idx on icinga_servicestatus(passive_checks_enabled);
+CREATE INDEX srvcstatus_event_hdl_en_idx on icinga_servicestatus(event_handler_enabled);
+CREATE INDEX srvcstatus_flap_det_en_idx on icinga_servicestatus(flap_detection_enabled);
+CREATE INDEX srvcstatus_is_flapping_idx on icinga_servicestatus(is_flapping);
+CREATE INDEX srvcstatus_p_state_chg_idx on icinga_servicestatus(percent_state_change);
+CREATE INDEX srvcstatus_latency_idx on icinga_servicestatus(latency);
+CREATE INDEX srvcstatus_ex_time_idx on icinga_servicestatus(execution_time);
+CREATE INDEX srvcstatus_sch_downt_d_idx on icinga_servicestatus(scheduled_downtime_depth);
+
+-- hostchecks
+CREATE INDEX hostchks_h_obj_id_idx on icinga_hostchecks(host_object_id);
+
+-- servicechecks
+CREATE INDEX servicechks_s_obj_id_idx on icinga_servicechecks(service_object_id);
+
+-- objects
+CREATE INDEX objects_objtype_id_idx ON icinga_objects(objecttype_id);
+CREATE INDEX objects_name1_idx ON icinga_objects(name1);
+CREATE INDEX objects_name2_idx ON icinga_objects(name2);
+CREATE INDEX objects_inst_id_idx ON icinga_objects(instance_id);
+
+-- instances
+-- CREATE INDEX instances_name_idx on icinga_instances(instance_name);
+
+-- logentries
+-- CREATE INDEX loge_instance_id_idx on icinga_logentries(instance_id);
+-- #236
+CREATE INDEX loge_time_idx on icinga_logentries(logentry_time);
+-- CREATE INDEX loge_data_idx on icinga_logentries(logentry_data);
+CREATE INDEX loge_inst_id_time_idx on icinga_logentries (instance_id, logentry_time);
+
+
+-- commenthistory
+-- CREATE INDEX c_hist_instance_id_idx on icinga_commenthistory(instance_id);
+-- CREATE INDEX c_hist_c_time_idx on icinga_commenthistory(comment_time);
+-- CREATE INDEX c_hist_i_c_id_idx on icinga_commenthistory(internal_comment_id);
+
+-- downtimehistory
+-- CREATE INDEX d_t_hist_nstance_id_idx on icinga_downtimehistory(instance_id);
+-- CREATE INDEX d_t_hist_type_idx on icinga_downtimehistory(downtime_type);
+-- CREATE INDEX d_t_hist_object_id_idx on icinga_downtimehistory(object_id);
+-- CREATE INDEX d_t_hist_entry_time_idx on icinga_downtimehistory(entry_time);
+-- CREATE INDEX d_t_hist_sched_start_idx on icinga_downtimehistory(scheduled_start_time);
+-- CREATE INDEX d_t_hist_sched_end_idx on icinga_downtimehistory(scheduled_end_time);
+
+-- scheduleddowntime
+-- CREATE INDEX sched_d_t_downtime_type_idx on icinga_scheduleddowntime(downtime_type);
+-- CREATE INDEX sched_d_t_object_id_idx on icinga_scheduleddowntime(object_id);
+-- CREATE INDEX sched_d_t_entry_time_idx on icinga_scheduleddowntime(entry_time);
+-- CREATE INDEX sched_d_t_start_time_idx on icinga_scheduleddowntime(scheduled_start_time);
+-- CREATE INDEX sched_d_t_end_time_idx on icinga_scheduleddowntime(scheduled_end_time);
+
+-- Icinga Web Notifications
+CREATE INDEX notification_idx ON icinga_notifications(notification_type, object_id, start_time);
+CREATE INDEX notification_object_id_idx ON icinga_notifications(object_id);
+CREATE INDEX contact_notification_idx ON icinga_contactnotifications(notification_id, contact_object_id);
+CREATE INDEX contacts_object_id_idx ON icinga_contacts(contact_object_id);
+CREATE INDEX contact_notif_meth_notif_idx ON icinga_contactnotificationmethods(contactnotification_id, command_object_id);
+CREATE INDEX command_object_idx ON icinga_commands(object_id);
+CREATE INDEX services_combined_object_idx ON icinga_services(service_object_id, host_object_id);
+
+-- statehistory
+CREATE INDEX statehist_i_id_o_id_s_ty_s_ti on icinga_statehistory(instance_id, object_id, state_type, state_time);
+-- #2274
+CREATE INDEX statehist_state_idx on icinga_statehistory(object_id,state);
+
+-- #2618
+CREATE INDEX cntgrpmbrs_cgid_coid ON icinga_contactgroup_members (contactgroup_id,contact_object_id);
+CREATE INDEX hstgrpmbrs_hgid_hoid ON icinga_hostgroup_members (hostgroup_id,host_object_id);
+CREATE INDEX hstcntgrps_hid_cgoid ON icinga_host_contactgroups (host_id,contactgroup_object_id);
+CREATE INDEX hstprnthsts_hid_phoid ON icinga_host_parenthosts (host_id,parent_host_object_id);
+CREATE INDEX runtimevars_iid_varn ON icinga_runtimevariables (instance_id,varname);
+CREATE INDEX sgmbrs_sgid_soid ON icinga_servicegroup_members (servicegroup_id,service_object_id);
+CREATE INDEX scgrps_sid_cgoid ON icinga_service_contactgroups (service_id,contactgroup_object_id);
+CREATE INDEX tperiod_tid_d_ss_es ON icinga_timeperiod_timeranges (timeperiod_id,day,start_sec,end_sec);
+
+-- #3649
+CREATE INDEX sla_idx_sthist ON icinga_statehistory (object_id, state_time DESC);
+CREATE INDEX sla_idx_dohist ON icinga_downtimehistory (object_id, actual_start_time, actual_end_time);
+CREATE INDEX sla_idx_obj ON icinga_objects (objecttype_id, is_active, name1);
+
+-- #4985
+CREATE INDEX commenthistory_delete_idx ON icinga_commenthistory (instance_id, comment_time, internal_comment_id);
+
+-- #10070
+CREATE INDEX idx_comments_object_id on icinga_comments(object_id);
+CREATE INDEX idx_scheduleddowntime_object_id on icinga_scheduleddowntime(object_id);
+
+-- #10066
+CREATE INDEX idx_endpoints_object_id on icinga_endpoints(endpoint_object_id);
+CREATE INDEX idx_endpointstatus_object_id on icinga_endpointstatus(endpoint_object_id);
+
+CREATE INDEX idx_endpoints_zone_object_id on icinga_endpoints(zone_object_id);
+CREATE INDEX idx_endpointstatus_zone_object_id on icinga_endpointstatus(zone_object_id);
+
+CREATE INDEX idx_zones_object_id on icinga_zones(zone_object_id);
+CREATE INDEX idx_zonestatus_object_id on icinga_zonestatus(zone_object_id);
+
+CREATE INDEX idx_zones_parent_object_id on icinga_zones(parent_zone_object_id);
+CREATE INDEX idx_zonestatus_parent_object_id on icinga_zonestatus(parent_zone_object_id);
+
+-- #12210
+CREATE INDEX idx_comments_session_del ON icinga_comments (instance_id, session_token);
+CREATE INDEX idx_downtimes_session_del ON icinga_scheduleddowntime (instance_id, session_token);
+
+-- #12107
+CREATE INDEX idx_statehistory_cleanup on icinga_statehistory(instance_id, state_time);
+
+-- #12435
+CREATE INDEX idx_customvariables_object_id on icinga_customvariables(object_id);
+CREATE INDEX idx_contactgroup_members_object_id on icinga_contactgroup_members(contact_object_id);
+CREATE INDEX idx_hostgroup_members_object_id on icinga_hostgroup_members(host_object_id);
+CREATE INDEX idx_servicegroup_members_object_id on icinga_servicegroup_members(service_object_id);
+CREATE INDEX idx_servicedependencies_dependent_service_object_id on icinga_servicedependencies(dependent_service_object_id);
+CREATE INDEX idx_hostdependencies_dependent_host_object_id on icinga_hostdependencies(dependent_host_object_id);
+CREATE INDEX idx_service_contacts_service_id on icinga_service_contacts(service_id);
+CREATE INDEX idx_host_contacts_host_id on icinga_host_contacts(host_id);
+
+-- #5458
+CREATE INDEX idx_downtimehistory_remove ON icinga_downtimehistory (object_id, entry_time, scheduled_start_time, scheduled_end_time);
+CREATE INDEX idx_scheduleddowntime_remove ON icinga_scheduleddowntime (object_id, entry_time, scheduled_start_time, scheduled_end_time);
+
+-- #5492
+CREATE INDEX idx_commenthistory_remove ON icinga_commenthistory (object_id, entry_time);
+CREATE INDEX idx_comments_remove ON icinga_comments (object_id, entry_time);
+
+-- -----------------------------------------
+-- set dbversion
+-- -----------------------------------------
+
+SELECT updatedbversion('1.14.3');
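Once the schema has been loaded, the recorded version can be checked; a minimal sketch, assuming the icinga_dbversion table created earlier in this file and that updatedbversion() registers the version under the name 'idoutils':

-- illustrative only: expects version '1.14.3' after this script has run
SELECT name, version, modify_time FROM icinga_dbversion WHERE name = 'idoutils';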
+
diff --git a/lib/db_ido_pgsql/schema/upgrade/2.0.2.sql b/lib/db_ido_pgsql/schema/upgrade/2.0.2.sql
new file mode 100644
index 0000000..60710ef
--- /dev/null
+++ b/lib/db_ido_pgsql/schema/upgrade/2.0.2.sql
@@ -0,0 +1,17 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.0.2
+--
+-- -----------------------------------------
+-- Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+UPDATE icinga_objects SET name2 = NULL WHERE name2 = '';
+
+-- -----------------------------------------
+-- update dbversion
+-- -----------------------------------------
+
+SELECT updatedbversion('1.11.6');
+
diff --git a/lib/db_ido_pgsql/schema/upgrade/2.1.0.sql b/lib/db_ido_pgsql/schema/upgrade/2.1.0.sql
new file mode 100644
index 0000000..a32ecea
--- /dev/null
+++ b/lib/db_ido_pgsql/schema/upgrade/2.1.0.sql
@@ -0,0 +1,17 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.1.0
+--
+-- -----------------------------------------
+-- Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+ALTER TABLE icinga_programstatus ADD COLUMN endpoint_name TEXT default NULL;
+
+-- -----------------------------------------
+-- update dbversion
+-- -----------------------------------------
+
+SELECT updatedbversion('1.11.7');
+
diff --git a/lib/db_ido_pgsql/schema/upgrade/2.2.0.sql b/lib/db_ido_pgsql/schema/upgrade/2.2.0.sql
new file mode 100644
index 0000000..d105a34
--- /dev/null
+++ b/lib/db_ido_pgsql/schema/upgrade/2.2.0.sql
@@ -0,0 +1,21 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.2.0
+--
+-- -----------------------------------------
+-- Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+ALTER TABLE icinga_programstatus ADD COLUMN program_version TEXT default NULL;
+
+ALTER TABLE icinga_customvariables ADD COLUMN is_json INTEGER default 0;
+ALTER TABLE icinga_customvariablestatus ADD COLUMN is_json INTEGER default 0;
+
+
+-- -----------------------------------------
+-- update dbversion
+-- -----------------------------------------
+
+SELECT updatedbversion('1.12.0');
+
diff --git a/lib/db_ido_pgsql/schema/upgrade/2.3.0.sql b/lib/db_ido_pgsql/schema/upgrade/2.3.0.sql
new file mode 100644
index 0000000..91764de
--- /dev/null
+++ b/lib/db_ido_pgsql/schema/upgrade/2.3.0.sql
@@ -0,0 +1,26 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.3.0
+--
+-- -----------------------------------------
+-- Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+-- -----------------------------------------
+-- #7765 drop unique constraint
+-- -----------------------------------------
+
+ALTER TABLE icinga_servicedependencies DROP CONSTRAINT uq_servicedependencies;
+ALTER TABLE icinga_hostdependencies DROP CONSTRAINT uq_hostdependencies;
+
+CREATE INDEX idx_servicedependencies ON icinga_servicedependencies(instance_id,config_type,service_object_id,dependent_service_object_id,dependency_type,inherits_parent,fail_on_ok,fail_on_warning,fail_on_unknown,fail_on_critical);
+CREATE INDEX idx_hostdependencies ON icinga_hostdependencies(instance_id,config_type,host_object_id,dependent_host_object_id,dependency_type,inherits_parent,fail_on_up,fail_on_down,fail_on_unreachable);
+
+
+-- -----------------------------------------
+-- update dbversion
+-- -----------------------------------------
+
+SELECT updatedbversion('1.13.0');
+
diff --git a/lib/db_ido_pgsql/schema/upgrade/2.4.0.sql b/lib/db_ido_pgsql/schema/upgrade/2.4.0.sql
new file mode 100644
index 0000000..4a6e45e
--- /dev/null
+++ b/lib/db_ido_pgsql/schema/upgrade/2.4.0.sql
@@ -0,0 +1,185 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.4.0
+--
+-- -----------------------------------------
+-- Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+-- -----------------------------------------
+-- #9027 Default timestamps lack time zone
+-- -----------------------------------------
+
+ALTER TABLE icinga_acknowledgements ALTER COLUMN entry_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_acknowledgements ALTER COLUMN end_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_commenthistory ALTER COLUMN entry_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_commenthistory ALTER COLUMN comment_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_commenthistory ALTER COLUMN expiration_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_commenthistory ALTER COLUMN deletion_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_comments ALTER COLUMN entry_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_comments ALTER COLUMN comment_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_comments ALTER COLUMN expiration_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_conninfo ALTER COLUMN connect_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_conninfo ALTER COLUMN disconnect_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_conninfo ALTER COLUMN last_checkin_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_conninfo ALTER COLUMN data_start_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_conninfo ALTER COLUMN data_end_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_contactnotificationmethods ALTER COLUMN start_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_contactnotificationmethods ALTER COLUMN end_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_contactnotifications ALTER COLUMN start_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_contactnotifications ALTER COLUMN end_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_contactstatus ALTER COLUMN status_update_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_contactstatus ALTER COLUMN last_host_notification SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_contactstatus ALTER COLUMN last_service_notification SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_customvariablestatus ALTER COLUMN status_update_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_dbversion ALTER COLUMN create_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_dbversion ALTER COLUMN modify_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_downtimehistory ALTER COLUMN entry_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_downtimehistory ALTER COLUMN scheduled_start_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_downtimehistory ALTER COLUMN scheduled_end_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_downtimehistory ALTER COLUMN actual_start_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_downtimehistory ALTER COLUMN actual_end_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_downtimehistory ALTER COLUMN trigger_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_eventhandlers ALTER COLUMN start_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_eventhandlers ALTER COLUMN end_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_externalcommands ALTER COLUMN entry_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_flappinghistory ALTER COLUMN event_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_flappinghistory ALTER COLUMN comment_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_hostchecks ALTER COLUMN start_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_hostchecks ALTER COLUMN end_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_hoststatus ALTER COLUMN status_update_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_hoststatus ALTER COLUMN last_check SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_hoststatus ALTER COLUMN next_check SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_hoststatus ALTER COLUMN last_state_change SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_hoststatus ALTER COLUMN last_hard_state_change SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_hoststatus ALTER COLUMN last_time_up SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_hoststatus ALTER COLUMN last_time_down SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_hoststatus ALTER COLUMN last_time_unreachable SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_hoststatus ALTER COLUMN last_notification SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_hoststatus ALTER COLUMN next_notification SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_logentries ALTER COLUMN logentry_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_logentries ALTER COLUMN entry_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_notifications ALTER COLUMN start_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_notifications ALTER COLUMN end_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_processevents ALTER COLUMN event_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_programstatus ALTER COLUMN status_update_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_programstatus ALTER COLUMN program_start_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_programstatus ALTER COLUMN program_end_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_programstatus ALTER COLUMN last_command_check SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_programstatus ALTER COLUMN last_log_rotation SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_programstatus ALTER COLUMN disable_notif_expire_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_scheduleddowntime ALTER COLUMN entry_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_scheduleddowntime ALTER COLUMN scheduled_start_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_scheduleddowntime ALTER COLUMN scheduled_end_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_scheduleddowntime ALTER COLUMN actual_start_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_scheduleddowntime ALTER COLUMN trigger_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_servicechecks ALTER COLUMN start_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_servicechecks ALTER COLUMN end_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_servicestatus ALTER COLUMN status_update_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_servicestatus ALTER COLUMN last_check SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_servicestatus ALTER COLUMN next_check SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_servicestatus ALTER COLUMN last_state_change SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_servicestatus ALTER COLUMN last_hard_state_change SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_servicestatus ALTER COLUMN last_time_ok SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_servicestatus ALTER COLUMN last_time_warning SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_servicestatus ALTER COLUMN last_time_unknown SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_servicestatus ALTER COLUMN last_time_critical SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_servicestatus ALTER COLUMN last_notification SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_servicestatus ALTER COLUMN next_notification SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_statehistory ALTER COLUMN state_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_systemcommands ALTER COLUMN start_time SET DEFAULT '1970-01-01 00:00:00+00';
+ALTER TABLE icinga_systemcommands ALTER COLUMN end_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+ALTER TABLE icinga_endpointstatus ALTER COLUMN status_update_time SET DEFAULT '1970-01-01 00:00:00+00';
+
+-- -----------------------------------------
+-- #9455 check_source data type
+-- -----------------------------------------
+
+ALTER TABLE icinga_statehistory ALTER COLUMN check_source TYPE TEXT;
+ALTER TABLE icinga_statehistory ALTER COLUMN check_source SET default '';
+
+-- -----------------------------------------
+-- #9286 zones table
+-- -----------------------------------------
+
+ALTER TABLE icinga_endpoints ADD COLUMN zone_object_id bigint default 0;
+ALTER TABLE icinga_endpointstatus ADD COLUMN zone_object_id bigint default 0;
+
+CREATE TABLE icinga_zones (
+ zone_id bigserial,
+ instance_id bigint default 0,
+ zone_object_id bigint default 0,
+ parent_zone_object_id bigint default 0,
+ config_type integer default 0,
+ is_global integer default 0,
+ CONSTRAINT PK_zone_id PRIMARY KEY (zone_id) ,
+ CONSTRAINT UQ_zones UNIQUE (instance_id,config_type,zone_object_id)
+) ;
+
+CREATE TABLE icinga_zonestatus (
+ zonestatus_id bigserial,
+ instance_id bigint default 0,
+ zone_object_id bigint default 0,
+ parent_zone_object_id bigint default 0,
+ status_update_time timestamp with time zone default '1970-01-01 00:00:00+00',
+ CONSTRAINT PK_zonestatus_id PRIMARY KEY (zonestatus_id) ,
+ CONSTRAINT UQ_zonestatus UNIQUE (zone_object_id)
+) ;
+
+-- -----------------------------------------
+-- #10392 original attributes
+-- -----------------------------------------
+
+ALTER TABLE icinga_servicestatus ADD COLUMN original_attributes TEXT default NULL;
+ALTER TABLE icinga_hoststatus ADD COLUMN original_attributes TEXT default NULL;
+
+-- -----------------------------------------
+-- #10436 deleted custom vars
+-- -----------------------------------------
+
+ALTER TABLE icinga_customvariables ADD COLUMN session_token INTEGER default NULL;
+ALTER TABLE icinga_customvariablestatus ADD COLUMN session_token INTEGER default NULL;
+
+CREATE INDEX cv_session_del_idx ON icinga_customvariables (session_token);
+CREATE INDEX cvs_session_del_idx ON icinga_customvariablestatus (session_token);
+
+-- -----------------------------------------
+-- #10431 comment/downtime name
+-- -----------------------------------------
+
+ALTER TABLE icinga_comments ADD COLUMN name TEXT default NULL;
+ALTER TABLE icinga_commenthistory ADD COLUMN name TEXT default NULL;
+
+ALTER TABLE icinga_scheduleddowntime ADD COLUMN name TEXT default NULL;
+ALTER TABLE icinga_downtimehistory ADD COLUMN name TEXT default NULL;
+
+-- -----------------------------------------
+-- update dbversion
+-- -----------------------------------------
+
+SELECT updatedbversion('1.14.0');
diff --git a/lib/db_ido_pgsql/schema/upgrade/2.5.0.sql b/lib/db_ido_pgsql/schema/upgrade/2.5.0.sql
new file mode 100644
index 0000000..063a812
--- /dev/null
+++ b/lib/db_ido_pgsql/schema/upgrade/2.5.0.sql
@@ -0,0 +1,85 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.5.0
+--
+-- -----------------------------------------
+-- Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+-- -----------------------------------------
+-- #10069 IDO: check_source should not be a TEXT field
+-- -----------------------------------------
+
+ALTER TABLE icinga_hoststatus ALTER COLUMN check_source TYPE varchar(255);
+ALTER TABLE icinga_servicestatus ALTER COLUMN check_source TYPE varchar(255);
+ALTER TABLE icinga_statehistory ALTER COLUMN check_source TYPE varchar(255);
+
+-- -----------------------------------------
+-- #10070
+-- -----------------------------------------
+
+CREATE INDEX idx_comments_object_id on icinga_comments(object_id);
+CREATE INDEX idx_scheduleddowntime_object_id on icinga_scheduleddowntime(object_id);
+
+-- -----------------------------------------
+-- #10066
+-- -----------------------------------------
+
+CREATE INDEX idx_endpoints_object_id on icinga_endpoints(endpoint_object_id);
+CREATE INDEX idx_endpointstatus_object_id on icinga_endpointstatus(endpoint_object_id);
+
+CREATE INDEX idx_endpoints_zone_object_id on icinga_endpoints(zone_object_id);
+CREATE INDEX idx_endpointstatus_zone_object_id on icinga_endpointstatus(zone_object_id);
+
+CREATE INDEX idx_zones_object_id on icinga_zones(zone_object_id);
+CREATE INDEX idx_zonestatus_object_id on icinga_zonestatus(zone_object_id);
+
+CREATE INDEX idx_zones_parent_object_id on icinga_zones(parent_zone_object_id);
+CREATE INDEX idx_zonestatus_parent_object_id on icinga_zonestatus(parent_zone_object_id);
+
+-- -----------------------------------------
+-- #12258
+-- -----------------------------------------
+ALTER TABLE icinga_comments ADD COLUMN session_token INTEGER default NULL;
+ALTER TABLE icinga_scheduleddowntime ADD COLUMN session_token INTEGER default NULL;
+
+CREATE INDEX idx_comments_session_del ON icinga_comments (instance_id, session_token);
+CREATE INDEX idx_downtimes_session_del ON icinga_scheduleddowntime (instance_id, session_token);
+
+-- -----------------------------------------
+-- #12107
+-- -----------------------------------------
+CREATE INDEX idx_statehistory_cleanup on icinga_statehistory(instance_id, state_time);
+
+-- -----------------------------------------
+-- #12435
+-- -----------------------------------------
+ALTER TABLE icinga_commands ADD config_hash VARCHAR(64) DEFAULT NULL;
+ALTER TABLE icinga_contactgroups ADD config_hash VARCHAR(64) DEFAULT NULL;
+ALTER TABLE icinga_contacts ADD config_hash VARCHAR(64) DEFAULT NULL;
+ALTER TABLE icinga_hostgroups ADD config_hash VARCHAR(64) DEFAULT NULL;
+ALTER TABLE icinga_hosts ADD config_hash VARCHAR(64) DEFAULT NULL;
+ALTER TABLE icinga_servicegroups ADD config_hash VARCHAR(64) DEFAULT NULL;
+ALTER TABLE icinga_services ADD config_hash VARCHAR(64) DEFAULT NULL;
+ALTER TABLE icinga_timeperiods ADD config_hash VARCHAR(64) DEFAULT NULL;
+ALTER TABLE icinga_endpoints ADD config_hash VARCHAR(64) DEFAULT NULL;
+ALTER TABLE icinga_zones ADD config_hash VARCHAR(64) DEFAULT NULL;
+
+ALTER TABLE icinga_customvariables DROP session_token;
+ALTER TABLE icinga_customvariablestatus DROP session_token;
+
+CREATE INDEX idx_customvariables_object_id on icinga_customvariables(object_id);
+CREATE INDEX idx_contactgroup_members_object_id on icinga_contactgroup_members(contact_object_id);
+CREATE INDEX idx_hostgroup_members_object_id on icinga_hostgroup_members(host_object_id);
+CREATE INDEX idx_servicegroup_members_object_id on icinga_servicegroup_members(service_object_id);
+CREATE INDEX idx_servicedependencies_dependent_service_object_id on icinga_servicedependencies(dependent_service_object_id);
+CREATE INDEX idx_hostdependencies_dependent_host_object_id on icinga_hostdependencies(dependent_host_object_id);
+CREATE INDEX idx_service_contacts_service_id on icinga_service_contacts(service_id);
+CREATE INDEX idx_host_contacts_host_id on icinga_host_contacts(host_id);
+
+-- -----------------------------------------
+-- set dbversion
+-- -----------------------------------------
+
+SELECT updatedbversion('1.14.1');
diff --git a/lib/db_ido_pgsql/schema/upgrade/2.6.0.sql b/lib/db_ido_pgsql/schema/upgrade/2.6.0.sql
new file mode 100644
index 0000000..aa538a6
--- /dev/null
+++ b/lib/db_ido_pgsql/schema/upgrade/2.6.0.sql
@@ -0,0 +1,161 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.6.0
+--
+-- -----------------------------------------
+-- Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+-- -----------------------------------------
+-- #13221 IDO: PostgreSQL: Don't use timestamp with timezone for unix timestamp columns
+-- -----------------------------------------
+
+DROP FUNCTION IF EXISTS from_unixtime(bigint);
+CREATE FUNCTION from_unixtime(bigint) RETURNS timestamp AS $$
+ SELECT to_timestamp($1) AT TIME ZONE 'UTC' AS result
+$$ LANGUAGE sql;
+
+DROP FUNCTION IF EXISTS unix_timestamp(timestamp WITH TIME ZONE);
+CREATE OR REPLACE FUNCTION unix_timestamp(timestamp) RETURNS bigint AS '
+ SELECT CAST(EXTRACT(EPOCH FROM $1) AS bigint) AS result;
+' LANGUAGE sql;
+
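The two helper functions above map between UNIX epoch values and plain (time-zone-less) timestamps; a quick sanity check in psql (illustrative only, not part of the upgrade script):

-- from_unixtime() turns an epoch value into a UTC timestamp; unix_timestamp() is its inverse
SELECT from_unixtime(0);                          -- 1970-01-01 00:00:00
SELECT unix_timestamp(from_unixtime(1483228800)); -- 1483228800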
+ALTER TABLE icinga_acknowledgements
+ ALTER COLUMN entry_time DROP DEFAULT, ALTER COLUMN entry_time TYPE timestamp,
+ ALTER COLUMN end_time DROP DEFAULT, ALTER COLUMN end_time TYPE timestamp;
+
+ALTER TABLE icinga_commenthistory
+ ALTER COLUMN entry_time DROP DEFAULT, ALTER COLUMN entry_time TYPE timestamp,
+ ALTER COLUMN comment_time DROP DEFAULT, ALTER COLUMN comment_time TYPE timestamp,
+ ALTER COLUMN expiration_time DROP DEFAULT, ALTER COLUMN expiration_time TYPE timestamp,
+ ALTER COLUMN deletion_time DROP DEFAULT, ALTER COLUMN deletion_time TYPE timestamp;
+
+ALTER TABLE icinga_comments
+ ALTER COLUMN entry_time DROP DEFAULT, ALTER COLUMN entry_time TYPE timestamp,
+ ALTER COLUMN comment_time DROP DEFAULT, ALTER COLUMN comment_time TYPE timestamp,
+ ALTER COLUMN expiration_time DROP DEFAULT, ALTER COLUMN expiration_time TYPE timestamp;
+
+ALTER TABLE icinga_conninfo
+ ALTER COLUMN connect_time DROP DEFAULT, ALTER COLUMN connect_time TYPE timestamp,
+ ALTER COLUMN disconnect_time DROP DEFAULT, ALTER COLUMN disconnect_time TYPE timestamp,
+ ALTER COLUMN last_checkin_time DROP DEFAULT, ALTER COLUMN last_checkin_time TYPE timestamp,
+ ALTER COLUMN data_start_time DROP DEFAULT, ALTER COLUMN data_start_time TYPE timestamp,
+ ALTER COLUMN data_end_time DROP DEFAULT, ALTER COLUMN data_end_time TYPE timestamp;
+
+ALTER TABLE icinga_contactnotificationmethods
+ ALTER COLUMN start_time DROP DEFAULT, ALTER COLUMN start_time TYPE timestamp,
+ ALTER COLUMN end_time DROP DEFAULT, ALTER COLUMN end_time TYPE timestamp;
+
+ALTER TABLE icinga_contactnotifications
+ ALTER COLUMN start_time DROP DEFAULT, ALTER COLUMN start_time TYPE timestamp,
+ ALTER COLUMN end_time DROP DEFAULT, ALTER COLUMN end_time TYPE timestamp;
+
+ALTER TABLE icinga_contactstatus
+ ALTER COLUMN status_update_time DROP DEFAULT, ALTER COLUMN status_update_time TYPE timestamp,
+ ALTER COLUMN last_host_notification DROP DEFAULT, ALTER COLUMN last_host_notification TYPE timestamp,
+ ALTER COLUMN last_service_notification DROP DEFAULT, ALTER COLUMN last_service_notification TYPE timestamp;
+
+ALTER TABLE icinga_customvariablestatus
+ ALTER COLUMN status_update_time DROP DEFAULT, ALTER COLUMN status_update_time TYPE timestamp;
+
+ALTER TABLE icinga_dbversion
+ ALTER COLUMN create_time DROP DEFAULT, ALTER COLUMN create_time TYPE timestamp,
+ ALTER COLUMN modify_time DROP DEFAULT, ALTER COLUMN modify_time TYPE timestamp;
+
+ALTER TABLE icinga_downtimehistory
+ ALTER COLUMN entry_time DROP DEFAULT, ALTER COLUMN entry_time TYPE timestamp,
+ ALTER COLUMN scheduled_start_time DROP DEFAULT, ALTER COLUMN scheduled_start_time TYPE timestamp,
+ ALTER COLUMN scheduled_end_time DROP DEFAULT, ALTER COLUMN scheduled_end_time TYPE timestamp,
+ ALTER COLUMN actual_start_time DROP DEFAULT, ALTER COLUMN actual_start_time TYPE timestamp,
+ ALTER COLUMN actual_end_time DROP DEFAULT, ALTER COLUMN actual_end_time TYPE timestamp,
+ ALTER COLUMN trigger_time DROP DEFAULT, ALTER COLUMN trigger_time TYPE timestamp;
+
+ALTER TABLE icinga_eventhandlers
+ ALTER COLUMN start_time DROP DEFAULT, ALTER COLUMN start_time TYPE timestamp,
+ ALTER COLUMN end_time DROP DEFAULT, ALTER COLUMN end_time TYPE timestamp;
+
+ALTER TABLE icinga_externalcommands
+ ALTER COLUMN entry_time DROP DEFAULT, ALTER COLUMN entry_time TYPE timestamp;
+
+ALTER TABLE icinga_flappinghistory
+ ALTER COLUMN event_time DROP DEFAULT, ALTER COLUMN event_time TYPE timestamp,
+ ALTER COLUMN comment_time DROP DEFAULT, ALTER COLUMN comment_time TYPE timestamp;
+
+ALTER TABLE icinga_hostchecks
+ ALTER COLUMN start_time DROP DEFAULT, ALTER COLUMN start_time TYPE timestamp,
+ ALTER COLUMN end_time DROP DEFAULT, ALTER COLUMN end_time TYPE timestamp;
+
+ALTER TABLE icinga_hoststatus
+ ALTER COLUMN status_update_time DROP DEFAULT, ALTER COLUMN status_update_time TYPE timestamp,
+ ALTER COLUMN last_check DROP DEFAULT, ALTER COLUMN last_check TYPE timestamp,
+ ALTER COLUMN next_check DROP DEFAULT, ALTER COLUMN next_check TYPE timestamp,
+ ALTER COLUMN last_state_change DROP DEFAULT, ALTER COLUMN last_state_change TYPE timestamp,
+ ALTER COLUMN last_hard_state_change DROP DEFAULT, ALTER COLUMN last_hard_state_change TYPE timestamp,
+ ALTER COLUMN last_time_up DROP DEFAULT, ALTER COLUMN last_time_up TYPE timestamp,
+ ALTER COLUMN last_time_down DROP DEFAULT, ALTER COLUMN last_time_down TYPE timestamp,
+ ALTER COLUMN last_time_unreachable DROP DEFAULT, ALTER COLUMN last_time_unreachable TYPE timestamp,
+ ALTER COLUMN last_notification DROP DEFAULT, ALTER COLUMN last_notification TYPE timestamp,
+ ALTER COLUMN next_notification DROP DEFAULT, ALTER COLUMN next_notification TYPE timestamp;
+
+ALTER TABLE icinga_logentries
+ ALTER COLUMN logentry_time DROP DEFAULT, ALTER COLUMN logentry_time TYPE timestamp,
+ ALTER COLUMN entry_time DROP DEFAULT, ALTER COLUMN entry_time TYPE timestamp;
+
+ALTER TABLE icinga_notifications
+ ALTER COLUMN start_time DROP DEFAULT, ALTER COLUMN start_time TYPE timestamp,
+ ALTER COLUMN end_time DROP DEFAULT, ALTER COLUMN end_time TYPE timestamp;
+
+ALTER TABLE icinga_processevents
+ ALTER COLUMN event_time DROP DEFAULT, ALTER COLUMN event_time TYPE timestamp;
+
+ALTER TABLE icinga_programstatus
+ ALTER COLUMN status_update_time DROP DEFAULT, ALTER COLUMN status_update_time TYPE timestamp,
+ ALTER COLUMN program_start_time DROP DEFAULT, ALTER COLUMN program_start_time TYPE timestamp,
+ ALTER COLUMN program_end_time DROP DEFAULT, ALTER COLUMN program_end_time TYPE timestamp,
+ ALTER COLUMN last_command_check DROP DEFAULT, ALTER COLUMN last_command_check TYPE timestamp,
+ ALTER COLUMN last_log_rotation DROP DEFAULT, ALTER COLUMN last_log_rotation TYPE timestamp,
+ ALTER COLUMN disable_notif_expire_time DROP DEFAULT, ALTER COLUMN disable_notif_expire_time TYPE timestamp;
+
+ALTER TABLE icinga_scheduleddowntime
+ ALTER COLUMN entry_time DROP DEFAULT, ALTER COLUMN entry_time TYPE timestamp,
+ ALTER COLUMN scheduled_start_time DROP DEFAULT, ALTER COLUMN scheduled_start_time TYPE timestamp,
+ ALTER COLUMN scheduled_end_time DROP DEFAULT, ALTER COLUMN scheduled_end_time TYPE timestamp,
+ ALTER COLUMN actual_start_time DROP DEFAULT, ALTER COLUMN actual_start_time TYPE timestamp,
+ ALTER COLUMN trigger_time DROP DEFAULT, ALTER COLUMN trigger_time TYPE timestamp;
+
+ALTER TABLE icinga_servicechecks
+ ALTER COLUMN start_time DROP DEFAULT, ALTER COLUMN start_time TYPE timestamp,
+ ALTER COLUMN end_time DROP DEFAULT, ALTER COLUMN end_time TYPE timestamp;
+
+ALTER TABLE icinga_servicestatus
+ ALTER COLUMN status_update_time DROP DEFAULT, ALTER COLUMN status_update_time TYPE timestamp,
+ ALTER COLUMN last_check DROP DEFAULT, ALTER COLUMN last_check TYPE timestamp,
+ ALTER COLUMN next_check DROP DEFAULT, ALTER COLUMN next_check TYPE timestamp,
+ ALTER COLUMN last_state_change DROP DEFAULT, ALTER COLUMN last_state_change TYPE timestamp,
+ ALTER COLUMN last_hard_state_change DROP DEFAULT, ALTER COLUMN last_hard_state_change TYPE timestamp,
+ ALTER COLUMN last_time_ok DROP DEFAULT, ALTER COLUMN last_time_ok TYPE timestamp,
+ ALTER COLUMN last_time_warning DROP DEFAULT, ALTER COLUMN last_time_warning TYPE timestamp,
+ ALTER COLUMN last_time_unknown DROP DEFAULT, ALTER COLUMN last_time_unknown TYPE timestamp,
+ ALTER COLUMN last_time_critical DROP DEFAULT, ALTER COLUMN last_time_critical TYPE timestamp,
+ ALTER COLUMN last_notification DROP DEFAULT, ALTER COLUMN last_notification TYPE timestamp,
+ ALTER COLUMN next_notification DROP DEFAULT, ALTER COLUMN next_notification TYPE timestamp;
+
+ALTER TABLE icinga_statehistory
+ ALTER COLUMN state_time DROP DEFAULT, ALTER COLUMN state_time TYPE timestamp;
+
+ALTER TABLE icinga_systemcommands
+ ALTER COLUMN start_time DROP DEFAULT, ALTER COLUMN start_time TYPE timestamp,
+ ALTER COLUMN end_time DROP DEFAULT, ALTER COLUMN end_time TYPE timestamp;
+
+ALTER TABLE icinga_endpointstatus
+ ALTER COLUMN status_update_time DROP DEFAULT, ALTER COLUMN status_update_time TYPE timestamp;
+
+ALTER TABLE icinga_zonestatus
+ ALTER COLUMN status_update_time DROP DEFAULT, ALTER COLUMN status_update_time TYPE timestamp;
+
+-- -----------------------------------------
+-- set dbversion
+-- -----------------------------------------
+
+SELECT updatedbversion('1.14.2');
diff --git a/lib/db_ido_pgsql/schema/upgrade/2.8.0.sql b/lib/db_ido_pgsql/schema/upgrade/2.8.0.sql
new file mode 100644
index 0000000..31ab324
--- /dev/null
+++ b/lib/db_ido_pgsql/schema/upgrade/2.8.0.sql
@@ -0,0 +1,32 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.8.0
+--
+-- -----------------------------------------
+-- Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+ALTER TABLE icinga_downtimehistory DROP CONSTRAINT IF EXISTS UQ_downtimehistory;
+ALTER TABLE icinga_scheduleddowntime DROP CONSTRAINT IF EXISTS UQ_scheduleddowntime;
+ALTER TABLE icinga_commenthistory DROP CONSTRAINT IF EXISTS UQ_commenthistory;
+ALTER TABLE icinga_comments DROP CONSTRAINT IF EXISTS UQ_comments;
+
+-- -----------------------------------------
+-- #5458 IDO: Improve downtime removal/cancel
+-- -----------------------------------------
+
+CREATE INDEX idx_downtimehistory_remove ON icinga_downtimehistory (object_id, entry_time, scheduled_start_time, scheduled_end_time);
+CREATE INDEX idx_scheduleddowntime_remove ON icinga_scheduleddowntime (object_id, entry_time, scheduled_start_time, scheduled_end_time);
+
+-- -----------------------------------------
+-- #5492
+-- -----------------------------------------
+
+CREATE INDEX idx_commenthistory_remove ON icinga_commenthistory (object_id, entry_time);
+CREATE INDEX idx_comments_remove ON icinga_comments (object_id, entry_time);
+
+-- -----------------------------------------
+-- set dbversion
+-- -----------------------------------------
+
+SELECT updatedbversion('1.14.3');
diff --git a/lib/db_ido_pgsql/schema/upgrade/2.8.1.sql b/lib/db_ido_pgsql/schema/upgrade/2.8.1.sql
new file mode 100644
index 0000000..05202c0
--- /dev/null
+++ b/lib/db_ido_pgsql/schema/upgrade/2.8.1.sql
@@ -0,0 +1,19 @@
+-- -----------------------------------------
+-- upgrade path for Icinga 2.8.1 (fix for fresh 2.8.0 installation only)
+--
+-- -----------------------------------------
+-- Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+--
+-- Please check https://docs.icinga.com for upgrading information!
+-- -----------------------------------------
+
+ALTER TABLE icinga_downtimehistory DROP CONSTRAINT IF EXISTS UQ_downtimehistory;
+ALTER TABLE icinga_scheduleddowntime DROP CONSTRAINT IF EXISTS UQ_scheduleddowntime;
+ALTER TABLE icinga_commenthistory DROP CONSTRAINT IF EXISTS UQ_commenthistory;
+ALTER TABLE icinga_comments DROP CONSTRAINT IF EXISTS UQ_comments;
+
+-- -----------------------------------------
+-- set dbversion (same as 2.8.0)
+-- -----------------------------------------
+
+SELECT updatedbversion('1.14.3');
diff --git a/lib/icinga/CMakeLists.txt b/lib/icinga/CMakeLists.txt
new file mode 100644
index 0000000..62077bc
--- /dev/null
+++ b/lib/icinga/CMakeLists.txt
@@ -0,0 +1,76 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+mkclass_target(checkable.ti checkable-ti.cpp checkable-ti.hpp)
+mkclass_target(checkcommand.ti checkcommand-ti.cpp checkcommand-ti.hpp)
+mkclass_target(checkresult.ti checkresult-ti.cpp checkresult-ti.hpp)
+mkclass_target(command.ti command-ti.cpp command-ti.hpp)
+mkclass_target(comment.ti comment-ti.cpp comment-ti.hpp)
+mkclass_target(dependency.ti dependency-ti.cpp dependency-ti.hpp)
+mkclass_target(downtime.ti downtime-ti.cpp downtime-ti.hpp)
+mkclass_target(eventcommand.ti eventcommand-ti.cpp eventcommand-ti.hpp)
+mkclass_target(hostgroup.ti hostgroup-ti.cpp hostgroup-ti.hpp)
+mkclass_target(host.ti host-ti.cpp host-ti.hpp)
+mkclass_target(icingaapplication.ti icingaapplication-ti.cpp icingaapplication-ti.hpp)
+mkclass_target(customvarobject.ti customvarobject-ti.cpp customvarobject-ti.hpp)
+mkclass_target(notificationcommand.ti notificationcommand-ti.cpp notificationcommand-ti.hpp)
+mkclass_target(notification.ti notification-ti.cpp notification-ti.hpp)
+mkclass_target(scheduleddowntime.ti scheduleddowntime-ti.cpp scheduleddowntime-ti.hpp)
+mkclass_target(servicegroup.ti servicegroup-ti.cpp servicegroup-ti.hpp)
+mkclass_target(service.ti service-ti.cpp service-ti.hpp)
+mkclass_target(timeperiod.ti timeperiod-ti.cpp timeperiod-ti.hpp)
+mkclass_target(usergroup.ti usergroup-ti.cpp usergroup-ti.hpp)
+mkclass_target(user.ti user-ti.cpp user-ti.hpp)
+
+mkembedconfig_target(icinga-itl.conf icinga-itl.cpp)
+
+set(icinga_SOURCES
+ i2-icinga.hpp icinga-itl.cpp
+ apiactions.cpp apiactions.hpp
+ apievents.cpp apievents.hpp
+ checkable.cpp checkable.hpp checkable-ti.hpp
+ checkable-check.cpp checkable-comment.cpp checkable-dependency.cpp
+ checkable-downtime.cpp checkable-event.cpp checkable-flapping.cpp
+ checkable-notification.cpp checkable-script.cpp
+ checkcommand.cpp checkcommand.hpp checkcommand-ti.hpp
+ checkresult.cpp checkresult.hpp checkresult-ti.hpp
+ cib.cpp cib.hpp
+ clusterevents.cpp clusterevents.hpp clusterevents-check.cpp
+ command.cpp command.hpp command-ti.hpp
+ comment.cpp comment.hpp comment-ti.hpp
+ compatutility.cpp compatutility.hpp
+ customvarobject.cpp customvarobject.hpp customvarobject-ti.hpp
+ dependency.cpp dependency.hpp dependency-ti.hpp dependency-apply.cpp
+ downtime.cpp downtime.hpp downtime-ti.hpp
+ envresolver.cpp envresolver.hpp
+ eventcommand.cpp eventcommand.hpp eventcommand-ti.hpp
+ externalcommandprocessor.cpp externalcommandprocessor.hpp
+ host.cpp host.hpp host-ti.hpp
+ hostgroup.cpp hostgroup.hpp hostgroup-ti.hpp
+ icingaapplication.cpp icingaapplication.hpp icingaapplication-ti.hpp
+ legacytimeperiod.cpp legacytimeperiod.hpp
+ macroprocessor.cpp macroprocessor.hpp
+ macroresolver.hpp
+ notification.cpp notification.hpp notification-ti.hpp notification-apply.cpp
+ notificationcommand.cpp notificationcommand.hpp notificationcommand-ti.hpp
+ objectutils.cpp objectutils.hpp
+ pluginutility.cpp pluginutility.hpp
+ scheduleddowntime.cpp scheduleddowntime.hpp scheduleddowntime-ti.hpp scheduleddowntime-apply.cpp
+ service.cpp service.hpp service-ti.hpp service-apply.cpp
+ servicegroup.cpp servicegroup.hpp servicegroup-ti.hpp
+ timeperiod.cpp timeperiod.hpp timeperiod-ti.hpp
+ user.cpp user.hpp user-ti.hpp
+ usergroup.cpp usergroup.hpp usergroup-ti.hpp
+)
+
+if(ICINGA2_UNITY_BUILD)
+ mkunity_target(icinga icinga icinga_SOURCES)
+endif()
+
+add_library(icinga OBJECT ${icinga_SOURCES})
+
+add_dependencies(icinga base config remote)
+
+set_target_properties (
+ icinga PROPERTIES
+ FOLDER Lib
+)
diff --git a/lib/icinga/apiactions.cpp b/lib/icinga/apiactions.cpp
new file mode 100644
index 0000000..885834e
--- /dev/null
+++ b/lib/icinga/apiactions.cpp
@@ -0,0 +1,962 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/apiactions.hpp"
+#include "icinga/checkable.hpp"
+#include "icinga/service.hpp"
+#include "icinga/servicegroup.hpp"
+#include "icinga/hostgroup.hpp"
+#include "icinga/pluginutility.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/eventcommand.hpp"
+#include "icinga/notificationcommand.hpp"
+#include "icinga/clusterevents.hpp"
+#include "remote/apiaction.hpp"
+#include "remote/apilistener.hpp"
+#include "remote/configobjectslock.hpp"
+#include "remote/filterutility.hpp"
+#include "remote/pkiutility.hpp"
+#include "remote/httputility.hpp"
+#include "base/utility.hpp"
+#include "base/convert.hpp"
+#include "base/defer.hpp"
+#include "remote/actionshandler.hpp"
+#include <fstream>
+
+using namespace icinga;
+
+REGISTER_APIACTION(process_check_result, "Service;Host", &ApiActions::ProcessCheckResult);
+REGISTER_APIACTION(reschedule_check, "Service;Host", &ApiActions::RescheduleCheck);
+REGISTER_APIACTION(send_custom_notification, "Service;Host", &ApiActions::SendCustomNotification);
+REGISTER_APIACTION(delay_notification, "Service;Host", &ApiActions::DelayNotification);
+REGISTER_APIACTION(acknowledge_problem, "Service;Host", &ApiActions::AcknowledgeProblem);
+REGISTER_APIACTION(remove_acknowledgement, "Service;Host", &ApiActions::RemoveAcknowledgement);
+REGISTER_APIACTION(add_comment, "Service;Host", &ApiActions::AddComment);
+REGISTER_APIACTION(remove_comment, "Service;Host;Comment", &ApiActions::RemoveComment);
+REGISTER_APIACTION(schedule_downtime, "Service;Host", &ApiActions::ScheduleDowntime);
+REGISTER_APIACTION(remove_downtime, "Service;Host;Downtime", &ApiActions::RemoveDowntime);
+REGISTER_APIACTION(shutdown_process, "", &ApiActions::ShutdownProcess);
+REGISTER_APIACTION(restart_process, "", &ApiActions::RestartProcess);
+REGISTER_APIACTION(generate_ticket, "", &ApiActions::GenerateTicket);
+REGISTER_APIACTION(execute_command, "Service;Host", &ApiActions::ExecuteCommand);
+
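+/* Builds the common action result dictionary containing 'code' and 'status'
+ * and merges any additional fields into it. */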
+Dictionary::Ptr ApiActions::CreateResult(int code, const String& status,
+ const Dictionary::Ptr& additional)
+{
+ Dictionary::Ptr result = new Dictionary({
+ { "code", code },
+ { "status", status }
+ });
+
+ if (additional)
+ additional->CopyTo(result);
+
+ return result;
+}
+
+Dictionary::Ptr ApiActions::ProcessCheckResult(const ConfigObject::Ptr& object,
+ const Dictionary::Ptr& params)
+{
+ using Result = Checkable::ProcessingResult;
+
+ Checkable::Ptr checkable = static_pointer_cast<Checkable>(object);
+
+ if (!checkable)
+ return ApiActions::CreateResult(404,
+ "Cannot process passive check result for non-existent object.");
+
+ if (!checkable->GetEnablePassiveChecks())
+ return ApiActions::CreateResult(403, "Passive checks are disabled for object '" + checkable->GetName() + "'.");
+
+ if (!checkable->IsReachable(DependencyCheckExecution))
+ return ApiActions::CreateResult(200, "Ignoring passive check result for unreachable object '" + checkable->GetName() + "'.");
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ if (!params->Contains("exit_status"))
+ return ApiActions::CreateResult(400, "Parameter 'exit_status' is required.");
+
+ int exitStatus = HttpUtility::GetLastParameter(params, "exit_status");
+
+ ServiceState state;
+
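+ /* For hosts only exit status 0 or 1 is accepted; services use the regular plugin exit status mapping. */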
+ if (!service) {
+ if (exitStatus == 0)
+ state = ServiceOK;
+ else if (exitStatus == 1)
+ state = ServiceCritical;
+ else
+ return ApiActions::CreateResult(400, "Invalid 'exit_status' for Host "
+ + checkable->GetName() + ".");
+ } else {
+ state = PluginUtility::ExitStatusToState(exitStatus);
+ }
+
+ if (!params->Contains("plugin_output"))
+ return ApiActions::CreateResult(400, "Parameter 'plugin_output' is required");
+
+ CheckResult::Ptr cr = new CheckResult();
+ cr->SetOutput(HttpUtility::GetLastParameter(params, "plugin_output"));
+ cr->SetState(state);
+
+ if (params->Contains("execution_start"))
+ cr->SetExecutionStart(HttpUtility::GetLastParameter(params, "execution_start"));
+
+ if (params->Contains("execution_end"))
+ cr->SetExecutionEnd(HttpUtility::GetLastParameter(params, "execution_end"));
+
+ cr->SetCheckSource(HttpUtility::GetLastParameter(params, "check_source"));
+ cr->SetSchedulingSource(HttpUtility::GetLastParameter(params, "scheduling_source"));
+
+ Value perfData = params->Get("performance_data");
+
+ /* Allow passing a performance data string from Icinga Web 2 in addition to the newer Array notation. */
+ if (perfData.IsString())
+ cr->SetPerformanceData(PluginUtility::SplitPerfdata(perfData));
+ else
+ cr->SetPerformanceData(perfData);
+
+ cr->SetCommand(params->Get("check_command"));
+
+ /* Mark this check result as passive. */
+ cr->SetActive(false);
+
+ /* The result TTL allows overruling the next expected freshness check. */
+ if (params->Contains("ttl"))
+ cr->SetTtl(HttpUtility::GetLastParameter(params, "ttl"));
+
+ Result result = checkable->ProcessCheckResult(cr);
+ switch (result) {
+ case Result::Ok:
+ return ApiActions::CreateResult(200, "Successfully processed check result for object '" + checkable->GetName() + "'.");
+ case Result::NoCheckResult:
+ return ApiActions::CreateResult(400, "Could not process check result for object '" + checkable->GetName() + "' because no check result was passed.");
+ case Result::CheckableInactive:
+ return ApiActions::CreateResult(503, "Could not process check result for object '" + checkable->GetName() + "' because the object is inactive.");
+ case Result::NewerCheckResultPresent:
+ return ApiActions::CreateResult(409, "Newer check result already present. Check result for '" + checkable->GetName() + "' was discarded.");
+ }
+
+ return ApiActions::CreateResult(500, "Unexpected result (" + std::to_string(static_cast<int>(result)) + ") for object '" + checkable->GetName() + "'. Please submit a bug report at https://github.com/Icinga/icinga2");
+}
+
+Dictionary::Ptr ApiActions::RescheduleCheck(const ConfigObject::Ptr& object,
+ const Dictionary::Ptr& params)
+{
+ Checkable::Ptr checkable = static_pointer_cast<Checkable>(object);
+
+ if (!checkable)
+ return ApiActions::CreateResult(404, "Cannot reschedule check for non-existent object.");
+
+ if (Convert::ToBool(HttpUtility::GetLastParameter(params, "force")))
+ checkable->SetForceNextCheck(true);
+
+ double nextCheck;
+ if (params->Contains("next_check"))
+ nextCheck = HttpUtility::GetLastParameter(params, "next_check");
+ else
+ nextCheck = Utility::GetTime();
+
+ checkable->SetNextCheck(nextCheck);
+
+ /* trigger update event for DB IDO */
+ Checkable::OnNextCheckUpdated(checkable);
+
+ return ApiActions::CreateResult(200, "Successfully rescheduled check for object '" + checkable->GetName() + "'.");
+}
+
+Dictionary::Ptr ApiActions::SendCustomNotification(const ConfigObject::Ptr& object,
+ const Dictionary::Ptr& params)
+{
+ Checkable::Ptr checkable = static_pointer_cast<Checkable>(object);
+
+ if (!checkable)
+ return ApiActions::CreateResult(404, "Cannot send notification for non-existent object.");
+
+ if (!params->Contains("author"))
+ return ApiActions::CreateResult(400, "Parameter 'author' is required.");
+
+ if (!params->Contains("comment"))
+ return ApiActions::CreateResult(400, "Parameter 'comment' is required.");
+
+ if (Convert::ToBool(HttpUtility::GetLastParameter(params, "force")))
+ checkable->SetForceNextNotification(true);
+
+ Checkable::OnNotificationsRequested(checkable, NotificationCustom, checkable->GetLastCheckResult(),
+ HttpUtility::GetLastParameter(params, "author"), HttpUtility::GetLastParameter(params, "comment"), nullptr);
+
+ return ApiActions::CreateResult(200, "Successfully sent custom notification for object '" + checkable->GetName() + "'.");
+}
+
+Dictionary::Ptr ApiActions::DelayNotification(const ConfigObject::Ptr& object,
+ const Dictionary::Ptr& params)
+{
+ Checkable::Ptr checkable = static_pointer_cast<Checkable>(object);
+
+ if (!checkable)
+ return ApiActions::CreateResult(404, "Cannot delay notifications for non-existent object");
+
+ if (!params->Contains("timestamp"))
+ return ApiActions::CreateResult(400, "A timestamp is required to delay notifications");
+
+ for (const Notification::Ptr& notification : checkable->GetNotifications()) {
+ notification->SetNextNotification(HttpUtility::GetLastParameter(params, "timestamp"));
+ }
+
+ return ApiActions::CreateResult(200, "Successfully delayed notifications for object '" + checkable->GetName() + "'.");
+}
+
+Dictionary::Ptr ApiActions::AcknowledgeProblem(const ConfigObject::Ptr& object,
+ const Dictionary::Ptr& params)
+{
+ Checkable::Ptr checkable = static_pointer_cast<Checkable>(object);
+
+ if (!checkable)
+ return ApiActions::CreateResult(404, "Cannot acknowledge problem for non-existent object.");
+
+ if (!params->Contains("author") || !params->Contains("comment"))
+ return ApiActions::CreateResult(400, "Acknowledgements require author and comment.");
+
+ AcknowledgementType sticky = AcknowledgementNormal;
+ bool notify = false;
+ bool persistent = false;
+ double timestamp = 0.0;
+
+ if (params->Contains("sticky") && HttpUtility::GetLastParameter(params, "sticky"))
+ sticky = AcknowledgementSticky;
+ if (params->Contains("notify"))
+ notify = HttpUtility::GetLastParameter(params, "notify");
+ if (params->Contains("persistent"))
+ persistent = HttpUtility::GetLastParameter(params, "persistent");
+ if (params->Contains("expiry")) {
+ timestamp = HttpUtility::GetLastParameter(params, "expiry");
+
+ if (timestamp <= Utility::GetTime())
+ return ApiActions::CreateResult(409, "Acknowledgement 'expiry' timestamp must be in the future for object " + checkable->GetName());
+ } else
+ timestamp = 0;
+
+ ObjectLock oLock (checkable);
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ if (!service) {
+ if (host->GetState() == HostUp)
+ return ApiActions::CreateResult(409, "Host " + checkable->GetName() + " is UP.");
+ } else {
+ if (service->GetState() == ServiceOK)
+ return ApiActions::CreateResult(409, "Service " + checkable->GetName() + " is OK.");
+ }
+
+ if (checkable->IsAcknowledged()) {
+ return ApiActions::CreateResult(409, (service ? "Service " : "Host ") + checkable->GetName() + " is already acknowledged.");
+ }
+
+ ConfigObjectsSharedLock lock (std::try_to_lock);
+
+ if (!lock) {
+ return ApiActions::CreateResult(503, "Icinga is reloading.");
+ }
+
+ Comment::AddComment(checkable, CommentAcknowledgement, HttpUtility::GetLastParameter(params, "author"),
+ HttpUtility::GetLastParameter(params, "comment"), persistent, timestamp, sticky == AcknowledgementSticky);
+ checkable->AcknowledgeProblem(HttpUtility::GetLastParameter(params, "author"),
+ HttpUtility::GetLastParameter(params, "comment"), sticky, notify, persistent, Utility::GetTime(), timestamp);
+
+ return ApiActions::CreateResult(200, "Successfully acknowledged problem for object '" + checkable->GetName() + "'.");
+}
+
+Dictionary::Ptr ApiActions::RemoveAcknowledgement(const ConfigObject::Ptr& object,
+ const Dictionary::Ptr& params)
+{
+ Checkable::Ptr checkable = static_pointer_cast<Checkable>(object);
+
+ if (!checkable)
+ return ApiActions::CreateResult(404,
+ "Cannot remove acknowledgement for non-existent checkable object "
+ + object->GetName() + ".");
+
+ ConfigObjectsSharedLock lock (std::try_to_lock);
+
+ if (!lock) {
+ return ApiActions::CreateResult(503, "Icinga is reloading.");
+ }
+
+ String removedBy (HttpUtility::GetLastParameter(params, "author"));
+
+ checkable->ClearAcknowledgement(removedBy);
+ checkable->RemoveAckComments(removedBy);
+
+ return ApiActions::CreateResult(200, "Successfully removed acknowledgement for object '" + checkable->GetName() + "'.");
+}
+
+Dictionary::Ptr ApiActions::AddComment(const ConfigObject::Ptr& object,
+ const Dictionary::Ptr& params)
+{
+ Checkable::Ptr checkable = static_pointer_cast<Checkable>(object);
+
+ if (!checkable)
+ return ApiActions::CreateResult(404, "Cannot add comment for non-existent object");
+
+ if (!params->Contains("author") || !params->Contains("comment"))
+ return ApiActions::CreateResult(400, "Comments require author and comment.");
+
+ double timestamp = 0.0;
+
+ if (params->Contains("expiry")) {
+ timestamp = HttpUtility::GetLastParameter(params, "expiry");
+ }
+
+ ConfigObjectsSharedLock lock (std::try_to_lock);
+
+ if (!lock) {
+ return ApiActions::CreateResult(503, "Icinga is reloading.");
+ }
+
+ String commentName = Comment::AddComment(checkable, CommentUser,
+ HttpUtility::GetLastParameter(params, "author"),
+ HttpUtility::GetLastParameter(params, "comment"), false, timestamp);
+
+ Comment::Ptr comment = Comment::GetByName(commentName);
+
+ Dictionary::Ptr additional = new Dictionary({
+ { "name", commentName },
+ { "legacy_id", comment->GetLegacyId() }
+ });
+
+ return ApiActions::CreateResult(200, "Successfully added comment '"
+ + commentName + "' for object '" + checkable->GetName()
+ + "'.", additional);
+}
+
+Dictionary::Ptr ApiActions::RemoveComment(const ConfigObject::Ptr& object,
+ const Dictionary::Ptr& params)
+{
+ ConfigObjectsSharedLock lock (std::try_to_lock);
+
+ if (!lock) {
+ return ApiActions::CreateResult(503, "Icinga is reloading.");
+ }
+
+ auto author (HttpUtility::GetLastParameter(params, "author"));
+ Checkable::Ptr checkable = dynamic_pointer_cast<Checkable>(object);
+
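+ /* If the action targets a host or service, remove all of its comments;
+ * otherwise the object is expected to be a single Comment. */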
+ if (checkable) {
+ std::set<Comment::Ptr> comments = checkable->GetComments();
+
+ for (const Comment::Ptr& comment : comments) {
+ Comment::RemoveComment(comment->GetName(), true, author);
+ }
+
+ return ApiActions::CreateResult(200, "Successfully removed all comments for object '" + checkable->GetName() + "'.");
+ }
+
+ Comment::Ptr comment = static_pointer_cast<Comment>(object);
+
+ if (!comment)
+ return ApiActions::CreateResult(404, "Cannot remove non-existent comment object.");
+
+ String commentName = comment->GetName();
+
+ Comment::RemoveComment(commentName, true, author);
+
+ return ApiActions::CreateResult(200, "Successfully removed comment '" + commentName + "'.");
+}
+
+Dictionary::Ptr ApiActions::ScheduleDowntime(const ConfigObject::Ptr& object,
+ const Dictionary::Ptr& params)
+{
+ Checkable::Ptr checkable = static_pointer_cast<Checkable>(object);
+
+ if (!checkable)
+ return ApiActions::CreateResult(404, "Can't schedule downtime for non-existent object.");
+
+ if (!params->Contains("start_time") || !params->Contains("end_time") ||
+ !params->Contains("author") || !params->Contains("comment")) {
+
+ return ApiActions::CreateResult(400, "Options 'start_time', 'end_time', 'author' and 'comment' are required");
+ }
+
+ bool fixed = true;
+ if (params->Contains("fixed"))
+ fixed = HttpUtility::GetLastParameter(params, "fixed");
+
+ if (!fixed && !params->Contains("duration"))
+ return ApiActions::CreateResult(400, "Option 'duration' is required for flexible downtime");
+
+ double duration = 0.0;
+ if (params->Contains("duration"))
+ duration = HttpUtility::GetLastParameter(params, "duration");
+
+ String triggerName;
+ if (params->Contains("trigger_name"))
+ triggerName = HttpUtility::GetLastParameter(params, "trigger_name");
+
+ String author = HttpUtility::GetLastParameter(params, "author");
+ String comment = HttpUtility::GetLastParameter(params, "comment");
+ double startTime = HttpUtility::GetLastParameter(params, "start_time");
+ double endTime = HttpUtility::GetLastParameter(params, "end_time");
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ DowntimeChildOptions childOptions = DowntimeNoChildren;
+ if (params->Contains("child_options")) {
+ try {
+ childOptions = Downtime::ChildOptionsFromValue(HttpUtility::GetLastParameter(params, "child_options"));
+ } catch (const std::exception&) {
+ return ApiActions::CreateResult(400, "Option 'child_options' provided an invalid value.");
+ }
+ }
+
+ ConfigObjectsSharedLock lock (std::try_to_lock);
+
+ if (!lock) {
+ return ApiActions::CreateResult(503, "Icinga is reloading.");
+ }
+
+ Downtime::Ptr downtime = Downtime::AddDowntime(checkable, author, comment, startTime, endTime,
+ fixed, triggerName, duration);
+ String downtimeName = downtime->GetName();
+
+ Dictionary::Ptr additional = new Dictionary({
+ { "name", downtimeName },
+ { "legacy_id", downtime->GetLegacyId() }
+ });
+
+ /* If requested via 'all_services', also schedule downtimes for all services when the object is a host. */
+ bool allServices = false;
+
+ if (params->Contains("all_services"))
+ allServices = HttpUtility::GetLastParameter(params, "all_services");
+
+ if (allServices && !service) {
+ ArrayData serviceDowntimes;
+
+ for (const Service::Ptr& hostService : host->GetServices()) {
+ Log(LogNotice, "ApiActions")
+ << "Creating downtime for service " << hostService->GetName() << " on host " << host->GetName();
+
+ Downtime::Ptr serviceDowntime = Downtime::AddDowntime(hostService, author, comment, startTime, endTime,
+ fixed, triggerName, duration, String(), String(), downtimeName);
+ String serviceDowntimeName = serviceDowntime->GetName();
+
+ serviceDowntimes.push_back(new Dictionary({
+ { "name", serviceDowntimeName },
+ { "legacy_id", serviceDowntime->GetLegacyId() }
+ }));
+ }
+
+ additional->Set("service_downtimes", new Array(std::move(serviceDowntimes)));
+ }
+
+ /* Schedule downtime for all child objects. */
+ if (childOptions != DowntimeNoChildren) {
+ /* 'DowntimeTriggeredChildren' schedules child downtimes triggered by the parent downtime.
+ * 'DowntimeNonTriggeredChildren' schedules non-triggered downtimes for all children.
+ */
+ if (childOptions == DowntimeTriggeredChildren)
+ triggerName = downtimeName;
+
+ Log(LogNotice, "ApiActions")
+ << "Processing child options " << childOptions << " for downtime " << downtimeName;
+
+ ArrayData childDowntimes;
+
+ std::set<Checkable::Ptr> allChildren = checkable->GetAllChildren();
+ for (const Checkable::Ptr& child : allChildren) {
+ Host::Ptr childHost;
+ Service::Ptr childService;
+ tie(childHost, childService) = GetHostService(child);
+
+ if (allServices && childService &&
+ allChildren.find(static_pointer_cast<Checkable>(childHost)) != allChildren.end()) {
+ /* When scheduling downtimes for all services and all children, and the current child is a service
+ * whose host is also a child, skip it here. The downtime for this service will be scheduled below
+ * together with the downtimes of all services of that host. Scheduling it below ensures that the
+ * relation from the child service downtime to the child host downtime is set properly. */
+ continue;
+ }
+
+ Log(LogNotice, "ApiActions")
+ << "Scheduling downtime for child object " << child->GetName();
+
+ Downtime::Ptr childDowntime = Downtime::AddDowntime(child, author, comment, startTime, endTime,
+ fixed, triggerName, duration);
+ String childDowntimeName = childDowntime->GetName();
+
+ Log(LogNotice, "ApiActions")
+ << "Add child downtime '" << childDowntimeName << "'.";
+
+ Dictionary::Ptr childAdditional = new Dictionary({
+ { "name", childDowntimeName },
+ { "legacy_id", childDowntime->GetLegacyId() }
+ });
+
+ /* For a host, also schedule all service downtimes if requested. */
+ if (allServices && !childService) {
+ ArrayData childServiceDowntimes;
+
+ for (const Service::Ptr& childService : childHost->GetServices()) {
+ Log(LogNotice, "ApiActions")
+ << "Creating downtime for service " << childService->GetName() << " on child host " << childHost->GetName();
+
+ Downtime::Ptr serviceDowntime = Downtime::AddDowntime(childService, author, comment, startTime, endTime,
+ fixed, triggerName, duration, String(), String(), childDowntimeName);
+ String serviceDowntimeName = serviceDowntime->GetName();
+
+ childServiceDowntimes.push_back(new Dictionary({
+ { "name", serviceDowntimeName },
+ { "legacy_id", serviceDowntime->GetLegacyId() }
+ }));
+ }
+
+ childAdditional->Set("service_downtimes", new Array(std::move(childServiceDowntimes)));
+ }
+
+ childDowntimes.push_back(childAdditional);
+ }
+
+ additional->Set("child_downtimes", new Array(std::move(childDowntimes)));
+ }
+
+ return ApiActions::CreateResult(200, "Successfully scheduled downtime '" +
+ downtimeName + "' for object '" + checkable->GetName() + "'.", additional);
+}
+
+Dictionary::Ptr ApiActions::RemoveDowntime(const ConfigObject::Ptr& object,
+ const Dictionary::Ptr& params)
+{
+ ConfigObjectsSharedLock lock (std::try_to_lock);
+
+ if (!lock) {
+ return ApiActions::CreateResult(503, "Icinga is reloading.");
+ }
+
+ auto author (HttpUtility::GetLastParameter(params, "author"));
+ Checkable::Ptr checkable = dynamic_pointer_cast<Checkable>(object);
+
+ size_t childCount = 0;
+
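+ /* If the action targets a host or service, remove all of its downtimes together with their
+ * child downtimes; otherwise the object is expected to be a single Downtime. */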
+ if (checkable) {
+ std::set<Downtime::Ptr> downtimes = checkable->GetDowntimes();
+
+ for (const Downtime::Ptr& downtime : downtimes) {
+ childCount += downtime->GetChildren().size();
+
+ try {
+ Downtime::RemoveDowntime(downtime->GetName(), true, true, false, author);
+ } catch (const invalid_downtime_removal_error& error) {
+ Log(LogWarning, "ApiActions") << error.what();
+
+ return ApiActions::CreateResult(400, error.what());
+ }
+ }
+
+ return ApiActions::CreateResult(200, "Successfully removed all downtimes for object '" +
+ checkable->GetName() + "' and " + std::to_string(childCount) + " child downtimes.");
+ }
+
+ Downtime::Ptr downtime = static_pointer_cast<Downtime>(object);
+
+ if (!downtime)
+ return ApiActions::CreateResult(404, "Cannot remove non-existent downtime object.");
+
+ childCount += downtime->GetChildren().size();
+
+ try {
+ String downtimeName = downtime->GetName();
+ Downtime::RemoveDowntime(downtimeName, true, true, false, author);
+
+ return ApiActions::CreateResult(200, "Successfully removed downtime '" + downtimeName +
+ "' and " + std::to_string(childCount) + " child downtimes.");
+ } catch (const invalid_downtime_removal_error& error) {
+ Log(LogWarning, "ApiActions") << error.what();
+
+ return ApiActions::CreateResult(400, error.what());
+ }
+}
+
+Dictionary::Ptr ApiActions::ShutdownProcess(const ConfigObject::Ptr& object,
+ const Dictionary::Ptr& params)
+{
+ Application::RequestShutdown();
+
+ return ApiActions::CreateResult(200, "Shutting down Icinga 2.");
+}
+
+Dictionary::Ptr ApiActions::RestartProcess(const ConfigObject::Ptr& object,
+ const Dictionary::Ptr& params)
+{
+ Application::RequestRestart();
+
+ return ApiActions::CreateResult(200, "Restarting Icinga 2.");
+}
+
+Dictionary::Ptr ApiActions::GenerateTicket(const ConfigObject::Ptr&,
+ const Dictionary::Ptr& params)
+{
+ if (!params->Contains("cn"))
+ return ApiActions::CreateResult(400, "Option 'cn' is required");
+
+ String cn = HttpUtility::GetLastParameter(params, "cn");
+
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+ String salt = listener->GetTicketSalt();
+
+ if (salt.IsEmpty())
+ return ApiActions::CreateResult(500, "Ticket salt is not configured in ApiListener object");
+
+ String ticket = PBKDF2_SHA1(cn, salt, 50000);
+
+ Dictionary::Ptr additional = new Dictionary({
+ { "ticket", ticket }
+ });
+
+ return ApiActions::CreateResult(200, "Generated PKI ticket '" + ticket + "' for common name '"
+ + cn + "'.", additional);
+}
+
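+/* Looks up a single object of the given type by name while enforcing the query permissions
+ * of the given API user. Returns nullptr if no matching object is found or accessible. */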
+Value ApiActions::GetSingleObjectByNameUsingPermissions(const String& type, const String& objectName, const ApiUser::Ptr& user)
+{
+ Dictionary::Ptr queryParams = new Dictionary();
+ queryParams->Set("type", type);
+ queryParams->Set(type.ToLower(), objectName);
+
+ QueryDescription qd;
+ qd.Types.insert(type);
+ qd.Permission = "objects/query/" + type;
+
+ std::vector<Value> objs;
+
+ try {
+ objs = FilterUtility::GetFilterTargets(qd, queryParams, user);
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "ApiActions") << DiagnosticInformation(ex);
+ return nullptr;
+ }
+
+ if (objs.empty())
+ return nullptr;
+
+ return objs.at(0);
+}
+
+Dictionary::Ptr ApiActions::ExecuteCommand(const ConfigObject::Ptr& object, const Dictionary::Ptr& params)
+{
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("No ApiListener instance configured."));
+
+ /* Get command_type */
+ String command_type = "EventCommand";
+
+ if (params->Contains("command_type"))
+ command_type = HttpUtility::GetLastParameter(params, "command_type");
+
+ /* Validate command_type */
+ if (command_type != "EventCommand" && command_type != "CheckCommand" && command_type != "NotificationCommand")
+ return ApiActions::CreateResult(400, "Invalid command_type '" + command_type + "'.");
+
+ Checkable::Ptr checkable = dynamic_pointer_cast<Checkable>(object);
+
+ if (!checkable)
+ return ApiActions::CreateResult(404, "Can't start a command execution for a non-existent object.");
+
+ /* Get TTL param */
+ if (!params->Contains("ttl"))
+ return ApiActions::CreateResult(400, "Parameter ttl is required.");
+
+ double ttl = HttpUtility::GetLastParameter(params, "ttl");
+
+ if (ttl <= 0)
+ return ApiActions::CreateResult(400, "Parameter ttl must be greater than 0.");
+
+ ObjectLock oLock (checkable);
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ String endpoint = "$command_endpoint$";
+
+ if (params->Contains("endpoint"))
+ endpoint = HttpUtility::GetLastParameter(params, "endpoint");
+
+ MacroProcessor::ResolverList resolvers;
+ Value macros;
+
+ if (params->Contains("macros")) {
+ macros = HttpUtility::GetLastParameter(params, "macros");
+ if (macros.IsObjectType<Dictionary>()) {
+ resolvers.emplace_back("override", macros);
+ } else {
+ return ApiActions::CreateResult(400, "Parameter macros must be a dictionary.");
+ }
+ }
+
+ if (service)
+ resolvers.emplace_back("service", service);
+
+ resolvers.emplace_back("host", host);
+
+ String resolved_endpoint = MacroProcessor::ResolveMacros(
+ endpoint, resolvers, checkable->GetLastCheckResult(),
+ nullptr, MacroProcessor::EscapeCallback(), nullptr, false
+ );
+
+ if (!ActionsHandler::AuthenticatedApiUser)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Can't find API user."));
+
+ /* Get endpoint */
+ Endpoint::Ptr endpointPtr = GetSingleObjectByNameUsingPermissions(Endpoint::GetTypeName(), resolved_endpoint, ActionsHandler::AuthenticatedApiUser);
+
+ if (!endpointPtr)
+ return ApiActions::CreateResult(404, "Can't find a valid endpoint for '" + resolved_endpoint + "'.");
+
+ /* Return an error when the endpoint differs from the command endpoint of the checkable
+ * and the endpoint's zone cannot access the checkable.
+ * The endpoints are compared to cover the case where command_endpoint is specified on the checkable
+ * but the checkable is not actually present on the agent.
+ */
+ Zone::Ptr endpointZone = endpointPtr->GetZone();
+ Endpoint::Ptr commandEndpoint = checkable->GetCommandEndpoint();
+ if (endpointPtr != commandEndpoint && !endpointZone->CanAccessObject(checkable)) {
+ return ApiActions::CreateResult(
+ 409,
+ "Zone '" + endpointZone->GetName() + "' cannot access checkable '" + checkable->GetName() + "'."
+ );
+ }
+
+ /* Get command */
+ String command;
+
+ if (!params->Contains("command")) {
+ if (command_type == "CheckCommand" ) {
+ command = "$check_command$";
+ } else if (command_type == "EventCommand") {
+ command = "$event_command$";
+ } else if (command_type == "NotificationCommand") {
+ command = "$notification_command$";
+ }
+ } else {
+ command = HttpUtility::GetLastParameter(params, "command");
+ }
+
+ /* Resolve command macro */
+ String resolved_command = MacroProcessor::ResolveMacros(
+ command, resolvers, checkable->GetLastCheckResult(), nullptr,
+ MacroProcessor::EscapeCallback(), nullptr, false
+ );
+
+ CheckResult::Ptr cr = checkable->GetLastCheckResult();
+
+ if (!cr)
+ cr = new CheckResult();
+
+ /* Check if resolved_command exists and it is of type command_type */
+ Dictionary::Ptr execMacros = new Dictionary();
+
+ MacroResolver::OverrideMacros = macros;
+ Defer o ([]() {
+ MacroResolver::OverrideMacros = nullptr;
+ });
+
+ /* Create execution parameters */
+ Dictionary::Ptr execParams = new Dictionary();
+
+ if (command_type == "CheckCommand") {
+ CheckCommand::Ptr cmd = GetSingleObjectByNameUsingPermissions(CheckCommand::GetTypeName(), resolved_command, ActionsHandler::AuthenticatedApiUser);
+
+ if (!cmd)
+ return ApiActions::CreateResult(404, "Can't find a valid " + command_type + " for '" + resolved_command + "'.");
+ else {
+ CheckCommand::ExecuteOverride = cmd;
+ Defer resetCheckCommandOverride([]() {
+ CheckCommand::ExecuteOverride = nullptr;
+ });
+ cmd->Execute(checkable, cr, execMacros, false);
+ }
+ } else if (command_type == "EventCommand") {
+ EventCommand::Ptr cmd = GetSingleObjectByNameUsingPermissions(EventCommand::GetTypeName(), resolved_command, ActionsHandler::AuthenticatedApiUser);
+
+ if (!cmd)
+ return ApiActions::CreateResult(404, "Can't find a valid " + command_type + " for '" + resolved_command + "'.");
+ else {
+ EventCommand::ExecuteOverride = cmd;
+ Defer resetEventCommandOverride ([]() {
+ EventCommand::ExecuteOverride = nullptr;
+ });
+ cmd->Execute(checkable, execMacros, false);
+ }
+ } else if (command_type == "NotificationCommand") {
+ NotificationCommand::Ptr cmd = GetSingleObjectByNameUsingPermissions(NotificationCommand::GetTypeName(), resolved_command, ActionsHandler::AuthenticatedApiUser);
+
+ if (!cmd)
+ return ApiActions::CreateResult(404, "Can't find a valid " + command_type + " for '" + resolved_command + "'.");
+ else {
+ /* Get user */
+ String user_string = "";
+
+ if (params->Contains("user"))
+ user_string = HttpUtility::GetLastParameter(params, "user");
+
+ /* Resolve user macro */
+ String resolved_user = MacroProcessor::ResolveMacros(
+ user_string, resolvers, checkable->GetLastCheckResult(), nullptr,
+ MacroProcessor::EscapeCallback(), nullptr, false
+ );
+
+ User::Ptr user = GetSingleObjectByNameUsingPermissions(User::GetTypeName(), resolved_user, ActionsHandler::AuthenticatedApiUser);
+
+ if (!user)
+ return ApiActions::CreateResult(404, "Can't find a valid user for '" + resolved_user + "'.");
+
+ execParams->Set("user", user->GetName());
+
+ /* Get notification */
+ String notification_string = "";
+
+ if (params->Contains("notification"))
+ notification_string = HttpUtility::GetLastParameter(params, "notification");
+
+ /* Resolve notification macro */
+ String resolved_notification = MacroProcessor::ResolveMacros(
+ notification_string, resolvers, checkable->GetLastCheckResult(), nullptr,
+ MacroProcessor::EscapeCallback(), nullptr, false
+ );
+
+ Notification::Ptr notification = GetSingleObjectByNameUsingPermissions(Notification::GetTypeName(), resolved_notification, ActionsHandler::AuthenticatedApiUser);
+
+ if (!notification)
+ return ApiActions::CreateResult(404, "Can't find a valid notification for '" + resolved_notification + "'.");
+
+ execParams->Set("notification", notification->GetName());
+
+ NotificationCommand::ExecuteOverride = cmd;
+ Defer resetNotificationCommandOverride ([]() {
+ NotificationCommand::ExecuteOverride = nullptr;
+ });
+
+ cmd->Execute(notification, user, cr, NotificationType::NotificationCustom,
+ ActionsHandler::AuthenticatedApiUser->GetName(), "", execMacros, false);
+ }
+ }
+
+ /* This generates a UUID */
+ String uuid = Utility::NewUniqueID();
+
+ /* Create the deadline */
+ double deadline = Utility::GetTime() + ttl;
+
+ /* Update executions */
+ Dictionary::Ptr pending_execution = new Dictionary();
+ pending_execution->Set("pending", true);
+ pending_execution->Set("deadline", deadline);
+ pending_execution->Set("endpoint", resolved_endpoint);
+ Dictionary::Ptr executions = checkable->GetExecutions();
+
+ if (!executions)
+ executions = new Dictionary();
+
+ executions->Set(uuid, pending_execution);
+ checkable->SetExecutions(executions);
+
+ /* Broadcast the update */
+ Dictionary::Ptr executionsToBroadcast = new Dictionary();
+ executionsToBroadcast->Set(uuid, pending_execution);
+ Dictionary::Ptr updateParams = new Dictionary();
+ updateParams->Set("host", host->GetName());
+
+ if (service)
+ updateParams->Set("service", service->GetShortName());
+
+ updateParams->Set("executions", executionsToBroadcast);
+
+ Dictionary::Ptr updateMessage = new Dictionary();
+ updateMessage->Set("jsonrpc", "2.0");
+ updateMessage->Set("method", "event::UpdateExecutions");
+ updateMessage->Set("params", updateParams);
+
+ MessageOrigin::Ptr origin = new MessageOrigin();
+ listener->RelayMessage(origin, checkable, updateMessage, true);
+
+ /* Populate execution parameters */
+ if (command_type == "CheckCommand")
+ execParams->Set("command_type", "check_command");
+ else if (command_type == "EventCommand")
+ execParams->Set("command_type", "event_command");
+ else if (command_type == "NotificationCommand")
+ execParams->Set("command_type", "notification_command");
+
+ execParams->Set("command", resolved_command);
+ execParams->Set("host", host->GetName());
+
+ if (service)
+ execParams->Set("service", service->GetShortName());
+
+ /*
+ * If the host/service object specifies the 'check_timeout' attribute,
+ * forward this to the remote endpoint to limit the command execution time.
+ */
+ if (!checkable->GetCheckTimeout().IsEmpty())
+ execParams->Set("check_timeout", checkable->GetCheckTimeout());
+
+ execParams->Set("source", uuid);
+ execParams->Set("deadline", deadline);
+ execParams->Set("macros", execMacros);
+ execParams->Set("endpoint", resolved_endpoint);
+
+ /* Execute command */
+ bool local = endpointPtr == Endpoint::GetLocalEndpoint();
+
+ if (local) {
+ ClusterEvents::ExecuteCommandAPIHandler(origin, execParams);
+ } else {
+ /* Check if the child endpoints have Icinga version >= 2.13 */
+ Zone::Ptr localZone = Zone::GetLocalZone();
+ for (const Zone::Ptr& zone : ConfigType::GetObjectsByType<Zone>()) {
+ /* Fetch immediate child zone members */
+ if (zone->GetParent() == localZone && zone->CanAccessObject(endpointPtr->GetZone())) {
+ std::set<Endpoint::Ptr> endpoints = zone->GetEndpoints();
+
+ for (const Endpoint::Ptr& childEndpoint : endpoints) {
+ if (!(childEndpoint->GetCapabilities() & (uint_fast64_t)ApiCapabilities::ExecuteArbitraryCommand)) {
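+ /* Record the execution as finished with exit code 126 and stop here,
+ * since this endpoint cannot execute arbitrary commands. */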
+ /* Update execution */
+ double now = Utility::GetTime();
+ pending_execution->Set("exit", 126);
+ pending_execution->Set("output", "Endpoint '" + childEndpoint->GetName() + "' doesn't support executing arbitrary commands.");
+ pending_execution->Set("start", now);
+ pending_execution->Set("end", now);
+ pending_execution->Remove("pending");
+
+ listener->RelayMessage(origin, checkable, updateMessage, true);
+
+ Dictionary::Ptr result = new Dictionary();
+ result->Set("checkable", checkable->GetName());
+ result->Set("execution", uuid);
+ return ApiActions::CreateResult(202, "Accepted", result);
+ }
+ }
+ }
+ }
+
+ Dictionary::Ptr execMessage = new Dictionary();
+ execMessage->Set("jsonrpc", "2.0");
+ execMessage->Set("method", "event::ExecuteCommand");
+ execMessage->Set("params", execParams);
+
+ listener->RelayMessage(origin, endpointPtr->GetZone(), execMessage, true);
+ }
+
+ Dictionary::Ptr result = new Dictionary();
+ result->Set("checkable", checkable->GetName());
+ result->Set("execution", uuid);
+ return ApiActions::CreateResult(202, "Accepted", result);
+}
diff --git a/lib/icinga/apiactions.hpp b/lib/icinga/apiactions.hpp
new file mode 100644
index 0000000..b6ba835
--- /dev/null
+++ b/lib/icinga/apiactions.hpp
@@ -0,0 +1,42 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef APIACTIONS_H
+#define APIACTIONS_H
+
+#include "icinga/i2-icinga.hpp"
+#include "base/configobject.hpp"
+#include "base/dictionary.hpp"
+#include "remote/apiuser.hpp"
+
+namespace icinga
+{
+
+/**
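+ * Implements the actions exposed through the Icinga 2 API.
+ *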
+ * @ingroup icinga
+ */
+class ApiActions
+{
+public:
+ static Dictionary::Ptr ProcessCheckResult(const ConfigObject::Ptr& object, const Dictionary::Ptr& params);
+ static Dictionary::Ptr RescheduleCheck(const ConfigObject::Ptr& object, const Dictionary::Ptr& params);
+ static Dictionary::Ptr SendCustomNotification(const ConfigObject::Ptr& object, const Dictionary::Ptr& params);
+ static Dictionary::Ptr DelayNotification(const ConfigObject::Ptr& object, const Dictionary::Ptr& params);
+ static Dictionary::Ptr AcknowledgeProblem(const ConfigObject::Ptr& object, const Dictionary::Ptr& params);
+ static Dictionary::Ptr RemoveAcknowledgement(const ConfigObject::Ptr& object, const Dictionary::Ptr& params);
+ static Dictionary::Ptr AddComment(const ConfigObject::Ptr& object, const Dictionary::Ptr& params);
+ static Dictionary::Ptr RemoveComment(const ConfigObject::Ptr& object, const Dictionary::Ptr& params);
+ static Dictionary::Ptr ScheduleDowntime(const ConfigObject::Ptr& object, const Dictionary::Ptr& params);
+ static Dictionary::Ptr RemoveDowntime(const ConfigObject::Ptr& object, const Dictionary::Ptr& params);
+ static Dictionary::Ptr ShutdownProcess(const ConfigObject::Ptr& object, const Dictionary::Ptr& params);
+ static Dictionary::Ptr RestartProcess(const ConfigObject::Ptr& object, const Dictionary::Ptr& params);
+ static Dictionary::Ptr GenerateTicket(const ConfigObject::Ptr& object, const Dictionary::Ptr& params);
+ static Dictionary::Ptr ExecuteCommand(const ConfigObject::Ptr& object, const Dictionary::Ptr& params);
+
+private:
+ static Dictionary::Ptr CreateResult(int code, const String& status, const Dictionary::Ptr& additional = nullptr);
+ static Value GetSingleObjectByNameUsingPermissions(const String& type, const String& value, const ApiUser::Ptr& user);
+};
+
+}
+
+#endif /* APIACTIONS_H */
diff --git a/lib/icinga/apievents.cpp b/lib/icinga/apievents.cpp
new file mode 100644
index 0000000..53008fd
--- /dev/null
+++ b/lib/icinga/apievents.cpp
@@ -0,0 +1,438 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/apievents.hpp"
+#include "icinga/service.hpp"
+#include "icinga/notificationcommand.hpp"
+#include "base/initialize.hpp"
+#include "base/serializer.hpp"
+#include "base/logger.hpp"
+
+using namespace icinga;
+
+INITIALIZE_ONCE(&ApiEvents::StaticInitialize);
+
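+/* Connects the API event handlers to the corresponding Icinga signals once at startup. */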
+void ApiEvents::StaticInitialize()
+{
+ Checkable::OnNewCheckResult.connect(&ApiEvents::CheckResultHandler);
+ Checkable::OnStateChange.connect(&ApiEvents::StateChangeHandler);
+ Checkable::OnNotificationSentToAllUsers.connect(&ApiEvents::NotificationSentToAllUsersHandler);
+
+ Checkable::OnFlappingChanged.connect(&ApiEvents::FlappingChangedHandler);
+
+ Checkable::OnAcknowledgementSet.connect(&ApiEvents::AcknowledgementSetHandler);
+ Checkable::OnAcknowledgementCleared.connect(&ApiEvents::AcknowledgementClearedHandler);
+
+ Comment::OnCommentAdded.connect(&ApiEvents::CommentAddedHandler);
+ Comment::OnCommentRemoved.connect(&ApiEvents::CommentRemovedHandler);
+
+ Downtime::OnDowntimeAdded.connect(&ApiEvents::DowntimeAddedHandler);
+ Downtime::OnDowntimeRemoved.connect(&ApiEvents::DowntimeRemovedHandler);
+ Downtime::OnDowntimeStarted.connect(&ApiEvents::DowntimeStartedHandler);
+ Downtime::OnDowntimeTriggered.connect(&ApiEvents::DowntimeTriggeredHandler);
+
+ ConfigObject::OnActiveChanged.connect(&ApiEvents::OnActiveChangedHandler);
+ ConfigObject::OnVersionChanged.connect(&ApiEvents::OnVersionChangedHandler);
+}
+
+void ApiEvents::CheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, const MessageOrigin::Ptr& origin)
+{
+ std::vector<EventQueue::Ptr> queues = EventQueue::GetQueuesForType("CheckResult");
+ auto inboxes (EventsRouter::GetInstance().GetInboxes(EventType::CheckResult));
+
+ if (queues.empty() && !inboxes)
+ return;
+
+ Log(LogDebug, "ApiEvents", "Processing event type 'CheckResult'.");
+
+ Dictionary::Ptr result = new Dictionary();
+ result->Set("type", "CheckResult");
+ result->Set("timestamp", Utility::GetTime());
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ result->Set("host", host->GetName());
+ if (service)
+ result->Set("service", service->GetShortName());
+
+ result->Set("check_result", Serialize(cr));
+
+ result->Set("downtime_depth", checkable->GetDowntimeDepth());
+ result->Set("acknowledgement", checkable->IsAcknowledged());
+
+ for (const EventQueue::Ptr& queue : queues) {
+ queue->ProcessEvent(result);
+ }
+
+ inboxes.Push(std::move(result));
+}
+
+void ApiEvents::StateChangeHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, StateType type, const MessageOrigin::Ptr& origin)
+{
+ std::vector<EventQueue::Ptr> queues = EventQueue::GetQueuesForType("StateChange");
+ auto inboxes (EventsRouter::GetInstance().GetInboxes(EventType::StateChange));
+
+ if (queues.empty() && !inboxes)
+ return;
+
+ Log(LogDebug, "ApiEvents", "Processing event type 'StateChange'.");
+
+ Dictionary::Ptr result = new Dictionary();
+ result->Set("type", "StateChange");
+ result->Set("timestamp", Utility::GetTime());
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ result->Set("host", host->GetName());
+ if (service)
+ result->Set("service", service->GetShortName());
+
+ result->Set("state", service ? static_cast<int>(service->GetState()) : static_cast<int>(host->GetState()));
+ result->Set("state_type", checkable->GetStateType());
+ result->Set("check_result", Serialize(cr));
+
+ result->Set("downtime_depth", checkable->GetDowntimeDepth());
+ result->Set("acknowledgement", checkable->IsAcknowledged());
+
+ for (const EventQueue::Ptr& queue : queues) {
+ queue->ProcessEvent(result);
+ }
+
+ inboxes.Push(std::move(result));
+}
+
+void ApiEvents::NotificationSentToAllUsersHandler(const Notification::Ptr& notification,
+ const Checkable::Ptr& checkable, const std::set<User::Ptr>& users, NotificationType type,
+ const CheckResult::Ptr& cr, const String& author, const String& text, const MessageOrigin::Ptr& origin)
+{
+ std::vector<EventQueue::Ptr> queues = EventQueue::GetQueuesForType("Notification");
+ auto inboxes (EventsRouter::GetInstance().GetInboxes(EventType::Notification));
+
+ if (queues.empty() && !inboxes)
+ return;
+
+ Log(LogDebug, "ApiEvents", "Processing event type 'Notification'.");
+
+ Dictionary::Ptr result = new Dictionary();
+ result->Set("type", "Notification");
+ result->Set("timestamp", Utility::GetTime());
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ result->Set("host", host->GetName());
+ if (service)
+ result->Set("service", service->GetShortName());
+
+ NotificationCommand::Ptr command = notification->GetCommand();
+
+ if (command)
+ result->Set("command", command->GetName());
+
+ ArrayData userNames;
+
+ for (const User::Ptr& user : users) {
+ userNames.push_back(user->GetName());
+ }
+
+ result->Set("users", new Array(std::move(userNames)));
+ result->Set("notification_type", Notification::NotificationTypeToStringCompat(type)); //TODO: Change this to our own types.
+ result->Set("author", author);
+ result->Set("text", text);
+ result->Set("check_result", Serialize(cr));
+
+ for (const EventQueue::Ptr& queue : queues) {
+ queue->ProcessEvent(result);
+ }
+
+ inboxes.Push(std::move(result));
+}
+
+void ApiEvents::FlappingChangedHandler(const Checkable::Ptr& checkable, const MessageOrigin::Ptr& origin)
+{
+ std::vector<EventQueue::Ptr> queues = EventQueue::GetQueuesForType("Flapping");
+ auto inboxes (EventsRouter::GetInstance().GetInboxes(EventType::Flapping));
+
+ if (queues.empty() && !inboxes)
+ return;
+
+ Log(LogDebug, "ApiEvents", "Processing event type 'Flapping'.");
+
+ Dictionary::Ptr result = new Dictionary();
+ result->Set("type", "Flapping");
+ result->Set("timestamp", Utility::GetTime());
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ result->Set("host", host->GetName());
+ if (service)
+ result->Set("service", service->GetShortName());
+
+ result->Set("state", service ? static_cast<int>(service->GetState()) : static_cast<int>(host->GetState()));
+ result->Set("state_type", checkable->GetStateType());
+ result->Set("is_flapping", checkable->IsFlapping());
+ result->Set("flapping_current", checkable->GetFlappingCurrent());
+ result->Set("threshold_low", checkable->GetFlappingThresholdLow());
+ result->Set("threshold_high", checkable->GetFlappingThresholdHigh());
+
+ for (const EventQueue::Ptr& queue : queues) {
+ queue->ProcessEvent(result);
+ }
+
+ inboxes.Push(std::move(result));
+}
+
+void ApiEvents::AcknowledgementSetHandler(const Checkable::Ptr& checkable,
+ const String& author, const String& comment, AcknowledgementType type,
+ bool notify, bool persistent, double, double expiry, const MessageOrigin::Ptr& origin)
+{
+ std::vector<EventQueue::Ptr> queues = EventQueue::GetQueuesForType("AcknowledgementSet");
+ auto inboxes (EventsRouter::GetInstance().GetInboxes(EventType::AcknowledgementSet));
+
+ if (queues.empty() && !inboxes)
+ return;
+
+ Log(LogDebug, "ApiEvents", "Processing event type 'AcknowledgementSet'.");
+
+ Dictionary::Ptr result = new Dictionary();
+ result->Set("type", "AcknowledgementSet");
+ result->Set("timestamp", Utility::GetTime());
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ result->Set("host", host->GetName());
+ if (service)
+ result->Set("service", service->GetShortName());
+
+ result->Set("state", service ? static_cast<int>(service->GetState()) : static_cast<int>(host->GetState()));
+ result->Set("state_type", checkable->GetStateType());
+
+ result->Set("author", author);
+ result->Set("comment", comment);
+ result->Set("acknowledgement_type", type);
+ result->Set("notify", notify);
+ result->Set("persistent", persistent);
+ result->Set("expiry", expiry);
+
+ for (const EventQueue::Ptr& queue : queues) {
+ queue->ProcessEvent(result);
+ }
+
+ inboxes.Push(std::move(result));
+}
+
+void ApiEvents::AcknowledgementClearedHandler(const Checkable::Ptr& checkable, const String& removedBy, double, const MessageOrigin::Ptr& origin)
+{
+ std::vector<EventQueue::Ptr> queues = EventQueue::GetQueuesForType("AcknowledgementCleared");
+ auto inboxes (EventsRouter::GetInstance().GetInboxes(EventType::AcknowledgementCleared));
+
+ if (queues.empty() && !inboxes)
+ return;
+
+ Log(LogDebug, "ApiEvents", "Processing event type 'AcknowledgementCleared'.");
+
+ Dictionary::Ptr result = new Dictionary();
+ result->Set("type", "AcknowledgementCleared");
+ result->Set("timestamp", Utility::GetTime());
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ result->Set("host", host->GetName());
+ if (service)
+ result->Set("service", service->GetShortName());
+
+ result->Set("state", service ? static_cast<int>(service->GetState()) : static_cast<int>(host->GetState()));
+ result->Set("state_type", checkable->GetStateType());
+ result->Set("acknowledgement_type", AcknowledgementNone);
+
+ for (const EventQueue::Ptr& queue : queues) {
+ queue->ProcessEvent(result);
+ }
+
+ inboxes.Push(std::move(result));
+}
+
+void ApiEvents::CommentAddedHandler(const Comment::Ptr& comment)
+{
+ std::vector<EventQueue::Ptr> queues = EventQueue::GetQueuesForType("CommentAdded");
+ auto inboxes (EventsRouter::GetInstance().GetInboxes(EventType::CommentAdded));
+
+ if (queues.empty() && !inboxes)
+ return;
+
+ Log(LogDebug, "ApiEvents", "Processing event type 'CommentAdded'.");
+
+ Dictionary::Ptr result = new Dictionary({
+ { "type", "CommentAdded" },
+ { "timestamp", Utility::GetTime() },
+ { "comment", Serialize(comment, FAConfig | FAState) }
+ });
+
+ for (const EventQueue::Ptr& queue : queues) {
+ queue->ProcessEvent(result);
+ }
+
+ inboxes.Push(std::move(result));
+}
+
+void ApiEvents::CommentRemovedHandler(const Comment::Ptr& comment)
+{
+ std::vector<EventQueue::Ptr> queues = EventQueue::GetQueuesForType("CommentRemoved");
+ auto inboxes (EventsRouter::GetInstance().GetInboxes(EventType::CommentRemoved));
+
+ if (queues.empty() && !inboxes)
+ return;
+
+ Log(LogDebug, "ApiEvents", "Processing event type 'CommentRemoved'.");
+
+ Dictionary::Ptr result = new Dictionary({
+ { "type", "CommentRemoved" },
+ { "timestamp", Utility::GetTime() },
+ { "comment", Serialize(comment, FAConfig | FAState) }
+ });
+
+ for (const EventQueue::Ptr& queue : queues) {
+ queue->ProcessEvent(result);
+ }
+
+ inboxes.Push(std::move(result));
+}
+
+void ApiEvents::DowntimeAddedHandler(const Downtime::Ptr& downtime)
+{
+ std::vector<EventQueue::Ptr> queues = EventQueue::GetQueuesForType("DowntimeAdded");
+ auto inboxes (EventsRouter::GetInstance().GetInboxes(EventType::DowntimeAdded));
+
+ if (queues.empty() && !inboxes)
+ return;
+
+ Log(LogDebug, "ApiEvents", "Processing event type 'DowntimeAdded'.");
+
+ Dictionary::Ptr result = new Dictionary({
+ { "type", "DowntimeAdded" },
+ { "timestamp", Utility::GetTime() },
+ { "downtime", Serialize(downtime, FAConfig | FAState) }
+ });
+
+ for (const EventQueue::Ptr& queue : queues) {
+ queue->ProcessEvent(result);
+ }
+
+ inboxes.Push(std::move(result));
+}
+
+void ApiEvents::DowntimeRemovedHandler(const Downtime::Ptr& downtime)
+{
+ std::vector<EventQueue::Ptr> queues = EventQueue::GetQueuesForType("DowntimeRemoved");
+ auto inboxes (EventsRouter::GetInstance().GetInboxes(EventType::DowntimeRemoved));
+
+ if (queues.empty() && !inboxes)
+ return;
+
+ Log(LogDebug, "ApiEvents", "Processing event type 'DowntimeRemoved'.");
+
+ Dictionary::Ptr result = new Dictionary({
+ { "type", "DowntimeRemoved" },
+ { "timestamp", Utility::GetTime() },
+ { "downtime", Serialize(downtime, FAConfig | FAState) }
+ });
+
+ for (const EventQueue::Ptr& queue : queues) {
+ queue->ProcessEvent(result);
+ }
+
+ inboxes.Push(std::move(result));
+}
+
+void ApiEvents::DowntimeStartedHandler(const Downtime::Ptr& downtime)
+{
+ std::vector<EventQueue::Ptr> queues = EventQueue::GetQueuesForType("DowntimeStarted");
+ auto inboxes (EventsRouter::GetInstance().GetInboxes(EventType::DowntimeStarted));
+
+ if (queues.empty() && !inboxes)
+ return;
+
+ Log(LogDebug, "ApiEvents", "Processing event type 'DowntimeStarted'.");
+
+ Dictionary::Ptr result = new Dictionary({
+ { "type", "DowntimeStarted" },
+ { "timestamp", Utility::GetTime() },
+ { "downtime", Serialize(downtime, FAConfig | FAState) }
+ });
+
+ for (const EventQueue::Ptr& queue : queues) {
+ queue->ProcessEvent(result);
+ }
+
+ inboxes.Push(std::move(result));
+}
+
+void ApiEvents::DowntimeTriggeredHandler(const Downtime::Ptr& downtime)
+{
+ std::vector<EventQueue::Ptr> queues = EventQueue::GetQueuesForType("DowntimeTriggered");
+ auto inboxes (EventsRouter::GetInstance().GetInboxes(EventType::DowntimeTriggered));
+
+ if (queues.empty() && !inboxes)
+ return;
+
+ Log(LogDebug, "ApiEvents", "Processing event type 'DowntimeTriggered'.");
+
+ Dictionary::Ptr result = new Dictionary({
+ { "type", "DowntimeTriggered" },
+ { "timestamp", Utility::GetTime() },
+ { "downtime", Serialize(downtime, FAConfig | FAState) }
+ });
+
+ for (const EventQueue::Ptr& queue : queues) {
+ queue->ProcessEvent(result);
+ }
+
+ inboxes.Push(std::move(result));
+}
+
+void ApiEvents::OnActiveChangedHandler(const ConfigObject::Ptr& object, const Value&)
+{
+ if (object->IsActive()) {
+ ApiEvents::SendObjectChangeEvent(object, EventType::ObjectCreated, "ObjectCreated");
+ } else if (!object->IsActive() && !object->GetExtension("ConfigObjectDeleted").IsEmpty()) {
+ ApiEvents::SendObjectChangeEvent(object, EventType::ObjectDeleted, "ObjectDeleted");
+ }
+}
+
+void ApiEvents::OnVersionChangedHandler(const ConfigObject::Ptr& object, const Value&)
+{
+ ApiEvents::SendObjectChangeEvent(object, EventType::ObjectModified, "ObjectModified");
+}
+
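+/**
+ * Builds a generic object change event (type, timestamp, object type and name)
+ * and forwards it to all event queues and inboxes registered for the given event type.
+ */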
+void ApiEvents::SendObjectChangeEvent(const ConfigObject::Ptr& object, const EventType& eventType, const String& eventQueue) {
+ std::vector<EventQueue::Ptr> queues = EventQueue::GetQueuesForType(eventQueue);
+ auto inboxes (EventsRouter::GetInstance().GetInboxes(eventType));
+
+ if (queues.empty() && !inboxes)
+ return;
+
+ Log(LogDebug, "ApiEvents") << "Processing event type '" + eventQueue + "'.";
+
+ Dictionary::Ptr result = new Dictionary ({
+ {"type", eventQueue},
+ {"timestamp", Utility::GetTime()},
+ {"object_type", object->GetReflectionType()->GetName()},
+ {"object_name", object->GetName()},
+ });
+
+ for (const EventQueue::Ptr& queue : queues) {
+ queue->ProcessEvent(result);
+ }
+
+ inboxes.Push(std::move(result));
+}
diff --git a/lib/icinga/apievents.hpp b/lib/icinga/apievents.hpp
new file mode 100644
index 0000000..07d5c60
--- /dev/null
+++ b/lib/icinga/apievents.hpp
@@ -0,0 +1,51 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef APIEVENTS_H
+#define APIEVENTS_H
+
+#include "remote/eventqueue.hpp"
+#include "icinga/checkable.hpp"
+#include "icinga/host.hpp"
+
+namespace icinga
+{
+
+/**
+ * @ingroup icinga
+ */
+class ApiEvents
+{
+public:
+ static void StaticInitialize();
+
+ static void CheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, const MessageOrigin::Ptr& origin);
+ static void StateChangeHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, StateType type, const MessageOrigin::Ptr& origin);
+
+ static void NotificationSentToAllUsersHandler(const Notification::Ptr& notification, const Checkable::Ptr& checkable,
+ const std::set<User::Ptr>& users, NotificationType type, const CheckResult::Ptr& cr, const String& author,
+ const String& text, const MessageOrigin::Ptr& origin);
+
+ static void FlappingChangedHandler(const Checkable::Ptr& checkable, const MessageOrigin::Ptr& origin);
+
+ static void AcknowledgementSetHandler(const Checkable::Ptr& checkable,
+ const String& author, const String& comment, AcknowledgementType type,
+ bool notify, bool persistent, double, double expiry, const MessageOrigin::Ptr& origin);
+ static void AcknowledgementClearedHandler(const Checkable::Ptr& checkable, const String& removedBy, double, const MessageOrigin::Ptr& origin);
+
+ static void CommentAddedHandler(const Comment::Ptr& comment);
+ static void CommentRemovedHandler(const Comment::Ptr& comment);
+
+ static void DowntimeAddedHandler(const Downtime::Ptr& downtime);
+ static void DowntimeRemovedHandler(const Downtime::Ptr& downtime);
+ static void DowntimeStartedHandler(const Downtime::Ptr& downtime);
+ static void DowntimeTriggeredHandler(const Downtime::Ptr& downtime);
+
+ static void OnActiveChangedHandler(const ConfigObject::Ptr& object, const Value&);
+ static void OnVersionChangedHandler(const ConfigObject::Ptr& object, const Value&);
+ static void SendObjectChangeEvent(const ConfigObject::Ptr& object, const EventType& eventType, const String& eventQueue);
+};
+
+}
+
+#endif /* APIEVENTS_H */
diff --git a/lib/icinga/checkable-check.cpp b/lib/icinga/checkable-check.cpp
new file mode 100644
index 0000000..efa9477
--- /dev/null
+++ b/lib/icinga/checkable-check.cpp
@@ -0,0 +1,709 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/checkable.hpp"
+#include "icinga/service.hpp"
+#include "icinga/host.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "icinga/cib.hpp"
+#include "icinga/clusterevents.hpp"
+#include "remote/messageorigin.hpp"
+#include "remote/apilistener.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+#include "base/convert.hpp"
+#include "base/utility.hpp"
+#include "base/context.hpp"
+
+using namespace icinga;
+
+boost::signals2::signal<void (const Checkable::Ptr&, const CheckResult::Ptr&, const MessageOrigin::Ptr&)> Checkable::OnNewCheckResult;
+boost::signals2::signal<void (const Checkable::Ptr&, const CheckResult::Ptr&, StateType, const MessageOrigin::Ptr&)> Checkable::OnStateChange;
+boost::signals2::signal<void (const Checkable::Ptr&, const CheckResult::Ptr&, std::set<Checkable::Ptr>, const MessageOrigin::Ptr&)> Checkable::OnReachabilityChanged;
+boost::signals2::signal<void (const Checkable::Ptr&, NotificationType, const CheckResult::Ptr&, const String&, const String&, const MessageOrigin::Ptr&)> Checkable::OnNotificationsRequested;
+boost::signals2::signal<void (const Checkable::Ptr&)> Checkable::OnNextCheckUpdated;
+
+Atomic<uint_fast64_t> Checkable::CurrentConcurrentChecks (0);
+
+std::mutex Checkable::m_StatsMutex;
+int Checkable::m_PendingChecks = 0;
+std::condition_variable Checkable::m_PendingChecksCV;
+
+CheckCommand::Ptr Checkable::GetCheckCommand() const
+{
+ return dynamic_pointer_cast<CheckCommand>(NavigateCheckCommandRaw());
+}
+
+TimePeriod::Ptr Checkable::GetCheckPeriod() const
+{
+ return TimePeriod::GetByName(GetCheckPeriodRaw());
+}
+
+void Checkable::SetSchedulingOffset(long offset)
+{
+ m_SchedulingOffset = offset;
+}
+
+long Checkable::GetSchedulingOffset()
+{
+ return m_SchedulingOffset;
+}
+
+void Checkable::UpdateNextCheck(const MessageOrigin::Ptr& origin)
+{
+ double interval;
+
+ if (GetStateType() == StateTypeSoft && GetLastCheckResult() != nullptr)
+ interval = GetRetryInterval();
+ else
+ interval = GetCheckInterval();
+
+ double now = Utility::GetTime();
+ double adj = 0;
+
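+	/* Spread the next check over the interval using the per-object scheduling offset,
+	 * so that checks for many objects don't all fire at the same time. */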
+ if (interval > 1)
+ adj = fmod(now * 100 + GetSchedulingOffset(), interval * 100) / 100.0;
+
+ if (adj != 0.0)
+ adj = std::min(0.5 + fmod(GetSchedulingOffset(), interval * 5) / 100.0, adj);
+
+ double nextCheck = now - adj + interval;
+ double lastCheck = GetLastCheck();
+
+ Log(LogDebug, "Checkable")
+ << "Update checkable '" << GetName() << "' with check interval '" << GetCheckInterval()
+ << "' from last check time at " << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", (lastCheck < 0 ? 0 : lastCheck))
+ << " (" << GetLastCheck() << ") to next check time at " << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", nextCheck) << " (" << nextCheck << ").";
+
+ SetNextCheck(nextCheck, false, origin);
+}
+
+bool Checkable::HasBeenChecked() const
+{
+ return GetLastCheckResult() != nullptr;
+}
+
+double Checkable::GetLastCheck() const
+{
+ CheckResult::Ptr cr = GetLastCheckResult();
+ double schedule_end = -1;
+
+ if (cr)
+ schedule_end = cr->GetScheduleEnd();
+
+ return schedule_end;
+}
+
+Checkable::ProcessingResult Checkable::ProcessCheckResult(const CheckResult::Ptr& cr, const MessageOrigin::Ptr& origin)
+{
+ using Result = Checkable::ProcessingResult;
+
+ {
+ ObjectLock olock(this);
+ m_CheckRunning = false;
+ }
+
+ if (!cr)
+ return Result::NoCheckResult;
+
+ double now = Utility::GetTime();
+
+ if (cr->GetScheduleStart() == 0)
+ cr->SetScheduleStart(now);
+
+ if (cr->GetScheduleEnd() == 0)
+ cr->SetScheduleEnd(now);
+
+ if (cr->GetExecutionStart() == 0)
+ cr->SetExecutionStart(now);
+
+ if (cr->GetExecutionEnd() == 0)
+ cr->SetExecutionEnd(now);
+
+ if (!origin || origin->IsLocal())
+ cr->SetSchedulingSource(IcingaApplication::GetInstance()->GetNodeName());
+
+ Endpoint::Ptr command_endpoint = GetCommandEndpoint();
+
+ if (cr->GetCheckSource().IsEmpty()) {
+ if ((!origin || origin->IsLocal()))
+ cr->SetCheckSource(IcingaApplication::GetInstance()->GetNodeName());
+
+ /* override check source if command_endpoint was defined */
+ if (command_endpoint && !GetExtension("agent_check"))
+ cr->SetCheckSource(command_endpoint->GetName());
+ }
+
+ /* agent checks go through the api */
+ if (command_endpoint && GetExtension("agent_check")) {
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (listener) {
+ /* send message back to its origin */
+ Dictionary::Ptr message = ClusterEvents::MakeCheckResultMessage(this, cr);
+ listener->SyncSendMessage(command_endpoint, message);
+ }
+
+ return Result::Ok;
+
+ }
+
+ if (!IsActive())
+ return Result::CheckableInactive;
+
+ bool reachable = IsReachable();
+ bool notification_reachable = IsReachable(DependencyNotification);
+
+ ObjectLock olock(this);
+
+ CheckResult::Ptr old_cr = GetLastCheckResult();
+ ServiceState old_state = GetStateRaw();
+ StateType old_stateType = GetStateType();
+ long old_attempt = GetCheckAttempt();
+ bool recovery = false;
+
+	/* When we already have a check result (i.e. not after a fresh start),
+	 * don't accept older check results, but allow overrides for
+	 * check results that happened in the future.
+	 */
+ if (old_cr) {
+ double currentCRTimestamp = old_cr->GetExecutionStart();
+ double newCRTimestamp = cr->GetExecutionStart();
+
+ /* Our current timestamp may be from the future (wrong server time adjusted again). Allow overrides here. */
+ if (currentCRTimestamp > now) {
+ /* our current CR is from the future, let the new CR override it. */
+ Log(LogDebug, "Checkable")
+ << std::fixed << std::setprecision(6) << "Processing check result for checkable '" << GetName() << "' from "
+ << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", newCRTimestamp) << " (" << newCRTimestamp
+ << "). Overriding since ours is from the future at "
+ << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", currentCRTimestamp) << " (" << currentCRTimestamp << ").";
+ } else {
+ /* Current timestamp is from the past, but the new timestamp is even more in the past. Skip it. */
+ if (newCRTimestamp < currentCRTimestamp) {
+ Log(LogDebug, "Checkable")
+ << std::fixed << std::setprecision(6) << "Skipping check result for checkable '" << GetName() << "' from "
+ << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", newCRTimestamp) << " (" << newCRTimestamp
+ << "). It is in the past compared to ours at "
+ << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", currentCRTimestamp) << " (" << currentCRTimestamp << ").";
+ return Result::NewerCheckResultPresent;
+ }
+ }
+ }
+
+ /* The ExecuteCheck function already sets the old state, but we need to do it again
+ * in case this was a passive check result. */
+ SetLastStateRaw(old_state);
+ SetLastStateType(old_stateType);
+ SetLastReachable(reachable);
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(this);
+
+ CheckableType checkableType = CheckableHost;
+ if (service)
+ checkableType = CheckableService;
+
+ long attempt = 1;
+
+ std::set<Checkable::Ptr> children = GetChildren();
+
+ if (IsStateOK(cr->GetState())) {
+ SetStateType(StateTypeHard); // NOT-OK -> HARD OK
+
+ if (!IsStateOK(old_state))
+ recovery = true;
+
+ ResetNotificationNumbers();
+ SaveLastState(ServiceOK, cr->GetExecutionEnd());
+ } else {
+ /* OK -> NOT-OK change, first SOFT state. Reset attempt counter. */
+ if (IsStateOK(old_state)) {
+ SetStateType(StateTypeSoft);
+ attempt = 1;
+ }
+
+ /* SOFT state change, increase attempt counter. */
+ if (old_stateType == StateTypeSoft && !IsStateOK(old_state)) {
+ SetStateType(StateTypeSoft);
+ attempt = old_attempt + 1;
+ }
+
+		/* HARD state change (e.g. previously at attempt 2/3 and this is the final attempt). Reset attempt counter. */
+ if (attempt >= GetMaxCheckAttempts()) {
+ SetStateType(StateTypeHard);
+ attempt = 1;
+ }
+
+ if (!IsStateOK(cr->GetState())) {
+ SaveLastState(cr->GetState(), cr->GetExecutionEnd());
+ }
+ }
+
+ if (!reachable)
+ SetLastStateUnreachable(cr->GetExecutionEnd());
+
+ SetCheckAttempt(attempt);
+
+ ServiceState new_state = cr->GetState();
+ SetStateRaw(new_state);
+
+ bool stateChange;
+
+	/* Hosts are an exception: their state change is calculated on the mapped Up/Down states. */
+ if (checkableType == CheckableService)
+ stateChange = (old_state != new_state);
+ else
+ stateChange = (Host::CalculateState(old_state) != Host::CalculateState(new_state));
+
+ /* Store the current last state change for the next iteration. */
+ SetPreviousStateChange(GetLastStateChange());
+
+ if (stateChange) {
+ SetLastStateChange(cr->GetExecutionEnd());
+
+ /* remove acknowledgements */
+ if (GetAcknowledgement() == AcknowledgementNormal ||
+ (GetAcknowledgement() == AcknowledgementSticky && IsStateOK(new_state))) {
+ ClearAcknowledgement("");
+ }
+ }
+
+ bool remove_acknowledgement_comments = false;
+
+ if (GetAcknowledgement() == AcknowledgementNone)
+ remove_acknowledgement_comments = true;
+
+ bool hardChange = (GetStateType() == StateTypeHard && old_stateType == StateTypeSoft);
+
+ if (stateChange && old_stateType == StateTypeHard && GetStateType() == StateTypeHard)
+ hardChange = true;
+
+ bool is_volatile = GetVolatile();
+
+ if (hardChange || is_volatile) {
+ SetLastHardStateRaw(new_state);
+ SetLastHardStateChange(cr->GetExecutionEnd());
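+		/* The last two hard states are packed into one value: the new state goes into the hundreds digits, the previous one moves down into the ones digits. */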
+ SetLastHardStatesRaw(GetLastHardStatesRaw() / 100u + new_state * 100u);
+ }
+
+ if (stateChange) {
+ SetLastSoftStatesRaw(GetLastSoftStatesRaw() / 100u + new_state * 100u);
+ }
+
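+	/* The lower two decimal digits hold the previous hard state. */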
+ cr->SetPreviousHardState(ServiceState(GetLastHardStatesRaw() % 100u));
+
+ if (!IsStateOK(new_state))
+ TriggerDowntimes(cr->GetExecutionEnd());
+
+ /* statistics for external tools */
+ Checkable::UpdateStatistics(cr, checkableType);
+
+ bool in_downtime = IsInDowntime();
+
+ bool send_notification = false;
+ bool suppress_notification = !notification_reachable || in_downtime || IsAcknowledged();
+
+	/* Send notifications when a hard state change occurred. */
+ if (hardChange && !(old_stateType == StateTypeSoft && IsStateOK(new_state)))
+ send_notification = true;
+ /* Or if the checkable is volatile and in a HARD state. */
+ else if (is_volatile && GetStateType() == StateTypeHard)
+ send_notification = true;
+
+ if (IsStateOK(old_state) && old_stateType == StateTypeSoft)
+ send_notification = false; /* Don't send notifications for SOFT-OK -> HARD-OK. */
+
+ if (is_volatile && IsStateOK(old_state) && IsStateOK(new_state))
+ send_notification = false; /* Don't send notifications for volatile OK -> OK changes. */
+
+ olock.Unlock();
+
+ if (remove_acknowledgement_comments)
+ RemoveAckComments(String(), cr->GetExecutionEnd());
+
+ Dictionary::Ptr vars_after = new Dictionary({
+ { "state", new_state },
+ { "state_type", GetStateType() },
+ { "attempt", GetCheckAttempt() },
+ { "reachable", reachable }
+ });
+
+ if (old_cr)
+ cr->SetVarsBefore(old_cr->GetVarsAfter());
+
+ cr->SetVarsAfter(vars_after);
+
+ olock.Lock();
+
+ if (service) {
+ SetLastCheckResult(cr);
+ } else {
+ bool wasProblem = GetProblem();
+
+ SetLastCheckResult(cr);
+
+ if (GetProblem() != wasProblem) {
+ auto services = host->GetServices();
+ olock.Unlock();
+ for (auto& service : services) {
+ Service::OnHostProblemChanged(service, cr, origin);
+ }
+ olock.Lock();
+ }
+ }
+
+ bool was_flapping = IsFlapping();
+
+ UpdateFlappingStatus(cr->GetState());
+
+ bool is_flapping = IsFlapping();
+
+ if (cr->GetActive()) {
+ UpdateNextCheck(origin);
+ } else {
+ /* Reschedule the next check for external passive check results. The side effect of
+ * this is that for as long as we receive results for a service we
+ * won't execute any active checks. */
+ double offset;
+ double ttl = cr->GetTtl();
+
+ if (ttl > 0)
+ offset = ttl;
+ else
+ offset = GetCheckInterval();
+
+ SetNextCheck(Utility::GetTime() + offset, false, origin);
+ }
+
+ olock.Unlock();
+
+#ifdef I2_DEBUG /* I2_DEBUG */
+ Log(LogDebug, "Checkable")
+ << "Flapping: Checkable " << GetName()
+ << " was: " << was_flapping
+ << " is: " << is_flapping
+ << " threshold low: " << GetFlappingThresholdLow()
+ << " threshold high: " << GetFlappingThresholdHigh()
+ << "% current: " << GetFlappingCurrent() << "%.";
+#endif /* I2_DEBUG */
+
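+	/* After a recovery, pull the next check of still-failing children forward (randomly within the next minute) so they get a chance to recover quickly, too. */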
+ if (recovery) {
+ for (auto& child : children) {
+ if (child->GetProblem() && child->GetEnableActiveChecks()) {
+ auto nextCheck (now + Utility::Random() % 60);
+
+ ObjectLock oLock (child);
+
+ if (nextCheck < child->GetNextCheck()) {
+ child->SetNextCheck(nextCheck);
+ }
+ }
+ }
+ }
+
+ if (stateChange) {
+ /* reschedule direct parents */
+ for (const Checkable::Ptr& parent : GetParents()) {
+ if (parent.get() == this)
+ continue;
+
+ if (!parent->GetEnableActiveChecks())
+ continue;
+
+ if (parent->GetNextCheck() >= now + parent->GetRetryInterval()) {
+ ObjectLock olock(parent);
+ parent->SetNextCheck(now);
+ }
+ }
+ }
+
+ OnNewCheckResult(this, cr, origin);
+
+	/* signal status updates to e.g. db_ido */
+ OnStateChanged(this);
+
+ String old_state_str = (service ? Service::StateToString(old_state) : Host::StateToString(Host::CalculateState(old_state)));
+ String new_state_str = (service ? Service::StateToString(new_state) : Host::StateToString(Host::CalculateState(new_state)));
+
+ /* Whether a hard state change or a volatile state change except OK -> OK happened. */
+ if (hardChange || (is_volatile && !(IsStateOK(old_state) && IsStateOK(new_state)))) {
+ OnStateChange(this, cr, StateTypeHard, origin);
+ Log(LogNotice, "Checkable")
+ << "State Change: Checkable '" << GetName() << "' hard state change from " << old_state_str << " to " << new_state_str << " detected." << (is_volatile ? " Checkable is volatile." : "");
+ }
+ /* Whether a state change happened or the state type is SOFT (must be logged too). */
+ else if (stateChange || GetStateType() == StateTypeSoft) {
+ OnStateChange(this, cr, StateTypeSoft, origin);
+ Log(LogNotice, "Checkable")
+ << "State Change: Checkable '" << GetName() << "' soft state change from " << old_state_str << " to " << new_state_str << " detected.";
+ }
+
+ if (GetStateType() == StateTypeSoft || hardChange || recovery ||
+ (is_volatile && !(IsStateOK(old_state) && IsStateOK(new_state))))
+ ExecuteEventHandler();
+
+ int suppressed_types = 0;
+
+ /* Flapping start/end notifications */
+ if (!was_flapping && is_flapping) {
+ /* FlappingStart notifications happen on state changes, not in downtimes */
+ if (!IsPaused()) {
+ if (in_downtime) {
+ suppressed_types |= NotificationFlappingStart;
+ } else {
+ OnNotificationsRequested(this, NotificationFlappingStart, cr, "", "", nullptr);
+ }
+ }
+
+ Log(LogNotice, "Checkable")
+ << "Flapping Start: Checkable '" << GetName() << "' started flapping (Current flapping value "
+ << GetFlappingCurrent() << "% > high threshold " << GetFlappingThresholdHigh() << "%).";
+
+ NotifyFlapping(origin);
+ } else if (was_flapping && !is_flapping) {
+		/* FlappingEnd notifications are independent of state changes and must not happen in downtimes */
+ if (!IsPaused()) {
+ if (in_downtime) {
+ suppressed_types |= NotificationFlappingEnd;
+ } else {
+ OnNotificationsRequested(this, NotificationFlappingEnd, cr, "", "", nullptr);
+ }
+ }
+
+ Log(LogNotice, "Checkable")
+ << "Flapping Stop: Checkable '" << GetName() << "' stopped flapping (Current flapping value "
+ << GetFlappingCurrent() << "% < low threshold " << GetFlappingThresholdLow() << "%).";
+
+ NotifyFlapping(origin);
+ }
+
+ if (send_notification && !is_flapping) {
+ if (!IsPaused()) {
+			/* If there are still some pending suppressed state notifications, keep the suppression until these are
+ * handled by Checkable::FireSuppressedNotifications().
+ */
+ bool pending = GetSuppressedNotifications() & (NotificationRecovery|NotificationProblem);
+
+ if (suppress_notification || pending) {
+ suppressed_types |= (recovery ? NotificationRecovery : NotificationProblem);
+ } else {
+ OnNotificationsRequested(this, recovery ? NotificationRecovery : NotificationProblem, cr, "", "", nullptr);
+ }
+ }
+ }
+
+ if (suppressed_types) {
+		/* If some notifications were suppressed just because of e.g. a downtime,
+		 * stash them into a notification types bitmask so they may be re-sent later.
+ */
+
+ ObjectLock olock (this);
+ int suppressed_types_before (GetSuppressedNotifications());
+ int suppressed_types_after (suppressed_types_before | suppressed_types);
+
+ const int conflict = NotificationFlappingStart | NotificationFlappingEnd;
+ if ((suppressed_types_after & conflict) == conflict) {
+ /* Flapping start and end cancel out each other. */
+ suppressed_types_after &= ~conflict;
+ }
+
+ const int stateNotifications = NotificationRecovery | NotificationProblem;
+ if (!(suppressed_types_before & stateNotifications) && (suppressed_types & stateNotifications)) {
+ /* A state-related notification is suppressed for the first time, store the previous state. When
+ * notifications are no longer suppressed, this can be compared with the current state to determine
+ * if a notification must be sent. This is done differently compared to flapping notifications just above
+ * as for state notifications, problem and recovery don't always cancel each other. For example,
+ * WARNING -> OK -> CRITICAL generates both types once, but there should still be a notification.
+ */
+ SetStateBeforeSuppression(old_stateType == StateTypeHard ? old_state : ServiceOK);
+ }
+
+ if (suppressed_types_after != suppressed_types_before) {
+ SetSuppressedNotifications(suppressed_types_after);
+ }
+ }
+
+ /* update reachability for child objects */
+ if ((stateChange || hardChange) && !children.empty())
+ OnReachabilityChanged(this, cr, children, origin);
+
+ return Result::Ok;
+}
+
+void Checkable::ExecuteRemoteCheck(const Dictionary::Ptr& resolvedMacros)
+{
+ CONTEXT("Executing remote check for object '" << GetName() << "'");
+
+ double scheduled_start = GetNextCheck();
+ double before_check = Utility::GetTime();
+
+ CheckResult::Ptr cr = new CheckResult();
+ cr->SetScheduleStart(scheduled_start);
+ cr->SetExecutionStart(before_check);
+
+ GetCheckCommand()->Execute(this, cr, resolvedMacros, true);
+}
+
+void Checkable::ExecuteCheck()
+{
+ CONTEXT("Executing check for object '" << GetName() << "'");
+
+ /* keep track of scheduling info in case the check type doesn't provide its own information */
+ double scheduled_start = GetNextCheck();
+ double before_check = Utility::GetTime();
+
+ SetLastCheckStarted(Utility::GetTime());
+
+ /* This calls SetNextCheck() which updates the CheckerComponent's idle/pending
+ * queues and ensures that checks are not fired multiple times. ProcessCheckResult()
+ * is called too late. See #6421.
+ */
+ UpdateNextCheck();
+
+ bool reachable = IsReachable();
+
+ {
+ ObjectLock olock(this);
+
+ /* don't run another check if there is one pending */
+ if (m_CheckRunning)
+ return;
+
+ m_CheckRunning = true;
+
+ SetLastStateRaw(GetStateRaw());
+ SetLastStateType(GetLastStateType());
+ SetLastReachable(reachable);
+ }
+
+ CheckResult::Ptr cr = new CheckResult();
+
+ cr->SetScheduleStart(scheduled_start);
+ cr->SetExecutionStart(before_check);
+
+ Endpoint::Ptr endpoint = GetCommandEndpoint();
+ bool local = !endpoint || endpoint == Endpoint::GetLocalEndpoint();
+
+ if (local) {
+ GetCheckCommand()->Execute(this, cr, nullptr, false);
+ } else {
+ Dictionary::Ptr macros = new Dictionary();
+ GetCheckCommand()->Execute(this, cr, macros, false);
+
+ if (endpoint->GetConnected()) {
+ /* perform check on remote endpoint */
+ Dictionary::Ptr message = new Dictionary();
+ message->Set("jsonrpc", "2.0");
+ message->Set("method", "event::ExecuteCommand");
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(this);
+
+ Dictionary::Ptr params = new Dictionary();
+ message->Set("params", params);
+ params->Set("command_type", "check_command");
+ params->Set("command", GetCheckCommand()->GetName());
+ params->Set("host", host->GetName());
+
+ if (service)
+ params->Set("service", service->GetShortName());
+
+ /*
+ * If the host/service object specifies the 'check_timeout' attribute,
+ * forward this to the remote endpoint to limit the command execution time.
+ */
+ if (!GetCheckTimeout().IsEmpty())
+ params->Set("check_timeout", GetCheckTimeout());
+
+ params->Set("macros", macros);
+
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (listener)
+ listener->SyncSendMessage(endpoint, message);
+
+ /* Re-schedule the check so we don't run it again until after we've received
+ * a check result from the remote instance. The check will be re-scheduled
+ * using the proper check interval once we've received a check result.
+ */
+ SetNextCheck(Utility::GetTime() + GetCheckCommand()->GetTimeout() + 30);
+
+ /*
+ * Let the user know that there was a problem with the check if
+ * 1) The endpoint is not syncing (replay log, etc.)
+ * 2) Outside of the cold startup window (5min)
+ */
+ } else if (!endpoint->GetSyncing() && Application::GetInstance()->GetStartTime() < Utility::GetTime() - 300) {
+ /* fail to perform check on unconnected endpoint */
+ cr->SetState(ServiceUnknown);
+
+ String output = "Remote Icinga instance '" + endpoint->GetName() + "' is not connected to ";
+
+ Endpoint::Ptr localEndpoint = Endpoint::GetLocalEndpoint();
+
+ if (localEndpoint)
+ output += "'" + localEndpoint->GetName() + "'";
+ else
+ output += "this instance";
+
+ cr->SetOutput(output);
+
+ ProcessCheckResult(cr);
+ }
+
+ {
+ ObjectLock olock(this);
+ m_CheckRunning = false;
+ }
+ }
+}
+
+void Checkable::UpdateStatistics(const CheckResult::Ptr& cr, CheckableType type)
+{
+ time_t ts = cr->GetScheduleEnd();
+
+ if (type == CheckableHost) {
+ if (cr->GetActive())
+ CIB::UpdateActiveHostChecksStatistics(ts, 1);
+ else
+ CIB::UpdatePassiveHostChecksStatistics(ts, 1);
+ } else if (type == CheckableService) {
+ if (cr->GetActive())
+ CIB::UpdateActiveServiceChecksStatistics(ts, 1);
+ else
+ CIB::UpdatePassiveServiceChecksStatistics(ts, 1);
+ } else {
+ Log(LogWarning, "Checkable", "Unknown checkable type for statistic update.");
+ }
+}
+
+void Checkable::IncreasePendingChecks()
+{
+ std::unique_lock<std::mutex> lock(m_StatsMutex);
+ m_PendingChecks++;
+}
+
+void Checkable::DecreasePendingChecks()
+{
+ std::unique_lock<std::mutex> lock(m_StatsMutex);
+ m_PendingChecks--;
+ m_PendingChecksCV.notify_one();
+}
+
+int Checkable::GetPendingChecks()
+{
+ std::unique_lock<std::mutex> lock(m_StatsMutex);
+ return m_PendingChecks;
+}
+
+void Checkable::AquirePendingCheckSlot(int maxPendingChecks)
+{
+ std::unique_lock<std::mutex> lock(m_StatsMutex);
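+	/* Block until the number of pending checks drops below the limit, then claim a slot. */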
+ while (m_PendingChecks >= maxPendingChecks)
+ m_PendingChecksCV.wait(lock);
+
+ m_PendingChecks++;
+}
diff --git a/lib/icinga/checkable-comment.cpp b/lib/icinga/checkable-comment.cpp
new file mode 100644
index 0000000..71cfac6
--- /dev/null
+++ b/lib/icinga/checkable-comment.cpp
@@ -0,0 +1,75 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/service.hpp"
+#include "remote/configobjectutility.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/timer.hpp"
+#include "base/utility.hpp"
+#include "base/logger.hpp"
+#include <utility>
+
+using namespace icinga;
+
+
+void Checkable::RemoveAllComments()
+{
+ for (const Comment::Ptr& comment : GetComments()) {
+ Comment::RemoveComment(comment->GetName());
+ }
+}
+
+void Checkable::RemoveAckComments(const String& removedBy, double createdBefore)
+{
+ for (const Comment::Ptr& comment : GetComments()) {
+ if (comment->GetEntryType() == CommentAcknowledgement) {
+ /* Do not remove persistent comments from an acknowledgement */
+ if (comment->GetPersistent()) {
+ continue;
+ }
+
+ if (comment->GetEntryTime() > createdBefore) {
+ continue;
+ }
+
+ {
+ ObjectLock oLock (comment);
+ comment->SetRemovedBy(removedBy);
+ }
+
+ Comment::RemoveComment(comment->GetName());
+ }
+ }
+}
+
+std::set<Comment::Ptr> Checkable::GetComments() const
+{
+ std::unique_lock<std::mutex> lock(m_CommentMutex);
+ return m_Comments;
+}
+
+Comment::Ptr Checkable::GetLastComment() const
+{
+ std::unique_lock<std::mutex> lock (m_CommentMutex);
+ Comment::Ptr lastComment;
+
+ for (auto& comment : m_Comments) {
+ if (!lastComment || comment->GetEntryTime() > lastComment->GetEntryTime()) {
+ lastComment = comment;
+ }
+ }
+
+ return lastComment;
+}
+
+void Checkable::RegisterComment(const Comment::Ptr& comment)
+{
+ std::unique_lock<std::mutex> lock(m_CommentMutex);
+ m_Comments.insert(comment);
+}
+
+void Checkable::UnregisterComment(const Comment::Ptr& comment)
+{
+ std::unique_lock<std::mutex> lock(m_CommentMutex);
+ m_Comments.erase(comment);
+}
diff --git a/lib/icinga/checkable-dependency.cpp b/lib/icinga/checkable-dependency.cpp
new file mode 100644
index 0000000..58d6b57
--- /dev/null
+++ b/lib/icinga/checkable-dependency.cpp
@@ -0,0 +1,176 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/service.hpp"
+#include "icinga/dependency.hpp"
+#include "base/logger.hpp"
+#include <unordered_map>
+
+using namespace icinga;
+
+void Checkable::AddDependency(const Dependency::Ptr& dep)
+{
+ std::unique_lock<std::mutex> lock(m_DependencyMutex);
+ m_Dependencies.insert(dep);
+}
+
+void Checkable::RemoveDependency(const Dependency::Ptr& dep)
+{
+ std::unique_lock<std::mutex> lock(m_DependencyMutex);
+ m_Dependencies.erase(dep);
+}
+
+std::vector<Dependency::Ptr> Checkable::GetDependencies() const
+{
+ std::unique_lock<std::mutex> lock(m_DependencyMutex);
+ return std::vector<Dependency::Ptr>(m_Dependencies.begin(), m_Dependencies.end());
+}
+
+void Checkable::AddReverseDependency(const Dependency::Ptr& dep)
+{
+ std::unique_lock<std::mutex> lock(m_DependencyMutex);
+ m_ReverseDependencies.insert(dep);
+}
+
+void Checkable::RemoveReverseDependency(const Dependency::Ptr& dep)
+{
+ std::unique_lock<std::mutex> lock(m_DependencyMutex);
+ m_ReverseDependencies.erase(dep);
+}
+
+std::vector<Dependency::Ptr> Checkable::GetReverseDependencies() const
+{
+ std::unique_lock<std::mutex> lock(m_DependencyMutex);
+ return std::vector<Dependency::Ptr>(m_ReverseDependencies.begin(), m_ReverseDependencies.end());
+}
+
+bool Checkable::IsReachable(DependencyType dt, Dependency::Ptr *failedDependency, int rstack) const
+{
+ /* Anything greater than 256 causes recursion bus errors. */
+ int limit = 256;
+
+ if (rstack > limit) {
+ Log(LogWarning, "Checkable")
+ << "Too many nested dependencies (>" << limit << ") for checkable '" << GetName() << "': Dependency failed.";
+
+ return false;
+ }
+
+ for (const Checkable::Ptr& checkable : GetParents()) {
+ if (!checkable->IsReachable(dt, failedDependency, rstack + 1))
+ return false;
+ }
+
+ /* implicit dependency on host if this is a service */
+ const auto *service = dynamic_cast<const Service *>(this);
+ if (service && (dt == DependencyState || dt == DependencyNotification)) {
+ Host::Ptr host = service->GetHost();
+
+ if (host && host->GetState() != HostUp && host->GetStateType() == StateTypeHard) {
+ if (failedDependency)
+ *failedDependency = nullptr;
+
+ return false;
+ }
+ }
+
+ auto deps = GetDependencies();
+
+ std::unordered_map<std::string, Dependency::Ptr> violated; // key: redundancy group, value: nullptr if satisfied, violating dependency otherwise
+
+ for (const Dependency::Ptr& dep : deps) {
+ std::string redundancy_group = dep->GetRedundancyGroup();
+
+ if (!dep->IsAvailable(dt)) {
+ if (redundancy_group.empty()) {
+ Log(LogDebug, "Checkable")
+ << "Non-redundant dependency '" << dep->GetName() << "' failed for checkable '" << GetName() << "': Marking as unreachable.";
+
+ if (failedDependency)
+ *failedDependency = dep;
+
+ return false;
+ }
+
+			// Tentatively mark this redundancy group as failed unless it is already marked:
+			// it either passed before (don't overwrite) or already failed (nothing to change).
+			// Note that std::unordered_map::insert() will not overwrite an existing entry.
+ violated.insert(std::make_pair(redundancy_group, dep));
+ } else if (!redundancy_group.empty()) {
+ violated[redundancy_group] = nullptr;
+ }
+ }
+
+ auto violator = std::find_if(violated.begin(), violated.end(), [](auto& v) { return v.second != nullptr; });
+ if (violator != violated.end()) {
+ Log(LogDebug, "Checkable")
+ << "All dependencies in redundancy group '" << violator->first << "' have failed for checkable '" << GetName() << "': Marking as unreachable.";
+
+ if (failedDependency)
+ *failedDependency = violator->second;
+
+ return false;
+ }
+
+ if (failedDependency)
+ *failedDependency = nullptr;
+
+ return true;
+}
+
+std::set<Checkable::Ptr> Checkable::GetParents() const
+{
+ std::set<Checkable::Ptr> parents;
+
+ for (const Dependency::Ptr& dep : GetDependencies()) {
+ Checkable::Ptr parent = dep->GetParent();
+
+ if (parent && parent.get() != this)
+ parents.insert(parent);
+ }
+
+ return parents;
+}
+
+std::set<Checkable::Ptr> Checkable::GetChildren() const
+{
+	std::set<Checkable::Ptr> children;
+
+	for (const Dependency::Ptr& dep : GetReverseDependencies()) {
+		Checkable::Ptr child = dep->GetChild();
+
+		if (child && child.get() != this)
+			children.insert(child);
+	}
+
+	return children;
+}
+
+std::set<Checkable::Ptr> Checkable::GetAllChildren() const
+{
+ std::set<Checkable::Ptr> children = GetChildren();
+
+ GetAllChildrenInternal(children, 0);
+
+ return children;
+}
+
+void Checkable::GetAllChildrenInternal(std::set<Checkable::Ptr>& children, int level) const
+{
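+	/* Cap the recursion depth to guard against overly deep or cyclic dependency chains. */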
+ if (level > 32)
+ return;
+
+ std::set<Checkable::Ptr> localChildren;
+
+ for (const Checkable::Ptr& checkable : children) {
+ std::set<Checkable::Ptr> cChildren = checkable->GetChildren();
+
+ if (!cChildren.empty()) {
+ GetAllChildrenInternal(cChildren, level + 1);
+ localChildren.insert(cChildren.begin(), cChildren.end());
+ }
+
+ localChildren.insert(checkable);
+ }
+
+ children.insert(localChildren.begin(), localChildren.end());
+}
diff --git a/lib/icinga/checkable-downtime.cpp b/lib/icinga/checkable-downtime.cpp
new file mode 100644
index 0000000..d96003d
--- /dev/null
+++ b/lib/icinga/checkable-downtime.cpp
@@ -0,0 +1,64 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/service.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+#include "base/utility.hpp"
+#include "base/convert.hpp"
+
+using namespace icinga;
+
+void Checkable::RemoveAllDowntimes()
+{
+ for (const Downtime::Ptr& downtime : GetDowntimes()) {
+ Downtime::RemoveDowntime(downtime->GetName(), true, true, true);
+ }
+}
+
+void Checkable::TriggerDowntimes(double triggerTime)
+{
+ for (const Downtime::Ptr& downtime : GetDowntimes()) {
+ downtime->TriggerDowntime(triggerTime);
+ }
+}
+
+bool Checkable::IsInDowntime() const
+{
+ for (const Downtime::Ptr& downtime : GetDowntimes()) {
+ if (downtime->IsInEffect())
+ return true;
+ }
+
+ return false;
+}
+
+int Checkable::GetDowntimeDepth() const
+{
+ int downtime_depth = 0;
+
+ for (const Downtime::Ptr& downtime : GetDowntimes()) {
+ if (downtime->IsInEffect())
+ downtime_depth++;
+ }
+
+ return downtime_depth;
+}
+
+std::set<Downtime::Ptr> Checkable::GetDowntimes() const
+{
+ std::unique_lock<std::mutex> lock(m_DowntimeMutex);
+ return m_Downtimes;
+}
+
+void Checkable::RegisterDowntime(const Downtime::Ptr& downtime)
+{
+ std::unique_lock<std::mutex> lock(m_DowntimeMutex);
+ m_Downtimes.insert(downtime);
+}
+
+void Checkable::UnregisterDowntime(const Downtime::Ptr& downtime)
+{
+ std::unique_lock<std::mutex> lock(m_DowntimeMutex);
+ m_Downtimes.erase(downtime);
+}
diff --git a/lib/icinga/checkable-event.cpp b/lib/icinga/checkable-event.cpp
new file mode 100644
index 0000000..fb315d9
--- /dev/null
+++ b/lib/icinga/checkable-event.cpp
@@ -0,0 +1,81 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/checkable.hpp"
+#include "icinga/eventcommand.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "icinga/service.hpp"
+#include "remote/apilistener.hpp"
+#include "base/logger.hpp"
+#include "base/context.hpp"
+
+using namespace icinga;
+
+boost::signals2::signal<void (const Checkable::Ptr&)> Checkable::OnEventCommandExecuted;
+
+EventCommand::Ptr Checkable::GetEventCommand() const
+{
+ return EventCommand::GetByName(GetEventCommandRaw());
+}
+
+void Checkable::ExecuteEventHandler(const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
+{
+ CONTEXT("Executing event handler for object '" << GetName() << "'");
+
+ if (!IcingaApplication::GetInstance()->GetEnableEventHandlers() || !GetEnableEventHandler())
+ return;
+
+ /* HA enabled zones. */
+ if (IsActive() && IsPaused()) {
+ Log(LogNotice, "Checkable")
+ << "Skipping event handler for HA-paused checkable '" << GetName() << "'";
+ return;
+ }
+
+ EventCommand::Ptr ec = GetEventCommand();
+
+ if (!ec)
+ return;
+
+ Log(LogNotice, "Checkable")
+ << "Executing event handler '" << ec->GetName() << "' for checkable '" << GetName() << "'";
+
+ Dictionary::Ptr macros;
+ Endpoint::Ptr endpoint = GetCommandEndpoint();
+
+ if (endpoint && !useResolvedMacros)
+ macros = new Dictionary();
+ else
+ macros = resolvedMacros;
+
+ ec->Execute(this, macros, useResolvedMacros);
+
+ if (endpoint && !GetExtension("agent_check")) {
+ Dictionary::Ptr message = new Dictionary();
+ message->Set("jsonrpc", "2.0");
+ message->Set("method", "event::ExecuteCommand");
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(this);
+
+ Dictionary::Ptr params = new Dictionary();
+ message->Set("params", params);
+ params->Set("command_type", "event_command");
+ params->Set("command", GetEventCommand()->GetName());
+ params->Set("host", host->GetName());
+
+ if (service)
+ params->Set("service", service->GetShortName());
+
+ params->Set("macros", macros);
+
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (listener)
+ listener->SyncSendMessage(endpoint, message);
+
+ return;
+ }
+
+ OnEventCommandExecuted(this);
+}
diff --git a/lib/icinga/checkable-flapping.cpp b/lib/icinga/checkable-flapping.cpp
new file mode 100644
index 0000000..e905e05
--- /dev/null
+++ b/lib/icinga/checkable-flapping.cpp
@@ -0,0 +1,114 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/checkable.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "base/utility.hpp"
+
+using namespace icinga;
+
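+/* Small helper wrapping an integral value as a bitset; used below as the 20-slot ring buffer of state-change bits for flapping detection. */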
+template<typename T>
+struct Bitset
+{
+public:
+ Bitset(T value)
+ : m_Data(value)
+ { }
+
+ void Modify(int index, bool bit)
+ {
+ if (bit)
+ m_Data |= 1 << index;
+ else
+ m_Data &= ~(1 << index);
+ }
+
+ bool Get(int index) const
+ {
+ return m_Data & (1 << index);
+ }
+
+ T GetValue() const
+ {
+ return m_Data;
+ }
+
+private:
+ T m_Data{0};
+};
+
+void Checkable::UpdateFlappingStatus(ServiceState newState)
+{
+ Bitset<unsigned long> stateChangeBuf = GetFlappingBuffer();
+ int oldestIndex = GetFlappingIndex();
+
+ ServiceState lastState = GetFlappingLastState();
+ bool stateChange = false;
+
+ int stateFilter = GetFlappingIgnoreStatesFilter();
+
+ /* Only count as state change if no state filter is set or the new state isn't filtered out */
+ if (stateFilter == -1 || !(ServiceStateToFlappingFilter(newState) & stateFilter)) {
+ stateChange = newState != lastState;
+ SetFlappingLastState(newState);
+ }
+
+ stateChangeBuf.Modify(oldestIndex, stateChange);
+ oldestIndex = (oldestIndex + 1) % 20;
+
+ double stateChanges = 0;
+
+ /* Iterate over our state array and compute a weighted total */
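+	/* More recent state changes are weighted higher: the oldest slot counts 0.8, the newest 1.18. */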
+ for (int i = 0; i < 20; i++) {
+ if (stateChangeBuf.Get((oldestIndex + i) % 20))
+ stateChanges += 0.8 + (0.02 * i);
+ }
+
+ double flappingValue = 100.0 * stateChanges / 20.0;
+
+ bool flapping;
+
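+	/* Hysteresis: flapping only starts above the high threshold and only ends below the low threshold. */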
+ if (GetFlapping())
+ flapping = flappingValue > GetFlappingThresholdLow();
+ else
+ flapping = flappingValue > GetFlappingThresholdHigh();
+
+ SetFlappingBuffer(stateChangeBuf.GetValue());
+ SetFlappingIndex(oldestIndex);
+ SetFlappingCurrent(flappingValue);
+
+ if (flapping != GetFlapping()) {
+ SetFlapping(flapping, true);
+
+ double ee = GetLastCheckResult()->GetExecutionEnd();
+
+ if (GetEnableFlapping() && IcingaApplication::GetInstance()->GetEnableFlapping()) {
+ OnFlappingChange(this, ee);
+ }
+
+ SetFlappingLastChange(ee);
+ }
+}
+
+bool Checkable::IsFlapping() const
+{
+ if (!GetEnableFlapping() || !IcingaApplication::GetInstance()->GetEnableFlapping())
+ return false;
+ else
+ return GetFlapping();
+}
+
+int Checkable::ServiceStateToFlappingFilter(ServiceState state)
+{
+ switch (state) {
+ case ServiceOK:
+ return StateFilterOK;
+ case ServiceWarning:
+ return StateFilterWarning;
+ case ServiceCritical:
+ return StateFilterCritical;
+ case ServiceUnknown:
+ return StateFilterUnknown;
+ default:
+ VERIFY(!"Invalid state type.");
+ }
+}
diff --git a/lib/icinga/checkable-notification.cpp b/lib/icinga/checkable-notification.cpp
new file mode 100644
index 0000000..79b5986
--- /dev/null
+++ b/lib/icinga/checkable-notification.cpp
@@ -0,0 +1,334 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/checkable.hpp"
+#include "icinga/host.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "icinga/service.hpp"
+#include "base/dictionary.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+#include "base/exception.hpp"
+#include "base/context.hpp"
+#include "base/convert.hpp"
+#include "base/lazy-init.hpp"
+#include "remote/apilistener.hpp"
+
+using namespace icinga;
+
+boost::signals2::signal<void (const Notification::Ptr&, const Checkable::Ptr&, const std::set<User::Ptr>&,
+ const NotificationType&, const CheckResult::Ptr&, const String&, const String&,
+ const MessageOrigin::Ptr&)> Checkable::OnNotificationSentToAllUsers;
+boost::signals2::signal<void (const Notification::Ptr&, const Checkable::Ptr&, const User::Ptr&,
+ const NotificationType&, const CheckResult::Ptr&, const String&, const String&, const String&,
+ const MessageOrigin::Ptr&)> Checkable::OnNotificationSentToUser;
+
+void Checkable::ResetNotificationNumbers()
+{
+ for (const Notification::Ptr& notification : GetNotifications()) {
+ ObjectLock olock(notification);
+ notification->ResetNotificationNumber();
+ }
+}
+
+void Checkable::SendNotifications(NotificationType type, const CheckResult::Ptr& cr, const String& author, const String& text)
+{
+ String checkableName = GetName();
+
+ CONTEXT("Sending notifications for object '" << checkableName << "'");
+
+ bool force = GetForceNextNotification();
+
+ SetForceNextNotification(false);
+
+ if (!IcingaApplication::GetInstance()->GetEnableNotifications() || !GetEnableNotifications()) {
+ if (!force) {
+ Log(LogInformation, "Checkable")
+ << "Notifications are disabled for checkable '" << checkableName << "'.";
+ return;
+ }
+ }
+
+ std::set<Notification::Ptr> notifications = GetNotifications();
+
+ String notificationTypeName = Notification::NotificationTypeToString(type);
+
+ // Bail early if there are no notifications.
+ if (notifications.empty()) {
+ Log(LogNotice, "Checkable")
+ << "Skipping checkable '" << checkableName << "' which doesn't have any notification objects configured.";
+ return;
+ }
+
+ Log(LogInformation, "Checkable")
+ << "Checkable '" << checkableName << "' has " << notifications.size()
+ << " notification(s). Checking filters for type '" << notificationTypeName << "', sends will be logged.";
+
+ for (const Notification::Ptr& notification : notifications) {
+ // Re-send stashed notifications from cold startup.
+ if (ApiListener::UpdatedObjectAuthority()) {
+ try {
+ if (!notification->IsPaused()) {
+ auto stashedNotifications (notification->GetStashedNotifications());
+
+ if (stashedNotifications->GetLength()) {
+ Log(LogNotice, "Notification")
+ << "Notification '" << notification->GetName() << "': there are some stashed notifications. Stashing notification to preserve order.";
+
+ stashedNotifications->Add(new Dictionary({
+ {"notification_type", type},
+ {"cr", cr},
+ {"force", force},
+ {"reminder", false},
+ {"author", author},
+ {"text", text}
+ }));
+ } else {
+ notification->BeginExecuteNotification(type, cr, force, false, author, text);
+ }
+ } else {
+ Log(LogNotice, "Notification")
+ << "Notification '" << notification->GetName() << "': HA cluster active, this endpoint does not have the authority (paused=true). Skipping.";
+ }
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "Checkable")
+ << "Exception occurred during notification '" << notification->GetName() << "' for checkable '"
+ << GetName() << "': " << DiagnosticInformation(ex, false);
+ }
+ } else {
+ // Cold startup phase. Stash notification for later.
+ Log(LogNotice, "Notification")
+ << "Notification '" << notification->GetName() << "': object authority hasn't been updated, yet. Stashing notification.";
+
+ notification->GetStashedNotifications()->Add(new Dictionary({
+ {"notification_type", type},
+ {"cr", cr},
+ {"force", force},
+ {"reminder", false},
+ {"author", author},
+ {"text", text}
+ }));
+ }
+ }
+}
+
+std::set<Notification::Ptr> Checkable::GetNotifications() const
+{
+ std::unique_lock<std::mutex> lock(m_NotificationMutex);
+ return m_Notifications;
+}
+
+void Checkable::RegisterNotification(const Notification::Ptr& notification)
+{
+ std::unique_lock<std::mutex> lock(m_NotificationMutex);
+ m_Notifications.insert(notification);
+}
+
+void Checkable::UnregisterNotification(const Notification::Ptr& notification)
+{
+ std::unique_lock<std::mutex> lock(m_NotificationMutex);
+ m_Notifications.erase(notification);
+}
+
+void Checkable::FireSuppressedNotifications()
+{
+ if (!IsActive())
+ return;
+
+ if (IsPaused())
+ return;
+
+ if (!GetEnableNotifications())
+ return;
+
+ int suppressed_types (GetSuppressedNotifications());
+ if (!suppressed_types)
+ return;
+
+ int subtract = 0;
+
+ {
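+		/* Lazily evaluated: whether the host or any dependency parent left its problem state since the last check result's execution started. */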
+ LazyInit<bool> wasLastParentRecoveryRecent ([this]() {
+ auto cr (GetLastCheckResult());
+
+ if (!cr) {
+ return true;
+ }
+
+ auto threshold (cr->GetExecutionStart());
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(this);
+
+ if (service) {
+ ObjectLock oLock (host);
+
+ if (!host->GetProblem() && host->GetLastStateChange() >= threshold) {
+ return true;
+ }
+ }
+
+ for (auto& dep : GetDependencies()) {
+ auto parent (dep->GetParent());
+ ObjectLock oLock (parent);
+
+ if (!parent->GetProblem() && parent->GetLastStateChange() >= threshold) {
+ return true;
+ }
+ }
+
+ return false;
+ });
+
+ if (suppressed_types & (NotificationProblem|NotificationRecovery)) {
+ CheckResult::Ptr cr = GetLastCheckResult();
+ NotificationType type = cr && IsStateOK(cr->GetState()) ? NotificationRecovery : NotificationProblem;
+ bool state_suppressed = NotificationReasonSuppressed(NotificationProblem) || NotificationReasonSuppressed(NotificationRecovery);
+
+ /* Only process (i.e. send or dismiss) suppressed state notifications if the following conditions are met:
+ *
+ * 1. State notifications are not suppressed at the moment. State notifications must only be removed from
+ * the suppressed notifications bitset after the reason for the suppression is gone as these bits are
+ * used as a marker for when to set the state_before_suppression attribute.
+ * 2. The checkable is in a hard state. Soft states represent a state where we are not certain yet about
+ * the actual state and wait with sending notifications. If we want to immediately send a notification,
+ * we might send a recovery notification for something that just started failing or a problem
+ * notification which might be for an intermittent problem that would have never received a
+ * notification if there was no suppression as it still was in a soft state. Both cases aren't ideal so
+ * better wait until we are certain.
+			 * 3. The checkable isn't likely to be checked soon. For example, if a downtime ended, give the checkable a
+ * chance to recover afterwards before sending a notification.
+ * 4. No parent recovered recently. Similar to the previous condition, give the checkable a chance to
+ * recover after one of its dependencies recovered before sending a notification.
+ *
+ * If any of these conditions is not met, processing the suppressed notification is further delayed.
+ */
+ if (!state_suppressed && GetStateType() == StateTypeHard && !IsLikelyToBeCheckedSoon() && !wasLastParentRecoveryRecent.Get()) {
+ if (NotificationReasonApplies(type)) {
+ Checkable::OnNotificationsRequested(this, type, cr, "", "", nullptr);
+ }
+ subtract |= NotificationRecovery|NotificationProblem;
+ }
+ }
+
+ for (auto type : {NotificationFlappingStart, NotificationFlappingEnd}) {
+ if (suppressed_types & type) {
+ bool still_applies = NotificationReasonApplies(type);
+
+ if (still_applies) {
+ if (!NotificationReasonSuppressed(type) && !IsLikelyToBeCheckedSoon() && !wasLastParentRecoveryRecent.Get()) {
+ Checkable::OnNotificationsRequested(this, type, GetLastCheckResult(), "", "", nullptr);
+
+ subtract |= type;
+ }
+ } else {
+ subtract |= type;
+ }
+ }
+ }
+ }
+
+ if (subtract) {
+ ObjectLock olock (this);
+
+ int suppressed_types_before (GetSuppressedNotifications());
+ int suppressed_types_after (suppressed_types_before & ~subtract);
+
+ if (suppressed_types_after != suppressed_types_before) {
+ SetSuppressedNotifications(suppressed_types_after);
+ }
+ }
+}
+
+/**
+ * Re-sends all notifications previously suppressed by e.g. downtimes if the notification reason still applies.
+ */
+void Checkable::FireSuppressedNotificationsTimer(const Timer * const&)
+{
+ for (auto& host : ConfigType::GetObjectsByType<Host>()) {
+ host->FireSuppressedNotifications();
+ }
+
+ for (auto& service : ConfigType::GetObjectsByType<Service>()) {
+ service->FireSuppressedNotifications();
+ }
+}
+
+/**
+ * Returns whether sending a notification of the given type right now would correctly represent the current state of *this.
+ *
+ * @param type The type of notification to send (or not to send).
+ *
+ * @return Whether to send the notification.
+ */
+bool Checkable::NotificationReasonApplies(NotificationType type)
+{
+ switch (type) {
+ case NotificationProblem:
+ {
+ auto cr (GetLastCheckResult());
+ return cr && !IsStateOK(cr->GetState()) && cr->GetState() != GetStateBeforeSuppression();
+ }
+ case NotificationRecovery:
+ {
+ auto cr (GetLastCheckResult());
+ return cr && IsStateOK(cr->GetState()) && cr->GetState() != GetStateBeforeSuppression();
+ }
+ case NotificationFlappingStart:
+ return IsFlapping();
+ case NotificationFlappingEnd:
+ return !IsFlapping();
+ default:
+ VERIFY(!"Checkable#NotificationReasonStillApplies(): given type not implemented");
+ return false;
+ }
+}
+
+/**
+ * Checks if notifications of a given type should be suppressed for this Checkable at the moment.
+ *
+ * @param type The notification type for which to query the suppression status.
+ *
+ * @return true if no notification of this type should be sent.
+ */
+bool Checkable::NotificationReasonSuppressed(NotificationType type)
+{
+ switch (type) {
+ case NotificationProblem:
+ case NotificationRecovery:
+ return !IsReachable(DependencyNotification) || IsInDowntime() || IsAcknowledged();
+ case NotificationFlappingStart:
+ case NotificationFlappingEnd:
+ return IsInDowntime();
+ default:
+ return false;
+ }
+}
+
+/**
+ * E.g. we're about to re-send a stashed problem notification because *this is still not OK.
+ * But if the next check result recovers *this shortly afterwards, we would send a recovery notification
+ * right after the problem one. This is not desired, especially for lots of checkables at once.
+ * Because of that, if a check result is likely to arrive soon, we delay re-sending the stashed
+ * notification until the next check. That check either changes nothing, in which case we finally
+ * re-send the stashed problem notification, or it recovers *this, in which case we drop the stashed
+ * notification.
+ *
+ * @return Whether *this is likely to be checked soon.
+ */
+bool Checkable::IsLikelyToBeCheckedSoon()
+{
+ if (!GetEnableActiveChecks()) {
+ return false;
+ }
+
+ // Window of one minute, but never more than the check interval minus a 10 second margin; otherwise checkables with short intervals would always be considered due soon.
+ auto threshold (GetCheckInterval() - 10);
+
+ if (threshold > 60) {
+ threshold = 60;
+ } else if (threshold < 0) {
+ threshold = 0;
+ }
+
+ return GetNextCheck() <= Utility::GetTime() + threshold;
+}
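
Aside (illustration only, not part of the upstream patch): the clamping above is equivalent to taking the check interval minus a 10 second margin and limiting the result to the range [0, 60]. A standalone C++ sketch with sample intervals:

// Standalone sketch of the "checked soon" window used above.
#include <algorithm>
#include <cstdio>

static double SoonThreshold(double checkInterval)
{
	// check interval minus a 10 second margin, clamped to [0, 60] seconds
	return std::min(60.0, std::max(0.0, checkInterval - 10));
}

int main()
{
	std::printf("%g\n", SoonThreshold(300)); // 60 -> at most one minute ahead
	std::printf("%g\n", SoonThreshold(30));  // 20 -> short intervals shrink the window
	std::printf("%g\n", SoonThreshold(5));   // 0  -> only a check due right now counts
	return 0;
}
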
diff --git a/lib/icinga/checkable-script.cpp b/lib/icinga/checkable-script.cpp
new file mode 100644
index 0000000..4a0d1d8
--- /dev/null
+++ b/lib/icinga/checkable-script.cpp
@@ -0,0 +1,28 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/checkable.hpp"
+#include "base/configobject.hpp"
+#include "base/dictionary.hpp"
+#include "base/function.hpp"
+#include "base/functionwrapper.hpp"
+#include "base/scriptframe.hpp"
+
+using namespace icinga;
+
+static void CheckableProcessCheckResult(const CheckResult::Ptr& cr)
+{
+ ScriptFrame *vframe = ScriptFrame::GetCurrentFrame();
+ Checkable::Ptr self = vframe->Self;
+ REQUIRE_NOT_NULL(self);
+ self->ProcessCheckResult(cr);
+}
+
+Object::Ptr Checkable::GetPrototype()
+{
+ static Dictionary::Ptr prototype = new Dictionary({
+ { "process_check_result", new Function("Checkable#process_check_result", CheckableProcessCheckResult, { "cr" }, false) }
+ });
+
+ return prototype;
+}
+
diff --git a/lib/icinga/checkable.cpp b/lib/icinga/checkable.cpp
new file mode 100644
index 0000000..ddf84cd
--- /dev/null
+++ b/lib/icinga/checkable.cpp
@@ -0,0 +1,322 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/checkable.hpp"
+#include "icinga/checkable-ti.cpp"
+#include "icinga/host.hpp"
+#include "icinga/service.hpp"
+#include "base/objectlock.hpp"
+#include "base/utility.hpp"
+#include "base/exception.hpp"
+#include "base/timer.hpp"
+#include <boost/thread/once.hpp>
+
+using namespace icinga;
+
+REGISTER_TYPE_WITH_PROTOTYPE(Checkable, Checkable::GetPrototype());
+INITIALIZE_ONCE(&Checkable::StaticInitialize);
+
+const std::map<String, int> Checkable::m_FlappingStateFilterMap ({
+ {"OK", FlappingStateFilterOk},
+ {"Warning", FlappingStateFilterWarning},
+ {"Critical", FlappingStateFilterCritical},
+ {"Unknown", FlappingStateFilterUnknown},
+ {"Up", FlappingStateFilterOk},
+ {"Down", FlappingStateFilterCritical},
+});
+
+boost::signals2::signal<void (const Checkable::Ptr&, const String&, const String&, AcknowledgementType, bool, bool, double, double, const MessageOrigin::Ptr&)> Checkable::OnAcknowledgementSet;
+boost::signals2::signal<void (const Checkable::Ptr&, const String&, double, const MessageOrigin::Ptr&)> Checkable::OnAcknowledgementCleared;
+boost::signals2::signal<void (const Checkable::Ptr&, double)> Checkable::OnFlappingChange;
+
+static Timer::Ptr l_CheckablesFireSuppressedNotifications;
+static Timer::Ptr l_CleanDeadlinedExecutions;
+
+thread_local std::function<void(const Value& commandLine, const ProcessResult&)> Checkable::ExecuteCommandProcessFinishedHandler;
+
+void Checkable::StaticInitialize()
+{
+ /* fixed downtime start */
+ Downtime::OnDowntimeStarted.connect([](const Downtime::Ptr& downtime) { Checkable::NotifyFixedDowntimeStart(downtime); });
+ /* flexible downtime start */
+ Downtime::OnDowntimeTriggered.connect([](const Downtime::Ptr& downtime) { Checkable::NotifyFlexibleDowntimeStart(downtime); });
+ /* fixed/flexible downtime end */
+ Downtime::OnDowntimeRemoved.connect([](const Downtime::Ptr& downtime) { Checkable::NotifyDowntimeEnd(downtime); });
+}
+
+Checkable::Checkable()
+{
+ SetSchedulingOffset(Utility::Random());
+}
+
+void Checkable::OnConfigLoaded()
+{
+ ObjectImpl<Checkable>::OnConfigLoaded();
+
+ SetFlappingIgnoreStatesFilter(FilterArrayToInt(GetFlappingIgnoreStates(), m_FlappingStateFilterMap, ~0));
+}
+
+void Checkable::OnAllConfigLoaded()
+{
+ ObjectImpl<Checkable>::OnAllConfigLoaded();
+
+ Endpoint::Ptr endpoint = GetCommandEndpoint();
+
+ if (endpoint) {
+ Zone::Ptr checkableZone = static_pointer_cast<Zone>(GetZone());
+
+ if (checkableZone) {
+ Zone::Ptr cmdZone = endpoint->GetZone();
+
+ if (cmdZone != checkableZone && cmdZone->GetParent() != checkableZone) {
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "command_endpoint" },
+ "Command endpoint must be in zone '" + checkableZone->GetName() + "' or in a direct child zone thereof."));
+ }
+ } else {
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "command_endpoint" },
+ "Checkable with command endpoint requires a zone. Please check the troubleshooting documentation."));
+ }
+ }
+}
+
+void Checkable::Start(bool runtimeCreated)
+{
+ double now = Utility::GetTime();
+
+ {
+ auto cr (GetLastCheckResult());
+
+ if (GetLastCheckStarted() > (cr ? cr->GetExecutionEnd() : 0.0)) {
+ SetNextCheck(GetLastCheckStarted());
+ }
+ }
+
+ if (GetNextCheck() < now + 60) {
+ double delta = std::min(GetCheckInterval(), 60.0);
+ delta *= (double)std::rand() / RAND_MAX;
+ SetNextCheck(now + delta);
+ }
+
+ ObjectImpl<Checkable>::Start(runtimeCreated);
+
+ static boost::once_flag once = BOOST_ONCE_INIT;
+
+ boost::call_once(once, []() {
+ l_CheckablesFireSuppressedNotifications = Timer::Create();
+ l_CheckablesFireSuppressedNotifications->SetInterval(5);
+ l_CheckablesFireSuppressedNotifications->OnTimerExpired.connect(&Checkable::FireSuppressedNotificationsTimer);
+ l_CheckablesFireSuppressedNotifications->Start();
+
+ l_CleanDeadlinedExecutions = Timer::Create();
+ l_CleanDeadlinedExecutions->SetInterval(300);
+ l_CleanDeadlinedExecutions->OnTimerExpired.connect(&Checkable::CleanDeadlinedExecutions);
+ l_CleanDeadlinedExecutions->Start();
+ });
+}
+
+void Checkable::AddGroup(const String& name)
+{
+ std::unique_lock<std::mutex> lock(m_CheckableMutex);
+
+ Array::Ptr groups;
+ auto *host = dynamic_cast<Host *>(this);
+
+ if (host)
+ groups = host->GetGroups();
+ else
+ groups = static_cast<Service *>(this)->GetGroups();
+
+ if (groups && groups->Contains(name))
+ return;
+
+ if (!groups)
+ groups = new Array();
+
+ groups->Add(name);
+}
+
+AcknowledgementType Checkable::GetAcknowledgement()
+{
+ auto avalue = static_cast<AcknowledgementType>(GetAcknowledgementRaw());
+
+ if (avalue != AcknowledgementNone) {
+ double expiry = GetAcknowledgementExpiry();
+
+ if (expiry != 0 && expiry < Utility::GetTime()) {
+ avalue = AcknowledgementNone;
+ ClearAcknowledgement("");
+ }
+ }
+
+ return avalue;
+}
+
+bool Checkable::IsAcknowledged() const
+{
+ return const_cast<Checkable *>(this)->GetAcknowledgement() != AcknowledgementNone;
+}
+
+void Checkable::AcknowledgeProblem(const String& author, const String& comment, AcknowledgementType type, bool notify, bool persistent, double changeTime, double expiry, const MessageOrigin::Ptr& origin)
+{
+ SetAcknowledgementRaw(type);
+ SetAcknowledgementExpiry(expiry);
+
+ if (notify && !IsPaused())
+ OnNotificationsRequested(this, NotificationAcknowledgement, GetLastCheckResult(), author, comment, nullptr);
+
+ Log(LogInformation, "Checkable")
+ << "Acknowledgement set for checkable '" << GetName() << "'.";
+
+ OnAcknowledgementSet(this, author, comment, type, notify, persistent, changeTime, expiry, origin);
+
+ SetAcknowledgementLastChange(changeTime);
+}
+
+void Checkable::ClearAcknowledgement(const String& removedBy, double changeTime, const MessageOrigin::Ptr& origin)
+{
+ ObjectLock oLock (this);
+
+ bool wasAcked = GetAcknowledgementRaw() != AcknowledgementNone;
+
+ SetAcknowledgementRaw(AcknowledgementNone);
+ SetAcknowledgementExpiry(0);
+
+ Log(LogInformation, "Checkable")
+ << "Acknowledgement cleared for checkable '" << GetName() << "'.";
+
+ if (wasAcked) {
+ OnAcknowledgementCleared(this, removedBy, changeTime, origin);
+
+ SetAcknowledgementLastChange(changeTime);
+ }
+}
+
+Endpoint::Ptr Checkable::GetCommandEndpoint() const
+{
+ return Endpoint::GetByName(GetCommandEndpointRaw());
+}
+
+int Checkable::GetSeverity() const
+{
+ /* overridden in Host/Service class. */
+ return 0;
+}
+
+bool Checkable::GetProblem() const
+{
+ auto cr (GetLastCheckResult());
+
+ return cr && !IsStateOK(cr->GetState());
+}
+
+bool Checkable::GetHandled() const
+{
+ return GetProblem() && (IsInDowntime() || IsAcknowledged());
+}
+
+Timestamp Checkable::GetNextUpdate() const
+{
+ auto cr (GetLastCheckResult());
+ double interval, latency;
+
+ // TODO: Document this behavior.
+ if (cr) {
+ interval = GetEnableActiveChecks() && GetProblem() && GetStateType() == StateTypeSoft ? GetRetryInterval() : GetCheckInterval();
+ latency = cr->GetExecutionEnd() - cr->GetScheduleStart();
+ } else {
+ interval = GetCheckInterval();
+ latency = 0.0;
+ }
+
+ return (GetEnableActiveChecks() ? GetNextCheck() : (cr ? cr->GetExecutionEnd() : Application::GetStartTime()) + interval) + interval + 2 * latency;
+}
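
Aside (illustration only, not part of the upstream patch): reading the return statement above, actively checked objects get next_update = next_check + interval + 2 * latency, while passively checked ones substitute the last execution end (or the application start time) for next_check and add the interval twice. A standalone sketch with assumed numbers:

// Standalone sketch of the next_update formula above, using assumed timestamps.
// "interval" is the retry interval while in a soft problem state, otherwise the
// check interval; "latency" is execution_end - schedule_start of the last result.
#include <cstdio>

int main()
{
	double nextCheck = 1000.0;   // assumed next scheduled check (epoch seconds)
	double interval  = 60.0;     // assumed check interval
	double latency   = 0.5;      // assumed latency of the last check result

	// active checks: next_check + interval + 2 * latency
	double activeNextUpdate = nextCheck + interval + 2 * latency;

	// passive checks: (last execution end + interval) + interval + 2 * latency
	double lastExecutionEnd = 900.0; // assumed
	double passiveNextUpdate = (lastExecutionEnd + interval) + interval + 2 * latency;

	std::printf("active: %g, passive: %g\n", activeNextUpdate, passiveNextUpdate); // 1061, 1021
	return 0;
}
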
+
+void Checkable::NotifyFixedDowntimeStart(const Downtime::Ptr& downtime)
+{
+ if (!downtime->GetFixed())
+ return;
+
+ NotifyDowntimeInternal(downtime);
+}
+
+void Checkable::NotifyFlexibleDowntimeStart(const Downtime::Ptr& downtime)
+{
+ if (downtime->GetFixed())
+ return;
+
+ NotifyDowntimeInternal(downtime);
+}
+
+void Checkable::NotifyDowntimeInternal(const Downtime::Ptr& downtime)
+{
+ Checkable::Ptr checkable = downtime->GetCheckable();
+
+ if (!checkable->IsPaused())
+ OnNotificationsRequested(checkable, NotificationDowntimeStart, checkable->GetLastCheckResult(), downtime->GetAuthor(), downtime->GetComment(), nullptr);
+}
+
+void Checkable::NotifyDowntimeEnd(const Downtime::Ptr& downtime)
+{
+ /* don't send notifications for downtimes which never triggered */
+ if (!downtime->IsTriggered())
+ return;
+
+ Checkable::Ptr checkable = downtime->GetCheckable();
+
+ if (!checkable->IsPaused())
+ OnNotificationsRequested(checkable, NotificationDowntimeEnd, checkable->GetLastCheckResult(), downtime->GetAuthor(), downtime->GetComment(), nullptr);
+}
+
+void Checkable::ValidateCheckInterval(const Lazy<double>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<Checkable>::ValidateCheckInterval(lvalue, utils);
+
+ if (lvalue() <= 0)
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "check_interval" }, "Interval must be greater than 0."));
+}
+
+void Checkable::ValidateRetryInterval(const Lazy<double>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<Checkable>::ValidateRetryInterval(lvalue, utils);
+
+ if (lvalue() <= 0)
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "retry_interval" }, "Interval must be greater than 0."));
+}
+
+void Checkable::ValidateMaxCheckAttempts(const Lazy<int>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<Checkable>::ValidateMaxCheckAttempts(lvalue, utils);
+
+ if (lvalue() <= 0)
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "max_check_attempts" }, "Value must be greater than 0."));
+}
+
+void Checkable::CleanDeadlinedExecutions(const Timer * const&)
+{
+ double now = Utility::GetTime();
+ Dictionary::Ptr executions;
+ Dictionary::Ptr execution;
+
+ for (auto& host : ConfigType::GetObjectsByType<Host>()) {
+ executions = host->GetExecutions();
+ if (executions) {
+ for (const String& key : executions->GetKeys()) {
+ execution = executions->Get(key);
+ if (execution->Contains("deadline") && now > execution->Get("deadline")) {
+ executions->Remove(key);
+ }
+ }
+ }
+ }
+
+ for (auto& service : ConfigType::GetObjectsByType<Service>()) {
+ executions = service->GetExecutions();
+ if (executions) {
+ for (const String& key : executions->GetKeys()) {
+ execution = executions->Get(key);
+ if (execution->Contains("deadline") && now > execution->Get("deadline")) {
+ executions->Remove(key);
+ }
+ }
+ }
+ }
+}
diff --git a/lib/icinga/checkable.hpp b/lib/icinga/checkable.hpp
new file mode 100644
index 0000000..3d48b14
--- /dev/null
+++ b/lib/icinga/checkable.hpp
@@ -0,0 +1,264 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CHECKABLE_H
+#define CHECKABLE_H
+
+#include "base/atomic.hpp"
+#include "base/timer.hpp"
+#include "base/process.hpp"
+#include "icinga/i2-icinga.hpp"
+#include "icinga/checkable-ti.hpp"
+#include "icinga/timeperiod.hpp"
+#include "icinga/notification.hpp"
+#include "icinga/comment.hpp"
+#include "icinga/downtime.hpp"
+#include "remote/endpoint.hpp"
+#include "remote/messageorigin.hpp"
+#include <condition_variable>
+#include <cstdint>
+#include <functional>
+#include <limits>
+
+namespace icinga
+{
+
+/**
+ * @ingroup icinga
+ */
+enum DependencyType
+{
+ DependencyState,
+ DependencyCheckExecution,
+ DependencyNotification
+};
+
+/**
+ * Checkable Types
+ *
+ * @ingroup icinga
+ */
+enum CheckableType
+{
+ CheckableHost,
+ CheckableService
+};
+
+/**
+ * @ingroup icinga
+ */
+enum FlappingStateFilter
+{
+ FlappingStateFilterOk = 1,
+ FlappingStateFilterWarning = 2,
+ FlappingStateFilterCritical = 4,
+ FlappingStateFilterUnknown = 8,
+};
+
+class CheckCommand;
+class EventCommand;
+class Dependency;
+
+/**
+ * An Icinga service.
+ *
+ * @ingroup icinga
+ */
+class Checkable : public ObjectImpl<Checkable>
+{
+public:
+ DECLARE_OBJECT(Checkable);
+ DECLARE_OBJECTNAME(Checkable);
+
+ static void StaticInitialize();
+ static thread_local std::function<void(const Value& commandLine, const ProcessResult&)> ExecuteCommandProcessFinishedHandler;
+
+ Checkable();
+
+ std::set<Checkable::Ptr> GetParents() const;
+ std::set<Checkable::Ptr> GetChildren() const;
+ std::set<Checkable::Ptr> GetAllChildren() const;
+
+ void AddGroup(const String& name);
+
+ bool IsReachable(DependencyType dt = DependencyState, intrusive_ptr<Dependency> *failedDependency = nullptr, int rstack = 0) const;
+
+ AcknowledgementType GetAcknowledgement();
+
+ void AcknowledgeProblem(const String& author, const String& comment, AcknowledgementType type, bool notify = true, bool persistent = false, double changeTime = Utility::GetTime(), double expiry = 0, const MessageOrigin::Ptr& origin = nullptr);
+ void ClearAcknowledgement(const String& removedBy, double changeTime = Utility::GetTime(), const MessageOrigin::Ptr& origin = nullptr);
+
+ int GetSeverity() const override;
+ bool GetProblem() const override;
+ bool GetHandled() const override;
+ Timestamp GetNextUpdate() const override;
+
+ /* Checks */
+ intrusive_ptr<CheckCommand> GetCheckCommand() const;
+ TimePeriod::Ptr GetCheckPeriod() const;
+
+ long GetSchedulingOffset();
+ void SetSchedulingOffset(long offset);
+
+ void UpdateNextCheck(const MessageOrigin::Ptr& origin = nullptr);
+
+ bool HasBeenChecked() const;
+ virtual bool IsStateOK(ServiceState state) const = 0;
+
+ double GetLastCheck() const final;
+
+ virtual void SaveLastState(ServiceState state, double timestamp) = 0;
+
+ static void UpdateStatistics(const CheckResult::Ptr& cr, CheckableType type);
+
+ void ExecuteRemoteCheck(const Dictionary::Ptr& resolvedMacros = nullptr);
+ void ExecuteCheck();
+ enum class ProcessingResult
+ {
+ Ok,
+ NoCheckResult,
+ CheckableInactive,
+ NewerCheckResultPresent,
+ };
+ ProcessingResult ProcessCheckResult(const CheckResult::Ptr& cr, const MessageOrigin::Ptr& origin = nullptr);
+
+ Endpoint::Ptr GetCommandEndpoint() const;
+
+ static boost::signals2::signal<void (const Checkable::Ptr&, const CheckResult::Ptr&, const MessageOrigin::Ptr&)> OnNewCheckResult;
+ static boost::signals2::signal<void (const Checkable::Ptr&, const CheckResult::Ptr&, StateType, const MessageOrigin::Ptr&)> OnStateChange;
+ static boost::signals2::signal<void (const Checkable::Ptr&, const CheckResult::Ptr&, std::set<Checkable::Ptr>, const MessageOrigin::Ptr&)> OnReachabilityChanged;
+ static boost::signals2::signal<void (const Checkable::Ptr&, NotificationType, const CheckResult::Ptr&,
+ const String&, const String&, const MessageOrigin::Ptr&)> OnNotificationsRequested;
+ static boost::signals2::signal<void (const Notification::Ptr&, const Checkable::Ptr&, const User::Ptr&,
+ const NotificationType&, const CheckResult::Ptr&, const String&, const String&, const String&,
+ const MessageOrigin::Ptr&)> OnNotificationSentToUser;
+ static boost::signals2::signal<void (const Notification::Ptr&, const Checkable::Ptr&, const std::set<User::Ptr>&,
+ const NotificationType&, const CheckResult::Ptr&, const String&,
+ const String&, const MessageOrigin::Ptr&)> OnNotificationSentToAllUsers;
+ static boost::signals2::signal<void (const Checkable::Ptr&, const String&, const String&, AcknowledgementType,
+ bool, bool, double, double, const MessageOrigin::Ptr&)> OnAcknowledgementSet;
+ static boost::signals2::signal<void (const Checkable::Ptr&, const String&, double, const MessageOrigin::Ptr&)> OnAcknowledgementCleared;
+ static boost::signals2::signal<void (const Checkable::Ptr&, double)> OnFlappingChange;
+ static boost::signals2::signal<void (const Checkable::Ptr&)> OnNextCheckUpdated;
+ static boost::signals2::signal<void (const Checkable::Ptr&)> OnEventCommandExecuted;
+
+ static Atomic<uint_fast64_t> CurrentConcurrentChecks;
+
+ /* Downtimes */
+ int GetDowntimeDepth() const final;
+
+ void RemoveAllDowntimes();
+ void TriggerDowntimes(double triggerTime);
+ bool IsInDowntime() const;
+ bool IsAcknowledged() const;
+
+ std::set<Downtime::Ptr> GetDowntimes() const;
+ void RegisterDowntime(const Downtime::Ptr& downtime);
+ void UnregisterDowntime(const Downtime::Ptr& downtime);
+
+ /* Comments */
+ void RemoveAllComments();
+ void RemoveAckComments(const String& removedBy = String(), double createdBefore = std::numeric_limits<double>::max());
+
+ std::set<Comment::Ptr> GetComments() const;
+ Comment::Ptr GetLastComment() const;
+ void RegisterComment(const Comment::Ptr& comment);
+ void UnregisterComment(const Comment::Ptr& comment);
+
+ /* Notifications */
+ void SendNotifications(NotificationType type, const CheckResult::Ptr& cr, const String& author = "", const String& text = "");
+
+ std::set<Notification::Ptr> GetNotifications() const;
+ void RegisterNotification(const Notification::Ptr& notification);
+ void UnregisterNotification(const Notification::Ptr& notification);
+
+ void ResetNotificationNumbers();
+
+ /* Event Handler */
+ void ExecuteEventHandler(const Dictionary::Ptr& resolvedMacros = nullptr,
+ bool useResolvedMacros = false);
+
+ intrusive_ptr<EventCommand> GetEventCommand() const;
+
+ /* Flapping Detection */
+ bool IsFlapping() const;
+
+ /* Dependencies */
+ void AddDependency(const intrusive_ptr<Dependency>& dep);
+ void RemoveDependency(const intrusive_ptr<Dependency>& dep);
+ std::vector<intrusive_ptr<Dependency> > GetDependencies() const;
+
+ void AddReverseDependency(const intrusive_ptr<Dependency>& dep);
+ void RemoveReverseDependency(const intrusive_ptr<Dependency>& dep);
+ std::vector<intrusive_ptr<Dependency> > GetReverseDependencies() const;
+
+ void ValidateCheckInterval(const Lazy<double>& lvalue, const ValidationUtils& value) final;
+ void ValidateRetryInterval(const Lazy<double>& lvalue, const ValidationUtils& value) final;
+ void ValidateMaxCheckAttempts(const Lazy<int>& lvalue, const ValidationUtils& value) final;
+
+ bool NotificationReasonApplies(NotificationType type);
+ bool NotificationReasonSuppressed(NotificationType type);
+ bool IsLikelyToBeCheckedSoon();
+
+ void FireSuppressedNotifications();
+
+ static void IncreasePendingChecks();
+ static void DecreasePendingChecks();
+ static int GetPendingChecks();
+ static void AquirePendingCheckSlot(int maxPendingChecks);
+
+ static Object::Ptr GetPrototype();
+
+protected:
+ void Start(bool runtimeCreated) override;
+ void OnConfigLoaded() override;
+ void OnAllConfigLoaded() override;
+
+private:
+ mutable std::mutex m_CheckableMutex;
+ bool m_CheckRunning{false};
+ long m_SchedulingOffset;
+
+ static std::mutex m_StatsMutex;
+ static int m_PendingChecks;
+ static std::condition_variable m_PendingChecksCV;
+
+ /* Downtimes */
+ std::set<Downtime::Ptr> m_Downtimes;
+ mutable std::mutex m_DowntimeMutex;
+
+ static void NotifyFixedDowntimeStart(const Downtime::Ptr& downtime);
+ static void NotifyFlexibleDowntimeStart(const Downtime::Ptr& downtime);
+ static void NotifyDowntimeInternal(const Downtime::Ptr& downtime);
+
+ static void NotifyDowntimeEnd(const Downtime::Ptr& downtime);
+
+ static void FireSuppressedNotificationsTimer(const Timer * const&);
+ static void CleanDeadlinedExecutions(const Timer * const&);
+
+ /* Comments */
+ std::set<Comment::Ptr> m_Comments;
+ mutable std::mutex m_CommentMutex;
+
+ /* Notifications */
+ std::set<Notification::Ptr> m_Notifications;
+ mutable std::mutex m_NotificationMutex;
+
+ /* Dependencies */
+ mutable std::mutex m_DependencyMutex;
+ std::set<intrusive_ptr<Dependency> > m_Dependencies;
+ std::set<intrusive_ptr<Dependency> > m_ReverseDependencies;
+
+ void GetAllChildrenInternal(std::set<Checkable::Ptr>& children, int level = 0) const;
+
+ /* Flapping */
+ static const std::map<String, int> m_FlappingStateFilterMap;
+
+ void UpdateFlappingStatus(ServiceState newState);
+ static int ServiceStateToFlappingFilter(ServiceState state);
+};
+
+}
+
+#endif /* CHECKABLE_H */
+
+#include "icinga/dependency.hpp"
diff --git a/lib/icinga/checkable.ti b/lib/icinga/checkable.ti
new file mode 100644
index 0000000..6f7a5da
--- /dev/null
+++ b/lib/icinga/checkable.ti
@@ -0,0 +1,192 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/icingaapplication.hpp"
+#include "icinga/customvarobject.hpp"
+#include "base/array.hpp"
+#impl_include "icinga/checkcommand.hpp"
+#impl_include "icinga/eventcommand.hpp"
+
+library icinga;
+
+namespace icinga
+{
+
+code {{{
+/**
+ * The acknowledgement type of a service.
+ *
+ * @ingroup icinga
+ */
+enum AcknowledgementType
+{
+ AcknowledgementNone = 0,
+ AcknowledgementNormal = 1,
+ AcknowledgementSticky = 2
+};
+}}}
+
+abstract class Checkable : CustomVarObject
+{
+ [config, required, navigation] name(CheckCommand) check_command (CheckCommandRaw) {
+ navigate {{{
+ return CheckCommand::GetByName(GetCheckCommandRaw());
+ }}}
+ };
+ [config] int max_check_attempts {
+ default {{{ return 3; }}}
+ };
+ [config, navigation] name(TimePeriod) check_period (CheckPeriodRaw) {
+ navigate {{{
+ return TimePeriod::GetByName(GetCheckPeriodRaw());
+ }}}
+ };
+ [config] Value check_timeout;
+ [config] double check_interval {
+ default {{{ return 5 * 60; }}}
+ };
+ [config] double retry_interval {
+ default {{{ return 60; }}}
+ };
+ [config, navigation] name(EventCommand) event_command (EventCommandRaw) {
+ navigate {{{
+ return EventCommand::GetByName(GetEventCommandRaw());
+ }}}
+ };
+ [config] bool volatile;
+
+ [config] bool enable_active_checks {
+ default {{{ return true; }}}
+ };
+ [config] bool enable_passive_checks {
+ default {{{ return true; }}}
+ };
+ [config] bool enable_event_handler {
+ default {{{ return true; }}}
+ };
+ [config] bool enable_notifications {
+ default {{{ return true; }}}
+ };
+ [config] bool enable_flapping {
+ default {{{ return false; }}}
+ };
+ [config] bool enable_perfdata {
+ default {{{ return true; }}}
+ };
+
+ [config] array(String) flapping_ignore_states;
+ [no_user_view, no_user_modify] int flapping_ignore_states_filter_real (FlappingIgnoreStatesFilter);
+
+ [config, deprecated] double flapping_threshold;
+
+ [config] double flapping_threshold_low {
+ default {{{ return 25; }}}
+ };
+
+ [config] double flapping_threshold_high{
+ default {{{ return 30; }}}
+ };
+
+ [config] String notes;
+ [config] String notes_url;
+ [config] String action_url;
+ [config] String icon_image;
+ [config] String icon_image_alt;
+
+ [state] Timestamp next_check;
+ [state, no_user_view, no_user_modify] Timestamp last_check_started;
+
+ [state] int check_attempt {
+ default {{{ return 1; }}}
+ };
+ [state, enum, no_user_view, no_user_modify] ServiceState state_raw {
+ default {{{ return ServiceUnknown; }}}
+ };
+ [state, enum] StateType state_type {
+ default {{{ return StateTypeSoft; }}}
+ };
+ [state, enum, no_user_view, no_user_modify] ServiceState last_state_raw {
+ default {{{ return ServiceUnknown; }}}
+ };
+ [state, enum, no_user_view, no_user_modify] ServiceState last_hard_state_raw {
+ default {{{ return ServiceUnknown; }}}
+ };
+ [state, no_user_view, no_user_modify] "unsigned short" last_hard_states_raw {
+ default {{{ return /* current */ 99 * 100 + /* previous */ 99; }}}
+ };
+ [state, no_user_view, no_user_modify] "unsigned short" last_soft_states_raw {
+ default {{{ return /* current */ 99 * 100 + /* previous */ 99; }}}
+ };
+ [state, enum] StateType last_state_type {
+ default {{{ return StateTypeSoft; }}}
+ };
+ [state] bool last_reachable {
+ default {{{ return true; }}}
+ };
+ [state] CheckResult::Ptr last_check_result;
+ [state] Timestamp last_state_change {
+ default {{{ return Application::GetStartTime(); }}}
+ };
+ [state] Timestamp last_hard_state_change {
+ default {{{ return Application::GetStartTime(); }}}
+ };
+ [state] Timestamp last_state_unreachable;
+
+ [state] Timestamp previous_state_change {
+ default {{{ return Application::GetStartTime(); }}}
+ };
+ [no_storage] int severity {
+ get;
+ };
+ [no_storage] bool problem {
+ get;
+ };
+ [no_storage] bool handled {
+ get;
+ };
+ [no_storage] Timestamp next_update {
+ get;
+ };
+
+ [state] bool force_next_check;
+ [state] int acknowledgement (AcknowledgementRaw) {
+ default {{{ return AcknowledgementNone; }}}
+ };
+ [state] Timestamp acknowledgement_expiry;
+ [state] Timestamp acknowledgement_last_change;
+ [state] bool force_next_notification;
+ [no_storage] Timestamp last_check {
+ get;
+ };
+ [no_storage] int downtime_depth {
+ get;
+ };
+
+ [state] double flapping_current {
+ default {{{ return 0; }}}
+ };
+ [state] Timestamp flapping_last_change;
+
+ [state, enum, no_user_view, no_user_modify] ServiceState flapping_last_state {
+ default {{{ return ServiceUnknown; }}}
+ };
+ [state, no_user_view, no_user_modify] int flapping_buffer;
+ [state, no_user_view, no_user_modify] int flapping_index;
+ [state, protected] bool flapping;
+ [state, no_user_view, no_user_modify] int suppressed_notifications {
+ default {{{ return 0; }}}
+ };
+ [state, enum, no_user_view, no_user_modify] ServiceState state_before_suppression {
+ default {{{ return ServiceOK; }}}
+ };
+
+ [config, navigation] name(Endpoint) command_endpoint (CommandEndpointRaw) {
+ navigate {{{
+ return Endpoint::GetByName(GetCommandEndpointRaw());
+ }}}
+ };
+
+ [state, no_user_modify] Dictionary::Ptr executions;
+ [state, no_user_view, no_user_modify] Dictionary::Ptr pending_executions;
+};
+
+}
diff --git a/lib/icinga/checkcommand.cpp b/lib/icinga/checkcommand.cpp
new file mode 100644
index 0000000..fb8032a
--- /dev/null
+++ b/lib/icinga/checkcommand.cpp
@@ -0,0 +1,22 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/checkcommand.hpp"
+#include "icinga/checkcommand-ti.cpp"
+#include "base/configtype.hpp"
+
+using namespace icinga;
+
+REGISTER_TYPE(CheckCommand);
+
+thread_local CheckCommand::Ptr CheckCommand::ExecuteOverride;
+
+void CheckCommand::Execute(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
+{
+ GetExecute()->Invoke({
+ checkable,
+ cr,
+ resolvedMacros,
+ useResolvedMacros
+ });
+}
diff --git a/lib/icinga/checkcommand.hpp b/lib/icinga/checkcommand.hpp
new file mode 100644
index 0000000..c654cf9
--- /dev/null
+++ b/lib/icinga/checkcommand.hpp
@@ -0,0 +1,32 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CHECKCOMMAND_H
+#define CHECKCOMMAND_H
+
+#include "icinga/checkcommand-ti.hpp"
+#include "icinga/checkable.hpp"
+
+namespace icinga
+{
+
+/**
+ * A command.
+ *
+ * @ingroup icinga
+ */
+class CheckCommand final : public ObjectImpl<CheckCommand>
+{
+public:
+ DECLARE_OBJECT(CheckCommand);
+ DECLARE_OBJECTNAME(CheckCommand);
+
+ static thread_local CheckCommand::Ptr ExecuteOverride;
+
+ void Execute(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros = nullptr,
+ bool useResolvedMacros = false);
+};
+
+}
+
+#endif /* CHECKCOMMAND_H */
diff --git a/lib/icinga/checkcommand.ti b/lib/icinga/checkcommand.ti
new file mode 100644
index 0000000..c211f0f
--- /dev/null
+++ b/lib/icinga/checkcommand.ti
@@ -0,0 +1,14 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/command.hpp"
+
+library icinga;
+
+namespace icinga
+{
+
+class CheckCommand : Command
+{
+};
+
+}
diff --git a/lib/icinga/checkresult.cpp b/lib/icinga/checkresult.cpp
new file mode 100644
index 0000000..07f7219
--- /dev/null
+++ b/lib/icinga/checkresult.cpp
@@ -0,0 +1,34 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/checkresult.hpp"
+#include "icinga/checkresult-ti.cpp"
+#include "base/scriptglobal.hpp"
+
+using namespace icinga;
+
+REGISTER_TYPE(CheckResult);
+
+INITIALIZE_ONCE([]() {
+ ScriptGlobal::Set("Icinga.ServiceOK", ServiceOK);
+ ScriptGlobal::Set("Icinga.ServiceWarning", ServiceWarning);
+ ScriptGlobal::Set("Icinga.ServiceCritical", ServiceCritical);
+ ScriptGlobal::Set("Icinga.ServiceUnknown", ServiceUnknown);
+
+ ScriptGlobal::Set("Icinga.HostUp", HostUp);
+ ScriptGlobal::Set("Icinga.HostDown", HostDown);
+})
+
+double CheckResult::CalculateExecutionTime() const
+{
+ return GetExecutionEnd() - GetExecutionStart();
+}
+
+double CheckResult::CalculateLatency() const
+{
+ double latency = (GetScheduleEnd() - GetScheduleStart()) - CalculateExecutionTime();
+
+ if (latency < 0)
+ latency = 0;
+
+ return latency;
+}
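
Aside (illustration only, not part of the upstream patch): the latency is the part of the scheduling window not spent executing the check, clamped at zero. A standalone sketch with assumed timestamps:

// Standalone sketch of the latency calculation above, with assumed timestamps.
#include <algorithm>
#include <cstdio>

int main()
{
	double scheduleStart = 100.0, scheduleEnd = 103.5;    // assumed scheduling window
	double executionStart = 101.0, executionEnd = 103.0;  // assumed plugin runtime

	double executionTime = executionEnd - executionStart;                              // 2.0
	double latency = std::max(0.0, (scheduleEnd - scheduleStart) - executionTime);     // 1.5

	std::printf("execution=%g latency=%g\n", executionTime, latency);
	return 0;
}
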
diff --git a/lib/icinga/checkresult.hpp b/lib/icinga/checkresult.hpp
new file mode 100644
index 0000000..ac54d6b
--- /dev/null
+++ b/lib/icinga/checkresult.hpp
@@ -0,0 +1,28 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CHECKRESULT_H
+#define CHECKRESULT_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/checkresult-ti.hpp"
+
+namespace icinga
+{
+
+/**
+ * A check result.
+ *
+ * @ingroup icinga
+ */
+class CheckResult final : public ObjectImpl<CheckResult>
+{
+public:
+ DECLARE_OBJECT(CheckResult);
+
+ double CalculateExecutionTime() const;
+ double CalculateLatency() const;
+};
+
+}
+
+#endif /* CHECKRESULT_H */
diff --git a/lib/icinga/checkresult.ti b/lib/icinga/checkresult.ti
new file mode 100644
index 0000000..09312dc
--- /dev/null
+++ b/lib/icinga/checkresult.ti
@@ -0,0 +1,72 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+library icinga;
+
+namespace icinga
+{
+
+code {{{
+/**
+ * The state of a host.
+ *
+ * @ingroup icinga
+ */
+enum HostState
+{
+ HostUp = 0,
+ HostDown = 1
+};
+
+/**
+ * The state of a service.
+ *
+ * @ingroup icinga
+ */
+enum ServiceState
+{
+ ServiceOK = 0,
+ ServiceWarning = 1,
+ ServiceCritical = 2,
+ ServiceUnknown = 3
+};
+
+/**
+ * The state type of a host or service.
+ *
+ * @ingroup icinga
+ */
+enum StateType
+{
+ StateTypeSoft = 0,
+ StateTypeHard = 1
+};
+}}}
+
+class CheckResult
+{
+ [state] Timestamp schedule_start;
+ [state] Timestamp schedule_end;
+ [state] Timestamp execution_start;
+ [state] Timestamp execution_end;
+
+ [state] Value command;
+ [state] int exit_status;
+
+ [state, enum] ServiceState "state";
+ [state, enum] ServiceState previous_hard_state;
+ [state] String output;
+ [state] Array::Ptr performance_data;
+
+ [state] bool active {
+ default {{{ return true; }}}
+ };
+
+ [state] String check_source;
+ [state] String scheduling_source;
+ [state] double ttl;
+
+ [state] Dictionary::Ptr vars_before;
+ [state] Dictionary::Ptr vars_after;
+};
+
+}
diff --git a/lib/icinga/cib.cpp b/lib/icinga/cib.cpp
new file mode 100644
index 0000000..ce71a59
--- /dev/null
+++ b/lib/icinga/cib.cpp
@@ -0,0 +1,346 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/cib.hpp"
+#include "icinga/host.hpp"
+#include "icinga/service.hpp"
+#include "icinga/clusterevents.hpp"
+#include "base/application.hpp"
+#include "base/objectlock.hpp"
+#include "base/utility.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/configtype.hpp"
+#include "base/statsfunction.hpp"
+
+using namespace icinga;
+
+RingBuffer CIB::m_ActiveHostChecksStatistics(15 * 60);
+RingBuffer CIB::m_ActiveServiceChecksStatistics(15 * 60);
+RingBuffer CIB::m_PassiveHostChecksStatistics(15 * 60);
+RingBuffer CIB::m_PassiveServiceChecksStatistics(15 * 60);
+
+void CIB::UpdateActiveHostChecksStatistics(long tv, int num)
+{
+ m_ActiveHostChecksStatistics.InsertValue(tv, num);
+}
+
+void CIB::UpdateActiveServiceChecksStatistics(long tv, int num)
+{
+ m_ActiveServiceChecksStatistics.InsertValue(tv, num);
+}
+
+int CIB::GetActiveHostChecksStatistics(long timespan)
+{
+ return m_ActiveHostChecksStatistics.UpdateAndGetValues(Utility::GetTime(), timespan);
+}
+
+int CIB::GetActiveServiceChecksStatistics(long timespan)
+{
+ return m_ActiveServiceChecksStatistics.UpdateAndGetValues(Utility::GetTime(), timespan);
+}
+
+void CIB::UpdatePassiveHostChecksStatistics(long tv, int num)
+{
+ m_PassiveHostChecksStatistics.InsertValue(tv, num);
+}
+
+void CIB::UpdatePassiveServiceChecksStatistics(long tv, int num)
+{
+ m_PassiveServiceChecksStatistics.InsertValue(tv, num);
+}
+
+int CIB::GetPassiveHostChecksStatistics(long timespan)
+{
+ return m_PassiveHostChecksStatistics.UpdateAndGetValues(Utility::GetTime(), timespan);
+}
+
+int CIB::GetPassiveServiceChecksStatistics(long timespan)
+{
+ return m_PassiveServiceChecksStatistics.UpdateAndGetValues(Utility::GetTime(), timespan);
+}
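
Aside (sketch only, not part of the upstream patch): the four counters above are time-bucketed ring buffers; writers insert the number of checks executed at a given timestamp and readers sum the values over a trailing timespan. A minimal sketch of how a caller might use the active host check counter, assuming the Icinga 2 headers and build environment:

// Sketch only: feed and read the active host check counter declared above.
#include "icinga/cib.hpp"
#include "base/utility.hpp"

using namespace icinga;

static void ExampleUsage()
{
	long now = (long)Utility::GetTime();

	CIB::UpdateActiveHostChecksStatistics(now, 1);            // one active host check just finished
	int lastMinute = CIB::GetActiveHostChecksStatistics(60);  // checks during the last 60 seconds

	(void)lastMinute;
}
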
+
+CheckableCheckStatistics CIB::CalculateHostCheckStats()
+{
+ double min_latency = -1, max_latency = 0, sum_latency = 0;
+ int count_latency = 0;
+ double min_execution_time = -1, max_execution_time = 0, sum_execution_time = 0;
+ int count_execution_time = 0;
+ bool checkresult = false;
+
+ for (const Host::Ptr& host : ConfigType::GetObjectsByType<Host>()) {
+ ObjectLock olock(host);
+
+ CheckResult::Ptr cr = host->GetLastCheckResult();
+
+ if (!cr)
+ continue;
+
+ /* set to true, we have a checkresult */
+ checkresult = true;
+
+ /* latency */
+ double latency = cr->CalculateLatency();
+
+ if (min_latency == -1 || latency < min_latency)
+ min_latency = latency;
+
+ if (latency > max_latency)
+ max_latency = latency;
+
+ sum_latency += latency;
+ count_latency++;
+
+ /* execution_time */
+ double execution_time = cr->CalculateExecutionTime();
+
+ if (min_execution_time == -1 || execution_time < min_execution_time)
+ min_execution_time = execution_time;
+
+ if (execution_time > max_execution_time)
+ max_execution_time = execution_time;
+
+ sum_execution_time += execution_time;
+ count_execution_time++;
+ }
+
+ if (!checkresult) {
+ min_latency = 0;
+ min_execution_time = 0;
+ }
+
+ CheckableCheckStatistics ccs;
+
+ ccs.min_latency = min_latency;
+ ccs.max_latency = max_latency;
+ ccs.avg_latency = sum_latency / count_latency;
+ ccs.min_execution_time = min_execution_time;
+ ccs.max_execution_time = max_execution_time;
+ ccs.avg_execution_time = sum_execution_time / count_execution_time;
+
+ return ccs;
+}
+
+CheckableCheckStatistics CIB::CalculateServiceCheckStats()
+{
+ double min_latency = -1, max_latency = 0, sum_latency = 0;
+ int count_latency = 0;
+ double min_execution_time = -1, max_execution_time = 0, sum_execution_time = 0;
+ int count_execution_time = 0;
+ bool checkresult = false;
+
+ for (const Service::Ptr& service : ConfigType::GetObjectsByType<Service>()) {
+ ObjectLock olock(service);
+
+ CheckResult::Ptr cr = service->GetLastCheckResult();
+
+ if (!cr)
+ continue;
+
+ /* set to true, we have a checkresult */
+ checkresult = true;
+
+ /* latency */
+ double latency = cr->CalculateLatency();
+
+ if (min_latency == -1 || latency < min_latency)
+ min_latency = latency;
+
+ if (latency > max_latency)
+ max_latency = latency;
+
+ sum_latency += latency;
+ count_latency++;
+
+ /* execution_time */
+ double execution_time = cr->CalculateExecutionTime();
+
+ if (min_execution_time == -1 || execution_time < min_execution_time)
+ min_execution_time = execution_time;
+
+ if (execution_time > max_execution_time)
+ max_execution_time = execution_time;
+
+ sum_execution_time += execution_time;
+ count_execution_time++;
+ }
+
+ if (!checkresult) {
+ min_latency = 0;
+ min_execution_time = 0;
+ }
+
+ CheckableCheckStatistics ccs;
+
+ ccs.min_latency = min_latency;
+ ccs.max_latency = max_latency;
+ ccs.avg_latency = sum_latency / count_latency;
+ ccs.min_execution_time = min_execution_time;
+ ccs.max_execution_time = max_execution_time;
+ ccs.avg_execution_time = sum_execution_time / count_execution_time;
+
+ return ccs;
+}
+
+ServiceStatistics CIB::CalculateServiceStats()
+{
+ ServiceStatistics ss = {};
+
+ for (const Service::Ptr& service : ConfigType::GetObjectsByType<Service>()) {
+ ObjectLock olock(service);
+
+ if (service->GetState() == ServiceOK)
+ ss.services_ok++;
+ if (service->GetState() == ServiceWarning)
+ ss.services_warning++;
+ if (service->GetState() == ServiceCritical)
+ ss.services_critical++;
+ if (service->GetState() == ServiceUnknown)
+ ss.services_unknown++;
+
+ CheckResult::Ptr cr = service->GetLastCheckResult();
+
+ if (!cr)
+ ss.services_pending++;
+
+ if (!service->IsReachable())
+ ss.services_unreachable++;
+
+ if (service->IsFlapping())
+ ss.services_flapping++;
+ if (service->IsInDowntime())
+ ss.services_in_downtime++;
+ if (service->IsAcknowledged())
+ ss.services_acknowledged++;
+
+ if (service->GetHandled())
+ ss.services_handled++;
+ if (service->GetProblem())
+ ss.services_problem++;
+ }
+
+ return ss;
+}
+
+HostStatistics CIB::CalculateHostStats()
+{
+ HostStatistics hs = {};
+
+ for (const Host::Ptr& host : ConfigType::GetObjectsByType<Host>()) {
+ ObjectLock olock(host);
+
+ if (host->IsReachable()) {
+ if (host->GetState() == HostUp)
+ hs.hosts_up++;
+ if (host->GetState() == HostDown)
+ hs.hosts_down++;
+ } else
+ hs.hosts_unreachable++;
+
+ if (!host->GetLastCheckResult())
+ hs.hosts_pending++;
+
+ if (host->IsFlapping())
+ hs.hosts_flapping++;
+ if (host->IsInDowntime())
+ hs.hosts_in_downtime++;
+ if (host->IsAcknowledged())
+ hs.hosts_acknowledged++;
+
+ if (host->GetHandled())
+ hs.hosts_handled++;
+ if (host->GetProblem())
+ hs.hosts_problem++;
+ }
+
+ return hs;
+}
+
+/*
+ * 'perfdata' is a flat array of performance data values
+ * 'status' dictionary can contain multiple levels of dictionaries
+ */
+std::pair<Dictionary::Ptr, Array::Ptr> CIB::GetFeatureStats()
+{
+ Dictionary::Ptr status = new Dictionary();
+ Array::Ptr perfdata = new Array();
+
+ Namespace::Ptr statsFunctions = ScriptGlobal::Get("StatsFunctions", &Empty);
+
+ if (statsFunctions) {
+ ObjectLock olock(statsFunctions);
+
+ for (const Namespace::Pair& kv : statsFunctions)
+ static_cast<Function::Ptr>(kv.second.Val)->Invoke({ status, perfdata });
+ }
+
+ return std::make_pair(status, perfdata);
+}
+
+REGISTER_STATSFUNCTION(CIB, &CIB::StatsFunc);
+
+void CIB::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata) {
+ double interval = Utility::GetTime() - Application::GetStartTime();
+
+ if (interval > 60)
+ interval = 60;
+
+ status->Set("active_host_checks", GetActiveHostChecksStatistics(interval) / interval);
+ status->Set("passive_host_checks", GetPassiveHostChecksStatistics(interval) / interval);
+ status->Set("active_host_checks_1min", GetActiveHostChecksStatistics(60));
+ status->Set("passive_host_checks_1min", GetPassiveHostChecksStatistics(60));
+ status->Set("active_host_checks_5min", GetActiveHostChecksStatistics(60 * 5));
+ status->Set("passive_host_checks_5min", GetPassiveHostChecksStatistics(60 * 5));
+ status->Set("active_host_checks_15min", GetActiveHostChecksStatistics(60 * 15));
+ status->Set("passive_host_checks_15min", GetPassiveHostChecksStatistics(60 * 15));
+
+ status->Set("active_service_checks", GetActiveServiceChecksStatistics(interval) / interval);
+ status->Set("passive_service_checks", GetPassiveServiceChecksStatistics(interval) / interval);
+ status->Set("active_service_checks_1min", GetActiveServiceChecksStatistics(60));
+ status->Set("passive_service_checks_1min", GetPassiveServiceChecksStatistics(60));
+ status->Set("active_service_checks_5min", GetActiveServiceChecksStatistics(60 * 5));
+ status->Set("passive_service_checks_5min", GetPassiveServiceChecksStatistics(60 * 5));
+ status->Set("active_service_checks_15min", GetActiveServiceChecksStatistics(60 * 15));
+ status->Set("passive_service_checks_15min", GetPassiveServiceChecksStatistics(60 * 15));
+
+ // Checker related stats
+ status->Set("remote_check_queue", ClusterEvents::GetCheckRequestQueueSize());
+ status->Set("current_pending_callbacks", Application::GetTP().GetPending());
+ status->Set("current_concurrent_checks", Checkable::CurrentConcurrentChecks.load());
+
+ CheckableCheckStatistics scs = CalculateServiceCheckStats();
+
+ status->Set("min_latency", scs.min_latency);
+ status->Set("max_latency", scs.max_latency);
+ status->Set("avg_latency", scs.avg_latency);
+ status->Set("min_execution_time", scs.min_execution_time);
+ status->Set("max_execution_time", scs.max_execution_time);
+ status->Set("avg_execution_time", scs.avg_execution_time);
+
+ ServiceStatistics ss = CalculateServiceStats();
+
+ status->Set("num_services_ok", ss.services_ok);
+ status->Set("num_services_warning", ss.services_warning);
+ status->Set("num_services_critical", ss.services_critical);
+ status->Set("num_services_unknown", ss.services_unknown);
+ status->Set("num_services_pending", ss.services_pending);
+ status->Set("num_services_unreachable", ss.services_unreachable);
+ status->Set("num_services_flapping", ss.services_flapping);
+ status->Set("num_services_in_downtime", ss.services_in_downtime);
+ status->Set("num_services_acknowledged", ss.services_acknowledged);
+ status->Set("num_services_handled", ss.services_handled);
+ status->Set("num_services_problem", ss.services_problem);
+
+ double uptime = Application::GetUptime();
+ status->Set("uptime", uptime);
+
+ HostStatistics hs = CalculateHostStats();
+
+ status->Set("num_hosts_up", hs.hosts_up);
+ status->Set("num_hosts_down", hs.hosts_down);
+ status->Set("num_hosts_pending", hs.hosts_pending);
+ status->Set("num_hosts_unreachable", hs.hosts_unreachable);
+ status->Set("num_hosts_flapping", hs.hosts_flapping);
+ status->Set("num_hosts_in_downtime", hs.hosts_in_downtime);
+ status->Set("num_hosts_acknowledged", hs.hosts_acknowledged);
+ status->Set("num_hosts_handled", hs.hosts_handled);
+ status->Set("num_hosts_problem", hs.hosts_problem);
+}
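
Aside (illustration only, not part of the upstream patch): the unsuffixed per-second rates above are normalized over the process uptime capped at 60 seconds, so shortly after startup the rate is averaged over whatever uptime exists rather than a full minute. A standalone sketch of that clamping:

// Standalone sketch of the rate normalization used above; the counter values
// are assumed to be the checks observed during the (capped) interval.
#include <algorithm>
#include <cstdio>

static double CheckRate(int checksInInterval, double uptimeSeconds)
{
	double interval = std::min(uptimeSeconds, 60.0); // never average over more than one minute
	return checksInInterval / interval;
}

int main()
{
	std::printf("%g\n", CheckRate(30, 20.0));   // 1.5 checks/s, 20 seconds after startup
	std::printf("%g\n", CheckRate(90, 3600.0)); // 1.5 checks/s, steady state (capped at 60s)
	return 0;
}
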
diff --git a/lib/icinga/cib.hpp b/lib/icinga/cib.hpp
new file mode 100644
index 0000000..00461e3
--- /dev/null
+++ b/lib/icinga/cib.hpp
@@ -0,0 +1,91 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CIB_H
+#define CIB_H
+
+#include "icinga/i2-icinga.hpp"
+#include "base/ringbuffer.hpp"
+#include "base/dictionary.hpp"
+#include "base/array.hpp"
+
+namespace icinga
+{
+
+struct CheckableCheckStatistics {
+ double min_latency;
+ double max_latency;
+ double avg_latency;
+ double min_execution_time;
+ double max_execution_time;
+ double avg_execution_time;
+};
+
+struct ServiceStatistics {
+ double services_ok;
+ double services_warning;
+ double services_critical;
+ double services_unknown;
+ double services_pending;
+ double services_unreachable;
+ double services_flapping;
+ double services_in_downtime;
+ double services_acknowledged;
+ double services_handled;
+ double services_problem;
+};
+
+struct HostStatistics {
+ double hosts_up;
+ double hosts_down;
+ double hosts_unreachable;
+ double hosts_pending;
+ double hosts_flapping;
+ double hosts_in_downtime;
+ double hosts_acknowledged;
+ double hosts_handled;
+ double hosts_problem;
+};
+
+/**
+ * Common Information Base class. Holds some statistics (and will likely be
+ * removed/refactored).
+ *
+ * @ingroup icinga
+ */
+class CIB
+{
+public:
+ static void UpdateActiveHostChecksStatistics(long tv, int num);
+ static int GetActiveHostChecksStatistics(long timespan);
+
+ static void UpdateActiveServiceChecksStatistics(long tv, int num);
+ static int GetActiveServiceChecksStatistics(long timespan);
+
+ static void UpdatePassiveHostChecksStatistics(long tv, int num);
+ static int GetPassiveHostChecksStatistics(long timespan);
+
+ static void UpdatePassiveServiceChecksStatistics(long tv, int num);
+ static int GetPassiveServiceChecksStatistics(long timespan);
+
+ static CheckableCheckStatistics CalculateHostCheckStats();
+ static CheckableCheckStatistics CalculateServiceCheckStats();
+ static HostStatistics CalculateHostStats();
+ static ServiceStatistics CalculateServiceStats();
+
+ static std::pair<Dictionary::Ptr, Array::Ptr> GetFeatureStats();
+
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+
+private:
+ CIB();
+
+ static std::mutex m_Mutex;
+ static RingBuffer m_ActiveHostChecksStatistics;
+ static RingBuffer m_PassiveHostChecksStatistics;
+ static RingBuffer m_ActiveServiceChecksStatistics;
+ static RingBuffer m_PassiveServiceChecksStatistics;
+};
+
+}
+
+#endif /* CIB_H */
diff --git a/lib/icinga/clusterevents-check.cpp b/lib/icinga/clusterevents-check.cpp
new file mode 100644
index 0000000..40325b4
--- /dev/null
+++ b/lib/icinga/clusterevents-check.cpp
@@ -0,0 +1,379 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/clusterevents.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "remote/apilistener.hpp"
+#include "base/configuration.hpp"
+#include "base/defer.hpp"
+#include "base/serializer.hpp"
+#include "base/exception.hpp"
+#include <boost/thread/once.hpp>
+#include <thread>
+
+using namespace icinga;
+
+std::mutex ClusterEvents::m_Mutex;
+std::deque<std::function<void ()>> ClusterEvents::m_CheckRequestQueue;
+bool ClusterEvents::m_CheckSchedulerRunning;
+int ClusterEvents::m_ChecksExecutedDuringInterval;
+int ClusterEvents::m_ChecksDroppedDuringInterval;
+Timer::Ptr ClusterEvents::m_LogTimer;
+
+void ClusterEvents::RemoteCheckThreadProc()
+{
+ Utility::SetThreadName("Remote Check Scheduler");
+
+ int maxConcurrentChecks = IcingaApplication::GetInstance()->GetMaxConcurrentChecks();
+
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ for(;;) {
+ if (m_CheckRequestQueue.empty())
+ break;
+
+ lock.unlock();
+ Checkable::AquirePendingCheckSlot(maxConcurrentChecks);
+ lock.lock();
+
+ auto callback = m_CheckRequestQueue.front();
+ m_CheckRequestQueue.pop_front();
+ m_ChecksExecutedDuringInterval++;
+ lock.unlock();
+
+ callback();
+ Checkable::DecreasePendingChecks();
+
+ lock.lock();
+ }
+
+ m_CheckSchedulerRunning = false;
+}
+
+void ClusterEvents::EnqueueCheck(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ static boost::once_flag once = BOOST_ONCE_INIT;
+
+ boost::call_once(once, []() {
+ m_LogTimer = Timer::Create();
+ m_LogTimer->SetInterval(10);
+ m_LogTimer->OnTimerExpired.connect([](const Timer * const&) { LogRemoteCheckQueueInformation(); });
+ m_LogTimer->Start();
+ });
+
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ if (m_CheckRequestQueue.size() >= 25000) {
+ m_ChecksDroppedDuringInterval++;
+ return;
+ }
+
+ m_CheckRequestQueue.emplace_back([origin, params]() { ExecuteCheckFromQueue(origin, params); });
+
+ if (!m_CheckSchedulerRunning) {
+ std::thread t(ClusterEvents::RemoteCheckThreadProc);
+ t.detach();
+ m_CheckSchedulerRunning = true;
+ }
+}
+
+static void SendEventExecutedCommand(const Dictionary::Ptr& params, long exitStatus, const String& output,
+ double start, double end, const ApiListener::Ptr& listener, const MessageOrigin::Ptr& origin,
+ const Endpoint::Ptr& sourceEndpoint)
+{
+ Dictionary::Ptr executedParams = new Dictionary();
+ executedParams->Set("execution", params->Get("source"));
+ executedParams->Set("host", params->Get("host"));
+
+ if (params->Contains("service"))
+ executedParams->Set("service", params->Get("service"));
+
+ executedParams->Set("exit", exitStatus);
+ executedParams->Set("output", output);
+ executedParams->Set("start", start);
+ executedParams->Set("end", end);
+
+ if (origin->IsLocal()) {
+ ClusterEvents::ExecutedCommandAPIHandler(origin, executedParams);
+ } else {
+ Dictionary::Ptr executedMessage = new Dictionary();
+ executedMessage->Set("jsonrpc", "2.0");
+ executedMessage->Set("method", "event::ExecutedCommand");
+ executedMessage->Set("params", executedParams);
+
+ listener->SyncSendMessage(sourceEndpoint, executedMessage);
+ }
+}
+
+void ClusterEvents::ExecuteCheckFromQueue(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params) {
+
+ Endpoint::Ptr sourceEndpoint;
+
+ if (origin->FromClient) {
+ sourceEndpoint = origin->FromClient->GetEndpoint();
+ } else if (origin->IsLocal()){
+ sourceEndpoint = Endpoint::GetLocalEndpoint();
+ }
+
+ if (!sourceEndpoint || (origin->FromZone && !Zone::GetLocalZone()->IsChildOf(origin->FromZone))) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'execute command' message from '" << origin->FromClient->GetIdentity() << "': Invalid endpoint origin (client not allowed).";
+ return;
+ }
+
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener) {
+ Log(LogCritical, "ApiListener") << "No instance available.";
+ return;
+ }
+
+ Defer resetExecuteCommandProcessFinishedHandler ([]() {
+ Checkable::ExecuteCommandProcessFinishedHandler = nullptr;
+ });
+
+ if (params->Contains("source")) {
+ String uuid = params->Get("source");
+
+ String checkableName = params->Get("host");
+
+ if (params->Contains("service"))
+ checkableName += "!" + params->Get("service");
+
+ /* Check deadline */
+ double deadline = params->Get("deadline");
+
+ if (Utility::GetTime() > deadline) {
+ Log(LogNotice, "ApiListener")
+ << "Discarding 'ExecuteCheckFromQueue' event for checkable '" << checkableName
+ << "' from '" << origin->FromClient->GetIdentity() << "': Deadline has expired.";
+ return;
+ }
+
+ Checkable::ExecuteCommandProcessFinishedHandler = [checkableName, listener, sourceEndpoint, origin, params] (const Value& commandLine, const ProcessResult& pr) {
+ if (params->Get("command_type") == "check_command") {
+ Checkable::CurrentConcurrentChecks.fetch_sub(1);
+ Checkable::DecreasePendingChecks();
+ }
+
+ if (pr.ExitStatus > 3) {
+ Process::Arguments parguments = Process::PrepareCommand(commandLine);
+ Log(LogWarning, "ApiListener")
+ << "Command for object '" << checkableName << "' (PID: " << pr.PID
+ << ", arguments: " << Process::PrettyPrintArguments(parguments) << ") terminated with exit code "
+ << pr.ExitStatus << ", output: " << pr.Output;
+ }
+
+ SendEventExecutedCommand(params, pr.ExitStatus, pr.Output, pr.ExecutionStart, pr.ExecutionEnd, listener,
+ origin, sourceEndpoint);
+ };
+ }
+
+ if (!listener->GetAcceptCommands() && !origin->IsLocal()) {
+ Log(LogWarning, "ApiListener")
+ << "Ignoring command. '" << listener->GetName() << "' does not accept commands.";
+
+ String output = "Endpoint '" + Endpoint::GetLocalEndpoint()->GetName() + "' does not accept commands.";
+
+ if (params->Contains("source")) {
+ double now = Utility::GetTime();
+ SendEventExecutedCommand(params, 126, output, now, now, listener, origin, sourceEndpoint);
+ } else {
+ Host::Ptr host = new Host();
+ Dictionary::Ptr attrs = new Dictionary();
+
+ attrs->Set("__name", params->Get("host"));
+ attrs->Set("type", "Host");
+ attrs->Set("enable_active_checks", false);
+
+ Deserialize(host, attrs, false, FAConfig);
+
+ if (params->Contains("service"))
+ host->SetExtension("agent_service_name", params->Get("service"));
+
+ CheckResult::Ptr cr = new CheckResult();
+ cr->SetState(ServiceUnknown);
+ cr->SetOutput(output);
+
+ Dictionary::Ptr message = MakeCheckResultMessage(host, cr);
+ listener->SyncSendMessage(sourceEndpoint, message);
+ }
+
+ return;
+ }
+
+ /* use a virtual host object for executing the command */
+ Host::Ptr host = new Host();
+ Dictionary::Ptr attrs = new Dictionary();
+
+ attrs->Set("__name", params->Get("host"));
+ attrs->Set("type", "Host");
+
+ /*
+ * Override the check timeout if the parent caller provided the value. Compatible with older versions not
+ * passing this inside the cluster message.
+ * This happens with host/service command_endpoint agents and the 'check_timeout' attribute being specified.
+ */
+ if (params->Contains("check_timeout"))
+ attrs->Set("check_timeout", params->Get("check_timeout"));
+
+ Deserialize(host, attrs, false, FAConfig);
+
+ if (params->Contains("service"))
+ host->SetExtension("agent_service_name", params->Get("service"));
+
+ String command = params->Get("command");
+ String command_type = params->Get("command_type");
+
+ if (command_type == "check_command") {
+ if (!CheckCommand::GetByName(command)) {
+ ServiceState state = ServiceUnknown;
+ String output = "Check command '" + command + "' does not exist.";
+ double now = Utility::GetTime();
+
+ if (params->Contains("source")) {
+ SendEventExecutedCommand(params, state, output, now, now, listener, origin, sourceEndpoint);
+ } else {
+ CheckResult::Ptr cr = new CheckResult();
+ cr->SetState(state);
+ cr->SetOutput(output);
+ Dictionary::Ptr message = MakeCheckResultMessage(host, cr);
+ listener->SyncSendMessage(sourceEndpoint, message);
+ }
+
+ return;
+ }
+ } else if (command_type == "event_command") {
+ if (!EventCommand::GetByName(command)) {
+ String output = "Event command '" + command + "' does not exist.";
+ Log(LogWarning, "ClusterEvents") << output;
+
+ if (params->Contains("source")) {
+ double now = Utility::GetTime();
+ SendEventExecutedCommand(params, ServiceUnknown, output, now, now, listener, origin, sourceEndpoint);
+ }
+
+ return;
+ }
+ } else if (command_type == "notification_command") {
+ if (!NotificationCommand::GetByName(command)) {
+ String output = "Notification command '" + command + "' does not exist.";
+ Log(LogWarning, "ClusterEvents") << output;
+
+ if (params->Contains("source")) {
+ double now = Utility::GetTime();
+ SendEventExecutedCommand(params, ServiceUnknown, output, now, now, listener, origin, sourceEndpoint);
+ }
+
+ return;
+ }
+ }
+
+ attrs->Set(command_type, params->Get("command"));
+ attrs->Set("command_endpoint", sourceEndpoint->GetName());
+
+ Deserialize(host, attrs, false, FAConfig);
+
+ host->SetExtension("agent_check", true);
+
+ Dictionary::Ptr macros = params->Get("macros");
+
+ if (command_type == "check_command") {
+ try {
+ host->ExecuteRemoteCheck(macros);
+ } catch (const std::exception& ex) {
+ String output = "Exception occurred while checking '" + host->GetName() + "': " + DiagnosticInformation(ex);
+ ServiceState state = ServiceUnknown;
+ double now = Utility::GetTime();
+
+ if (params->Contains("source")) {
+ SendEventExecutedCommand(params, state, output, now, now, listener, origin, sourceEndpoint);
+ } else {
+ CheckResult::Ptr cr = new CheckResult();
+ cr->SetState(state);
+ cr->SetOutput(output);
+ cr->SetScheduleStart(now);
+ cr->SetScheduleEnd(now);
+ cr->SetExecutionStart(now);
+ cr->SetExecutionEnd(now);
+
+ Dictionary::Ptr message = MakeCheckResultMessage(host, cr);
+ listener->SyncSendMessage(sourceEndpoint, message);
+ }
+
+ Log(LogCritical, "checker", output);
+ }
+ } else if (command_type == "event_command") {
+ try {
+ host->ExecuteEventHandler(macros, true);
+ } catch (const std::exception& ex) {
+ if (params->Contains("source")) {
+ String output = "Exception occurred while executing event command '" + command + "' for '" +
+ host->GetName() + "': " + DiagnosticInformation(ex);
+
+ double now = Utility::GetTime();
+ SendEventExecutedCommand(params, ServiceUnknown, output, now, now, listener, origin, sourceEndpoint);
+ } else {
+ throw;
+ }
+ }
+ } else if (command_type == "notification_command" && params->Contains("source")) {
+ /* Get user */
+ User::Ptr user = new User();
+ Dictionary::Ptr attrs = new Dictionary();
+ attrs->Set("__name", params->Get("user"));
+ attrs->Set("type", User::GetTypeName());
+
+ Deserialize(user, attrs, false, FAConfig);
+
+ /* Get notification */
+ Notification::Ptr notification = new Notification();
+ attrs->Clear();
+ attrs->Set("__name", params->Get("notification"));
+ attrs->Set("type", Notification::GetTypeName());
+ attrs->Set("command", command);
+
+ Deserialize(notification, attrs, false, FAConfig);
+
+ try {
+ CheckResult::Ptr cr = new CheckResult();
+ String author = macros->Get("notification_author");
+ NotificationCommand::Ptr notificationCommand = NotificationCommand::GetByName(command);
+
+ notificationCommand->Execute(notification, user, cr, NotificationType::NotificationCustom,
+ author, "");
+ } catch (const std::exception& ex) {
+ String output = "Exception occurred during notification '" + notification->GetName()
+ + "' for checkable '" + notification->GetCheckable()->GetName()
+ + "' and user '" + user->GetName() + "' using command '" + command + "': "
+ + DiagnosticInformation(ex, false);
+ double now = Utility::GetTime();
+ SendEventExecutedCommand(params, ServiceUnknown, output, now, now, listener, origin, sourceEndpoint);
+ }
+ }
+}
+
+int ClusterEvents::GetCheckRequestQueueSize()
+{
+ return m_CheckRequestQueue.size();
+}
+
+void ClusterEvents::LogRemoteCheckQueueInformation() {
+ if (m_ChecksDroppedDuringInterval > 0) {
+ Log(LogCritical, "ClusterEvents")
+ << "Remote check queue ran out of slots. "
+ << m_ChecksDroppedDuringInterval << " checks dropped.";
+ m_ChecksDroppedDuringInterval = 0;
+ }
+
+ if (m_ChecksExecutedDuringInterval == 0)
+ return;
+
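+	/* The divisors below imply that this method runs on a 10-second timer:
+	 * count/10 yields the per-second rate, count*6 the per-minute figures. */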
+ Log(LogInformation, "RemoteCheckQueue")
+ << "items: " << m_CheckRequestQueue.size()
+ << ", rate: " << m_ChecksExecutedDuringInterval / 10 << "/s "
+ << "(" << m_ChecksExecutedDuringInterval * 6 << "/min "
+ << m_ChecksExecutedDuringInterval * 6 * 5 << "/5min "
+ << m_ChecksExecutedDuringInterval * 6 * 15 << "/15min" << ");";
+
+ m_ChecksExecutedDuringInterval = 0;
+}
diff --git a/lib/icinga/clusterevents.cpp b/lib/icinga/clusterevents.cpp
new file mode 100644
index 0000000..fe5167b
--- /dev/null
+++ b/lib/icinga/clusterevents.cpp
@@ -0,0 +1,1623 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/clusterevents.hpp"
+#include "icinga/service.hpp"
+#include "remote/apilistener.hpp"
+#include "remote/endpoint.hpp"
+#include "remote/messageorigin.hpp"
+#include "remote/zone.hpp"
+#include "remote/apifunction.hpp"
+#include "remote/eventqueue.hpp"
+#include "base/application.hpp"
+#include "base/configtype.hpp"
+#include "base/utility.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/exception.hpp"
+#include "base/initialize.hpp"
+#include "base/serializer.hpp"
+#include "base/json.hpp"
+#include <fstream>
+
+using namespace icinga;
+
+INITIALIZE_ONCE(&ClusterEvents::StaticInitialize);
+
+REGISTER_APIFUNCTION(CheckResult, event, &ClusterEvents::CheckResultAPIHandler);
+REGISTER_APIFUNCTION(SetNextCheck, event, &ClusterEvents::NextCheckChangedAPIHandler);
+REGISTER_APIFUNCTION(SetLastCheckStarted, event, &ClusterEvents::LastCheckStartedChangedAPIHandler);
+REGISTER_APIFUNCTION(SetStateBeforeSuppression, event, &ClusterEvents::StateBeforeSuppressionChangedAPIHandler);
+REGISTER_APIFUNCTION(SetSuppressedNotifications, event, &ClusterEvents::SuppressedNotificationsChangedAPIHandler);
+REGISTER_APIFUNCTION(SetSuppressedNotificationTypes, event, &ClusterEvents::SuppressedNotificationTypesChangedAPIHandler);
+REGISTER_APIFUNCTION(SetNextNotification, event, &ClusterEvents::NextNotificationChangedAPIHandler);
+REGISTER_APIFUNCTION(UpdateLastNotifiedStatePerUser, event, &ClusterEvents::LastNotifiedStatePerUserUpdatedAPIHandler);
+REGISTER_APIFUNCTION(ClearLastNotifiedStatePerUser, event, &ClusterEvents::LastNotifiedStatePerUserClearedAPIHandler);
+REGISTER_APIFUNCTION(SetForceNextCheck, event, &ClusterEvents::ForceNextCheckChangedAPIHandler);
+REGISTER_APIFUNCTION(SetForceNextNotification, event, &ClusterEvents::ForceNextNotificationChangedAPIHandler);
+REGISTER_APIFUNCTION(SetAcknowledgement, event, &ClusterEvents::AcknowledgementSetAPIHandler);
+REGISTER_APIFUNCTION(ClearAcknowledgement, event, &ClusterEvents::AcknowledgementClearedAPIHandler);
+REGISTER_APIFUNCTION(ExecuteCommand, event, &ClusterEvents::ExecuteCommandAPIHandler);
+REGISTER_APIFUNCTION(SendNotifications, event, &ClusterEvents::SendNotificationsAPIHandler);
+REGISTER_APIFUNCTION(NotificationSentUser, event, &ClusterEvents::NotificationSentUserAPIHandler);
+REGISTER_APIFUNCTION(NotificationSentToAllUsers, event, &ClusterEvents::NotificationSentToAllUsersAPIHandler);
+REGISTER_APIFUNCTION(ExecutedCommand, event, &ClusterEvents::ExecutedCommandAPIHandler);
+REGISTER_APIFUNCTION(UpdateExecutions, event, &ClusterEvents::UpdateExecutionsAPIHandler);
+REGISTER_APIFUNCTION(SetRemovalInfo, event, &ClusterEvents::SetRemovalInfoAPIHandler);
+
+void ClusterEvents::StaticInitialize()
+{
+ Checkable::OnNewCheckResult.connect(&ClusterEvents::CheckResultHandler);
+ Checkable::OnNextCheckChanged.connect(&ClusterEvents::NextCheckChangedHandler);
+ Checkable::OnLastCheckStartedChanged.connect(&ClusterEvents::LastCheckStartedChangedHandler);
+ Checkable::OnStateBeforeSuppressionChanged.connect(&ClusterEvents::StateBeforeSuppressionChangedHandler);
+ Checkable::OnSuppressedNotificationsChanged.connect(&ClusterEvents::SuppressedNotificationsChangedHandler);
+ Notification::OnSuppressedNotificationsChanged.connect(&ClusterEvents::SuppressedNotificationTypesChangedHandler);
+ Notification::OnNextNotificationChanged.connect(&ClusterEvents::NextNotificationChangedHandler);
+ Notification::OnLastNotifiedStatePerUserUpdated.connect(&ClusterEvents::LastNotifiedStatePerUserUpdatedHandler);
+ Notification::OnLastNotifiedStatePerUserCleared.connect(&ClusterEvents::LastNotifiedStatePerUserClearedHandler);
+ Checkable::OnForceNextCheckChanged.connect(&ClusterEvents::ForceNextCheckChangedHandler);
+ Checkable::OnForceNextNotificationChanged.connect(&ClusterEvents::ForceNextNotificationChangedHandler);
+ Checkable::OnNotificationsRequested.connect(&ClusterEvents::SendNotificationsHandler);
+ Checkable::OnNotificationSentToUser.connect(&ClusterEvents::NotificationSentUserHandler);
+ Checkable::OnNotificationSentToAllUsers.connect(&ClusterEvents::NotificationSentToAllUsersHandler);
+
+ Checkable::OnAcknowledgementSet.connect(&ClusterEvents::AcknowledgementSetHandler);
+ Checkable::OnAcknowledgementCleared.connect(&ClusterEvents::AcknowledgementClearedHandler);
+
+ Comment::OnRemovalInfoChanged.connect(&ClusterEvents::SetRemovalInfoHandler);
+ Downtime::OnRemovalInfoChanged.connect(&ClusterEvents::SetRemovalInfoHandler);
+}
+
+Dictionary::Ptr ClusterEvents::MakeCheckResultMessage(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr)
+{
+ Dictionary::Ptr message = new Dictionary();
+ message->Set("jsonrpc", "2.0");
+ message->Set("method", "event::CheckResult");
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr params = new Dictionary();
+ params->Set("host", host->GetName());
+ if (service)
+ params->Set("service", service->GetShortName());
+ else {
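+		/* No local Service object: fall back to the service name recorded in the
+		 * "agent_service_name" extension (if any) so the message still names the service. */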
+ Value agent_service_name = checkable->GetExtension("agent_service_name");
+
+ if (!agent_service_name.IsEmpty())
+ params->Set("service", agent_service_name);
+ }
+ params->Set("cr", Serialize(cr));
+
+ message->Set("params", params);
+
+ return message;
+}
+
+void ClusterEvents::CheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, const MessageOrigin::Ptr& origin)
+{
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return;
+
+ Dictionary::Ptr message = MakeCheckResultMessage(checkable, cr);
+ listener->RelayMessage(origin, checkable, message, true);
+}
+
+Value ClusterEvents::CheckResultAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ Endpoint::Ptr endpoint = origin->FromClient->GetEndpoint();
+
+ if (!endpoint) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'check result' message from '" << origin->FromClient->GetIdentity() << "': Invalid endpoint origin (client not allowed).";
+ return Empty;
+ }
+
+ CheckResult::Ptr cr;
+ Array::Ptr vperf;
+
+ if (params->Contains("cr")) {
+ cr = new CheckResult();
+ Dictionary::Ptr vcr = params->Get("cr");
+
+ if (vcr && vcr->Contains("performance_data")) {
+ vperf = vcr->Get("performance_data");
+
+ if (vperf)
+ vcr->Remove("performance_data");
+
+ Deserialize(cr, vcr, true);
+ }
+ }
+
+ if (!cr)
+ return Empty;
+
+ ArrayData rperf;
+
+ if (vperf) {
+ ObjectLock olock(vperf);
+ for (const Value& vp : vperf) {
+ Value p;
+
+ if (vp.IsObjectType<Dictionary>()) {
+ PerfdataValue::Ptr val = new PerfdataValue();
+ Deserialize(val, vp, true);
+ rperf.push_back(val);
+ } else
+ rperf.push_back(vp);
+ }
+ }
+
+ cr->SetPerformanceData(new Array(std::move(rperf)));
+
+ Host::Ptr host = Host::GetByName(params->Get("host"));
+
+ if (!host)
+ return Empty;
+
+ Checkable::Ptr checkable;
+
+ if (params->Contains("service"))
+ checkable = host->GetServiceByShortName(params->Get("service"));
+ else
+ checkable = host;
+
+ if (!checkable)
+ return Empty;
+
+ if (origin->FromZone && !origin->FromZone->CanAccessObject(checkable) && endpoint != checkable->GetCommandEndpoint()) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'check result' message for checkable '" << checkable->GetName()
+ << "' from '" << origin->FromClient->GetIdentity() << "': Unauthorized access.";
+ return Empty;
+ }
+
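+	/* Results arriving from the checkable's own command endpoint, for a checkable this zone is
+	 * authoritative for, are processed without a message origin (i.e. like locally produced results);
+	 * everything else keeps its cluster origin. */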
+ if (!checkable->IsPaused() && Zone::GetLocalZone() == checkable->GetZone() && endpoint == checkable->GetCommandEndpoint())
+ checkable->ProcessCheckResult(cr);
+ else
+ checkable->ProcessCheckResult(cr, origin);
+
+ return Empty;
+}
+
+void ClusterEvents::NextCheckChangedHandler(const Checkable::Ptr& checkable, const MessageOrigin::Ptr& origin)
+{
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return;
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr params = new Dictionary();
+ params->Set("host", host->GetName());
+ if (service)
+ params->Set("service", service->GetShortName());
+ params->Set("next_check", checkable->GetNextCheck());
+
+ Dictionary::Ptr message = new Dictionary();
+ message->Set("jsonrpc", "2.0");
+ message->Set("method", "event::SetNextCheck");
+ message->Set("params", params);
+
+ listener->RelayMessage(origin, checkable, message, true);
+}
+
+Value ClusterEvents::NextCheckChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ Endpoint::Ptr endpoint = origin->FromClient->GetEndpoint();
+
+ if (!endpoint) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'next check changed' message from '" << origin->FromClient->GetIdentity() << "': Invalid endpoint origin (client not allowed).";
+ return Empty;
+ }
+
+ Host::Ptr host = Host::GetByName(params->Get("host"));
+
+ if (!host)
+ return Empty;
+
+ Checkable::Ptr checkable;
+
+ if (params->Contains("service"))
+ checkable = host->GetServiceByShortName(params->Get("service"));
+ else
+ checkable = host;
+
+ if (!checkable)
+ return Empty;
+
+ if (origin->FromZone && !origin->FromZone->CanAccessObject(checkable)) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'next check changed' message for checkable '" << checkable->GetName()
+ << "' from '" << origin->FromClient->GetIdentity() << "': Unauthorized access.";
+ return Empty;
+ }
+
+ double nextCheck = params->Get("next_check");
+
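+	/* Ignore next_check values that fall within the first minute after process start. */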
+ if (nextCheck < Application::GetStartTime() + 60)
+ return Empty;
+
+ checkable->SetNextCheck(params->Get("next_check"), false, origin);
+
+ return Empty;
+}
+
+void ClusterEvents::LastCheckStartedChangedHandler(const Checkable::Ptr& checkable, const MessageOrigin::Ptr& origin)
+{
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return;
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr params = new Dictionary();
+ params->Set("host", host->GetName());
+ if (service)
+ params->Set("service", service->GetShortName());
+ params->Set("last_check_started", checkable->GetLastCheckStarted());
+
+ Dictionary::Ptr message = new Dictionary();
+ message->Set("jsonrpc", "2.0");
+ message->Set("method", "event::SetLastCheckStarted");
+ message->Set("params", params);
+
+ listener->RelayMessage(origin, checkable, message, true);
+}
+
+Value ClusterEvents::LastCheckStartedChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ Endpoint::Ptr endpoint = origin->FromClient->GetEndpoint();
+
+ if (!endpoint) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'last_check_started changed' message from '" << origin->FromClient->GetIdentity() << "': Invalid endpoint origin (client not allowed).";
+ return Empty;
+ }
+
+ Host::Ptr host = Host::GetByName(params->Get("host"));
+
+ if (!host)
+ return Empty;
+
+ Checkable::Ptr checkable;
+
+ if (params->Contains("service"))
+ checkable = host->GetServiceByShortName(params->Get("service"));
+ else
+ checkable = host;
+
+ if (!checkable)
+ return Empty;
+
+ if (origin->FromZone && !origin->FromZone->CanAccessObject(checkable)) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'last_check_started changed' message for checkable '" << checkable->GetName()
+ << "' from '" << origin->FromClient->GetIdentity() << "': Unauthorized access.";
+ return Empty;
+ }
+
+ checkable->SetLastCheckStarted(params->Get("last_check_started"), false, origin);
+
+ return Empty;
+}
+
+void ClusterEvents::StateBeforeSuppressionChangedHandler(const Checkable::Ptr& checkable, const MessageOrigin::Ptr& origin)
+{
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return;
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr params = new Dictionary();
+ params->Set("host", host->GetName());
+ if (service)
+ params->Set("service", service->GetShortName());
+ params->Set("state_before_suppression", checkable->GetStateBeforeSuppression());
+
+ Dictionary::Ptr message = new Dictionary();
+ message->Set("jsonrpc", "2.0");
+ message->Set("method", "event::SetStateBeforeSuppression");
+ message->Set("params", params);
+
+ listener->RelayMessage(origin, nullptr, message, true);
+}
+
+Value ClusterEvents::StateBeforeSuppressionChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ Endpoint::Ptr endpoint = origin->FromClient->GetEndpoint();
+
+ if (!endpoint) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'state before suppression changed' message from '" << origin->FromClient->GetIdentity() << "': Invalid endpoint origin (client not allowed).";
+ return Empty;
+ }
+
+ Host::Ptr host = Host::GetByName(params->Get("host"));
+
+ if (!host)
+ return Empty;
+
+ Checkable::Ptr checkable;
+
+ if (params->Contains("service"))
+ checkable = host->GetServiceByShortName(params->Get("service"));
+ else
+ checkable = host;
+
+ if (!checkable)
+ return Empty;
+
+ if (origin->FromZone && origin->FromZone != Zone::GetLocalZone()) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'state before suppression changed' message for checkable '" << checkable->GetName()
+ << "' from '" << origin->FromClient->GetIdentity() << "': Unauthorized access.";
+ return Empty;
+ }
+
+ checkable->SetStateBeforeSuppression(ServiceState(int(params->Get("state_before_suppression"))), false, origin);
+
+ return Empty;
+}
+
+void ClusterEvents::SuppressedNotificationsChangedHandler(const Checkable::Ptr& checkable, const MessageOrigin::Ptr& origin)
+{
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return;
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr params = new Dictionary();
+ params->Set("host", host->GetName());
+ if (service)
+ params->Set("service", service->GetShortName());
+ params->Set("suppressed_notifications", checkable->GetSuppressedNotifications());
+
+ Dictionary::Ptr message = new Dictionary();
+ message->Set("jsonrpc", "2.0");
+ message->Set("method", "event::SetSuppressedNotifications");
+ message->Set("params", params);
+
+ listener->RelayMessage(origin, nullptr, message, true);
+}
+
+Value ClusterEvents::SuppressedNotificationsChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ Endpoint::Ptr endpoint = origin->FromClient->GetEndpoint();
+
+ if (!endpoint) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'suppressed notifications changed' message from '" << origin->FromClient->GetIdentity() << "': Invalid endpoint origin (client not allowed).";
+ return Empty;
+ }
+
+ Host::Ptr host = Host::GetByName(params->Get("host"));
+
+ if (!host)
+ return Empty;
+
+ Checkable::Ptr checkable;
+
+ if (params->Contains("service"))
+ checkable = host->GetServiceByShortName(params->Get("service"));
+ else
+ checkable = host;
+
+ if (!checkable)
+ return Empty;
+
+ if (origin->FromZone && origin->FromZone != Zone::GetLocalZone()) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'suppressed notifications changed' message for checkable '" << checkable->GetName()
+ << "' from '" << origin->FromClient->GetIdentity() << "': Unauthorized access.";
+ return Empty;
+ }
+
+ checkable->SetSuppressedNotifications(params->Get("suppressed_notifications"), false, origin);
+
+ return Empty;
+}
+
+void ClusterEvents::SuppressedNotificationTypesChangedHandler(const Notification::Ptr& notification, const MessageOrigin::Ptr& origin)
+{
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return;
+
+ Dictionary::Ptr params = new Dictionary();
+ params->Set("notification", notification->GetName());
+ params->Set("suppressed_notifications", notification->GetSuppressedNotifications());
+
+ Dictionary::Ptr message = new Dictionary();
+ message->Set("jsonrpc", "2.0");
+ message->Set("method", "event::SetSuppressedNotificationTypes");
+ message->Set("params", params);
+
+ listener->RelayMessage(origin, nullptr, message, true);
+}
+
+Value ClusterEvents::SuppressedNotificationTypesChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ Endpoint::Ptr endpoint = origin->FromClient->GetEndpoint();
+
+ if (!endpoint) {
+ Log(LogNotice, "ClusterEvents")
+			<< "Discarding 'suppressed notification types changed' message from '" << origin->FromClient->GetIdentity() << "': Invalid endpoint origin (client not allowed).";
+ return Empty;
+ }
+
+ auto notification (Notification::GetByName(params->Get("notification")));
+
+ if (!notification)
+ return Empty;
+
+ if (origin->FromZone && origin->FromZone != Zone::GetLocalZone()) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'suppressed notification types changed' message for notification '" << notification->GetName()
+ << "' from '" << origin->FromClient->GetIdentity() << "': Unauthorized access.";
+ return Empty;
+ }
+
+ notification->SetSuppressedNotifications(params->Get("suppressed_notifications"), false, origin);
+
+ return Empty;
+}
+
+void ClusterEvents::NextNotificationChangedHandler(const Notification::Ptr& notification, const MessageOrigin::Ptr& origin)
+{
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return;
+
+ Dictionary::Ptr params = new Dictionary();
+ params->Set("notification", notification->GetName());
+ params->Set("next_notification", notification->GetNextNotification());
+
+ Dictionary::Ptr message = new Dictionary();
+ message->Set("jsonrpc", "2.0");
+ message->Set("method", "event::SetNextNotification");
+ message->Set("params", params);
+
+ listener->RelayMessage(origin, notification, message, true);
+}
+
+Value ClusterEvents::NextNotificationChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ Endpoint::Ptr endpoint = origin->FromClient->GetEndpoint();
+
+ if (!endpoint) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'next notification changed' message from '" << origin->FromClient->GetIdentity() << "': Invalid endpoint origin (client not allowed).";
+ return Empty;
+ }
+
+ Notification::Ptr notification = Notification::GetByName(params->Get("notification"));
+
+ if (!notification)
+ return Empty;
+
+ if (origin->FromZone && !origin->FromZone->CanAccessObject(notification)) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'next notification changed' message for notification '" << notification->GetName()
+ << "' from '" << origin->FromClient->GetIdentity() << "': Unauthorized access.";
+ return Empty;
+ }
+
+ double nextNotification = params->Get("next_notification");
+
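+	/* Ignore next_notification timestamps that already lie in the past. */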
+ if (nextNotification < Utility::GetTime())
+ return Empty;
+
+ notification->SetNextNotification(nextNotification, false, origin);
+
+ return Empty;
+}
+
+void ClusterEvents::LastNotifiedStatePerUserUpdatedHandler(const Notification::Ptr& notification, const String& user, uint_fast8_t state, const MessageOrigin::Ptr& origin)
+{
+ auto listener (ApiListener::GetInstance());
+
+ if (!listener) {
+ return;
+ }
+
+ Dictionary::Ptr params = new Dictionary();
+ params->Set("notification", notification->GetName());
+ params->Set("user", user);
+ params->Set("state", state);
+
+ Dictionary::Ptr message = new Dictionary();
+ message->Set("jsonrpc", "2.0");
+ message->Set("method", "event::UpdateLastNotifiedStatePerUser");
+ message->Set("params", params);
+
+ listener->RelayMessage(origin, notification, message, true);
+}
+
+Value ClusterEvents::LastNotifiedStatePerUserUpdatedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ auto endpoint (origin->FromClient->GetEndpoint());
+
+ if (!endpoint) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'last notified state of user updated' message from '" << origin->FromClient->GetIdentity() << "': Invalid endpoint origin (client not allowed).";
+
+ return Empty;
+ }
+
+ if (origin->FromZone && origin->FromZone != Zone::GetLocalZone()) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'last notified state of user updated' message from '"
+ << origin->FromClient->GetIdentity() << "': Unauthorized access.";
+
+ return Empty;
+ }
+
+ auto notification (Notification::GetByName(params->Get("notification")));
+
+ if (!notification) {
+ return Empty;
+ }
+
+ auto state (params->Get("state"));
+
+ if (!state.IsNumber()) {
+ return Empty;
+ }
+
+ notification->GetLastNotifiedStatePerUser()->Set(params->Get("user"), state);
+ Notification::OnLastNotifiedStatePerUserUpdated(notification, params->Get("user"), state, origin);
+
+ return Empty;
+}
+
+void ClusterEvents::LastNotifiedStatePerUserClearedHandler(const Notification::Ptr& notification, const MessageOrigin::Ptr& origin)
+{
+ auto listener (ApiListener::GetInstance());
+
+ if (!listener) {
+ return;
+ }
+
+ Dictionary::Ptr params = new Dictionary();
+ params->Set("notification", notification->GetName());
+
+ Dictionary::Ptr message = new Dictionary();
+ message->Set("jsonrpc", "2.0");
+ message->Set("method", "event::ClearLastNotifiedStatePerUser");
+ message->Set("params", params);
+
+ listener->RelayMessage(origin, notification, message, true);
+}
+
+Value ClusterEvents::LastNotifiedStatePerUserClearedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ auto endpoint (origin->FromClient->GetEndpoint());
+
+ if (!endpoint) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'last notified state of user cleared' message from '"
+ << origin->FromClient->GetIdentity() << "': Invalid endpoint origin (client not allowed).";
+
+ return Empty;
+ }
+
+ if (origin->FromZone && origin->FromZone != Zone::GetLocalZone()) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'last notified state of user cleared' message from '"
+ << origin->FromClient->GetIdentity() << "': Unauthorized access.";
+
+ return Empty;
+ }
+
+ auto notification (Notification::GetByName(params->Get("notification")));
+
+ if (!notification) {
+ return Empty;
+ }
+
+ notification->GetLastNotifiedStatePerUser()->Clear();
+ Notification::OnLastNotifiedStatePerUserCleared(notification, origin);
+
+ return Empty;
+}
+
+void ClusterEvents::ForceNextCheckChangedHandler(const Checkable::Ptr& checkable, const MessageOrigin::Ptr& origin)
+{
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return;
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr params = new Dictionary();
+ params->Set("host", host->GetName());
+ if (service)
+ params->Set("service", service->GetShortName());
+ params->Set("forced", checkable->GetForceNextCheck());
+
+ Dictionary::Ptr message = new Dictionary();
+ message->Set("jsonrpc", "2.0");
+ message->Set("method", "event::SetForceNextCheck");
+ message->Set("params", params);
+
+ listener->RelayMessage(origin, checkable, message, true);
+}
+
+Value ClusterEvents::ForceNextCheckChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ Endpoint::Ptr endpoint = origin->FromClient->GetEndpoint();
+
+ if (!endpoint) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'force next check changed' message from '" << origin->FromClient->GetIdentity() << "': Invalid endpoint origin (client not allowed).";
+ return Empty;
+ }
+
+ Host::Ptr host = Host::GetByName(params->Get("host"));
+
+ if (!host)
+ return Empty;
+
+ Checkable::Ptr checkable;
+
+ if (params->Contains("service"))
+ checkable = host->GetServiceByShortName(params->Get("service"));
+ else
+ checkable = host;
+
+ if (!checkable)
+ return Empty;
+
+ if (origin->FromZone && !origin->FromZone->CanAccessObject(checkable)) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'force next check' message for checkable '" << checkable->GetName()
+ << "' from '" << origin->FromClient->GetIdentity() << "': Unauthorized access.";
+ return Empty;
+ }
+
+ checkable->SetForceNextCheck(params->Get("forced"), false, origin);
+
+ return Empty;
+}
+
+void ClusterEvents::ForceNextNotificationChangedHandler(const Checkable::Ptr& checkable, const MessageOrigin::Ptr& origin)
+{
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return;
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr params = new Dictionary();
+ params->Set("host", host->GetName());
+ if (service)
+ params->Set("service", service->GetShortName());
+ params->Set("forced", checkable->GetForceNextNotification());
+
+ Dictionary::Ptr message = new Dictionary();
+ message->Set("jsonrpc", "2.0");
+ message->Set("method", "event::SetForceNextNotification");
+ message->Set("params", params);
+
+ listener->RelayMessage(origin, checkable, message, true);
+}
+
+Value ClusterEvents::ForceNextNotificationChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ Endpoint::Ptr endpoint = origin->FromClient->GetEndpoint();
+
+ if (!endpoint) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'force next notification changed' message from '" << origin->FromClient->GetIdentity() << "': Invalid endpoint origin (client not allowed).";
+ return Empty;
+ }
+
+ Host::Ptr host = Host::GetByName(params->Get("host"));
+
+ if (!host)
+ return Empty;
+
+ Checkable::Ptr checkable;
+
+ if (params->Contains("service"))
+ checkable = host->GetServiceByShortName(params->Get("service"));
+ else
+ checkable = host;
+
+ if (!checkable)
+ return Empty;
+
+ if (origin->FromZone && !origin->FromZone->CanAccessObject(checkable)) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'force next notification' message for checkable '" << checkable->GetName()
+ << "' from '" << origin->FromClient->GetIdentity() << "': Unauthorized access.";
+ return Empty;
+ }
+
+ checkable->SetForceNextNotification(params->Get("forced"), false, origin);
+
+ return Empty;
+}
+
+void ClusterEvents::AcknowledgementSetHandler(const Checkable::Ptr& checkable,
+ const String& author, const String& comment, AcknowledgementType type,
+ bool notify, bool persistent, double changeTime, double expiry, const MessageOrigin::Ptr& origin)
+{
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return;
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr params = new Dictionary();
+ params->Set("host", host->GetName());
+ if (service)
+ params->Set("service", service->GetShortName());
+ params->Set("author", author);
+ params->Set("comment", comment);
+ params->Set("acktype", type);
+ params->Set("notify", notify);
+ params->Set("persistent", persistent);
+ params->Set("expiry", expiry);
+ params->Set("change_time", changeTime);
+
+ Dictionary::Ptr message = new Dictionary();
+ message->Set("jsonrpc", "2.0");
+ message->Set("method", "event::SetAcknowledgement");
+ message->Set("params", params);
+
+ listener->RelayMessage(origin, checkable, message, true);
+}
+
+Value ClusterEvents::AcknowledgementSetAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ Endpoint::Ptr endpoint = origin->FromClient->GetEndpoint();
+
+ if (!endpoint) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'acknowledgement set' message from '" << origin->FromClient->GetIdentity() << "': Invalid endpoint origin (client not allowed).";
+ return Empty;
+ }
+
+ Host::Ptr host = Host::GetByName(params->Get("host"));
+
+ if (!host)
+ return Empty;
+
+ Checkable::Ptr checkable;
+
+ if (params->Contains("service"))
+ checkable = host->GetServiceByShortName(params->Get("service"));
+ else
+ checkable = host;
+
+ if (!checkable)
+ return Empty;
+
+ if (origin->FromZone && !origin->FromZone->CanAccessObject(checkable)) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'acknowledgement set' message for checkable '" << checkable->GetName()
+ << "' from '" << origin->FromClient->GetIdentity() << "': Unauthorized access.";
+ return Empty;
+ }
+
+ ObjectLock oLock (checkable);
+
+ if (checkable->IsAcknowledged()) {
+ Log(LogWarning, "ClusterEvents")
+ << "Discarding 'acknowledgement set' message for checkable '" << checkable->GetName()
+ << "' from '" << origin->FromClient->GetIdentity() << "': Checkable is already acknowledged.";
+ return Empty;
+ }
+
+ checkable->AcknowledgeProblem(params->Get("author"), params->Get("comment"),
+ static_cast<AcknowledgementType>(static_cast<int>(params->Get("acktype"))),
+ params->Get("notify"), params->Get("persistent"), params->Get("change_time"), params->Get("expiry"), origin);
+
+ return Empty;
+}
+
+void ClusterEvents::AcknowledgementClearedHandler(const Checkable::Ptr& checkable, const String& removedBy, double changeTime, const MessageOrigin::Ptr& origin)
+{
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return;
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr params = new Dictionary();
+ params->Set("host", host->GetName());
+ if (service)
+ params->Set("service", service->GetShortName());
+ params->Set("author", removedBy);
+ params->Set("change_time", changeTime);
+
+ Dictionary::Ptr message = new Dictionary();
+ message->Set("jsonrpc", "2.0");
+ message->Set("method", "event::ClearAcknowledgement");
+ message->Set("params", params);
+
+ listener->RelayMessage(origin, checkable, message, true);
+}
+
+Value ClusterEvents::AcknowledgementClearedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ Endpoint::Ptr endpoint = origin->FromClient->GetEndpoint();
+
+ if (!endpoint) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'acknowledgement cleared' message from '" << origin->FromClient->GetIdentity() << "': Invalid endpoint origin (client not allowed).";
+ return Empty;
+ }
+
+ Host::Ptr host = Host::GetByName(params->Get("host"));
+
+ if (!host)
+ return Empty;
+
+ Checkable::Ptr checkable;
+
+ if (params->Contains("service"))
+ checkable = host->GetServiceByShortName(params->Get("service"));
+ else
+ checkable = host;
+
+ if (!checkable)
+ return Empty;
+
+ if (origin->FromZone && !origin->FromZone->CanAccessObject(checkable)) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'acknowledgement cleared' message for checkable '" << checkable->GetName()
+ << "' from '" << origin->FromClient->GetIdentity() << "': Unauthorized access.";
+ return Empty;
+ }
+
+ checkable->ClearAcknowledgement(params->Get("author"), params->Get("change_time"), origin);
+
+ return Empty;
+}
+
+Value ClusterEvents::ExecuteCommandAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return Empty;
+
+ if (!origin->IsLocal()) {
+ Endpoint::Ptr endpoint = origin->FromClient->GetEndpoint();
+
+ /* Discard messages from anonymous clients */
+ if (!endpoint) {
+ Log(LogNotice, "ClusterEvents") << "Discarding 'execute command' message from '"
+ << origin->FromClient->GetIdentity() << "': Invalid endpoint origin (client not allowed).";
+ return Empty;
+ }
+
+ Zone::Ptr originZone = endpoint->GetZone();
+
+ Zone::Ptr localZone = Zone::GetLocalZone();
+ bool fromLocalZone = originZone == localZone;
+
+ Zone::Ptr parentZone = localZone->GetParent();
+ bool fromParentZone = parentZone && originZone == parentZone;
+
+ if (!fromLocalZone && !fromParentZone) {
+ Log(LogNotice, "ClusterEvents") << "Discarding 'execute command' message from '"
+ << origin->FromClient->GetIdentity() << "': Unauthorized access.";
+ return Empty;
+ }
+ }
+
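+	/* When present, "source" carries the execution UUID that is later reported back
+	 * via "event::ExecutedCommand"; it is absent for plain command_endpoint checks. */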
+ String executionUuid = params->Get("source");
+
+ if (params->Contains("endpoint")) {
+ Endpoint::Ptr execEndpoint = Endpoint::GetByName(params->Get("endpoint"));
+
+ if (!execEndpoint) {
+ Log(LogWarning, "ClusterEvents")
+ << "Discarding 'execute command' message " << executionUuid
+ << ": Endpoint " << params->Get("endpoint") << " does not exist";
+ return Empty;
+ }
+
+ if (execEndpoint != Endpoint::GetLocalEndpoint()) {
+ Zone::Ptr endpointZone = execEndpoint->GetZone();
+ Zone::Ptr localZone = Zone::GetLocalZone();
+
+ if (!endpointZone->IsChildOf(localZone)) {
+ return Empty;
+ }
+
+ /* Check if the child endpoints have Icinga version >= 2.13 */
+ for (const Zone::Ptr &zone : ConfigType::GetObjectsByType<Zone>()) {
+ /* Fetch immediate child zone members */
+ if (zone->GetParent() == localZone && zone->CanAccessObject(endpointZone)) {
+ std::set<Endpoint::Ptr> endpoints = zone->GetEndpoints();
+
+ for (const Endpoint::Ptr &childEndpoint : endpoints) {
+ if (!(childEndpoint->GetCapabilities() & (uint_fast64_t)ApiCapabilities::ExecuteArbitraryCommand)) {
+ double now = Utility::GetTime();
+ Dictionary::Ptr executedParams = new Dictionary();
+ executedParams->Set("execution", executionUuid);
+ executedParams->Set("host", params->Get("host"));
+
+ if (params->Contains("service"))
+ executedParams->Set("service", params->Get("service"));
+
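+	/* Exit code 126 mirrors the shell convention for "command found but cannot execute". */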
+ executedParams->Set("exit", 126);
+ executedParams->Set("output",
+ "Endpoint '" + childEndpoint->GetName() + "' doesn't support executing arbitrary commands.");
+ executedParams->Set("start", now);
+ executedParams->Set("end", now);
+
+ Dictionary::Ptr executedMessage = new Dictionary();
+ executedMessage->Set("jsonrpc", "2.0");
+ executedMessage->Set("method", "event::ExecutedCommand");
+ executedMessage->Set("params", executedParams);
+
+ listener->RelayMessage(nullptr, nullptr, executedMessage, true);
+ return Empty;
+ }
+ }
+
+ Checkable::Ptr checkable;
+ Host::Ptr host = Host::GetByName(params->Get("host"));
+ if (!host) {
+ Log(LogWarning, "ClusterEvents")
+ << "Discarding 'execute command' message " << executionUuid
+ << ": host " << params->Get("host") << " does not exist";
+ return Empty;
+ }
+
+ if (params->Contains("service"))
+ checkable = host->GetServiceByShortName(params->Get("service"));
+ else
+ checkable = host;
+
+ if (!checkable) {
+ String checkableName = host->GetName();
+ if (params->Contains("service"))
+ checkableName += "!" + params->Get("service");
+
+ Log(LogWarning, "ClusterEvents")
+ << "Discarding 'execute command' message " << executionUuid
+ << ": " << checkableName << " does not exist";
+ return Empty;
+ }
+
+					/* Return an error when the endpointZone differs from the child zone and
+					 * the child zone cannot access the checkable.
+					 * The zones are checked to allow for the case where command_endpoint is specified on the checkable
+					 * but the checkable is not actually present on the agent.
+					 */
+ if (!zone->CanAccessObject(checkable) && zone != endpointZone) {
+ double now = Utility::GetTime();
+ Dictionary::Ptr executedParams = new Dictionary();
+ executedParams->Set("execution", executionUuid);
+ executedParams->Set("host", params->Get("host"));
+
+ if (params->Contains("service"))
+ executedParams->Set("service", params->Get("service"));
+
+ executedParams->Set("exit", 126);
+ executedParams->Set(
+ "output",
+							"Zone '" + zone->GetName() + "' cannot access checkable '" + checkable->GetName() + "'."
+ );
+ executedParams->Set("start", now);
+ executedParams->Set("end", now);
+
+ Dictionary::Ptr executedMessage = new Dictionary();
+ executedMessage->Set("jsonrpc", "2.0");
+ executedMessage->Set("method", "event::ExecutedCommand");
+ executedMessage->Set("params", executedParams);
+
+ listener->RelayMessage(nullptr, nullptr, executedMessage, true);
+ return Empty;
+ }
+ }
+ }
+
+ Dictionary::Ptr execMessage = new Dictionary();
+ execMessage->Set("jsonrpc", "2.0");
+ execMessage->Set("method", "event::ExecuteCommand");
+ execMessage->Set("params", params);
+
+ listener->RelayMessage(origin, endpointZone, execMessage, true);
+ return Empty;
+ }
+ }
+
+ EnqueueCheck(origin, params);
+
+ return Empty;
+}
+
+void ClusterEvents::SendNotificationsHandler(const Checkable::Ptr& checkable, NotificationType type,
+ const CheckResult::Ptr& cr, const String& author, const String& text, const MessageOrigin::Ptr& origin)
+{
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return;
+
+ Dictionary::Ptr message = MakeCheckResultMessage(checkable, cr);
+ message->Set("method", "event::SendNotifications");
+
+ Dictionary::Ptr params = message->Get("params");
+ params->Set("type", type);
+ params->Set("author", author);
+ params->Set("text", text);
+
+ listener->RelayMessage(origin, nullptr, message, true);
+}
+
+Value ClusterEvents::SendNotificationsAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ Endpoint::Ptr endpoint = origin->FromClient->GetEndpoint();
+
+ if (!endpoint) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'send notification' message from '" << origin->FromClient->GetIdentity() << "': Invalid endpoint origin (client not allowed).";
+ return Empty;
+ }
+
+ Host::Ptr host = Host::GetByName(params->Get("host"));
+
+ if (!host)
+ return Empty;
+
+ Checkable::Ptr checkable;
+
+ if (params->Contains("service"))
+ checkable = host->GetServiceByShortName(params->Get("service"));
+ else
+ checkable = host;
+
+ if (!checkable)
+ return Empty;
+
+ if (origin->FromZone && origin->FromZone != Zone::GetLocalZone()) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'send custom notification' message for checkable '" << checkable->GetName()
+ << "' from '" << origin->FromClient->GetIdentity() << "': Unauthorized access.";
+ return Empty;
+ }
+
+ CheckResult::Ptr cr;
+ Array::Ptr vperf;
+
+ if (params->Contains("cr")) {
+ cr = new CheckResult();
+ Dictionary::Ptr vcr = params->Get("cr");
+
+ if (vcr && vcr->Contains("performance_data")) {
+ vperf = vcr->Get("performance_data");
+
+ if (vperf)
+ vcr->Remove("performance_data");
+
+ Deserialize(cr, vcr, true);
+ }
+ }
+
+ NotificationType type = static_cast<NotificationType>(static_cast<int>(params->Get("type")));
+ String author = params->Get("author");
+ String text = params->Get("text");
+
+ Checkable::OnNotificationsRequested(checkable, type, cr, author, text, origin);
+
+ return Empty;
+}
+
+void ClusterEvents::NotificationSentUserHandler(const Notification::Ptr& notification, const Checkable::Ptr& checkable, const User::Ptr& user,
+ NotificationType notificationType, const CheckResult::Ptr& cr, const String& author, const String& commentText, const String& command,
+ const MessageOrigin::Ptr& origin)
+{
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return;
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr params = new Dictionary();
+ params->Set("host", host->GetName());
+ if (service)
+ params->Set("service", service->GetShortName());
+ params->Set("notification", notification->GetName());
+ params->Set("user", user->GetName());
+ params->Set("type", notificationType);
+ params->Set("cr", Serialize(cr));
+ params->Set("author", author);
+ params->Set("text", commentText);
+ params->Set("command", command);
+
+ Dictionary::Ptr message = new Dictionary();
+ message->Set("jsonrpc", "2.0");
+ message->Set("method", "event::NotificationSentUser");
+ message->Set("params", params);
+
+ listener->RelayMessage(origin, nullptr, message, true);
+}
+
+Value ClusterEvents::NotificationSentUserAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ Endpoint::Ptr endpoint = origin->FromClient->GetEndpoint();
+
+ if (!endpoint) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'sent notification to user' message from '" << origin->FromClient->GetIdentity() << "': Invalid endpoint origin (client not allowed).";
+ return Empty;
+ }
+
+ Host::Ptr host = Host::GetByName(params->Get("host"));
+
+ if (!host)
+ return Empty;
+
+ Checkable::Ptr checkable;
+
+ if (params->Contains("service"))
+ checkable = host->GetServiceByShortName(params->Get("service"));
+ else
+ checkable = host;
+
+ if (!checkable)
+ return Empty;
+
+ if (origin->FromZone && origin->FromZone != Zone::GetLocalZone()) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'send notification to user' message for checkable '" << checkable->GetName()
+ << "' from '" << origin->FromClient->GetIdentity() << "': Unauthorized access.";
+ return Empty;
+ }
+
+ CheckResult::Ptr cr;
+ Array::Ptr vperf;
+
+ if (params->Contains("cr")) {
+ cr = new CheckResult();
+ Dictionary::Ptr vcr = params->Get("cr");
+
+ if (vcr && vcr->Contains("performance_data")) {
+ vperf = vcr->Get("performance_data");
+
+ if (vperf)
+ vcr->Remove("performance_data");
+
+ Deserialize(cr, vcr, true);
+ }
+ }
+
+ NotificationType type = static_cast<NotificationType>(static_cast<int>(params->Get("type")));
+ String author = params->Get("author");
+ String text = params->Get("text");
+
+ Notification::Ptr notification = Notification::GetByName(params->Get("notification"));
+
+ if (!notification)
+ return Empty;
+
+ User::Ptr user = User::GetByName(params->Get("user"));
+
+ if (!user)
+ return Empty;
+
+ String command = params->Get("command");
+
+ Checkable::OnNotificationSentToUser(notification, checkable, user, type, cr, author, text, command, origin);
+
+ return Empty;
+}
+
+void ClusterEvents::NotificationSentToAllUsersHandler(const Notification::Ptr& notification, const Checkable::Ptr& checkable, const std::set<User::Ptr>& users,
+ NotificationType notificationType, const CheckResult::Ptr& cr, const String& author, const String& commentText, const MessageOrigin::Ptr& origin)
+{
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return;
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr params = new Dictionary();
+ params->Set("host", host->GetName());
+ if (service)
+ params->Set("service", service->GetShortName());
+ params->Set("notification", notification->GetName());
+
+ ArrayData ausers;
+ for (const User::Ptr& user : users) {
+ ausers.push_back(user->GetName());
+ }
+ params->Set("users", new Array(std::move(ausers)));
+
+ params->Set("type", notificationType);
+ params->Set("cr", Serialize(cr));
+ params->Set("author", author);
+ params->Set("text", commentText);
+
+ params->Set("last_notification", notification->GetLastNotification());
+ params->Set("next_notification", notification->GetNextNotification());
+ params->Set("notification_number", notification->GetNotificationNumber());
+ params->Set("last_problem_notification", notification->GetLastProblemNotification());
+ params->Set("no_more_notifications", notification->GetNoMoreNotifications());
+
+ Dictionary::Ptr message = new Dictionary();
+ message->Set("jsonrpc", "2.0");
+ message->Set("method", "event::NotificationSentToAllUsers");
+ message->Set("params", params);
+
+ listener->RelayMessage(origin, nullptr, message, true);
+}
+
+Value ClusterEvents::NotificationSentToAllUsersAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ Endpoint::Ptr endpoint = origin->FromClient->GetEndpoint();
+
+ if (!endpoint) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'sent notification to all users' message from '" << origin->FromClient->GetIdentity() << "': Invalid endpoint origin (client not allowed).";
+ return Empty;
+ }
+
+ Host::Ptr host = Host::GetByName(params->Get("host"));
+
+ if (!host)
+ return Empty;
+
+ Checkable::Ptr checkable;
+
+ if (params->Contains("service"))
+ checkable = host->GetServiceByShortName(params->Get("service"));
+ else
+ checkable = host;
+
+ if (!checkable)
+ return Empty;
+
+ if (origin->FromZone && origin->FromZone != Zone::GetLocalZone()) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'sent notification to all users' message for checkable '" << checkable->GetName()
+ << "' from '" << origin->FromClient->GetIdentity() << "': Unauthorized access.";
+ return Empty;
+ }
+
+ CheckResult::Ptr cr;
+ Array::Ptr vperf;
+
+ if (params->Contains("cr")) {
+ cr = new CheckResult();
+ Dictionary::Ptr vcr = params->Get("cr");
+
+ if (vcr && vcr->Contains("performance_data")) {
+ vperf = vcr->Get("performance_data");
+
+ if (vperf)
+ vcr->Remove("performance_data");
+
+ Deserialize(cr, vcr, true);
+ }
+ }
+
+ NotificationType type = static_cast<NotificationType>(static_cast<int>(params->Get("type")));
+ String author = params->Get("author");
+ String text = params->Get("text");
+
+ Notification::Ptr notification = Notification::GetByName(params->Get("notification"));
+
+ if (!notification)
+ return Empty;
+
+ Array::Ptr ausers = params->Get("users");
+
+ if (!ausers)
+ return Empty;
+
+ std::set<User::Ptr> users;
+
+ {
+ ObjectLock olock(ausers);
+ for (const String& auser : ausers) {
+ User::Ptr user = User::GetByName(auser);
+
+ if (!user)
+ continue;
+
+ users.insert(user);
+ }
+ }
+
+ notification->SetLastNotification(params->Get("last_notification"));
+ notification->SetNextNotification(params->Get("next_notification"));
+ notification->SetNotificationNumber(params->Get("notification_number"));
+ notification->SetLastProblemNotification(params->Get("last_problem_notification"));
+ notification->SetNoMoreNotifications(params->Get("no_more_notifications"));
+
+ ArrayData notifiedProblemUsers;
+ for (const User::Ptr& user : users) {
+ notifiedProblemUsers.push_back(user->GetName());
+ }
+
+ notification->SetNotifiedProblemUsers(new Array(std::move(notifiedProblemUsers)));
+
+ Checkable::OnNotificationSentToAllUsers(notification, checkable, users, type, cr, author, text, origin);
+
+ return Empty;
+}
+
+Value ClusterEvents::ExecutedCommandAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return Empty;
+
+ Endpoint::Ptr endpoint;
+
+ if (origin->FromClient) {
+ endpoint = origin->FromClient->GetEndpoint();
+ } else if (origin->IsLocal()) {
+ endpoint = Endpoint::GetLocalEndpoint();
+ }
+
+ if (!endpoint) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'update executions API handler' message from '" << origin->FromClient->GetIdentity()
+ << "': Invalid endpoint origin (client not allowed).";
+
+ return Empty;
+ }
+
+ Host::Ptr host = Host::GetByName(params->Get("host"));
+
+ if (!host)
+ return Empty;
+
+ Checkable::Ptr checkable;
+
+ if (params->Contains("service"))
+ checkable = host->GetServiceByShortName(params->Get("service"));
+ else
+ checkable = host;
+
+ if (!checkable)
+ return Empty;
+
+ ObjectLock oLock (checkable);
+
+ if (!params->Contains("execution")) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'update executions API handler' message for checkable '" << checkable->GetName()
+ << "' from '" << origin->FromClient->GetIdentity() << "': Execution UUID missing.";
+ return Empty;
+ }
+
+ String uuid = params->Get("execution");
+
+ Dictionary::Ptr executions = checkable->GetExecutions();
+
+ if (!executions) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'update executions API handler' message for checkable '" << checkable->GetName()
+ << "' from '" << origin->FromClient->GetIdentity() << "': Execution '" << uuid << "' missing.";
+ return Empty;
+ }
+
+ Dictionary::Ptr execution = executions->Get(uuid);
+
+ if (!execution) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'update executions API handler' message for checkable '" << checkable->GetName()
+ << "' from '" << origin->FromClient->GetIdentity() << "': Execution '" << uuid << "' missing.";
+ return Empty;
+ }
+
+ Endpoint::Ptr command_endpoint = Endpoint::GetByName(execution->Get("endpoint"));
+ if (!command_endpoint) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'update executions API handler' message from '" << origin->FromClient->GetIdentity()
+			<< "': Command endpoint does not exist.";
+
+ return Empty;
+ }
+
+ if (origin->FromZone && !command_endpoint->GetZone()->IsChildOf(origin->FromZone)) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'update executions API handler' message for checkable '" << checkable->GetName()
+ << "' from '" << origin->FromClient->GetIdentity() << "': Unauthorized access.";
+ return Empty;
+ }
+
+ if (params->Contains("exit"))
+ execution->Set("exit", params->Get("exit"));
+
+ if (params->Contains("output"))
+ execution->Set("output", params->Get("output"));
+
+ if (params->Contains("start"))
+ execution->Set("start", params->Get("start"));
+
+ if (params->Contains("end"))
+ execution->Set("end", params->Get("end"));
+
+ execution->Remove("pending");
+
+ /* Broadcast the update */
+ Dictionary::Ptr executionsToBroadcast = new Dictionary();
+ executionsToBroadcast->Set(uuid, execution);
+ Dictionary::Ptr updateParams = new Dictionary();
+ updateParams->Set("host", host->GetName());
+
+ if (params->Contains("service"))
+ updateParams->Set("service", params->Get("service"));
+
+ updateParams->Set("executions", executionsToBroadcast);
+
+ Dictionary::Ptr updateMessage = new Dictionary();
+ updateMessage->Set("jsonrpc", "2.0");
+ updateMessage->Set("method", "event::UpdateExecutions");
+ updateMessage->Set("params", updateParams);
+
+ listener->RelayMessage(nullptr, checkable, updateMessage, true);
+
+ return Empty;
+}
+
+Value ClusterEvents::UpdateExecutionsAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ Endpoint::Ptr endpoint = origin->FromClient->GetEndpoint();
+
+ if (!endpoint) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'update executions API handler' message from '" << origin->FromClient->GetIdentity()
+ << "': Invalid endpoint origin (client not allowed).";
+
+ return Empty;
+ }
+
+ Host::Ptr host = Host::GetByName(params->Get("host"));
+
+ if (!host)
+ return Empty;
+
+ Checkable::Ptr checkable;
+
+ if (params->Contains("service"))
+ checkable = host->GetServiceByShortName(params->Get("service"));
+ else
+ checkable = host;
+
+ if (!checkable)
+ return Empty;
+
+ ObjectLock oLock (checkable);
+
+ if (origin->FromZone && !origin->FromZone->CanAccessObject(checkable)) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'update executions API handler' message for checkable '" << checkable->GetName()
+ << "' from '" << origin->FromClient->GetIdentity() << "': Unauthorized access.";
+ return Empty;
+ }
+
+ Dictionary::Ptr executions = checkable->GetExecutions();
+
+ if (!executions)
+ executions = new Dictionary();
+
+ Dictionary::Ptr newExecutions = params->Get("executions");
+ newExecutions->CopyTo(executions);
+ checkable->SetExecutions(executions);
+
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return Empty;
+
+ Dictionary::Ptr updateMessage = new Dictionary();
+ updateMessage->Set("jsonrpc", "2.0");
+ updateMessage->Set("method", "event::UpdateExecutions");
+ updateMessage->Set("params", params);
+
+ listener->RelayMessage(origin, checkable, updateMessage, true);
+
+ return Empty;
+}
+
+void ClusterEvents::SetRemovalInfoHandler(const ConfigObject::Ptr& obj, const String& removedBy, double removeTime,
+ const MessageOrigin::Ptr& origin)
+{
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return;
+
+ Dictionary::Ptr params = new Dictionary();
+ params->Set("object_type", obj->GetReflectionType()->GetName());
+ params->Set("object_name", obj->GetName());
+ params->Set("removed_by", removedBy);
+ params->Set("remove_time", removeTime);
+
+ Dictionary::Ptr message = new Dictionary();
+ message->Set("jsonrpc", "2.0");
+ message->Set("method", "event::SetRemovalInfo");
+ message->Set("params", params);
+
+ listener->RelayMessage(origin, obj, message, true);
+}
+
+Value ClusterEvents::SetRemovalInfoAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ Endpoint::Ptr endpoint = origin->FromClient->GetEndpoint();
+
+ if (!endpoint || (origin->FromZone && !Zone::GetLocalZone()->IsChildOf(origin->FromZone))) {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'set removal info' message from '" << origin->FromClient->GetIdentity()
+ << "': Invalid endpoint origin (client not allowed).";
+ return Empty;
+ }
+
+ String objectType = params->Get("object_type");
+ String objectName = params->Get("object_name");
+ String removedBy = params->Get("removed_by");
+ double removeTime = params->Get("remove_time");
+
+ if (objectType == Comment::GetTypeName()) {
+ Comment::Ptr comment = Comment::GetByName(objectName);
+
+ if (comment) {
+ comment->SetRemovalInfo(removedBy, removeTime, origin);
+ }
+ } else if (objectType == Downtime::GetTypeName()) {
+ Downtime::Ptr downtime = Downtime::GetByName(objectName);
+
+ if (downtime) {
+ downtime->SetRemovalInfo(removedBy, removeTime, origin);
+ }
+ } else {
+ Log(LogNotice, "ClusterEvents")
+ << "Discarding 'set removal info' message from '" << origin->FromClient->GetIdentity()
+ << "': Unknown object type.";
+ }
+
+ return Empty;
+}
diff --git a/lib/icinga/clusterevents.hpp b/lib/icinga/clusterevents.hpp
new file mode 100644
index 0000000..8daf86a
--- /dev/null
+++ b/lib/icinga/clusterevents.hpp
@@ -0,0 +1,102 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CLUSTEREVENTS_H
+#define CLUSTEREVENTS_H
+
+#include "icinga/checkable.hpp"
+#include "icinga/host.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/eventcommand.hpp"
+#include "icinga/notificationcommand.hpp"
+
+namespace icinga
+{
+
+/**
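+ * Cluster event handlers: relay local state changes to cluster peers and apply
+ * incoming "event::*" messages from other endpoints.
+ *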
+ * @ingroup icinga
+ */
+class ClusterEvents
+{
+public:
+ static void StaticInitialize();
+
+ static void CheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, const MessageOrigin::Ptr& origin);
+ static Value CheckResultAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+
+ static void NextCheckChangedHandler(const Checkable::Ptr& checkable, const MessageOrigin::Ptr& origin);
+ static Value NextCheckChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+
+ static void LastCheckStartedChangedHandler(const Checkable::Ptr& checkable, const MessageOrigin::Ptr& origin);
+ static Value LastCheckStartedChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+
+ static void StateBeforeSuppressionChangedHandler(const Checkable::Ptr& checkable, const MessageOrigin::Ptr& origin);
+ static Value StateBeforeSuppressionChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+
+ static void SuppressedNotificationsChangedHandler(const Checkable::Ptr& checkable, const MessageOrigin::Ptr& origin);
+ static Value SuppressedNotificationsChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+
+ static void SuppressedNotificationTypesChangedHandler(const Notification::Ptr& notification, const MessageOrigin::Ptr& origin);
+ static Value SuppressedNotificationTypesChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+
+ static void NextNotificationChangedHandler(const Notification::Ptr& notification, const MessageOrigin::Ptr& origin);
+ static Value NextNotificationChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+
+ static void LastNotifiedStatePerUserUpdatedHandler(const Notification::Ptr& notification, const String& user, uint_fast8_t state, const MessageOrigin::Ptr& origin);
+ static Value LastNotifiedStatePerUserUpdatedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+
+ static void LastNotifiedStatePerUserClearedHandler(const Notification::Ptr& notification, const MessageOrigin::Ptr& origin);
+ static Value LastNotifiedStatePerUserClearedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+
+ static void ForceNextCheckChangedHandler(const Checkable::Ptr& checkable, const MessageOrigin::Ptr& origin);
+ static Value ForceNextCheckChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+
+ static void ForceNextNotificationChangedHandler(const Checkable::Ptr& checkable, const MessageOrigin::Ptr& origin);
+ static Value ForceNextNotificationChangedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+
+ static void AcknowledgementSetHandler(const Checkable::Ptr& checkable, const String& author, const String& comment, AcknowledgementType type,
+ bool notify, bool persistent, double changeTime, double expiry, const MessageOrigin::Ptr& origin);
+ static Value AcknowledgementSetAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+
+ static void AcknowledgementClearedHandler(const Checkable::Ptr& checkable, const String& removedBy, double changeTime, const MessageOrigin::Ptr& origin);
+ static Value AcknowledgementClearedAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+
+ static Value ExecuteCommandAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+
+ static Dictionary::Ptr MakeCheckResultMessage(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr);
+
+ static void SendNotificationsHandler(const Checkable::Ptr& checkable, NotificationType type,
+ const CheckResult::Ptr& cr, const String& author, const String& text, const MessageOrigin::Ptr& origin);
+ static Value SendNotificationsAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+
+ static void NotificationSentUserHandler(const Notification::Ptr& notification, const Checkable::Ptr& checkable, const User::Ptr& user,
+ NotificationType notificationType, const CheckResult::Ptr& cr, const String& author, const String& commentText, const String& command, const MessageOrigin::Ptr& origin);
+ static Value NotificationSentUserAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+
+ static void NotificationSentToAllUsersHandler(const Notification::Ptr& notification, const Checkable::Ptr& checkable, const std::set<User::Ptr>& users,
+ NotificationType notificationType, const CheckResult::Ptr& cr, const String& author, const String& commentText, const MessageOrigin::Ptr& origin);
+ static Value NotificationSentToAllUsersAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+ static Value ExecutedCommandAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+ static Value UpdateExecutionsAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+
+ static void SetRemovalInfoHandler(const ConfigObject::Ptr& obj, const String& removedBy, double removeTime, const MessageOrigin::Ptr& origin);
+ static Value SetRemovalInfoAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+
+ static int GetCheckRequestQueueSize();
+ static void LogRemoteCheckQueueInformation();
+
+private:
+ static std::mutex m_Mutex;
+ static std::deque<std::function<void ()>> m_CheckRequestQueue;
+ static bool m_CheckSchedulerRunning;
+ static int m_ChecksExecutedDuringInterval;
+ static int m_ChecksDroppedDuringInterval;
+ static Timer::Ptr m_LogTimer;
+
+ static void RemoteCheckThreadProc();
+ static void EnqueueCheck(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+ static void ExecuteCheckFromQueue(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+};
+
+}
+
+#endif /* CLUSTEREVENTS_H */
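
For illustration only (not part of the diff): each local *Handler declared above has a matching *APIHandler, following the usual Icinga 2 cluster message flow. A rough sketch of that pairing, using the SetRemovalInfo pair from clusterevents.cpp above as the concrete case:

	// Node A (where the change happens):
	//   ClusterEvents::SetRemovalInfoHandler(obj, removedBy, removeTime, origin)
	//     -> serializes the change into a params Dictionary and relays it to other endpoints
	// Node B (receiving side):
	//   ClusterEvents::SetRemovalInfoAPIHandler(origin, params)
	//     -> unpacks the Dictionary and applies the change to the local Comment/Downtime,
	//        as shown at the top of this hunk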
diff --git a/lib/icinga/command.cpp b/lib/icinga/command.cpp
new file mode 100644
index 0000000..8e0f357
--- /dev/null
+++ b/lib/icinga/command.cpp
@@ -0,0 +1,68 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/command.hpp"
+#include "icinga/command-ti.cpp"
+#include "icinga/macroprocessor.hpp"
+#include "base/exception.hpp"
+#include "base/objectlock.hpp"
+
+using namespace icinga;
+
+REGISTER_TYPE(Command);
+
+void Command::Validate(int types, const ValidationUtils& utils)
+{
+ ObjectImpl<Command>::Validate(types, utils);
+
+ Dictionary::Ptr arguments = GetArguments();
+
+ if (!(types & FAConfig))
+ return;
+
+ if (arguments) {
+ if (!GetCommandLine().IsObjectType<Array>())
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "command" }, "Attribute 'command' must be an array if the 'arguments' attribute is set."));
+
+ ObjectLock olock(arguments);
+ for (const Dictionary::Pair& kv : arguments) {
+ const Value& arginfo = kv.second;
+ Value argval;
+
+ if (arginfo.IsObjectType<Dictionary>()) {
+ Dictionary::Ptr argdict = arginfo;
+
+ if (argdict->Contains("value")) {
+ Value argvalue = argdict->Get("value");
+
+ if (argvalue.IsString() && !MacroProcessor::ValidateMacroString(argvalue))
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "arguments", kv.first, "value" }, "Closing $ not found in macro format string '" + argvalue + "'."));
+ }
+
+ if (argdict->Contains("set_if")) {
+ Value argsetif = argdict->Get("set_if");
+
+ if (argsetif.IsString() && !MacroProcessor::ValidateMacroString(argsetif))
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "arguments", kv.first, "set_if" }, "Closing $ not found in macro format string '" + argsetif + "'."));
+ }
+ } else if (arginfo.IsString()) {
+ if (!MacroProcessor::ValidateMacroString(arginfo))
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "arguments", kv.first }, "Closing $ not found in macro format string '" + arginfo + "'."));
+ }
+ }
+ }
+
+ Dictionary::Ptr env = GetEnv();
+
+ if (env) {
+ ObjectLock olock(env);
+ for (const Dictionary::Pair& kv : env) {
+ const Value& envval = kv.second;
+
+ if (!envval.IsString() || envval.IsEmpty())
+ continue;
+
+ if (!MacroProcessor::ValidateMacroString(envval))
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "env", kv.first }, "Closing $ not found in macro format string '" + envval + "'."));
+ }
+ }
+}
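
For illustration only (not part of the diff): Command::Validate() above rejects macro strings with an unterminated '$'. A hypothetical configuration fragment that would trip it:

	// arguments = {
	//   "-H" = { value = "$host.name" }   // missing the closing '$'
	// }
	// -> ValidationError on { "arguments", "-H", "value" }:
	//    "Closing $ not found in macro format string '$host.name'."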
diff --git a/lib/icinga/command.hpp b/lib/icinga/command.hpp
new file mode 100644
index 0000000..19bb050
--- /dev/null
+++ b/lib/icinga/command.hpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef COMMAND_H
+#define COMMAND_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/command-ti.hpp"
+#include "remote/messageorigin.hpp"
+
+namespace icinga
+{
+
+/**
+ * A command.
+ *
+ * @ingroup icinga
+ */
+class Command : public ObjectImpl<Command>
+{
+public:
+ DECLARE_OBJECT(Command);
+
+ //virtual Dictionary::Ptr Execute(const Object::Ptr& context) = 0;
+
+ void Validate(int types, const ValidationUtils& utils) override;
+};
+
+}
+
+#endif /* COMMAND_H */
diff --git a/lib/icinga/command.ti b/lib/icinga/command.ti
new file mode 100644
index 0000000..2275955
--- /dev/null
+++ b/lib/icinga/command.ti
@@ -0,0 +1,54 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/customvarobject.hpp"
+#include "base/function.hpp"
+
+library icinga;
+
+namespace icinga
+{
+
+abstract class Command : CustomVarObject
+{
+ [config] Value command (CommandLine);
+ [config, signal_with_old_value] Value arguments;
+ [config] int timeout {
+ default {{{ return 60; }}}
+ };
+ [config, signal_with_old_value] Dictionary::Ptr env;
+ [config, required] Function::Ptr execute;
+};
+
+validator Command {
+ String command;
+ Function command;
+ Array command {
+ String "*";
+ Function "*";
+ };
+
+ Dictionary arguments {
+ String "*";
+ Function "*";
+ Dictionary "*" {
+ String key;
+ String value;
+ Function value;
+ String description;
+ Number "required";
+ Number skip_key;
+ Number repeat_key;
+ String set_if;
+ Function set_if;
+ Number order;
+ String separator;
+ };
+ };
+
+ Dictionary env {
+ String "*";
+ Function "*";
+ };
+};
+
+}
diff --git a/lib/icinga/comment.cpp b/lib/icinga/comment.cpp
new file mode 100644
index 0000000..9c0b923
--- /dev/null
+++ b/lib/icinga/comment.cpp
@@ -0,0 +1,258 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/comment.hpp"
+#include "icinga/comment-ti.cpp"
+#include "icinga/host.hpp"
+#include "remote/configobjectutility.hpp"
+#include "base/utility.hpp"
+#include "base/configtype.hpp"
+#include "base/timer.hpp"
+#include <boost/thread/once.hpp>
+
+using namespace icinga;
+
+static int l_NextCommentID = 1;
+static std::mutex l_CommentMutex;
+static std::map<int, String> l_LegacyCommentsCache;
+static Timer::Ptr l_CommentsExpireTimer;
+
+boost::signals2::signal<void (const Comment::Ptr&)> Comment::OnCommentAdded;
+boost::signals2::signal<void (const Comment::Ptr&)> Comment::OnCommentRemoved;
+boost::signals2::signal<void (const Comment::Ptr&, const String&, double, const MessageOrigin::Ptr&)> Comment::OnRemovalInfoChanged;
+
+REGISTER_TYPE(Comment);
+
+String CommentNameComposer::MakeName(const String& shortName, const Object::Ptr& context) const
+{
+ Comment::Ptr comment = dynamic_pointer_cast<Comment>(context);
+
+ if (!comment)
+ return "";
+
+ String name = comment->GetHostName();
+
+ if (!comment->GetServiceName().IsEmpty())
+ name += "!" + comment->GetServiceName();
+
+ name += "!" + shortName;
+
+ return name;
+}
+
+Dictionary::Ptr CommentNameComposer::ParseName(const String& name) const
+{
+ std::vector<String> tokens = name.Split("!");
+
+ if (tokens.size() < 2)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid Comment name."));
+
+ Dictionary::Ptr result = new Dictionary();
+ result->Set("host_name", tokens[0]);
+
+ if (tokens.size() > 2) {
+ result->Set("service_name", tokens[1]);
+ result->Set("name", tokens[2]);
+ } else {
+ result->Set("name", tokens[1]);
+ }
+
+ return result;
+}
+
+void Comment::OnAllConfigLoaded()
+{
+ ConfigObject::OnAllConfigLoaded();
+
+ Host::Ptr host = Host::GetByName(GetHostName());
+
+ if (GetServiceName().IsEmpty())
+ m_Checkable = host;
+ else
+ m_Checkable = host->GetServiceByShortName(GetServiceName());
+
+ if (!m_Checkable)
+ BOOST_THROW_EXCEPTION(ScriptError("Comment '" + GetName() + "' references a host/service which doesn't exist.", GetDebugInfo()));
+}
+
+void Comment::Start(bool runtimeCreated)
+{
+ ObjectImpl<Comment>::Start(runtimeCreated);
+
+ static boost::once_flag once = BOOST_ONCE_INIT;
+
+ boost::call_once(once, [this]() {
+ l_CommentsExpireTimer = Timer::Create();
+ l_CommentsExpireTimer->SetInterval(60);
+ l_CommentsExpireTimer->OnTimerExpired.connect([](const Timer * const&) { CommentsExpireTimerHandler(); });
+ l_CommentsExpireTimer->Start();
+ });
+
+ {
+ std::unique_lock<std::mutex> lock(l_CommentMutex);
+
+ SetLegacyId(l_NextCommentID);
+ l_LegacyCommentsCache[l_NextCommentID] = GetName();
+ l_NextCommentID++;
+ }
+
+ GetCheckable()->RegisterComment(this);
+
+ if (runtimeCreated)
+ OnCommentAdded(this);
+}
+
+void Comment::Stop(bool runtimeRemoved)
+{
+ GetCheckable()->UnregisterComment(this);
+
+ if (runtimeRemoved)
+ OnCommentRemoved(this);
+
+ ObjectImpl<Comment>::Stop(runtimeRemoved);
+}
+
+Checkable::Ptr Comment::GetCheckable() const
+{
+ return static_pointer_cast<Checkable>(m_Checkable);
+}
+
+bool Comment::IsExpired() const
+{
+ double expire_time = GetExpireTime();
+
+ return (expire_time != 0 && expire_time < Utility::GetTime());
+}
+
+int Comment::GetNextCommentID()
+{
+ std::unique_lock<std::mutex> lock(l_CommentMutex);
+
+ return l_NextCommentID;
+}
+
+String Comment::AddComment(const Checkable::Ptr& checkable, CommentType entryType, const String& author,
+ const String& text, bool persistent, double expireTime, bool sticky, const String& id, const MessageOrigin::Ptr& origin)
+{
+ String fullName;
+
+ if (id.IsEmpty())
+ fullName = checkable->GetName() + "!" + Utility::NewUniqueID();
+ else
+ fullName = id;
+
+ Dictionary::Ptr attrs = new Dictionary();
+
+ attrs->Set("author", author);
+ attrs->Set("text", text);
+ attrs->Set("persistent", persistent);
+ attrs->Set("expire_time", expireTime);
+ attrs->Set("entry_type", entryType);
+ attrs->Set("sticky", sticky);
+ attrs->Set("entry_time", Utility::GetTime());
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ attrs->Set("host_name", host->GetName());
+ if (service)
+ attrs->Set("service_name", service->GetShortName());
+
+ String zone = checkable->GetZoneName();
+
+ if (!zone.IsEmpty())
+ attrs->Set("zone", zone);
+
+ String config = ConfigObjectUtility::CreateObjectConfig(Comment::TypeInstance, fullName, true, nullptr, attrs);
+
+ Array::Ptr errors = new Array();
+
+ if (!ConfigObjectUtility::CreateObject(Comment::TypeInstance, fullName, config, errors, nullptr)) {
+ ObjectLock olock(errors);
+ for (const String& error : errors) {
+ Log(LogCritical, "Comment", error);
+ }
+
+ BOOST_THROW_EXCEPTION(std::runtime_error("Could not create comment."));
+ }
+
+ Comment::Ptr comment = Comment::GetByName(fullName);
+
+ if (!comment)
+ BOOST_THROW_EXCEPTION(std::runtime_error("Could not create comment."));
+
+ Log(LogNotice, "Comment")
+ << "Added comment '" << comment->GetName() << "'.";
+
+ return fullName;
+}
+
+void Comment::RemoveComment(const String& id, bool removedManually, const String& removedBy,
+ const MessageOrigin::Ptr& origin)
+{
+ Comment::Ptr comment = Comment::GetByName(id);
+
+ if (!comment || comment->GetPackage() != "_api")
+ return;
+
+ Log(LogNotice, "Comment")
+ << "Removed comment '" << comment->GetName() << "' from object '" << comment->GetCheckable()->GetName() << "'.";
+
+ if (removedManually) {
+ comment->SetRemovalInfo(removedBy, Utility::GetTime());
+ }
+
+ Array::Ptr errors = new Array();
+
+ if (!ConfigObjectUtility::DeleteObject(comment, false, errors, nullptr)) {
+ ObjectLock olock(errors);
+ for (const String& error : errors) {
+ Log(LogCritical, "Comment", error);
+ }
+
+ BOOST_THROW_EXCEPTION(std::runtime_error("Could not remove comment."));
+ }
+}
+
+void Comment::SetRemovalInfo(const String& removedBy, double removeTime, const MessageOrigin::Ptr& origin) {
+ {
+ ObjectLock olock(this);
+
+ SetRemovedBy(removedBy, false, origin);
+ SetRemoveTime(removeTime, false, origin);
+ }
+
+ OnRemovalInfoChanged(this, removedBy, removeTime, origin);
+}
+
+String Comment::GetCommentIDFromLegacyID(int id)
+{
+ std::unique_lock<std::mutex> lock(l_CommentMutex);
+
+ auto it = l_LegacyCommentsCache.find(id);
+
+ if (it == l_LegacyCommentsCache.end())
+ return Empty;
+
+ return it->second;
+}
+
+void Comment::CommentsExpireTimerHandler()
+{
+ std::vector<Comment::Ptr> comments;
+
+ for (const Comment::Ptr& comment : ConfigType::GetObjectsByType<Comment>()) {
+ comments.push_back(comment);
+ }
+
+ for (const Comment::Ptr& comment : comments) {
+ /* Only remove comments which are activated after daemon start. */
+ if (comment->IsActive() && comment->IsExpired()) {
+ /* Do not remove persistent comments from an acknowledgement */
+ if (comment->GetEntryType() == CommentAcknowledgement && comment->GetPersistent())
+ continue;
+
+ RemoveComment(comment->GetName());
+ }
+ }
+}
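
For illustration only (not part of the diff): a minimal usage sketch for the comment API defined above, assuming a valid Checkable::Ptr named 'checkable' is already in scope:

	String name = Comment::AddComment(checkable, CommentUser, "icingaadmin",
		"Planned maintenance window", false /* persistent */, 0 /* no expire_time */);

	Comment::Ptr comment = Comment::GetByName(name);

	// Later, e.g. when an operator deletes it through the API:
	Comment::RemoveComment(name, true /* removed manually */, "icingaadmin");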
diff --git a/lib/icinga/comment.hpp b/lib/icinga/comment.hpp
new file mode 100644
index 0000000..6532084
--- /dev/null
+++ b/lib/icinga/comment.hpp
@@ -0,0 +1,59 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef COMMENT_H
+#define COMMENT_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/comment-ti.hpp"
+#include "icinga/checkable-ti.hpp"
+#include "remote/messageorigin.hpp"
+
+namespace icinga
+{
+
+/**
+ * A comment.
+ *
+ * @ingroup icinga
+ */
+class Comment final : public ObjectImpl<Comment>
+{
+public:
+ DECLARE_OBJECT(Comment);
+ DECLARE_OBJECTNAME(Comment);
+
+ static boost::signals2::signal<void (const Comment::Ptr&)> OnCommentAdded;
+ static boost::signals2::signal<void (const Comment::Ptr&)> OnCommentRemoved;
+ static boost::signals2::signal<void (const Comment::Ptr&, const String&, double, const MessageOrigin::Ptr&)> OnRemovalInfoChanged;
+
+ intrusive_ptr<Checkable> GetCheckable() const;
+
+ bool IsExpired() const;
+
+ void SetRemovalInfo(const String& removedBy, double removeTime, const MessageOrigin::Ptr& origin = nullptr);
+
+ static int GetNextCommentID();
+
+ static String AddComment(const intrusive_ptr<Checkable>& checkable, CommentType entryType,
+ const String& author, const String& text, bool persistent, double expireTime, bool sticky = false,
+ const String& id = String(), const MessageOrigin::Ptr& origin = nullptr);
+
+ static void RemoveComment(const String& id, bool removedManually = false, const String& removedBy = "",
+ const MessageOrigin::Ptr& origin = nullptr);
+
+ static String GetCommentIDFromLegacyID(int id);
+
+protected:
+ void OnAllConfigLoaded() override;
+ void Start(bool runtimeCreated) override;
+ void Stop(bool runtimeRemoved) override;
+
+private:
+ ObjectImpl<Checkable>::Ptr m_Checkable;
+
+ static void CommentsExpireTimerHandler();
+};
+
+}
+
+#endif /* COMMENT_H */
diff --git a/lib/icinga/comment.ti b/lib/icinga/comment.ti
new file mode 100644
index 0000000..b8ad6f7
--- /dev/null
+++ b/lib/icinga/comment.ti
@@ -0,0 +1,80 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+#include "base/utility.hpp"
+#impl_include "icinga/service.hpp"
+
+library icinga;
+
+namespace icinga
+{
+
+code {{{
+/**
+ * The type of a service comment.
+ *
+ * @ingroup icinga
+ */
+enum CommentType
+{
+ CommentUser = 1,
+ CommentAcknowledgement = 4
+};
+
+class CommentNameComposer : public NameComposer
+{
+public:
+ virtual String MakeName(const String& shortName, const Object::Ptr& context) const;
+ virtual Dictionary::Ptr ParseName(const String& name) const;
+};
+}}}
+
+class Comment : ConfigObject < CommentNameComposer
+{
+ load_after Host;
+ load_after Service;
+
+ [config, no_user_modify, protected, required, navigation(host)] name(Host) host_name {
+ navigate {{{
+ return Host::GetByName(GetHostName());
+ }}}
+ };
+ [config, no_user_modify, protected, navigation(service)] String service_name {
+ track {{{
+ if (!oldValue.IsEmpty()) {
+ Service::Ptr service = Service::GetByNamePair(GetHostName(), oldValue);
+ DependencyGraph::RemoveDependency(this, service.get());
+ }
+
+ if (!newValue.IsEmpty()) {
+ Service::Ptr service = Service::GetByNamePair(GetHostName(), newValue);
+ DependencyGraph::AddDependency(this, service.get());
+ }
+ }}}
+ navigate {{{
+ if (GetServiceName().IsEmpty())
+ return nullptr;
+
+ Host::Ptr host = Host::GetByName(GetHostName());
+ return host->GetServiceByShortName(GetServiceName());
+ }}}
+ };
+
+ [config] Timestamp entry_time {
+ default {{{ return Utility::GetTime(); }}}
+ };
+ [config, enum] CommentType entry_type {
+ default {{{ return CommentUser; }}}
+ };
+ [config, no_user_view, no_user_modify] bool sticky;
+ [config, required] String author;
+ [config, required] String text;
+ [config] bool persistent;
+ [config] Timestamp expire_time;
+ [state] int legacy_id;
+
+ [no_user_view, no_user_modify] String removed_by;
+ [no_user_view, no_user_modify] Timestamp remove_time;
+};
+
+}
diff --git a/lib/icinga/compatutility.cpp b/lib/icinga/compatutility.cpp
new file mode 100644
index 0000000..95aed43
--- /dev/null
+++ b/lib/icinga/compatutility.cpp
@@ -0,0 +1,302 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/compatutility.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/eventcommand.hpp"
+#include "icinga/notificationcommand.hpp"
+#include "icinga/pluginutility.hpp"
+#include "icinga/service.hpp"
+#include "base/utility.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/convert.hpp"
+#include <boost/algorithm/string/replace.hpp>
+#include <boost/algorithm/string/join.hpp>
+
+using namespace icinga;
+
+/* Used in DB IDO and Livestatus. */
+String CompatUtility::GetCommandLine(const Command::Ptr& command)
+{
+ Value commandLine = command->GetCommandLine();
+
+ String result;
+ if (commandLine.IsObjectType<Array>()) {
+ Array::Ptr args = commandLine;
+
+ ObjectLock olock(args);
+ for (const String& arg : args) {
+ // This is obviously incorrect for non-trivial cases.
+ result += " \"" + EscapeString(arg) + "\"";
+ }
+ } else if (!commandLine.IsEmpty()) {
+ result = EscapeString(Convert::ToString(commandLine));
+ } else {
+ result = "<internal>";
+ }
+
+ return result;
+}
+
+/* Helper. */
+String CompatUtility::GetCommandNamePrefix(const Command::Ptr& command)
+{
+ if (!command)
+ return Empty;
+
+ String prefix;
+ if (command->GetReflectionType() == CheckCommand::TypeInstance)
+ prefix = "check_";
+ else if (command->GetReflectionType() == NotificationCommand::TypeInstance)
+ prefix = "notification_";
+ else if (command->GetReflectionType() == EventCommand::TypeInstance)
+ prefix = "event_";
+
+ return prefix;
+}
+
+/* Used in DB IDO and Livestatus. */
+String CompatUtility::GetCommandName(const Command::Ptr& command)
+{
+ if (!command)
+ return Empty;
+
+ return GetCommandNamePrefix(command) + command->GetName();
+}
+
+/* Used in DB IDO and Livestatus. */
+String CompatUtility::GetCheckableCommandArgs(const Checkable::Ptr& checkable)
+{
+ CheckCommand::Ptr command = checkable->GetCheckCommand();
+
+ Dictionary::Ptr args = new Dictionary();
+
+ if (command) {
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+ String command_line = GetCommandLine(command);
+
+ Dictionary::Ptr command_vars = command->GetVars();
+
+ if (command_vars) {
+ ObjectLock olock(command_vars);
+ for (const Dictionary::Pair& kv : command_vars) {
+ String macro = "$" + kv.first + "$"; // this is too simple
+ if (command_line.Contains(macro))
+ args->Set(kv.first, kv.second);
+
+ }
+ }
+
+ Dictionary::Ptr host_vars = host->GetVars();
+
+ if (host_vars) {
+ ObjectLock olock(host_vars);
+ for (const Dictionary::Pair& kv : host_vars) {
+ String macro = "$" + kv.first + "$"; // this is too simple
+ if (command_line.Contains(macro))
+ args->Set(kv.first, kv.second);
+ macro = "$host.vars." + kv.first + "$";
+ if (command_line.Contains(macro))
+ args->Set(kv.first, kv.second);
+ }
+ }
+
+ if (service) {
+ Dictionary::Ptr service_vars = service->GetVars();
+
+ if (service_vars) {
+ ObjectLock olock(service_vars);
+ for (const Dictionary::Pair& kv : service_vars) {
+ String macro = "$" + kv.first + "$"; // this is too simple
+ if (command_line.Contains(macro))
+ args->Set(kv.first, kv.second);
+ macro = "$service.vars." + kv.first + "$";
+ if (command_line.Contains(macro))
+ args->Set(kv.first, kv.second);
+ }
+ }
+ }
+
+ String arg_string;
+ ObjectLock olock(args);
+ for (const Dictionary::Pair& kv : args) {
+ arg_string += Convert::ToString(kv.first) + "=" + Convert::ToString(kv.second) + "!";
+ }
+ return arg_string;
+ }
+
+ return Empty;
+}
+
+/* Used in DB IDO and Livestatus. */
+int CompatUtility::GetCheckableNotificationLastNotification(const Checkable::Ptr& checkable)
+{
+ double last_notification = 0.0;
+ for (const Notification::Ptr& notification : checkable->GetNotifications()) {
+ if (notification->GetLastNotification() > last_notification)
+ last_notification = notification->GetLastNotification();
+ }
+
+ return static_cast<int>(last_notification);
+}
+
+/* Used in DB IDO and Livestatus. */
+int CompatUtility::GetCheckableNotificationNextNotification(const Checkable::Ptr& checkable)
+{
+ double next_notification = 0.0;
+ for (const Notification::Ptr& notification : checkable->GetNotifications()) {
+ if (next_notification == 0 || notification->GetNextNotification() < next_notification)
+ next_notification = notification->GetNextNotification();
+ }
+
+ return static_cast<int>(next_notification);
+}
+
+/* Used in DB IDO and Livestatus. */
+int CompatUtility::GetCheckableNotificationNotificationNumber(const Checkable::Ptr& checkable)
+{
+ int notification_number = 0;
+ for (const Notification::Ptr& notification : checkable->GetNotifications()) {
+ if (notification->GetNotificationNumber() > notification_number)
+ notification_number = notification->GetNotificationNumber();
+ }
+
+ return notification_number;
+}
+
+/* Used in DB IDO and Livestatus. */
+double CompatUtility::GetCheckableNotificationNotificationInterval(const Checkable::Ptr& checkable)
+{
+ double notification_interval = -1;
+
+ for (const Notification::Ptr& notification : checkable->GetNotifications()) {
+ if (notification_interval == -1 || notification->GetInterval() < notification_interval)
+ notification_interval = notification->GetInterval();
+ }
+
+ if (notification_interval == -1)
+ notification_interval = 60;
+
+ return notification_interval / 60.0;
+}
+
+/* Helper. */
+int CompatUtility::GetCheckableNotificationTypeFilter(const Checkable::Ptr& checkable)
+{
+ unsigned long notification_type_filter = 0;
+
+ for (const Notification::Ptr& notification : checkable->GetNotifications()) {
+ ObjectLock olock(notification);
+
+ notification_type_filter |= notification->GetTypeFilter();
+ }
+
+ return notification_type_filter;
+}
+
+/* Helper. */
+int CompatUtility::GetCheckableNotificationStateFilter(const Checkable::Ptr& checkable)
+{
+ unsigned long notification_state_filter = 0;
+
+ for (const Notification::Ptr& notification : checkable->GetNotifications()) {
+ ObjectLock olock(notification);
+
+ notification_state_filter |= notification->GetStateFilter();
+ }
+
+ return notification_state_filter;
+}
+
+/* Used in DB IDO and Livestatus. */
+std::set<User::Ptr> CompatUtility::GetCheckableNotificationUsers(const Checkable::Ptr& checkable)
+{
+ /* Service -> Notifications -> (Users + UserGroups -> Users) */
+ std::set<User::Ptr> allUsers;
+ std::set<User::Ptr> users;
+
+ for (const Notification::Ptr& notification : checkable->GetNotifications()) {
+ ObjectLock olock(notification);
+
+ users = notification->GetUsers();
+
+ std::copy(users.begin(), users.end(), std::inserter(allUsers, allUsers.begin()));
+
+ for (const UserGroup::Ptr& ug : notification->GetUserGroups()) {
+ std::set<User::Ptr> members = ug->GetMembers();
+ std::copy(members.begin(), members.end(), std::inserter(allUsers, allUsers.begin()));
+ }
+ }
+
+ return allUsers;
+}
+
+/* Used in DB IDO and Livestatus. */
+std::set<UserGroup::Ptr> CompatUtility::GetCheckableNotificationUserGroups(const Checkable::Ptr& checkable)
+{
+ std::set<UserGroup::Ptr> usergroups;
+ /* Service -> Notifications -> UserGroups */
+ for (const Notification::Ptr& notification : checkable->GetNotifications()) {
+ ObjectLock olock(notification);
+
+ for (const UserGroup::Ptr& ug : notification->GetUserGroups()) {
+ usergroups.insert(ug);
+ }
+ }
+
+ return usergroups;
+}
+
+/* Used in DB IDO, Livestatus, CompatLogger, GelfWriter, IcingaDB. */
+String CompatUtility::GetCheckResultOutput(const CheckResult::Ptr& cr)
+{
+ if (!cr)
+ return Empty;
+
+ String output;
+
+ String raw_output = cr->GetOutput();
+
+ size_t line_end = raw_output.Find("\n");
+
+ return raw_output.SubStr(0, line_end);
+}
+
+/* Used in DB IDO, Livestatus and IcingaDB. */
+String CompatUtility::GetCheckResultLongOutput(const CheckResult::Ptr& cr)
+{
+ if (!cr)
+ return Empty;
+
+ String long_output;
+ String output;
+
+ String raw_output = cr->GetOutput();
+
+ size_t line_end = raw_output.Find("\n");
+
+ if (line_end > 0 && line_end != String::NPos) {
+ long_output = raw_output.SubStr(line_end+1, raw_output.GetLength());
+ return EscapeString(long_output);
+ }
+
+ return Empty;
+}
+
+/* Helper for DB IDO and Livestatus. */
+String CompatUtility::EscapeString(const String& str)
+{
+ String result = str;
+ boost::algorithm::replace_all(result, "\n", "\\n");
+ return result;
+}
+
+/* Used in ExternalCommandListener. */
+String CompatUtility::UnEscapeString(const String& str)
+{
+ String result = str;
+ boost::algorithm::replace_all(result, "\\n", "\n");
+ return result;
+}
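
For illustration only (not part of the diff): the two check-result helpers above split a plugin's raw output at the first newline. A worked example, assuming cr->GetOutput() returns the string shown:

	// raw output:                    "DISK OK - free space\n/ 1024 MB\n/var 2048 MB"
	// GetCheckResultOutput(cr)     -> "DISK OK - free space"
	// GetCheckResultLongOutput(cr) -> "/ 1024 MB\\n/var 2048 MB"   (EscapeString turns "\n" into "\\n")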
diff --git a/lib/icinga/compatutility.hpp b/lib/icinga/compatutility.hpp
new file mode 100644
index 0000000..7b96fb3
--- /dev/null
+++ b/lib/icinga/compatutility.hpp
@@ -0,0 +1,56 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef COMPATUTILITY_H
+#define COMPATUTILITY_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/host.hpp"
+#include "icinga/command.hpp"
+
+namespace icinga
+{
+
+/**
+ * Compatibility utility functions.
+ *
+ * @ingroup icinga
+ */
+class CompatUtility
+{
+public:
+ /* command */
+ static String GetCommandLine(const Command::Ptr& command);
+ static String GetCommandName(const Command::Ptr& command);
+
+ /* service */
+ static String GetCheckableCommandArgs(const Checkable::Ptr& checkable);
+
+ /* notification */
+ static int GetCheckableNotificationsEnabled(const Checkable::Ptr& checkable);
+ static int GetCheckableNotificationLastNotification(const Checkable::Ptr& checkable);
+ static int GetCheckableNotificationNextNotification(const Checkable::Ptr& checkable);
+ static int GetCheckableNotificationNotificationNumber(const Checkable::Ptr& checkable);
+ static double GetCheckableNotificationNotificationInterval(const Checkable::Ptr& checkable);
+ static int GetCheckableNotificationTypeFilter(const Checkable::Ptr& checkable);
+ static int GetCheckableNotificationStateFilter(const Checkable::Ptr& checkable);
+
+ static std::set<User::Ptr> GetCheckableNotificationUsers(const Checkable::Ptr& checkable);
+ static std::set<UserGroup::Ptr> GetCheckableNotificationUserGroups(const Checkable::Ptr& checkable);
+
+ /* check result */
+ static String GetCheckResultOutput(const CheckResult::Ptr& cr);
+ static String GetCheckResultLongOutput(const CheckResult::Ptr& cr);
+
+ /* misc */
+ static String EscapeString(const String& str);
+ static String UnEscapeString(const String& str);
+
+private:
+ CompatUtility();
+
+ static String GetCommandNamePrefix(const Command::Ptr& command);
+};
+
+}
+
+#endif /* COMPATUTILITY_H */
diff --git a/lib/icinga/customvarobject.cpp b/lib/icinga/customvarobject.cpp
new file mode 100644
index 0000000..fc1fd27
--- /dev/null
+++ b/lib/icinga/customvarobject.cpp
@@ -0,0 +1,49 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/customvarobject.hpp"
+#include "icinga/customvarobject-ti.cpp"
+#include "icinga/macroprocessor.hpp"
+#include "base/logger.hpp"
+#include "base/function.hpp"
+#include "base/exception.hpp"
+#include "base/objectlock.hpp"
+
+using namespace icinga;
+
+REGISTER_TYPE(CustomVarObject);
+
+void CustomVarObject::ValidateVars(const Lazy<Dictionary::Ptr>& lvalue, const ValidationUtils& utils)
+{
+ MacroProcessor::ValidateCustomVars(this, lvalue());
+}
+
+int icinga::FilterArrayToInt(const Array::Ptr& typeFilters, const std::map<String, int>& filterMap, int defaultValue)
+{
+ int resultTypeFilter;
+
+ if (!typeFilters)
+ return defaultValue;
+
+ resultTypeFilter = 0;
+
+ ObjectLock olock(typeFilters);
+ for (const Value& typeFilter : typeFilters) {
+ if (typeFilter.IsNumber()) {
+ resultTypeFilter = resultTypeFilter | typeFilter;
+ continue;
+ }
+
+ if (!typeFilter.IsString())
+ return -1;
+
+ auto it = filterMap.find(typeFilter);
+
+ if (it == filterMap.end())
+ return -1;
+
+ resultTypeFilter = resultTypeFilter | it->second;
+ }
+
+ return resultTypeFilter;
+}
+
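For illustration only (not part of the diff): a short sketch of FilterArrayToInt(), using the state filter map that Notification exposes (the same map dependency.cpp below passes in):

	Array::Ptr states = new Array({ "OK", "Warning" });
	int filter = FilterArrayToInt(states, Notification::GetStateFilterMap(), ~0);
	// -> StateFilterOK | StateFilterWarning
	// A null array returns the supplied default (~0 here); an unknown string yields -1.
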
diff --git a/lib/icinga/customvarobject.hpp b/lib/icinga/customvarobject.hpp
new file mode 100644
index 0000000..e10ef32
--- /dev/null
+++ b/lib/icinga/customvarobject.hpp
@@ -0,0 +1,31 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CUSTOMVAROBJECT_H
+#define CUSTOMVAROBJECT_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/customvarobject-ti.hpp"
+#include "base/configobject.hpp"
+#include "remote/messageorigin.hpp"
+
+namespace icinga
+{
+
+/**
+ * An object with custom variable attribute.
+ *
+ * @ingroup icinga
+ */
+class CustomVarObject : public ObjectImpl<CustomVarObject>
+{
+public:
+ DECLARE_OBJECT(CustomVarObject);
+
+ void ValidateVars(const Lazy<Dictionary::Ptr>& lvalue, const ValidationUtils& utils) final;
+};
+
+int FilterArrayToInt(const Array::Ptr& typeFilters, const std::map<String, int>& filterMap, int defaultValue);
+
+}
+
+#endif /* CUSTOMVAROBJECT_H */
diff --git a/lib/icinga/customvarobject.ti b/lib/icinga/customvarobject.ti
new file mode 100644
index 0000000..3e40f66
--- /dev/null
+++ b/lib/icinga/customvarobject.ti
@@ -0,0 +1,15 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+
+library icinga;
+
+namespace icinga
+{
+
+abstract class CustomVarObject : ConfigObject
+{
+ [config, signal_with_old_value] Dictionary::Ptr vars;
+};
+
+}
diff --git a/lib/icinga/dependency-apply.cpp b/lib/icinga/dependency-apply.cpp
new file mode 100644
index 0000000..8681c43
--- /dev/null
+++ b/lib/icinga/dependency-apply.cpp
@@ -0,0 +1,161 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/dependency.hpp"
+#include "icinga/service.hpp"
+#include "config/configitembuilder.hpp"
+#include "config/applyrule.hpp"
+#include "base/initialize.hpp"
+#include "base/configtype.hpp"
+#include "base/logger.hpp"
+#include "base/context.hpp"
+#include "base/workqueue.hpp"
+#include "base/exception.hpp"
+
+using namespace icinga;
+
+INITIALIZE_ONCE([]() {
+ ApplyRule::RegisterType("Dependency", { "Host", "Service" });
+});
+
+bool Dependency::EvaluateApplyRuleInstance(const Checkable::Ptr& checkable, const String& name, ScriptFrame& frame, const ApplyRule& rule, bool skipFilter)
+{
+ if (!skipFilter && !rule.EvaluateFilter(frame))
+ return false;
+
+ auto& di (rule.GetDebugInfo());
+
+#ifdef _DEBUG
+ Log(LogDebug, "Dependency")
+ << "Applying dependency '" << name << "' to object '" << checkable->GetName() << "' for rule " << di;
+#endif /* _DEBUG */
+
+ ConfigItemBuilder builder{di};
+ builder.SetType(Dependency::TypeInstance);
+ builder.SetName(name);
+ builder.SetScope(frame.Locals->ShallowClone());
+ builder.SetIgnoreOnError(rule.GetIgnoreOnError());
+
+ builder.AddExpression(new ImportDefaultTemplatesExpression());
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ builder.AddExpression(new SetExpression(MakeIndexer(ScopeThis, "parent_host_name"), OpSetLiteral, MakeLiteral(host->GetName()), di));
+ builder.AddExpression(new SetExpression(MakeIndexer(ScopeThis, "child_host_name"), OpSetLiteral, MakeLiteral(host->GetName()), di));
+
+ if (service)
+ builder.AddExpression(new SetExpression(MakeIndexer(ScopeThis, "child_service_name"), OpSetLiteral, MakeLiteral(service->GetShortName()), di));
+
+ String zone = checkable->GetZoneName();
+
+ if (!zone.IsEmpty())
+ builder.AddExpression(new SetExpression(MakeIndexer(ScopeThis, "zone"), OpSetLiteral, MakeLiteral(zone), di));
+
+ builder.AddExpression(new SetExpression(MakeIndexer(ScopeThis, "package"), OpSetLiteral, MakeLiteral(rule.GetPackage()), di));
+
+ builder.AddExpression(new OwnedExpression(rule.GetExpression()));
+
+ ConfigItem::Ptr dependencyItem = builder.Compile();
+ dependencyItem->Register();
+
+ return true;
+}
+
+bool Dependency::EvaluateApplyRule(const Checkable::Ptr& checkable, const ApplyRule& rule, bool skipFilter)
+{
+ auto& di (rule.GetDebugInfo());
+
+ CONTEXT("Evaluating 'apply' rule (" << di << ")");
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ ScriptFrame frame(true);
+ if (rule.GetScope())
+ rule.GetScope()->CopyTo(frame.Locals);
+ frame.Locals->Set("host", host);
+ if (service)
+ frame.Locals->Set("service", service);
+
+ Value vinstances;
+
+ if (rule.GetFTerm()) {
+ try {
+ vinstances = rule.GetFTerm()->Evaluate(frame);
+ } catch (const std::exception&) {
+ /* Silently ignore errors here and assume there are no instances. */
+ return false;
+ }
+ } else {
+ vinstances = new Array({ "" });
+ }
+
+ bool match = false;
+
+ if (vinstances.IsObjectType<Array>()) {
+ if (!rule.GetFVVar().IsEmpty())
+ BOOST_THROW_EXCEPTION(ScriptError("Dictionary iterator requires value to be a dictionary.", di));
+
+ Array::Ptr arr = vinstances;
+
+ ObjectLock olock(arr);
+ for (const Value& instance : arr) {
+ String name = rule.GetName();
+
+ if (!rule.GetFKVar().IsEmpty()) {
+ frame.Locals->Set(rule.GetFKVar(), instance);
+ name += instance;
+ }
+
+ if (EvaluateApplyRuleInstance(checkable, name, frame, rule, skipFilter))
+ match = true;
+ }
+ } else if (vinstances.IsObjectType<Dictionary>()) {
+ if (rule.GetFVVar().IsEmpty())
+ BOOST_THROW_EXCEPTION(ScriptError("Array iterator requires value to be an array.", di));
+
+ Dictionary::Ptr dict = vinstances;
+
+ for (const String& key : dict->GetKeys()) {
+ frame.Locals->Set(rule.GetFKVar(), key);
+ frame.Locals->Set(rule.GetFVVar(), dict->Get(key));
+
+ if (EvaluateApplyRuleInstance(checkable, rule.GetName() + key, frame, rule, skipFilter))
+ match = true;
+ }
+ }
+
+ return match;
+}
+
+void Dependency::EvaluateApplyRules(const Host::Ptr& host)
+{
+ CONTEXT("Evaluating 'apply' rules for host '" << host->GetName() << "'");
+
+ for (auto& rule : ApplyRule::GetRules(Dependency::TypeInstance, Host::TypeInstance)) {
+ if (EvaluateApplyRule(host, *rule))
+ rule->AddMatch();
+ }
+
+ for (auto& rule : ApplyRule::GetTargetedHostRules(Dependency::TypeInstance, host->GetName())) {
+ if (EvaluateApplyRule(host, *rule, true))
+ rule->AddMatch();
+ }
+}
+
+void Dependency::EvaluateApplyRules(const Service::Ptr& service)
+{
+ CONTEXT("Evaluating 'apply' rules for service '" << service->GetName() << "'");
+
+ for (auto& rule : ApplyRule::GetRules(Dependency::TypeInstance, Service::TypeInstance)) {
+ if (EvaluateApplyRule(service, *rule))
+ rule->AddMatch();
+ }
+
+ for (auto& rule : ApplyRule::GetTargetedServiceRules(Dependency::TypeInstance, service->GetHost()->GetName(), service->GetShortName())) {
+ if (EvaluateApplyRule(service, *rule, true))
+ rule->AddMatch();
+ }
+}
diff --git a/lib/icinga/dependency.cpp b/lib/icinga/dependency.cpp
new file mode 100644
index 0000000..2843b90
--- /dev/null
+++ b/lib/icinga/dependency.cpp
@@ -0,0 +1,325 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/dependency.hpp"
+#include "icinga/dependency-ti.cpp"
+#include "icinga/service.hpp"
+#include "base/configobject.hpp"
+#include "base/initialize.hpp"
+#include "base/logger.hpp"
+#include "base/exception.hpp"
+#include <map>
+#include <sstream>
+#include <utility>
+
+using namespace icinga;
+
+REGISTER_TYPE(Dependency);
+
+bool Dependency::m_AssertNoCyclesForIndividualDeps = false;
+
+struct DependencyCycleNode
+{
+ bool Visited = false;
+ bool OnStack = false;
+};
+
+struct DependencyStackFrame
+{
+ ConfigObject::Ptr Node;
+ bool Implicit;
+
+ inline DependencyStackFrame(ConfigObject::Ptr node, bool implicit = false) : Node(std::move(node)), Implicit(implicit)
+ { }
+};
+
+struct DependencyCycleGraph
+{
+ std::map<Checkable::Ptr, DependencyCycleNode> Nodes;
+ std::vector<DependencyStackFrame> Stack;
+};
+
+static void AssertNoDependencyCycle(const Checkable::Ptr& checkable, DependencyCycleGraph& graph, bool implicit = false);
+
+static void AssertNoParentDependencyCycle(const Checkable::Ptr& parent, DependencyCycleGraph& graph, bool implicit)
+{
+ if (graph.Nodes[parent].OnStack) {
+ std::ostringstream oss;
+ oss << "Dependency cycle:\n";
+
+ for (auto& frame : graph.Stack) {
+ oss << frame.Node->GetReflectionType()->GetName() << " '" << frame.Node->GetName() << "'";
+
+ if (frame.Implicit) {
+ oss << " (implicit)";
+ }
+
+ oss << "\n-> ";
+ }
+
+ oss << parent->GetReflectionType()->GetName() << " '" << parent->GetName() << "'";
+
+ if (implicit) {
+ oss << " (implicit)";
+ }
+
+ BOOST_THROW_EXCEPTION(ScriptError(oss.str()));
+ }
+
+ AssertNoDependencyCycle(parent, graph, implicit);
+}
+
+static void AssertNoDependencyCycle(const Checkable::Ptr& checkable, DependencyCycleGraph& graph, bool implicit)
+{
+ auto& node (graph.Nodes[checkable]);
+
+ if (!node.Visited) {
+ node.Visited = true;
+ node.OnStack = true;
+ graph.Stack.emplace_back(checkable, implicit);
+
+ for (auto& dep : checkable->GetDependencies()) {
+ graph.Stack.emplace_back(dep);
+ AssertNoParentDependencyCycle(dep->GetParent(), graph, false);
+ graph.Stack.pop_back();
+ }
+
+ {
+ auto service (dynamic_pointer_cast<Service>(checkable));
+
+ if (service) {
+ AssertNoParentDependencyCycle(service->GetHost(), graph, true);
+ }
+ }
+
+ graph.Stack.pop_back();
+ node.OnStack = false;
+ }
+}
+
+void Dependency::AssertNoCycles()
+{
+ DependencyCycleGraph graph;
+
+ for (auto& host : ConfigType::GetObjectsByType<Host>()) {
+ AssertNoDependencyCycle(host, graph);
+ }
+
+ for (auto& service : ConfigType::GetObjectsByType<Service>()) {
+ AssertNoDependencyCycle(service, graph);
+ }
+
+ m_AssertNoCyclesForIndividualDeps = true;
+}
+
+String DependencyNameComposer::MakeName(const String& shortName, const Object::Ptr& context) const
+{
+ Dependency::Ptr dependency = dynamic_pointer_cast<Dependency>(context);
+
+ if (!dependency)
+ return "";
+
+ String name = dependency->GetChildHostName();
+
+ if (!dependency->GetChildServiceName().IsEmpty())
+ name += "!" + dependency->GetChildServiceName();
+
+ name += "!" + shortName;
+
+ return name;
+}
+
+Dictionary::Ptr DependencyNameComposer::ParseName(const String& name) const
+{
+ std::vector<String> tokens = name.Split("!");
+
+ if (tokens.size() < 2)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid Dependency name."));
+
+ Dictionary::Ptr result = new Dictionary();
+ result->Set("child_host_name", tokens[0]);
+
+ if (tokens.size() > 2) {
+ result->Set("child_service_name", tokens[1]);
+ result->Set("name", tokens[2]);
+ } else {
+ result->Set("name", tokens[1]);
+ }
+
+ return result;
+}
+
+void Dependency::OnConfigLoaded()
+{
+ Value defaultFilter;
+
+ if (GetParentServiceName().IsEmpty())
+ defaultFilter = StateFilterUp;
+ else
+ defaultFilter = StateFilterOK | StateFilterWarning;
+
+ SetStateFilter(FilterArrayToInt(GetStates(), Notification::GetStateFilterMap(), defaultFilter));
+}
+
+void Dependency::OnAllConfigLoaded()
+{
+ ObjectImpl<Dependency>::OnAllConfigLoaded();
+
+ Host::Ptr childHost = Host::GetByName(GetChildHostName());
+
+ if (childHost) {
+ if (GetChildServiceName().IsEmpty())
+ m_Child = childHost;
+ else
+ m_Child = childHost->GetServiceByShortName(GetChildServiceName());
+ }
+
+ if (!m_Child)
+ BOOST_THROW_EXCEPTION(ScriptError("Dependency '" + GetName() + "' references a child host/service which doesn't exist.", GetDebugInfo()));
+
+ Host::Ptr parentHost = Host::GetByName(GetParentHostName());
+
+ if (parentHost) {
+ if (GetParentServiceName().IsEmpty())
+ m_Parent = parentHost;
+ else
+ m_Parent = parentHost->GetServiceByShortName(GetParentServiceName());
+ }
+
+ if (!m_Parent)
+ BOOST_THROW_EXCEPTION(ScriptError("Dependency '" + GetName() + "' references a parent host/service which doesn't exist.", GetDebugInfo()));
+
+ m_Child->AddDependency(this);
+ m_Parent->AddReverseDependency(this);
+
+ if (m_AssertNoCyclesForIndividualDeps) {
+ DependencyCycleGraph graph;
+
+ try {
+ AssertNoDependencyCycle(m_Parent, graph);
+ } catch (...) {
+ m_Child->RemoveDependency(this);
+ m_Parent->RemoveReverseDependency(this);
+ throw;
+ }
+ }
+}
+
+void Dependency::Stop(bool runtimeRemoved)
+{
+ ObjectImpl<Dependency>::Stop(runtimeRemoved);
+
+ GetChild()->RemoveDependency(this);
+ GetParent()->RemoveReverseDependency(this);
+}
+
+bool Dependency::IsAvailable(DependencyType dt) const
+{
+ Checkable::Ptr parent = GetParent();
+
+ Host::Ptr parentHost;
+ Service::Ptr parentService;
+ tie(parentHost, parentService) = GetHostService(parent);
+
+ /* ignore if it's the same checkable object */
+ if (parent == GetChild()) {
+ Log(LogNotice, "Dependency")
+ << "Dependency '" << GetName() << "' passed: Parent and child " << (parentService ? "service" : "host") << " are identical.";
+ return true;
+ }
+
+ /* ignore pending */
+ if (!parent->GetLastCheckResult()) {
+ Log(LogNotice, "Dependency")
+ << "Dependency '" << GetName() << "' passed: Parent " << (parentService ? "service" : "host") << " '" << parent->GetName() << "' hasn't been checked yet.";
+ return true;
+ }
+
+ if (GetIgnoreSoftStates()) {
+ /* ignore soft states */
+ if (parent->GetStateType() == StateTypeSoft) {
+ Log(LogNotice, "Dependency")
+ << "Dependency '" << GetName() << "' passed: Parent " << (parentService ? "service" : "host") << " '" << parent->GetName() << "' is in a soft state.";
+ return true;
+ }
+ } else {
+ Log(LogNotice, "Dependency")
+ << "Dependency '" << GetName() << "' failed: Parent " << (parentService ? "service" : "host") << " '" << parent->GetName() << "' is in a soft state.";
+ }
+
+ int state;
+
+ if (parentService)
+ state = ServiceStateToFilter(parentService->GetState());
+ else
+ state = HostStateToFilter(parentHost->GetState());
+
+ /* check state */
+ if (state & GetStateFilter()) {
+ Log(LogNotice, "Dependency")
+ << "Dependency '" << GetName() << "' passed: Parent " << (parentService ? "service" : "host") << " '" << parent->GetName() << "' matches state filter.";
+ return true;
+ }
+
+ /* ignore if not in time period */
+ TimePeriod::Ptr tp = GetPeriod();
+ if (tp && !tp->IsInside(Utility::GetTime())) {
+ Log(LogNotice, "Dependency")
+ << "Dependency '" << GetName() << "' passed: Outside time period.";
+ return true;
+ }
+
+ if (dt == DependencyCheckExecution && !GetDisableChecks()) {
+ Log(LogNotice, "Dependency")
+ << "Dependency '" << GetName() << "' passed: Checks are not disabled.";
+ return true;
+ } else if (dt == DependencyNotification && !GetDisableNotifications()) {
+ Log(LogNotice, "Dependency")
+ << "Dependency '" << GetName() << "' passed: Notifications are not disabled";
+ return true;
+ }
+
+ Log(LogNotice, "Dependency")
+ << "Dependency '" << GetName() << "' failed. Parent "
+ << (parentService ? "service" : "host") << " '" << parent->GetName() << "' is "
+ << (parentService ? Service::StateToString(parentService->GetState()) : Host::StateToString(parentHost->GetState()));
+
+ return false;
+}
+
+Checkable::Ptr Dependency::GetChild() const
+{
+ return m_Child;
+}
+
+Checkable::Ptr Dependency::GetParent() const
+{
+ return m_Parent;
+}
+
+TimePeriod::Ptr Dependency::GetPeriod() const
+{
+ return TimePeriod::GetByName(GetPeriodRaw());
+}
+
+void Dependency::ValidateStates(const Lazy<Array::Ptr>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<Dependency>::ValidateStates(lvalue, utils);
+
+ int sfilter = FilterArrayToInt(lvalue(), Notification::GetStateFilterMap(), 0);
+
+ if (GetParentServiceName().IsEmpty() && (sfilter & ~(StateFilterUp | StateFilterDown)) != 0)
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "states" }, "State filter is invalid for host dependency."));
+
+ if (!GetParentServiceName().IsEmpty() && (sfilter & ~(StateFilterOK | StateFilterWarning | StateFilterCritical | StateFilterUnknown)) != 0)
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "states" }, "State filter is invalid for service dependency."));
+}
+
+void Dependency::SetParent(intrusive_ptr<Checkable> parent)
+{
+ m_Parent = parent;
+}
+
+void Dependency::SetChild(intrusive_ptr<Checkable> child)
+{
+ m_Child = child;
+}
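
For illustration only (not part of the diff): a sketch of how IsAvailable() is typically consulted before running a check; the loop below is illustrative, the real call sites live in Checkable:

	for (const Dependency::Ptr& dep : checkable->GetDependencies()) {
		if (!dep->IsAvailable(DependencyCheckExecution)) {
			// The parent is in a state outside the configured filter and this
			// dependency has disable_checks set, so the child's check is suppressed.
			return;
		}
	}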
diff --git a/lib/icinga/dependency.hpp b/lib/icinga/dependency.hpp
new file mode 100644
index 0000000..6cebfaa
--- /dev/null
+++ b/lib/icinga/dependency.hpp
@@ -0,0 +1,62 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef DEPENDENCY_H
+#define DEPENDENCY_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/dependency-ti.hpp"
+
+namespace icinga
+{
+
+class ApplyRule;
+struct ScriptFrame;
+class Host;
+class Service;
+
+/**
+ * A service dependency.
+ *
+ * @ingroup icinga
+ */
+class Dependency final : public ObjectImpl<Dependency>
+{
+public:
+ DECLARE_OBJECT(Dependency);
+ DECLARE_OBJECTNAME(Dependency);
+
+ intrusive_ptr<Checkable> GetParent() const;
+ intrusive_ptr<Checkable> GetChild() const;
+
+ TimePeriod::Ptr GetPeriod() const;
+
+ bool IsAvailable(DependencyType dt) const;
+
+ void ValidateStates(const Lazy<Array::Ptr>& lvalue, const ValidationUtils& utils) override;
+
+ static void EvaluateApplyRules(const intrusive_ptr<Host>& host);
+ static void EvaluateApplyRules(const intrusive_ptr<Service>& service);
+ static void AssertNoCycles();
+
+ /* Note: Only use them for unit test mocks. Prefer OnConfigLoaded(). */
+ void SetParent(intrusive_ptr<Checkable> parent);
+ void SetChild(intrusive_ptr<Checkable> child);
+
+protected:
+ void OnConfigLoaded() override;
+ void OnAllConfigLoaded() override;
+ void Stop(bool runtimeRemoved) override;
+
+private:
+ Checkable::Ptr m_Parent;
+ Checkable::Ptr m_Child;
+
+ static bool m_AssertNoCyclesForIndividualDeps;
+
+ static bool EvaluateApplyRuleInstance(const Checkable::Ptr& checkable, const String& name, ScriptFrame& frame, const ApplyRule& rule, bool skipFilter);
+ static bool EvaluateApplyRule(const Checkable::Ptr& checkable, const ApplyRule& rule, bool skipFilter = false);
+};
+
+}
+
+#endif /* DEPENDENCY_H */
diff --git a/lib/icinga/dependency.ti b/lib/icinga/dependency.ti
new file mode 100644
index 0000000..41de7ba
--- /dev/null
+++ b/lib/icinga/dependency.ti
@@ -0,0 +1,101 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/customvarobject.hpp"
+#include "icinga/checkable.hpp"
+#impl_include "icinga/service.hpp"
+
+library icinga;
+
+namespace icinga
+{
+
+code {{{
+class DependencyNameComposer : public NameComposer
+{
+public:
+ virtual String MakeName(const String& shortName, const Object::Ptr& context) const;
+ virtual Dictionary::Ptr ParseName(const String& name) const;
+};
+}}}
+
+class Dependency : CustomVarObject < DependencyNameComposer
+{
+ load_after Host;
+ load_after Service;
+
+ [config, no_user_modify, required, navigation(child_host)] name(Host) child_host_name {
+ navigate {{{
+ return Host::GetByName(GetChildHostName());
+ }}}
+ };
+
+ [config, no_user_modify, navigation(child_service)] String child_service_name {
+ track {{{
+ if (!oldValue.IsEmpty()) {
+ Service::Ptr service = Service::GetByNamePair(GetChildHostName(), oldValue);
+ DependencyGraph::RemoveDependency(this, service.get());
+ }
+
+ if (!newValue.IsEmpty()) {
+ Service::Ptr service = Service::GetByNamePair(GetChildHostName(), newValue);
+ DependencyGraph::AddDependency(this, service.get());
+ }
+ }}}
+ navigate {{{
+ if (GetChildServiceName().IsEmpty())
+ return nullptr;
+
+ Host::Ptr host = Host::GetByName(GetChildHostName());
+ return host->GetServiceByShortName(GetChildServiceName());
+ }}}
+ };
+
+ [config, no_user_modify, required, navigation(parent_host)] name(Host) parent_host_name {
+ navigate {{{
+ return Host::GetByName(GetParentHostName());
+ }}}
+ };
+
+ [config, no_user_modify, navigation(parent_service)] String parent_service_name {
+ track {{{
+ if (!oldValue.IsEmpty()) {
+ Service::Ptr service = Service::GetByNamePair(GetParentHostName(), oldValue);
+ DependencyGraph::RemoveDependency(this, service.get());
+ }
+
+ if (!newValue.IsEmpty()) {
+ Service::Ptr service = Service::GetByNamePair(GetParentHostName(), newValue);
+ DependencyGraph::AddDependency(this, service.get());
+ }
+ }}}
+ navigate {{{
+ if (GetParentServiceName().IsEmpty())
+ return nullptr;
+
+ Host::Ptr host = Host::GetByName(GetParentHostName());
+ return host->GetServiceByShortName(GetParentServiceName());
+ }}}
+ };
+
+ [config] String redundancy_group;
+
+ [config, navigation] name(TimePeriod) period (PeriodRaw) {
+ navigate {{{
+ return TimePeriod::GetByName(GetPeriodRaw());
+ }}}
+ };
+
+ [config] array(Value) states;
+ [no_user_view, no_user_modify] int state_filter_real (StateFilter);
+
+ [config] bool ignore_soft_states {
+ default {{{ return true; }}}
+ };
+
+ [config] bool disable_checks;
+ [config] bool disable_notifications {
+ default {{{ return true; }}}
+ };
+};
+
+}
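
For illustration only (not part of the diff): the .ti declarations above are expanded by the class generator into typed getters/setters whose names line up with the calls already used in dependency.cpp, for example:

	//   [config] bool ignore_soft_states                          -> GetIgnoreSoftStates()
	//   [config, navigation] name(TimePeriod) period (PeriodRaw)  -> GetPeriodRaw()
	//   int state_filter_real (StateFilter)                       -> GetStateFilter() / SetStateFilter()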
diff --git a/lib/icinga/downtime.cpp b/lib/icinga/downtime.cpp
new file mode 100644
index 0000000..2178953
--- /dev/null
+++ b/lib/icinga/downtime.cpp
@@ -0,0 +1,584 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/downtime.hpp"
+#include "icinga/downtime-ti.cpp"
+#include "icinga/host.hpp"
+#include "icinga/scheduleddowntime.hpp"
+#include "remote/configobjectutility.hpp"
+#include "base/configtype.hpp"
+#include "base/utility.hpp"
+#include "base/timer.hpp"
+#include <boost/thread/once.hpp>
+#include <cmath>
+#include <utility>
+
+using namespace icinga;
+
+static int l_NextDowntimeID = 1;
+static std::mutex l_DowntimeMutex;
+static std::map<int, String> l_LegacyDowntimesCache;
+static Timer::Ptr l_DowntimesOrphanedTimer;
+static Timer::Ptr l_DowntimesStartTimer;
+
+boost::signals2::signal<void (const Downtime::Ptr&)> Downtime::OnDowntimeAdded;
+boost::signals2::signal<void (const Downtime::Ptr&)> Downtime::OnDowntimeRemoved;
+boost::signals2::signal<void (const Downtime::Ptr&)> Downtime::OnDowntimeStarted;
+boost::signals2::signal<void (const Downtime::Ptr&)> Downtime::OnDowntimeTriggered;
+boost::signals2::signal<void (const Downtime::Ptr&, const String&, double, const MessageOrigin::Ptr&)> Downtime::OnRemovalInfoChanged;
+
+REGISTER_TYPE(Downtime);
+
+INITIALIZE_ONCE(&Downtime::StaticInitialize);
+
+void Downtime::StaticInitialize()
+{
+ ScriptGlobal::Set("Icinga.DowntimeNoChildren", "DowntimeNoChildren");
+ ScriptGlobal::Set("Icinga.DowntimeTriggeredChildren", "DowntimeTriggeredChildren");
+ ScriptGlobal::Set("Icinga.DowntimeNonTriggeredChildren", "DowntimeNonTriggeredChildren");
+}
+
+String DowntimeNameComposer::MakeName(const String& shortName, const Object::Ptr& context) const
+{
+ Downtime::Ptr downtime = dynamic_pointer_cast<Downtime>(context);
+
+ if (!downtime)
+ return "";
+
+ String name = downtime->GetHostName();
+
+ if (!downtime->GetServiceName().IsEmpty())
+ name += "!" + downtime->GetServiceName();
+
+ name += "!" + shortName;
+
+ return name;
+}
+
+Dictionary::Ptr DowntimeNameComposer::ParseName(const String& name) const
+{
+ std::vector<String> tokens = name.Split("!");
+
+ if (tokens.size() < 2)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid Downtime name."));
+
+ Dictionary::Ptr result = new Dictionary();
+ result->Set("host_name", tokens[0]);
+
+ if (tokens.size() > 2) {
+ result->Set("service_name", tokens[1]);
+ result->Set("name", tokens[2]);
+ } else {
+ result->Set("name", tokens[1]);
+ }
+
+ return result;
+}
+
+void Downtime::OnAllConfigLoaded()
+{
+ ObjectImpl<Downtime>::OnAllConfigLoaded();
+
+ if (GetServiceName().IsEmpty())
+ m_Checkable = Host::GetByName(GetHostName());
+ else
+ m_Checkable = Service::GetByNamePair(GetHostName(), GetServiceName());
+
+ if (!m_Checkable)
+ BOOST_THROW_EXCEPTION(ScriptError("Downtime '" + GetName() + "' references a host/service which doesn't exist.", GetDebugInfo()));
+}
+
+void Downtime::Start(bool runtimeCreated)
+{
+ ObjectImpl<Downtime>::Start(runtimeCreated);
+
+ static boost::once_flag once = BOOST_ONCE_INIT;
+
+ boost::call_once(once, [this]() {
+ l_DowntimesStartTimer = Timer::Create();
+ l_DowntimesStartTimer->SetInterval(5);
+ l_DowntimesStartTimer->OnTimerExpired.connect([](const Timer * const&){ DowntimesStartTimerHandler(); });
+ l_DowntimesStartTimer->Start();
+
+ l_DowntimesOrphanedTimer = Timer::Create();
+ l_DowntimesOrphanedTimer->SetInterval(60);
+ l_DowntimesOrphanedTimer->OnTimerExpired.connect([](const Timer * const&) { DowntimesOrphanedTimerHandler(); });
+ l_DowntimesOrphanedTimer->Start();
+ });
+
+ {
+ std::unique_lock<std::mutex> lock(l_DowntimeMutex);
+
+ SetLegacyId(l_NextDowntimeID);
+ l_LegacyDowntimesCache[l_NextDowntimeID] = GetName();
+ l_NextDowntimeID++;
+ }
+
+ Checkable::Ptr checkable = GetCheckable();
+
+ checkable->RegisterDowntime(this);
+
+ Downtime::Ptr parent = GetByName(GetParent());
+
+ if (parent)
+ parent->RegisterChild(this);
+
+ if (runtimeCreated)
+ OnDowntimeAdded(this);
+
+ /* if this object is already in a NOT-OK state trigger
+ * this downtime now *after* it has been added (important
+ * for DB IDO, etc.)
+ */
+ if (!GetFixed() && !checkable->IsStateOK(checkable->GetStateRaw())) {
+ Log(LogNotice, "Downtime")
+ << "Checkable '" << checkable->GetName() << "' already in a NOT-OK state."
+ << " Triggering downtime now.";
+
+ TriggerDowntime(std::fmax(std::fmax(GetStartTime(), GetEntryTime()), checkable->GetLastStateChange()));
+ }
+
+ if (GetFixed() && CanBeTriggered()) {
+ /* Send notifications. */
+ OnDowntimeStarted(this);
+
+ /* Trigger fixed downtime immediately. */
+ TriggerDowntime(std::fmax(GetStartTime(), GetEntryTime()));
+ }
+}
+
+void Downtime::Stop(bool runtimeRemoved)
+{
+ GetCheckable()->UnregisterDowntime(this);
+
+ Downtime::Ptr parent = GetByName(GetParent());
+
+ if (parent)
+ parent->UnregisterChild(this);
+
+ if (runtimeRemoved)
+ OnDowntimeRemoved(this);
+
+ ObjectImpl<Downtime>::Stop(runtimeRemoved);
+}
+
+void Downtime::Pause()
+{
+ if (m_CleanupTimer) {
+ m_CleanupTimer->Stop();
+ }
+
+ ObjectImpl<Downtime>::Pause();
+}
+
+void Downtime::Resume()
+{
+ ObjectImpl<Downtime>::Resume();
+ SetupCleanupTimer();
+}
+
+Checkable::Ptr Downtime::GetCheckable() const
+{
+ return static_pointer_cast<Checkable>(m_Checkable);
+}
+
+bool Downtime::IsInEffect() const
+{
+ double now = Utility::GetTime();
+
+ if (GetFixed()) {
+ /* fixed downtimes are in effect during the entire [start..end) interval */
+ return (now >= GetStartTime() && now < GetEndTime());
+ }
+
+ double triggerTime = GetTriggerTime();
+
+ if (triggerTime == 0)
+ /* flexible downtime has not been triggered yet */
+ return false;
+
+ return (now < triggerTime + GetDuration());
+}
+
+bool Downtime::IsTriggered() const
+{
+ double now = Utility::GetTime();
+
+ double triggerTime = GetTriggerTime();
+
+ return (triggerTime > 0 && triggerTime <= now);
+}
+
+bool Downtime::IsExpired() const
+{
+ double now = Utility::GetTime();
+
+ if (GetFixed())
+ return (GetEndTime() < now);
+ else {
+ /* triggered flexible downtime not in effect anymore */
+ if (IsTriggered() && !IsInEffect())
+ return true;
+ /* flexible downtime never triggered */
+ else if (!IsTriggered() && (GetEndTime() < now))
+ return true;
+ else
+ return false;
+ }
+}
+
+bool Downtime::HasValidConfigOwner() const
+{
+ if (!ScheduledDowntime::AllConfigIsLoaded()) {
+ return true;
+ }
+
+ String configOwner = GetConfigOwner();
+ return configOwner.IsEmpty() || Zone::GetByName(GetAuthoritativeZone()) != Zone::GetLocalZone() || GetObject<ScheduledDowntime>(configOwner);
+}
+
+int Downtime::GetNextDowntimeID()
+{
+ std::unique_lock<std::mutex> lock(l_DowntimeMutex);
+
+ return l_NextDowntimeID;
+}
+
+Downtime::Ptr Downtime::AddDowntime(const Checkable::Ptr& checkable, const String& author,
+ const String& comment, double startTime, double endTime, bool fixed,
+ const String& triggeredBy, double duration,
+ const String& scheduledDowntime, const String& scheduledBy, const String& parent,
+ const String& id, const MessageOrigin::Ptr& origin)
+{
+ String fullName;
+
+ if (id.IsEmpty())
+ fullName = checkable->GetName() + "!" + Utility::NewUniqueID();
+ else
+ fullName = id;
+
+ Dictionary::Ptr attrs = new Dictionary();
+
+ attrs->Set("author", author);
+ attrs->Set("comment", comment);
+ attrs->Set("start_time", startTime);
+ attrs->Set("end_time", endTime);
+ attrs->Set("fixed", fixed);
+ attrs->Set("duration", duration);
+ attrs->Set("triggered_by", triggeredBy);
+ attrs->Set("scheduled_by", scheduledBy);
+ attrs->Set("parent", parent);
+ attrs->Set("config_owner", scheduledDowntime);
+ attrs->Set("entry_time", Utility::GetTime());
+
+ if (!scheduledDowntime.IsEmpty()) {
+ auto localZone (Zone::GetLocalZone());
+
+ if (localZone) {
+ attrs->Set("authoritative_zone", localZone->GetName());
+ }
+
+ auto sd (ScheduledDowntime::GetByName(scheduledDowntime));
+
+ if (sd) {
+ attrs->Set("config_owner_hash", sd->HashDowntimeOptions());
+ }
+ }
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ attrs->Set("host_name", host->GetName());
+ if (service)
+ attrs->Set("service_name", service->GetShortName());
+
+ String zone;
+
+ if (!scheduledDowntime.IsEmpty()) {
+ auto sdt (ScheduledDowntime::GetByName(scheduledDowntime));
+
+ if (sdt) {
+ auto sdtZone (sdt->GetZone());
+
+ if (sdtZone) {
+ zone = sdtZone->GetName();
+ }
+ }
+ }
+
+ if (zone.IsEmpty()) {
+ zone = checkable->GetZoneName();
+ }
+
+ if (!zone.IsEmpty())
+ attrs->Set("zone", zone);
+
+ String config = ConfigObjectUtility::CreateObjectConfig(Downtime::TypeInstance, fullName, true, nullptr, attrs);
+
+ Array::Ptr errors = new Array();
+
+ if (!ConfigObjectUtility::CreateObject(Downtime::TypeInstance, fullName, config, errors, nullptr)) {
+ ObjectLock olock(errors);
+ for (const String& error : errors) {
+ Log(LogCritical, "Downtime", error);
+ }
+
+ BOOST_THROW_EXCEPTION(std::runtime_error("Could not create downtime."));
+ }
+
+ if (!triggeredBy.IsEmpty()) {
+ Downtime::Ptr parentDowntime = Downtime::GetByName(triggeredBy);
+ Array::Ptr triggers = parentDowntime->GetTriggers();
+
+ ObjectLock olock(triggers);
+ if (!triggers->Contains(fullName))
+ triggers->Add(fullName);
+ }
+
+ Downtime::Ptr downtime = Downtime::GetByName(fullName);
+
+ if (!downtime)
+ BOOST_THROW_EXCEPTION(std::runtime_error("Could not create downtime object."));
+
+ Log(LogInformation, "Downtime")
+ << "Added downtime '" << downtime->GetName()
+ << "' between '" << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S", startTime)
+ << "' and '" << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S", endTime) << "', author: '"
+ << author << "', " << (fixed ? "fixed" : "flexible with " + Convert::ToString(duration) + "s duration");
+
+ return downtime;
+}
+
+void Downtime::RemoveDowntime(const String& id, bool includeChildren, bool cancelled, bool expired,
+ const String& removedBy, const MessageOrigin::Ptr& origin)
+{
+ Downtime::Ptr downtime = Downtime::GetByName(id);
+
+ if (!downtime || downtime->GetPackage() != "_api")
+ return;
+
+ String config_owner = downtime->GetConfigOwner();
+
+ if (!config_owner.IsEmpty() && !expired) {
+ BOOST_THROW_EXCEPTION(invalid_downtime_removal_error("Cannot remove downtime '" + downtime->GetName() +
+ "'. It is owned by scheduled downtime object '" + config_owner + "'"));
+ }
+
+ if (includeChildren) {
+ for (const Downtime::Ptr& child : downtime->GetChildren()) {
+ Downtime::RemoveDowntime(child->GetName(), true, true);
+ }
+ }
+
+ if (cancelled) {
+ downtime->SetRemovalInfo(removedBy, Utility::GetTime());
+ }
+
+ Array::Ptr errors = new Array();
+
+ if (!ConfigObjectUtility::DeleteObject(downtime, false, errors, nullptr)) {
+ ObjectLock olock(errors);
+ for (const String& error : errors) {
+ Log(LogCritical, "Downtime", error);
+ }
+
+ BOOST_THROW_EXCEPTION(std::runtime_error("Could not remove downtime."));
+ }
+
+ String reason;
+
+ if (expired) {
+ reason = "expired at " + Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", downtime->GetEndTime());
+ } else if (cancelled) {
+ reason = "cancelled by user";
+ } else {
+ reason = "<unknown>";
+ }
+
+ Log msg (LogInformation, "Downtime");
+
+ msg << "Removed downtime '" << downtime->GetName() << "' from checkable";
+
+ {
+ auto checkable (downtime->GetCheckable());
+
+ if (checkable) {
+ msg << " '" << checkable->GetName() << "'";
+ }
+ }
+
+ msg << " (Reason: " << reason << ").";
+}
+
+void Downtime::RegisterChild(const Downtime::Ptr& downtime)
+{
+ std::unique_lock<std::mutex> lock(m_ChildrenMutex);
+ m_Children.insert(downtime);
+}
+
+void Downtime::UnregisterChild(const Downtime::Ptr& downtime)
+{
+ std::unique_lock<std::mutex> lock(m_ChildrenMutex);
+ m_Children.erase(downtime);
+}
+
+std::set<Downtime::Ptr> Downtime::GetChildren() const
+{
+ std::unique_lock<std::mutex> lock(m_ChildrenMutex);
+ return m_Children;
+}
+
+bool Downtime::CanBeTriggered()
+{
+ if (IsInEffect() && IsTriggered())
+ return false;
+
+ if (IsExpired())
+ return false;
+
+ double now = Utility::GetTime();
+
+ if (now < GetStartTime() || now > GetEndTime())
+ return false;
+
+ return true;
+}
+
+void Downtime::SetupCleanupTimer()
+{
+ if (!m_CleanupTimer) {
+ m_CleanupTimer = Timer::Create();
+
+ auto name (GetName());
+
+ m_CleanupTimer->OnTimerExpired.connect([name=std::move(name)](const Timer * const&) {
+ auto downtime (Downtime::GetByName(name));
+
+ if (downtime && downtime->IsExpired()) {
+ RemoveDowntime(name, false, false, true);
+ }
+ });
+ }
+
+ auto triggerTime (GetTriggerTime());
+
+ m_CleanupTimer->Reschedule((GetFixed() || triggerTime <= 0 ? GetEndTime() : triggerTime + GetDuration()) + 0.1);
+ m_CleanupTimer->Start();
+}
+
+void Downtime::TriggerDowntime(double triggerTime)
+{
+ if (!CanBeTriggered())
+ return;
+
+ Checkable::Ptr checkable = GetCheckable();
+
+ Log(LogInformation, "Downtime")
+ << "Triggering downtime '" << GetName() << "' for checkable '" << checkable->GetName() << "'.";
+
+ if (GetTriggerTime() == 0) {
+ SetTriggerTime(triggerTime);
+ }
+
+ {
+ ObjectLock olock (this);
+ SetupCleanupTimer();
+ }
+
+ Array::Ptr triggers = GetTriggers();
+
+ {
+ ObjectLock olock(triggers);
+ for (const String& triggerName : triggers) {
+ Downtime::Ptr downtime = Downtime::GetByName(triggerName);
+
+ if (!downtime)
+ continue;
+
+ downtime->TriggerDowntime(triggerTime);
+ }
+ }
+
+ OnDowntimeTriggered(this);
+}
+
+void Downtime::SetRemovalInfo(const String& removedBy, double removeTime, const MessageOrigin::Ptr& origin) {
+ {
+ ObjectLock olock(this);
+
+ SetRemovedBy(removedBy, false, origin);
+ SetRemoveTime(removeTime, false, origin);
+ }
+
+ OnRemovalInfoChanged(this, removedBy, removeTime, origin);
+}
+
+String Downtime::GetDowntimeIDFromLegacyID(int id)
+{
+ std::unique_lock<std::mutex> lock(l_DowntimeMutex);
+
+ auto it = l_LegacyDowntimesCache.find(id);
+
+ if (it == l_LegacyDowntimesCache.end())
+ return Empty;
+
+ return it->second;
+}
+
+void Downtime::DowntimesStartTimerHandler()
+{
+ /* Start fixed downtimes. Flexible downtimes will be triggered on-demand. */
+ for (const Downtime::Ptr& downtime : ConfigType::GetObjectsByType<Downtime>()) {
+ if (downtime->IsActive() &&
+ downtime->CanBeTriggered() &&
+ downtime->GetFixed()) {
+ /* Send notifications. */
+ OnDowntimeStarted(downtime);
+
+ /* Trigger fixed downtime immediately. */
+ downtime->TriggerDowntime(std::fmax(downtime->GetStartTime(), downtime->GetEntryTime()));
+ }
+ }
+}
+
+void Downtime::DowntimesOrphanedTimerHandler()
+{
+ for (const Downtime::Ptr& downtime : ConfigType::GetObjectsByType<Downtime>()) {
+ /* Only remove downtimes which are activated after daemon start. */
+ if (downtime->IsActive() && !downtime->HasValidConfigOwner())
+ RemoveDowntime(downtime->GetName(), false, false, true);
+ }
+}
+
+void Downtime::ValidateStartTime(const Lazy<Timestamp>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<Downtime>::ValidateStartTime(lvalue, utils);
+
+ if (lvalue() <= 0)
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "start_time" }, "Start time must be greater than 0."));
+}
+
+void Downtime::ValidateEndTime(const Lazy<Timestamp>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<Downtime>::ValidateEndTime(lvalue, utils);
+
+ if (lvalue() <= 0)
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "end_time" }, "End time must be greater than 0."));
+}
+
+DowntimeChildOptions Downtime::ChildOptionsFromValue(const Value& options)
+{
+ if (options == "DowntimeNoChildren")
+ return DowntimeNoChildren;
+ else if (options == "DowntimeTriggeredChildren")
+ return DowntimeTriggeredChildren;
+ else if (options == "DowntimeNonTriggeredChildren")
+ return DowntimeNonTriggeredChildren;
+ else if (options.IsNumber()) {
+ int number = options;
+ if (number >= 0 && number <= 2)
+ return static_cast<DowntimeChildOptions>(number);
+ }
+
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid child option specified"));
+}
diff --git a/lib/icinga/downtime.hpp b/lib/icinga/downtime.hpp
new file mode 100644
index 0000000..15aa0af
--- /dev/null
+++ b/lib/icinga/downtime.hpp
@@ -0,0 +1,99 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef DOWNTIME_H
+#define DOWNTIME_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/downtime-ti.hpp"
+#include "icinga/checkable-ti.hpp"
+#include "remote/messageorigin.hpp"
+
+namespace icinga
+{
+
+enum DowntimeChildOptions
+{
+ DowntimeNoChildren,
+ DowntimeTriggeredChildren,
+ DowntimeNonTriggeredChildren
+};
+
+/**
+ * A downtime.
+ *
+ * @ingroup icinga
+ */
+class Downtime final : public ObjectImpl<Downtime>
+{
+public:
+ DECLARE_OBJECT(Downtime);
+ DECLARE_OBJECTNAME(Downtime);
+
+ static boost::signals2::signal<void (const Downtime::Ptr&)> OnDowntimeAdded;
+ static boost::signals2::signal<void (const Downtime::Ptr&)> OnDowntimeRemoved;
+ static boost::signals2::signal<void (const Downtime::Ptr&)> OnDowntimeStarted;
+ static boost::signals2::signal<void (const Downtime::Ptr&)> OnDowntimeTriggered;
+ static boost::signals2::signal<void (const Downtime::Ptr&, const String&, double, const MessageOrigin::Ptr&)> OnRemovalInfoChanged;
+
+ intrusive_ptr<Checkable> GetCheckable() const;
+
+ bool IsInEffect() const;
+ bool IsTriggered() const;
+ bool IsExpired() const;
+ bool HasValidConfigOwner() const;
+
+ static void StaticInitialize();
+
+ static int GetNextDowntimeID();
+
+ static Ptr AddDowntime(const intrusive_ptr<Checkable>& checkable, const String& author,
+ const String& comment, double startTime, double endTime, bool fixed,
+ const String& triggeredBy, double duration, const String& scheduledDowntime = String(),
+ const String& scheduledBy = String(), const String& parent = String(), const String& id = String(),
+ const MessageOrigin::Ptr& origin = nullptr);
+
+ static void RemoveDowntime(const String& id, bool includeChildren, bool cancelled, bool expired = false,
+ const String& removedBy = "", const MessageOrigin::Ptr& origin = nullptr);
+
+ void RegisterChild(const Downtime::Ptr& downtime);
+ void UnregisterChild(const Downtime::Ptr& downtime);
+ std::set<Downtime::Ptr> GetChildren() const;
+
+ void TriggerDowntime(double triggerTime);
+ void SetRemovalInfo(const String& removedBy, double removeTime, const MessageOrigin::Ptr& origin = nullptr);
+
+ void OnAllConfigLoaded() override;
+
+ static String GetDowntimeIDFromLegacyID(int id);
+
+ static DowntimeChildOptions ChildOptionsFromValue(const Value& options);
+
+protected:
+ void Start(bool runtimeCreated) override;
+ void Stop(bool runtimeRemoved) override;
+
+ void Pause() override;
+ void Resume() override;
+
+ void ValidateStartTime(const Lazy<Timestamp>& lvalue, const ValidationUtils& utils) override;
+ void ValidateEndTime(const Lazy<Timestamp>& lvalue, const ValidationUtils& utils) override;
+
+private:
+ ObjectImpl<Checkable>::Ptr m_Checkable;
+
+ std::set<Downtime::Ptr> m_Children;
+ mutable std::mutex m_ChildrenMutex;
+
+ Timer::Ptr m_CleanupTimer;
+
+ bool CanBeTriggered();
+
+ void SetupCleanupTimer();
+
+ static void DowntimesStartTimerHandler();
+ static void DowntimesOrphanedTimerHandler();
+};
+
+}
+
+#endif /* DOWNTIME_H */
diff --git a/lib/icinga/downtime.ti b/lib/icinga/downtime.ti
new file mode 100644
index 0000000..21e9731
--- /dev/null
+++ b/lib/icinga/downtime.ti
@@ -0,0 +1,82 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+#include "base/utility.hpp"
+#impl_include "icinga/service.hpp"
+
+library icinga;
+
+namespace icinga
+{
+
+code {{{
+class DowntimeNameComposer : public NameComposer
+{
+public:
+ virtual String MakeName(const String& shortName, const Object::Ptr& context) const;
+ virtual Dictionary::Ptr ParseName(const String& name) const;
+};
+}}}
+
+class Downtime : ConfigObject < DowntimeNameComposer
+{
+ activation_priority -10;
+
+ load_after Host;
+ load_after Service;
+
+ [config, no_user_modify, required, navigation(host)] name(Host) host_name {
+ navigate {{{
+ return Host::GetByName(GetHostName());
+ }}}
+ };
+ [config, no_user_modify, navigation(service)] String service_name {
+ track {{{
+ if (!oldValue.IsEmpty()) {
+ Service::Ptr service = Service::GetByNamePair(GetHostName(), oldValue);
+ DependencyGraph::RemoveDependency(this, service.get());
+ }
+
+ if (!newValue.IsEmpty()) {
+ Service::Ptr service = Service::GetByNamePair(GetHostName(), newValue);
+ DependencyGraph::AddDependency(this, service.get());
+ }
+ }}}
+ navigate {{{
+ if (GetServiceName().IsEmpty())
+ return nullptr;
+
+ Host::Ptr host = Host::GetByName(GetHostName());
+ return host->GetServiceByShortName(GetServiceName());
+ }}}
+ };
+
+ [config] Timestamp entry_time {
+ default {{{ return Utility::GetTime(); }}}
+ };
+ [config, required] String author;
+ [config, required] String comment;
+ [config] Timestamp start_time;
+ [config] Timestamp end_time;
+ [state] Timestamp trigger_time;
+ [config] bool fixed;
+ [config] Timestamp duration;
+ [config] String triggered_by;
+ [config] String scheduled_by;
+ [config] String parent;
+ [state] Array::Ptr triggers {
+ default {{{ return new Array(); }}}
+ };
+ [state] int legacy_id;
+ [state] Timestamp remove_time;
+ [no_storage] bool was_cancelled {
+ get {{{ return GetRemoveTime() > 0; }}}
+ };
+ [config] String config_owner;
+ [config] String config_owner_hash;
+ [config] String authoritative_zone;
+
+ [no_user_view, no_user_modify] String removed_by;
+};
+
+}
diff --git a/lib/icinga/envresolver.cpp b/lib/icinga/envresolver.cpp
new file mode 100644
index 0000000..633255c
--- /dev/null
+++ b/lib/icinga/envresolver.cpp
@@ -0,0 +1,20 @@
+/* Icinga 2 | (c) 2020 Icinga GmbH | GPLv2+ */
+
+#include "base/string.hpp"
+#include "base/value.hpp"
+#include "icinga/envresolver.hpp"
+#include "icinga/checkresult.hpp"
+#include <cstdlib>
+
+using namespace icinga;
+
+bool EnvResolver::ResolveMacro(const String& macro, const CheckResult::Ptr&, Value *result) const
+{
+ auto value (getenv(macro.CStr()));
+
+ if (value) {
+ *result = value;
+ }
+
+ return value;
+}
diff --git a/lib/icinga/envresolver.hpp b/lib/icinga/envresolver.hpp
new file mode 100644
index 0000000..b3f0076
--- /dev/null
+++ b/lib/icinga/envresolver.hpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2020 Icinga GmbH | GPLv2+ */
+
+#ifndef ENVRESOLVER_H
+#define ENVRESOLVER_H
+
+#include "base/object.hpp"
+#include "base/string.hpp"
+#include "base/value.hpp"
+#include "icinga/macroresolver.hpp"
+#include "icinga/checkresult.hpp"
+
+namespace icinga
+{
+
+/**
+ * Resolves env var names.
+ *
+ * @ingroup icinga
+ */
+class EnvResolver final : public Object, public MacroResolver
+{
+public:
+ DECLARE_PTR_TYPEDEFS(EnvResolver);
+
+ bool ResolveMacro(const String& macro, const CheckResult::Ptr&, Value *result) const override;
+};
+
+}
+
+#endif /* ENVRESOLVER_H */
diff --git a/lib/icinga/eventcommand.cpp b/lib/icinga/eventcommand.cpp
new file mode 100644
index 0000000..39f2d31
--- /dev/null
+++ b/lib/icinga/eventcommand.cpp
@@ -0,0 +1,20 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/eventcommand.hpp"
+#include "icinga/eventcommand-ti.cpp"
+
+using namespace icinga;
+
+REGISTER_TYPE(EventCommand);
+
+thread_local EventCommand::Ptr EventCommand::ExecuteOverride;
+
+void EventCommand::Execute(const Checkable::Ptr& checkable,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
+{
+ GetExecute()->Invoke({
+ checkable,
+ resolvedMacros,
+ useResolvedMacros
+ });
+}
diff --git a/lib/icinga/eventcommand.hpp b/lib/icinga/eventcommand.hpp
new file mode 100644
index 0000000..67997e6
--- /dev/null
+++ b/lib/icinga/eventcommand.hpp
@@ -0,0 +1,32 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef EVENTCOMMAND_H
+#define EVENTCOMMAND_H
+
+#include "icinga/eventcommand-ti.hpp"
+#include "icinga/checkable.hpp"
+
+namespace icinga
+{
+
+/**
+ * An event handler command.
+ *
+ * @ingroup icinga
+ */
+class EventCommand final : public ObjectImpl<EventCommand>
+{
+public:
+ DECLARE_OBJECT(EventCommand);
+ DECLARE_OBJECTNAME(EventCommand);
+
+ static thread_local EventCommand::Ptr ExecuteOverride;
+
+ void Execute(const Checkable::Ptr& checkable,
+ const Dictionary::Ptr& resolvedMacros = nullptr,
+ bool useResolvedMacros = false);
+};
+
+}
+
+#endif /* EVENTCOMMAND_H */
diff --git a/lib/icinga/eventcommand.ti b/lib/icinga/eventcommand.ti
new file mode 100644
index 0000000..a166d1e
--- /dev/null
+++ b/lib/icinga/eventcommand.ti
@@ -0,0 +1,15 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/command.hpp"
+
+
+library icinga;
+
+namespace icinga
+{
+
+class EventCommand : Command
+{
+};
+
+}
diff --git a/lib/icinga/externalcommandprocessor.cpp b/lib/icinga/externalcommandprocessor.cpp
new file mode 100644
index 0000000..9850da0
--- /dev/null
+++ b/lib/icinga/externalcommandprocessor.cpp
@@ -0,0 +1,2281 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/externalcommandprocessor.hpp"
+#include "icinga/checkable.hpp"
+#include "icinga/host.hpp"
+#include "icinga/service.hpp"
+#include "icinga/user.hpp"
+#include "icinga/hostgroup.hpp"
+#include "icinga/servicegroup.hpp"
+#include "icinga/pluginutility.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/eventcommand.hpp"
+#include "icinga/notificationcommand.hpp"
+#include "icinga/compatutility.hpp"
+#include "remote/apifunction.hpp"
+#include "base/convert.hpp"
+#include "base/logger.hpp"
+#include "base/objectlock.hpp"
+#include "base/application.hpp"
+#include "base/utility.hpp"
+#include "base/exception.hpp"
+#include <fstream>
+#include <boost/thread/once.hpp>
+
+using namespace icinga;
+
+boost::signals2::signal<void(double, const String&, const std::vector<String>&)> ExternalCommandProcessor::OnNewExternalCommand;
+
+void ExternalCommandProcessor::Execute(const String& line)
+{
+ if (line.IsEmpty())
+ return;
+
+ if (line[0] != '[')
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Missing timestamp in command: " + line));
+
+ size_t pos = line.FindFirstOf("]");
+
+ if (pos == String::NPos)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Missing timestamp in command: " + line));
+
+ String timestamp = line.SubStr(1, pos - 1);
+ String args = line.SubStr(pos + 2, String::NPos);
+
+ double ts = Convert::ToDouble(timestamp);
+
+ if (ts == 0)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid timestamp in command: " + line));
+
+ std::vector<String> argv = args.Split(";");
+
+ if (argv.empty())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Missing arguments in command: " + line));
+
+ std::vector<String> argvExtra(argv.begin() + 1, argv.end());
+ Execute(ts, argv[0], argvExtra);
+}
+
+void ExternalCommandProcessor::Execute(double time, const String& command, const std::vector<String>& arguments)
+{
+ ExternalCommandInfo eci;
+
+ static boost::once_flag once = BOOST_ONCE_INIT;
+
+ boost::call_once(once, []() {
+ RegisterCommands();
+ });
+
+ {
+ std::unique_lock<std::mutex> lock(GetMutex());
+
+ auto it = GetCommands().find(command);
+
+ if (it == GetCommands().end())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("The external command '" + command + "' does not exist."));
+
+ eci = it->second;
+ }
+
+ if (arguments.size() < eci.MinArgs)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Expected " + Convert::ToString(eci.MinArgs) + " arguments"));
+
+ size_t argnum = std::min(arguments.size(), eci.MaxArgs);
+
+ std::vector<String> realArguments;
+ realArguments.resize(argnum);
+
+ if (argnum > 0) {
+ std::copy(arguments.begin(), arguments.begin() + argnum - 1, realArguments.begin());
+
+ String last_argument;
+ for (std::vector<String>::size_type i = argnum - 1; i < arguments.size(); i++) {
+ if (!last_argument.IsEmpty())
+ last_argument += ";";
+
+ last_argument += arguments[i];
+ }
+
+ realArguments[argnum - 1] = last_argument;
+ }
+
+ OnNewExternalCommand(time, command, realArguments);
+
+ eci.Callback(time, realArguments);
+}
+
+void ExternalCommandProcessor::RegisterCommand(const String& command, const ExternalCommandCallback& callback, size_t minArgs, size_t maxArgs)
+{
+ std::unique_lock<std::mutex> lock(GetMutex());
+ ExternalCommandInfo eci;
+ eci.Callback = callback;
+ eci.MinArgs = minArgs;
+ eci.MaxArgs = (maxArgs == UINT_MAX) ? minArgs : maxArgs;
+ GetCommands()[command] = eci;
+}
+
+void ExternalCommandProcessor::RegisterCommands()
+{
+ RegisterCommand("PROCESS_HOST_CHECK_RESULT", &ExternalCommandProcessor::ProcessHostCheckResult, 3);
+ RegisterCommand("PROCESS_SERVICE_CHECK_RESULT", &ExternalCommandProcessor::ProcessServiceCheckResult, 4);
+ RegisterCommand("SCHEDULE_HOST_CHECK", &ExternalCommandProcessor::ScheduleHostCheck, 2);
+ RegisterCommand("SCHEDULE_FORCED_HOST_CHECK", &ExternalCommandProcessor::ScheduleForcedHostCheck, 2);
+ RegisterCommand("SCHEDULE_SVC_CHECK", &ExternalCommandProcessor::ScheduleSvcCheck, 3);
+ RegisterCommand("SCHEDULE_FORCED_SVC_CHECK", &ExternalCommandProcessor::ScheduleForcedSvcCheck, 3);
+ RegisterCommand("ENABLE_HOST_CHECK", &ExternalCommandProcessor::EnableHostCheck, 1);
+ RegisterCommand("DISABLE_HOST_CHECK", &ExternalCommandProcessor::DisableHostCheck, 1);
+ RegisterCommand("ENABLE_SVC_CHECK", &ExternalCommandProcessor::EnableSvcCheck, 2);
+ RegisterCommand("DISABLE_SVC_CHECK", &ExternalCommandProcessor::DisableSvcCheck, 2);
+ RegisterCommand("SHUTDOWN_PROCESS", &ExternalCommandProcessor::ShutdownProcess);
+ RegisterCommand("RESTART_PROCESS", &ExternalCommandProcessor::RestartProcess);
+ RegisterCommand("SCHEDULE_FORCED_HOST_SVC_CHECKS", &ExternalCommandProcessor::ScheduleForcedHostSvcChecks, 2);
+ RegisterCommand("SCHEDULE_HOST_SVC_CHECKS", &ExternalCommandProcessor::ScheduleHostSvcChecks, 2);
+ RegisterCommand("ENABLE_HOST_SVC_CHECKS", &ExternalCommandProcessor::EnableHostSvcChecks, 1);
+ RegisterCommand("DISABLE_HOST_SVC_CHECKS", &ExternalCommandProcessor::DisableHostSvcChecks, 1);
+ RegisterCommand("ACKNOWLEDGE_SVC_PROBLEM", &ExternalCommandProcessor::AcknowledgeSvcProblem, 7);
+ RegisterCommand("ACKNOWLEDGE_SVC_PROBLEM_EXPIRE", &ExternalCommandProcessor::AcknowledgeSvcProblemExpire, 8);
+ RegisterCommand("REMOVE_SVC_ACKNOWLEDGEMENT", &ExternalCommandProcessor::RemoveSvcAcknowledgement, 2);
+ RegisterCommand("ACKNOWLEDGE_HOST_PROBLEM", &ExternalCommandProcessor::AcknowledgeHostProblem, 6);
+ RegisterCommand("ACKNOWLEDGE_HOST_PROBLEM_EXPIRE", &ExternalCommandProcessor::AcknowledgeHostProblemExpire, 7);
+ RegisterCommand("REMOVE_HOST_ACKNOWLEDGEMENT", &ExternalCommandProcessor::RemoveHostAcknowledgement, 1);
+ RegisterCommand("DISABLE_HOST_FLAP_DETECTION", &ExternalCommandProcessor::DisableHostFlapping, 1);
+ RegisterCommand("ENABLE_HOST_FLAP_DETECTION", &ExternalCommandProcessor::EnableHostFlapping, 1);
+ RegisterCommand("DISABLE_SVC_FLAP_DETECTION", &ExternalCommandProcessor::DisableSvcFlapping, 2);
+ RegisterCommand("ENABLE_SVC_FLAP_DETECTION", &ExternalCommandProcessor::EnableSvcFlapping, 2);
+ RegisterCommand("ENABLE_HOSTGROUP_SVC_CHECKS", &ExternalCommandProcessor::EnableHostgroupSvcChecks, 1);
+ RegisterCommand("DISABLE_HOSTGROUP_SVC_CHECKS", &ExternalCommandProcessor::DisableHostgroupSvcChecks, 1);
+ RegisterCommand("ENABLE_SERVICEGROUP_SVC_CHECKS", &ExternalCommandProcessor::EnableServicegroupSvcChecks, 1);
+ RegisterCommand("DISABLE_SERVICEGROUP_SVC_CHECKS", &ExternalCommandProcessor::DisableServicegroupSvcChecks, 1);
+ RegisterCommand("ENABLE_PASSIVE_HOST_CHECKS", &ExternalCommandProcessor::EnablePassiveHostChecks, 1);
+ RegisterCommand("DISABLE_PASSIVE_HOST_CHECKS", &ExternalCommandProcessor::DisablePassiveHostChecks, 1);
+ RegisterCommand("ENABLE_PASSIVE_SVC_CHECKS", &ExternalCommandProcessor::EnablePassiveSvcChecks, 2);
+ RegisterCommand("DISABLE_PASSIVE_SVC_CHECKS", &ExternalCommandProcessor::DisablePassiveSvcChecks, 2);
+ RegisterCommand("ENABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS", &ExternalCommandProcessor::EnableServicegroupPassiveSvcChecks, 1);
+ RegisterCommand("DISABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS", &ExternalCommandProcessor::DisableServicegroupPassiveSvcChecks, 1);
+ RegisterCommand("ENABLE_HOSTGROUP_PASSIVE_SVC_CHECKS", &ExternalCommandProcessor::EnableHostgroupPassiveSvcChecks, 1);
+ RegisterCommand("DISABLE_HOSTGROUP_PASSIVE_SVC_CHECKS", &ExternalCommandProcessor::DisableHostgroupPassiveSvcChecks, 1);
+ RegisterCommand("PROCESS_FILE", &ExternalCommandProcessor::ProcessFile, 2);
+ RegisterCommand("SCHEDULE_SVC_DOWNTIME", &ExternalCommandProcessor::ScheduleSvcDowntime, 9);
+ RegisterCommand("DEL_SVC_DOWNTIME", &ExternalCommandProcessor::DelSvcDowntime, 1);
+ RegisterCommand("SCHEDULE_HOST_DOWNTIME", &ExternalCommandProcessor::ScheduleHostDowntime, 8);
+ RegisterCommand("SCHEDULE_AND_PROPAGATE_HOST_DOWNTIME", &ExternalCommandProcessor::ScheduleAndPropagateHostDowntime, 8);
+ RegisterCommand("SCHEDULE_AND_PROPAGATE_TRIGGERED_HOST_DOWNTIME", &ExternalCommandProcessor::ScheduleAndPropagateTriggeredHostDowntime, 8);
+ RegisterCommand("DEL_HOST_DOWNTIME", &ExternalCommandProcessor::DelHostDowntime, 1);
+ RegisterCommand("DEL_DOWNTIME_BY_HOST_NAME", &ExternalCommandProcessor::DelDowntimeByHostName, 1, 4);
+ RegisterCommand("SCHEDULE_HOST_SVC_DOWNTIME", &ExternalCommandProcessor::ScheduleHostSvcDowntime, 8);
+ RegisterCommand("SCHEDULE_HOSTGROUP_HOST_DOWNTIME", &ExternalCommandProcessor::ScheduleHostgroupHostDowntime, 8);
+ RegisterCommand("SCHEDULE_HOSTGROUP_SVC_DOWNTIME", &ExternalCommandProcessor::ScheduleHostgroupSvcDowntime, 8);
+ RegisterCommand("SCHEDULE_SERVICEGROUP_HOST_DOWNTIME", &ExternalCommandProcessor::ScheduleServicegroupHostDowntime, 8);
+ RegisterCommand("SCHEDULE_SERVICEGROUP_SVC_DOWNTIME", &ExternalCommandProcessor::ScheduleServicegroupSvcDowntime, 8);
+ RegisterCommand("ADD_HOST_COMMENT", &ExternalCommandProcessor::AddHostComment, 4);
+ RegisterCommand("DEL_HOST_COMMENT", &ExternalCommandProcessor::DelHostComment, 1);
+ RegisterCommand("ADD_SVC_COMMENT", &ExternalCommandProcessor::AddSvcComment, 5);
+ RegisterCommand("DEL_SVC_COMMENT", &ExternalCommandProcessor::DelSvcComment, 1);
+ RegisterCommand("DEL_ALL_HOST_COMMENTS", &ExternalCommandProcessor::DelAllHostComments, 1);
+ RegisterCommand("DEL_ALL_SVC_COMMENTS", &ExternalCommandProcessor::DelAllSvcComments, 2);
+ RegisterCommand("SEND_CUSTOM_HOST_NOTIFICATION", &ExternalCommandProcessor::SendCustomHostNotification, 4);
+ RegisterCommand("SEND_CUSTOM_SVC_NOTIFICATION", &ExternalCommandProcessor::SendCustomSvcNotification, 5);
+ RegisterCommand("DELAY_HOST_NOTIFICATION", &ExternalCommandProcessor::DelayHostNotification, 2);
+ RegisterCommand("DELAY_SVC_NOTIFICATION", &ExternalCommandProcessor::DelaySvcNotification, 3);
+ RegisterCommand("ENABLE_HOST_NOTIFICATIONS", &ExternalCommandProcessor::EnableHostNotifications, 1);
+ RegisterCommand("DISABLE_HOST_NOTIFICATIONS", &ExternalCommandProcessor::DisableHostNotifications, 1);
+ RegisterCommand("ENABLE_SVC_NOTIFICATIONS", &ExternalCommandProcessor::EnableSvcNotifications, 2);
+ RegisterCommand("DISABLE_SVC_NOTIFICATIONS", &ExternalCommandProcessor::DisableSvcNotifications, 2);
+ RegisterCommand("ENABLE_HOST_SVC_NOTIFICATIONS", &ExternalCommandProcessor::EnableHostSvcNotifications, 1);
+ RegisterCommand("DISABLE_HOST_SVC_NOTIFICATIONS", &ExternalCommandProcessor::DisableHostSvcNotifications, 1);
+ RegisterCommand("DISABLE_HOSTGROUP_HOST_CHECKS", &ExternalCommandProcessor::DisableHostgroupHostChecks, 1);
+ RegisterCommand("DISABLE_HOSTGROUP_PASSIVE_HOST_CHECKS", &ExternalCommandProcessor::DisableHostgroupPassiveHostChecks, 1);
+ RegisterCommand("DISABLE_SERVICEGROUP_HOST_CHECKS", &ExternalCommandProcessor::DisableServicegroupHostChecks, 1);
+ RegisterCommand("DISABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS", &ExternalCommandProcessor::DisableServicegroupPassiveHostChecks, 1);
+ RegisterCommand("ENABLE_HOSTGROUP_HOST_CHECKS", &ExternalCommandProcessor::EnableHostgroupHostChecks, 1);
+ RegisterCommand("ENABLE_HOSTGROUP_PASSIVE_HOST_CHECKS", &ExternalCommandProcessor::EnableHostgroupPassiveHostChecks, 1);
+ RegisterCommand("ENABLE_SERVICEGROUP_HOST_CHECKS", &ExternalCommandProcessor::EnableServicegroupHostChecks, 1);
+ RegisterCommand("ENABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS", &ExternalCommandProcessor::EnableServicegroupPassiveHostChecks, 1);
+ RegisterCommand("ENABLE_NOTIFICATIONS", &ExternalCommandProcessor::EnableNotifications);
+ RegisterCommand("DISABLE_NOTIFICATIONS", &ExternalCommandProcessor::DisableNotifications);
+ RegisterCommand("ENABLE_FLAP_DETECTION", &ExternalCommandProcessor::EnableFlapDetection);
+ RegisterCommand("DISABLE_FLAP_DETECTION", &ExternalCommandProcessor::DisableFlapDetection);
+ RegisterCommand("ENABLE_EVENT_HANDLERS", &ExternalCommandProcessor::EnableEventHandlers);
+ RegisterCommand("DISABLE_EVENT_HANDLERS", &ExternalCommandProcessor::DisableEventHandlers);
+ RegisterCommand("ENABLE_PERFORMANCE_DATA", &ExternalCommandProcessor::EnablePerformanceData);
+ RegisterCommand("DISABLE_PERFORMANCE_DATA", &ExternalCommandProcessor::DisablePerformanceData);
+ RegisterCommand("START_EXECUTING_SVC_CHECKS", &ExternalCommandProcessor::StartExecutingSvcChecks);
+ RegisterCommand("STOP_EXECUTING_SVC_CHECKS", &ExternalCommandProcessor::StopExecutingSvcChecks);
+ RegisterCommand("START_EXECUTING_HOST_CHECKS", &ExternalCommandProcessor::StartExecutingHostChecks);
+ RegisterCommand("STOP_EXECUTING_HOST_CHECKS", &ExternalCommandProcessor::StopExecutingHostChecks);
+ RegisterCommand("CHANGE_NORMAL_SVC_CHECK_INTERVAL", &ExternalCommandProcessor::ChangeNormalSvcCheckInterval, 3);
+ RegisterCommand("CHANGE_NORMAL_HOST_CHECK_INTERVAL", &ExternalCommandProcessor::ChangeNormalHostCheckInterval, 2);
+ RegisterCommand("CHANGE_RETRY_SVC_CHECK_INTERVAL", &ExternalCommandProcessor::ChangeRetrySvcCheckInterval, 3);
+ RegisterCommand("CHANGE_RETRY_HOST_CHECK_INTERVAL", &ExternalCommandProcessor::ChangeRetryHostCheckInterval, 2);
+ RegisterCommand("ENABLE_HOST_EVENT_HANDLER", &ExternalCommandProcessor::EnableHostEventHandler, 1);
+ RegisterCommand("DISABLE_HOST_EVENT_HANDLER", &ExternalCommandProcessor::DisableHostEventHandler, 1);
+ RegisterCommand("ENABLE_SVC_EVENT_HANDLER", &ExternalCommandProcessor::EnableSvcEventHandler, 2);
+ RegisterCommand("DISABLE_SVC_EVENT_HANDLER", &ExternalCommandProcessor::DisableSvcEventHandler, 2);
+ RegisterCommand("CHANGE_HOST_EVENT_HANDLER", &ExternalCommandProcessor::ChangeHostEventHandler, 2);
+ RegisterCommand("CHANGE_SVC_EVENT_HANDLER", &ExternalCommandProcessor::ChangeSvcEventHandler, 3);
+ RegisterCommand("CHANGE_HOST_CHECK_COMMAND", &ExternalCommandProcessor::ChangeHostCheckCommand, 2);
+ RegisterCommand("CHANGE_SVC_CHECK_COMMAND", &ExternalCommandProcessor::ChangeSvcCheckCommand, 3);
+ RegisterCommand("CHANGE_MAX_HOST_CHECK_ATTEMPTS", &ExternalCommandProcessor::ChangeMaxHostCheckAttempts, 2);
+ RegisterCommand("CHANGE_MAX_SVC_CHECK_ATTEMPTS", &ExternalCommandProcessor::ChangeMaxSvcCheckAttempts, 3);
+ RegisterCommand("CHANGE_HOST_CHECK_TIMEPERIOD", &ExternalCommandProcessor::ChangeHostCheckTimeperiod, 2);
+ RegisterCommand("CHANGE_SVC_CHECK_TIMEPERIOD", &ExternalCommandProcessor::ChangeSvcCheckTimeperiod, 3);
+ RegisterCommand("CHANGE_CUSTOM_HOST_VAR", &ExternalCommandProcessor::ChangeCustomHostVar, 3);
+ RegisterCommand("CHANGE_CUSTOM_SVC_VAR", &ExternalCommandProcessor::ChangeCustomSvcVar, 4);
+ RegisterCommand("CHANGE_CUSTOM_USER_VAR", &ExternalCommandProcessor::ChangeCustomUserVar, 3);
+ RegisterCommand("CHANGE_CUSTOM_CHECKCOMMAND_VAR", &ExternalCommandProcessor::ChangeCustomCheckcommandVar, 3);
+ RegisterCommand("CHANGE_CUSTOM_EVENTCOMMAND_VAR", &ExternalCommandProcessor::ChangeCustomEventcommandVar, 3);
+ RegisterCommand("CHANGE_CUSTOM_NOTIFICATIONCOMMAND_VAR", &ExternalCommandProcessor::ChangeCustomNotificationcommandVar, 3);
+
+ RegisterCommand("ENABLE_HOSTGROUP_HOST_NOTIFICATIONS", &ExternalCommandProcessor::EnableHostgroupHostNotifications, 1);
+ RegisterCommand("ENABLE_HOSTGROUP_SVC_NOTIFICATIONS", &ExternalCommandProcessor::EnableHostgroupSvcNotifications, 1);
+ RegisterCommand("DISABLE_HOSTGROUP_HOST_NOTIFICATIONS", &ExternalCommandProcessor::DisableHostgroupHostNotifications, 1);
+ RegisterCommand("DISABLE_HOSTGROUP_SVC_NOTIFICATIONS", &ExternalCommandProcessor::DisableHostgroupSvcNotifications, 1);
+ RegisterCommand("ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS", &ExternalCommandProcessor::EnableServicegroupHostNotifications, 1);
+ RegisterCommand("DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS", &ExternalCommandProcessor::DisableServicegroupHostNotifications, 1);
+ RegisterCommand("ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS", &ExternalCommandProcessor::EnableServicegroupSvcNotifications, 1);
+ RegisterCommand("DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS", &ExternalCommandProcessor::DisableServicegroupSvcNotifications, 1);
+}
+
+void ExternalCommandProcessor::ExecuteFromFile(const String& line, std::deque< std::vector<String> >& file_queue)
+{
+ if (line.IsEmpty())
+ return;
+
+ if (line[0] != '[')
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Missing timestamp in command: " + line));
+
+ size_t pos = line.FindFirstOf("]");
+
+ if (pos == String::NPos)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Missing timestamp in command: " + line));
+
+ String timestamp = line.SubStr(1, pos - 1);
+ String args = line.SubStr(pos + 2, String::NPos);
+
+ double ts = Convert::ToDouble(timestamp);
+
+ if (ts == 0)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid timestamp in command: " + line));
+
+ std::vector<String> argv = args.Split(";");
+
+ if (argv.empty())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Missing arguments in command: " + line));
+
+ std::vector<String> argvExtra(argv.begin() + 1, argv.end());
+
+ if (argv[0] == "PROCESS_FILE") {
+ Log(LogDebug, "ExternalCommandProcessor")
+ << "Enqueing external command file " << argvExtra[0];
+ file_queue.push_back(argvExtra);
+ } else {
+ Execute(ts, argv[0], argvExtra);
+ }
+}
+
+void ExternalCommandProcessor::ProcessHostCheckResult(double time, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot process passive host check result for non-existent host '" + arguments[0] + "'"));
+
+ if (!host->GetEnablePassiveChecks())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Got passive check result for host '" + arguments[0] + "' which has passive checks disabled."));
+
+ if (!host->IsReachable(DependencyCheckExecution)) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Ignoring passive check result for unreachable host '" << arguments[0] << "'";
+ return;
+ }
+
+ int exitStatus = Convert::ToDouble(arguments[1]);
+ CheckResult::Ptr result = new CheckResult();
+ std::pair<String, String> co = PluginUtility::ParseCheckOutput(arguments[2]);
+ result->SetOutput(co.first);
+ result->SetPerformanceData(PluginUtility::SplitPerfdata(co.second));
+
+ ServiceState state;
+
+ if (exitStatus == 0)
+ state = ServiceOK;
+ else if (exitStatus == 1)
+ state = ServiceCritical;
+ else
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid status code: " + arguments[1]));
+
+ result->SetState(state);
+
+ result->SetScheduleStart(time);
+ result->SetScheduleEnd(time);
+ result->SetExecutionStart(time);
+ result->SetExecutionEnd(time);
+
+ /* Mark this check result as passive. */
+ result->SetActive(false);
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Processing passive check result for host '" << arguments[0] << "'";
+
+ host->ProcessCheckResult(result);
+}
+
+void ExternalCommandProcessor::ProcessServiceCheckResult(double time, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot process passive service check result for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ if (!service->GetEnablePassiveChecks())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Got passive check result for service '" + arguments[1] + "' which has passive checks disabled."));
+
+ if (!service->IsReachable(DependencyCheckExecution)) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Ignoring passive check result for unreachable service '" << arguments[1] << "'";
+ return;
+ }
+
+ int exitStatus = Convert::ToDouble(arguments[2]);
+ CheckResult::Ptr result = new CheckResult();
+ String output = CompatUtility::UnEscapeString(arguments[3]);
+ std::pair<String, String> co = PluginUtility::ParseCheckOutput(output);
+ result->SetOutput(co.first);
+ result->SetPerformanceData(PluginUtility::SplitPerfdata(co.second));
+ result->SetState(PluginUtility::ExitStatusToState(exitStatus));
+
+ result->SetScheduleStart(time);
+ result->SetScheduleEnd(time);
+ result->SetExecutionStart(time);
+ result->SetExecutionEnd(time);
+
+ /* Mark this check result as passive. */
+ result->SetActive(false);
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Processing passive check result for service '" << arguments[1] << "'";
+
+ service->ProcessCheckResult(result);
+}
+
+void ExternalCommandProcessor::ScheduleHostCheck(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot reschedule host check for non-existent host '" + arguments[0] + "'"));
+
+ double planned_check = Convert::ToDouble(arguments[1]);
+
+ if (planned_check > host->GetNextCheck()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Ignoring reschedule request for host '"
+ << arguments[0] << "' (next check is already sooner than requested check time)";
+ return;
+ }
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Rescheduling next check for host '" << arguments[0] << "'";
+
+ if (planned_check < Utility::GetTime())
+ planned_check = Utility::GetTime();
+
+ host->SetNextCheck(planned_check);
+
+ /* trigger update event for DB IDO */
+ Checkable::OnNextCheckUpdated(host);
+}
+
+void ExternalCommandProcessor::ScheduleForcedHostCheck(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot reschedule forced host check for non-existent host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Rescheduling next check for host '" << arguments[0] << "'";
+
+ host->SetForceNextCheck(true);
+ host->SetNextCheck(Convert::ToDouble(arguments[1]));
+
+ /* trigger update event for DB IDO */
+ Checkable::OnNextCheckUpdated(host);
+}
+
+void ExternalCommandProcessor::ScheduleSvcCheck(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot reschedule service check for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ double planned_check = Convert::ToDouble(arguments[2]);
+
+ if (planned_check > service->GetNextCheck()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Ignoring reschedule request for service '"
+ << arguments[1] << "' (next check is already sooner than requested check time)";
+ return;
+ }
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Rescheduling next check for service '" << arguments[1] << "'";
+
+ if (planned_check < Utility::GetTime())
+ planned_check = Utility::GetTime();
+
+ service->SetNextCheck(planned_check);
+
+ /* trigger update event for DB IDO */
+ Checkable::OnNextCheckUpdated(service);
+}
+
+void ExternalCommandProcessor::ScheduleForcedSvcCheck(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot reschedule forced service check for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Rescheduling next check for service '" << arguments[1] << "'";
+
+ service->SetForceNextCheck(true);
+ service->SetNextCheck(Convert::ToDouble(arguments[2]));
+
+ /* trigger update event for DB IDO */
+ Checkable::OnNextCheckUpdated(service);
+}
+
+void ExternalCommandProcessor::EnableHostCheck(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable host checks for non-existent host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling active checks for host '" << arguments[0] << "'";
+
+ host->ModifyAttribute("enable_active_checks", true);
+}
+
+void ExternalCommandProcessor::DisableHostCheck(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable host check non-existent host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling active checks for host '" << arguments[0] << "'";
+
+ host->ModifyAttribute("enable_active_checks", false);
+}
+
+void ExternalCommandProcessor::EnableSvcCheck(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable service check for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling active checks for service '" << arguments[1] << "'";
+
+ service->ModifyAttribute("enable_active_checks", true);
+}
+
+void ExternalCommandProcessor::DisableSvcCheck(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable service check for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling active checks for service '" << arguments[1] << "'";
+
+ service->ModifyAttribute("enable_active_checks", false);
+}
+
+void ExternalCommandProcessor::ShutdownProcess(double, const std::vector<String>&)
+{
+ Log(LogNotice, "ExternalCommandProcessor", "Shutting down Icinga via external command.");
+ Application::RequestShutdown();
+}
+
+void ExternalCommandProcessor::RestartProcess(double, const std::vector<String>&)
+{
+ Log(LogNotice, "ExternalCommandProcessor", "Restarting Icinga via external command.");
+ Application::RequestRestart();
+}
+
+void ExternalCommandProcessor::ScheduleForcedHostSvcChecks(double, const std::vector<String>& arguments)
+{
+ double planned_check = Convert::ToDouble(arguments[1]);
+
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot reschedule forced host service checks for non-existent host '" + arguments[0] + "'"));
+
+ for (const Service::Ptr& service : host->GetServices()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Rescheduling next check for service '" << service->GetName() << "'";
+
+ service->SetNextCheck(planned_check);
+ service->SetForceNextCheck(true);
+
+ /* trigger update event for DB IDO */
+ Checkable::OnNextCheckUpdated(service);
+ }
+}
+
+void ExternalCommandProcessor::ScheduleHostSvcChecks(double, const std::vector<String>& arguments)
+{
+ double planned_check = Convert::ToDouble(arguments[1]);
+
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot reschedule host service checks for non-existent host '" + arguments[0] + "'"));
+
+ if (planned_check < Utility::GetTime())
+ planned_check = Utility::GetTime();
+
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (planned_check > service->GetNextCheck()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Ignoring reschedule request for service '"
+ << service->GetName() << "' (next check is already sooner than requested check time)";
+ continue;
+ }
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Rescheduling next check for service '" << service->GetName() << "'";
+
+ service->SetNextCheck(planned_check);
+
+ /* trigger update event for DB IDO */
+ Checkable::OnNextCheckUpdated(service);
+ }
+}
+
+void ExternalCommandProcessor::EnableHostSvcChecks(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable host service checks for non-existent host '" + arguments[0] + "'"));
+
+ for (const Service::Ptr& service : host->GetServices()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling active checks for service '" << service->GetName() << "'";
+
+ service->ModifyAttribute("enable_active_checks", true);
+ }
+}
+
+void ExternalCommandProcessor::DisableHostSvcChecks(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable host service checks for non-existent host '" + arguments[0] + "'"));
+
+ for (const Service::Ptr& service : host->GetServices()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling active checks for service '" << service->GetName() << "'";
+
+ service->ModifyAttribute("enable_active_checks", false);
+ }
+}
+
+void ExternalCommandProcessor::AcknowledgeSvcProblem(double, const std::vector<String>& arguments)
+{
+ bool sticky = (Convert::ToLong(arguments[2]) == 2 ? true : false);
+ bool notify = (Convert::ToLong(arguments[3]) > 0 ? true : false);
+ bool persistent = (Convert::ToLong(arguments[4]) > 0 ? true : false);
+
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+ ObjectLock oLock (service);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot acknowledge service problem for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ if (service->GetState() == ServiceOK)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("The service '" + arguments[1] + "' is OK."));
+
+ if (service->IsAcknowledged()) {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("The service '" + arguments[1] + "' is already acknowledged."));
+ }
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Setting acknowledgement for service '" << service->GetName() << "'" << (notify ? "" : ". Disabled notification");
+
+ Comment::AddComment(service, CommentAcknowledgement, arguments[5], arguments[6], persistent, 0, sticky);
+ service->AcknowledgeProblem(arguments[5], arguments[6], sticky ? AcknowledgementSticky : AcknowledgementNormal, notify, persistent);
+}
+
+void ExternalCommandProcessor::AcknowledgeSvcProblemExpire(double, const std::vector<String>& arguments)
+{
+ bool sticky = (Convert::ToLong(arguments[2]) == 2 ? true : false);
+ bool notify = (Convert::ToLong(arguments[3]) > 0 ? true : false);
+ bool persistent = (Convert::ToLong(arguments[4]) > 0 ? true : false);
+ double timestamp = Convert::ToDouble(arguments[5]);
+
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+ ObjectLock oLock (service);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot acknowledge service problem with expire time for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ if (service->GetState() == ServiceOK)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("The service '" + arguments[1] + "' is OK."));
+
+ if (timestamp != 0 && timestamp <= Utility::GetTime())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Acknowledgement expire time must be in the future for service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ if (service->IsAcknowledged()) {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("The service '" + arguments[1] + "' is already acknowledged."));
+ }
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Setting timed acknowledgement for service '" << service->GetName() << "'" << (notify ? "" : ". Disabled notification");
+
+ Comment::AddComment(service, CommentAcknowledgement, arguments[6], arguments[7], persistent, timestamp, sticky);
+ service->AcknowledgeProblem(arguments[6], arguments[7], sticky ? AcknowledgementSticky : AcknowledgementNormal, notify, persistent, timestamp);
+}
+
+void ExternalCommandProcessor::RemoveSvcAcknowledgement(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot remove service acknowledgement for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Removing acknowledgement for service '" << service->GetName() << "'";
+
+ {
+ ObjectLock olock(service);
+ service->ClearAcknowledgement("");
+ }
+
+ service->RemoveAckComments();
+}
+
+void ExternalCommandProcessor::AcknowledgeHostProblem(double, const std::vector<String>& arguments)
+{
+ bool sticky = (Convert::ToLong(arguments[1]) == 2 ? true : false);
+ bool notify = (Convert::ToLong(arguments[2]) > 0 ? true : false);
+ bool persistent = (Convert::ToLong(arguments[3]) > 0 ? true : false);
+
+ Host::Ptr host = Host::GetByName(arguments[0]);
+ ObjectLock oLock (host);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot acknowledge host problem for non-existent host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Setting acknowledgement for host '" << host->GetName() << "'" << (notify ? "" : ". Disabled notification");
+
+ if (host->GetState() == HostUp)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("The host '" + arguments[0] + "' is OK."));
+
+ if (host->IsAcknowledged()) {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("The host '" + arguments[1] + "' is already acknowledged."));
+ }
+
+ Comment::AddComment(host, CommentAcknowledgement, arguments[4], arguments[5], persistent, 0, sticky);
+ host->AcknowledgeProblem(arguments[4], arguments[5], sticky ? AcknowledgementSticky : AcknowledgementNormal, notify, persistent);
+}
+
+void ExternalCommandProcessor::AcknowledgeHostProblemExpire(double, const std::vector<String>& arguments)
+{
+ bool sticky = (Convert::ToLong(arguments[1]) == 2 ? true : false);
+ bool notify = (Convert::ToLong(arguments[2]) > 0 ? true : false);
+ bool persistent = (Convert::ToLong(arguments[3]) > 0 ? true : false);
+ double timestamp = Convert::ToDouble(arguments[4]);
+
+ Host::Ptr host = Host::GetByName(arguments[0]);
+ ObjectLock oLock (host);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot acknowledge host problem with expire time for non-existent host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Setting timed acknowledgement for host '" << host->GetName() << "'" << (notify ? "" : ". Disabled notification");
+
+ if (host->GetState() == HostUp)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("The host '" + arguments[0] + "' is OK."));
+
+ if (timestamp != 0 && timestamp <= Utility::GetTime())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Acknowledgement expire time must be in the future for host '" + arguments[0] + "'"));
+
+ if (host->IsAcknowledged()) {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("The host '" + arguments[1] + "' is already acknowledged."));
+ }
+
+ Comment::AddComment(host, CommentAcknowledgement, arguments[5], arguments[6], persistent, timestamp, sticky);
+ host->AcknowledgeProblem(arguments[5], arguments[6], sticky ? AcknowledgementSticky : AcknowledgementNormal, notify, persistent, timestamp);
+}
+
+void ExternalCommandProcessor::RemoveHostAcknowledgement(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot remove acknowledgement for non-existent host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Removing acknowledgement for host '" << host->GetName() << "'";
+
+ {
+ ObjectLock olock(host);
+ host->ClearAcknowledgement("");
+ }
+ host->RemoveAckComments();
+}
+
+void ExternalCommandProcessor::EnableHostgroupSvcChecks(double, const std::vector<String>& arguments)
+{
+ HostGroup::Ptr hg = HostGroup::GetByName(arguments[0]);
+
+ if (!hg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable hostgroup service checks for non-existent hostgroup '" + arguments[0] + "'"));
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ for (const Service::Ptr& service : host->GetServices()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling active checks for service '" << service->GetName() << "'";
+
+ service->ModifyAttribute("enable_active_checks", true);
+ }
+ }
+}
+
+void ExternalCommandProcessor::DisableHostgroupSvcChecks(double, const std::vector<String>& arguments)
+{
+ HostGroup::Ptr hg = HostGroup::GetByName(arguments[0]);
+
+ if (!hg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable hostgroup service checks for non-existent hostgroup '" + arguments[0] + "'"));
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ for (const Service::Ptr& service : host->GetServices()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling active checks for service '" << service->GetName() << "'";
+
+ service->ModifyAttribute("enable_active_checks", false);
+ }
+ }
+}
+
+void ExternalCommandProcessor::EnableServicegroupSvcChecks(double, const std::vector<String>& arguments)
+{
+ ServiceGroup::Ptr sg = ServiceGroup::GetByName(arguments[0]);
+
+ if (!sg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable servicegroup service checks for non-existent servicegroup '" + arguments[0] + "'"));
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling active checks for service '" << service->GetName() << "'";
+
+ service->ModifyAttribute("enable_active_checks", true);
+ }
+}
+
+void ExternalCommandProcessor::DisableServicegroupSvcChecks(double, const std::vector<String>& arguments)
+{
+ ServiceGroup::Ptr sg = ServiceGroup::GetByName(arguments[0]);
+
+ if (!sg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable servicegroup service checks for non-existent servicegroup '" + arguments[0] + "'"));
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling active checks for service '" << service->GetName() << "'";
+
+ service->ModifyAttribute("enable_active_checks", false);
+ }
+}
+
+void ExternalCommandProcessor::EnablePassiveHostChecks(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable passive host checks for non-existent host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling passive checks for host '" << arguments[0] << "'";
+
+ host->ModifyAttribute("enable_passive_checks", true);
+}
+
+void ExternalCommandProcessor::DisablePassiveHostChecks(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable passive host checks for non-existent host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling passive checks for host '" << arguments[0] << "'";
+
+ host->ModifyAttribute("enable_passive_checks", false);
+}
+
+void ExternalCommandProcessor::EnablePassiveSvcChecks(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable service checks for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling passive checks for service '" << arguments[1] << "'";
+
+ service->ModifyAttribute("enable_passive_checks", true);
+}
+
+void ExternalCommandProcessor::DisablePassiveSvcChecks(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable service checks for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling passive checks for service '" << arguments[1] << "'";
+
+ service->ModifyAttribute("enable_passive_checks", false);
+}
+
+void ExternalCommandProcessor::EnableServicegroupPassiveSvcChecks(double, const std::vector<String>& arguments)
+{
+ ServiceGroup::Ptr sg = ServiceGroup::GetByName(arguments[0]);
+
+ if (!sg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable servicegroup passive service checks for non-existent servicegroup '" + arguments[0] + "'"));
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling passive checks for service '" << service->GetName() << "'";
+
+ service->ModifyAttribute("enable_passive_checks", true);
+ }
+}
+
+void ExternalCommandProcessor::DisableServicegroupPassiveSvcChecks(double, const std::vector<String>& arguments)
+{
+ ServiceGroup::Ptr sg = ServiceGroup::GetByName(arguments[0]);
+
+ if (!sg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable servicegroup passive service checks for non-existent servicegroup '" + arguments[0] + "'"));
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling passive checks for service '" << service->GetName() << "'";
+
+ service->ModifyAttribute("enable_passive_checks", false);
+ }
+}
+
+void ExternalCommandProcessor::EnableHostgroupPassiveSvcChecks(double, const std::vector<String>& arguments)
+{
+ HostGroup::Ptr hg = HostGroup::GetByName(arguments[0]);
+
+ if (!hg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable hostgroup passive service checks for non-existent hostgroup '" + arguments[0] + "'"));
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ for (const Service::Ptr& service : host->GetServices()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling passive checks for service '" << service->GetName() << "'";
+
+ service->ModifyAttribute("enable_passive_checks", true);
+ }
+ }
+}
+
+void ExternalCommandProcessor::DisableHostgroupPassiveSvcChecks(double, const std::vector<String>& arguments)
+{
+ HostGroup::Ptr hg = HostGroup::GetByName(arguments[0]);
+
+ if (!hg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable hostgroup passive service checks for non-existent hostgroup '" + arguments[0] + "'"));
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ for (const Service::Ptr& service : host->GetServices()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling passive checks for service '" << service->GetName() << "'";
+
+ service->ModifyAttribute("enable_passive_checks", false);
+ }
+ }
+}
+
+void ExternalCommandProcessor::ProcessFile(double, const std::vector<String>& arguments)
+{
+ std::deque< std::vector<String> > file_queue;
+ file_queue.push_back(arguments);
+
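+ /* Entries are (file, delete-flag) pairs. ExecuteFromFile() receives the queue so that
+ * nested PROCESS_FILE commands are enqueued and processed iteratively instead of recursively. */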
+ while (!file_queue.empty()) {
+ std::vector<String> argument = file_queue.front();
+ file_queue.pop_front();
+
+ String file = argument[0];
+ int to_delete = Convert::ToLong(argument[1]);
+
+ std::ifstream ifp;
+ ifp.exceptions(std::ifstream::badbit);
+
+ ifp.open(file.CStr(), std::ifstream::in);
+
+ while (ifp.good()) {
+ std::string line;
+ std::getline(ifp, line);
+
+ try {
+ Log(LogNotice, "compat")
+ << "Executing external command: " << line;
+
+ ExecuteFromFile(line, file_queue);
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "ExternalCommandProcessor")
+ << "External command failed: " << DiagnosticInformation(ex);
+ }
+ }
+
+ ifp.close();
+
+ if (to_delete > 0)
+ (void) unlink(file.CStr());
+ }
+}
+
+void ExternalCommandProcessor::ScheduleSvcDowntime(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot schedule service downtime for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
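+ /* Legacy argument order: host, service, start, end, fixed, trigger ID, duration, author, comment. */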
+ String triggeredBy;
+ int triggeredByLegacy = Convert::ToLong(arguments[5]);
+ int is_fixed = Convert::ToLong(arguments[4]);
+ if (triggeredByLegacy != 0)
+ triggeredBy = Downtime::GetDowntimeIDFromLegacyID(triggeredByLegacy);
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Creating downtime for service " << service->GetName();
+ (void) Downtime::AddDowntime(service, arguments[7], arguments[8],
+ Convert::ToDouble(arguments[2]), Convert::ToDouble(arguments[3]),
+ Convert::ToBool(is_fixed), triggeredBy, Convert::ToDouble(arguments[6]));
+}
+
+void ExternalCommandProcessor::DelSvcDowntime(double, const std::vector<String>& arguments)
+{
+ int id = Convert::ToLong(arguments[0]);
+ String rid = Downtime::GetDowntimeIDFromLegacyID(id);
+
+ try {
+ Downtime::RemoveDowntime(rid, false, true);
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Removed downtime ID " << arguments[0];
+ } catch (const invalid_downtime_removal_error& error) {
+ Log(LogWarning, "ExternalCommandProcessor") << error.what();
+ }
+}
+
+void ExternalCommandProcessor::ScheduleHostDowntime(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot schedule host downtime for non-existent host '" + arguments[0] + "'"));
+
+ String triggeredBy;
+ int triggeredByLegacy = Convert::ToLong(arguments[4]);
+ int is_fixed = Convert::ToLong(arguments[3]);
+ if (triggeredByLegacy != 0)
+ triggeredBy = Downtime::GetDowntimeIDFromLegacyID(triggeredByLegacy);
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Creating downtime for host " << host->GetName();
+
+ (void) Downtime::AddDowntime(host, arguments[6], arguments[7],
+ Convert::ToDouble(arguments[1]), Convert::ToDouble(arguments[2]),
+ Convert::ToBool(is_fixed), triggeredBy, Convert::ToDouble(arguments[5]));
+}
+
+void ExternalCommandProcessor::ScheduleAndPropagateHostDowntime(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot schedule and propagate host downtime for non-existent host '" + arguments[0] + "'"));
+
+ String triggeredBy;
+ int triggeredByLegacy = Convert::ToLong(arguments[4]);
+ int is_fixed = Convert::ToLong(arguments[3]);
+ if (triggeredByLegacy != 0)
+ triggeredBy = Downtime::GetDowntimeIDFromLegacyID(triggeredByLegacy);
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Creating downtime for host " << host->GetName();
+
+ (void) Downtime::AddDowntime(host, arguments[6], arguments[7],
+ Convert::ToDouble(arguments[1]), Convert::ToDouble(arguments[2]),
+ Convert::ToBool(is_fixed), triggeredBy, Convert::ToDouble(arguments[5]));
+
+ /* Schedule downtime for all child hosts */
+ for (const Checkable::Ptr& child : host->GetAllChildren()) {
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(child);
+
+ /* ignore all service children */
+ if (service)
+ continue;
+
+ (void) Downtime::AddDowntime(child, arguments[6], arguments[7],
+ Convert::ToDouble(arguments[1]), Convert::ToDouble(arguments[2]),
+ Convert::ToBool(is_fixed), triggeredBy, Convert::ToDouble(arguments[5]));
+ }
+}
+
+void ExternalCommandProcessor::ScheduleAndPropagateTriggeredHostDowntime(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot schedule and propagate triggered host downtime for non-existent host '" + arguments[0] + "'"));
+
+ String triggeredBy;
+ int triggeredByLegacy = Convert::ToLong(arguments[4]);
+ int is_fixed = Convert::ToLong(arguments[3]);
+ if (triggeredByLegacy != 0)
+ triggeredBy = Downtime::GetDowntimeIDFromLegacyID(triggeredByLegacy);
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Creating downtime for host " << host->GetName();
+
+ Downtime::Ptr parentDowntime = Downtime::AddDowntime(host, arguments[6], arguments[7],
+ Convert::ToDouble(arguments[1]), Convert::ToDouble(arguments[2]),
+ Convert::ToBool(is_fixed), triggeredBy, Convert::ToDouble(arguments[5]));
+
+ /* Schedule downtime for all child hosts and explicitly trigger them through the parent host's downtime */
+ for (const Checkable::Ptr& child : host->GetAllChildren()) {
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(child);
+
+ /* ignore all service children */
+ if (service)
+ continue;
+
+ (void) Downtime::AddDowntime(child, arguments[6], arguments[7],
+ Convert::ToDouble(arguments[1]), Convert::ToDouble(arguments[2]),
+ Convert::ToBool(is_fixed), parentDowntime->GetName(), Convert::ToDouble(arguments[5]));
+ }
+}
+
+void ExternalCommandProcessor::DelHostDowntime(double, const std::vector<String>& arguments)
+{
+ int id = Convert::ToLong(arguments[0]);
+ String rid = Downtime::GetDowntimeIDFromLegacyID(id);
+
+ try {
+ Downtime::RemoveDowntime(rid, false, true);
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Removed downtime ID " << arguments[0];
+ } catch (const invalid_downtime_removal_error& error) {
+ Log(LogWarning, "ExternalCommandProcessor") << error.what();
+ }
+}
+
+void ExternalCommandProcessor::DelDowntimeByHostName(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot schedule host services downtime for non-existent host '" + arguments[0] + "'"));
+
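+ /* Optional arguments narrow which service downtimes are removed (service name, start time, comment); all downtimes on the host itself are removed unconditionally. */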
+ String serviceName;
+ if (arguments.size() >= 2)
+ serviceName = arguments[1];
+
+ String startTime;
+ if (arguments.size() >= 3)
+ startTime = arguments[2];
+
+ String commentString;
+ if (arguments.size() >= 4)
+ commentString = arguments[3];
+
+ if (arguments.size() > 5)
+ Log(LogWarning, "ExternalCommandProcessor")
+ << ("Ignoring additional parameters for host '" + arguments[0] + "' downtime deletion.");
+
+ for (const Downtime::Ptr& downtime : host->GetDowntimes()) {
+ try {
+ String downtimeName = downtime->GetName();
+ Downtime::RemoveDowntime(downtimeName, false, true);
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Removed downtime '" << downtimeName << "'.";
+ } catch (const invalid_downtime_removal_error& error) {
+ Log(LogWarning, "ExternalCommandProcessor") << error.what();
+ }
+ }
+
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (!serviceName.IsEmpty() && serviceName != service->GetName())
+ continue;
+
+ for (const Downtime::Ptr& downtime : service->GetDowntimes()) {
+ if (!startTime.IsEmpty() && downtime->GetStartTime() != Convert::ToDouble(startTime))
+ continue;
+
+ if (!commentString.IsEmpty() && downtime->GetComment() != commentString)
+ continue;
+
+ try {
+ String downtimeName = downtime->GetName();
+ Downtime::RemoveDowntime(downtimeName, false, true);
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Removed downtime '" << downtimeName << "'.";
+ } catch (const invalid_downtime_removal_error& error) {
+ Log(LogWarning, "ExternalCommandProcessor") << error.what();
+ }
+ }
+ }
+}
+
+void ExternalCommandProcessor::ScheduleHostSvcDowntime(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot schedule host services downtime for non-existent host '" + arguments[0] + "'"));
+
+ String triggeredBy;
+ int triggeredByLegacy = Convert::ToLong(arguments[4]);
+ int is_fixed = Convert::ToLong(arguments[3]);
+ if (triggeredByLegacy != 0)
+ triggeredBy = Downtime::GetDowntimeIDFromLegacyID(triggeredByLegacy);
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Creating downtime for host " << host->GetName();
+
+ (void) Downtime::AddDowntime(host, arguments[6], arguments[7],
+ Convert::ToDouble(arguments[1]), Convert::ToDouble(arguments[2]),
+ Convert::ToBool(is_fixed), triggeredBy, Convert::ToDouble(arguments[5]));
+
+ for (const Service::Ptr& service : host->GetServices()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Creating downtime for service " << service->GetName();
+ (void) Downtime::AddDowntime(service, arguments[6], arguments[7],
+ Convert::ToDouble(arguments[1]), Convert::ToDouble(arguments[2]),
+ Convert::ToBool(is_fixed), triggeredBy, Convert::ToDouble(arguments[5]));
+ }
+}
+
+void ExternalCommandProcessor::ScheduleHostgroupHostDowntime(double, const std::vector<String>& arguments)
+{
+ HostGroup::Ptr hg = HostGroup::GetByName(arguments[0]);
+
+ if (!hg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot schedule hostgroup host downtime for non-existent hostgroup '" + arguments[0] + "'"));
+
+ String triggeredBy;
+ int triggeredByLegacy = Convert::ToLong(arguments[4]);
+ int is_fixed = Convert::ToLong(arguments[3]);
+ if (triggeredByLegacy != 0)
+ triggeredBy = Downtime::GetDowntimeIDFromLegacyID(triggeredByLegacy);
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Creating downtime for host " << host->GetName();
+
+ (void) Downtime::AddDowntime(host, arguments[6], arguments[7],
+ Convert::ToDouble(arguments[1]), Convert::ToDouble(arguments[2]),
+ Convert::ToBool(is_fixed), triggeredBy, Convert::ToDouble(arguments[5]));
+ }
+}
+
+void ExternalCommandProcessor::ScheduleHostgroupSvcDowntime(double, const std::vector<String>& arguments)
+{
+ HostGroup::Ptr hg = HostGroup::GetByName(arguments[0]);
+
+ if (!hg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot schedule hostgroup service downtime for non-existent hostgroup '" + arguments[0] + "'"));
+
+ String triggeredBy;
+ int triggeredByLegacy = Convert::ToLong(arguments[4]);
+ int is_fixed = Convert::ToLong(arguments[3]);
+ if (triggeredByLegacy != 0)
+ triggeredBy = Downtime::GetDowntimeIDFromLegacyID(triggeredByLegacy);
+
+ /* Note: we can't just directly create downtimes for all the services by iterating
+ * over all hosts in the host group - otherwise we might end up creating multiple
+ * downtimes for some services. */
+
+ std::set<Service::Ptr> services;
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ for (const Service::Ptr& service : host->GetServices()) {
+ services.insert(service);
+ }
+ }
+
+ for (const Service::Ptr& service : services) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Creating downtime for service " << service->GetName();
+ (void) Downtime::AddDowntime(service, arguments[6], arguments[7],
+ Convert::ToDouble(arguments[1]), Convert::ToDouble(arguments[2]),
+ Convert::ToBool(is_fixed), triggeredBy, Convert::ToDouble(arguments[5]));
+ }
+}
+
+void ExternalCommandProcessor::ScheduleServicegroupHostDowntime(double, const std::vector<String>& arguments)
+{
+ ServiceGroup::Ptr sg = ServiceGroup::GetByName(arguments[0]);
+
+ if (!sg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot schedule servicegroup host downtime for non-existent servicegroup '" + arguments[0] + "'"));
+
+ String triggeredBy;
+ int triggeredByLegacy = Convert::ToLong(arguments[4]);
+ int is_fixed = Convert::ToLong(arguments[3]);
+ if (triggeredByLegacy != 0)
+ triggeredBy = Downtime::GetDowntimeIDFromLegacyID(triggeredByLegacy);
+
+ /* Note: we can't just directly create downtimes for all the hosts by iterating
+ * over all services in the service group - otherwise we might end up creating multiple
+ * downtimes for some hosts. */
+
+ std::set<Host::Ptr> hosts;
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ Host::Ptr host = service->GetHost();
+ hosts.insert(host);
+ }
+
+ for (const Host::Ptr& host : hosts) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Creating downtime for host " << host->GetName();
+ (void) Downtime::AddDowntime(host, arguments[6], arguments[7],
+ Convert::ToDouble(arguments[1]), Convert::ToDouble(arguments[2]),
+ Convert::ToBool(is_fixed), triggeredBy, Convert::ToDouble(arguments[5]));
+ }
+}
+
+void ExternalCommandProcessor::ScheduleServicegroupSvcDowntime(double, const std::vector<String>& arguments)
+{
+ ServiceGroup::Ptr sg = ServiceGroup::GetByName(arguments[0]);
+
+ if (!sg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot schedule servicegroup service downtime for non-existent servicegroup '" + arguments[0] + "'"));
+
+ String triggeredBy;
+ int triggeredByLegacy = Convert::ToLong(arguments[4]);
+ int is_fixed = Convert::ToLong(arguments[3]);
+ if (triggeredByLegacy != 0)
+ triggeredBy = Downtime::GetDowntimeIDFromLegacyID(triggeredByLegacy);
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Creating downtime for service " << service->GetName();
+ (void) Downtime::AddDowntime(service, arguments[6], arguments[7],
+ Convert::ToDouble(arguments[1]), Convert::ToDouble(arguments[2]),
+ Convert::ToBool(is_fixed), triggeredBy, Convert::ToDouble(arguments[5]));
+ }
+}
+
+void ExternalCommandProcessor::AddHostComment(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot add host comment for non-existent host '" + arguments[0] + "'"));
+
+ if (arguments[2].IsEmpty() || arguments[3].IsEmpty())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Author and comment must not be empty"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Creating comment for host " << host->GetName();
+ (void) Comment::AddComment(host, CommentUser, arguments[2], arguments[3], false, 0);
+}
+
+void ExternalCommandProcessor::DelHostComment(double, const std::vector<String>& arguments)
+{
+ int id = Convert::ToLong(arguments[0]);
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Removing comment ID " << arguments[0];
+ String rid = Comment::GetCommentIDFromLegacyID(id);
+ Comment::RemoveComment(rid);
+}
+
+void ExternalCommandProcessor::AddSvcComment(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot add service comment for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ if (arguments[3].IsEmpty() || arguments[4].IsEmpty())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Author and comment must not be empty"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Creating comment for service " << service->GetName();
+ (void) Comment::AddComment(service, CommentUser, arguments[3], arguments[4], false, 0);
+}
+
+void ExternalCommandProcessor::DelSvcComment(double, const std::vector<String>& arguments)
+{
+ int id = Convert::ToLong(arguments[0]);
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Removing comment ID " << arguments[0];
+
+ String rid = Comment::GetCommentIDFromLegacyID(id);
+ Comment::RemoveComment(rid);
+}
+
+void ExternalCommandProcessor::DelAllHostComments(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot delete all host comments for non-existent host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Removing all comments for host " << host->GetName();
+ host->RemoveAllComments();
+}
+
+void ExternalCommandProcessor::DelAllSvcComments(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot delete all service comments for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Removing all comments for service " << service->GetName();
+ service->RemoveAllComments();
+}
+
+void ExternalCommandProcessor::SendCustomHostNotification(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot send custom host notification for non-existent host '" + arguments[0] + "'"));
+
+ int options = Convert::ToLong(arguments[1]);
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Sending custom notification for host " << host->GetName();
+ if (options & 2) {
+ host->SetForceNextNotification(true);
+ }
+
+ Checkable::OnNotificationsRequested(host, NotificationCustom,
+ host->GetLastCheckResult(), arguments[2], arguments[3], nullptr);
+}
+
+void ExternalCommandProcessor::SendCustomSvcNotification(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot send custom service notification for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ int options = Convert::ToLong(arguments[2]);
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Sending custom notification for service " << service->GetName();
+
+ if (options & 2) {
+ service->SetForceNextNotification(true);
+ }
+
+ Service::OnNotificationsRequested(service, NotificationCustom,
+ service->GetLastCheckResult(), arguments[3], arguments[4], nullptr);
+}
+
+void ExternalCommandProcessor::DelayHostNotification(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot delay host notification for non-existent host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Delaying notifications for host '" << host->GetName() << "'";
+
+ for (const Notification::Ptr& notification : host->GetNotifications()) {
+ notification->SetNextNotification(Convert::ToDouble(arguments[1]));
+ }
+}
+
+void ExternalCommandProcessor::DelaySvcNotification(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot delay service notification for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Delaying notifications for service " << service->GetName();
+
+ for (const Notification::Ptr& notification : service->GetNotifications()) {
+ notification->SetNextNotification(Convert::ToDouble(arguments[2]));
+ }
+}
+
+void ExternalCommandProcessor::EnableHostNotifications(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable host notifications for non-existent host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling notifications for host '" << arguments[0] << "'";
+
+ host->ModifyAttribute("enable_notifications", true);
+}
+
+void ExternalCommandProcessor::DisableHostNotifications(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable host notifications for non-existent host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling notifications for host '" << arguments[0] << "'";
+
+ host->ModifyAttribute("enable_notifications", false);
+}
+
+void ExternalCommandProcessor::EnableSvcNotifications(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable service notifications for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling notifications for service '" << arguments[1] << "'";
+
+ service->ModifyAttribute("enable_notifications", true);
+}
+
+void ExternalCommandProcessor::DisableSvcNotifications(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable service notifications for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling notifications for service '" << arguments[1] << "'";
+
+ service->ModifyAttribute("enable_notifications", false);
+}
+
+void ExternalCommandProcessor::EnableHostSvcNotifications(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable notifications for all services for non-existent host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling notifications for all services on host '" << arguments[0] << "'";
+
+ for (const Service::Ptr& service : host->GetServices()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling notifications for service '" << service->GetName() << "'";
+
+ service->ModifyAttribute("enable_notifications", true);
+ }
+}
+
+void ExternalCommandProcessor::DisableHostSvcNotifications(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable notifications for all services for non-existent host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling notifications for all services on host '" << arguments[0] << "'";
+
+ for (const Service::Ptr& service : host->GetServices()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling notifications for service '" << service->GetName() << "'";
+
+ service->ModifyAttribute("enable_notifications", false);
+ }
+}
+
+void ExternalCommandProcessor::DisableHostgroupHostChecks(double, const std::vector<String>& arguments)
+{
+ HostGroup::Ptr hg = HostGroup::GetByName(arguments[0]);
+
+ if (!hg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable hostgroup host checks for non-existent hostgroup '" + arguments[0] + "'"));
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling active checks for host '" << host->GetName() << "'";
+
+ host->ModifyAttribute("enable_active_checks", false);
+ }
+}
+
+void ExternalCommandProcessor::DisableHostgroupPassiveHostChecks(double, const std::vector<String>& arguments)
+{
+ HostGroup::Ptr hg = HostGroup::GetByName(arguments[0]);
+
+ if (!hg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable hostgroup passive host checks for non-existent hostgroup '" + arguments[0] + "'"));
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling passive checks for host '" << host->GetName() << "'";
+
+ host->ModifyAttribute("enable_passive_checks", false);
+ }
+}
+
+void ExternalCommandProcessor::DisableServicegroupHostChecks(double, const std::vector<String>& arguments)
+{
+ ServiceGroup::Ptr sg = ServiceGroup::GetByName(arguments[0]);
+
+ if (!sg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable servicegroup host checks for non-existent servicegroup '" + arguments[0] + "'"));
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ Host::Ptr host = service->GetHost();
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling active checks for host '" << host->GetName() << "'";
+
+ host->ModifyAttribute("enable_active_checks", false);
+ }
+}
+
+void ExternalCommandProcessor::DisableServicegroupPassiveHostChecks(double, const std::vector<String>& arguments)
+{
+ ServiceGroup::Ptr sg = ServiceGroup::GetByName(arguments[0]);
+
+ if (!sg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable servicegroup passive host checks for non-existent servicegroup '" + arguments[0] + "'"));
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ Host::Ptr host = service->GetHost();
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling passive checks for host '" << host->GetName() << "'";
+
+ host->ModifyAttribute("enable_passive_checks", false);
+ }
+}
+
+void ExternalCommandProcessor::EnableHostgroupHostChecks(double, const std::vector<String>& arguments)
+{
+ HostGroup::Ptr hg = HostGroup::GetByName(arguments[0]);
+
+ if (!hg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable hostgroup host checks for non-existent hostgroup '" + arguments[0] + "'"));
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling active checks for host '" << host->GetName() << "'";
+
+ host->ModifyAttribute("enable_active_checks", true);
+ }
+}
+
+void ExternalCommandProcessor::EnableHostgroupPassiveHostChecks(double, const std::vector<String>& arguments)
+{
+ HostGroup::Ptr hg = HostGroup::GetByName(arguments[0]);
+
+ if (!hg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable hostgroup passive host checks for non-existent hostgroup '" + arguments[0] + "'"));
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling passive checks for host '" << host->GetName() << "'";
+
+ host->ModifyAttribute("enable_passive_checks", true);
+ }
+}
+
+void ExternalCommandProcessor::EnableServicegroupHostChecks(double, const std::vector<String>& arguments)
+{
+ ServiceGroup::Ptr sg = ServiceGroup::GetByName(arguments[0]);
+
+ if (!sg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable servicegroup host checks for non-existent servicegroup '" + arguments[0] + "'"));
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ Host::Ptr host = service->GetHost();
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling active checks for host '" << host->GetName() << "'";
+
+ host->ModifyAttribute("enable_active_checks", true);
+ }
+}
+
+void ExternalCommandProcessor::EnableServicegroupPassiveHostChecks(double, const std::vector<String>& arguments)
+{
+ ServiceGroup::Ptr sg = ServiceGroup::GetByName(arguments[0]);
+
+ if (!sg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable servicegroup passive host checks for non-existent servicegroup '" + arguments[0] + "'"));
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ Host::Ptr host = service->GetHost();
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling passive checks for host '" << host->GetName() << "'";
+
+ host->ModifyAttribute("enable_passive_checks", true);
+ }
+}
+
+void ExternalCommandProcessor::EnableHostFlapping(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable host flapping for non-existent host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling flapping detection for host '" << arguments[0] << "'";
+
+ host->ModifyAttribute("enable_flapping", true);
+}
+
+void ExternalCommandProcessor::DisableHostFlapping(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable host flapping for non-existent host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling flapping detection for host '" << arguments[0] << "'";
+
+ host->ModifyAttribute("enable_flapping", false);
+}
+
+void ExternalCommandProcessor::EnableSvcFlapping(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable service flapping for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling flapping detection for service '" << arguments[1] << "'";
+
+ service->ModifyAttribute("enable_flapping", true);
+}
+
+void ExternalCommandProcessor::DisableSvcFlapping(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable service flapping for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling flapping detection for service '" << arguments[1] << "'";
+
+ service->ModifyAttribute("enable_flapping", false);
+}
+
+void ExternalCommandProcessor::EnableNotifications(double, const std::vector<String>&)
+{
+ Log(LogNotice, "ExternalCommandProcessor", "Globally enabling notifications.");
+
+ IcingaApplication::GetInstance()->ModifyAttribute("enable_notifications", true);
+}
+
+void ExternalCommandProcessor::DisableNotifications(double, const std::vector<String>&)
+{
+ Log(LogNotice, "ExternalCommandProcessor", "Globally disabling notifications.");
+
+ IcingaApplication::GetInstance()->ModifyAttribute("enable_notifications", false);
+}
+
+void ExternalCommandProcessor::EnableFlapDetection(double, const std::vector<String>&)
+{
+ Log(LogNotice, "ExternalCommandProcessor", "Globally enabling flap detection.");
+
+ IcingaApplication::GetInstance()->ModifyAttribute("enable_flapping", true);
+}
+
+void ExternalCommandProcessor::DisableFlapDetection(double, const std::vector<String>&)
+{
+ Log(LogNotice, "ExternalCommandProcessor", "Globally disabling flap detection.");
+
+ IcingaApplication::GetInstance()->ModifyAttribute("enable_flapping", false);
+}
+
+void ExternalCommandProcessor::EnableEventHandlers(double, const std::vector<String>&)
+{
+ Log(LogNotice, "ExternalCommandProcessor", "Globally enabling event handlers.");
+
+ IcingaApplication::GetInstance()->ModifyAttribute("enable_event_handlers", true);
+}
+
+void ExternalCommandProcessor::DisableEventHandlers(double, const std::vector<String>&)
+{
+ Log(LogNotice, "ExternalCommandProcessor", "Globally disabling event handlers.");
+
+ IcingaApplication::GetInstance()->ModifyAttribute("enable_event_handlers", false);
+}
+
+void ExternalCommandProcessor::EnablePerformanceData(double, const std::vector<String>&)
+{
+ Log(LogNotice, "ExternalCommandProcessor", "Globally enabling performance data processing.");
+
+ IcingaApplication::GetInstance()->ModifyAttribute("enable_perfdata", true);
+}
+
+void ExternalCommandProcessor::DisablePerformanceData(double, const std::vector<String>&)
+{
+ Log(LogNotice, "ExternalCommandProcessor", "Globally disabling performance data processing.");
+
+ IcingaApplication::GetInstance()->ModifyAttribute("enable_perfdata", false);
+}
+
+void ExternalCommandProcessor::StartExecutingSvcChecks(double, const std::vector<String>&)
+{
+ Log(LogNotice, "ExternalCommandProcessor", "Globally enabling service checks.");
+
+ IcingaApplication::GetInstance()->ModifyAttribute("enable_service_checks", true);
+}
+
+void ExternalCommandProcessor::StopExecutingSvcChecks(double, const std::vector<String>&)
+{
+ Log(LogNotice, "ExternalCommandProcessor", "Globally disabling service checks.");
+
+ IcingaApplication::GetInstance()->ModifyAttribute("enable_service_checks", false);
+}
+
+void ExternalCommandProcessor::StartExecutingHostChecks(double, const std::vector<String>&)
+{
+ Log(LogNotice, "ExternalCommandProcessor", "Globally enabling host checks.");
+
+ IcingaApplication::GetInstance()->ModifyAttribute("enable_host_checks", true);
+}
+
+void ExternalCommandProcessor::StopExecutingHostChecks(double, const std::vector<String>&)
+{
+ Log(LogNotice, "ExternalCommandProcessor", "Globally disabling host checks.");
+
+ IcingaApplication::GetInstance()->ModifyAttribute("enable_host_checks", false);
+}
+
+void ExternalCommandProcessor::ChangeNormalSvcCheckInterval(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot update check interval for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ double interval = Convert::ToDouble(arguments[2]);
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Updating check interval for service '" << arguments[1] << "'";
+
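+ /* The legacy command specifies the interval in minutes; the check_interval attribute is stored in seconds. */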
+ service->ModifyAttribute("check_interval", interval * 60);
+}
+
+void ExternalCommandProcessor::ChangeNormalHostCheckInterval(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot update check interval for non-existent host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Updating check interval for host '" << arguments[0] << "'";
+
+ double interval = Convert::ToDouble(arguments[1]);
+
+ host->ModifyAttribute("check_interval", interval * 60);
+}
+
+void ExternalCommandProcessor::ChangeRetrySvcCheckInterval(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot update retry interval for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ double interval = Convert::ToDouble(arguments[2]);
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Updating retry interval for service '" << arguments[1] << "'";
+
+ service->ModifyAttribute("retry_interval", interval * 60);
+}
+
+void ExternalCommandProcessor::ChangeRetryHostCheckInterval(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot update retry interval for non-existent host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Updating retry interval for host '" << arguments[0] << "'";
+
+ double interval = Convert::ToDouble(arguments[1]);
+
+ host->ModifyAttribute("retry_interval", interval * 60);
+}
+
+void ExternalCommandProcessor::EnableHostEventHandler(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable event handler for non-existent host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling event handler for host '" << arguments[0] << "'";
+
+ host->ModifyAttribute("enable_event_handler", true);
+}
+
+void ExternalCommandProcessor::DisableHostEventHandler(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable event handler for non-existent host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling event handler for host '" << arguments[0] << "'";
+
+ host->ModifyAttribute("enable_event_handler", false);
+}
+
+void ExternalCommandProcessor::EnableSvcEventHandler(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable event handler for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling event handler for service '" << arguments[1] << "'";
+
+ service->ModifyAttribute("enable_event_handler", true);
+}
+
+void ExternalCommandProcessor::DisableSvcEventHandler(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable event handler for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling event handler for service '" << arguments[1] + "'";
+
+ service->ModifyAttribute("enable_event_handler", false);
+}
+
+void ExternalCommandProcessor::ChangeHostEventHandler(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot change event handler for non-existent host '" + arguments[0] + "'"));
+
+ if (arguments[1].IsEmpty()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Unsetting event handler for host '" << arguments[0] << "'";
+
+ host->ModifyAttribute("event_command", "");
+ } else {
+ EventCommand::Ptr command = EventCommand::GetByName(arguments[1]);
+
+ if (!command)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Event command '" + arguments[1] + "' does not exist."));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Changing event handler for host '" << arguments[0] << "' to '" << arguments[1] << "'";
+
+ host->ModifyAttribute("event_command", command->GetName());
+ }
+}
+
+void ExternalCommandProcessor::ChangeSvcEventHandler(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot change event handler for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ if (arguments[2].IsEmpty()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Unsetting event handler for service '" << arguments[1] << "'";
+
+ service->ModifyAttribute("event_command", "");
+ } else {
+ EventCommand::Ptr command = EventCommand::GetByName(arguments[2]);
+
+ if (!command)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Event command '" + arguments[2] + "' does not exist."));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Changing event handler for service '" << arguments[1] << "' to '" << arguments[2] << "'";
+
+ service->ModifyAttribute("event_command", command->GetName());
+ }
+}
+
+void ExternalCommandProcessor::ChangeHostCheckCommand(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot change check command for non-existent host '" + arguments[0] + "'"));
+
+ CheckCommand::Ptr command = CheckCommand::GetByName(arguments[1]);
+
+ if (!command)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Check command '" + arguments[1] + "' does not exist."));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Changing check command for host '" << arguments[0] << "' to '" << arguments[1] << "'";
+
+ host->ModifyAttribute("check_command", command->GetName());
+}
+
+void ExternalCommandProcessor::ChangeSvcCheckCommand(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot change check command for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ CheckCommand::Ptr command = CheckCommand::GetByName(arguments[2]);
+
+ if (!command)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Check command '" + arguments[2] + "' does not exist."));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Changing check command for service '" << arguments[1] << "' to '" << arguments[2] << "'";
+
+ service->ModifyAttribute("check_command", command->GetName());
+}
+
+void ExternalCommandProcessor::ChangeMaxHostCheckAttempts(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot change max check attempts for non-existent host '" + arguments[0] + "'"));
+
+ int attempts = Convert::ToLong(arguments[1]);
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Changing max check attempts for host '" << arguments[0] << "' to '" << arguments[1] << "'";
+
+ host->ModifyAttribute("max_check_attempts", attempts);
+}
+
+void ExternalCommandProcessor::ChangeMaxSvcCheckAttempts(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot change max check attempts for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ int attempts = Convert::ToLong(arguments[2]);
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Changing max check attempts for service '" << arguments[1] << "' to '" << arguments[2] << "'";
+
+ service->ModifyAttribute("max_check_attempts", attempts);
+}
+
+void ExternalCommandProcessor::ChangeHostCheckTimeperiod(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot change check period for non-existent host '" + arguments[0] + "'"));
+
+ TimePeriod::Ptr tp = TimePeriod::GetByName(arguments[1]);
+
+ if (!tp)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Time period '" + arguments[1] + "' does not exist."));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Changing check period for host '" << arguments[0] << "' to '" << arguments[1] << "'";
+
+ host->ModifyAttribute("check_period", tp->GetName());
+}
+
+void ExternalCommandProcessor::ChangeSvcCheckTimeperiod(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot change check period for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ TimePeriod::Ptr tp = TimePeriod::GetByName(arguments[2]);
+
+ if (!tp)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Time period '" + arguments[2] + "' does not exist."));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Changing check period for service '" << arguments[1] << "' to '" << arguments[2] << "'";
+
+ service->ModifyAttribute("check_period", tp->GetName());
+}
+
+void ExternalCommandProcessor::ChangeCustomHostVar(double, const std::vector<String>& arguments)
+{
+ Host::Ptr host = Host::GetByName(arguments[0]);
+
+ if (!host)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot change custom var for non-existent host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Changing custom var '" << arguments[1] << "' for host '" << arguments[0] << "' to value '" << arguments[2] << "'";
+
+ host->ModifyAttribute("vars." + arguments[1], arguments[2]);
+}
+
+void ExternalCommandProcessor::ChangeCustomSvcVar(double, const std::vector<String>& arguments)
+{
+ Service::Ptr service = Service::GetByNamePair(arguments[0], arguments[1]);
+
+ if (!service)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot change custom var for non-existent service '" + arguments[1] + "' on host '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Changing custom var '" << arguments[2] << "' for service '" << arguments[1] << "' on host '"
+ << arguments[0] << "' to value '" << arguments[3] << "'";
+
+ service->ModifyAttribute("vars." + arguments[2], arguments[3]);
+}
+
+void ExternalCommandProcessor::ChangeCustomUserVar(double, const std::vector<String>& arguments)
+{
+ User::Ptr user = User::GetByName(arguments[0]);
+
+ if (!user)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot change custom var for non-existent user '" + arguments[0] + "'"));
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Changing custom var '" << arguments[1] << "' for user '" << arguments[0] << "' to value '" << arguments[2] << "'";
+
+ user->ModifyAttribute("vars." + arguments[1], arguments[2]);
+}
+
+void ExternalCommandProcessor::ChangeCustomCheckcommandVar(double, const std::vector<String>& arguments)
+{
+ CheckCommand::Ptr command = CheckCommand::GetByName(arguments[0]);
+
+ if (!command)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot change custom var for non-existent command '" + arguments[0] + "'"));
+
+ ChangeCustomCommandVarInternal(command, arguments[1], arguments[2]);
+}
+
+void ExternalCommandProcessor::ChangeCustomEventcommandVar(double, const std::vector<String>& arguments)
+{
+ EventCommand::Ptr command = EventCommand::GetByName(arguments[0]);
+
+ if (!command)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot change custom var for non-existent command '" + arguments[0] + "'"));
+
+ ChangeCustomCommandVarInternal(command, arguments[1], arguments[2]);
+}
+
+void ExternalCommandProcessor::ChangeCustomNotificationcommandVar(double, const std::vector<String>& arguments)
+{
+ NotificationCommand::Ptr command = NotificationCommand::GetByName(arguments[0]);
+
+ if (!command)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot change custom var for non-existent command '" + arguments[0] + "'"));
+
+ ChangeCustomCommandVarInternal(command, arguments[1], arguments[2]);
+}
+
+void ExternalCommandProcessor::ChangeCustomCommandVarInternal(const Command::Ptr& command, const String& name, const Value& value)
+{
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Changing custom var '" << name << "' for command '" << command->GetName() << "' to value '" << value << "'";
+
+ command->ModifyAttribute("vars." + name, value);
+}
+
+void ExternalCommandProcessor::EnableHostgroupHostNotifications(double, const std::vector<String>& arguments)
+{
+ HostGroup::Ptr hg = HostGroup::GetByName(arguments[0]);
+
+ if (!hg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable host notifications for non-existent hostgroup '" + arguments[0] + "'"));
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling notifications for host '" << host->GetName() << "'";
+
+ host->ModifyAttribute("enable_notifications", true);
+ }
+}
+
+void ExternalCommandProcessor::EnableHostgroupSvcNotifications(double, const std::vector<String>& arguments)
+{
+ HostGroup::Ptr hg = HostGroup::GetByName(arguments[0]);
+
+ if (!hg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable service notifications for non-existent hostgroup '" + arguments[0] + "'"));
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ for (const Service::Ptr& service : host->GetServices()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling notifications for service '" << service->GetName() << "'";
+
+ service->ModifyAttribute("enable_notifications", true);
+ }
+ }
+}
+
+void ExternalCommandProcessor::DisableHostgroupHostNotifications(double, const std::vector<String>& arguments)
+{
+ HostGroup::Ptr hg = HostGroup::GetByName(arguments[0]);
+
+ if (!hg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable host notifications for non-existent hostgroup '" + arguments[0] + "'"));
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling notifications for host '" << host->GetName() << "'";
+
+ host->ModifyAttribute("enable_notifications", false);
+ }
+}
+
+void ExternalCommandProcessor::DisableHostgroupSvcNotifications(double, const std::vector<String>& arguments)
+{
+ HostGroup::Ptr hg = HostGroup::GetByName(arguments[0]);
+
+ if (!hg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable service notifications for non-existent hostgroup '" + arguments[0] + "'"));
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ for (const Service::Ptr& service : host->GetServices()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling notifications for service '" << service->GetName() << "'";
+
+ service->ModifyAttribute("enable_notifications", false);
+ }
+ }
+}
+
+void ExternalCommandProcessor::EnableServicegroupHostNotifications(double, const std::vector<String>& arguments)
+{
+ ServiceGroup::Ptr sg = ServiceGroup::GetByName(arguments[0]);
+
+ if (!sg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable host notifications for non-existent servicegroup '" + arguments[0] + "'"));
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ Host::Ptr host = service->GetHost();
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling notifications for host '" << host->GetName() << "'";
+
+ host->ModifyAttribute("enable_notifications", true);
+ }
+}
+
+void ExternalCommandProcessor::EnableServicegroupSvcNotifications(double, const std::vector<String>& arguments)
+{
+ ServiceGroup::Ptr sg = ServiceGroup::GetByName(arguments[0]);
+
+ if (!sg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot enable service notifications for non-existent servicegroup '" + arguments[0] + "'"));
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Enabling notifications for service '" << service->GetName() << "'";
+
+ service->ModifyAttribute("enable_notifications", true);
+ }
+}
+
+void ExternalCommandProcessor::DisableServicegroupHostNotifications(double, const std::vector<String>& arguments)
+{
+ ServiceGroup::Ptr sg = ServiceGroup::GetByName(arguments[0]);
+
+ if (!sg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable host notifications for non-existent servicegroup '" + arguments[0] + "'"));
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ Host::Ptr host = service->GetHost();
+
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling notifications for host '" << host->GetName() << "'";
+
+ host->ModifyAttribute("enable_notifications", false);
+ }
+}
+
+void ExternalCommandProcessor::DisableServicegroupSvcNotifications(double, const std::vector<String>& arguments)
+{
+ ServiceGroup::Ptr sg = ServiceGroup::GetByName(arguments[0]);
+
+ if (!sg)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot disable service notifications for non-existent servicegroup '" + arguments[0] + "'"));
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ Log(LogNotice, "ExternalCommandProcessor")
+ << "Disabling notifications for service '" << service->GetName() << "'";
+
+ service->ModifyAttribute("enable_notifications", false);
+ }
+}
+
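+/* The mutex and command registry are function-local statics so they are constructed on first use, independent of global initialization order. */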
+std::mutex& ExternalCommandProcessor::GetMutex()
+{
+ static std::mutex mtx;
+ return mtx;
+}
+
+std::map<String, ExternalCommandInfo>& ExternalCommandProcessor::GetCommands()
+{
+ static std::map<String, ExternalCommandInfo> commands;
+ return commands;
+}
+
diff --git a/lib/icinga/externalcommandprocessor.hpp b/lib/icinga/externalcommandprocessor.hpp
new file mode 100644
index 0000000..a7c5a30
--- /dev/null
+++ b/lib/icinga/externalcommandprocessor.hpp
@@ -0,0 +1,169 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef EXTERNALCOMMANDPROCESSOR_H
+#define EXTERNALCOMMANDPROCESSOR_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/command.hpp"
+#include "base/string.hpp"
+#include <boost/signals2.hpp>
+#include <vector>
+
+namespace icinga
+{
+
+typedef std::function<void (double, const std::vector<String>& arguments)> ExternalCommandCallback;
+
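+/* Registry entry for one external command: its handler and the accepted argument count range. */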
+struct ExternalCommandInfo
+{
+ ExternalCommandCallback Callback;
+ size_t MinArgs;
+ size_t MaxArgs;
+};
+
+class ExternalCommandProcessor {
+public:
+ static void Execute(const String& line);
+ static void Execute(double time, const String& command, const std::vector<String>& arguments);
+
+ static boost::signals2::signal<void(double, const String&, const std::vector<String>&)> OnNewExternalCommand;
+
+private:
+ ExternalCommandProcessor();
+
+ static void ExecuteFromFile(const String& line, std::deque< std::vector<String> >& file_queue);
+
+ static void ProcessHostCheckResult(double time, const std::vector<String>& arguments);
+ static void ProcessServiceCheckResult(double time, const std::vector<String>& arguments);
+ static void ScheduleHostCheck(double time, const std::vector<String>& arguments);
+ static void ScheduleForcedHostCheck(double time, const std::vector<String>& arguments);
+ static void ScheduleSvcCheck(double time, const std::vector<String>& arguments);
+ static void ScheduleForcedSvcCheck(double time, const std::vector<String>& arguments);
+ static void EnableHostCheck(double time, const std::vector<String>& arguments);
+ static void DisableHostCheck(double time, const std::vector<String>& arguments);
+ static void EnableSvcCheck(double time, const std::vector<String>& arguments);
+ static void DisableSvcCheck(double time, const std::vector<String>& arguments);
+ static void ShutdownProcess(double time, const std::vector<String>& arguments);
+ static void RestartProcess(double time, const std::vector<String>& arguments);
+ static void ScheduleForcedHostSvcChecks(double time, const std::vector<String>& arguments);
+ static void ScheduleHostSvcChecks(double time, const std::vector<String>& arguments);
+ static void EnableHostSvcChecks(double time, const std::vector<String>& arguments);
+ static void DisableHostSvcChecks(double time, const std::vector<String>& arguments);
+ static void AcknowledgeSvcProblem(double time, const std::vector<String>& arguments);
+ static void AcknowledgeSvcProblemExpire(double time, const std::vector<String>& arguments);
+ static void RemoveSvcAcknowledgement(double time, const std::vector<String>& arguments);
+ static void AcknowledgeHostProblem(double time, const std::vector<String>& arguments);
+ static void AcknowledgeHostProblemExpire(double time, const std::vector<String>& arguments);
+ static void RemoveHostAcknowledgement(double time, const std::vector<String>& arguments);
+ static void EnableHostgroupSvcChecks(double time, const std::vector<String>& arguments);
+ static void DisableHostgroupSvcChecks(double time, const std::vector<String>& arguments);
+ static void EnableServicegroupSvcChecks(double time, const std::vector<String>& arguments);
+ static void DisableServicegroupSvcChecks(double time, const std::vector<String>& arguments);
+ static void EnablePassiveHostChecks(double time, const std::vector<String>& arguments);
+ static void DisablePassiveHostChecks(double time, const std::vector<String>& arguments);
+ static void EnablePassiveSvcChecks(double time, const std::vector<String>& arguments);
+ static void DisablePassiveSvcChecks(double time, const std::vector<String>& arguments);
+ static void EnableServicegroupPassiveSvcChecks(double time, const std::vector<String>& arguments);
+ static void DisableServicegroupPassiveSvcChecks(double time, const std::vector<String>& arguments);
+ static void EnableHostgroupPassiveSvcChecks(double time, const std::vector<String>& arguments);
+ static void DisableHostgroupPassiveSvcChecks(double time, const std::vector<String>& arguments);
+ static void ProcessFile(double time, const std::vector<String>& arguments);
+ static void ScheduleSvcDowntime(double time, const std::vector<String>& arguments);
+ static void DelSvcDowntime(double time, const std::vector<String>& arguments);
+ static void ScheduleHostDowntime(double time, const std::vector<String>& arguments);
+ static void ScheduleAndPropagateHostDowntime(double, const std::vector<String>& arguments);
+ static void ScheduleAndPropagateTriggeredHostDowntime(double, const std::vector<String>& arguments);
+ static void DelHostDowntime(double time, const std::vector<String>& arguments);
+ static void DelDowntimeByHostName(double, const std::vector<String>& arguments);
+ static void ScheduleHostSvcDowntime(double time, const std::vector<String>& arguments);
+ static void ScheduleHostgroupHostDowntime(double time, const std::vector<String>& arguments);
+ static void ScheduleHostgroupSvcDowntime(double time, const std::vector<String>& arguments);
+ static void ScheduleServicegroupHostDowntime(double time, const std::vector<String>& arguments);
+ static void ScheduleServicegroupSvcDowntime(double time, const std::vector<String>& arguments);
+ static void AddHostComment(double time, const std::vector<String>& arguments);
+ static void DelHostComment(double time, const std::vector<String>& arguments);
+ static void AddSvcComment(double time, const std::vector<String>& arguments);
+ static void DelSvcComment(double time, const std::vector<String>& arguments);
+ static void DelAllHostComments(double time, const std::vector<String>& arguments);
+ static void DelAllSvcComments(double time, const std::vector<String>& arguments);
+ static void SendCustomHostNotification(double time, const std::vector<String>& arguments);
+ static void SendCustomSvcNotification(double time, const std::vector<String>& arguments);
+ static void DelayHostNotification(double time, const std::vector<String>& arguments);
+ static void DelaySvcNotification(double time, const std::vector<String>& arguments);
+ static void EnableHostNotifications(double time, const std::vector<String>& arguments);
+ static void DisableHostNotifications(double time, const std::vector<String>& arguments);
+ static void EnableSvcNotifications(double time, const std::vector<String>& arguments);
+ static void DisableSvcNotifications(double time, const std::vector<String>& arguments);
+ static void EnableHostSvcNotifications(double, const std::vector<String>& arguments);
+ static void DisableHostSvcNotifications(double, const std::vector<String>& arguments);
+ static void DisableHostgroupHostChecks(double, const std::vector<String>& arguments);
+ static void DisableHostgroupPassiveHostChecks(double, const std::vector<String>& arguments);
+ static void DisableServicegroupHostChecks(double, const std::vector<String>& arguments);
+ static void DisableServicegroupPassiveHostChecks(double, const std::vector<String>& arguments);
+ static void EnableHostgroupHostChecks(double, const std::vector<String>& arguments);
+ static void EnableHostgroupPassiveHostChecks(double, const std::vector<String>& arguments);
+ static void EnableServicegroupHostChecks(double, const std::vector<String>& arguments);
+ static void EnableServicegroupPassiveHostChecks(double, const std::vector<String>& arguments);
+ static void EnableSvcFlapping(double time, const std::vector<String>& arguments);
+ static void DisableSvcFlapping(double time, const std::vector<String>& arguments);
+ static void EnableHostFlapping(double time, const std::vector<String>& arguments);
+ static void DisableHostFlapping(double time, const std::vector<String>& arguments);
+ static void EnableNotifications(double time, const std::vector<String>& arguments);
+ static void DisableNotifications(double time, const std::vector<String>& arguments);
+ static void EnableFlapDetection(double time, const std::vector<String>& arguments);
+ static void DisableFlapDetection(double time, const std::vector<String>& arguments);
+ static void EnableEventHandlers(double time, const std::vector<String>& arguments);
+ static void DisableEventHandlers(double time, const std::vector<String>& arguments);
+ static void EnablePerformanceData(double time, const std::vector<String>& arguments);
+ static void DisablePerformanceData(double time, const std::vector<String>& arguments);
+ static void StartExecutingSvcChecks(double time, const std::vector<String>& arguments);
+ static void StopExecutingSvcChecks(double time, const std::vector<String>& arguments);
+ static void StartExecutingHostChecks(double time, const std::vector<String>& arguments);
+ static void StopExecutingHostChecks(double time, const std::vector<String>& arguments);
+
+ static void ChangeNormalSvcCheckInterval(double time, const std::vector<String>& arguments);
+ static void ChangeNormalHostCheckInterval(double time, const std::vector<String>& arguments);
+ static void ChangeRetrySvcCheckInterval(double time, const std::vector<String>& arguments);
+ static void ChangeRetryHostCheckInterval(double time, const std::vector<String>& arguments);
+ static void EnableHostEventHandler(double time, const std::vector<String>& arguments);
+ static void DisableHostEventHandler(double time, const std::vector<String>& arguments);
+ static void EnableSvcEventHandler(double time, const std::vector<String>& arguments);
+ static void DisableSvcEventHandler(double time, const std::vector<String>& arguments);
+ static void ChangeHostEventHandler(double time, const std::vector<String>& arguments);
+ static void ChangeSvcEventHandler(double time, const std::vector<String>& arguments);
+ static void ChangeHostCheckCommand(double time, const std::vector<String>& arguments);
+ static void ChangeSvcCheckCommand(double time, const std::vector<String>& arguments);
+ static void ChangeMaxHostCheckAttempts(double time, const std::vector<String>& arguments);
+ static void ChangeMaxSvcCheckAttempts(double time, const std::vector<String>& arguments);
+ static void ChangeHostCheckTimeperiod(double time, const std::vector<String>& arguments);
+ static void ChangeSvcCheckTimeperiod(double time, const std::vector<String>& arguments);
+ static void ChangeCustomHostVar(double time, const std::vector<String>& arguments);
+ static void ChangeCustomSvcVar(double time, const std::vector<String>& arguments);
+ static void ChangeCustomUserVar(double time, const std::vector<String>& arguments);
+ static void ChangeCustomCheckcommandVar(double time, const std::vector<String>& arguments);
+ static void ChangeCustomEventcommandVar(double time, const std::vector<String>& arguments);
+ static void ChangeCustomNotificationcommandVar(double time, const std::vector<String>& arguments);
+
+ static void EnableHostgroupHostNotifications(double time, const std::vector<String>& arguments);
+ static void EnableHostgroupSvcNotifications(double time, const std::vector<String>& arguments);
+ static void DisableHostgroupHostNotifications(double time, const std::vector<String>& arguments);
+ static void DisableHostgroupSvcNotifications(double time, const std::vector<String>& arguments);
+ static void EnableServicegroupHostNotifications(double time, const std::vector<String>& arguments);
+ static void EnableServicegroupSvcNotifications(double time, const std::vector<String>& arguments);
+ static void DisableServicegroupHostNotifications(double time, const std::vector<String>& arguments);
+ static void DisableServicegroupSvcNotifications(double time, const std::vector<String>& arguments);
+
+private:
+ static void ChangeCustomCommandVarInternal(const Command::Ptr& command, const String& name, const Value& value);
+
+ static void RegisterCommand(const String& command, const ExternalCommandCallback& callback, size_t minArgs = 0, size_t maxArgs = UINT_MAX);
+ static void RegisterCommands();
+
+ static std::mutex& GetMutex();
+ static std::map<String, ExternalCommandInfo>& GetCommands();
+
+};
+
+}
+
+#endif /* EXTERNALCOMMANDPROCESSOR_H */
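The OnNewExternalCommand signal declared above lets other components observe commands as they are dispatched. A minimal subscriber sketch, illustrative only:

#include "icinga/externalcommandprocessor.hpp"
#include <iostream>

static void SubscribeToExternalCommands()
{
	// The lambda signature matches the boost::signals2 signal declared in the header.
	icinga::ExternalCommandProcessor::OnNewExternalCommand.connect(
		[](double time, const icinga::String& command, const std::vector<icinga::String>& arguments) {
			std::cout << "External command '" << command << "' with "
				<< arguments.size() << " argument(s) at " << time << "\n";
		});
}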
diff --git a/lib/icinga/host.cpp b/lib/icinga/host.cpp
new file mode 100644
index 0000000..36149d3
--- /dev/null
+++ b/lib/icinga/host.cpp
@@ -0,0 +1,330 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/host.hpp"
+#include "icinga/host-ti.cpp"
+#include "icinga/service.hpp"
+#include "icinga/hostgroup.hpp"
+#include "icinga/pluginutility.hpp"
+#include "icinga/scheduleddowntime.hpp"
+#include "base/objectlock.hpp"
+#include "base/convert.hpp"
+#include "base/utility.hpp"
+#include "base/debug.hpp"
+#include "base/json.hpp"
+
+using namespace icinga;
+
+REGISTER_TYPE(Host);
+
+void Host::OnAllConfigLoaded()
+{
+ ObjectImpl<Host>::OnAllConfigLoaded();
+
+ String zoneName = GetZoneName();
+
+ if (!zoneName.IsEmpty()) {
+ Zone::Ptr zone = Zone::GetByName(zoneName);
+
+ if (zone && zone->IsGlobal())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Host '" + GetName() + "' cannot be put into global zone '" + zone->GetName() + "'."));
+ }
+
+ HostGroup::EvaluateObjectRules(this);
+
+ Array::Ptr groups = GetGroups();
+
+ if (groups) {
+ groups = groups->ShallowClone();
+
+ ObjectLock olock(groups);
+
+ for (const String& name : groups) {
+ HostGroup::Ptr hg = HostGroup::GetByName(name);
+
+ if (hg)
+ hg->ResolveGroupMembership(this, true);
+ }
+ }
+}
+
+void Host::CreateChildObjects(const Type::Ptr& childType)
+{
+ if (childType == ScheduledDowntime::TypeInstance)
+ ScheduledDowntime::EvaluateApplyRules(this);
+
+ if (childType == Notification::TypeInstance)
+ Notification::EvaluateApplyRules(this);
+
+ if (childType == Dependency::TypeInstance)
+ Dependency::EvaluateApplyRules(this);
+
+ if (childType == Service::TypeInstance)
+ Service::EvaluateApplyRules(this);
+}
+
+void Host::Stop(bool runtimeRemoved)
+{
+ ObjectImpl<Host>::Stop(runtimeRemoved);
+
+ Array::Ptr groups = GetGroups();
+
+ if (groups) {
+ ObjectLock olock(groups);
+
+ for (const String& name : groups) {
+ HostGroup::Ptr hg = HostGroup::GetByName(name);
+
+ if (hg)
+ hg->ResolveGroupMembership(this, false);
+ }
+ }
+
+ // TODO: unregister slave services/notifications?
+}
+
+std::vector<Service::Ptr> Host::GetServices() const
+{
+ std::unique_lock<std::mutex> lock(m_ServicesMutex);
+
+ std::vector<Service::Ptr> services;
+ services.reserve(m_Services.size());
+ typedef std::pair<String, Service::Ptr> ServicePair;
+ for (const ServicePair& kv : m_Services) {
+ services.push_back(kv.second);
+ }
+
+ return services;
+}
+
+void Host::AddService(const Service::Ptr& service)
+{
+ std::unique_lock<std::mutex> lock(m_ServicesMutex);
+
+ m_Services[service->GetShortName()] = service;
+}
+
+void Host::RemoveService(const Service::Ptr& service)
+{
+ std::unique_lock<std::mutex> lock(m_ServicesMutex);
+
+ m_Services.erase(service->GetShortName());
+}
+
+int Host::GetTotalServices() const
+{
+ return GetServices().size();
+}
+
+Service::Ptr Host::GetServiceByShortName(const Value& name)
+{
+ if (name.IsScalar()) {
+ {
+ std::unique_lock<std::mutex> lock(m_ServicesMutex);
+
+ auto it = m_Services.find(name);
+
+ if (it != m_Services.end())
+ return it->second;
+ }
+
+ return nullptr;
+ } else if (name.IsObjectType<Dictionary>()) {
+ Dictionary::Ptr dict = name;
+
+ return Service::GetByNamePair(dict->Get("host"), dict->Get("service"));
+ } else {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Host/Service name pair is invalid: " + JsonEncode(name)));
+ }
+}
+
+HostState Host::CalculateState(ServiceState state)
+{
+ switch (state) {
+ case ServiceOK:
+ case ServiceWarning:
+ return HostUp;
+ default:
+ return HostDown;
+ }
+}
+
+HostState Host::GetState() const
+{
+ return CalculateState(GetStateRaw());
+}
+
+HostState Host::GetLastState() const
+{
+ return CalculateState(GetLastStateRaw());
+}
+
+HostState Host::GetLastHardState() const
+{
+ return CalculateState(GetLastHardStateRaw());
+}
+
+/* Keep in sync with Service::GetSeverity().
+ * One might think it would be smart to use an enum and some bitmask math here.
+ * But the only thing the consuming icingaweb2 cares about is being able to
+ * sort by severity. It is therefore easier to keep them separated here. */
+int Host::GetSeverity() const
+{
+ int severity = 0;
+
+ ObjectLock olock(this);
+ HostState state = GetState();
+
+ if (!HasBeenChecked()) {
+ severity = 16;
+ } else if (state == HostUp) {
+ severity = 0;
+ } else {
+ if (IsReachable())
+ severity = 64;
+ else
+ severity = 32;
+
+ if (IsAcknowledged())
+ severity += 512;
+ else if (IsInDowntime())
+ severity += 256;
+ else
+ severity += 2048;
+ }
+
+ olock.Unlock();
+
+ return severity;
+}
+
+bool Host::IsStateOK(ServiceState state) const
+{
+ return Host::CalculateState(state) == HostUp;
+}
+
+void Host::SaveLastState(ServiceState state, double timestamp)
+{
+ if (state == ServiceOK || state == ServiceWarning)
+ SetLastStateUp(timestamp);
+ else if (state == ServiceCritical)
+ SetLastStateDown(timestamp);
+}
+
+HostState Host::StateFromString(const String& state)
+{
+ if (state == "UP")
+ return HostUp;
+ else
+ return HostDown;
+}
+
+String Host::StateToString(HostState state)
+{
+ switch (state) {
+ case HostUp:
+ return "UP";
+ case HostDown:
+ return "DOWN";
+ default:
+ return "INVALID";
+ }
+}
+
+StateType Host::StateTypeFromString(const String& type)
+{
+ if (type == "SOFT")
+ return StateTypeSoft;
+ else
+ return StateTypeHard;
+}
+
+String Host::StateTypeToString(StateType type)
+{
+ if (type == StateTypeSoft)
+ return "SOFT";
+ else
+ return "HARD";
+}
+
+bool Host::ResolveMacro(const String& macro, const CheckResult::Ptr&, Value *result) const
+{
+ if (macro == "state") {
+ *result = StateToString(GetState());
+ return true;
+ } else if (macro == "state_id") {
+ *result = GetState();
+ return true;
+ } else if (macro == "state_type") {
+ *result = StateTypeToString(GetStateType());
+ return true;
+ } else if (macro == "last_state") {
+ *result = StateToString(GetLastState());
+ return true;
+ } else if (macro == "last_state_id") {
+ *result = GetLastState();
+ return true;
+ } else if (macro == "last_state_type") {
+ *result = StateTypeToString(GetLastStateType());
+ return true;
+ } else if (macro == "last_state_change") {
+ *result = static_cast<long>(GetLastStateChange());
+ return true;
+ } else if (macro == "downtime_depth") {
+ *result = GetDowntimeDepth();
+ return true;
+ } else if (macro == "duration_sec") {
+ *result = Utility::GetTime() - GetLastStateChange();
+ return true;
+ } else if (macro == "num_services" || macro == "num_services_ok" || macro == "num_services_warning"
+ || macro == "num_services_unknown" || macro == "num_services_critical") {
+ int filter = -1;
+ int count = 0;
+
+ if (macro == "num_services_ok")
+ filter = ServiceOK;
+ else if (macro == "num_services_warning")
+ filter = ServiceWarning;
+ else if (macro == "num_services_unknown")
+ filter = ServiceUnknown;
+ else if (macro == "num_services_critical")
+ filter = ServiceCritical;
+
+ for (const Service::Ptr& service : GetServices()) {
+ if (filter != -1 && service->GetState() != filter)
+ continue;
+
+ count++;
+ }
+
+ *result = count;
+ return true;
+ }
+
+ CheckResult::Ptr cr = GetLastCheckResult();
+
+ if (cr) {
+ if (macro == "latency") {
+ *result = cr->CalculateLatency();
+ return true;
+ } else if (macro == "execution_time") {
+ *result = cr->CalculateExecutionTime();
+ return true;
+ } else if (macro == "output") {
+ *result = cr->GetOutput();
+ return true;
+ } else if (macro == "perfdata") {
+ *result = PluginUtility::FormatPerfdata(cr->GetPerformanceData());
+ return true;
+ } else if (macro == "check_source") {
+ *result = cr->GetCheckSource();
+ return true;
+ } else if (macro == "scheduling_source") {
+ *result = cr->GetSchedulingSource();
+ return true;
+ }
+ }
+
+ return false;
+}
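Host::ResolveMacro() above maps runtime macro names to concrete values. A minimal sketch of calling it directly; normally the MacroProcessor drives this through its resolver list, so this is illustrative only:

#include "icinga/host.hpp"
#include "base/logger.hpp"

void LogHostMacros(const icinga::Host::Ptr& host)
{
	icinga::Value value;

	// "state" resolves to "UP" or "DOWN" via StateToString(GetState()).
	if (host->ResolveMacro("state", nullptr, &value))
		icinga::Log(icinga::LogInformation, "Example") << "state: " << value;

	// "num_services_critical" counts this host's services that are currently CRITICAL.
	if (host->ResolveMacro("num_services_critical", nullptr, &value))
		icinga::Log(icinga::LogInformation, "Example") << "critical services: " << value;
}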
diff --git a/lib/icinga/host.hpp b/lib/icinga/host.hpp
new file mode 100644
index 0000000..d0d6c1a
--- /dev/null
+++ b/lib/icinga/host.hpp
@@ -0,0 +1,71 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef HOST_H
+#define HOST_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/host-ti.hpp"
+#include "icinga/macroresolver.hpp"
+#include "icinga/checkresult.hpp"
+
+namespace icinga
+{
+
+class Service;
+
+/**
+ * An Icinga host.
+ *
+ * @ingroup icinga
+ */
+class Host final : public ObjectImpl<Host>, public MacroResolver
+{
+public:
+ DECLARE_OBJECT(Host);
+ DECLARE_OBJECTNAME(Host);
+
+ intrusive_ptr<Service> GetServiceByShortName(const Value& name);
+
+ std::vector<intrusive_ptr<Service> > GetServices() const;
+ void AddService(const intrusive_ptr<Service>& service);
+ void RemoveService(const intrusive_ptr<Service>& service);
+
+ int GetTotalServices() const;
+
+ static HostState CalculateState(ServiceState state);
+
+ HostState GetState() const override;
+ HostState GetLastState() const override;
+ HostState GetLastHardState() const override;
+ int GetSeverity() const override;
+
+ bool IsStateOK(ServiceState state) const override;
+ void SaveLastState(ServiceState state, double timestamp) override;
+
+ static HostState StateFromString(const String& state);
+ static String StateToString(HostState state);
+
+ static StateType StateTypeFromString(const String& state);
+ static String StateTypeToString(StateType state);
+
+ bool ResolveMacro(const String& macro, const CheckResult::Ptr& cr, Value *result) const override;
+
+ void OnAllConfigLoaded() override;
+
+protected:
+ void Stop(bool runtimeRemoved) override;
+
+ void CreateChildObjects(const Type::Ptr& childType) override;
+
+private:
+ mutable std::mutex m_ServicesMutex;
+ std::map<String, intrusive_ptr<Service> > m_Services;
+
+ static void RefreshServicesCache();
+};
+
+}
+
+#endif /* HOST_H */
+
+#include "icinga/service.hpp"
diff --git a/lib/icinga/host.ti b/lib/icinga/host.ti
new file mode 100644
index 0000000..f6624e3
--- /dev/null
+++ b/lib/icinga/host.ti
@@ -0,0 +1,48 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/checkable.hpp"
+#include "icinga/customvarobject.hpp"
+#impl_include "icinga/hostgroup.hpp"
+
+library icinga;
+
+namespace icinga
+{
+
+class Host : Checkable
+{
+ load_after ApiListener;
+ load_after Endpoint;
+ load_after Zone;
+
+ [config, no_user_modify, required, signal_with_old_value] array(name(HostGroup)) groups {
+ default {{{ return new Array(); }}}
+ };
+
+ [config] String display_name {
+ get {{{
+ String displayName = m_DisplayName.load();
+ if (displayName.IsEmpty())
+ return GetName();
+ else
+ return displayName;
+ }}}
+ };
+
+ [config] String address;
+ [config] String address6;
+
+ [enum, no_storage] HostState "state" {
+ get;
+ };
+ [enum, no_storage] HostState last_state {
+ get;
+ };
+ [enum, no_storage] HostState last_hard_state {
+ get;
+ };
+ [state] Timestamp last_state_up;
+ [state] Timestamp last_state_down;
+};
+
+}
diff --git a/lib/icinga/hostgroup.cpp b/lib/icinga/hostgroup.cpp
new file mode 100644
index 0000000..a22f3b7
--- /dev/null
+++ b/lib/icinga/hostgroup.cpp
@@ -0,0 +1,108 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/hostgroup.hpp"
+#include "icinga/hostgroup-ti.cpp"
+#include "config/objectrule.hpp"
+#include "config/configitem.hpp"
+#include "base/configtype.hpp"
+#include "base/logger.hpp"
+#include "base/objectlock.hpp"
+#include "base/context.hpp"
+#include "base/workqueue.hpp"
+
+using namespace icinga;
+
+REGISTER_TYPE(HostGroup);
+
+INITIALIZE_ONCE([]() {
+ ObjectRule::RegisterType("HostGroup");
+});
+
+bool HostGroup::EvaluateObjectRule(const Host::Ptr& host, const ConfigItem::Ptr& group)
+{
+ String groupName = group->GetName();
+
+ CONTEXT("Evaluating rule for group '" << groupName << "'");
+
+ ScriptFrame frame(true);
+ if (group->GetScope())
+ group->GetScope()->CopyTo(frame.Locals);
+ frame.Locals->Set("host", host);
+
+ if (!group->GetFilter()->Evaluate(frame).GetValue().ToBool())
+ return false;
+
+ Log(LogDebug, "HostGroup")
+ << "Assigning membership for group '" << groupName << "' to host '" << host->GetName() << "'";
+
+ Array::Ptr groups = host->GetGroups();
+
+ if (groups && !groups->Contains(groupName))
+ groups->Add(groupName);
+
+ return true;
+}
+
+void HostGroup::EvaluateObjectRules(const Host::Ptr& host)
+{
+ CONTEXT("Evaluating group memberships for host '" << host->GetName() << "'");
+
+ for (const ConfigItem::Ptr& group : ConfigItem::GetItems(HostGroup::TypeInstance))
+ {
+ if (!group->GetFilter())
+ continue;
+
+ EvaluateObjectRule(host, group);
+ }
+}
+
+std::set<Host::Ptr> HostGroup::GetMembers() const
+{
+ std::unique_lock<std::mutex> lock(m_HostGroupMutex);
+ return m_Members;
+}
+
+void HostGroup::AddMember(const Host::Ptr& host)
+{
+ host->AddGroup(GetName());
+
+ std::unique_lock<std::mutex> lock(m_HostGroupMutex);
+ m_Members.insert(host);
+}
+
+void HostGroup::RemoveMember(const Host::Ptr& host)
+{
+ std::unique_lock<std::mutex> lock(m_HostGroupMutex);
+ m_Members.erase(host);
+}
+
+bool HostGroup::ResolveGroupMembership(const Host::Ptr& host, bool add, int rstack) {
+
+ if (add && rstack > 20) {
+ Log(LogWarning, "HostGroup")
+ << "Too many nested groups for group '" << GetName() << "': Host '"
+ << host->GetName() << "' membership assignment failed.";
+
+ return false;
+ }
+
+ Array::Ptr groups = GetGroups();
+
+ if (groups && groups->GetLength() > 0) {
+ ObjectLock olock(groups);
+
+ for (const String& name : groups) {
+ HostGroup::Ptr group = HostGroup::GetByName(name);
+
+ if (group && !group->ResolveGroupMembership(host, add, rstack + 1))
+ return false;
+ }
+ }
+
+ if (add)
+ AddMember(host);
+ else
+ RemoveMember(host);
+
+ return true;
+}
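ResolveGroupMembership() above also walks the group's own "groups" attribute, so adding a host to one group transitively registers it with all parent groups, bailing out after 20 nesting levels. A minimal sketch with hypothetical object names:

#include "icinga/hostgroup.hpp"

void AssignMembershipExample()
{
	// "linux-servers" and "web01" are hypothetical configuration object names.
	icinga::HostGroup::Ptr group = icinga::HostGroup::GetByName("linux-servers");
	icinga::Host::Ptr host = icinga::Host::GetByName("web01");

	if (group && host)
		group->ResolveGroupMembership(host, true); // add == true; passing false removes the membership
}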
diff --git a/lib/icinga/hostgroup.hpp b/lib/icinga/hostgroup.hpp
new file mode 100644
index 0000000..3ad5d26
--- /dev/null
+++ b/lib/icinga/hostgroup.hpp
@@ -0,0 +1,43 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef HOSTGROUP_H
+#define HOSTGROUP_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/hostgroup-ti.hpp"
+#include "icinga/host.hpp"
+
+namespace icinga
+{
+
+class ConfigItem;
+
+/**
+ * An Icinga host group.
+ *
+ * @ingroup icinga
+ */
+class HostGroup final : public ObjectImpl<HostGroup>
+{
+public:
+ DECLARE_OBJECT(HostGroup);
+ DECLARE_OBJECTNAME(HostGroup);
+
+ std::set<Host::Ptr> GetMembers() const;
+ void AddMember(const Host::Ptr& host);
+ void RemoveMember(const Host::Ptr& host);
+
+ bool ResolveGroupMembership(const Host::Ptr& host, bool add = true, int rstack = 0);
+
+ static void EvaluateObjectRules(const Host::Ptr& host);
+
+private:
+ mutable std::mutex m_HostGroupMutex;
+ std::set<Host::Ptr> m_Members;
+
+ static bool EvaluateObjectRule(const Host::Ptr& host, const intrusive_ptr<ConfigItem>& item);
+};
+
+}
+
+#endif /* HOSTGROUP_H */
diff --git a/lib/icinga/hostgroup.ti b/lib/icinga/hostgroup.ti
new file mode 100644
index 0000000..b679344
--- /dev/null
+++ b/lib/icinga/hostgroup.ti
@@ -0,0 +1,28 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/customvarobject.hpp"
+
+library icinga;
+
+namespace icinga
+{
+
+class HostGroup : CustomVarObject
+{
+ [config] String display_name {
+ get {{{
+ String displayName = m_DisplayName.load();
+ if (displayName.IsEmpty())
+ return GetName();
+ else
+ return displayName;
+ }}}
+ };
+
+ [config, no_user_modify] array(name(HostGroup)) groups;
+ [config] String notes;
+ [config] String notes_url;
+ [config] String action_url;
+};
+
+}
diff --git a/lib/icinga/i2-icinga.hpp b/lib/icinga/i2-icinga.hpp
new file mode 100644
index 0000000..7163822
--- /dev/null
+++ b/lib/icinga/i2-icinga.hpp
@@ -0,0 +1,15 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef I2ICINGA_H
+#define I2ICINGA_H
+
+/**
+ * @defgroup icinga Icinga library
+ *
+ * The Icinga library implements all Icinga-specific functionality that is
+ * common to all components (e.g. hosts, services, etc.).
+ */
+
+#include "base/i2-base.hpp"
+
+#endif /* I2ICINGA_H */
diff --git a/lib/icinga/icinga-itl.conf b/lib/icinga/icinga-itl.conf
new file mode 100644
index 0000000..22b688a
--- /dev/null
+++ b/lib/icinga/icinga-itl.conf
@@ -0,0 +1,15 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+System.assert(Internal.run_with_activation_context(function() {
+ template TimePeriod "legacy-timeperiod" use (LegacyTimePeriod = Internal.LegacyTimePeriod) default {
+ update = LegacyTimePeriod
+ }
+}))
+
+var methods = [
+ "LegacyTimePeriod"
+]
+
+for (method in methods) {
+ Internal.remove(method)
+}
diff --git a/lib/icinga/icingaapplication.cpp b/lib/icinga/icingaapplication.cpp
new file mode 100644
index 0000000..94ae0ed
--- /dev/null
+++ b/lib/icinga/icingaapplication.cpp
@@ -0,0 +1,321 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/icingaapplication.hpp"
+#include "icinga/icingaapplication-ti.cpp"
+#include "icinga/cib.hpp"
+#include "icinga/macroprocessor.hpp"
+#include "config/configcompiler.hpp"
+#include "base/atomic-file.hpp"
+#include "base/configwriter.hpp"
+#include "base/configtype.hpp"
+#include "base/exception.hpp"
+#include "base/logger.hpp"
+#include "base/objectlock.hpp"
+#include "base/convert.hpp"
+#include "base/debug.hpp"
+#include "base/utility.hpp"
+#include "base/timer.hpp"
+#include "base/scriptglobal.hpp"
+#include "base/initialize.hpp"
+#include "base/statsfunction.hpp"
+#include "base/loader.hpp"
+#include <fstream>
+
+using namespace icinga;
+
+static Timer::Ptr l_RetentionTimer;
+
+REGISTER_TYPE(IcingaApplication);
+/* Ensure that the priority is lower than the basic System namespace initialization in scriptframe.cpp. */
+INITIALIZE_ONCE_WITH_PRIORITY(&IcingaApplication::StaticInitialize, InitializePriority::InitIcingaApplication);
+
+static Namespace::Ptr l_IcingaNS;
+
+void IcingaApplication::StaticInitialize()
+{
+ /* Pre-fill global constants, can be overridden with user input later in icinga-app/icinga.cpp. */
+ String node_name = Utility::GetFQDN();
+
+ if (node_name.IsEmpty()) {
+ Log(LogNotice, "IcingaApplication", "No FQDN available. Trying Hostname.");
+ node_name = Utility::GetHostName();
+
+ if (node_name.IsEmpty()) {
+ Log(LogWarning, "IcingaApplication", "No FQDN nor Hostname available. Setting Nodename to 'localhost'.");
+ node_name = "localhost";
+ }
+ }
+
+ ScriptGlobal::Set("NodeName", node_name);
+
+ ScriptGlobal::Set("ReloadTimeout", 300);
+ ScriptGlobal::Set("MaxConcurrentChecks", 512);
+
+ Namespace::Ptr systemNS = ScriptGlobal::Get("System");
+ /* Ensure that the System namespace is already initialized. Otherwise this is a programming error. */
+ VERIFY(systemNS);
+
+ systemNS->Set("ApplicationType", "IcingaApplication", true);
+ systemNS->Set("ApplicationVersion", Application::GetAppVersion(), true);
+
+ Namespace::Ptr globalNS = ScriptGlobal::GetGlobals();
+ VERIFY(globalNS);
+
+ l_IcingaNS = new Namespace(true);
+ globalNS->Set("Icinga", l_IcingaNS, true);
+}
+
+INITIALIZE_ONCE_WITH_PRIORITY([]() {
+ l_IcingaNS->Freeze();
+}, InitializePriority::FreezeNamespaces);
+
+REGISTER_STATSFUNCTION(IcingaApplication, &IcingaApplication::StatsFunc);
+
+void IcingaApplication::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata)
+{
+ DictionaryData nodes;
+
+ for (const IcingaApplication::Ptr& icingaapplication : ConfigType::GetObjectsByType<IcingaApplication>()) {
+ nodes.emplace_back(icingaapplication->GetName(), new Dictionary({
+ { "node_name", icingaapplication->GetNodeName() },
+ { "enable_notifications", icingaapplication->GetEnableNotifications() },
+ { "enable_event_handlers", icingaapplication->GetEnableEventHandlers() },
+ { "enable_flapping", icingaapplication->GetEnableFlapping() },
+ { "enable_host_checks", icingaapplication->GetEnableHostChecks() },
+ { "enable_service_checks", icingaapplication->GetEnableServiceChecks() },
+ { "enable_perfdata", icingaapplication->GetEnablePerfdata() },
+ { "environment", icingaapplication->GetEnvironment() },
+ { "pid", Utility::GetPid() },
+ { "program_start", Application::GetStartTime() },
+ { "version", Application::GetAppVersion() }
+ }));
+ }
+
+ status->Set("icingaapplication", new Dictionary(std::move(nodes)));
+}
+
+/**
+ * The entry point for the Icinga application.
+ *
+ * @returns An exit status.
+ */
+int IcingaApplication::Main()
+{
+ Log(LogDebug, "IcingaApplication", "In IcingaApplication::Main()");
+
+ /* periodically dump the program state */
+ l_RetentionTimer = Timer::Create();
+ l_RetentionTimer->SetInterval(300);
+ l_RetentionTimer->OnTimerExpired.connect([this](const Timer * const&) { DumpProgramState(); });
+ l_RetentionTimer->Start();
+
+ RunEventLoop();
+
+ Log(LogInformation, "IcingaApplication", "Icinga has shut down.");
+
+ return EXIT_SUCCESS;
+}
+
+void IcingaApplication::OnShutdown()
+{
+ {
+ ObjectLock olock(this);
+ l_RetentionTimer->Stop();
+ }
+
+ DumpProgramState();
+}
+
+static void PersistModAttrHelper(AtomicFile& fp, ConfigObject::Ptr& previousObject, const ConfigObject::Ptr& object, const String& attr, const Value& value)
+{
+ if (object != previousObject) {
+ if (previousObject) {
+ ConfigWriter::EmitRaw(fp, "\tobj.version = ");
+ ConfigWriter::EmitValue(fp, 0, previousObject->GetVersion());
+ ConfigWriter::EmitRaw(fp, "\n}\n\n");
+ }
+
+ ConfigWriter::EmitRaw(fp, "var obj = ");
+
+ Array::Ptr args1 = new Array({
+ object->GetReflectionType()->GetName(),
+ object->GetName()
+ });
+ ConfigWriter::EmitFunctionCall(fp, "get_object", args1);
+
+ ConfigWriter::EmitRaw(fp, "\nif (obj) {\n");
+ }
+
+ ConfigWriter::EmitRaw(fp, "\tobj.");
+
+ Array::Ptr args2 = new Array({
+ attr,
+ value
+ });
+ ConfigWriter::EmitFunctionCall(fp, "modify_attribute", args2);
+
+ ConfigWriter::EmitRaw(fp, "\n");
+
+ previousObject = object;
+}
+
+void IcingaApplication::DumpProgramState()
+{
+ ConfigObject::DumpObjects(Configuration::StatePath);
+ DumpModifiedAttributes();
+}
+
+void IcingaApplication::DumpModifiedAttributes()
+{
+ String path = Configuration::ModAttrPath;
+
+ try {
+ Utility::Glob(path + ".tmp.*", &Utility::Remove, GlobFile);
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "IcingaApplication") << DiagnosticInformation(ex);
+ }
+
+ AtomicFile fp (path, 0644);
+
+ ConfigObject::Ptr previousObject;
+ ConfigObject::DumpModifiedAttributes([&fp, &previousObject](const ConfigObject::Ptr& object, const String& attr, const Value& value) {
+ PersistModAttrHelper(fp, previousObject, object, attr, value);
+ });
+
+ if (previousObject) {
+ ConfigWriter::EmitRaw(fp, "\tobj.version = ");
+ ConfigWriter::EmitValue(fp, 0, previousObject->GetVersion());
+ ConfigWriter::EmitRaw(fp, "\n}\n");
+ }
+
+ fp.Commit();
+}
+
+IcingaApplication::Ptr IcingaApplication::GetInstance()
+{
+ return static_pointer_cast<IcingaApplication>(Application::GetInstance());
+}
+
+bool IcingaApplication::ResolveMacro(const String& macro, const CheckResult::Ptr&, Value *result) const
+{
+ double now = Utility::GetTime();
+
+ if (macro == "timet") {
+ *result = static_cast<long>(now);
+ return true;
+ } else if (macro == "long_date_time") {
+ *result = Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", now);
+ return true;
+ } else if (macro == "short_date_time") {
+ *result = Utility::FormatDateTime("%Y-%m-%d %H:%M:%S", now);
+ return true;
+ } else if (macro == "date") {
+ *result = Utility::FormatDateTime("%Y-%m-%d", now);
+ return true;
+ } else if (macro == "time") {
+ *result = Utility::FormatDateTime("%H:%M:%S %z", now);
+ return true;
+ } else if (macro == "uptime") {
+ *result = Utility::FormatDuration(Application::GetUptime());
+ return true;
+ }
+
+ if (macro.Contains("num_services")) {
+ ServiceStatistics ss = CIB::CalculateServiceStats();
+
+ if (macro == "num_services_ok") {
+ *result = ss.services_ok;
+ return true;
+ } else if (macro == "num_services_warning") {
+ *result = ss.services_warning;
+ return true;
+ } else if (macro == "num_services_critical") {
+ *result = ss.services_critical;
+ return true;
+ } else if (macro == "num_services_unknown") {
+ *result = ss.services_unknown;
+ return true;
+ } else if (macro == "num_services_pending") {
+ *result = ss.services_pending;
+ return true;
+ } else if (macro == "num_services_unreachable") {
+ *result = ss.services_unreachable;
+ return true;
+ } else if (macro == "num_services_flapping") {
+ *result = ss.services_flapping;
+ return true;
+ } else if (macro == "num_services_in_downtime") {
+ *result = ss.services_in_downtime;
+ return true;
+ } else if (macro == "num_services_acknowledged") {
+ *result = ss.services_acknowledged;
+ return true;
+ } else if (macro == "num_services_handled") {
+ *result = ss.services_handled;
+ return true;
+ } else if (macro == "num_services_problem") {
+ *result = ss.services_problem;
+ return true;
+ }
+ }
+ else if (macro.Contains("num_hosts")) {
+ HostStatistics hs = CIB::CalculateHostStats();
+
+ if (macro == "num_hosts_up") {
+ *result = hs.hosts_up;
+ return true;
+ } else if (macro == "num_hosts_down") {
+ *result = hs.hosts_down;
+ return true;
+ } else if (macro == "num_hosts_pending") {
+ *result = hs.hosts_pending;
+ return true;
+ } else if (macro == "num_hosts_unreachable") {
+ *result = hs.hosts_unreachable;
+ return true;
+ } else if (macro == "num_hosts_flapping") {
+ *result = hs.hosts_flapping;
+ return true;
+ } else if (macro == "num_hosts_in_downtime") {
+ *result = hs.hosts_in_downtime;
+ return true;
+ } else if (macro == "num_hosts_acknowledged") {
+ *result = hs.hosts_acknowledged;
+ return true;
+ } else if (macro == "num_hosts_handled") {
+ *result = hs.hosts_handled;
+ return true;
+ } else if (macro == "num_hosts_problem") {
+ *result = hs.hosts_problem;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+String IcingaApplication::GetNodeName() const
+{
+ return ScriptGlobal::Get("NodeName");
+}
+
+/* Intentionally kept here, since an agent may not have the CheckerComponent loaded. */
+int IcingaApplication::GetMaxConcurrentChecks() const
+{
+ return ScriptGlobal::Get("MaxConcurrentChecks");
+}
+
+String IcingaApplication::GetEnvironment() const
+{
+ return Application::GetAppEnvironment();
+}
+
+void IcingaApplication::SetEnvironment(const String& value, bool suppress_events, const Value& cookie)
+{
+ Application::SetAppEnvironment(value);
+}
+
+void IcingaApplication::ValidateVars(const Lazy<Dictionary::Ptr>& lvalue, const ValidationUtils& utils)
+{
+ MacroProcessor::ValidateCustomVars(this, lvalue());
+}
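The ResolveMacro() implementation above exposes global date/time macros as well as the CIB host and service summary counters. A minimal sketch resolving two of them directly, illustrative only:

#include "icinga/icingaapplication.hpp"
#include "base/logger.hpp"

void LogGlobalMacros()
{
	icinga::IcingaApplication::Ptr app = icinga::IcingaApplication::GetInstance();
	icinga::Value value;

	// "uptime" returns a human-readable duration via Utility::FormatDuration().
	if (app->ResolveMacro("uptime", nullptr, &value))
		icinga::Log(icinga::LogInformation, "Example") << "uptime: " << value;

	// "num_hosts_down" comes from CIB::CalculateHostStats().
	if (app->ResolveMacro("num_hosts_down", nullptr, &value))
		icinga::Log(icinga::LogInformation, "Example") << "hosts down: " << value;
}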
diff --git a/lib/icinga/icingaapplication.hpp b/lib/icinga/icingaapplication.hpp
new file mode 100644
index 0000000..7888fa6
--- /dev/null
+++ b/lib/icinga/icingaapplication.hpp
@@ -0,0 +1,52 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef ICINGAAPPLICATION_H
+#define ICINGAAPPLICATION_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/icingaapplication-ti.hpp"
+#include "icinga/macroresolver.hpp"
+
+namespace icinga
+{
+
+/**
+ * The Icinga application.
+ *
+ * @ingroup icinga
+ */
+class IcingaApplication final : public ObjectImpl<IcingaApplication>, public MacroResolver
+{
+public:
+ DECLARE_OBJECT(IcingaApplication);
+ DECLARE_OBJECTNAME(IcingaApplication);
+
+ static void StaticInitialize();
+
+ int Main() override;
+
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+
+ static IcingaApplication::Ptr GetInstance();
+
+ bool ResolveMacro(const String& macro, const CheckResult::Ptr& cr, Value *result) const override;
+
+ String GetNodeName() const;
+
+ int GetMaxConcurrentChecks() const;
+
+ String GetEnvironment() const override;
+ void SetEnvironment(const String& value, bool suppress_events = false, const Value& cookie = Empty) override;
+
+ void ValidateVars(const Lazy<Dictionary::Ptr>& lvalue, const ValidationUtils& utils) override;
+
+private:
+ void DumpProgramState();
+ void DumpModifiedAttributes();
+
+ void OnShutdown() override;
+};
+
+}
+
+#endif /* ICINGAAPPLICATION_H */
diff --git a/lib/icinga/icingaapplication.ti b/lib/icinga/icingaapplication.ti
new file mode 100644
index 0000000..1cdef74
--- /dev/null
+++ b/lib/icinga/icingaapplication.ti
@@ -0,0 +1,41 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/application.hpp"
+
+library icinga;
+
+namespace icinga
+{
+
+class IcingaApplication : Application
+{
+ activation_priority -50;
+
+ [config, no_storage, virtual] String environment {
+ get;
+ set;
+ default {{{ return Application::GetAppEnvironment(); }}}
+ };
+
+ [config] bool enable_notifications {
+ default {{{ return true; }}}
+ };
+ [config] bool enable_event_handlers {
+ default {{{ return true; }}}
+ };
+ [config] bool enable_flapping {
+ default {{{ return true; }}}
+ };
+ [config] bool enable_host_checks {
+ default {{{ return true; }}}
+ };
+ [config] bool enable_service_checks {
+ default {{{ return true; }}}
+ };
+ [config] bool enable_perfdata {
+ default {{{ return true; }}}
+ };
+ [config] Dictionary::Ptr vars;
+};
+
+}
diff --git a/lib/icinga/legacytimeperiod.cpp b/lib/icinga/legacytimeperiod.cpp
new file mode 100644
index 0000000..33e6665
--- /dev/null
+++ b/lib/icinga/legacytimeperiod.cpp
@@ -0,0 +1,644 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/legacytimeperiod.hpp"
+#include "base/function.hpp"
+#include "base/convert.hpp"
+#include "base/exception.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+#include "base/debug.hpp"
+#include "base/utility.hpp"
+
+using namespace icinga;
+
+REGISTER_FUNCTION_NONCONST(Internal, LegacyTimePeriod, &LegacyTimePeriod::ScriptFunc, "tp:begin:end");
+
+/**
+ * Returns the same as mktime() but does not modify its argument and takes a const pointer.
+ *
+ * @param t struct tm to convert to time_t
+ * @return time_t representing the timestamp given by t
+ */
+static time_t mktime_const(const tm *t) {
+ tm copy = *t;
+ return mktime(&copy);
+}
+
+bool LegacyTimePeriod::IsInTimeRange(const tm *begin, const tm *end, int stride, const tm *reference)
+{
+ time_t tsbegin, tsend, tsref;
+ tsbegin = mktime_const(begin);
+ tsend = mktime_const(end);
+ tsref = mktime_const(reference);
+
+ if (tsref < tsbegin || tsref > tsend)
+ return false;
+
+ int daynumber = (tsref - tsbegin) / (24 * 60 * 60);
+
+ if (stride > 1 && daynumber % stride > 0)
+ return false;
+
+ return true;
+}
+
+/**
+ * Update all day-related fields of reference (tm_year, tm_mon, tm_mday, tm_wday, tm_yday) to reference the n-th
+ * occurrence of a weekday (given by wday) in the month represented by the original value of reference.
+ *
+ * If n is negative, counting is done from the end of the month, so for example with wday=1 and n=-1, the result will be
+ * the last Monday in the month given by reference.
+ *
+ * @param wday Weekday (0 = Sunday, 1 = Monday, ..., 6 = Saturday, like tm_wday)
+ * @param n Search the n-th weekday (given by wday) in the month given by reference
+ * @param reference Input for the current month and output for the given day of that month
+ */
+void LegacyTimePeriod::FindNthWeekday(int wday, int n, tm *reference)
+{
+ // Work on a copy to only update specific fields of reference (as documented).
+ tm t = *reference;
+
+ int dir, seen = 0;
+
+ if (n > 0) {
+ dir = 1;
+ } else {
+ n *= -1;
+ dir = -1;
+
+ /* Negative days are relative to the next month. */
+ t.tm_mon++;
+ }
+
+ ASSERT(n > 0);
+
+ t.tm_mday = 1;
+
+ for (;;) {
+ // Always operate on 00:00:00 with automatic DST detection, otherwise days could
+ // be skipped or counted twice if +-24 hours is not on the next or previous day.
+ t.tm_hour = 0;
+ t.tm_min = 0;
+ t.tm_sec = 0;
+ t.tm_isdst = -1;
+
+ mktime(&t);
+
+ if (t.tm_wday == wday) {
+ seen++;
+
+ if (seen == n)
+ break;
+ }
+
+ t.tm_mday += dir;
+ }
+
+ reference->tm_year = t.tm_year;
+ reference->tm_mon = t.tm_mon;
+ reference->tm_mday = t.tm_mday;
+ reference->tm_wday = t.tm_wday;
+ reference->tm_yday = t.tm_yday;
+}
+
+int LegacyTimePeriod::WeekdayFromString(const String& daydef)
+{
+ if (daydef == "sunday")
+ return 0;
+ else if (daydef == "monday")
+ return 1;
+ else if (daydef == "tuesday")
+ return 2;
+ else if (daydef == "wednesday")
+ return 3;
+ else if (daydef == "thursday")
+ return 4;
+ else if (daydef == "friday")
+ return 5;
+ else if (daydef == "saturday")
+ return 6;
+ else
+ return -1;
+}
+
+int LegacyTimePeriod::MonthFromString(const String& monthdef)
+{
+ if (monthdef == "january")
+ return 0;
+ else if (monthdef == "february")
+ return 1;
+ else if (monthdef == "march")
+ return 2;
+ else if (monthdef == "april")
+ return 3;
+ else if (monthdef == "may")
+ return 4;
+ else if (monthdef == "june")
+ return 5;
+ else if (monthdef == "july")
+ return 6;
+ else if (monthdef == "august")
+ return 7;
+ else if (monthdef == "september")
+ return 8;
+ else if (monthdef == "october")
+ return 9;
+ else if (monthdef == "november")
+ return 10;
+ else if (monthdef == "december")
+ return 11;
+ else
+ return -1;
+}
+
+boost::gregorian::date LegacyTimePeriod::GetEndOfMonthDay(int year, int month)
+{
+ boost::gregorian::date d(boost::gregorian::greg_year(year), boost::gregorian::greg_month(month), 1);
+
+ return d.end_of_month();
+}
+
+/**
+ * Finds the first day on or after the day given by reference and writes the beginning and end time of that day to
+ * the output parameters begin and end.
+ *
+ * @param timespec Day to find, for example "2021-10-20", "sunday", ...
+ * @param begin if != nullptr, set to 00:00:00 on that day
+ * @param end if != nullptr, set to 24:00:00 on that day (i.e. 00:00:00 of the next day)
+ * @param reference Time to begin the search at
+ */
+void LegacyTimePeriod::ParseTimeSpec(const String& timespec, tm *begin, tm *end, const tm *reference)
+{
+ /* YYYY-MM-DD */
+ if (timespec.GetLength() == 10 && timespec[4] == '-' && timespec[7] == '-') {
+ int year = Convert::ToLong(timespec.SubStr(0, 4));
+ int month = Convert::ToLong(timespec.SubStr(5, 2));
+ int day = Convert::ToLong(timespec.SubStr(8, 2));
+
+ if (month < 1 || month > 12)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid month in time specification: " + timespec));
+ if (day < 1 || day > 31)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid day in time specification: " + timespec));
+
+ if (begin) {
+ *begin = *reference;
+ begin->tm_year = year - 1900;
+ begin->tm_mon = month - 1;
+ begin->tm_mday = day;
+ begin->tm_hour = 0;
+ begin->tm_min = 0;
+ begin->tm_sec = 0;
+ begin->tm_isdst = -1;
+ }
+
+ if (end) {
+ *end = *reference;
+ end->tm_year = year - 1900;
+ end->tm_mon = month - 1;
+ end->tm_mday = day;
+ end->tm_hour = 24;
+ end->tm_min = 0;
+ end->tm_sec = 0;
+ end->tm_isdst = -1;
+ }
+
+ return;
+ }
+
+ std::vector<String> tokens = timespec.Split(" ");
+
+ int mon = -1;
+
+ if (tokens.size() > 1 && (tokens[0] == "day" || (mon = MonthFromString(tokens[0])) != -1)) {
+ if (mon == -1)
+ mon = reference->tm_mon;
+
+ int mday = Convert::ToLong(tokens[1]);
+
+ if (begin) {
+ *begin = *reference;
+ begin->tm_mon = mon;
+ begin->tm_mday = mday;
+ begin->tm_hour = 0;
+ begin->tm_min = 0;
+ begin->tm_sec = 0;
+ begin->tm_isdst = -1;
+
+ /* day -X: Negative days are relative to the next month. */
+ if (mday < 0) {
+ boost::gregorian::date d(GetEndOfMonthDay(reference->tm_year + 1900, mon + 1)); //TODO: Refactor this mess into full Boost.DateTime
+
+ // Depending on the number, we need to subtract specific days (counting starts at 0).
+ d = d - boost::gregorian::days(mday * -1 - 1);
+
+ *begin = boost::gregorian::to_tm(d);
+ begin->tm_hour = 0;
+ begin->tm_min = 0;
+ begin->tm_sec = 0;
+ }
+ }
+
+ if (end) {
+ *end = *reference;
+ end->tm_mon = mon;
+ end->tm_mday = mday;
+ end->tm_hour = 24;
+ end->tm_min = 0;
+ end->tm_sec = 0;
+ end->tm_isdst = -1;
+
+ /* day -X: Negative days are relative to the next month. */
+ if (mday < 0) {
+ boost::gregorian::date d(GetEndOfMonthDay(reference->tm_year + 1900, mon + 1)); //TODO: Refactor this mess into full Boost.DateTime
+
+ // Depending on the number, we need to subtract specific days (counting starts at 0).
+ d = d - boost::gregorian::days(mday * -1 - 1);
+
+ // End date is one day in the future, starting 00:00:00
+ d = d + boost::gregorian::days(1);
+
+ *end = boost::gregorian::to_tm(d);
+ end->tm_hour = 0;
+ end->tm_min = 0;
+ end->tm_sec = 0;
+ }
+ }
+
+ return;
+ }
+
+ int wday;
+
+ if (tokens.size() >= 1 && (wday = WeekdayFromString(tokens[0])) != -1) {
+ tm myref = *reference;
+ myref.tm_isdst = -1;
+
+ if (tokens.size() > 2) {
+ mon = MonthFromString(tokens[2]);
+
+ if (mon == -1)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid month in time specification: " + timespec));
+
+ myref.tm_mon = mon;
+ }
+
+ int n = 0;
+
+ if (tokens.size() > 1)
+ n = Convert::ToLong(tokens[1]);
+
+ if (begin) {
+ *begin = myref;
+
+ if (tokens.size() > 1)
+ FindNthWeekday(wday, n, begin);
+ else
+ begin->tm_mday += (7 - begin->tm_wday + wday) % 7;
+
+ begin->tm_hour = 0;
+ begin->tm_min = 0;
+ begin->tm_sec = 0;
+ }
+
+ if (end) {
+ *end = myref;
+
+ if (tokens.size() > 1)
+ FindNthWeekday(wday, n, end);
+ else
+ end->tm_mday += (7 - end->tm_wday + wday) % 7;
+
+ end->tm_hour = 0;
+ end->tm_min = 0;
+ end->tm_sec = 0;
+ end->tm_mday++;
+ }
+
+ return;
+ }
+
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid time specification: " + timespec));
+}
+
+/**
+ * Parse a range of days.
+ *
+ * The input can have the following formats:
+ * begin
+ * begin - end
+ * begin / stride
+ * begin - end / stride
+ *
+ * @param timerange Text representation of a day range or a single day, for example "2021-10-20", "monday - friday", ...
+ * @param begin Output parameter set to 00:00:00 of the first day of the range
+ * @param end Output parameter set to 24:00:00 of the last day of the range (i.e. 00:00:00 of the day after)
+ * @param stride Output parameter for the stride (for every n-th day)
+ * @param reference Expand the range relative to this timestamp
+ */
+void LegacyTimePeriod::ParseTimeRange(const String& timerange, tm *begin, tm *end, int *stride, const tm *reference)
+{
+ String def = timerange;
+
+ /* Figure out the stride. */
+ size_t pos = def.FindFirstOf('/');
+
+ if (pos != String::NPos) {
+ String strStride = def.SubStr(pos + 1).Trim();
+ *stride = Convert::ToLong(strStride);
+
+ /* Remove the stride parameter from the definition. */
+ def = def.SubStr(0, pos);
+ } else {
+ *stride = 1; /* User didn't specify anything, assume default. */
+ }
+
+ /* Figure out whether the user has specified two dates. */
+ pos = def.Find("- ");
+
+ if (pos != String::NPos) {
+ String first = def.SubStr(0, pos).Trim();
+
+ String second = def.SubStr(pos + 1).Trim();
+
+ ParseTimeSpec(first, begin, nullptr, reference);
+
+ /* If the second definition starts with a number, we need to
+ * prepend the first word from the first definition, e.g.:
+ * "day 1 - 15" --> "day 15" */
+ bool is_number = true;
+ size_t xpos = second.FindFirstOf(' ');
+ String fword = second.SubStr(0, xpos);
+
+ try {
+ Convert::ToLong(fword);
+ } catch (...) {
+ is_number = false;
+ }
+
+ if (is_number) {
+ xpos = first.FindFirstOf(' ');
+ ASSERT(xpos != String::NPos);
+ second = first.SubStr(0, xpos + 1) + second;
+ }
+
+ ParseTimeSpec(second, nullptr, end, reference);
+ } else {
+ ParseTimeSpec(def, begin, end, reference);
+ }
+}
+
+bool LegacyTimePeriod::IsInDayDefinition(const String& daydef, const tm *reference)
+{
+ tm begin, end;
+ int stride;
+
+ ParseTimeRange(daydef, &begin, &end, &stride, reference);
+
+ Log(LogDebug, "LegacyTimePeriod")
+ << "ParseTimeRange: '" << daydef << "' => " << mktime(&begin)
+ << " -> " << mktime(&end) << ", stride: " << stride;
+
+ return IsInTimeRange(&begin, &end, stride, reference);
+}
+
+static inline
+void ProcessTimeRaw(const String& in, const tm *reference, tm *out)
+{
+ *out = *reference;
+
+ auto hd (in.Split(":"));
+
+ switch (hd.size()) {
+ case 2:
+ out->tm_sec = 0;
+ break;
+ case 3:
+ out->tm_sec = Convert::ToLong(hd[2]);
+ break;
+ default:
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid time specification: " + in));
+ }
+
+ out->tm_hour = Convert::ToLong(hd[0]);
+ out->tm_min = Convert::ToLong(hd[1]);
+}
+
+void LegacyTimePeriod::ProcessTimeRangeRaw(const String& timerange, const tm *reference, tm *begin, tm *end)
+{
+ std::vector<String> times = timerange.Split("-");
+
+ if (times.size() != 2)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid timerange: " + timerange));
+
+ ProcessTimeRaw(times[0], reference, begin);
+ ProcessTimeRaw(times[1], reference, end);
+
+ if (begin->tm_hour * 3600 + begin->tm_min * 60 + begin->tm_sec >=
+ end->tm_hour * 3600 + end->tm_min * 60 + end->tm_sec)
+ end->tm_hour += 24;
+}
+
+Dictionary::Ptr LegacyTimePeriod::ProcessTimeRange(const String& timestamp, const tm *reference)
+{
+ tm begin, end;
+
+ ProcessTimeRangeRaw(timestamp, reference, &begin, &end);
+
+ return new Dictionary({
+ { "begin", (long)mktime(&begin) },
+ { "end", (long)mktime(&end) }
+ });
+}
+
+/**
+ * Takes a list of time ranges and expands them into concrete timestamps based on a reference time.
+ *
+ * @param timeranges String of comma-separated time ranges, for example "10:00-12:00", "12:15:30-12:23:43,16:00-18:00"
+ * @param reference Starting point for searching the segments
+ * @param result For each range, a dict with keys "begin" and "end" is added
+ */
+void LegacyTimePeriod::ProcessTimeRanges(const String& timeranges, const tm *reference, const Array::Ptr& result)
+{
+ std::vector<String> ranges = timeranges.Split(",");
+
+ for (const String& range : ranges) {
+ Dictionary::Ptr segment = ProcessTimeRange(range, reference);
+
+ if (segment->Get("begin") >= segment->Get("end"))
+ continue;
+
+ result->Add(segment);
+ }
+}
+
+Dictionary::Ptr LegacyTimePeriod::FindRunningSegment(const String& daydef, const String& timeranges, const tm *reference)
+{
+ tm begin, end, iter;
+ time_t tsend, tsiter, tsref;
+ int stride;
+
+ tsref = mktime_const(reference);
+
+ ParseTimeRange(daydef, &begin, &end, &stride, reference);
+
+ iter = begin;
+
+ tsend = mktime(&end);
+
+ do {
+ if (IsInTimeRange(&begin, &end, stride, &iter)) {
+ Array::Ptr segments = new Array();
+ ProcessTimeRanges(timeranges, &iter, segments);
+
+ Dictionary::Ptr bestSegment;
+ double bestEnd = 0.0;
+
+ ObjectLock olock(segments);
+ for (const Dictionary::Ptr& segment : segments) {
+ double begin = segment->Get("begin");
+ double end = segment->Get("end");
+
+ if (begin >= tsref || end < tsref)
+ continue;
+
+ if (!bestSegment || end > bestEnd) {
+ bestSegment = segment;
+ bestEnd = end;
+ }
+ }
+
+ if (bestSegment)
+ return bestSegment;
+ }
+
+ iter.tm_mday++;
+ iter.tm_hour = 0;
+ iter.tm_min = 0;
+ iter.tm_sec = 0;
+ tsiter = mktime(&iter);
+ } while (tsiter < tsend);
+
+ return nullptr;
+}
+
+Dictionary::Ptr LegacyTimePeriod::FindNextSegment(const String& daydef, const String& timeranges, const tm *reference)
+{
+ tm begin, end, iter, ref;
+ time_t tsend, tsiter, tsref;
+ int stride;
+
+ for (int pass = 1; pass <= 2; pass++) {
+ if (pass == 1) {
+ ref = *reference;
+ } else {
+ ref = end;
+ ref.tm_mday++;
+ }
+
+ tsref = mktime(&ref);
+
+ ParseTimeRange(daydef, &begin, &end, &stride, &ref);
+
+ iter = begin;
+
+ tsend = mktime(&end);
+
+ do {
+ if (IsInTimeRange(&begin, &end, stride, &iter)) {
+ Array::Ptr segments = new Array();
+ ProcessTimeRanges(timeranges, &iter, segments);
+
+ Dictionary::Ptr bestSegment;
+ double bestBegin;
+
+ ObjectLock olock(segments);
+ for (const Dictionary::Ptr& segment : segments) {
+ double begin = segment->Get("begin");
+
+ if (begin < tsref)
+ continue;
+
+ if (!bestSegment || begin < bestBegin) {
+ bestSegment = segment;
+ bestBegin = begin;
+ }
+ }
+
+ if (bestSegment)
+ return bestSegment;
+ }
+
+ iter.tm_mday++;
+ iter.tm_hour = 0;
+ iter.tm_min = 0;
+ iter.tm_sec = 0;
+ tsiter = mktime(&iter);
+ } while (tsiter < tsend);
+ }
+
+ return nullptr;
+}
+
+Array::Ptr LegacyTimePeriod::ScriptFunc(const TimePeriod::Ptr& tp, double begin, double end)
+{
+ Array::Ptr segments = new Array();
+
+ Dictionary::Ptr ranges = tp->GetRanges();
+
+ if (ranges) {
+ tm tm_begin = Utility::LocalTime(begin);
+
+ // Always evaluate time periods for full days as their ranges are given per day.
+ tm_begin.tm_hour = 0;
+ tm_begin.tm_min = 0;
+ tm_begin.tm_sec = 0;
+ tm_begin.tm_isdst = -1;
+
+ // Helper to move a struct tm to midnight of the next day for the loop below.
+ // Due to DST changes, this may move the time by something other than 24 hours.
+ auto advance_to_next_day = [](tm *t) {
+ t->tm_mday++;
+ t->tm_hour = 0;
+ t->tm_min = 0;
+ t->tm_sec = 0;
+ t->tm_isdst = -1;
+
+ // Normalize fields using mktime.
+ mktime(t);
+
+ // Reset tm_isdst so that future calls figure out the correct time zone after setting tm_hour/tm_min/tm_sec.
+ t->tm_isdst = -1;
+ };
+
+ for (tm reference = tm_begin; mktime_const(&reference) <= end; advance_to_next_day(&reference)) {
+
+#ifdef I2_DEBUG
+ Log(LogDebug, "LegacyTimePeriod")
+ << "Checking reference time " << mktime_const(&reference);
+#endif /* I2_DEBUG */
+
+ ObjectLock olock(ranges);
+ for (const Dictionary::Pair& kv : ranges) {
+ if (!IsInDayDefinition(kv.first, &reference)) {
+#ifdef I2_DEBUG
+ Log(LogDebug, "LegacyTimePeriod")
+ << "Not in day definition '" << kv.first << "'.";
+#endif /* I2_DEBUG */
+ continue;
+ }
+
+#ifdef I2_DEBUG
+ Log(LogDebug, "LegacyTimePeriod")
+ << "In day definition '" << kv.first << "'.";
+#endif /* I2_DEBUG */
+
+ ProcessTimeRanges(kv.second, &reference, segments);
+ }
+ }
+ }
+
+ Log(LogDebug, "LegacyTimePeriod")
+ << "Legacy timeperiod update returned " << segments->GetLength() << " segments.";
+
+ return segments;
+}
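The parsers above accept single days as well as ranges with an optional "/ stride" suffix, and ProcessTimeRange() expands one "HH:MM[:SS]-HH:MM[:SS]" entry into Unix timestamps. A minimal sketch of direct use, illustrative only:

#include "icinga/legacytimeperiod.hpp"
#include "base/utility.hpp"
#include <ctime>

void ParseExamples()
{
	tm reference = icinga::Utility::LocalTime(icinga::Utility::GetTime());

	tm begin, end;
	int stride;

	// "monday - friday / 2" uses every supported part: a begin day, an end day and a stride of 2.
	icinga::LegacyTimePeriod::ParseTimeRange("monday - friday / 2", &begin, &end, &stride, &reference);

	// Third Monday of the month that 'reference' falls into (wday 1 = Monday, n = 3).
	tm nthWeekday = reference;
	icinga::LegacyTimePeriod::FindNthWeekday(1, 3, &nthWeekday);

	// One time range within the reference day, returned as {"begin": <time_t>, "end": <time_t>}.
	icinga::Dictionary::Ptr segment = icinga::LegacyTimePeriod::ProcessTimeRange("09:00-17:00", &reference);
	(void)segment;
}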
diff --git a/lib/icinga/legacytimeperiod.hpp b/lib/icinga/legacytimeperiod.hpp
new file mode 100644
index 0000000..001eb5c
--- /dev/null
+++ b/lib/icinga/legacytimeperiod.hpp
@@ -0,0 +1,45 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef LEGACYTIMEPERIOD_H
+#define LEGACYTIMEPERIOD_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/timeperiod.hpp"
+#include "base/dictionary.hpp"
+#include <boost/date_time/gregorian/gregorian.hpp>
+
+namespace icinga
+{
+
+/**
+ * Implements Icinga 1.x time periods.
+ *
+ * @ingroup icinga
+ */
+class LegacyTimePeriod
+{
+public:
+ static Array::Ptr ScriptFunc(const TimePeriod::Ptr& tp, double start, double end);
+
+ static bool IsInTimeRange(const tm *begin, const tm *end, int stride, const tm *reference);
+ static void FindNthWeekday(int wday, int n, tm *reference);
+ static int WeekdayFromString(const String& daydef);
+ static int MonthFromString(const String& monthdef);
+ static void ParseTimeSpec(const String& timespec, tm *begin, tm *end, const tm *reference);
+ static void ParseTimeRange(const String& timerange, tm *begin, tm *end, int *stride, const tm *reference);
+ static bool IsInDayDefinition(const String& daydef, const tm *reference);
+ static void ProcessTimeRangeRaw(const String& timerange, const tm *reference, tm *begin, tm *end);
+ static Dictionary::Ptr ProcessTimeRange(const String& timerange, const tm *reference);
+ static void ProcessTimeRanges(const String& timeranges, const tm *reference, const Array::Ptr& result);
+ static Dictionary::Ptr FindNextSegment(const String& daydef, const String& timeranges, const tm *reference);
+ static Dictionary::Ptr FindRunningSegment(const String& daydef, const String& timeranges, const tm *reference);
+
+private:
+ LegacyTimePeriod();
+
+ static boost::gregorian::date GetEndOfMonthDay(int year, int month);
+};
+
+}
+
+#endif /* LEGACYTIMEPERIOD_H */
diff --git a/lib/icinga/macroprocessor.cpp b/lib/icinga/macroprocessor.cpp
new file mode 100644
index 0000000..724a4f9
--- /dev/null
+++ b/lib/icinga/macroprocessor.cpp
@@ -0,0 +1,585 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/macroprocessor.hpp"
+#include "icinga/macroresolver.hpp"
+#include "icinga/customvarobject.hpp"
+#include "icinga/envresolver.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "base/array.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+#include "base/context.hpp"
+#include "base/configobject.hpp"
+#include "base/scriptframe.hpp"
+#include "base/convert.hpp"
+#include "base/exception.hpp"
+#include <boost/algorithm/string/join.hpp>
+
+using namespace icinga;
+
+thread_local Dictionary::Ptr MacroResolver::OverrideMacros;
+
+Value MacroProcessor::ResolveMacros(const Value& str, const ResolverList& resolvers,
+ const CheckResult::Ptr& cr, String *missingMacro,
+ const MacroProcessor::EscapeCallback& escapeFn, const Dictionary::Ptr& resolvedMacros,
+ bool useResolvedMacros, int recursionLevel)
+{
+ if (useResolvedMacros)
+ REQUIRE_NOT_NULL(resolvedMacros);
+
+ Value result;
+
+ if (str.IsEmpty())
+ return Empty;
+
+ if (str.IsScalar()) {
+ result = InternalResolveMacros(str, resolvers, cr, missingMacro, escapeFn,
+ resolvedMacros, useResolvedMacros, recursionLevel + 1);
+ } else if (str.IsObjectType<Array>()) {
+ ArrayData resultArr;
+ Array::Ptr arr = str;
+
+ ObjectLock olock(arr);
+
+ for (const Value& arg : arr) {
+ /* Note: don't escape macros here. */
+ Value value = InternalResolveMacros(arg, resolvers, cr, missingMacro,
+ EscapeCallback(), resolvedMacros, useResolvedMacros, recursionLevel + 1);
+
+ if (value.IsObjectType<Array>())
+ resultArr.push_back(Utility::Join(value, ';'));
+ else
+ resultArr.push_back(value);
+ }
+
+ result = new Array(std::move(resultArr));
+ } else if (str.IsObjectType<Dictionary>()) {
+ Dictionary::Ptr resultDict = new Dictionary();
+ Dictionary::Ptr dict = str;
+
+ ObjectLock olock(dict);
+
+ for (const Dictionary::Pair& kv : dict) {
+ /* Note: don't escape macros here. */
+ resultDict->Set(kv.first, InternalResolveMacros(kv.second, resolvers, cr, missingMacro,
+ EscapeCallback(), resolvedMacros, useResolvedMacros, recursionLevel + 1));
+ }
+
+ result = resultDict;
+ } else if (str.IsObjectType<Function>()) {
+ result = EvaluateFunction(str, resolvers, cr, escapeFn, resolvedMacros, useResolvedMacros, 0);
+ } else {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Macro is not a string or array."));
+ }
+
+ return result;
+}
+
+static const EnvResolver::Ptr l_EnvResolver = new EnvResolver();
+
+static MacroProcessor::ResolverList GetDefaultResolvers()
+{
+ return {
+ { "icinga", IcingaApplication::GetInstance() },
+ { "env", l_EnvResolver, false }
+ };
+}
+
+bool MacroProcessor::ResolveMacro(const String& macro, const ResolverList& resolvers,
+ const CheckResult::Ptr& cr, Value *result, bool *recursive_macro)
+{
+ CONTEXT("Resolving macro '" << macro << "'");
+
+ *recursive_macro = false;
+
+ std::vector<String> tokens = macro.Split(".");
+
+ String objName;
+ if (tokens.size() > 1) {
+ objName = tokens[0];
+ tokens.erase(tokens.begin());
+ }
+
+ const auto defaultResolvers (GetDefaultResolvers());
+
+ for (auto resolverList : {&resolvers, &defaultResolvers}) {
+ for (auto& resolver : *resolverList) {
+ if (!objName.IsEmpty() && objName != resolver.Name)
+ continue;
+
+ if (objName.IsEmpty()) {
+ if (!resolver.ResolveShortMacros)
+ continue;
+
+ Dictionary::Ptr vars;
+ CustomVarObject::Ptr dobj = dynamic_pointer_cast<CustomVarObject>(resolver.Obj);
+
+ if (dobj) {
+ vars = dobj->GetVars();
+ } else {
+ auto app (dynamic_pointer_cast<IcingaApplication>(resolver.Obj));
+
+ if (app) {
+ vars = app->GetVars();
+ }
+ }
+
+ if (vars && vars->Contains(macro)) {
+ *result = vars->Get(macro);
+ *recursive_macro = true;
+ return true;
+ }
+ }
+
+ auto *mresolver = dynamic_cast<MacroResolver *>(resolver.Obj.get());
+
+ if (mresolver && mresolver->ResolveMacro(boost::algorithm::join(tokens, "."), cr, result))
+ return true;
+
+ Value ref = resolver.Obj;
+ bool valid = true;
+
+ for (const String& token : tokens) {
+ if (ref.IsObjectType<Dictionary>()) {
+ Dictionary::Ptr dict = ref;
+ if (dict->Contains(token)) {
+ ref = dict->Get(token);
+ continue;
+ } else {
+ valid = false;
+ break;
+ }
+ } else if (ref.IsObject()) {
+ Object::Ptr object = ref;
+
+ Type::Ptr type = object->GetReflectionType();
+
+ if (!type) {
+ valid = false;
+ break;
+ }
+
+ int field = type->GetFieldId(token);
+
+ if (field == -1) {
+ valid = false;
+ break;
+ }
+
+ ref = object->GetField(field);
+
+ Field fieldInfo = type->GetFieldInfo(field);
+
+ if (strcmp(fieldInfo.TypeName, "Timestamp") == 0)
+ ref = static_cast<long>(ref);
+ }
+ }
+
+ if (valid) {
+ if (tokens[0] == "vars" ||
+ tokens[0] == "action_url" ||
+ tokens[0] == "notes_url" ||
+ tokens[0] == "notes")
+ *recursive_macro = true;
+
+ *result = ref;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+Value MacroProcessor::EvaluateFunction(const Function::Ptr& func, const ResolverList& resolvers,
+ const CheckResult::Ptr& cr, const MacroProcessor::EscapeCallback& escapeFn,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros, int recursionLevel)
+{
+ Dictionary::Ptr resolvers_this = new Dictionary();
+ const auto defaultResolvers (GetDefaultResolvers());
+
+ for (auto resolverList : {&resolvers, &defaultResolvers}) {
+ for (auto& resolver: *resolverList) {
+ resolvers_this->Set(resolver.Name, resolver.Obj);
+ }
+ }
+
+ auto internalResolveMacrosShim = [resolvers, cr, resolvedMacros, useResolvedMacros, recursionLevel](const std::vector<Value>& args) {
+ if (args.size() < 1)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Too few arguments for function"));
+
+ String missingMacro;
+
+ return MacroProcessor::InternalResolveMacros(args[0], resolvers, cr, &missingMacro, MacroProcessor::EscapeCallback(),
+ resolvedMacros, useResolvedMacros, recursionLevel);
+ };
+
+ resolvers_this->Set("macro", new Function("macro (temporary)", internalResolveMacrosShim, { "str" }));
+
+ auto internalResolveArgumentsShim = [resolvers, cr, resolvedMacros, useResolvedMacros, recursionLevel](const std::vector<Value>& args) {
+ if (args.size() < 2)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Too few arguments for function"));
+
+ return MacroProcessor::ResolveArguments(args[0], args[1], resolvers, cr,
+ resolvedMacros, useResolvedMacros, recursionLevel + 1);
+ };
+
+ resolvers_this->Set("resolve_arguments", new Function("resolve_arguments (temporary)", internalResolveArgumentsShim, { "command", "args" }));
+
+ return func->InvokeThis(resolvers_this);
+}
+
+Value MacroProcessor::InternalResolveMacros(const String& str, const ResolverList& resolvers,
+ const CheckResult::Ptr& cr, String *missingMacro,
+ const MacroProcessor::EscapeCallback& escapeFn, const Dictionary::Ptr& resolvedMacros,
+ bool useResolvedMacros, int recursionLevel)
+{
+ CONTEXT("Resolving macros for string '" << str << "'");
+
+ if (recursionLevel > 15)
+ BOOST_THROW_EXCEPTION(std::runtime_error("Infinite recursion detected while resolving macros"));
+
+ size_t offset, pos_first, pos_second;
+ offset = 0;
+
+ Dictionary::Ptr resolvers_this;
+
+ String result = str;
+ while ((pos_first = result.FindFirstOf("$", offset)) != String::NPos) {
+ pos_second = result.FindFirstOf("$", pos_first + 1);
+
+ if (pos_second == String::NPos)
+ BOOST_THROW_EXCEPTION(std::runtime_error("Closing $ not found in macro format string."));
+
+ String name = result.SubStr(pos_first + 1, pos_second - pos_first - 1);
+
+ Value resolved_macro;
+ bool recursive_macro;
+ bool found;
+
+ if (useResolvedMacros) {
+ recursive_macro = false;
+ found = resolvedMacros->Contains(name);
+
+ if (found)
+ resolved_macro = resolvedMacros->Get(name);
+ } else
+ found = ResolveMacro(name, resolvers, cr, &resolved_macro, &recursive_macro);
+
+ /* $$ is an escape sequence for $. */
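+		/* Illustrative examples (macro names hypothetical): "$host.name$" is looked up
+		 * via the resolver list, while "snmp $$ community" becomes the literal string
+		 * "snmp $ community" in the result. */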
+ if (name.IsEmpty()) {
+ resolved_macro = "$";
+ found = true;
+ }
+
+ if (resolved_macro.IsObjectType<Function>()) {
+ resolved_macro = EvaluateFunction(resolved_macro, resolvers, cr, escapeFn,
+ resolvedMacros, useResolvedMacros, recursionLevel + 1);
+ }
+
+ if (!found) {
+ if (!missingMacro)
+ Log(LogWarning, "MacroProcessor")
+ << "Macro '" << name << "' is not defined.";
+ else
+ *missingMacro = name;
+ }
+
+ /* recursively resolve macros in the macro if it was a user macro */
+ if (recursive_macro) {
+ if (resolved_macro.IsObjectType<Array>()) {
+ Array::Ptr arr = resolved_macro;
+ ArrayData resolved_arr;
+
+ ObjectLock olock(arr);
+ for (const Value& value : arr) {
+ if (value.IsScalar()) {
+ resolved_arr.push_back(InternalResolveMacros(value,
+ resolvers, cr, missingMacro, EscapeCallback(), nullptr,
+ false, recursionLevel + 1));
+ } else
+ resolved_arr.push_back(value);
+ }
+
+ resolved_macro = new Array(std::move(resolved_arr));
+ } else if (resolved_macro.IsString()) {
+ resolved_macro = InternalResolveMacros(resolved_macro,
+ resolvers, cr, missingMacro, EscapeCallback(), nullptr,
+ false, recursionLevel + 1);
+ }
+ }
+
+ if (!useResolvedMacros && found && resolvedMacros)
+ resolvedMacros->Set(name, resolved_macro);
+
+ if (escapeFn)
+ resolved_macro = escapeFn(resolved_macro);
+
+ /* we're done if this is the only macro and there are no other non-macro parts in the string */
+ if (pos_first == 0 && pos_second == str.GetLength() - 1)
+ return resolved_macro;
+ else if (resolved_macro.IsObjectType<Array>())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Mixing both strings and non-strings in macros is not allowed."));
+
+ if (resolved_macro.IsObjectType<Array>()) {
+ /* don't allow mixing strings and arrays in macro strings */
+ if (pos_first != 0 || pos_second != str.GetLength() - 1)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Mixing both strings and non-strings in macros is not allowed."));
+
+ return resolved_macro;
+ }
+
+ String resolved_macro_str = resolved_macro;
+
+ result.Replace(pos_first, pos_second - pos_first + 1, resolved_macro_str);
+ offset = pos_first + resolved_macro_str.GetLength();
+ }
+
+ return result;
+}
+
+bool MacroProcessor::ValidateMacroString(const String& macro)
+{
+ if (macro.IsEmpty())
+ return true;
+
+ size_t pos_first, pos_second, offset;
+ offset = 0;
+
+ while ((pos_first = macro.FindFirstOf("$", offset)) != String::NPos) {
+ pos_second = macro.FindFirstOf("$", pos_first + 1);
+
+ if (pos_second == String::NPos)
+ return false;
+
+ offset = pos_second + 1;
+ }
+
+ return true;
+}
+
+void MacroProcessor::ValidateCustomVars(const ConfigObject::Ptr& object, const Dictionary::Ptr& value)
+{
+ if (!value)
+ return;
+
+ /* string, array, dictionary */
+ ObjectLock olock(value);
+ for (const Dictionary::Pair& kv : value) {
+ const Value& varval = kv.second;
+
+ if (varval.IsObjectType<Dictionary>()) {
+			/* only one dictionary level */
+ Dictionary::Ptr varval_dict = varval;
+
+ ObjectLock xlock(varval_dict);
+ for (const Dictionary::Pair& kv_var : varval_dict) {
+ if (!kv_var.second.IsString())
+ continue;
+
+ if (!ValidateMacroString(kv_var.second))
+ BOOST_THROW_EXCEPTION(ValidationError(object.get(), { "vars", kv.first, kv_var.first }, "Closing $ not found in macro format string '" + kv_var.second + "'."));
+ }
+ } else if (varval.IsObjectType<Array>()) {
+ /* check all array entries */
+ Array::Ptr varval_arr = varval;
+
+ ObjectLock ylock (varval_arr);
+ for (const Value& arrval : varval_arr) {
+ if (!arrval.IsString())
+ continue;
+
+ if (!ValidateMacroString(arrval)) {
+ BOOST_THROW_EXCEPTION(ValidationError(object.get(), { "vars", kv.first }, "Closing $ not found in macro format string '" + arrval + "'."));
+ }
+ }
+ } else {
+ if (!varval.IsString())
+ continue;
+
+ if (!ValidateMacroString(varval))
+ BOOST_THROW_EXCEPTION(ValidationError(object.get(), { "vars", kv.first }, "Closing $ not found in macro format string '" + varval + "'."));
+ }
+ }
+}
+
+void MacroProcessor::AddArgumentHelper(const Array::Ptr& args, const String& key, const String& value,
+ bool add_key, bool add_value, const Value& separator)
+{
+ if (add_key && separator.GetType() != ValueEmpty && add_value) {
+ args->Add(key + separator + value);
+ } else {
+ if (add_key)
+ args->Add(key);
+
+ if (add_value)
+ args->Add(value);
+ }
+}
+
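+/* Rough example of EscapeMacroShellArg() below (UNIX-like shells, values illustrative):
+ * an array such as [ "-H", "icinga.example.com" ] is joined into
+ * "'-H' 'icinga.example.com'", each element passed through Utility::EscapeShellArg(). */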
+Value MacroProcessor::EscapeMacroShellArg(const Value& value)
+{
+ String result;
+
+ if (value.IsObjectType<Array>()) {
+ Array::Ptr arr = value;
+
+ ObjectLock olock(arr);
+ for (const Value& arg : arr) {
+ if (result.GetLength() > 0)
+ result += " ";
+
+ result += Utility::EscapeShellArg(arg);
+ }
+ } else
+ result = Utility::EscapeShellArg(value);
+
+ return result;
+}
+
+struct CommandArgument
+{
+ int Order{0};
+ bool SkipKey{false};
+ bool RepeatKey{true};
+ bool SkipValue{false};
+ String Key;
+ Value Separator;
+ Value AValue;
+
+ bool operator<(const CommandArgument& rhs) const
+ {
+ return Order < rhs.Order;
+ }
+};
+
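+/* For orientation, ResolveArguments() below consumes an "arguments" dictionary as
+ * written in the Icinga DSL (a sketch with hypothetical macro names):
+ *
+ *   arguments = {
+ *     "-w" = "$disk_wfree$"
+ *     "-c" = {
+ *       value = "$disk_cfree$"
+ *       set_if = "$disk_thresholds_enabled$"
+ *       order = 1
+ *     }
+ *   }
+ *
+ * Each entry becomes a CommandArgument; set_if, order, separator, repeat_key and
+ * skip_key map to the fields above. */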
+Value MacroProcessor::ResolveArguments(const Value& command, const Dictionary::Ptr& arguments,
+ const MacroProcessor::ResolverList& resolvers, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros, int recursionLevel)
+{
+ if (useResolvedMacros)
+ REQUIRE_NOT_NULL(resolvedMacros);
+
+ Value resolvedCommand;
+ if (!arguments || command.IsObjectType<Array>() || command.IsObjectType<Function>())
+ resolvedCommand = MacroProcessor::ResolveMacros(command, resolvers, cr, nullptr,
+ EscapeMacroShellArg, resolvedMacros, useResolvedMacros, recursionLevel + 1);
+ else {
+ resolvedCommand = new Array({ command });
+ }
+
+ if (arguments) {
+ std::vector<CommandArgument> args;
+
+ ObjectLock olock(arguments);
+ for (const Dictionary::Pair& kv : arguments) {
+ const Value& arginfo = kv.second;
+
+ CommandArgument arg;
+ arg.Key = kv.first;
+
+ bool required = false;
+ Value argval;
+
+ if (arginfo.IsObjectType<Dictionary>()) {
+ Dictionary::Ptr argdict = arginfo;
+ if (argdict->Contains("key"))
+ arg.Key = argdict->Get("key");
+ argval = argdict->Get("value");
+ if (argdict->Contains("required"))
+ required = argdict->Get("required");
+ arg.SkipKey = argdict->Get("skip_key");
+ if (argdict->Contains("repeat_key"))
+ arg.RepeatKey = argdict->Get("repeat_key");
+ arg.Order = argdict->Get("order");
+ arg.Separator = argdict->Get("separator");
+
+ Value set_if = argdict->Get("set_if");
+
+ if (!set_if.IsEmpty()) {
+ String missingMacro;
+ Value set_if_resolved = MacroProcessor::ResolveMacros(set_if, resolvers,
+ cr, &missingMacro, MacroProcessor::EscapeCallback(), resolvedMacros,
+ useResolvedMacros, recursionLevel + 1);
+
+ if (!missingMacro.IsEmpty())
+ continue;
+
+ int value;
+
+ if (set_if_resolved == "true")
+ value = 1;
+ else if (set_if_resolved == "false")
+ value = 0;
+ else {
+ try {
+ value = Convert::ToLong(set_if_resolved);
+ } catch (const std::exception& ex) {
+ /* tried to convert a string */
+ Log(LogWarning, "PluginUtility")
+ << "Error evaluating set_if value '" << set_if_resolved
+ << "' used in argument '" << arg.Key << "': " << ex.what();
+ continue;
+ }
+ }
+
+ if (!value)
+ continue;
+ }
+ }
+ else
+ argval = arginfo;
+
+ if (argval.IsEmpty())
+ arg.SkipValue = true;
+
+ String missingMacro;
+ arg.AValue = MacroProcessor::ResolveMacros(argval, resolvers,
+ cr, &missingMacro, MacroProcessor::EscapeCallback(), resolvedMacros,
+ useResolvedMacros, recursionLevel + 1);
+
+ if (!missingMacro.IsEmpty()) {
+ if (required) {
+ BOOST_THROW_EXCEPTION(ScriptError("Non-optional macro '" + missingMacro + "' used in argument '" +
+ arg.Key + "' is missing."));
+ }
+
+ continue;
+ }
+
+ args.emplace_back(std::move(arg));
+ }
+
+ std::sort(args.begin(), args.end());
+
+ Array::Ptr command_arr = resolvedCommand;
+ for (const CommandArgument& arg : args) {
+
+ if (arg.AValue.IsObjectType<Dictionary>()) {
+ Log(LogWarning, "PluginUtility")
+ << "Tried to use dictionary in argument '" << arg.Key << "'.";
+ continue;
+ } else if (arg.AValue.IsObjectType<Array>()) {
+ bool first = true;
+ Array::Ptr arr = static_cast<Array::Ptr>(arg.AValue);
+
+ ObjectLock olock(arr);
+ for (const Value& value : arr) {
+ bool add_key;
+
+ if (first) {
+ first = false;
+ add_key = !arg.SkipKey;
+ } else
+ add_key = !arg.SkipKey && arg.RepeatKey;
+
+ AddArgumentHelper(command_arr, arg.Key, value, add_key, !arg.SkipValue, arg.Separator);
+ }
+ } else
+ AddArgumentHelper(command_arr, arg.Key, arg.AValue, !arg.SkipKey, !arg.SkipValue, arg.Separator);
+ }
+ }
+
+ return resolvedCommand;
+}
diff --git a/lib/icinga/macroprocessor.hpp b/lib/icinga/macroprocessor.hpp
new file mode 100644
index 0000000..7e74821
--- /dev/null
+++ b/lib/icinga/macroprocessor.hpp
@@ -0,0 +1,75 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef MACROPROCESSOR_H
+#define MACROPROCESSOR_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/checkable.hpp"
+#include "base/value.hpp"
+#include <vector>
+#include <utility>
+
+namespace icinga
+{
+
+/**
+ * Resolves macros.
+ *
+ * @ingroup icinga
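+ *
+ * Minimal usage sketch (the host/service objects are assumptions, not part of this header):
+ *
+ *   MacroProcessor::ResolverList resolvers {
+ *     { "host", host },
+ *     { "service", service }
+ *   };
+ *   Value cmdline = MacroProcessor::ResolveMacros("$host.name$!$service.name$", resolvers);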
+ */
+class MacroProcessor
+{
+public:
+ struct ResolverSpec
+ {
+ String Name;
+ Object::Ptr Obj;
+
+ // Whether to resolve not only e.g. $host.address$, but also just $address$
+ bool ResolveShortMacros;
+
+ inline ResolverSpec(String name, Object::Ptr obj, bool resolveShortMacros = true)
+ : Name(std::move(name)), Obj(std::move(obj)), ResolveShortMacros(resolveShortMacros)
+ {
+ }
+ };
+
+ typedef std::function<Value (const Value&)> EscapeCallback;
+ typedef std::vector<ResolverSpec> ResolverList;
+
+ static Value ResolveMacros(const Value& str, const ResolverList& resolvers,
+ const CheckResult::Ptr& cr = nullptr, String *missingMacro = nullptr,
+ const EscapeCallback& escapeFn = EscapeCallback(),
+ const Dictionary::Ptr& resolvedMacros = nullptr,
+ bool useResolvedMacros = false, int recursionLevel = 0);
+
+ static Value ResolveArguments(const Value& command, const Dictionary::Ptr& arguments,
+ const MacroProcessor::ResolverList& resolvers, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros, int recursionLevel = 0);
+
+ static bool ValidateMacroString(const String& macro);
+ static void ValidateCustomVars(const ConfigObject::Ptr& object, const Dictionary::Ptr& value);
+
+private:
+ MacroProcessor();
+
+ static bool ResolveMacro(const String& macro, const ResolverList& resolvers,
+ const CheckResult::Ptr& cr, Value *result, bool *recursive_macro);
+ static Value InternalResolveMacros(const String& str,
+ const ResolverList& resolvers, const CheckResult::Ptr& cr,
+ String *missingMacro, const EscapeCallback& escapeFn,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros,
+ int recursionLevel = 0);
+ static Value EvaluateFunction(const Function::Ptr& func, const ResolverList& resolvers,
+ const CheckResult::Ptr& cr, const MacroProcessor::EscapeCallback& escapeFn,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros, int recursionLevel);
+
+ static void AddArgumentHelper(const Array::Ptr& args, const String& key, const String& value,
+ bool add_key, bool add_value, const Value& separator);
+ static Value EscapeMacroShellArg(const Value& value);
+
+};
+
+}
+
+#endif /* MACROPROCESSOR_H */
diff --git a/lib/icinga/macroresolver.hpp b/lib/icinga/macroresolver.hpp
new file mode 100644
index 0000000..62cd41d
--- /dev/null
+++ b/lib/icinga/macroresolver.hpp
@@ -0,0 +1,31 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef MACRORESOLVER_H
+#define MACRORESOLVER_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/checkresult.hpp"
+#include "base/dictionary.hpp"
+#include "base/string.hpp"
+
+namespace icinga
+{
+
+/**
+ * Resolves macros.
+ *
+ * @ingroup icinga
+ */
+class MacroResolver
+{
+public:
+ DECLARE_PTR_TYPEDEFS(MacroResolver);
+
+ static thread_local Dictionary::Ptr OverrideMacros;
+
+ virtual bool ResolveMacro(const String& macro, const CheckResult::Ptr& cr, Value *result) const = 0;
+};
+
+}
+
+#endif /* MACRORESOLVER_H */
diff --git a/lib/icinga/notification-apply.cpp b/lib/icinga/notification-apply.cpp
new file mode 100644
index 0000000..f5b3764
--- /dev/null
+++ b/lib/icinga/notification-apply.cpp
@@ -0,0 +1,161 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/notification.hpp"
+#include "icinga/service.hpp"
+#include "config/configitembuilder.hpp"
+#include "config/applyrule.hpp"
+#include "base/initialize.hpp"
+#include "base/configtype.hpp"
+#include "base/logger.hpp"
+#include "base/context.hpp"
+#include "base/workqueue.hpp"
+#include "base/exception.hpp"
+
+using namespace icinga;
+
+INITIALIZE_ONCE([]() {
+ ApplyRule::RegisterType("Notification", { "Host", "Service" });
+});
+
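+/* For context, the functions below evaluate config rules of this shape
+ * (template, user and filter names are illustrative, not defined here):
+ *
+ *   apply Notification "mail" to Service {
+ *     import "mail-service-notification"
+ *     users = [ "icingaadmin" ]
+ *     assign where service.vars.notify == true
+ *   }
+ */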
+bool Notification::EvaluateApplyRuleInstance(const Checkable::Ptr& checkable, const String& name, ScriptFrame& frame, const ApplyRule& rule, bool skipFilter)
+{
+ if (!skipFilter && !rule.EvaluateFilter(frame))
+ return false;
+
+ auto& di (rule.GetDebugInfo());
+
+#ifdef _DEBUG
+ Log(LogDebug, "Notification")
+ << "Applying notification '" << name << "' to object '" << checkable->GetName() << "' for rule " << di;
+#endif /* _DEBUG */
+
+ ConfigItemBuilder builder{di};
+ builder.SetType(Notification::TypeInstance);
+ builder.SetName(name);
+ builder.SetScope(frame.Locals->ShallowClone());
+ builder.SetIgnoreOnError(rule.GetIgnoreOnError());
+
+ builder.AddExpression(new ImportDefaultTemplatesExpression());
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ builder.AddExpression(new SetExpression(MakeIndexer(ScopeThis, "host_name"), OpSetLiteral, MakeLiteral(host->GetName()), di));
+
+ if (service)
+ builder.AddExpression(new SetExpression(MakeIndexer(ScopeThis, "service_name"), OpSetLiteral, MakeLiteral(service->GetShortName()), di));
+
+ String zone = checkable->GetZoneName();
+
+ if (!zone.IsEmpty())
+ builder.AddExpression(new SetExpression(MakeIndexer(ScopeThis, "zone"), OpSetLiteral, MakeLiteral(zone), di));
+
+ builder.AddExpression(new SetExpression(MakeIndexer(ScopeThis, "package"), OpSetLiteral, MakeLiteral(rule.GetPackage()), di));
+
+ builder.AddExpression(new OwnedExpression(rule.GetExpression()));
+
+ ConfigItem::Ptr notificationItem = builder.Compile();
+ notificationItem->Register();
+
+ return true;
+}
+
+bool Notification::EvaluateApplyRule(const Checkable::Ptr& checkable, const ApplyRule& rule, bool skipFilter)
+{
+ auto& di (rule.GetDebugInfo());
+
+ CONTEXT("Evaluating 'apply' rule (" << di << ")");
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ ScriptFrame frame(true);
+ if (rule.GetScope())
+ rule.GetScope()->CopyTo(frame.Locals);
+ frame.Locals->Set("host", host);
+ if (service)
+ frame.Locals->Set("service", service);
+
+ Value vinstances;
+
+ if (rule.GetFTerm()) {
+ try {
+ vinstances = rule.GetFTerm()->Evaluate(frame);
+ } catch (const std::exception&) {
+ /* Silently ignore errors here and assume there are no instances. */
+ return false;
+ }
+ } else {
+ vinstances = new Array({ "" });
+ }
+
+ bool match = false;
+
+ if (vinstances.IsObjectType<Array>()) {
+ if (!rule.GetFVVar().IsEmpty())
+ BOOST_THROW_EXCEPTION(ScriptError("Dictionary iterator requires value to be a dictionary.", di));
+
+ Array::Ptr arr = vinstances;
+
+ ObjectLock olock(arr);
+ for (const Value& instance : arr) {
+ String name = rule.GetName();
+
+ if (!rule.GetFKVar().IsEmpty()) {
+ frame.Locals->Set(rule.GetFKVar(), instance);
+ name += instance;
+ }
+
+ if (EvaluateApplyRuleInstance(checkable, name, frame, rule, skipFilter))
+ match = true;
+ }
+ } else if (vinstances.IsObjectType<Dictionary>()) {
+ if (rule.GetFVVar().IsEmpty())
+ BOOST_THROW_EXCEPTION(ScriptError("Array iterator requires value to be an array.", di));
+
+ Dictionary::Ptr dict = vinstances;
+
+ for (const String& key : dict->GetKeys()) {
+ frame.Locals->Set(rule.GetFKVar(), key);
+ frame.Locals->Set(rule.GetFVVar(), dict->Get(key));
+
+ if (EvaluateApplyRuleInstance(checkable, rule.GetName() + key, frame, rule, skipFilter))
+ match = true;
+ }
+ }
+
+ return match;
+}
+
+void Notification::EvaluateApplyRules(const Host::Ptr& host)
+{
+ CONTEXT("Evaluating 'apply' rules for host '" << host->GetName() << "'");
+
+ for (auto& rule : ApplyRule::GetRules(Notification::TypeInstance, Host::TypeInstance))
+ {
+ if (EvaluateApplyRule(host, *rule))
+ rule->AddMatch();
+ }
+
+ for (auto& rule : ApplyRule::GetTargetedHostRules(Notification::TypeInstance, host->GetName())) {
+ if (EvaluateApplyRule(host, *rule, true))
+ rule->AddMatch();
+ }
+}
+
+void Notification::EvaluateApplyRules(const Service::Ptr& service)
+{
+ CONTEXT("Evaluating 'apply' rules for service '" << service->GetName() << "'");
+
+ for (auto& rule : ApplyRule::GetRules(Notification::TypeInstance, Service::TypeInstance)) {
+ if (EvaluateApplyRule(service, *rule))
+ rule->AddMatch();
+ }
+
+ for (auto& rule : ApplyRule::GetTargetedServiceRules(Notification::TypeInstance, service->GetHost()->GetName(), service->GetShortName())) {
+ if (EvaluateApplyRule(service, *rule, true))
+ rule->AddMatch();
+ }
+}
diff --git a/lib/icinga/notification.cpp b/lib/icinga/notification.cpp
new file mode 100644
index 0000000..ab8d42b
--- /dev/null
+++ b/lib/icinga/notification.cpp
@@ -0,0 +1,812 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/notification.hpp"
+#include "icinga/notification-ti.cpp"
+#include "icinga/notificationcommand.hpp"
+#include "icinga/service.hpp"
+#include "remote/apilistener.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+#include "base/utility.hpp"
+#include "base/convert.hpp"
+#include "base/exception.hpp"
+#include "base/initialize.hpp"
+#include "base/scriptglobal.hpp"
+#include <algorithm>
+
+using namespace icinga;
+
+REGISTER_TYPE(Notification);
+INITIALIZE_ONCE(&Notification::StaticInitialize);
+
+std::map<String, int> Notification::m_StateFilterMap;
+std::map<String, int> Notification::m_TypeFilterMap;
+
+boost::signals2::signal<void (const Notification::Ptr&, const MessageOrigin::Ptr&)> Notification::OnNextNotificationChanged;
+boost::signals2::signal<void (const Notification::Ptr&, const String&, uint_fast8_t, const MessageOrigin::Ptr&)> Notification::OnLastNotifiedStatePerUserUpdated;
+boost::signals2::signal<void (const Notification::Ptr&, const MessageOrigin::Ptr&)> Notification::OnLastNotifiedStatePerUserCleared;
+
+String NotificationNameComposer::MakeName(const String& shortName, const Object::Ptr& context) const
+{
+ Notification::Ptr notification = dynamic_pointer_cast<Notification>(context);
+
+ if (!notification)
+ return "";
+
+ String name = notification->GetHostName();
+
+ if (!notification->GetServiceName().IsEmpty())
+ name += "!" + notification->GetServiceName();
+
+ name += "!" + shortName;
+
+ return name;
+}
+
+Dictionary::Ptr NotificationNameComposer::ParseName(const String& name) const
+{
+ std::vector<String> tokens = name.Split("!");
+
+ if (tokens.size() < 2)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid Notification name."));
+
+ Dictionary::Ptr result = new Dictionary();
+ result->Set("host_name", tokens[0]);
+
+ if (tokens.size() > 2) {
+ result->Set("service_name", tokens[1]);
+ result->Set("name", tokens[2]);
+ } else {
+ result->Set("name", tokens[1]);
+ }
+
+ return result;
+}
+
+void Notification::StaticInitialize()
+{
+ ScriptGlobal::Set("Icinga.OK", "OK");
+ ScriptGlobal::Set("Icinga.Warning", "Warning");
+ ScriptGlobal::Set("Icinga.Critical", "Critical");
+ ScriptGlobal::Set("Icinga.Unknown", "Unknown");
+ ScriptGlobal::Set("Icinga.Up", "Up");
+ ScriptGlobal::Set("Icinga.Down", "Down");
+
+ ScriptGlobal::Set("Icinga.DowntimeStart", "DowntimeStart");
+ ScriptGlobal::Set("Icinga.DowntimeEnd", "DowntimeEnd");
+ ScriptGlobal::Set("Icinga.DowntimeRemoved", "DowntimeRemoved");
+ ScriptGlobal::Set("Icinga.Custom", "Custom");
+ ScriptGlobal::Set("Icinga.Acknowledgement", "Acknowledgement");
+ ScriptGlobal::Set("Icinga.Problem", "Problem");
+ ScriptGlobal::Set("Icinga.Recovery", "Recovery");
+ ScriptGlobal::Set("Icinga.FlappingStart", "FlappingStart");
+ ScriptGlobal::Set("Icinga.FlappingEnd", "FlappingEnd");
+
+ m_StateFilterMap["OK"] = StateFilterOK;
+ m_StateFilterMap["Warning"] = StateFilterWarning;
+ m_StateFilterMap["Critical"] = StateFilterCritical;
+ m_StateFilterMap["Unknown"] = StateFilterUnknown;
+ m_StateFilterMap["Up"] = StateFilterUp;
+ m_StateFilterMap["Down"] = StateFilterDown;
+
+ m_TypeFilterMap["DowntimeStart"] = NotificationDowntimeStart;
+ m_TypeFilterMap["DowntimeEnd"] = NotificationDowntimeEnd;
+ m_TypeFilterMap["DowntimeRemoved"] = NotificationDowntimeRemoved;
+ m_TypeFilterMap["Custom"] = NotificationCustom;
+ m_TypeFilterMap["Acknowledgement"] = NotificationAcknowledgement;
+ m_TypeFilterMap["Problem"] = NotificationProblem;
+ m_TypeFilterMap["Recovery"] = NotificationRecovery;
+ m_TypeFilterMap["FlappingStart"] = NotificationFlappingStart;
+ m_TypeFilterMap["FlappingEnd"] = NotificationFlappingEnd;
+}
+
+void Notification::OnConfigLoaded()
+{
+ ObjectImpl<Notification>::OnConfigLoaded();
+
+ SetTypeFilter(FilterArrayToInt(GetTypes(), GetTypeFilterMap(), ~0));
+ SetStateFilter(FilterArrayToInt(GetStates(), GetStateFilterMap(), ~0));
+}
+
+void Notification::OnAllConfigLoaded()
+{
+ ObjectImpl<Notification>::OnAllConfigLoaded();
+
+ Host::Ptr host = Host::GetByName(GetHostName());
+
+ if (GetServiceName().IsEmpty())
+ m_Checkable = host;
+ else
+ m_Checkable = host->GetServiceByShortName(GetServiceName());
+
+ if (!m_Checkable)
+ BOOST_THROW_EXCEPTION(ScriptError("Notification object refers to a host/service which doesn't exist.", GetDebugInfo()));
+
+ GetCheckable()->RegisterNotification(this);
+}
+
+void Notification::Start(bool runtimeCreated)
+{
+ Checkable::Ptr obj = GetCheckable();
+
+ if (obj)
+ obj->RegisterNotification(this);
+
+ if (ApiListener::IsHACluster() && GetNextNotification() < Utility::GetTime() + 60)
+ SetNextNotification(Utility::GetTime() + 60, true);
+
+ for (const UserGroup::Ptr& group : GetUserGroups())
+ group->AddNotification(this);
+
+ ObjectImpl<Notification>::Start(runtimeCreated);
+}
+
+void Notification::Stop(bool runtimeRemoved)
+{
+ ObjectImpl<Notification>::Stop(runtimeRemoved);
+
+ Checkable::Ptr obj = GetCheckable();
+
+ if (obj)
+ obj->UnregisterNotification(this);
+
+ for (const UserGroup::Ptr& group : GetUserGroups())
+ group->RemoveNotification(this);
+}
+
+Checkable::Ptr Notification::GetCheckable() const
+{
+ return static_pointer_cast<Checkable>(m_Checkable);
+}
+
+NotificationCommand::Ptr Notification::GetCommand() const
+{
+ return NotificationCommand::GetByName(GetCommandRaw());
+}
+
+std::set<User::Ptr> Notification::GetUsers() const
+{
+ std::set<User::Ptr> result;
+
+ Array::Ptr users = GetUsersRaw();
+
+ if (users) {
+ ObjectLock olock(users);
+
+ for (const String& name : users) {
+ User::Ptr user = User::GetByName(name);
+
+ if (!user)
+ continue;
+
+ result.insert(user);
+ }
+ }
+
+ return result;
+}
+
+std::set<UserGroup::Ptr> Notification::GetUserGroups() const
+{
+ std::set<UserGroup::Ptr> result;
+
+ Array::Ptr groups = GetUserGroupsRaw();
+
+ if (groups) {
+ ObjectLock olock(groups);
+
+ for (const String& name : groups) {
+ UserGroup::Ptr ug = UserGroup::GetByName(name);
+
+ if (!ug)
+ continue;
+
+ result.insert(ug);
+ }
+ }
+
+ return result;
+}
+
+TimePeriod::Ptr Notification::GetPeriod() const
+{
+ return TimePeriod::GetByName(GetPeriodRaw());
+}
+
+void Notification::UpdateNotificationNumber()
+{
+ SetNotificationNumber(GetNotificationNumber() + 1);
+}
+
+void Notification::ResetNotificationNumber()
+{
+ SetNotificationNumber(0);
+}
+
+void Notification::BeginExecuteNotification(NotificationType type, const CheckResult::Ptr& cr, bool force, bool reminder, const String& author, const String& text)
+{
+ String notificationName = GetName();
+ String notificationTypeName = NotificationTypeToString(type);
+
+ Log(LogNotice, "Notification")
+ << "Attempting to send " << (reminder ? "reminder " : "")
+ << "notifications of type '" << notificationTypeName
+ << "' for notification object '" << notificationName << "'.";
+
+ if (type == NotificationRecovery) {
+ auto states (GetLastNotifiedStatePerUser());
+
+ states->Clear();
+ OnLastNotifiedStatePerUserCleared(this, nullptr);
+ }
+
+ Checkable::Ptr checkable = GetCheckable();
+
+ if (!force) {
+ TimePeriod::Ptr tp = GetPeriod();
+
+ if (tp && !tp->IsInside(Utility::GetTime())) {
+ Log(LogNotice, "Notification")
+ << "Not sending " << (reminder ? "reminder " : "") << "notifications for notification object '" << notificationName
+ << "': not in timeperiod '" << tp->GetName() << "'";
+
+ if (!reminder) {
+ switch (type) {
+ case NotificationProblem:
+ case NotificationRecovery:
+ case NotificationFlappingStart:
+ case NotificationFlappingEnd:
+ {
+ /* If a non-reminder notification was suppressed, but just because of its time period,
+							 * stash it into a notification types bitmask so it can be re-sent later if appropriate.
+ */
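+							/* Worked example (bit values from the NotificationType enum): if a Problem (32)
+							 * was already suppressed and a Recovery (64) is now suppressed as well, the pair
+							 * 32 | 64 matches one of the conflict masks below, so both bits are cleared again.
+							 */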
+
+ ObjectLock olock (this);
+ int suppressedTypesBefore (GetSuppressedNotifications());
+ int suppressedTypesAfter (suppressedTypesBefore | type);
+
+ for (int conflict : {NotificationProblem | NotificationRecovery, NotificationFlappingStart | NotificationFlappingEnd}) {
+ /* E.g. problem and recovery notifications neutralize each other. */
+
+ if ((suppressedTypesAfter & conflict) == conflict) {
+ suppressedTypesAfter &= ~conflict;
+ }
+ }
+
+ if (suppressedTypesAfter != suppressedTypesBefore) {
+ SetSuppressedNotifications(suppressedTypesAfter);
+ }
+ }
+ default:
+ ; // Cheating the compiler on "5 enumeration values not handled in switch"
+ }
+ }
+
+ return;
+ }
+
+ double now = Utility::GetTime();
+ Dictionary::Ptr times = GetTimes();
+
+ if (times && type == NotificationProblem) {
+ Value timesBegin = times->Get("begin");
+ Value timesEnd = times->Get("end");
+
+ if (timesBegin != Empty && timesBegin >= 0 && now < checkable->GetLastHardStateChange() + timesBegin) {
+ Log(LogNotice, "Notification")
+ << "Not sending " << (reminder ? "reminder " : "") << "notifications for notification object '"
+ << notificationName << "': before specified begin time (" << Utility::FormatDuration(timesBegin) << ")";
+
+ /* we need to adjust the next notification time
+ * delaying the first notification
+ */
+ SetNextNotification(checkable->GetLastHardStateChange() + timesBegin + 1.0);
+
+ /*
+ * We need to set no more notifications to false, in case
+ * some notifications were sent previously
+ */
+ SetNoMoreNotifications(false);
+
+ return;
+ }
+
+ if (timesEnd != Empty && timesEnd >= 0 && now > checkable->GetLastHardStateChange() + timesEnd) {
+ Log(LogNotice, "Notification")
+ << "Not sending " << (reminder ? "reminder " : "") << "notifications for notification object '"
+ << notificationName << "': after specified end time (" << Utility::FormatDuration(timesEnd) << ")";
+ return;
+ }
+ }
+
+ unsigned long ftype = type;
+
+ Log(LogDebug, "Notification")
+ << "Type '" << NotificationTypeToString(type)
+ << "', TypeFilter: " << NotificationFilterToString(GetTypeFilter(), GetTypeFilterMap())
+ << " (FType=" << ftype << ", TypeFilter=" << GetTypeFilter() << ")";
+
+ if (!(ftype & GetTypeFilter())) {
+ Log(LogNotice, "Notification")
+ << "Not sending " << (reminder ? "reminder " : "") << "notifications for notification object '"
+ << notificationName << "': type '"
+ << NotificationTypeToString(type) << "' does not match type filter: "
+ << NotificationFilterToString(GetTypeFilter(), GetTypeFilterMap()) << ".";
+
+			/* Ensure that no_more_notifications is reset on Recovery notifications,
+ * even if the admin did not configure them in the filter.
+ */
+ {
+ ObjectLock olock(this);
+ if (type == NotificationRecovery && GetInterval() <= 0)
+ SetNoMoreNotifications(false);
+ }
+
+ return;
+ }
+
+ /* Check state filters for problem notifications. Recovery notifications will be filtered away later. */
+ if (type == NotificationProblem) {
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ unsigned long fstate;
+ String stateStr;
+
+ if (service) {
+ fstate = ServiceStateToFilter(service->GetState());
+ stateStr = NotificationServiceStateToString(service->GetState());
+ } else {
+ fstate = HostStateToFilter(host->GetState());
+ stateStr = NotificationHostStateToString(host->GetState());
+ }
+
+ Log(LogDebug, "Notification")
+ << "State '" << stateStr << "', StateFilter: " << NotificationFilterToString(GetStateFilter(), GetStateFilterMap())
+ << " (FState=" << fstate << ", StateFilter=" << GetStateFilter() << ")";
+
+ if (!(fstate & GetStateFilter())) {
+ Log(LogNotice, "Notification")
+ << "Not sending " << (reminder ? "reminder " : "") << "notifications for notification object '"
+ << notificationName << "': state '" << stateStr
+ << "' does not match state filter: " << NotificationFilterToString(GetStateFilter(), GetStateFilterMap()) << ".";
+ return;
+ }
+ }
+ } else {
+ Log(LogNotice, "Notification")
+ << "Not checking " << (reminder ? "reminder " : "") << "notification filters for notification object '"
+ << notificationName << "': Notification was forced.";
+ }
+
+ {
+ ObjectLock olock(this);
+
+ UpdateNotificationNumber();
+ double now = Utility::GetTime();
+ SetLastNotification(now);
+
+ if (type == NotificationProblem && GetInterval() <= 0)
+ SetNoMoreNotifications(true);
+ else
+ SetNoMoreNotifications(false);
+
+ if (type == NotificationProblem && GetInterval() > 0)
+ SetNextNotification(now + GetInterval());
+
+ if (type == NotificationProblem)
+ SetLastProblemNotification(now);
+ }
+
+ std::set<User::Ptr> allUsers;
+
+ std::set<User::Ptr> users = GetUsers();
+ std::copy(users.begin(), users.end(), std::inserter(allUsers, allUsers.begin()));
+
+ for (const UserGroup::Ptr& ug : GetUserGroups()) {
+ std::set<User::Ptr> members = ug->GetMembers();
+ std::copy(members.begin(), members.end(), std::inserter(allUsers, allUsers.begin()));
+ }
+
+ std::set<User::Ptr> allNotifiedUsers;
+ Array::Ptr notifiedProblemUsers = GetNotifiedProblemUsers();
+
+ for (const User::Ptr& user : allUsers) {
+ String userName = user->GetName();
+
+ if (!user->GetEnableNotifications()) {
+ Log(LogNotice, "Notification")
+ << "Notification object '" << notificationName << "': Disabled notifications for user '"
+ << userName << "'. Not sending notification.";
+ continue;
+ }
+
+ if (!CheckNotificationUserFilters(type, user, force, reminder)) {
+ Log(LogNotice, "Notification")
+ << "Notification object '" << notificationName << "': Filters for user '" << userName << "' not matched. Not sending notification.";
+ continue;
+ }
+
+ /* on recovery, check if user was notified before */
+ if (type == NotificationRecovery) {
+ if (!notifiedProblemUsers->Contains(userName) && (NotificationProblem & user->GetTypeFilter())) {
+ Log(LogNotice, "Notification")
+ << "Notification object '" << notificationName << "': We did not notify user '" << userName
+ << "' (Problem types enabled) for a problem before. Not sending Recovery notification.";
+ continue;
+ }
+ }
+
+ /* on acknowledgement, check if user was notified before */
+ if (type == NotificationAcknowledgement) {
+ if (!notifiedProblemUsers->Contains(userName) && (NotificationProblem & user->GetTypeFilter())) {
+ Log(LogNotice, "Notification")
+ << "Notification object '" << notificationName << "': We did not notify user '" << userName
+ << "' (Problem types enabled) for a problem before. Not sending acknowledgement notification.";
+ continue;
+ }
+ }
+
+ if (type == NotificationProblem && !reminder && !checkable->GetVolatile()) {
+ auto [host, service] = GetHostService(checkable);
+ uint_fast8_t state = service ? service->GetState() : host->GetState();
+
+ if (state == (uint_fast8_t)GetLastNotifiedStatePerUser()->Get(userName)) {
+ auto stateStr (service ? NotificationServiceStateToString(service->GetState()) : NotificationHostStateToString(host->GetState()));
+
+ Log(LogNotice, "Notification")
+ << "Notification object '" << notificationName << "': We already notified user '" << userName << "' for a " << stateStr
+ << " problem. Likely after that another state change notification was filtered out by config. Not sending duplicate '"
+ << stateStr << "' notification.";
+
+ continue;
+ }
+ }
+
+ Log(LogInformation, "Notification")
+ << "Sending " << (reminder ? "reminder " : "") << "'" << NotificationTypeToString(type) << "' notification '"
+ << notificationName << "' for user '" << userName << "'";
+
+ // Explicitly use Notification::Ptr to keep the reference counted while the callback is active
+ Notification::Ptr notification (this);
+ Utility::QueueAsyncCallback([notification, type, user, cr, force, author, text]() {
+ notification->ExecuteNotificationHelper(type, user, cr, force, author, text);
+ });
+
+ /* collect all notified users */
+ allNotifiedUsers.insert(user);
+
+ if (type == NotificationProblem) {
+ auto [host, service] = GetHostService(checkable);
+ uint_fast8_t state = service ? service->GetState() : host->GetState();
+
+ if (state != (uint_fast8_t)GetLastNotifiedStatePerUser()->Get(userName)) {
+ GetLastNotifiedStatePerUser()->Set(userName, state);
+ OnLastNotifiedStatePerUserUpdated(this, userName, state, nullptr);
+ }
+ }
+
+ /* store all notified users for later recovery checks */
+ if (type == NotificationProblem && !notifiedProblemUsers->Contains(userName))
+ notifiedProblemUsers->Add(userName);
+ }
+
+ /* if this was a recovery notification, reset all notified users */
+ if (type == NotificationRecovery)
+ notifiedProblemUsers->Clear();
+
+ /* used in db_ido for notification history */
+ Service::OnNotificationSentToAllUsers(this, checkable, allNotifiedUsers, type, cr, author, text, nullptr);
+}
+
+bool Notification::CheckNotificationUserFilters(NotificationType type, const User::Ptr& user, bool force, bool reminder)
+{
+ String notificationName = GetName();
+ String userName = user->GetName();
+
+ if (!force) {
+ TimePeriod::Ptr tp = user->GetPeriod();
+
+ if (tp && !tp->IsInside(Utility::GetTime())) {
+ Log(LogNotice, "Notification")
+ << "Not sending " << (reminder ? "reminder " : "") << "notifications for notification object '"
+				<< notificationName << "' and user '" << userName
+ << "': user period not in timeperiod '" << tp->GetName() << "'";
+ return false;
+ }
+
+ unsigned long ftype = type;
+
+ Log(LogDebug, "Notification")
+ << "User '" << userName << "' notification '" << notificationName
+ << "', Type '" << NotificationTypeToString(type)
+ << "', TypeFilter: " << NotificationFilterToString(user->GetTypeFilter(), GetTypeFilterMap())
+			<< " (FType=" << ftype << ", TypeFilter=" << user->GetTypeFilter() << ")";
+
+ if (!(ftype & user->GetTypeFilter())) {
+ Log(LogNotice, "Notification")
+ << "Not sending " << (reminder ? "reminder " : "") << "notifications for notification object '"
+				<< notificationName << "' and user '" << userName << "': type '"
+ << NotificationTypeToString(type) << "' does not match type filter: "
+ << NotificationFilterToString(user->GetTypeFilter(), GetTypeFilterMap()) << ".";
+ return false;
+ }
+
+		/* check state filters if this is not a recovery notification */
+ if (type != NotificationRecovery) {
+ Checkable::Ptr checkable = GetCheckable();
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ unsigned long fstate;
+ String stateStr;
+
+ if (service) {
+ fstate = ServiceStateToFilter(service->GetState());
+ stateStr = NotificationServiceStateToString(service->GetState());
+ } else {
+ fstate = HostStateToFilter(host->GetState());
+ stateStr = NotificationHostStateToString(host->GetState());
+ }
+
+ Log(LogDebug, "Notification")
+ << "User '" << userName << "' notification '" << notificationName
+ << "', State '" << stateStr << "', StateFilter: "
+ << NotificationFilterToString(user->GetStateFilter(), GetStateFilterMap())
+ << " (FState=" << fstate << ", StateFilter=" << user->GetStateFilter() << ")";
+
+ if (!(fstate & user->GetStateFilter())) {
+ Log(LogNotice, "Notification")
+					<< "Not sending " << (reminder ? "reminder " : "") << "notifications for notification object '"
+					<< notificationName << "' and user '" << userName << "': state '" << stateStr
+ << "' does not match state filter: " << NotificationFilterToString(user->GetStateFilter(), GetStateFilterMap()) << ".";
+ return false;
+ }
+ }
+ } else {
+ Log(LogNotice, "Notification")
+ << "Not checking " << (reminder ? "reminder " : "") << "notification filters for notification object '"
+ << notificationName << "' and user '" << userName << "': Notification was forced.";
+ }
+
+ return true;
+}
+
+void Notification::ExecuteNotificationHelper(NotificationType type, const User::Ptr& user, const CheckResult::Ptr& cr, bool force, const String& author, const String& text)
+{
+ String notificationName = GetName();
+ String userName = user->GetName();
+ String checkableName = GetCheckable()->GetName();
+
+ NotificationCommand::Ptr command = GetCommand();
+
+ if (!command) {
+ Log(LogDebug, "Notification")
+ << "No command found for notification '" << notificationName << "'. Skipping execution.";
+ return;
+ }
+
+ String commandName = command->GetName();
+
+ try {
+ command->Execute(this, user, cr, type, author, text);
+
+ /* required by compatlogger */
+ Service::OnNotificationSentToUser(this, GetCheckable(), user, type, cr, author, text, commandName, nullptr);
+
+ Log(LogInformation, "Notification")
+ << "Completed sending '" << NotificationTypeToString(type)
+ << "' notification '" << notificationName
+ << "' for checkable '" << checkableName
+ << "' and user '" << userName << "' using command '" << commandName << "'.";
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "Notification")
+ << "Exception occurred during notification '" << notificationName
+ << "' for checkable '" << checkableName
+ << "' and user '" << userName << "' using command '" << commandName << "': "
+ << DiagnosticInformation(ex, false);
+ }
+}
+
+int icinga::ServiceStateToFilter(ServiceState state)
+{
+ switch (state) {
+ case ServiceOK:
+ return StateFilterOK;
+ case ServiceWarning:
+ return StateFilterWarning;
+ case ServiceCritical:
+ return StateFilterCritical;
+ case ServiceUnknown:
+ return StateFilterUnknown;
+ default:
+ VERIFY(!"Invalid state type.");
+ }
+}
+
+int icinga::HostStateToFilter(HostState state)
+{
+ switch (state) {
+ case HostUp:
+ return StateFilterUp;
+ case HostDown:
+ return StateFilterDown;
+ default:
+ VERIFY(!"Invalid state type.");
+ }
+}
+
+String Notification::NotificationFilterToString(int filter, const std::map<String, int>& filterMap)
+{
+ std::vector<String> sFilters;
+
+ typedef std::pair<String, int> kv_pair;
+ for (const kv_pair& kv : filterMap) {
+ if (filter & kv.second)
+ sFilters.push_back(kv.first);
+ }
+
+ return Utility::NaturalJoin(sFilters);
+}
+
+/*
+ * Main interface to translate NotificationType values into strings.
+ */
+String Notification::NotificationTypeToString(NotificationType type)
+{
+	const auto& typeMap = Notification::m_TypeFilterMap;
+
+ auto it = std::find_if(typeMap.begin(), typeMap.end(),
+ [&type](const std::pair<String, int>& p) {
+ return p.second == type;
+ });
+
+ if (it == typeMap.end())
+ return Empty;
+
+ return it->first;
+}
+
+
+/*
+ * Compat interface used in external features.
+ */
+String Notification::NotificationTypeToStringCompat(NotificationType type)
+{
+ switch (type) {
+ case NotificationDowntimeStart:
+ return "DOWNTIMESTART";
+ case NotificationDowntimeEnd:
+ return "DOWNTIMEEND";
+ case NotificationDowntimeRemoved:
+ return "DOWNTIMECANCELLED";
+ case NotificationCustom:
+ return "CUSTOM";
+ case NotificationAcknowledgement:
+ return "ACKNOWLEDGEMENT";
+ case NotificationProblem:
+ return "PROBLEM";
+ case NotificationRecovery:
+ return "RECOVERY";
+ case NotificationFlappingStart:
+ return "FLAPPINGSTART";
+ case NotificationFlappingEnd:
+ return "FLAPPINGEND";
+ default:
+ return "UNKNOWN_NOTIFICATION";
+ }
+}
+
+String Notification::NotificationServiceStateToString(ServiceState state)
+{
+ switch (state) {
+ case ServiceOK:
+ return "OK";
+ case ServiceWarning:
+ return "Warning";
+ case ServiceCritical:
+ return "Critical";
+ case ServiceUnknown:
+ return "Unknown";
+ default:
+ VERIFY(!"Invalid state type.");
+ }
+}
+
+String Notification::NotificationHostStateToString(HostState state)
+{
+ switch (state) {
+ case HostUp:
+ return "Up";
+ case HostDown:
+ return "Down";
+ default:
+ VERIFY(!"Invalid state type.");
+ }
+}
+
+void Notification::Validate(int types, const ValidationUtils& utils)
+{
+ ObjectImpl<Notification>::Validate(types, utils);
+
+ if (!(types & FAConfig))
+ return;
+
+ Array::Ptr users = GetUsersRaw();
+ Array::Ptr groups = GetUserGroupsRaw();
+
+ if ((!users || users->GetLength() == 0) && (!groups || groups->GetLength() == 0))
+ BOOST_THROW_EXCEPTION(ValidationError(this, std::vector<String>(), "Validation failed: No users/user_groups specified."));
+}
+
+void Notification::ValidateStates(const Lazy<Array::Ptr>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<Notification>::ValidateStates(lvalue, utils);
+
+ int filter = FilterArrayToInt(lvalue(), GetStateFilterMap(), 0);
+
+ if (GetServiceName().IsEmpty() && (filter == -1 || (filter & ~(StateFilterUp | StateFilterDown)) != 0))
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "states" }, "State filter is invalid."));
+
+ if (!GetServiceName().IsEmpty() && (filter == -1 || (filter & ~(StateFilterOK | StateFilterWarning | StateFilterCritical | StateFilterUnknown)) != 0))
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "states" }, "State filter is invalid."));
+}
+
+void Notification::ValidateTypes(const Lazy<Array::Ptr>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<Notification>::ValidateTypes(lvalue, utils);
+
+ int filter = FilterArrayToInt(lvalue(), GetTypeFilterMap(), 0);
+
+ if (filter == -1 || (filter & ~(NotificationDowntimeStart | NotificationDowntimeEnd | NotificationDowntimeRemoved |
+ NotificationCustom | NotificationAcknowledgement | NotificationProblem | NotificationRecovery |
+ NotificationFlappingStart | NotificationFlappingEnd)) != 0)
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "types" }, "Type filter is invalid."));
+}
+
+void Notification::ValidateTimes(const Lazy<Dictionary::Ptr>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<Notification>::ValidateTimes(lvalue, utils);
+
+ Dictionary::Ptr times = lvalue();
+
+ if (!times)
+ return;
+
+ double begin;
+ double end;
+
+ try {
+ begin = Convert::ToDouble(times->Get("begin"));
+ } catch (const std::exception&) {
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "times" }, "'begin' is invalid, must be duration or number." ));
+ }
+
+ try {
+ end = Convert::ToDouble(times->Get("end"));
+ } catch (const std::exception&) {
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "times" }, "'end' is invalid, must be duration or number." ));
+ }
+
+ /* Also solve logical errors where begin > end. */
+ if (begin > 0 && end > 0 && begin > end)
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "times" }, "'begin' must be smaller than 'end'."));
+}
+
+Endpoint::Ptr Notification::GetCommandEndpoint() const
+{
+ return Endpoint::GetByName(GetCommandEndpointRaw());
+}
+
+const std::map<String, int>& Notification::GetStateFilterMap()
+{
+ return m_StateFilterMap;
+}
+
+const std::map<String, int>& Notification::GetTypeFilterMap()
+{
+ return m_TypeFilterMap;
+}
diff --git a/lib/icinga/notification.hpp b/lib/icinga/notification.hpp
new file mode 100644
index 0000000..1b6cbed
--- /dev/null
+++ b/lib/icinga/notification.hpp
@@ -0,0 +1,135 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef NOTIFICATION_H
+#define NOTIFICATION_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/notification-ti.hpp"
+#include "icinga/checkable-ti.hpp"
+#include "icinga/user.hpp"
+#include "icinga/usergroup.hpp"
+#include "icinga/timeperiod.hpp"
+#include "icinga/checkresult.hpp"
+#include "remote/endpoint.hpp"
+#include "remote/messageorigin.hpp"
+#include "base/array.hpp"
+#include <cstdint>
+
+namespace icinga
+{
+
+/**
+ * @ingroup icinga
+ */
+enum NotificationFilter
+{
+ StateFilterOK = 1,
+ StateFilterWarning = 2,
+ StateFilterCritical = 4,
+ StateFilterUnknown = 8,
+
+ StateFilterUp = 16,
+ StateFilterDown = 32
+};
+
+/**
+ * The notification type.
+ *
+ * @ingroup icinga
+ */
+enum NotificationType
+{
+ NotificationDowntimeStart = 1,
+ NotificationDowntimeEnd = 2,
+ NotificationDowntimeRemoved = 4,
+ NotificationCustom = 8,
+ NotificationAcknowledgement = 16,
+ NotificationProblem = 32,
+ NotificationRecovery = 64,
+ NotificationFlappingStart = 128,
+ NotificationFlappingEnd = 256
+};
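+
+/* These values are bit flags; for illustration, a configuration of
+ * types = [ "Problem", "Recovery" ] is folded by FilterArrayToInt() into 32 | 64 == 96. */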
+
+class NotificationCommand;
+class ApplyRule;
+struct ScriptFrame;
+class Host;
+class Service;
+
+/**
+ * An Icinga notification specification.
+ *
+ * @ingroup icinga
+ */
+class Notification final : public ObjectImpl<Notification>
+{
+public:
+ DECLARE_OBJECT(Notification);
+ DECLARE_OBJECTNAME(Notification);
+
+ static void StaticInitialize();
+
+ intrusive_ptr<Checkable> GetCheckable() const;
+ intrusive_ptr<NotificationCommand> GetCommand() const;
+ TimePeriod::Ptr GetPeriod() const;
+ std::set<User::Ptr> GetUsers() const;
+ std::set<UserGroup::Ptr> GetUserGroups() const;
+
+ void UpdateNotificationNumber();
+ void ResetNotificationNumber();
+
+ void BeginExecuteNotification(NotificationType type, const CheckResult::Ptr& cr, bool force,
+ bool reminder = false, const String& author = "", const String& text = "");
+
+ Endpoint::Ptr GetCommandEndpoint() const;
+
+ // Logging, etc.
+ static String NotificationTypeToString(NotificationType type);
+ // Compat, used for notifications, etc.
+ static String NotificationTypeToStringCompat(NotificationType type);
+ static String NotificationFilterToString(int filter, const std::map<String, int>& filterMap);
+
+ static String NotificationServiceStateToString(ServiceState state);
+ static String NotificationHostStateToString(HostState state);
+
+ static boost::signals2::signal<void (const Notification::Ptr&, const MessageOrigin::Ptr&)> OnNextNotificationChanged;
+ static boost::signals2::signal<void (const Notification::Ptr&, const String&, uint_fast8_t, const MessageOrigin::Ptr&)> OnLastNotifiedStatePerUserUpdated;
+ static boost::signals2::signal<void (const Notification::Ptr&, const MessageOrigin::Ptr&)> OnLastNotifiedStatePerUserCleared;
+
+ void Validate(int types, const ValidationUtils& utils) override;
+
+ void ValidateStates(const Lazy<Array::Ptr>& lvalue, const ValidationUtils& utils) override;
+ void ValidateTypes(const Lazy<Array::Ptr>& lvalue, const ValidationUtils& utils) override;
+ void ValidateTimes(const Lazy<Dictionary::Ptr>& lvalue, const ValidationUtils& utils) override;
+
+ static void EvaluateApplyRules(const intrusive_ptr<Host>& host);
+ static void EvaluateApplyRules(const intrusive_ptr<Service>& service);
+
+ static const std::map<String, int>& GetStateFilterMap();
+ static const std::map<String, int>& GetTypeFilterMap();
+
+ void OnConfigLoaded() override;
+ void OnAllConfigLoaded() override;
+ void Start(bool runtimeCreated) override;
+ void Stop(bool runtimeRemoved) override;
+
+private:
+ ObjectImpl<Checkable>::Ptr m_Checkable;
+
+ bool CheckNotificationUserFilters(NotificationType type, const User::Ptr& user, bool force, bool reminder);
+
+ void ExecuteNotificationHelper(NotificationType type, const User::Ptr& user, const CheckResult::Ptr& cr, bool force, const String& author = "", const String& text = "");
+
+ static bool EvaluateApplyRuleInstance(const intrusive_ptr<Checkable>& checkable, const String& name, ScriptFrame& frame, const ApplyRule& rule, bool skipFilter);
+ static bool EvaluateApplyRule(const intrusive_ptr<Checkable>& checkable, const ApplyRule& rule, bool skipFilter = false);
+
+ static std::map<String, int> m_StateFilterMap;
+ static std::map<String, int> m_TypeFilterMap;
+};
+
+int ServiceStateToFilter(ServiceState state);
+int HostStateToFilter(HostState state);
+
+}
+
+#endif /* NOTIFICATION_H */
diff --git a/lib/icinga/notification.ti b/lib/icinga/notification.ti
new file mode 100644
index 0000000..be07846
--- /dev/null
+++ b/lib/icinga/notification.ti
@@ -0,0 +1,111 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/customvarobject.hpp"
+#impl_include "icinga/notificationcommand.hpp"
+#impl_include "icinga/service.hpp"
+
+library icinga;
+
+namespace icinga
+{
+
+code {{{
+class NotificationNameComposer : public NameComposer
+{
+public:
+ virtual String MakeName(const String& shortName, const Object::Ptr& context) const;
+ virtual Dictionary::Ptr ParseName(const String& name) const;
+};
+}}}
+
+class Notification : CustomVarObject < NotificationNameComposer
+{
+ load_after Host;
+ load_after Service;
+
+ [config, protected, required, navigation] name(NotificationCommand) command (CommandRaw) {
+ navigate {{{
+ return NotificationCommand::GetByName(GetCommandRaw());
+ }}}
+ };
+ [config] double interval {
+ default {{{ return 1800; }}}
+ };
+ [config, navigation] name(TimePeriod) period (PeriodRaw) {
+ navigate {{{
+ return TimePeriod::GetByName(GetPeriodRaw());
+ }}}
+ };
+ [config, signal_with_old_value] array(name(User)) users (UsersRaw);
+ [config, signal_with_old_value] array(name(UserGroup)) user_groups (UserGroupsRaw);
+ [config] Dictionary::Ptr times;
+ [config] array(Value) types;
+ [no_user_view, no_user_modify] int type_filter_real (TypeFilter);
+ [config] array(Value) states;
+ [no_user_view, no_user_modify] int state_filter_real (StateFilter);
+ [config, no_user_modify, protected, required, navigation(host)] name(Host) host_name {
+ navigate {{{
+ return Host::GetByName(GetHostName());
+ }}}
+ };
+ [config, protected, no_user_modify, navigation(service)] String service_name {
+ track {{{
+ if (!oldValue.IsEmpty()) {
+ Service::Ptr service = Service::GetByNamePair(GetHostName(), oldValue);
+ DependencyGraph::RemoveDependency(this, service.get());
+ }
+
+ if (!newValue.IsEmpty()) {
+ Service::Ptr service = Service::GetByNamePair(GetHostName(), newValue);
+ DependencyGraph::AddDependency(this, service.get());
+ }
+ }}}
+ navigate {{{
+ if (GetServiceName().IsEmpty())
+ return nullptr;
+
+ Host::Ptr host = Host::GetByName(GetHostName());
+ return host->GetServiceByShortName(GetServiceName());
+ }}}
+ };
+
+ [state, no_user_modify] Array::Ptr notified_problem_users {
+ default {{{ return new Array(); }}}
+ };
+
+ [state, no_user_modify] bool no_more_notifications {
+ default {{{ return false; }}}
+ };
+
+ [state, no_user_view, no_user_modify] Array::Ptr stashed_notifications {
+ default {{{ return new Array(); }}}
+ };
+
+ [state] Timestamp last_notification;
+ [state] Timestamp next_notification;
+ [state] int notification_number;
+ [state] Timestamp last_problem_notification;
+
+ [state, no_user_view, no_user_modify] int suppressed_notifications {
+ default {{{ return 0; }}}
+ };
+
+ [state, no_user_view, no_user_modify] Dictionary::Ptr last_notified_state_per_user {
+ default {{{ return new Dictionary(); }}}
+ };
+
+ [config, navigation] name(Endpoint) command_endpoint (CommandEndpointRaw) {
+ navigate {{{
+ return Endpoint::GetByName(GetCommandEndpointRaw());
+ }}}
+ };
+};
+
+validator Notification {
+ Dictionary times {
+ Number begin;
+ Number end;
+ };
+};
+
+}
diff --git a/lib/icinga/notificationcommand.cpp b/lib/icinga/notificationcommand.cpp
new file mode 100644
index 0000000..d4a5fd6
--- /dev/null
+++ b/lib/icinga/notificationcommand.cpp
@@ -0,0 +1,27 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/notificationcommand.hpp"
+#include "icinga/notificationcommand-ti.cpp"
+
+using namespace icinga;
+
+REGISTER_TYPE(NotificationCommand);
+
+thread_local NotificationCommand::Ptr NotificationCommand::ExecuteOverride;
+
+Dictionary::Ptr NotificationCommand::Execute(const Notification::Ptr& notification,
+ const User::Ptr& user, const CheckResult::Ptr& cr, const NotificationType& type,
+ const String& author, const String& comment, const Dictionary::Ptr& resolvedMacros,
+ bool useResolvedMacros)
+{
+ return GetExecute()->Invoke({
+ notification,
+ user,
+ cr,
+ type,
+ author,
+ comment,
+ resolvedMacros,
+ useResolvedMacros,
+ });
+}
diff --git a/lib/icinga/notificationcommand.hpp b/lib/icinga/notificationcommand.hpp
new file mode 100644
index 0000000..f0f6899
--- /dev/null
+++ b/lib/icinga/notificationcommand.hpp
@@ -0,0 +1,36 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef NOTIFICATIONCOMMAND_H
+#define NOTIFICATIONCOMMAND_H
+
+#include "icinga/notificationcommand-ti.hpp"
+#include "icinga/notification.hpp"
+
+namespace icinga
+{
+
+class Notification;
+
+/**
+ * A notification command.
+ *
+ * @ingroup icinga
+ */
+class NotificationCommand final : public ObjectImpl<NotificationCommand>
+{
+public:
+ DECLARE_OBJECT(NotificationCommand);
+ DECLARE_OBJECTNAME(NotificationCommand);
+
+ static thread_local NotificationCommand::Ptr ExecuteOverride;
+
+ virtual Dictionary::Ptr Execute(const intrusive_ptr<Notification>& notification,
+ const User::Ptr& user, const CheckResult::Ptr& cr, const NotificationType& type,
+ const String& author, const String& comment,
+ const Dictionary::Ptr& resolvedMacros = nullptr,
+ bool useResolvedMacros = false);
+};
+
+}
+
+#endif /* NOTIFICATIONCOMMAND_H */
diff --git a/lib/icinga/notificationcommand.ti b/lib/icinga/notificationcommand.ti
new file mode 100644
index 0000000..51207a3
--- /dev/null
+++ b/lib/icinga/notificationcommand.ti
@@ -0,0 +1,14 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/command.hpp"
+
+library icinga;
+
+namespace icinga
+{
+
+class NotificationCommand : Command
+{
+};
+
+}
diff --git a/lib/icinga/objectutils.cpp b/lib/icinga/objectutils.cpp
new file mode 100644
index 0000000..559ca43
--- /dev/null
+++ b/lib/icinga/objectutils.cpp
@@ -0,0 +1,55 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/objectutils.hpp"
+#include "icinga/host.hpp"
+#include "icinga/user.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/eventcommand.hpp"
+#include "icinga/notificationcommand.hpp"
+#include "icinga/hostgroup.hpp"
+#include "icinga/servicegroup.hpp"
+#include "icinga/usergroup.hpp"
+
+using namespace icinga;
+
+REGISTER_FUNCTION(Icinga, get_host, &Host::GetByName, "name");
+REGISTER_FUNCTION(Icinga, get_service, &ObjectUtils::GetService, "host:name");
+REGISTER_FUNCTION(Icinga, get_services, &ObjectUtils::GetServices, "host");
+REGISTER_FUNCTION(Icinga, get_user, &User::GetByName, "name");
+REGISTER_FUNCTION(Icinga, get_check_command, &CheckCommand::GetByName, "name");
+REGISTER_FUNCTION(Icinga, get_event_command, &EventCommand::GetByName, "name");
+REGISTER_FUNCTION(Icinga, get_notification_command, &NotificationCommand::GetByName, "name");
+REGISTER_FUNCTION(Icinga, get_host_group, &HostGroup::GetByName, "name");
+REGISTER_FUNCTION(Icinga, get_service_group, &ServiceGroup::GetByName, "name");
+REGISTER_FUNCTION(Icinga, get_user_group, &UserGroup::GetByName, "name");
+REGISTER_FUNCTION(Icinga, get_time_period, &TimePeriod::GetByName, "name");
+
+Service::Ptr ObjectUtils::GetService(const Value& host, const String& name)
+{
+ Host::Ptr hostObj;
+
+ if (host.IsObjectType<Host>())
+ hostObj = host;
+ else
+ hostObj = Host::GetByName(host);
+
+ if (!hostObj)
+ return nullptr;
+
+ return hostObj->GetServiceByShortName(name);
+}
+
+Array::Ptr ObjectUtils::GetServices(const Value& host)
+{
+ Host::Ptr hostObj;
+
+ if (host.IsObjectType<Host>())
+ hostObj = host;
+ else
+ hostObj = Host::GetByName(host);
+
+ if (!hostObj)
+ return nullptr;
+
+ return Array::FromVector(hostObj->GetServices());
+}
diff --git a/lib/icinga/objectutils.hpp b/lib/icinga/objectutils.hpp
new file mode 100644
index 0000000..42e2953
--- /dev/null
+++ b/lib/icinga/objectutils.hpp
@@ -0,0 +1,29 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef OBJECTUTILS_H
+#define OBJECTUTILS_H
+
+#include "base/i2-base.hpp"
+#include "base/string.hpp"
+#include "base/array.hpp"
+#include "icinga/service.hpp"
+
+namespace icinga
+{
+
+/**
+ * @ingroup icinga
+ */
+class ObjectUtils
+{
+public:
+ static Service::Ptr GetService(const Value& host, const String& name);
+ static Array::Ptr GetServices(const Value& host);
+
+private:
+ ObjectUtils();
+};
+
+}
+
+#endif /* OBJECTUTILS_H */
diff --git a/lib/icinga/pluginutility.cpp b/lib/icinga/pluginutility.cpp
new file mode 100644
index 0000000..4dc46f7
--- /dev/null
+++ b/lib/icinga/pluginutility.cpp
@@ -0,0 +1,218 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/pluginutility.hpp"
+#include "icinga/macroprocessor.hpp"
+#include "base/logger.hpp"
+#include "base/utility.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/convert.hpp"
+#include "base/process.hpp"
+#include "base/objectlock.hpp"
+#include "base/exception.hpp"
+#include <boost/algorithm/string/trim.hpp>
+
+using namespace icinga;
+
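+/* Descriptive note: resolves the command line, arguments and custom environment
+ * of the given command object via the macro processor and runs the resulting
+ * plugin call asynchronously. The optional callback receives the resolved
+ * command line and the ProcessResult; if macro resolution fails, a synthetic
+ * UNKNOWN (exit status 3) result is passed instead. */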
+void PluginUtility::ExecuteCommand(const Command::Ptr& commandObj, const Checkable::Ptr& checkable,
+ const CheckResult::Ptr& cr, const MacroProcessor::ResolverList& macroResolvers,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros, int timeout,
+ const std::function<void(const Value& commandLine, const ProcessResult&)>& callback)
+{
+ Value raw_command = commandObj->GetCommandLine();
+ Dictionary::Ptr raw_arguments = commandObj->GetArguments();
+
+ Value command;
+
+ try {
+ command = MacroProcessor::ResolveArguments(raw_command, raw_arguments,
+ macroResolvers, cr, resolvedMacros, useResolvedMacros);
+ } catch (const std::exception& ex) {
+ String message = DiagnosticInformation(ex);
+
+ Log(LogWarning, "PluginUtility", message);
+
+ if (callback) {
+ ProcessResult pr;
+ pr.PID = -1;
+ pr.ExecutionStart = Utility::GetTime();
+ pr.ExecutionEnd = pr.ExecutionStart;
+ pr.ExitStatus = 3; /* Unknown */
+ pr.Output = message;
+ callback(Empty, pr);
+ }
+
+ return;
+ }
+
+ Dictionary::Ptr envMacros = new Dictionary();
+
+ Dictionary::Ptr env = commandObj->GetEnv();
+
+ if (env) {
+ ObjectLock olock(env);
+ for (const Dictionary::Pair& kv : env) {
+ String name = kv.second;
+
+ String missingMacro;
+ Value value = MacroProcessor::ResolveMacros(name, macroResolvers, cr,
+ &missingMacro, MacroProcessor::EscapeCallback(), resolvedMacros,
+ useResolvedMacros);
+
+#ifdef I2_DEBUG
+ if (!missingMacro.IsEmpty())
+ Log(LogDebug, "PluginUtility")
+ << "Macro '" << name << "' is not defined.";
+#endif /* I2_DEBUG */
+
+ if (value.IsObjectType<Array>())
+ value = Utility::Join(value, ';');
+
+ envMacros->Set(kv.first, value);
+ }
+ }
+
+ if (resolvedMacros && !useResolvedMacros)
+ return;
+
+ Process::Ptr process = new Process(Process::PrepareCommand(command), envMacros);
+
+ process->SetTimeout(timeout);
+ process->SetAdjustPriority(true);
+
+ process->Run([callback, command](const ProcessResult& pr) { callback(command, pr); });
+}
+
+ServiceState PluginUtility::ExitStatusToState(int exitStatus)
+{
+ switch (exitStatus) {
+ case 0:
+ return ServiceOK;
+ case 1:
+ return ServiceWarning;
+ case 2:
+ return ServiceCritical;
+ default:
+ return ServiceUnknown;
+ }
+}
+
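+/* Descriptive note: splits raw plugin output into the human-readable text and
+ * the performance data string. Everything after the first '|' of a line
+ * (when followed by a '=') is treated as perfdata, e.g.
+ * "DISK OK | /=2643MB;5948;5958;0;5968" yields the pair
+ * ("DISK OK ", "/=2643MB;5948;5958;0;5968"). */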
+std::pair<String, String> PluginUtility::ParseCheckOutput(const String& output)
+{
+ String text;
+ String perfdata;
+
+ std::vector<String> lines = output.Split("\r\n");
+
+ for (const String& line : lines) {
+ size_t delim = line.FindFirstOf("|");
+
+ if (!text.IsEmpty())
+ text += "\n";
+
+ if (delim != String::NPos && line.FindFirstOf("=", delim) != String::NPos) {
+ text += line.SubStr(0, delim);
+
+ if (!perfdata.IsEmpty())
+ perfdata += " ";
+
+ perfdata += line.SubStr(delim + 1, line.GetLength());
+ } else {
+ text += line;
+ }
+ }
+
+ boost::algorithm::trim(perfdata);
+
+ return std::make_pair(text, perfdata);
+}
+
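+/* Descriptive note: splits a perfdata string into one label=value token per
+ * metric, keeping labels that contain spaces quoted, e.g.
+ * "'used space'=2643MB;5948;5958 time=0.02s" becomes
+ * ["'used space'=2643MB;5948;5958", "time=0.02s"]. */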
+Array::Ptr PluginUtility::SplitPerfdata(const String& perfdata)
+{
+ ArrayData result;
+
+ size_t begin = 0;
+ String multi_prefix;
+
+ for (;;) {
+ size_t eqp = perfdata.FindFirstOf('=', begin);
+
+ if (eqp == String::NPos)
+ break;
+
+ String label = perfdata.SubStr(begin, eqp - begin);
+ boost::algorithm::trim_left(label);
+
+ if (label.GetLength() > 2 && label[0] == '\'' && label[label.GetLength() - 1] == '\'')
+ label = label.SubStr(1, label.GetLength() - 2);
+
+ size_t multi_index = label.RFind("::");
+
+ if (multi_index != String::NPos)
+ multi_prefix = "";
+
+ size_t spq = perfdata.FindFirstOf(' ', eqp);
+
+ if (spq == String::NPos)
+ spq = perfdata.GetLength();
+
+ String value = perfdata.SubStr(eqp + 1, spq - eqp - 1);
+
+ if (!multi_prefix.IsEmpty())
+ label = multi_prefix + "::" + label;
+
+ String pdv;
+ if (label.FindFirstOf(" ") != String::NPos)
+ pdv = "'" + label + "'=" + value;
+ else
+ pdv = label + "=" + value;
+
+ result.emplace_back(std::move(pdv));
+
+ if (multi_index != String::NPos)
+ multi_prefix = label.SubStr(0, multi_index);
+
+ begin = spq + 1;
+ }
+
+ return new Array(std::move(result));
+}
+
+String PluginUtility::FormatPerfdata(const Array::Ptr& perfdata, bool normalize)
+{
+ if (!perfdata)
+ return "";
+
+ std::ostringstream result;
+
+ ObjectLock olock(perfdata);
+
+ bool first = true;
+ for (const Value& pdv : perfdata) {
+ if (!first)
+ result << " ";
+ else
+ first = false;
+
+ if (pdv.IsObjectType<PerfdataValue>()) {
+ result << static_cast<PerfdataValue::Ptr>(pdv)->Format();
+ } else if (normalize) {
+ PerfdataValue::Ptr normalized;
+
+ try {
+ normalized = PerfdataValue::Parse(pdv);
+ } catch (const std::invalid_argument& ex) {
+ Log(LogDebug, "PerfdataValue") << ex.what();
+ }
+
+ if (normalized) {
+ result << normalized->Format();
+ } else {
+ result << pdv;
+ }
+ } else {
+ result << pdv;
+ }
+ }
+
+ return result.str();
+}
diff --git a/lib/icinga/pluginutility.hpp b/lib/icinga/pluginutility.hpp
new file mode 100644
index 0000000..3f6a844
--- /dev/null
+++ b/lib/icinga/pluginutility.hpp
@@ -0,0 +1,42 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef PLUGINUTILITY_H
+#define PLUGINUTILITY_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/checkable.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/macroprocessor.hpp"
+#include <vector>
+
+namespace icinga
+{
+
+struct ProcessResult;
+
+/**
+ * Utility functions for plugin-based checks.
+ *
+ * @ingroup icinga
+ */
+class PluginUtility
+{
+public:
+ static void ExecuteCommand(const Command::Ptr& commandObj, const Checkable::Ptr& checkable,
+ const CheckResult::Ptr& cr, const MacroProcessor::ResolverList& macroResolvers,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros, int timeout,
+ const std::function<void(const Value& commandLine, const ProcessResult&)>& callback = std::function<void(const Value& commandLine, const ProcessResult&)>());
+
+ static ServiceState ExitStatusToState(int exitStatus);
+ static std::pair<String, String> ParseCheckOutput(const String& output);
+
+ static Array::Ptr SplitPerfdata(const String& perfdata);
+ static String FormatPerfdata(const Array::Ptr& perfdata, bool normalize = false);
+
+private:
+ PluginUtility();
+};
+
+}
+
+#endif /* PLUGINUTILITY_H */
diff --git a/lib/icinga/scheduleddowntime-apply.cpp b/lib/icinga/scheduleddowntime-apply.cpp
new file mode 100644
index 0000000..4f8aa47
--- /dev/null
+++ b/lib/icinga/scheduleddowntime-apply.cpp
@@ -0,0 +1,159 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/scheduleddowntime.hpp"
+#include "icinga/service.hpp"
+#include "config/configitembuilder.hpp"
+#include "config/applyrule.hpp"
+#include "base/initialize.hpp"
+#include "base/configtype.hpp"
+#include "base/logger.hpp"
+#include "base/context.hpp"
+#include "base/exception.hpp"
+
+using namespace icinga;
+
+INITIALIZE_ONCE([]() {
+ ApplyRule::RegisterType("ScheduledDowntime", { "Host", "Service" });
+});
+
+bool ScheduledDowntime::EvaluateApplyRuleInstance(const Checkable::Ptr& checkable, const String& name, ScriptFrame& frame, const ApplyRule& rule, bool skipFilter)
+{
+ if (!skipFilter && !rule.EvaluateFilter(frame))
+ return false;
+
+ auto& di (rule.GetDebugInfo());
+
+#ifdef _DEBUG
+ Log(LogDebug, "ScheduledDowntime")
+ << "Applying scheduled downtime '" << rule.GetName() << "' to object '" << checkable->GetName() << "' for rule " << di;
+#endif /* _DEBUG */
+
+ ConfigItemBuilder builder{di};
+ builder.SetType(ScheduledDowntime::TypeInstance);
+ builder.SetName(name);
+ builder.SetScope(frame.Locals->ShallowClone());
+ builder.SetIgnoreOnError(rule.GetIgnoreOnError());
+
+ builder.AddExpression(new ImportDefaultTemplatesExpression());
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ builder.AddExpression(new SetExpression(MakeIndexer(ScopeThis, "host_name"), OpSetLiteral, MakeLiteral(host->GetName()), di));
+
+ if (service)
+ builder.AddExpression(new SetExpression(MakeIndexer(ScopeThis, "service_name"), OpSetLiteral, MakeLiteral(service->GetShortName()), di));
+
+ String zone = checkable->GetZoneName();
+
+ if (!zone.IsEmpty())
+ builder.AddExpression(new SetExpression(MakeIndexer(ScopeThis, "zone"), OpSetLiteral, MakeLiteral(zone), di));
+
+ builder.AddExpression(new SetExpression(MakeIndexer(ScopeThis, "package"), OpSetLiteral, MakeLiteral(rule.GetPackage()), di));
+
+ builder.AddExpression(new OwnedExpression(rule.GetExpression()));
+
+ ConfigItem::Ptr downtimeItem = builder.Compile();
+ downtimeItem->Register();
+
+ return true;
+}
+
+bool ScheduledDowntime::EvaluateApplyRule(const Checkable::Ptr& checkable, const ApplyRule& rule, bool skipFilter)
+{
+ auto& di (rule.GetDebugInfo());
+
+ CONTEXT("Evaluating 'apply' rule (" << di << ")");
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ ScriptFrame frame(true);
+ if (rule.GetScope())
+ rule.GetScope()->CopyTo(frame.Locals);
+ frame.Locals->Set("host", host);
+ if (service)
+ frame.Locals->Set("service", service);
+
+ Value vinstances;
+
+ if (rule.GetFTerm()) {
+ try {
+ vinstances = rule.GetFTerm()->Evaluate(frame);
+ } catch (const std::exception&) {
+ /* Silently ignore errors here and assume there are no instances. */
+ return false;
+ }
+ } else {
+ vinstances = new Array({ "" });
+ }
+
+ bool match = false;
+
+ if (vinstances.IsObjectType<Array>()) {
+ if (!rule.GetFVVar().IsEmpty())
+ BOOST_THROW_EXCEPTION(ScriptError("Dictionary iterator requires value to be a dictionary.", di));
+
+ Array::Ptr arr = vinstances;
+
+ ObjectLock olock(arr);
+ for (const Value& instance : arr) {
+ String name = rule.GetName();
+
+ if (!rule.GetFKVar().IsEmpty()) {
+ frame.Locals->Set(rule.GetFKVar(), instance);
+ name += instance;
+ }
+
+ if (EvaluateApplyRuleInstance(checkable, name, frame, rule, skipFilter))
+ match = true;
+ }
+ } else if (vinstances.IsObjectType<Dictionary>()) {
+ if (rule.GetFVVar().IsEmpty())
+ BOOST_THROW_EXCEPTION(ScriptError("Array iterator requires value to be an array.", di));
+
+ Dictionary::Ptr dict = vinstances;
+
+ for (const String& key : dict->GetKeys()) {
+ frame.Locals->Set(rule.GetFKVar(), key);
+ frame.Locals->Set(rule.GetFVVar(), dict->Get(key));
+
+ if (EvaluateApplyRuleInstance(checkable, rule.GetName() + key, frame, rule, skipFilter))
+ match = true;
+ }
+ }
+
+ return match;
+}
+
+void ScheduledDowntime::EvaluateApplyRules(const Host::Ptr& host)
+{
+ CONTEXT("Evaluating 'apply' rules for host '" << host->GetName() << "'");
+
+ for (auto& rule : ApplyRule::GetRules(ScheduledDowntime::TypeInstance, Host::TypeInstance)) {
+ if (EvaluateApplyRule(host, *rule))
+ rule->AddMatch();
+ }
+
+ for (auto& rule : ApplyRule::GetTargetedHostRules(ScheduledDowntime::TypeInstance, host->GetName())) {
+ if (EvaluateApplyRule(host, *rule, true))
+ rule->AddMatch();
+ }
+}
+
+void ScheduledDowntime::EvaluateApplyRules(const Service::Ptr& service)
+{
+ CONTEXT("Evaluating 'apply' rules for service '" << service->GetName() << "'");
+
+ for (auto& rule : ApplyRule::GetRules(ScheduledDowntime::TypeInstance, Service::TypeInstance)) {
+ if (EvaluateApplyRule(service, *rule))
+ rule->AddMatch();
+ }
+
+ for (auto& rule : ApplyRule::GetTargetedServiceRules(ScheduledDowntime::TypeInstance, service->GetHost()->GetName(), service->GetShortName())) {
+ if (EvaluateApplyRule(service, *rule, true))
+ rule->AddMatch();
+ }
+}
diff --git a/lib/icinga/scheduleddowntime.cpp b/lib/icinga/scheduleddowntime.cpp
new file mode 100644
index 0000000..f23d3e4
--- /dev/null
+++ b/lib/icinga/scheduleddowntime.cpp
@@ -0,0 +1,393 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/scheduleddowntime.hpp"
+#include "icinga/scheduleddowntime-ti.cpp"
+#include "icinga/legacytimeperiod.hpp"
+#include "icinga/downtime.hpp"
+#include "icinga/service.hpp"
+#include "base/timer.hpp"
+#include "base/tlsutility.hpp"
+#include "base/configtype.hpp"
+#include "base/utility.hpp"
+#include "base/objectlock.hpp"
+#include "base/object-packer.hpp"
+#include "base/serializer.hpp"
+#include "base/convert.hpp"
+#include "base/logger.hpp"
+#include "base/exception.hpp"
+#include <boost/thread/once.hpp>
+#include <set>
+
+using namespace icinga;
+
+REGISTER_TYPE(ScheduledDowntime);
+
+static Timer::Ptr l_Timer;
+
+String ScheduledDowntimeNameComposer::MakeName(const String& shortName, const Object::Ptr& context) const
+{
+ ScheduledDowntime::Ptr downtime = dynamic_pointer_cast<ScheduledDowntime>(context);
+
+ if (!downtime)
+ return "";
+
+ String name = downtime->GetHostName();
+
+ if (!downtime->GetServiceName().IsEmpty())
+ name += "!" + downtime->GetServiceName();
+
+ name += "!" + shortName;
+
+ return name;
+}
+
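+/* Descriptive note: the full name has the form "<host>[!<service>]!<short name>",
+ * e.g. "web01!disk!backup-window" is parsed into host_name "web01",
+ * service_name "disk" and name "backup-window" (illustrative names). */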
+Dictionary::Ptr ScheduledDowntimeNameComposer::ParseName(const String& name) const
+{
+ std::vector<String> tokens = name.Split("!");
+
+ if (tokens.size() < 2)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid ScheduledDowntime name."));
+
+ Dictionary::Ptr result = new Dictionary();
+ result->Set("host_name", tokens[0]);
+
+ if (tokens.size() > 2) {
+ result->Set("service_name", tokens[1]);
+ result->Set("name", tokens[2]);
+ } else {
+ result->Set("name", tokens[1]);
+ }
+
+ return result;
+}
+
+void ScheduledDowntime::OnAllConfigLoaded()
+{
+ ObjectImpl<ScheduledDowntime>::OnAllConfigLoaded();
+
+ if (!GetCheckable())
+ BOOST_THROW_EXCEPTION(ScriptError("ScheduledDowntime '" + GetName() + "' references a host/service which doesn't exist.", GetDebugInfo()));
+
+ m_AllConfigLoaded.store(true);
+}
+
+void ScheduledDowntime::Start(bool runtimeCreated)
+{
+ ObjectImpl<ScheduledDowntime>::Start(runtimeCreated);
+
+ static boost::once_flag once = BOOST_ONCE_INIT;
+
+ boost::call_once(once, [this]() {
+ l_Timer = Timer::Create();
+ l_Timer->SetInterval(60);
+ l_Timer->OnTimerExpired.connect([](const Timer * const&) { TimerProc(); });
+ l_Timer->Start();
+ });
+
+ if (!IsPaused())
+ Utility::QueueAsyncCallback([this]() { CreateNextDowntime(); });
+}
+
+void ScheduledDowntime::TimerProc()
+{
+ for (const ScheduledDowntime::Ptr& sd : ConfigType::GetObjectsByType<ScheduledDowntime>()) {
+ if (sd->IsActive() && !sd->IsPaused()) {
+ try {
+ sd->CreateNextDowntime();
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "ScheduledDowntime")
+ << "Exception occurred during creation of next downtime for scheduled downtime '"
+ << sd->GetName() << "': " << DiagnosticInformation(ex, false);
+ continue;
+ }
+
+ try {
+ sd->RemoveObsoleteDowntimes();
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "ScheduledDowntime")
+ << "Exception occurred during removal of obsolete downtime for scheduled downtime '"
+ << sd->GetName() << "': " << DiagnosticInformation(ex, false);
+ }
+ }
+ }
+}
+
+Checkable::Ptr ScheduledDowntime::GetCheckable() const
+{
+ Host::Ptr host = Host::GetByName(GetHostName());
+
+ if (GetServiceName().IsEmpty())
+ return host;
+ else
+ return host->GetServiceByShortName(GetServiceName());
+}
+
+std::pair<double, double> ScheduledDowntime::FindRunningSegment(double minEnd)
+{
+ time_t refts = Utility::GetTime();
+ tm reference = Utility::LocalTime(refts);
+
+ Log(LogDebug, "ScheduledDowntime")
+ << "Finding running scheduled downtime segment for time " << refts
+ << " (minEnd " << (minEnd > 0 ? Utility::FormatDateTime("%c", minEnd) : "-") << ")";
+
+ Dictionary::Ptr ranges = GetRanges();
+
+ if (!ranges)
+ return std::make_pair(0, 0);
+
+ Array::Ptr segments = new Array();
+
+ Dictionary::Ptr bestSegment;
+ double bestBegin = 0.0, bestEnd = 0.0;
+ double now = Utility::GetTime();
+
+ ObjectLock olock(ranges);
+
+ /* Find the longest lasting (and longer than minEnd, if given) segment that's already running */
+ for (const Dictionary::Pair& kv : ranges) {
+ Log(LogDebug, "ScheduledDowntime")
+ << "Evaluating (running?) segment: " << kv.first << ": " << kv.second;
+
+ Dictionary::Ptr segment = LegacyTimePeriod::FindRunningSegment(kv.first, kv.second, &reference);
+
+ if (!segment)
+ continue;
+
+ double begin = segment->Get("begin");
+ double end = segment->Get("end");
+
+ Log(LogDebug, "ScheduledDowntime")
+ << "Considering (running?) segment: " << Utility::FormatDateTime("%c", begin) << " -> " << Utility::FormatDateTime("%c", end);
+
+ if (begin >= now || end < now) {
+ Log(LogDebug, "ScheduledDowntime") << "not running.";
+ continue;
+ }
+ if (minEnd && end <= minEnd) {
+ Log(LogDebug, "ScheduledDowntime") << "ending too early.";
+ continue;
+ }
+
+ if (!bestSegment || end > bestEnd) {
+ Log(LogDebug, "ScheduledDowntime") << "(best match yet)";
+ bestSegment = segment;
+ bestBegin = begin;
+ bestEnd = end;
+ }
+ }
+
+ if (bestSegment)
+ return std::make_pair(bestBegin, bestEnd);
+
+ return std::make_pair(0, 0);
+}
+
+std::pair<double, double> ScheduledDowntime::FindNextSegment()
+{
+ time_t refts = Utility::GetTime();
+ tm reference = Utility::LocalTime(refts);
+
+ Log(LogDebug, "ScheduledDowntime")
+ << "Finding next scheduled downtime segment for time " << refts;
+
+ Dictionary::Ptr ranges = GetRanges();
+
+ if (!ranges)
+ return std::make_pair(0, 0);
+
+ Array::Ptr segments = new Array();
+
+ Dictionary::Ptr bestSegment;
+ double bestBegin = 0.0, bestEnd = 0.0;
+ double now = Utility::GetTime();
+
+ ObjectLock olock(ranges);
+
+ /* Find the segment starting earliest */
+ for (const Dictionary::Pair& kv : ranges) {
+ Log(LogDebug, "ScheduledDowntime")
+ << "Evaluating segment: " << kv.first << ": " << kv.second;
+
+ Dictionary::Ptr segment = LegacyTimePeriod::FindNextSegment(kv.first, kv.second, &reference);
+
+ if (!segment)
+ continue;
+
+ double begin = segment->Get("begin");
+ double end = segment->Get("end");
+
+ Log(LogDebug, "ScheduledDowntime")
+ << "Considering segment: " << Utility::FormatDateTime("%c", begin) << " -> " << Utility::FormatDateTime("%c", end);
+
+ if (begin < now) {
+ Log(LogDebug, "ScheduledDowntime") << "already running.";
+ continue;
+ }
+
+ if (!bestSegment || begin < bestBegin) {
+ Log(LogDebug, "ScheduledDowntime") << "(best match yet)";
+ bestSegment = segment;
+ bestBegin = begin;
+ bestEnd = end;
+ }
+ }
+
+ if (bestSegment)
+ return std::make_pair(bestBegin, bestEnd);
+
+ return std::make_pair(0, 0);
+}
+
+void ScheduledDowntime::CreateNextDowntime()
+{
+ /* HA enabled zones. */
+ if (IsActive() && IsPaused()) {
+ Log(LogNotice, "Checkable")
+ << "Skipping downtime creation for HA-paused Scheduled Downtime object '" << GetName() << "'";
+ return;
+ }
+
+ double minEnd = 0;
+ auto downtimeOptionsHash (HashDowntimeOptions());
+
+ for (const Downtime::Ptr& downtime : GetCheckable()->GetDowntimes()) {
+ if (downtime->GetScheduledBy() != GetName())
+ continue;
+
+ auto configOwnerHash (downtime->GetConfigOwnerHash());
+ if (!configOwnerHash.IsEmpty() && configOwnerHash != downtimeOptionsHash)
+ continue;
+
+ double end = downtime->GetEndTime();
+ if (end > minEnd)
+ minEnd = end;
+
+ if (downtime->GetStartTime() < Utility::GetTime())
+ continue;
+
+ /* We've found a downtime that is owned by us and that hasn't started yet - we're done. */
+ return;
+ }
+
+ Log(LogDebug, "ScheduledDowntime")
+ << "Creating new Downtime for ScheduledDowntime \"" << GetName() << "\"";
+
+ std::pair<double, double> segment = FindRunningSegment(minEnd);
+ if (segment.first == 0 && segment.second == 0) {
+ segment = FindNextSegment();
+ if (segment.first == 0 && segment.second == 0)
+ return;
+ }
+
+ Downtime::Ptr downtime = Downtime::AddDowntime(GetCheckable(), GetAuthor(), GetComment(),
+ segment.first, segment.second,
+ GetFixed(), String(), GetDuration(), GetName(), GetName());
+ String downtimeName = downtime->GetName();
+
+ int childOptions = Downtime::ChildOptionsFromValue(GetChildOptions());
+ if (childOptions > 0) {
+ /* 'DowntimeTriggeredChildren' schedules child downtimes triggered by the parent downtime.
+ * 'DowntimeNonTriggeredChildren' schedules non-triggered downtimes for all children.
+ */
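+		/* Note: only childOptions == 1 ('DowntimeTriggeredChildren') passes the
+		 * parent downtime on as trigger for the child downtimes created below. */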
+ String triggerName;
+ if (childOptions == 1)
+ triggerName = downtimeName;
+
+ Log(LogNotice, "ScheduledDowntime")
+ << "Processing child options " << childOptions << " for downtime " << downtimeName;
+
+ for (const Checkable::Ptr& child : GetCheckable()->GetAllChildren()) {
+ Log(LogNotice, "ScheduledDowntime")
+ << "Scheduling downtime for child object " << child->GetName();
+
+ Downtime::Ptr childDowntime = Downtime::AddDowntime(child, GetAuthor(), GetComment(),
+ segment.first, segment.second, GetFixed(), triggerName, GetDuration(), GetName(), GetName());
+
+ Log(LogNotice, "ScheduledDowntime")
+ << "Add child downtime '" << childDowntime->GetName() << "'.";
+ }
+ }
+}
+
+void ScheduledDowntime::RemoveObsoleteDowntimes()
+{
+ auto name (GetName());
+ auto downtimeOptionsHash (HashDowntimeOptions());
+
+	// Add a 5-minute grace period to make sure a downtime's start and its removal don't happen at the same time
+ auto threshold (Utility::GetTime() + 5 * 60);
+
+ for (const Downtime::Ptr& downtime : GetCheckable()->GetDowntimes()) {
+ if (downtime->GetScheduledBy() == name && downtime->GetStartTime() > threshold) {
+ auto configOwnerHash (downtime->GetConfigOwnerHash());
+
+ if (!configOwnerHash.IsEmpty() && configOwnerHash != downtimeOptionsHash)
+ Downtime::RemoveDowntime(downtime->GetName(), false, true);
+ }
+ }
+}
+
+void ScheduledDowntime::ValidateRanges(const Lazy<Dictionary::Ptr>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<ScheduledDowntime>::ValidateRanges(lvalue, utils);
+
+ if (!lvalue())
+ return;
+
+ /* create a fake time environment to validate the definitions */
+ time_t refts = Utility::GetTime();
+ tm reference = Utility::LocalTime(refts);
+ Array::Ptr segments = new Array();
+
+ ObjectLock olock(lvalue());
+ for (const Dictionary::Pair& kv : lvalue()) {
+ try {
+ tm begin_tm, end_tm;
+ int stride;
+ LegacyTimePeriod::ParseTimeRange(kv.first, &begin_tm, &end_tm, &stride, &reference);
+ } catch (const std::exception& ex) {
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "ranges" }, "Invalid time specification '" + kv.first + "': " + ex.what()));
+ }
+
+ try {
+ LegacyTimePeriod::ProcessTimeRanges(kv.second, &reference, segments);
+ } catch (const std::exception& ex) {
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "ranges" }, "Invalid time range definition '" + kv.second + "': " + ex.what()));
+ }
+ }
+}
+
+void ScheduledDowntime::ValidateChildOptions(const Lazy<Value>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<ScheduledDowntime>::ValidateChildOptions(lvalue, utils);
+
+ try {
+ Downtime::ChildOptionsFromValue(lvalue());
+ } catch (const std::exception&) {
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "child_options" }, "Invalid child_options specified"));
+ }
+}
+
+static const std::set<String> l_SDDowntimeOptions ({
+ "author", "child_options", "comment", "duration", "fixed", "ranges", "vars"
+});
+
+String ScheduledDowntime::HashDowntimeOptions()
+{
+ Dictionary::Ptr allOpts = Serialize(this, FAConfig);
+ Dictionary::Ptr opts = new Dictionary();
+
+ for (auto& opt : l_SDDowntimeOptions) {
+ opts->Set(opt, allOpts->Get(opt));
+ }
+
+ return SHA256(PackObject(opts));
+}
+
+bool ScheduledDowntime::AllConfigIsLoaded()
+{
+ return m_AllConfigLoaded.load();
+}
+
+std::atomic<bool> ScheduledDowntime::m_AllConfigLoaded (false);
diff --git a/lib/icinga/scheduleddowntime.hpp b/lib/icinga/scheduleddowntime.hpp
new file mode 100644
index 0000000..e701236
--- /dev/null
+++ b/lib/icinga/scheduleddowntime.hpp
@@ -0,0 +1,60 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef SCHEDULEDDOWNTIME_H
+#define SCHEDULEDDOWNTIME_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/scheduleddowntime-ti.hpp"
+#include "icinga/checkable.hpp"
+#include <atomic>
+
+namespace icinga
+{
+
+class ApplyRule;
+struct ScriptFrame;
+class Host;
+class Service;
+
+/**
+ * An Icinga scheduled downtime specification.
+ *
+ * @ingroup icinga
+ */
+class ScheduledDowntime final : public ObjectImpl<ScheduledDowntime>
+{
+public:
+ DECLARE_OBJECT(ScheduledDowntime);
+ DECLARE_OBJECTNAME(ScheduledDowntime);
+
+ Checkable::Ptr GetCheckable() const;
+
+ static void EvaluateApplyRules(const intrusive_ptr<Host>& host);
+ static void EvaluateApplyRules(const intrusive_ptr<Service>& service);
+ static bool AllConfigIsLoaded();
+
+ void ValidateRanges(const Lazy<Dictionary::Ptr>& lvalue, const ValidationUtils& utils) override;
+ void ValidateChildOptions(const Lazy<Value>& lvalue, const ValidationUtils& utils) override;
+ String HashDowntimeOptions();
+
+protected:
+ void OnAllConfigLoaded() override;
+ void Start(bool runtimeCreated) override;
+
+private:
+ static void TimerProc();
+
+ std::pair<double, double> FindRunningSegment(double minEnd = 0);
+ std::pair<double, double> FindNextSegment();
+ void CreateNextDowntime();
+ void RemoveObsoleteDowntimes();
+
+ static std::atomic<bool> m_AllConfigLoaded;
+
+ static bool EvaluateApplyRuleInstance(const Checkable::Ptr& checkable, const String& name, ScriptFrame& frame, const ApplyRule& rule, bool skipFilter);
+ static bool EvaluateApplyRule(const Checkable::Ptr& checkable, const ApplyRule& rule, bool skipFilter = false);
+};
+
+}
+
+#endif /* SCHEDULEDDOWNTIME_H */
diff --git a/lib/icinga/scheduleddowntime.ti b/lib/icinga/scheduleddowntime.ti
new file mode 100644
index 0000000..1653f27
--- /dev/null
+++ b/lib/icinga/scheduleddowntime.ti
@@ -0,0 +1,76 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/customvarobject.hpp"
+#impl_include "icinga/service.hpp"
+
+library icinga;
+
+namespace icinga
+{
+
+code {{{
+class ScheduledDowntimeNameComposer : public NameComposer
+{
+public:
+ virtual String MakeName(const String& shortName, const Object::Ptr& context) const;
+ virtual Dictionary::Ptr ParseName(const String& name) const;
+};
+}}}
+
+class ScheduledDowntime : CustomVarObject < ScheduledDowntimeNameComposer
+{
+ // Scheduled Downtimes have a dependency on Downtimes. This is to make sure ScheduledDowntimes are activated after
+ // the Downtimes (and other checkables)
+ activation_priority 20;
+
+ load_after Host;
+ load_after Service;
+
+ [config, protected, no_user_modify, required, navigation(host)] name(Host) host_name {
+ navigate {{{
+ return Host::GetByName(GetHostName());
+ }}}
+ };
+ [config, protected, no_user_modify, navigation(service)] String service_name {
+ track {{{
+ if (!oldValue.IsEmpty()) {
+ Service::Ptr service = Service::GetByNamePair(GetHostName(), oldValue);
+ DependencyGraph::RemoveDependency(this, service.get());
+ }
+
+ if (!newValue.IsEmpty()) {
+ Service::Ptr service = Service::GetByNamePair(GetHostName(), newValue);
+ DependencyGraph::AddDependency(this, service.get());
+ }
+ }}}
+ navigate {{{
+ if (GetServiceName().IsEmpty())
+ return nullptr;
+
+ Host::Ptr host = Host::GetByName(GetHostName());
+ return host->GetServiceByShortName(GetServiceName());
+ }}}
+ };
+
+ [config, required] String author;
+ [config, required] String comment;
+
+ [config] double duration;
+ [config] bool fixed {
+ default {{{ return true; }}}
+ };
+
+ [config] Value child_options {
+ default {{{ return "DowntimeNoChildren"; }}}
+ };
+
+ [config, required] Dictionary::Ptr ranges;
+};
+
+validator ScheduledDowntime {
+ Dictionary ranges {
+ String "*";
+ };
+};
+
+}
diff --git a/lib/icinga/service-apply.cpp b/lib/icinga/service-apply.cpp
new file mode 100644
index 0000000..4419e0b
--- /dev/null
+++ b/lib/icinga/service-apply.cpp
@@ -0,0 +1,133 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/service.hpp"
+#include "config/configitembuilder.hpp"
+#include "config/applyrule.hpp"
+#include "base/initialize.hpp"
+#include "base/configtype.hpp"
+#include "base/logger.hpp"
+#include "base/context.hpp"
+#include "base/workqueue.hpp"
+#include "base/exception.hpp"
+
+using namespace icinga;
+
+INITIALIZE_ONCE([]() {
+ ApplyRule::RegisterType("Service", { "Host" });
+});
+
+bool Service::EvaluateApplyRuleInstance(const Host::Ptr& host, const String& name, ScriptFrame& frame, const ApplyRule& rule, bool skipFilter)
+{
+ if (!skipFilter && !rule.EvaluateFilter(frame))
+ return false;
+
+ auto& di (rule.GetDebugInfo());
+
+#ifdef _DEBUG
+ Log(LogDebug, "Service")
+ << "Applying service '" << name << "' to host '" << host->GetName() << "' for rule " << di;
+#endif /* _DEBUG */
+
+ ConfigItemBuilder builder{di};
+ builder.SetType(Service::TypeInstance);
+ builder.SetName(name);
+ builder.SetScope(frame.Locals->ShallowClone());
+ builder.SetIgnoreOnError(rule.GetIgnoreOnError());
+
+ builder.AddExpression(new ImportDefaultTemplatesExpression());
+
+ builder.AddExpression(new SetExpression(MakeIndexer(ScopeThis, "host_name"), OpSetLiteral, MakeLiteral(host->GetName()), di));
+
+ builder.AddExpression(new SetExpression(MakeIndexer(ScopeThis, "name"), OpSetLiteral, MakeLiteral(name), di));
+
+ String zone = host->GetZoneName();
+
+ if (!zone.IsEmpty())
+ builder.AddExpression(new SetExpression(MakeIndexer(ScopeThis, "zone"), OpSetLiteral, MakeLiteral(zone), di));
+
+ builder.AddExpression(new SetExpression(MakeIndexer(ScopeThis, "package"), OpSetLiteral, MakeLiteral(rule.GetPackage()), di));
+
+ builder.AddExpression(new OwnedExpression(rule.GetExpression()));
+
+ ConfigItem::Ptr serviceItem = builder.Compile();
+ serviceItem->Register();
+
+ return true;
+}
+
+bool Service::EvaluateApplyRule(const Host::Ptr& host, const ApplyRule& rule, bool skipFilter)
+{
+ auto& di (rule.GetDebugInfo());
+
+ CONTEXT("Evaluating 'apply' rule (" << di << ")");
+
+ ScriptFrame frame(true);
+ if (rule.GetScope())
+ rule.GetScope()->CopyTo(frame.Locals);
+ frame.Locals->Set("host", host);
+
+ Value vinstances;
+
+ if (rule.GetFTerm()) {
+ try {
+ vinstances = rule.GetFTerm()->Evaluate(frame);
+ } catch (const std::exception&) {
+ /* Silently ignore errors here and assume there are no instances. */
+ return false;
+ }
+ } else {
+ vinstances = new Array({ "" });
+ }
+
+ bool match = false;
+
+ if (vinstances.IsObjectType<Array>()) {
+ if (!rule.GetFVVar().IsEmpty())
+ BOOST_THROW_EXCEPTION(ScriptError("Dictionary iterator requires value to be a dictionary.", di));
+
+ Array::Ptr arr = vinstances;
+
+ ObjectLock olock(arr);
+ for (const Value& instance : arr) {
+ String name = rule.GetName();
+
+ if (!rule.GetFKVar().IsEmpty()) {
+ frame.Locals->Set(rule.GetFKVar(), instance);
+ name += instance;
+ }
+
+ if (EvaluateApplyRuleInstance(host, name, frame, rule, skipFilter))
+ match = true;
+ }
+ } else if (vinstances.IsObjectType<Dictionary>()) {
+ if (rule.GetFVVar().IsEmpty())
+ BOOST_THROW_EXCEPTION(ScriptError("Array iterator requires value to be an array.", di));
+
+ Dictionary::Ptr dict = vinstances;
+
+ for (const String& key : dict->GetKeys()) {
+ frame.Locals->Set(rule.GetFKVar(), key);
+ frame.Locals->Set(rule.GetFVVar(), dict->Get(key));
+
+ if (EvaluateApplyRuleInstance(host, rule.GetName() + key, frame, rule, skipFilter))
+ match = true;
+ }
+ }
+
+ return match;
+}
+
+void Service::EvaluateApplyRules(const Host::Ptr& host)
+{
+ CONTEXT("Evaluating 'apply' rules for host '" << host->GetName() << "'");
+
+ for (auto& rule : ApplyRule::GetRules(Service::TypeInstance, Host::TypeInstance)) {
+ if (EvaluateApplyRule(host, *rule))
+ rule->AddMatch();
+ }
+
+ for (auto& rule : ApplyRule::GetTargetedHostRules(Service::TypeInstance, host->GetName())) {
+ if (EvaluateApplyRule(host, *rule, true))
+ rule->AddMatch();
+ }
+}
diff --git a/lib/icinga/service.cpp b/lib/icinga/service.cpp
new file mode 100644
index 0000000..d831136
--- /dev/null
+++ b/lib/icinga/service.cpp
@@ -0,0 +1,287 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/service.hpp"
+#include "icinga/service-ti.cpp"
+#include "icinga/servicegroup.hpp"
+#include "icinga/scheduleddowntime.hpp"
+#include "icinga/pluginutility.hpp"
+#include "base/objectlock.hpp"
+#include "base/convert.hpp"
+#include "base/utility.hpp"
+
+using namespace icinga;
+
+REGISTER_TYPE(Service);
+
+boost::signals2::signal<void (const Service::Ptr&, const CheckResult::Ptr&, const MessageOrigin::Ptr&)> Service::OnHostProblemChanged;
+
+String ServiceNameComposer::MakeName(const String& shortName, const Object::Ptr& context) const
+{
+ Service::Ptr service = dynamic_pointer_cast<Service>(context);
+
+ if (!service)
+ return "";
+
+ return service->GetHostName() + "!" + shortName;
+}
+
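+/* Descriptive note: the full name has the form "<host>!<short name>", e.g.
+ * "web01!http" is parsed into host_name "web01" and name "http". */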
+Dictionary::Ptr ServiceNameComposer::ParseName(const String& name) const
+{
+ std::vector<String> tokens = name.Split("!");
+
+ if (tokens.size() < 2)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid Service name."));
+
+ return new Dictionary({
+ { "host_name", tokens[0] },
+ { "name", tokens[1] }
+ });
+}
+
+void Service::OnAllConfigLoaded()
+{
+ ObjectImpl<Service>::OnAllConfigLoaded();
+
+ String zoneName = GetZoneName();
+
+ if (!zoneName.IsEmpty()) {
+ Zone::Ptr zone = Zone::GetByName(zoneName);
+
+ if (zone && zone->IsGlobal())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Service '" + GetName() + "' cannot be put into global zone '" + zone->GetName() + "'."));
+ }
+
+ m_Host = Host::GetByName(GetHostName());
+
+ if (m_Host)
+ m_Host->AddService(this);
+
+ ServiceGroup::EvaluateObjectRules(this);
+
+ Array::Ptr groups = GetGroups();
+
+ if (groups) {
+ groups = groups->ShallowClone();
+
+ ObjectLock olock(groups);
+
+ for (const String& name : groups) {
+ ServiceGroup::Ptr sg = ServiceGroup::GetByName(name);
+
+ if (sg)
+ sg->ResolveGroupMembership(this, true);
+ }
+ }
+}
+
+void Service::CreateChildObjects(const Type::Ptr& childType)
+{
+ if (childType == ScheduledDowntime::TypeInstance)
+ ScheduledDowntime::EvaluateApplyRules(this);
+
+ if (childType == Notification::TypeInstance)
+ Notification::EvaluateApplyRules(this);
+
+ if (childType == Dependency::TypeInstance)
+ Dependency::EvaluateApplyRules(this);
+}
+
+Service::Ptr Service::GetByNamePair(const String& hostName, const String& serviceName)
+{
+ if (!hostName.IsEmpty()) {
+ Host::Ptr host = Host::GetByName(hostName);
+
+ if (!host)
+ return nullptr;
+
+ return host->GetServiceByShortName(serviceName);
+ } else {
+ return Service::GetByName(serviceName);
+ }
+}
+
+Host::Ptr Service::GetHost() const
+{
+ return m_Host;
+}
+
+/* Keep in sync with Host::GetSeverity().
+ * One might think it smart to use an enum and some bitmask math here,
+ * but the only thing the consuming icingaweb2 cares about is being able to
+ * sort by severity. It is therefore easier to keep them separated here. */
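+/* Severity values used below: 16 = pending, 0 = OK; otherwise 32 = WARNING,
+ * 64 = UNKNOWN, 128 = CRITICAL, plus 1024 if the host is down, else 512 if
+ * acknowledged, 256 if in downtime or 2048 if unhandled. */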
+int Service::GetSeverity() const
+{
+ int severity;
+
+ ObjectLock olock(this);
+ ServiceState state = GetStateRaw();
+
+ if (!HasBeenChecked()) {
+ severity = 16;
+ } else if (state == ServiceOK) {
+ severity = 0;
+ } else {
+ switch (state) {
+ case ServiceWarning:
+ severity = 32;
+ break;
+ case ServiceUnknown:
+ severity = 64;
+ break;
+ case ServiceCritical:
+ severity = 128;
+ break;
+ default:
+ severity = 256;
+ }
+
+ Host::Ptr host = GetHost();
+ ObjectLock hlock (host);
+ if (host->GetState() != HostUp) {
+ severity += 1024;
+ } else {
+ if (IsAcknowledged())
+ severity += 512;
+ else if (IsInDowntime())
+ severity += 256;
+ else
+ severity += 2048;
+ }
+ hlock.Unlock();
+ }
+
+ olock.Unlock();
+
+ return severity;
+}
+
+bool Service::GetHandled() const
+{
+ return Checkable::GetHandled() || (m_Host && m_Host->GetProblem());
+}
+
+bool Service::IsStateOK(ServiceState state) const
+{
+ return state == ServiceOK;
+}
+
+void Service::SaveLastState(ServiceState state, double timestamp)
+{
+ if (state == ServiceOK)
+ SetLastStateOK(timestamp);
+ else if (state == ServiceWarning)
+ SetLastStateWarning(timestamp);
+ else if (state == ServiceCritical)
+ SetLastStateCritical(timestamp);
+ else if (state == ServiceUnknown)
+ SetLastStateUnknown(timestamp);
+}
+
+ServiceState Service::StateFromString(const String& state)
+{
+ if (state == "OK")
+ return ServiceOK;
+ else if (state == "WARNING")
+ return ServiceWarning;
+ else if (state == "CRITICAL")
+ return ServiceCritical;
+ else
+ return ServiceUnknown;
+}
+
+String Service::StateToString(ServiceState state)
+{
+ switch (state) {
+ case ServiceOK:
+ return "OK";
+ case ServiceWarning:
+ return "WARNING";
+ case ServiceCritical:
+ return "CRITICAL";
+ case ServiceUnknown:
+ default:
+ return "UNKNOWN";
+ }
+}
+
+StateType Service::StateTypeFromString(const String& type)
+{
+ if (type == "SOFT")
+ return StateTypeSoft;
+ else
+ return StateTypeHard;
+}
+
+String Service::StateTypeToString(StateType type)
+{
+ if (type == StateTypeSoft)
+ return "SOFT";
+ else
+ return "HARD";
+}
+
+bool Service::ResolveMacro(const String& macro, const CheckResult::Ptr& cr, Value *result) const
+{
+ if (macro == "state") {
+ *result = StateToString(GetState());
+ return true;
+ } else if (macro == "state_id") {
+ *result = GetState();
+ return true;
+ } else if (macro == "state_type") {
+ *result = StateTypeToString(GetStateType());
+ return true;
+ } else if (macro == "last_state") {
+ *result = StateToString(GetLastState());
+ return true;
+ } else if (macro == "last_state_id") {
+ *result = GetLastState();
+ return true;
+ } else if (macro == "last_state_type") {
+ *result = StateTypeToString(GetLastStateType());
+ return true;
+ } else if (macro == "last_state_change") {
+ *result = static_cast<long>(GetLastStateChange());
+ return true;
+ } else if (macro == "downtime_depth") {
+ *result = GetDowntimeDepth();
+ return true;
+ } else if (macro == "duration_sec") {
+ *result = Utility::GetTime() - GetLastStateChange();
+ return true;
+ }
+
+ if (cr) {
+ if (macro == "latency") {
+ *result = cr->CalculateLatency();
+ return true;
+ } else if (macro == "execution_time") {
+ *result = cr->CalculateExecutionTime();
+ return true;
+ } else if (macro == "output") {
+ *result = cr->GetOutput();
+ return true;
+ } else if (macro == "perfdata") {
+ *result = PluginUtility::FormatPerfdata(cr->GetPerformanceData());
+ return true;
+ } else if (macro == "check_source") {
+ *result = cr->GetCheckSource();
+ return true;
+ } else if (macro == "scheduling_source") {
+ *result = cr->GetSchedulingSource();
+ return true;
+ }
+ }
+
+ return false;
+}
+
+std::pair<Host::Ptr, Service::Ptr> icinga::GetHostService(const Checkable::Ptr& checkable)
+{
+ Service::Ptr service = dynamic_pointer_cast<Service>(checkable);
+
+ if (service)
+ return std::make_pair(service->GetHost(), service);
+ else
+ return std::make_pair(static_pointer_cast<Host>(checkable), nullptr);
+}
diff --git a/lib/icinga/service.hpp b/lib/icinga/service.hpp
new file mode 100644
index 0000000..ac27c3d
--- /dev/null
+++ b/lib/icinga/service.hpp
@@ -0,0 +1,65 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef SERVICE_H
+#define SERVICE_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/service-ti.hpp"
+#include "icinga/macroresolver.hpp"
+#include "icinga/host.hpp"
+#include <tuple>
+
+using std::tie;
+
+namespace icinga
+{
+
+/**
+ * An Icinga service.
+ *
+ * @ingroup icinga
+ */
+class Service final : public ObjectImpl<Service>, public MacroResolver
+{
+public:
+ DECLARE_OBJECT(Service);
+ DECLARE_OBJECTNAME(Service);
+
+ static Service::Ptr GetByNamePair(const String& hostName, const String& serviceName);
+
+ Host::Ptr GetHost() const override;
+ int GetSeverity() const override;
+ bool GetHandled() const override;
+
+ bool ResolveMacro(const String& macro, const CheckResult::Ptr& cr, Value *result) const override;
+
+ bool IsStateOK(ServiceState state) const override;
+ void SaveLastState(ServiceState state, double timestamp) override;
+
+ static ServiceState StateFromString(const String& state);
+ static String StateToString(ServiceState state);
+
+ static StateType StateTypeFromString(const String& state);
+ static String StateTypeToString(StateType state);
+
+ static void EvaluateApplyRules(const Host::Ptr& host);
+
+ void OnAllConfigLoaded() override;
+
+ static boost::signals2::signal<void (const Service::Ptr&, const CheckResult::Ptr&, const MessageOrigin::Ptr&)> OnHostProblemChanged;
+
+protected:
+ void CreateChildObjects(const Type::Ptr& childType) override;
+
+private:
+ Host::Ptr m_Host;
+
+ static bool EvaluateApplyRuleInstance(const Host::Ptr& host, const String& name, ScriptFrame& frame, const ApplyRule& rule, bool skipFilter);
+ static bool EvaluateApplyRule(const Host::Ptr& host, const ApplyRule& rule, bool skipFilter = false);
+};
+
+std::pair<Host::Ptr, Service::Ptr> GetHostService(const Checkable::Ptr& checkable);
+
+}
+
+#endif /* SERVICE_H */
diff --git a/lib/icinga/service.ti b/lib/icinga/service.ti
new file mode 100644
index 0000000..12c2d8c
--- /dev/null
+++ b/lib/icinga/service.ti
@@ -0,0 +1,71 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/checkable.hpp"
+#include "icinga/host.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "icinga/customvarobject.hpp"
+#impl_include "icinga/servicegroup.hpp"
+
+library icinga;
+
+namespace icinga
+{
+
+code {{{
+class ServiceNameComposer : public NameComposer
+{
+public:
+ virtual String MakeName(const String& shortName, const Object::Ptr& context) const;
+ virtual Dictionary::Ptr ParseName(const String& name) const;
+};
+}}}
+
+class Service : Checkable < ServiceNameComposer
+{
+ load_after ApiListener;
+ load_after Endpoint;
+ load_after Host;
+ load_after Zone;
+
+ [config, no_user_modify, required, signal_with_old_value] array(name(ServiceGroup)) groups {
+ default {{{ return new Array(); }}}
+ };
+
+ [config] String display_name {
+ get {{{
+ String displayName = m_DisplayName.load();
+ if (displayName.IsEmpty())
+ return GetShortName();
+ else
+ return displayName;
+ }}}
+ };
+ [config, no_user_modify, required] name(Host) host_name;
+ [no_storage, navigation] Host::Ptr host {
+ get;
+ navigate {{{
+ return GetHost();
+ }}}
+ };
+ [enum, no_storage] ServiceState "state" {
+ get {{{
+ return GetStateRaw();
+ }}}
+ };
+ [enum, no_storage] ServiceState last_state {
+ get {{{
+ return GetLastStateRaw();
+ }}}
+ };
+ [enum, no_storage] ServiceState last_hard_state {
+ get {{{
+ return GetLastHardStateRaw();
+ }}}
+ };
+ [state] Timestamp last_state_ok (LastStateOK);
+ [state] Timestamp last_state_warning;
+ [state] Timestamp last_state_critical;
+ [state] Timestamp last_state_unknown;
+};
+
+}
diff --git a/lib/icinga/servicegroup.cpp b/lib/icinga/servicegroup.cpp
new file mode 100644
index 0000000..d21f852
--- /dev/null
+++ b/lib/icinga/servicegroup.cpp
@@ -0,0 +1,111 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/servicegroup.hpp"
+#include "icinga/servicegroup-ti.cpp"
+#include "config/objectrule.hpp"
+#include "config/configitem.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+#include "base/context.hpp"
+#include "base/workqueue.hpp"
+
+using namespace icinga;
+
+REGISTER_TYPE(ServiceGroup);
+
+INITIALIZE_ONCE([]() {
+ ObjectRule::RegisterType("ServiceGroup");
+});
+
+bool ServiceGroup::EvaluateObjectRule(const Service::Ptr& service, const ConfigItem::Ptr& group)
+{
+ String groupName = group->GetName();
+
+ CONTEXT("Evaluating rule for group '" << groupName << "'");
+
+ Host::Ptr host = service->GetHost();
+
+ ScriptFrame frame(true);
+ if (group->GetScope())
+ group->GetScope()->CopyTo(frame.Locals);
+ frame.Locals->Set("host", host);
+ frame.Locals->Set("service", service);
+
+ if (!group->GetFilter()->Evaluate(frame).GetValue().ToBool())
+ return false;
+
+ Log(LogDebug, "ServiceGroup")
+ << "Assigning membership for group '" << groupName << "' to service '" << service->GetName() << "'";
+
+ Array::Ptr groups = service->GetGroups();
+
+ if (groups && !groups->Contains(groupName))
+ groups->Add(groupName);
+
+ return true;
+}
+
+void ServiceGroup::EvaluateObjectRules(const Service::Ptr& service)
+{
+ CONTEXT("Evaluating group membership for service '" << service->GetName() << "'");
+
+ for (const ConfigItem::Ptr& group : ConfigItem::GetItems(ServiceGroup::TypeInstance))
+ {
+ if (!group->GetFilter())
+ continue;
+
+ EvaluateObjectRule(service, group);
+ }
+}
+
+std::set<Service::Ptr> ServiceGroup::GetMembers() const
+{
+ std::unique_lock<std::mutex> lock(m_ServiceGroupMutex);
+ return m_Members;
+}
+
+void ServiceGroup::AddMember(const Service::Ptr& service)
+{
+ service->AddGroup(GetName());
+
+ std::unique_lock<std::mutex> lock(m_ServiceGroupMutex);
+ m_Members.insert(service);
+}
+
+void ServiceGroup::RemoveMember(const Service::Ptr& service)
+{
+ std::unique_lock<std::mutex> lock(m_ServiceGroupMutex);
+ m_Members.erase(service);
+}
+
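+/* Descriptive note: recursively adds the service to (or removes it from) this
+ * group and all of its parent groups; recursion is capped at 20 nested groups
+ * when adding, to guard against cyclic group definitions. */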
+bool ServiceGroup::ResolveGroupMembership(const Service::Ptr& service, bool add, int rstack) {
+
+ if (add && rstack > 20) {
+ Log(LogWarning, "ServiceGroup")
+ << "Too many nested groups for group '" << GetName() << "': Service '"
+ << service->GetName() << "' membership assignment failed.";
+
+ return false;
+ }
+
+ Array::Ptr groups = GetGroups();
+
+ if (groups && groups->GetLength() > 0) {
+ ObjectLock olock(groups);
+
+ for (const String& name : groups) {
+ ServiceGroup::Ptr group = ServiceGroup::GetByName(name);
+
+ if (group && !group->ResolveGroupMembership(service, add, rstack + 1))
+ return false;
+ }
+ }
+
+ if (add)
+ AddMember(service);
+ else
+ RemoveMember(service);
+
+ return true;
+}
diff --git a/lib/icinga/servicegroup.hpp b/lib/icinga/servicegroup.hpp
new file mode 100644
index 0000000..f2d0ab7
--- /dev/null
+++ b/lib/icinga/servicegroup.hpp
@@ -0,0 +1,43 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef SERVICEGROUP_H
+#define SERVICEGROUP_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/servicegroup-ti.hpp"
+#include "icinga/service.hpp"
+
+namespace icinga
+{
+
+class ConfigItem;
+
+/**
+ * An Icinga service group.
+ *
+ * @ingroup icinga
+ */
+class ServiceGroup final : public ObjectImpl<ServiceGroup>
+{
+public:
+ DECLARE_OBJECT(ServiceGroup);
+ DECLARE_OBJECTNAME(ServiceGroup);
+
+ std::set<Service::Ptr> GetMembers() const;
+ void AddMember(const Service::Ptr& service);
+ void RemoveMember(const Service::Ptr& service);
+
+ bool ResolveGroupMembership(const Service::Ptr& service, bool add = true, int rstack = 0);
+
+ static void EvaluateObjectRules(const Service::Ptr& service);
+
+private:
+ mutable std::mutex m_ServiceGroupMutex;
+ std::set<Service::Ptr> m_Members;
+
+ static bool EvaluateObjectRule(const Service::Ptr& service, const intrusive_ptr<ConfigItem>& group);
+};
+
+}
+
+#endif /* SERVICEGROUP_H */
diff --git a/lib/icinga/servicegroup.ti b/lib/icinga/servicegroup.ti
new file mode 100644
index 0000000..7daf9d4
--- /dev/null
+++ b/lib/icinga/servicegroup.ti
@@ -0,0 +1,28 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/customvarobject.hpp"
+
+library icinga;
+
+namespace icinga
+{
+
+class ServiceGroup : CustomVarObject
+{
+ [config] String display_name {
+ get {{{
+ String displayName = m_DisplayName.load();
+ if (displayName.IsEmpty())
+ return GetName();
+ else
+ return displayName;
+ }}}
+ };
+
+ [config, no_user_modify] array(name(ServiceGroup)) groups;
+ [config] String notes;
+ [config] String notes_url;
+ [config] String action_url;
+};
+
+}
diff --git a/lib/icinga/timeperiod.cpp b/lib/icinga/timeperiod.cpp
new file mode 100644
index 0000000..db3272e
--- /dev/null
+++ b/lib/icinga/timeperiod.cpp
@@ -0,0 +1,399 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/timeperiod.hpp"
+#include "icinga/timeperiod-ti.cpp"
+#include "icinga/legacytimeperiod.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/exception.hpp"
+#include "base/logger.hpp"
+#include "base/timer.hpp"
+#include "base/utility.hpp"
+#include <boost/thread/once.hpp>
+
+using namespace icinga;
+
+REGISTER_TYPE(TimePeriod);
+
+static Timer::Ptr l_UpdateTimer;
+
+void TimePeriod::Start(bool runtimeCreated)
+{
+ ObjectImpl<TimePeriod>::Start(runtimeCreated);
+
+ static boost::once_flag once = BOOST_ONCE_INIT;
+
+ boost::call_once(once, [this]() {
+ l_UpdateTimer = Timer::Create();
+ l_UpdateTimer->SetInterval(300);
+ l_UpdateTimer->OnTimerExpired.connect([](const Timer * const&) { UpdateTimerHandler(); });
+ l_UpdateTimer->Start();
+ });
+
+ /* Pre-fill the time period for the next 24 hours. */
+ double now = Utility::GetTime();
+ UpdateRegion(now, now + 24 * 3600, true);
+#ifdef _DEBUG
+ Dump();
+#endif /* _DEBUG */
+}
+
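+/* Descriptive note: merges the segment [begin, end] into the existing segments
+ * of this period, extending or swallowing overlapping segments, and widens the
+ * period's valid begin/end range accordingly. */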
+void TimePeriod::AddSegment(double begin, double end)
+{
+ ASSERT(OwnsLock());
+
+ Log(LogDebug, "TimePeriod")
+ << "Adding segment '" << Utility::FormatDateTime("%c", begin) << "' <-> '"
+ << Utility::FormatDateTime("%c", end) << "' to TimePeriod '" << GetName() << "'";
+
+ if (GetValidBegin().IsEmpty() || begin < GetValidBegin())
+ SetValidBegin(begin);
+
+ if (GetValidEnd().IsEmpty() || end > GetValidEnd())
+ SetValidEnd(end);
+
+ Array::Ptr segments = GetSegments();
+
+ if (segments) {
+ /* Try to merge the new segment into an existing segment. */
+ ObjectLock dlock(segments);
+ for (const Dictionary::Ptr& segment : segments) {
+ if (segment->Get("begin") <= begin && segment->Get("end") >= end)
+ return; /* New segment is fully contained in this segment. */
+
+ if (segment->Get("begin") >= begin && segment->Get("end") <= end) {
+ segment->Set("begin", begin);
+ segment->Set("end", end); /* Extend an existing segment to both sides */
+ return;
+ }
+
+ if (segment->Get("end") >= begin && segment->Get("end") <= end) {
+ segment->Set("end", end); /* Extend an existing segment to the right. */
+ return;
+ }
+
+ if (segment->Get("begin") >= begin && segment->Get("begin") <= end) {
+ segment->Set("begin", begin); /* Extend an existing segment to the left. */
+ return;
+ }
+
+ }
+ }
+
+ /* Create new segment if we weren't able to merge this into an existing segment. */
+ Dictionary::Ptr segment = new Dictionary({
+ { "begin", begin },
+ { "end", end }
+ });
+
+ if (!segments) {
+ segments = new Array();
+ SetSegments(segments);
+ }
+
+ segments->Add(segment);
+}
+
+void TimePeriod::AddSegment(const Dictionary::Ptr& segment)
+{
+ AddSegment(segment->Get("begin"), segment->Get("end"));
+}
+
+void TimePeriod::RemoveSegment(double begin, double end)
+{
+ ASSERT(OwnsLock());
+
+ Log(LogDebug, "TimePeriod")
+ << "Removing segment '" << Utility::FormatDateTime("%c", begin) << "' <-> '"
+ << Utility::FormatDateTime("%c", end) << "' from TimePeriod '" << GetName() << "'";
+
+ if (GetValidBegin().IsEmpty() || begin < GetValidBegin())
+ SetValidBegin(begin);
+
+ if (GetValidEnd().IsEmpty() || end > GetValidEnd())
+ SetValidEnd(end);
+
+ Array::Ptr segments = GetSegments();
+
+ if (!segments)
+ return;
+
+ Array::Ptr newSegments = new Array();
+
+ /* Try to split or adjust an existing segment. */
+ ObjectLock dlock(segments);
+ for (const Dictionary::Ptr& segment : segments) {
+ /* Fully contained in the specified range? */
+ if (segment->Get("begin") >= begin && segment->Get("end") <= end)
+ // Don't add the old segment, because it is fully contained within our range
+ continue;
+
+ /* Not overlapping at all? */
+ if (segment->Get("end") < begin || segment->Get("begin") > end) {
+ newSegments->Add(segment);
+ continue;
+ }
+
+ /* Cut between */
+ if (segment->Get("begin") < begin && segment->Get("end") > end) {
+ newSegments->Add(new Dictionary({
+ { "begin", segment->Get("begin") },
+ { "end", begin }
+ }));
+
+ newSegments->Add(new Dictionary({
+ { "begin", end },
+ { "end", segment->Get("end") }
+ }));
+ // Don't add the old segment, because we now have two new segments with a gap in between
+ continue;
+ }
+
+ /* Adjust the begin/end timestamps so as to not overlap with the specified range. */
+ if (segment->Get("begin") > begin && segment->Get("begin") < end)
+ segment->Set("begin", end);
+
+ if (segment->Get("end") > begin && segment->Get("end") < end)
+ segment->Set("end", begin);
+
+ newSegments->Add(segment);
+ }
+
+ SetSegments(newSegments);
+
+#ifdef _DEBUG
+ Dump();
+#endif /* _DEBUG */
+}
+
+void TimePeriod::RemoveSegment(const Dictionary::Ptr& segment)
+{
+ RemoveSegment(segment->Get("begin"), segment->Get("end"));
+}
+
+void TimePeriod::PurgeSegments(double end)
+{
+ ASSERT(OwnsLock());
+
+ Log(LogDebug, "TimePeriod")
+ << "Purging segments older than '" << Utility::FormatDateTime("%c", end)
+ << "' from TimePeriod '" << GetName() << "'";
+
+ if (GetValidBegin().IsEmpty() || end < GetValidBegin())
+ return;
+
+ SetValidBegin(end);
+
+ Array::Ptr segments = GetSegments();
+
+ if (!segments)
+ return;
+
+ Array::Ptr newSegments = new Array();
+
+ /* Remove old segments. */
+ ObjectLock dlock(segments);
+ for (const Dictionary::Ptr& segment : segments) {
+ if (segment->Get("end") >= end)
+ newSegments->Add(segment);
+ }
+
+ SetSegments(newSegments);
+}
+
+void TimePeriod::Merge(const TimePeriod::Ptr& timeperiod, bool include)
+{
+ Log(LogDebug, "TimePeriod")
+ << "Merge TimePeriod '" << GetName() << "' with '" << timeperiod->GetName() << "' "
+ << "Method: " << (include ? "include" : "exclude");
+
+ Array::Ptr segments = timeperiod->GetSegments();
+
+ if (segments) {
+ ObjectLock dlock(segments);
+ ObjectLock ilock(this);
+ for (const Dictionary::Ptr& segment : segments) {
+ include ? AddSegment(segment) : RemoveSegment(segment);
+ }
+ }
+}
+
+void TimePeriod::UpdateRegion(double begin, double end, bool clearExisting)
+{
+ if (clearExisting) {
+ ObjectLock olock(this);
+ SetSegments(new Array());
+ } else {
+ if (begin < GetValidEnd())
+ begin = GetValidEnd();
+
+ if (end < GetValidEnd())
+ return;
+ }
+
+ Array::Ptr segments = GetUpdate()->Invoke({ this, begin, end });
+
+ {
+ ObjectLock olock(this);
+ RemoveSegment(begin, end);
+
+ if (segments) {
+ ObjectLock dlock(segments);
+ for (const Dictionary::Ptr& segment : segments) {
+ AddSegment(segment);
+ }
+ }
+ }
+
+ bool preferInclude = GetPreferIncludes();
+
+ /* Handle the non-preferred time ranges first. */
+ Array::Ptr timeranges = preferInclude ? GetExcludes() : GetIncludes();
+
+ if (timeranges) {
+ ObjectLock olock(timeranges);
+ for (const String& name : timeranges) {
+ const TimePeriod::Ptr timeperiod = TimePeriod::GetByName(name);
+
+ if (timeperiod)
+ Merge(timeperiod, !preferInclude);
+ }
+ }
+
+ /* Preferred time ranges must be handled last. */
+ timeranges = preferInclude ? GetIncludes() : GetExcludes();
+
+ if (timeranges) {
+ ObjectLock olock(timeranges);
+ for (const String& name : timeranges) {
+ const TimePeriod::Ptr timeperiod = TimePeriod::GetByName(name);
+
+ if (timeperiod)
+ Merge(timeperiod, preferInclude);
+ }
+ }
+}
+
+bool TimePeriod::GetIsInside() const
+{
+ return IsInside(Utility::GetTime());
+}
+
+bool TimePeriod::IsInside(double ts) const
+{
+ ObjectLock olock(this);
+
+ if (GetValidBegin().IsEmpty() || ts < GetValidBegin() || GetValidEnd().IsEmpty() || ts > GetValidEnd())
+ return true; /* Assume that all invalid regions are "inside". */
+
+ Array::Ptr segments = GetSegments();
+
+ if (segments) {
+ ObjectLock dlock(segments);
+ for (const Dictionary::Ptr& segment : segments) {
+ if (ts > segment->Get("begin") && ts < segment->Get("end"))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+double TimePeriod::FindNextTransition(double begin)
+{
+ ObjectLock olock(this);
+
+ Array::Ptr segments = GetSegments();
+
+ double closestTransition = -1;
+
+ if (segments) {
+ ObjectLock dlock(segments);
+ for (const Dictionary::Ptr& segment : segments) {
+ if (segment->Get("begin") > begin && (segment->Get("begin") < closestTransition || closestTransition == -1))
+ closestTransition = segment->Get("begin");
+
+ if (segment->Get("end") > begin && (segment->Get("end") < closestTransition || closestTransition == -1))
+ closestTransition = segment->Get("end");
+ }
+ }
+
+ return closestTransition;
+}
+
+void TimePeriod::UpdateTimerHandler()
+{
+ double now = Utility::GetTime();
+
+ for (const TimePeriod::Ptr& tp : ConfigType::GetObjectsByType<TimePeriod>()) {
+ if (!tp->IsActive())
+ continue;
+
+ double valid_end;
+
+ {
+ ObjectLock olock(tp);
+ tp->PurgeSegments(now - 3600);
+
+ valid_end = tp->GetValidEnd();
+ }
+
+ tp->UpdateRegion(valid_end, now + 24 * 3600, false);
+#ifdef _DEBUG
+ tp->Dump();
+#endif /* _DEBUG */
+ }
+}
+
+void TimePeriod::Dump()
+{
+ ObjectLock olock(this);
+
+ Array::Ptr segments = GetSegments();
+
+ Log(LogDebug, "TimePeriod")
+ << "Dumping TimePeriod '" << GetName() << "'";
+
+ Log(LogDebug, "TimePeriod")
+ << "Valid from '" << Utility::FormatDateTime("%c", GetValidBegin())
+ << "' until '" << Utility::FormatDateTime("%c", GetValidEnd()) << "'";
+
+ if (segments) {
+ ObjectLock dlock(segments);
+ for (const Dictionary::Ptr& segment : segments) {
+ Log(LogDebug, "TimePeriod")
+ << "Segment: " << Utility::FormatDateTime("%c", segment->Get("begin")) << " <-> "
+ << Utility::FormatDateTime("%c", segment->Get("end"));
+ }
+ }
+
+ Log(LogDebug, "TimePeriod", "---");
+}
+
+void TimePeriod::ValidateRanges(const Lazy<Dictionary::Ptr>& lvalue, const ValidationUtils& utils)
+{
+ if (!lvalue())
+ return;
+
+ /* create a fake time environment to validate the definitions */
+ time_t refts = Utility::GetTime();
+ tm reference = Utility::LocalTime(refts);
+ Array::Ptr segments = new Array();
+
+ ObjectLock olock(lvalue());
+ for (const Dictionary::Pair& kv : lvalue()) {
+ try {
+ tm begin_tm, end_tm;
+ int stride;
+ LegacyTimePeriod::ParseTimeRange(kv.first, &begin_tm, &end_tm, &stride, &reference);
+ } catch (const std::exception& ex) {
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "ranges" }, "Invalid time specification '" + kv.first + "': " + ex.what()));
+ }
+
+ try {
+ LegacyTimePeriod::ProcessTimeRanges(kv.second, &reference, segments);
+ } catch (const std::exception& ex) {
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "ranges" }, "Invalid time range definition '" + kv.second + "': " + ex.what()));
+ }
+ }
+}
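AddSegment() and RemoveSegment() above implement plain interval arithmetic on the segments array: a new range is merged into at most one existing segment, and removing a range may shrink or split segments. Below is a minimal stand-alone sketch of those two operations, using a plain struct instead of Dictionary-based segments; the types and the values in main() are illustrative only.

#include <iostream>
#include <vector>

struct Segment { double begin, end; };

// Merge a new range into the segment list, extending an existing segment
// where possible (mirrors TimePeriod::AddSegment()).
void AddSegment(std::vector<Segment>& segments, double begin, double end)
{
	for (Segment& s : segments) {
		if (s.begin <= begin && s.end >= end)
			return;                                                              // fully contained, nothing to do
		if (s.begin >= begin && s.end <= end) { s.begin = begin; s.end = end; return; } // extend both sides
		if (s.end >= begin && s.end <= end) { s.end = end; return; }             // extend to the right
		if (s.begin >= begin && s.begin <= end) { s.begin = begin; return; }     // extend to the left
	}
	segments.push_back({begin, end});                                            // no overlap: new segment
}

// Remove a range, possibly splitting a segment into two (mirrors RemoveSegment()).
void RemoveSegment(std::vector<Segment>& segments, double begin, double end)
{
	std::vector<Segment> result;
	for (const Segment& s : segments) {
		if (s.begin >= begin && s.end <= end)
			continue;                                                            // swallowed completely
		if (s.end < begin || s.begin > end) { result.push_back(s); continue; }   // no overlap
		if (s.begin < begin && s.end > end) {                                    // cut a hole in the middle
			result.push_back({s.begin, begin});
			result.push_back({end, s.end});
			continue;
		}
		Segment t = s;                                                           // shrink one side
		if (t.begin > begin && t.begin < end) t.begin = end;
		if (t.end > begin && t.end < end) t.end = begin;
		result.push_back(t);
	}
	segments = result;
}

int main()
{
	std::vector<Segment> segs;
	AddSegment(segs, 0, 10);
	AddSegment(segs, 8, 15);   // merged into [0, 15]
	RemoveSegment(segs, 5, 7); // split into [0, 5] and [7, 15]
	for (const Segment& s : segs)
		std::cout << "[" << s.begin << ", " << s.end << "]\n";
}

As in the real implementation, each AddSegment() call merges against at most one existing segment, which keeps the logic simple. UpdateRegion() additionally controls precedence between included and excluded periods by merging the non-preferred set first and the preferred set last, so the preferred set wins on overlaps.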
diff --git a/lib/icinga/timeperiod.hpp b/lib/icinga/timeperiod.hpp
new file mode 100644
index 0000000..a5a2f73
--- /dev/null
+++ b/lib/icinga/timeperiod.hpp
@@ -0,0 +1,50 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef TIMEPERIOD_H
+#define TIMEPERIOD_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/timeperiod-ti.hpp"
+
+namespace icinga
+{
+
+/**
+ * A time period.
+ *
+ * @ingroup icinga
+ */
+class TimePeriod final : public ObjectImpl<TimePeriod>
+{
+public:
+ DECLARE_OBJECT(TimePeriod);
+ DECLARE_OBJECTNAME(TimePeriod);
+
+ void Start(bool runtimeCreated) override;
+
+ void UpdateRegion(double begin, double end, bool clearExisting);
+
+ bool GetIsInside() const override;
+
+ bool IsInside(double ts) const;
+ double FindNextTransition(double begin);
+
+ void ValidateRanges(const Lazy<Dictionary::Ptr>& lvalue, const ValidationUtils& utils) override;
+
+private:
+ void AddSegment(double s, double end);
+ void AddSegment(const Dictionary::Ptr& segment);
+ void RemoveSegment(double begin, double end);
+ void RemoveSegment(const Dictionary::Ptr& segment);
+ void PurgeSegments(double end);
+
+ void Merge(const TimePeriod::Ptr& timeperiod, bool include = true);
+
+ void Dump();
+
+ static void UpdateTimerHandler();
+};
+
+}
+
+#endif /* TIMEPERIOD_H */
diff --git a/lib/icinga/timeperiod.ti b/lib/icinga/timeperiod.ti
new file mode 100644
index 0000000..bba272e
--- /dev/null
+++ b/lib/icinga/timeperiod.ti
@@ -0,0 +1,47 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/customvarobject.hpp"
+#include "base/function.hpp"
+
+library icinga;
+
+namespace icinga
+{
+
+class TimePeriod : CustomVarObject
+{
+ [config] String display_name {
+ get {{{
+ String displayName = m_DisplayName.load();
+ if (displayName.IsEmpty())
+ return GetName();
+ else
+ return displayName;
+ }}}
+ };
+ [config, signal_with_old_value] Dictionary::Ptr ranges;
+ [config, required] Function::Ptr update;
+ [config] bool prefer_includes {
+ default {{{ return true; }}}
+ };
+ [config, required, signal_with_old_value] array(name(TimePeriod)) excludes {
+ default {{{ return new Array(); }}}
+ };
+ [config, required, signal_with_old_value] array(name(TimePeriod)) includes {
+ default {{{ return new Array(); }}}
+ };
+ [state, no_user_modify] Value valid_begin;
+ [state, no_user_modify] Value valid_end;
+ [state, no_user_modify] Array::Ptr segments;
+ [no_storage] bool is_inside {
+ get;
+ };
+};
+
+validator TimePeriod {
+ Dictionary ranges {
+ String "*";
+ };
+};
+
+}
diff --git a/lib/icinga/user.cpp b/lib/icinga/user.cpp
new file mode 100644
index 0000000..4d99db7
--- /dev/null
+++ b/lib/icinga/user.cpp
@@ -0,0 +1,103 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/user.hpp"
+#include "icinga/user-ti.cpp"
+#include "icinga/usergroup.hpp"
+#include "icinga/notification.hpp"
+#include "icinga/usergroup.hpp"
+#include "base/objectlock.hpp"
+#include "base/exception.hpp"
+
+using namespace icinga;
+
+REGISTER_TYPE(User);
+
+void User::OnConfigLoaded()
+{
+ ObjectImpl<User>::OnConfigLoaded();
+
+ SetTypeFilter(FilterArrayToInt(GetTypes(), Notification::GetTypeFilterMap(), ~0));
+ SetStateFilter(FilterArrayToInt(GetStates(), Notification::GetStateFilterMap(), ~0));
+}
+
+void User::OnAllConfigLoaded()
+{
+ ObjectImpl<User>::OnAllConfigLoaded();
+
+ UserGroup::EvaluateObjectRules(this);
+
+ Array::Ptr groups = GetGroups();
+
+ if (groups) {
+ groups = groups->ShallowClone();
+
+ ObjectLock olock(groups);
+
+ for (const String& name : groups) {
+ UserGroup::Ptr ug = UserGroup::GetByName(name);
+
+ if (ug)
+ ug->ResolveGroupMembership(this, true);
+ }
+ }
+}
+
+void User::Stop(bool runtimeRemoved)
+{
+ ObjectImpl<User>::Stop(runtimeRemoved);
+
+ Array::Ptr groups = GetGroups();
+
+ if (groups) {
+ ObjectLock olock(groups);
+
+ for (const String& name : groups) {
+ UserGroup::Ptr ug = UserGroup::GetByName(name);
+
+ if (ug)
+ ug->ResolveGroupMembership(this, false);
+ }
+ }
+}
+
+void User::AddGroup(const String& name)
+{
+ std::unique_lock<std::mutex> lock(m_UserMutex);
+
+ Array::Ptr groups = GetGroups();
+
+ if (groups && groups->Contains(name))
+ return;
+
+ if (!groups)
+ groups = new Array();
+
+ groups->Add(name);
+}
+
+TimePeriod::Ptr User::GetPeriod() const
+{
+ return TimePeriod::GetByName(GetPeriodRaw());
+}
+
+void User::ValidateStates(const Lazy<Array::Ptr>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<User>::ValidateStates(lvalue, utils);
+
+ int filter = FilterArrayToInt(lvalue(), Notification::GetStateFilterMap(), 0);
+
+ if (filter == -1 || (filter & ~(StateFilterUp | StateFilterDown | StateFilterOK | StateFilterWarning | StateFilterCritical | StateFilterUnknown)) != 0)
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "states" }, "State filter is invalid."));
+}
+
+void User::ValidateTypes(const Lazy<Array::Ptr>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<User>::ValidateTypes(lvalue, utils);
+
+ int filter = FilterArrayToInt(lvalue(), Notification::GetTypeFilterMap(), 0);
+
+ if (filter == -1 || (filter & ~(NotificationDowntimeStart | NotificationDowntimeEnd | NotificationDowntimeRemoved |
+ NotificationCustom | NotificationAcknowledgement | NotificationProblem | NotificationRecovery |
+ NotificationFlappingStart | NotificationFlappingEnd)) != 0)
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "types" }, "Type filter is invalid."));
+}
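The two validators above convert the configured states/types arrays into bitmasks via FilterArrayToInt() and then reject anything outside the allowed bits. A self-contained sketch of that pattern follows; the enum values and the local FilterArrayToInt() are illustrative stand-ins for Notification::GetStateFilterMap() and the real helper, whose third argument in the calls above is presumably the fallback used when no array is supplied at all.

#include <iostream>
#include <map>
#include <stdexcept>
#include <string>
#include <vector>

// Illustrative state filter bits (the real values live in the Icinga enums).
enum StateFilter {
	StateFilterOK       = 1 << 0,
	StateFilterWarning  = 1 << 1,
	StateFilterCritical = 1 << 2,
	StateFilterUnknown  = 1 << 3,
	StateFilterUp       = 1 << 4,
	StateFilterDown     = 1 << 5
};

// Convert a list of state names into a bitmask; an unknown name yields -1,
// mirroring how FilterArrayToInt() reports bad input.
int FilterArrayToInt(const std::vector<std::string>& names)
{
	static const std::map<std::string, int> filterMap = {
		{"OK", StateFilterOK}, {"Warning", StateFilterWarning},
		{"Critical", StateFilterCritical}, {"Unknown", StateFilterUnknown},
		{"Up", StateFilterUp}, {"Down", StateFilterDown}
	};

	int filter = 0;
	for (const auto& name : names) {
		auto it = filterMap.find(name);
		if (it == filterMap.end())
			return -1;
		filter |= it->second;
	}
	return filter;
}

void ValidateStates(const std::vector<std::string>& states)
{
	const int allowed = StateFilterUp | StateFilterDown | StateFilterOK |
		StateFilterWarning | StateFilterCritical | StateFilterUnknown;

	int filter = FilterArrayToInt(states);
	if (filter == -1 || (filter & ~allowed) != 0)
		throw std::invalid_argument("State filter is invalid.");
}

int main()
{
	ValidateStates({"OK", "Down"});  // fine
	try {
		ValidateStates({"Okayish"}); // rejected
	} catch (const std::exception& ex) {
		std::cout << ex.what() << "\n";
	}
}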
diff --git a/lib/icinga/user.hpp b/lib/icinga/user.hpp
new file mode 100644
index 0000000..14e59c2
--- /dev/null
+++ b/lib/icinga/user.hpp
@@ -0,0 +1,44 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef USER_H
+#define USER_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/user-ti.hpp"
+#include "icinga/timeperiod.hpp"
+#include "remote/messageorigin.hpp"
+
+namespace icinga
+{
+
+/**
+ * A User.
+ *
+ * @ingroup icinga
+ */
+class User final : public ObjectImpl<User>
+{
+public:
+ DECLARE_OBJECT(User);
+ DECLARE_OBJECTNAME(User);
+
+ void AddGroup(const String& name);
+
+ /* Notifications */
+ TimePeriod::Ptr GetPeriod() const;
+
+ void ValidateStates(const Lazy<Array::Ptr>& lvalue, const ValidationUtils& utils) override;
+ void ValidateTypes(const Lazy<Array::Ptr>& lvalue, const ValidationUtils& utils) override;
+
+protected:
+ void Stop(bool runtimeRemoved) override;
+
+ void OnConfigLoaded() override;
+ void OnAllConfigLoaded() override;
+private:
+ mutable std::mutex m_UserMutex;
+};
+
+}
+
+#endif /* USER_H */
diff --git a/lib/icinga/user.ti b/lib/icinga/user.ti
new file mode 100644
index 0000000..8b8c43a
--- /dev/null
+++ b/lib/icinga/user.ti
@@ -0,0 +1,47 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/customvarobject.hpp"
+#include "base/array.hpp"
+#impl_include "icinga/usergroup.hpp"
+
+library icinga;
+
+namespace icinga
+{
+
+class User : CustomVarObject
+{
+ [config] String display_name {
+ get {{{
+ String displayName = m_DisplayName.load();
+ if (displayName.IsEmpty())
+ return GetName();
+ else
+ return displayName;
+ }}}
+ };
+ [config, no_user_modify, required, signal_with_old_value] array(name(UserGroup)) groups {
+ default {{{ return new Array(); }}}
+ };
+ [config, navigation] name(TimePeriod) period (PeriodRaw) {
+ navigate {{{
+ return TimePeriod::GetByName(GetPeriodRaw());
+ }}}
+ };
+
+ [config] array(Value) types;
+ [no_user_view, no_user_modify] int type_filter_real (TypeFilter);
+ [config] array(Value) states;
+ [no_user_view, no_user_modify] int state_filter_real (StateFilter);
+
+ [config] String email;
+ [config] String pager;
+
+ [config] bool enable_notifications {
+ default {{{ return true; }}}
+ };
+
+ [state] Timestamp last_notification;
+};
+
+}
diff --git a/lib/icinga/usergroup.cpp b/lib/icinga/usergroup.cpp
new file mode 100644
index 0000000..27ae45b
--- /dev/null
+++ b/lib/icinga/usergroup.cpp
@@ -0,0 +1,128 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/usergroup.hpp"
+#include "icinga/usergroup-ti.cpp"
+#include "icinga/notification.hpp"
+#include "config/objectrule.hpp"
+#include "config/configitem.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+#include "base/context.hpp"
+#include "base/workqueue.hpp"
+
+using namespace icinga;
+
+REGISTER_TYPE(UserGroup);
+
+INITIALIZE_ONCE([]() {
+ ObjectRule::RegisterType("UserGroup");
+});
+
+bool UserGroup::EvaluateObjectRule(const User::Ptr& user, const ConfigItem::Ptr& group)
+{
+ String groupName = group->GetName();
+
+ CONTEXT("Evaluating rule for group '" << groupName << "'");
+
+ ScriptFrame frame(true);
+ if (group->GetScope())
+ group->GetScope()->CopyTo(frame.Locals);
+ frame.Locals->Set("user", user);
+
+ if (!group->GetFilter()->Evaluate(frame).GetValue().ToBool())
+ return false;
+
+ Log(LogDebug, "UserGroup")
+ << "Assigning membership for group '" << groupName << "' to user '" << user->GetName() << "'";
+
+ Array::Ptr groups = user->GetGroups();
+
+ if (groups && !groups->Contains(groupName))
+ groups->Add(groupName);
+
+ return true;
+}
+
+void UserGroup::EvaluateObjectRules(const User::Ptr& user)
+{
+ CONTEXT("Evaluating group membership for user '" << user->GetName() << "'");
+
+ for (const ConfigItem::Ptr& group : ConfigItem::GetItems(UserGroup::TypeInstance))
+ {
+ if (!group->GetFilter())
+ continue;
+
+ EvaluateObjectRule(user, group);
+ }
+}
+
+std::set<User::Ptr> UserGroup::GetMembers() const
+{
+ std::unique_lock<std::mutex> lock(m_UserGroupMutex);
+ return m_Members;
+}
+
+void UserGroup::AddMember(const User::Ptr& user)
+{
+ user->AddGroup(GetName());
+
+ std::unique_lock<std::mutex> lock(m_UserGroupMutex);
+ m_Members.insert(user);
+}
+
+void UserGroup::RemoveMember(const User::Ptr& user)
+{
+ std::unique_lock<std::mutex> lock(m_UserGroupMutex);
+ m_Members.erase(user);
+}
+
+std::set<Notification::Ptr> UserGroup::GetNotifications() const
+{
+ std::unique_lock<std::mutex> lock(m_UserGroupMutex);
+ return m_Notifications;
+}
+
+void UserGroup::AddNotification(const Notification::Ptr& notification)
+{
+ std::unique_lock<std::mutex> lock(m_UserGroupMutex);
+ m_Notifications.insert(notification);
+}
+
+void UserGroup::RemoveNotification(const Notification::Ptr& notification)
+{
+ std::unique_lock<std::mutex> lock(m_UserGroupMutex);
+ m_Notifications.erase(notification);
+}
+
+bool UserGroup::ResolveGroupMembership(const User::Ptr& user, bool add, int rstack)
+{
+ if (add && rstack > 20) {
+ Log(LogWarning, "UserGroup")
+ << "Too many nested groups for group '" << GetName() << "': User '"
+ << user->GetName() << "' membership assignment failed.";
+
+ return false;
+ }
+
+ Array::Ptr groups = GetGroups();
+
+ if (groups && groups->GetLength() > 0) {
+ ObjectLock olock(groups);
+
+ for (const String& name : groups) {
+ UserGroup::Ptr group = UserGroup::GetByName(name);
+
+ if (group && !group->ResolveGroupMembership(user, add, rstack + 1))
+ return false;
+ }
+ }
+
+ if (add)
+ AddMember(user);
+ else
+ RemoveMember(user);
+
+ return true;
+}
+
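EvaluateObjectRules()/EvaluateObjectRule() above implement the "assign where" mechanism: every UserGroup config item that carries a filter is evaluated against the user, and a match records the group name on the user. The sketch below models that with a plain predicate instead of a DSL expression evaluated in a ScriptFrame; User, GroupRule and the example rules are illustrative only.

#include <algorithm>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Illustrative stand-ins for User and an "assign where" rule.
struct User {
	std::string name;
	std::string email;
	std::vector<std::string> groups;
};

struct GroupRule {
	std::string groupName;
	std::function<bool(const User&)> filter; // empty means the group has no assign rule
};

// Mirrors UserGroup::EvaluateObjectRules(): for every group rule whose filter
// matches, record the membership on the user object (once).
void EvaluateObjectRules(User& user, const std::vector<GroupRule>& rules)
{
	for (const GroupRule& rule : rules) {
		if (!rule.filter)
			continue; // like the !group->GetFilter() check above

		if (rule.filter(user) &&
			std::find(user.groups.begin(), user.groups.end(), rule.groupName) == user.groups.end())
			user.groups.push_back(rule.groupName);
	}
}

int main()
{
	std::vector<GroupRule> rules = {
		{"oncall", [](const User& u) { return !u.email.empty(); }},
		{"unused", nullptr}
	};

	User alice{"alice", "alice@example.com", {}};
	EvaluateObjectRules(alice, rules);
	std::cout << alice.groups.size() << "\n"; // prints 1: alice joined "oncall"
}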
diff --git a/lib/icinga/usergroup.hpp b/lib/icinga/usergroup.hpp
new file mode 100644
index 0000000..c6f82a1
--- /dev/null
+++ b/lib/icinga/usergroup.hpp
@@ -0,0 +1,49 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef USERGROUP_H
+#define USERGROUP_H
+
+#include "icinga/i2-icinga.hpp"
+#include "icinga/usergroup-ti.hpp"
+#include "icinga/user.hpp"
+
+namespace icinga
+{
+
+class ConfigItem;
+class Notification;
+
+/**
+ * An Icinga user group.
+ *
+ * @ingroup icinga
+ */
+class UserGroup final : public ObjectImpl<UserGroup>
+{
+public:
+ DECLARE_OBJECT(UserGroup);
+ DECLARE_OBJECTNAME(UserGroup);
+
+ std::set<User::Ptr> GetMembers() const;
+ void AddMember(const User::Ptr& user);
+ void RemoveMember(const User::Ptr& user);
+
+ std::set<intrusive_ptr<Notification>> GetNotifications() const;
+ void AddNotification(const intrusive_ptr<Notification>& notification);
+ void RemoveNotification(const intrusive_ptr<Notification>& notification);
+
+ bool ResolveGroupMembership(const User::Ptr& user, bool add = true, int rstack = 0);
+
+ static void EvaluateObjectRules(const User::Ptr& user);
+
+private:
+ mutable std::mutex m_UserGroupMutex;
+ std::set<User::Ptr> m_Members;
+ std::set<intrusive_ptr<Notification>> m_Notifications;
+
+ static bool EvaluateObjectRule(const User::Ptr& user, const intrusive_ptr<ConfigItem>& group);
+};
+
+}
+
+#endif /* USERGROUP_H */
diff --git a/lib/icinga/usergroup.ti b/lib/icinga/usergroup.ti
new file mode 100644
index 0000000..e955c5e
--- /dev/null
+++ b/lib/icinga/usergroup.ti
@@ -0,0 +1,25 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/customvarobject.hpp"
+
+library icinga;
+
+namespace icinga
+{
+
+class UserGroup : CustomVarObject
+{
+ [config] String display_name {
+ get {{{
+ String displayName = m_DisplayName.load();
+ if (displayName.IsEmpty())
+ return GetName();
+ else
+ return displayName;
+ }}}
+ };
+
+ [config, no_user_modify] array(name(UserGroup)) groups;
+};
+
+}
diff --git a/lib/icingadb/CMakeLists.txt b/lib/icingadb/CMakeLists.txt
new file mode 100644
index 0000000..de8e4ad
--- /dev/null
+++ b/lib/icingadb/CMakeLists.txt
@@ -0,0 +1,32 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+mkclass_target(icingadb.ti icingadb-ti.cpp icingadb-ti.hpp)
+
+mkembedconfig_target(icingadb-itl.conf icingadb-itl.cpp)
+
+set(icingadb_SOURCES
+ icingadb.cpp icingadb-objects.cpp icingadb-stats.cpp icingadb-utility.cpp redisconnection.cpp icingadb-ti.hpp
+ icingadbchecktask.cpp icingadb-itl.cpp
+)
+
+if(ICINGA2_UNITY_BUILD)
+ mkunity_target(icingadb icingadb icingadb_SOURCES)
+endif()
+
+add_library(icingadb OBJECT ${icingadb_SOURCES})
+
+include_directories(${icinga2_SOURCE_DIR}/third-party)
+
+add_dependencies(icingadb base config icinga remote)
+
+set_target_properties (
+ icingadb PROPERTIES
+ FOLDER Components
+)
+
+install_if_not_exists(
+ ${PROJECT_SOURCE_DIR}/etc/icinga2/features-available/icingadb.conf
+ ${CMAKE_INSTALL_SYSCONFDIR}/icinga2/features-available
+)
+
+set(CPACK_NSIS_EXTRA_INSTALL_COMMANDS "${CPACK_NSIS_EXTRA_INSTALL_COMMANDS}" PARENT_SCOPE)
diff --git a/lib/icingadb/icingadb-itl.conf b/lib/icingadb/icingadb-itl.conf
new file mode 100644
index 0000000..5f3950e
--- /dev/null
+++ b/lib/icingadb/icingadb-itl.conf
@@ -0,0 +1,24 @@
+/* Icinga 2 | (c) 2022 Icinga GmbH | GPLv2+ */
+
+System.assert(Internal.run_with_activation_context(function() {
+ template CheckCommand "icingadb-check-command" use (checkFunc = Internal.IcingadbCheck) {
+ execute = checkFunc
+ }
+
+ object CheckCommand "icingadb" {
+ import "icingadb-check-command"
+
+ vars.icingadb_name = "icingadb"
+
+ vars.icingadb_full_dump_duration_warning = 5m
+ vars.icingadb_full_dump_duration_critical = 10m
+ vars.icingadb_full_sync_duration_warning = 5m
+ vars.icingadb_full_sync_duration_critical = 10m
+ vars.icingadb_redis_backlog_warning = 5m
+ vars.icingadb_redis_backlog_critical = 15m
+ vars.icingadb_database_backlog_warning = 5m
+ vars.icingadb_database_backlog_critical = 15m
+ }
+}))
+
+Internal.remove("IcingadbCheck")
diff --git a/lib/icingadb/icingadb-objects.cpp b/lib/icingadb/icingadb-objects.cpp
new file mode 100644
index 0000000..ff7a833
--- /dev/null
+++ b/lib/icingadb/icingadb-objects.cpp
@@ -0,0 +1,2966 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icingadb/icingadb.hpp"
+#include "icingadb/redisconnection.hpp"
+#include "base/configtype.hpp"
+#include "base/configobject.hpp"
+#include "base/defer.hpp"
+#include "base/json.hpp"
+#include "base/logger.hpp"
+#include "base/serializer.hpp"
+#include "base/shared.hpp"
+#include "base/tlsutility.hpp"
+#include "base/initialize.hpp"
+#include "base/convert.hpp"
+#include "base/array.hpp"
+#include "base/exception.hpp"
+#include "base/utility.hpp"
+#include "base/object-packer.hpp"
+#include "icinga/command.hpp"
+#include "icinga/compatutility.hpp"
+#include "icinga/customvarobject.hpp"
+#include "icinga/host.hpp"
+#include "icinga/service.hpp"
+#include "icinga/hostgroup.hpp"
+#include "icinga/servicegroup.hpp"
+#include "icinga/usergroup.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/eventcommand.hpp"
+#include "icinga/notificationcommand.hpp"
+#include "icinga/timeperiod.hpp"
+#include "icinga/pluginutility.hpp"
+#include "remote/zone.hpp"
+#include <algorithm>
+#include <chrono>
+#include <cmath>
+#include <cstdint>
+#include <iterator>
+#include <map>
+#include <memory>
+#include <mutex>
+#include <set>
+#include <utility>
+#include <type_traits>
+
+using namespace icinga;
+
+using Prio = RedisConnection::QueryPriority;
+
+std::unordered_set<Type*> IcingaDB::m_IndexedTypes;
+
+INITIALIZE_ONCE(&IcingaDB::ConfigStaticInitialize);
+
+std::vector<Type::Ptr> IcingaDB::GetTypes()
+{
+ // The initial config sync will queue the types in the following order.
+ return {
+ // Sync them first to get their states ASAP.
+ Host::TypeInstance,
+ Service::TypeInstance,
+
+ // Then sync downtimes and comments, for similar reasons.
+ Downtime::TypeInstance,
+ Comment::TypeInstance,
+
+ HostGroup::TypeInstance,
+ ServiceGroup::TypeInstance,
+ CheckCommand::TypeInstance,
+ Endpoint::TypeInstance,
+ EventCommand::TypeInstance,
+ Notification::TypeInstance,
+ NotificationCommand::TypeInstance,
+ TimePeriod::TypeInstance,
+ User::TypeInstance,
+ UserGroup::TypeInstance,
+ Zone::TypeInstance
+ };
+}
+
+void IcingaDB::ConfigStaticInitialize()
+{
+ for (auto& type : GetTypes()) {
+ m_IndexedTypes.emplace(type.get());
+ }
+
+ /* Triggered in ProcessCheckResult(); requires UpdateNextCheck() to be called beforehand. */
+ Checkable::OnStateChange.connect([](const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, StateType type, const MessageOrigin::Ptr&) {
+ IcingaDB::StateChangeHandler(checkable, cr, type);
+ });
+
+ Checkable::OnAcknowledgementSet.connect([](const Checkable::Ptr& checkable, const String& author, const String& comment, AcknowledgementType type, bool, bool persistent, double changeTime, double expiry, const MessageOrigin::Ptr&) {
+ AcknowledgementSetHandler(checkable, author, comment, type, persistent, changeTime, expiry);
+ });
+ Checkable::OnAcknowledgementCleared.connect([](const Checkable::Ptr& checkable, const String& removedBy, double changeTime, const MessageOrigin::Ptr&) {
+ AcknowledgementClearedHandler(checkable, removedBy, changeTime);
+ });
+
+ Checkable::OnReachabilityChanged.connect([](const Checkable::Ptr&, const CheckResult::Ptr&, std::set<Checkable::Ptr> children, const MessageOrigin::Ptr&) {
+ IcingaDB::ReachabilityChangeHandler(children);
+ });
+
+ /* triggered on create, update and delete objects */
+ ConfigObject::OnActiveChanged.connect([](const ConfigObject::Ptr& object, const Value&) {
+ IcingaDB::VersionChangedHandler(object);
+ });
+ ConfigObject::OnVersionChanged.connect([](const ConfigObject::Ptr& object, const Value&) {
+ IcingaDB::VersionChangedHandler(object);
+ });
+
+ /* downtime start */
+ Downtime::OnDowntimeTriggered.connect(&IcingaDB::DowntimeStartedHandler);
+ /* fixed/flexible downtime end or remove */
+ Downtime::OnDowntimeRemoved.connect(&IcingaDB::DowntimeRemovedHandler);
+
+ Checkable::OnNotificationSentToAllUsers.connect([](
+ const Notification::Ptr& notification, const Checkable::Ptr& checkable, const std::set<User::Ptr>& users,
+ const NotificationType& type, const CheckResult::Ptr& cr, const String& author, const String& text,
+ const MessageOrigin::Ptr&
+ ) {
+ IcingaDB::NotificationSentToAllUsersHandler(notification, checkable, users, type, cr, author, text);
+ });
+
+ Comment::OnCommentAdded.connect(&IcingaDB::CommentAddedHandler);
+ Comment::OnCommentRemoved.connect(&IcingaDB::CommentRemovedHandler);
+
+ Checkable::OnFlappingChange.connect(&IcingaDB::FlappingChangeHandler);
+
+ Checkable::OnNewCheckResult.connect([](const Checkable::Ptr& checkable, const CheckResult::Ptr&, const MessageOrigin::Ptr&) {
+ IcingaDB::NewCheckResultHandler(checkable);
+ });
+
+ Checkable::OnNextCheckUpdated.connect([](const Checkable::Ptr& checkable) {
+ IcingaDB::NextCheckUpdatedHandler(checkable);
+ });
+
+ Service::OnHostProblemChanged.connect([](const Service::Ptr& service, const CheckResult::Ptr&, const MessageOrigin::Ptr&) {
+ IcingaDB::HostProblemChangedHandler(service);
+ });
+
+ Notification::OnUsersRawChangedWithOldValue.connect([](const Notification::Ptr& notification, const Value& oldValues, const Value& newValues) {
+ IcingaDB::NotificationUsersChangedHandler(notification, oldValues, newValues);
+ });
+ Notification::OnUserGroupsRawChangedWithOldValue.connect([](const Notification::Ptr& notification, const Value& oldValues, const Value& newValues) {
+ IcingaDB::NotificationUserGroupsChangedHandler(notification, oldValues, newValues);
+ });
+ TimePeriod::OnRangesChangedWithOldValue.connect([](const TimePeriod::Ptr& timeperiod, const Value& oldValues, const Value& newValues) {
+ IcingaDB::TimePeriodRangesChangedHandler(timeperiod, oldValues, newValues);
+ });
+ TimePeriod::OnIncludesChangedWithOldValue.connect([](const TimePeriod::Ptr& timeperiod, const Value& oldValues, const Value& newValues) {
+ IcingaDB::TimePeriodIncludesChangedHandler(timeperiod, oldValues, newValues);
+ });
+ TimePeriod::OnExcludesChangedWithOldValue.connect([](const TimePeriod::Ptr& timeperiod, const Value& oldValues, const Value& newValues) {
+ IcingaDB::TimePeriodExcludesChangedHandler(timeperiod, oldValues, newValues);
+ });
+ User::OnGroupsChangedWithOldValue.connect([](const User::Ptr& user, const Value& oldValues, const Value& newValues) {
+ IcingaDB::UserGroupsChangedHandler(user, oldValues, newValues);
+ });
+ Host::OnGroupsChangedWithOldValue.connect([](const Host::Ptr& host, const Value& oldValues, const Value& newValues) {
+ IcingaDB::HostGroupsChangedHandler(host, oldValues, newValues);
+ });
+ Service::OnGroupsChangedWithOldValue.connect([](const Service::Ptr& service, const Value& oldValues, const Value& newValues) {
+ IcingaDB::ServiceGroupsChangedHandler(service, oldValues, newValues);
+ });
+ Command::OnEnvChangedWithOldValue.connect([](const ConfigObject::Ptr& command, const Value& oldValues, const Value& newValues) {
+ IcingaDB::CommandEnvChangedHandler(command, oldValues, newValues);
+ });
+ Command::OnArgumentsChangedWithOldValue.connect([](const ConfigObject::Ptr& command, const Value& oldValues, const Value& newValues) {
+ IcingaDB::CommandArgumentsChangedHandler(command, oldValues, newValues);
+ });
+ CustomVarObject::OnVarsChangedWithOldValue.connect([](const ConfigObject::Ptr& object, const Value& oldValues, const Value& newValues) {
+ IcingaDB::CustomVarsChangedHandler(object, oldValues, newValues);
+ });
+}
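ConfigStaticInitialize() registers the feature's handlers exactly once by connecting lambdas to the core's signals. Below is a minimal sketch of that registration pattern, assuming boost::signals2-style signals, which the connect() calls above resemble; the signal name and its string payload are illustrative.

#include <boost/signals2.hpp>
#include <iostream>
#include <string>

// Illustrative stand-in for a config object event; the real Icinga signals carry
// smart pointers to Checkable, CheckResult, etc.
boost::signals2::signal<void(const std::string&)> OnVersionChanged;

int main()
{
	// Same pattern as ConfigStaticInitialize(): register a lambda once at startup
	// that forwards the event to the feature's handler.
	OnVersionChanged.connect([](const std::string& objectName) {
		std::cout << "queueing config update for " << objectName << "\n";
	});

	OnVersionChanged("host!web01"); // emitting the signal invokes all connected handlers
}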
+
+void IcingaDB::UpdateAllConfigObjects()
+{
+ m_Rcon->Sync();
+ m_Rcon->FireAndForgetQuery({"XADD", "icinga:schema", "MAXLEN", "1", "*", "version", "5"}, Prio::Heartbeat);
+
+ Log(LogInformation, "IcingaDB") << "Starting initial config/status dump";
+ double startTime = Utility::GetTime();
+
+ SetOngoingDumpStart(startTime);
+
+ Defer resetOngoingDumpStart ([this]() {
+ SetOngoingDumpStart(0);
+ });
+
+ // Use a WorkQueue to pack objects in parallel.
+ WorkQueue upq(25000, Configuration::Concurrency, LogNotice);
+ upq.SetName("IcingaDB:ConfigDump");
+
+ std::vector<Type::Ptr> types = GetTypes();
+
+ m_Rcon->SuppressQueryKind(Prio::CheckResult);
+ m_Rcon->SuppressQueryKind(Prio::RuntimeStateSync);
+
+ Defer unSuppress ([this]() {
+ m_Rcon->UnsuppressQueryKind(Prio::RuntimeStateSync);
+ m_Rcon->UnsuppressQueryKind(Prio::CheckResult);
+ });
+
+ // Add a new type=* state=wip entry to the stream and remove all previous entries (MAXLEN 1).
+ m_Rcon->FireAndForgetQuery({"XADD", "icinga:dump", "MAXLEN", "1", "*", "key", "*", "state", "wip"}, Prio::Config);
+
+ const std::vector<String> globalKeys = {
+ m_PrefixConfigObject + "customvar",
+ m_PrefixConfigObject + "action:url",
+ m_PrefixConfigObject + "notes:url",
+ m_PrefixConfigObject + "icon:image",
+ };
+ DeleteKeys(m_Rcon, globalKeys, Prio::Config);
+ DeleteKeys(m_Rcon, {"icinga:nextupdate:host", "icinga:nextupdate:service"}, Prio::Config);
+ m_Rcon->Sync();
+
+ Defer resetDumpedGlobals ([this]() {
+ m_DumpedGlobals.CustomVar.Reset();
+ m_DumpedGlobals.ActionUrl.Reset();
+ m_DumpedGlobals.NotesUrl.Reset();
+ m_DumpedGlobals.IconImage.Reset();
+ });
+
+ upq.ParallelFor(types, false, [this](const Type::Ptr& type) {
+ String lcType = type->GetName().ToLower();
+ ConfigType *ctype = dynamic_cast<ConfigType *>(type.get());
+ if (!ctype)
+ return;
+
+ auto& rcon (m_Rcons.at(ctype));
+
+ std::vector<String> keys = GetTypeOverwriteKeys(lcType);
+ DeleteKeys(rcon, keys, Prio::Config);
+
+ WorkQueue upqObjectType(25000, Configuration::Concurrency, LogNotice);
+ upqObjectType.SetName("IcingaDB:ConfigDump:" + lcType);
+
+ std::map<String, String> redisCheckSums;
+ String configCheckSum = m_PrefixConfigCheckSum + lcType;
+
+ upqObjectType.Enqueue([&rcon, &configCheckSum, &redisCheckSums]() {
+ String cursor = "0";
+
+ do {
+ Array::Ptr res = rcon->GetResultOfQuery({
+ "HSCAN", configCheckSum, cursor, "COUNT", "1000"
+ }, Prio::Config);
+
+ AddKvsToMap(res->Get(1), redisCheckSums);
+
+ cursor = res->Get(0);
+ } while (cursor != "0");
+ });
+
+ auto objectChunks (ChunkObjects(ctype->GetObjects(), 500));
+ String configObject = m_PrefixConfigObject + lcType;
+
+ // Attribute and checksum HMSET keys and values, skimmed from the per-chunk buffers and grouped by Redis key.
+ std::map<String, std::vector<std::vector<String>>> ourContentRaw {{configCheckSum, {}}, {configObject, {}}};
+ std::mutex ourContentMutex;
+
+ upqObjectType.ParallelFor(objectChunks, [&](decltype(objectChunks)::const_reference chunk) {
+ std::map<String, std::vector<String>> hMSets;
+ // Two values are appended per object: Object ID (Hash encoded) and Object State (IcingaDB::SerializeState() -> JSON encoded)
+ std::vector<String> states = {"HMSET", m_PrefixConfigObject + lcType + ":state"};
+ // Two values are appended per object: Object ID (Hash encoded) and State Checksum ({ "checksum": checksum } -> JSON encoded)
+ std::vector<String> statesChksms = {"HMSET", m_PrefixConfigCheckSum + lcType + ":state"};
+ std::vector<std::vector<String> > transaction = {{"MULTI"}};
+ std::vector<String> hostZAdds = {"ZADD", "icinga:nextupdate:host"}, serviceZAdds = {"ZADD", "icinga:nextupdate:service"};
+
+ auto skimObjects ([&]() {
+ std::lock_guard<std::mutex> l (ourContentMutex);
+
+ for (auto& kv : ourContentRaw) {
+ auto pos (hMSets.find(kv.first));
+
+ if (pos != hMSets.end()) {
+ kv.second.emplace_back(std::move(pos->second));
+ hMSets.erase(pos);
+ }
+ }
+ });
+
+ bool dumpState = (lcType == "host" || lcType == "service");
+
+ size_t bulkCounter = 0;
+ for (const ConfigObject::Ptr& object : chunk) {
+ if (lcType != GetLowerCaseTypeNameDB(object))
+ continue;
+
+ std::vector<Dictionary::Ptr> runtimeUpdates;
+ CreateConfigUpdate(object, lcType, hMSets, runtimeUpdates, false);
+
+ // Write out the initial state for checkables.
+ if (dumpState) {
+ String objectKey = GetObjectIdentifier(object);
+ Dictionary::Ptr state = SerializeState(dynamic_pointer_cast<Checkable>(object));
+
+ states.emplace_back(objectKey);
+ states.emplace_back(JsonEncode(state));
+
+ statesChksms.emplace_back(objectKey);
+ statesChksms.emplace_back(JsonEncode(new Dictionary({{"checksum", HashValue(state)}})));
+ }
+
+ bulkCounter++;
+ if (!(bulkCounter % 100)) {
+ skimObjects();
+
+ for (auto& kv : hMSets) {
+ if (!kv.second.empty()) {
+ kv.second.insert(kv.second.begin(), {"HMSET", kv.first});
+ transaction.emplace_back(std::move(kv.second));
+ }
+ }
+
+ if (states.size() > 2) {
+ transaction.emplace_back(std::move(states));
+ transaction.emplace_back(std::move(statesChksms));
+ states = {"HMSET", m_PrefixConfigObject + lcType + ":state"};
+ statesChksms = {"HMSET", m_PrefixConfigCheckSum + lcType + ":state"};
+ }
+
+ hMSets = decltype(hMSets)();
+
+ if (transaction.size() > 1) {
+ transaction.push_back({"EXEC"});
+ rcon->FireAndForgetQueries(std::move(transaction), Prio::Config);
+ transaction = {{"MULTI"}};
+ }
+ }
+
+ auto checkable (dynamic_pointer_cast<Checkable>(object));
+
+ if (checkable && checkable->GetEnableActiveChecks()) {
+ auto zAdds (dynamic_pointer_cast<Service>(checkable) ? &serviceZAdds : &hostZAdds);
+
+ zAdds->emplace_back(Convert::ToString(checkable->GetNextUpdate()));
+ zAdds->emplace_back(GetObjectIdentifier(checkable));
+
+ if (zAdds->size() >= 102u) {
+ std::vector<String> header (zAdds->begin(), zAdds->begin() + 2u);
+
+ rcon->FireAndForgetQuery(std::move(*zAdds), Prio::CheckResult);
+
+ *zAdds = std::move(header);
+ }
+ }
+ }
+
+ skimObjects();
+
+ for (auto& kv : hMSets) {
+ if (!kv.second.empty()) {
+ kv.second.insert(kv.second.begin(), {"HMSET", kv.first});
+ transaction.emplace_back(std::move(kv.second));
+ }
+ }
+
+ if (states.size() > 2) {
+ transaction.emplace_back(std::move(states));
+ transaction.emplace_back(std::move(statesChksms));
+ }
+
+ if (transaction.size() > 1) {
+ transaction.push_back({"EXEC"});
+ rcon->FireAndForgetQueries(std::move(transaction), Prio::Config);
+ }
+
+ for (auto zAdds : {&hostZAdds, &serviceZAdds}) {
+ if (zAdds->size() > 2u) {
+ rcon->FireAndForgetQuery(std::move(*zAdds), Prio::CheckResult);
+ }
+ }
+
+ Log(LogNotice, "IcingaDB")
+ << "Dumped " << bulkCounter << " objects of type " << lcType;
+ });
+
+ upqObjectType.Join();
+
+ if (upqObjectType.HasExceptions()) {
+ for (boost::exception_ptr exc : upqObjectType.GetExceptions()) {
+ if (exc) {
+ boost::rethrow_exception(exc);
+ }
+ }
+ }
+
+ std::map<String, std::map<String, String>> ourContent;
+
+ for (auto& source : ourContentRaw) {
+ auto& dest (ourContent[source.first]);
+
+ upqObjectType.Enqueue([&]() {
+ for (auto& hMSet : source.second) {
+ for (decltype(hMSet.size()) i = 0, stop = hMSet.size() - 1u; i < stop; i += 2u) {
+ dest.emplace(std::move(hMSet[i]), std::move(hMSet[i + 1u]));
+ }
+
+ hMSet.clear();
+ }
+
+ source.second.clear();
+ });
+ }
+
+ upqObjectType.Join();
+ ourContentRaw.clear();
+
+ auto& ourCheckSums (ourContent[configCheckSum]);
+ auto& ourObjects (ourContent[configObject]);
+ std::vector<String> setChecksum, setObject, delChecksum, delObject;
+
+ auto redisCurrent (redisCheckSums.begin());
+ auto redisEnd (redisCheckSums.end());
+ auto ourCurrent (ourCheckSums.begin());
+ auto ourEnd (ourCheckSums.end());
+
+ auto flushSets ([&]() {
+ auto affectedConfig (setObject.size() / 2u);
+
+ setChecksum.insert(setChecksum.begin(), {"HMSET", configCheckSum});
+ setObject.insert(setObject.begin(), {"HMSET", configObject});
+
+ std::vector<std::vector<String>> transaction;
+
+ transaction.emplace_back(std::vector<String>{"MULTI"});
+ transaction.emplace_back(std::move(setChecksum));
+ transaction.emplace_back(std::move(setObject));
+ transaction.emplace_back(std::vector<String>{"EXEC"});
+
+ setChecksum.clear();
+ setObject.clear();
+
+ rcon->FireAndForgetQueries(std::move(transaction), Prio::Config, {affectedConfig});
+ });
+
+ auto flushDels ([&]() {
+ auto affectedConfig (delObject.size());
+
+ delChecksum.insert(delChecksum.begin(), {"HDEL", configCheckSum});
+ delObject.insert(delObject.begin(), {"HDEL", configObject});
+
+ std::vector<std::vector<String>> transaction;
+
+ transaction.emplace_back(std::vector<String>{"MULTI"});
+ transaction.emplace_back(std::move(delChecksum));
+ transaction.emplace_back(std::move(delObject));
+ transaction.emplace_back(std::vector<String>{"EXEC"});
+
+ delChecksum.clear();
+ delObject.clear();
+
+ rcon->FireAndForgetQueries(std::move(transaction), Prio::Config, {affectedConfig});
+ });
+
+ auto setOne ([&]() {
+ setChecksum.emplace_back(ourCurrent->first);
+ setChecksum.emplace_back(ourCurrent->second);
+ setObject.emplace_back(ourCurrent->first);
+ setObject.emplace_back(ourObjects[ourCurrent->first]);
+
+ if (setChecksum.size() == 100u) {
+ flushSets();
+ }
+ });
+
+ auto delOne ([&]() {
+ delChecksum.emplace_back(redisCurrent->first);
+ delObject.emplace_back(redisCurrent->first);
+
+ if (delChecksum.size() == 100u) {
+ flushDels();
+ }
+ });
+
+ for (;;) {
+ if (redisCurrent == redisEnd) {
+ for (; ourCurrent != ourEnd; ++ourCurrent) {
+ setOne();
+ }
+
+ break;
+ } else if (ourCurrent == ourEnd) {
+ for (; redisCurrent != redisEnd; ++redisCurrent) {
+ delOne();
+ }
+
+ break;
+ } else if (redisCurrent->first < ourCurrent->first) {
+ delOne();
+ ++redisCurrent;
+ } else if (redisCurrent->first > ourCurrent->first) {
+ setOne();
+ ++ourCurrent;
+ } else {
+ if (redisCurrent->second != ourCurrent->second) {
+ setOne();
+ }
+
+ ++redisCurrent;
+ ++ourCurrent;
+ }
+ }
+
+ if (delChecksum.size()) {
+ flushDels();
+ }
+
+ if (setChecksum.size()) {
+ flushSets();
+ }
+
+ for (auto& key : GetTypeDumpSignalKeys(type)) {
+ rcon->FireAndForgetQuery({"XADD", "icinga:dump", "*", "key", key, "state", "done"}, Prio::Config);
+ }
+ rcon->Sync();
+ });
+
+ upq.Join();
+
+ if (upq.HasExceptions()) {
+ for (boost::exception_ptr exc : upq.GetExceptions()) {
+ try {
+ if (exc) {
+ boost::rethrow_exception(exc);
+ }
+ } catch(const std::exception& e) {
+ Log(LogCritical, "IcingaDB")
+ << "Exception during ConfigDump: " << e.what();
+ }
+ }
+ }
+
+ for (auto& key : globalKeys) {
+ m_Rcon->FireAndForgetQuery({"XADD", "icinga:dump", "*", "key", key, "state", "done"}, Prio::Config);
+ }
+
+ m_Rcon->FireAndForgetQuery({"XADD", "icinga:dump", "*", "key", "*", "state", "done"}, Prio::Config);
+
+ // Enqueue a callback that will notify us once all previous queries have been executed, and wait for that event.
+ std::promise<void> p;
+ m_Rcon->EnqueueCallback([&p](boost::asio::yield_context& yc) { p.set_value(); }, Prio::Config);
+ p.get_future().wait();
+
+ auto endTime (Utility::GetTime());
+ auto took (endTime - startTime);
+
+ SetLastdumpTook(took);
+ SetLastdumpEnd(endTime);
+
+ Log(LogInformation, "IcingaDB")
+ << "Initial config/status dump finished in " << took << " seconds.";
+}
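The heart of the per-type delta above (the redisCurrent/ourCurrent loop) is a two-pointer walk over two sorted key-to-checksum maps: keys present only in Redis are deleted, keys present only in the fresh dump are set, and keys present in both are rewritten only when their checksums differ. Here is a Redis-free sketch of that walk; std::map keeps the keys sorted, and the key and checksum strings are illustrative.

#include <iostream>
#include <map>
#include <string>

// Compute the SET/DEL operations needed to make `redis` match `ours`,
// walking both sorted maps in lock step (same idea as the loop in
// UpdateAllConfigObjects()).
void Diff(const std::map<std::string, std::string>& redis,
	const std::map<std::string, std::string>& ours)
{
	auto r = redis.begin();
	auto o = ours.begin();

	while (r != redis.end() || o != ours.end()) {
		if (r == redis.end() || (o != ours.end() && o->first < r->first)) {
			std::cout << "SET " << o->first << "\n";     // only in our dump
			++o;
		} else if (o == ours.end() || r->first < o->first) {
			std::cout << "DEL " << r->first << "\n";     // stale key in Redis
			++r;
		} else {
			if (r->second != o->second)
				std::cout << "SET " << o->first << "\n"; // checksum changed
			++r;
			++o;
		}
	}
}

int main()
{
	Diff({{"host:a", "1"}, {"host:b", "2"}},  // what Redis currently has
	     {{"host:b", "3"}, {"host:c", "4"}}); // what this node just dumped
	// prints: DEL host:a, SET host:b, SET host:c
}

The real code batches the resulting HMSET/HDEL commands into MULTI/EXEC transactions of bounded size instead of printing them.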
+
+std::vector<std::vector<intrusive_ptr<ConfigObject>>> IcingaDB::ChunkObjects(std::vector<intrusive_ptr<ConfigObject>> objects, size_t chunkSize) {
+ std::vector<std::vector<intrusive_ptr<ConfigObject>>> chunks;
+ auto offset (objects.begin());
+ auto end (objects.end());
+
+ chunks.reserve((std::distance(offset, end) + chunkSize - 1) / chunkSize);
+
+ while (std::distance(offset, end) >= chunkSize) {
+ auto until (offset + chunkSize);
+ chunks.emplace_back(offset, until);
+ offset = until;
+ }
+
+ if (offset != end) {
+ chunks.emplace_back(offset, end);
+ }
+
+ return chunks;
+}
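ChunkObjects() merely slices the object list into fixed-size chunks so that each chunk can be serialized by its own work-queue task. A generic stand-alone version of the same idea follows; the template and the example data are illustrative, while the original works directly on ConfigObject pointers.

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Split `items` into chunks of at most `chunkSize` elements (chunkSize must be > 0),
// same idea as IcingaDB::ChunkObjects(), just generic.
template<typename T>
std::vector<std::vector<T>> Chunk(const std::vector<T>& items, std::size_t chunkSize)
{
	std::vector<std::vector<T>> chunks;
	chunks.reserve((items.size() + chunkSize - 1) / chunkSize);

	for (std::size_t offset = 0; offset < items.size(); offset += chunkSize) {
		auto first = items.begin() + offset;
		auto last = items.begin() + std::min(offset + chunkSize, items.size());
		chunks.emplace_back(first, last);
	}

	return chunks;
}

int main()
{
	std::vector<std::string> hosts = {"a", "b", "c", "d", "e"};
	for (const auto& chunk : Chunk(hosts, 2))
		std::cout << chunk.size() << " "; // prints: 2 2 1
}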
+
+void IcingaDB::DeleteKeys(const RedisConnection::Ptr& conn, const std::vector<String>& keys, RedisConnection::QueryPriority priority) {
+ std::vector<String> query = {"DEL"};
+ for (auto& key : keys) {
+ query.emplace_back(key);
+ }
+
+ conn->FireAndForgetQuery(std::move(query), priority);
+}
+
+std::vector<String> IcingaDB::GetTypeOverwriteKeys(const String& type)
+{
+ std::vector<String> keys = {
+ m_PrefixConfigObject + type + ":customvar",
+ };
+
+ if (type == "host" || type == "service" || type == "user") {
+ keys.emplace_back(m_PrefixConfigObject + type + "group:member");
+ keys.emplace_back(m_PrefixConfigObject + type + ":state");
+ keys.emplace_back(m_PrefixConfigCheckSum + type + ":state");
+ } else if (type == "timeperiod") {
+ keys.emplace_back(m_PrefixConfigObject + type + ":override:include");
+ keys.emplace_back(m_PrefixConfigObject + type + ":override:exclude");
+ keys.emplace_back(m_PrefixConfigObject + type + ":range");
+ } else if (type == "notification") {
+ keys.emplace_back(m_PrefixConfigObject + type + ":user");
+ keys.emplace_back(m_PrefixConfigObject + type + ":usergroup");
+ keys.emplace_back(m_PrefixConfigObject + type + ":recipient");
+ } else if (type == "checkcommand" || type == "notificationcommand" || type == "eventcommand") {
+ keys.emplace_back(m_PrefixConfigObject + type + ":envvar");
+ keys.emplace_back(m_PrefixConfigCheckSum + type + ":envvar");
+ keys.emplace_back(m_PrefixConfigObject + type + ":argument");
+ keys.emplace_back(m_PrefixConfigCheckSum + type + ":argument");
+ }
+
+ return keys;
+}
+
+std::vector<String> IcingaDB::GetTypeDumpSignalKeys(const Type::Ptr& type)
+{
+ String lcType = type->GetName().ToLower();
+ std::vector<String> keys = {m_PrefixConfigObject + lcType};
+
+ if (CustomVarObject::TypeInstance->IsAssignableFrom(type)) {
+ keys.emplace_back(m_PrefixConfigObject + lcType + ":customvar");
+ }
+
+ if (type == Host::TypeInstance || type == Service::TypeInstance) {
+ keys.emplace_back(m_PrefixConfigObject + lcType + "group:member");
+ keys.emplace_back(m_PrefixConfigObject + lcType + ":state");
+ } else if (type == User::TypeInstance) {
+ keys.emplace_back(m_PrefixConfigObject + lcType + "group:member");
+ } else if (type == TimePeriod::TypeInstance) {
+ keys.emplace_back(m_PrefixConfigObject + lcType + ":override:include");
+ keys.emplace_back(m_PrefixConfigObject + lcType + ":override:exclude");
+ keys.emplace_back(m_PrefixConfigObject + lcType + ":range");
+ } else if (type == Notification::TypeInstance) {
+ keys.emplace_back(m_PrefixConfigObject + lcType + ":user");
+ keys.emplace_back(m_PrefixConfigObject + lcType + ":usergroup");
+ keys.emplace_back(m_PrefixConfigObject + lcType + ":recipient");
+ } else if (type == CheckCommand::TypeInstance || type == NotificationCommand::TypeInstance || type == EventCommand::TypeInstance) {
+ keys.emplace_back(m_PrefixConfigObject + lcType + ":envvar");
+ keys.emplace_back(m_PrefixConfigObject + lcType + ":argument");
+ }
+
+ return keys;
+}
+
+template<typename ConfigType>
+static ConfigObject::Ptr GetObjectByName(const String& name)
+{
+ return ConfigObject::GetObject<ConfigType>(name);
+}
+
+void IcingaDB::InsertObjectDependencies(const ConfigObject::Ptr& object, const String typeName, std::map<String, std::vector<String>>& hMSets,
+ std::vector<Dictionary::Ptr>& runtimeUpdates, bool runtimeUpdate)
+{
+ String objectKey = GetObjectIdentifier(object);
+ String objectKeyName = typeName + "_id";
+
+ Type::Ptr type = object->GetReflectionType();
+
+ CustomVarObject::Ptr customVarObject = dynamic_pointer_cast<CustomVarObject>(object);
+
+ if (customVarObject) {
+ auto vars(SerializeVars(customVarObject->GetVars()));
+ if (vars) {
+ auto& typeCvs (hMSets[m_PrefixConfigObject + typeName + ":customvar"]);
+ auto& allCvs (hMSets[m_PrefixConfigObject + "customvar"]);
+
+ ObjectLock varsLock(vars);
+ Array::Ptr varsArray(new Array);
+
+ varsArray->Reserve(vars->GetLength());
+
+ for (auto& kv : vars) {
+ if (runtimeUpdate || m_DumpedGlobals.CustomVar.IsNew(kv.first)) {
+ allCvs.emplace_back(kv.first);
+ allCvs.emplace_back(JsonEncode(kv.second));
+
+ if (runtimeUpdate) {
+ AddObjectDataToRuntimeUpdates(runtimeUpdates, kv.first, m_PrefixConfigObject + "customvar", kv.second);
+ }
+ }
+
+ String id = HashValue(new Array({m_EnvironmentId, kv.first, object->GetName()}));
+ typeCvs.emplace_back(id);
+
+ Dictionary::Ptr data = new Dictionary({{objectKeyName, objectKey}, {"environment_id", m_EnvironmentId}, {"customvar_id", kv.first}});
+ typeCvs.emplace_back(JsonEncode(data));
+
+ if (runtimeUpdate) {
+ AddObjectDataToRuntimeUpdates(runtimeUpdates, id, m_PrefixConfigObject + typeName + ":customvar", data);
+ }
+ }
+ }
+ }
+
+ if (type == Host::TypeInstance || type == Service::TypeInstance) {
+ Checkable::Ptr checkable = static_pointer_cast<Checkable>(object);
+
+ String actionUrl = checkable->GetActionUrl();
+ String notesUrl = checkable->GetNotesUrl();
+ String iconImage = checkable->GetIconImage();
+ if (!actionUrl.IsEmpty()) {
+ auto& actionUrls (hMSets[m_PrefixConfigObject + "action:url"]);
+
+ auto id (HashValue(new Array({m_EnvironmentId, actionUrl})));
+
+ if (runtimeUpdate || m_DumpedGlobals.ActionUrl.IsNew(id)) {
+ actionUrls.emplace_back(std::move(id));
+ Dictionary::Ptr data = new Dictionary({{"environment_id", m_EnvironmentId}, {"action_url", actionUrl}});
+ actionUrls.emplace_back(JsonEncode(data));
+
+ if (runtimeUpdate) {
+ AddObjectDataToRuntimeUpdates(runtimeUpdates, actionUrls.at(actionUrls.size() - 2u), m_PrefixConfigObject + "action:url", data);
+ }
+ }
+ }
+ if (!notesUrl.IsEmpty()) {
+ auto& notesUrls (hMSets[m_PrefixConfigObject + "notes:url"]);
+
+ auto id (HashValue(new Array({m_EnvironmentId, notesUrl})));
+
+ if (runtimeUpdate || m_DumpedGlobals.NotesUrl.IsNew(id)) {
+ notesUrls.emplace_back(std::move(id));
+ Dictionary::Ptr data = new Dictionary({{"environment_id", m_EnvironmentId}, {"notes_url", notesUrl}});
+ notesUrls.emplace_back(JsonEncode(data));
+
+ if (runtimeUpdate) {
+ AddObjectDataToRuntimeUpdates(runtimeUpdates, notesUrls.at(notesUrls.size() - 2u), m_PrefixConfigObject + "notes:url", data);
+ }
+ }
+ }
+ if (!iconImage.IsEmpty()) {
+ auto& iconImages (hMSets[m_PrefixConfigObject + "icon:image"]);
+
+ auto id (HashValue(new Array({m_EnvironmentId, iconImage})));
+
+ if (runtimeUpdate || m_DumpedGlobals.IconImage.IsNew(id)) {
+ iconImages.emplace_back(std::move(id));
+ Dictionary::Ptr data = new Dictionary({{"environment_id", m_EnvironmentId}, {"icon_image", iconImage}});
+ iconImages.emplace_back(JsonEncode(data));
+
+ if (runtimeUpdate) {
+ AddObjectDataToRuntimeUpdates(runtimeUpdates, iconImages.at(iconImages.size() - 2u), m_PrefixConfigObject + "icon:image", data);
+ }
+ }
+ }
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ ConfigObject::Ptr (*getGroup)(const String& name);
+ Array::Ptr groups;
+ if (service) {
+ groups = service->GetGroups();
+ getGroup = &::GetObjectByName<ServiceGroup>;
+ } else {
+ groups = host->GetGroups();
+ getGroup = &::GetObjectByName<HostGroup>;
+ }
+
+ if (groups) {
+ ObjectLock groupsLock(groups);
+ Array::Ptr groupIds(new Array);
+
+ groupIds->Reserve(groups->GetLength());
+
+ auto& members (hMSets[m_PrefixConfigObject + typeName + "group:member"]);
+
+ for (auto& group : groups) {
+ auto groupObj ((*getGroup)(group));
+ String groupId = GetObjectIdentifier(groupObj);
+ String id = HashValue(new Array({m_EnvironmentId, groupObj->GetName(), object->GetName()}));
+ members.emplace_back(id);
+ Dictionary::Ptr data = new Dictionary({{objectKeyName, objectKey}, {"environment_id", m_EnvironmentId}, {typeName + "group_id", groupId}});
+ members.emplace_back(JsonEncode(data));
+
+ if (runtimeUpdate) {
+ AddObjectDataToRuntimeUpdates(runtimeUpdates, id, m_PrefixConfigObject + typeName + "group:member", data);
+ }
+
+ groupIds->Add(groupId);
+ }
+ }
+
+ return;
+ }
+
+ if (type == TimePeriod::TypeInstance) {
+ TimePeriod::Ptr timeperiod = static_pointer_cast<TimePeriod>(object);
+
+ Dictionary::Ptr ranges = timeperiod->GetRanges();
+ if (ranges) {
+ ObjectLock rangesLock(ranges);
+ Array::Ptr rangeIds(new Array);
+ auto& typeRanges (hMSets[m_PrefixConfigObject + typeName + ":range"]);
+
+ rangeIds->Reserve(ranges->GetLength());
+
+ for (auto& kv : ranges) {
+ String rangeId = HashValue(new Array({m_EnvironmentId, kv.first, kv.second}));
+ rangeIds->Add(rangeId);
+
+ String id = HashValue(new Array({m_EnvironmentId, kv.first, kv.second, object->GetName()}));
+ typeRanges.emplace_back(id);
+ Dictionary::Ptr data = new Dictionary({{"environment_id", m_EnvironmentId}, {"timeperiod_id", objectKey}, {"range_key", kv.first}, {"range_value", kv.second}});
+ typeRanges.emplace_back(JsonEncode(data));
+
+ if (runtimeUpdate) {
+ AddObjectDataToRuntimeUpdates(runtimeUpdates, id, m_PrefixConfigObject + typeName + ":range", data);
+ }
+ }
+ }
+
+ Array::Ptr includes;
+ ConfigObject::Ptr (*getInclude)(const String& name);
+ includes = timeperiod->GetIncludes();
+ getInclude = &::GetObjectByName<TimePeriod>;
+
+ Array::Ptr includeChecksums = new Array();
+
+ ObjectLock includesLock(includes);
+ ObjectLock includeChecksumsLock(includeChecksums);
+
+ includeChecksums->Reserve(includes->GetLength());
+
+
+ auto& includs (hMSets[m_PrefixConfigObject + typeName + ":override:include"]);
+ for (auto include : includes) {
+ auto includeTp ((*getInclude)(include.Get<String>()));
+ String includeId = GetObjectIdentifier(includeTp);
+ includeChecksums->Add(includeId);
+
+ String id = HashValue(new Array({m_EnvironmentId, includeTp->GetName(), object->GetName()}));
+ includs.emplace_back(id);
+ Dictionary::Ptr data = new Dictionary({{"environment_id", m_EnvironmentId}, {"timeperiod_id", objectKey}, {"include_id", includeId}});
+ includs.emplace_back(JsonEncode(data));
+
+ if (runtimeUpdate) {
+ AddObjectDataToRuntimeUpdates(runtimeUpdates, id, m_PrefixConfigObject + typeName + ":override:include", data);
+ }
+ }
+
+ Array::Ptr excludes;
+ ConfigObject::Ptr (*getExclude)(const String& name);
+
+ excludes = timeperiod->GetExcludes();
+ getExclude = &::GetObjectByName<TimePeriod>;
+
+ Array::Ptr excludeChecksums = new Array();
+
+ ObjectLock excludesLock(excludes);
+ ObjectLock excludeChecksumsLock(excludeChecksums);
+
+ excludeChecksums->Reserve(excludes->GetLength());
+
+ auto& excluds (hMSets[m_PrefixConfigObject + typeName + ":override:exclude"]);
+
+ for (auto exclude : excludes) {
+ auto excludeTp ((*getExclude)(exclude.Get<String>()));
+ String excludeId = GetObjectIdentifier(excludeTp);
+ excludeChecksums->Add(excludeId);
+
+ String id = HashValue(new Array({m_EnvironmentId, excludeTp->GetName(), object->GetName()}));
+ excluds.emplace_back(id);
+ Dictionary::Ptr data = new Dictionary({{"environment_id", m_EnvironmentId}, {"timeperiod_id", objectKey}, {"exclude_id", excludeId}});
+ excluds.emplace_back(JsonEncode(data));
+
+ if (runtimeUpdate) {
+ AddObjectDataToRuntimeUpdates(runtimeUpdates, id, m_PrefixConfigObject + typeName + ":override:exclude", data);
+ }
+ }
+
+ return;
+ }
+
+ if (type == User::TypeInstance) {
+ User::Ptr user = static_pointer_cast<User>(object);
+ Array::Ptr groups = user->GetGroups();
+
+ if (groups) {
+ ObjectLock groupsLock(groups);
+ Array::Ptr groupIds(new Array);
+
+ groupIds->Reserve(groups->GetLength());
+
+ auto& members (hMSets[m_PrefixConfigObject + typeName + "group:member"]);
+ auto& notificationRecipients (hMSets[m_PrefixConfigObject + "notification:recipient"]);
+
+ for (auto& group : groups) {
+ UserGroup::Ptr groupObj = UserGroup::GetByName(group);
+ String groupId = GetObjectIdentifier(groupObj);
+ String id = HashValue(new Array({m_EnvironmentId, groupObj->GetName(), object->GetName()}));
+ members.emplace_back(id);
+ Dictionary::Ptr data = new Dictionary({{"user_id", objectKey}, {"environment_id", m_EnvironmentId}, {"usergroup_id", groupId}});
+ members.emplace_back(JsonEncode(data));
+
+ if (runtimeUpdate) {
+ AddObjectDataToRuntimeUpdates(runtimeUpdates, id, m_PrefixConfigObject + typeName + "group:member", data);
+
+ // Recipients are handled by notifications during initial dumps and only need to be handled here during runtime (e.g. User creation).
+ for (auto& notification : groupObj->GetNotifications()) {
+ String recipientId = HashValue(new Array({m_EnvironmentId, "usergroupuser", user->GetName(), groupObj->GetName(), notification->GetName()}));
+ notificationRecipients.emplace_back(recipientId);
+ Dictionary::Ptr recipientData = new Dictionary({{"notification_id", GetObjectIdentifier(notification)}, {"environment_id", m_EnvironmentId}, {"user_id", objectKey}, {"usergroup_id", groupId}});
+ notificationRecipients.emplace_back(JsonEncode(recipientData));
+
+ AddObjectDataToRuntimeUpdates(runtimeUpdates, recipientId, m_PrefixConfigObject + "notification:recipient", recipientData);
+ }
+ }
+
+ groupIds->Add(groupId);
+ }
+ }
+
+ return;
+ }
+
+ if (type == Notification::TypeInstance) {
+ Notification::Ptr notification = static_pointer_cast<Notification>(object);
+
+ std::set<User::Ptr> users = notification->GetUsers();
+ Array::Ptr userIds = new Array();
+
+ auto usergroups(notification->GetUserGroups());
+ Array::Ptr usergroupIds = new Array();
+
+ userIds->Reserve(users.size());
+
+ auto& usrs (hMSets[m_PrefixConfigObject + typeName + ":user"]);
+ auto& notificationRecipients (hMSets[m_PrefixConfigObject + typeName + ":recipient"]);
+
+ for (auto& user : users) {
+ String userId = GetObjectIdentifier(user);
+ String id = HashValue(new Array({m_EnvironmentId, "user", user->GetName(), object->GetName()}));
+ usrs.emplace_back(id);
+ notificationRecipients.emplace_back(id);
+
+ Dictionary::Ptr data = new Dictionary({{"notification_id", objectKey}, {"environment_id", m_EnvironmentId}, {"user_id", userId}});
+ String dataJson = JsonEncode(data);
+ usrs.emplace_back(dataJson);
+ notificationRecipients.emplace_back(dataJson);
+
+ if (runtimeUpdate) {
+ AddObjectDataToRuntimeUpdates(runtimeUpdates, id, m_PrefixConfigObject + typeName + ":user", data);
+ AddObjectDataToRuntimeUpdates(runtimeUpdates, id, m_PrefixConfigObject + typeName + ":recipient", data);
+ }
+
+ userIds->Add(userId);
+ }
+
+ usergroupIds->Reserve(usergroups.size());
+
+ auto& groups (hMSets[m_PrefixConfigObject + typeName + ":usergroup"]);
+
+ for (auto& usergroup : usergroups) {
+ String usergroupId = GetObjectIdentifier(usergroup);
+ String id = HashValue(new Array({m_EnvironmentId, "usergroup", usergroup->GetName(), object->GetName()}));
+ groups.emplace_back(id);
+ notificationRecipients.emplace_back(id);
+
+ Dictionary::Ptr groupData = new Dictionary({{"notification_id", objectKey}, {"environment_id", m_EnvironmentId}, {"usergroup_id", usergroupId}});
+ String groupDataJson = JsonEncode(groupData);
+ groups.emplace_back(groupDataJson);
+ notificationRecipients.emplace_back(groupDataJson);
+
+ if (runtimeUpdate) {
+ AddObjectDataToRuntimeUpdates(runtimeUpdates, id, m_PrefixConfigObject + typeName + ":usergroup", groupData);
+ AddObjectDataToRuntimeUpdates(runtimeUpdates, id, m_PrefixConfigObject + typeName + ":recipient", groupData);
+ }
+
+ for (const User::Ptr& user : usergroup->GetMembers()) {
+ String userId = GetObjectIdentifier(user);
+ String recipientId = HashValue(new Array({m_EnvironmentId, "usergroupuser", user->GetName(), usergroup->GetName(), notification->GetName()}));
+ notificationRecipients.emplace_back(recipientId);
+ Dictionary::Ptr userData = new Dictionary({{"notification_id", objectKey}, {"environment_id", m_EnvironmentId}, {"user_id", userId}, {"usergroup_id", usergroupId}});
+ notificationRecipients.emplace_back(JsonEncode(userData));
+
+ if (runtimeUpdate) {
+ AddObjectDataToRuntimeUpdates(runtimeUpdates, recipientId, m_PrefixConfigObject + typeName + ":recipient", userData);
+ }
+ }
+
+ usergroupIds->Add(usergroupId);
+ }
+
+ return;
+ }
+
+ if (type == CheckCommand::TypeInstance || type == NotificationCommand::TypeInstance || type == EventCommand::TypeInstance) {
+ Command::Ptr command = static_pointer_cast<Command>(object);
+
+ Dictionary::Ptr arguments = command->GetArguments();
+ if (arguments) {
+ ObjectLock argumentsLock(arguments);
+ auto& typeArgs (hMSets[m_PrefixConfigObject + typeName + ":argument"]);
+ auto& argChksms (hMSets[m_PrefixConfigCheckSum + typeName + ":argument"]);
+
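+			// Illustrative sketch (hypothetical argument, not taken from any actual command): a definition like
+			//   "-H" => { "value" = "$host.address$", "required" = true, "order" = 1 }
+			// is normalized below into a flat dictionary roughly like
+			//   { "value": "$host.address$", "required": true, "order": 1,
+			//     <objectKeyName>: <objectKey>, "argument_key": "-H", "environment_id": <id> }
+			// before being JSON-encoded for the ":argument" hash and, on runtime updates, the runtime stream.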
+ for (auto& kv : arguments) {
+ Dictionary::Ptr values;
+ if (kv.second.IsObjectType<Dictionary>()) {
+ values = kv.second;
+ values = values->ShallowClone();
+ } else if (kv.second.IsObjectType<Array>()) {
+ values = new Dictionary({{"value", JsonEncode(kv.second)}});
+ } else {
+ values = new Dictionary({{"value", kv.second}});
+ }
+
+ for (const char *attr : {"value", "set_if", "separator"}) {
+ Value value;
+
+ // Stringify if set.
+ if (values->Get(attr, &value)) {
+ switch (value.GetType()) {
+ case ValueEmpty:
+ case ValueString:
+ break;
+ case ValueObject:
+ values->Set(attr, value.Get<Object::Ptr>()->ToString());
+ break;
+ default:
+ values->Set(attr, JsonEncode(value));
+ }
+ }
+ }
+
+ for (const char *attr : {"repeat_key", "required", "skip_key"}) {
+ Value value;
+
+ // Boolify if set.
+ if (values->Get(attr, &value)) {
+ values->Set(attr, value.ToBool());
+ }
+ }
+
+ {
+ Value order;
+
+ // Intify if set.
+ if (values->Get("order", &order)) {
+ values->Set("order", (int)order);
+ }
+ }
+
+ values->Set(objectKeyName, objectKey);
+ values->Set("argument_key", kv.first);
+ values->Set("environment_id", m_EnvironmentId);
+
+ String id = HashValue(new Array({m_EnvironmentId, kv.first, object->GetName()}));
+
+ typeArgs.emplace_back(id);
+ typeArgs.emplace_back(JsonEncode(values));
+
+ argChksms.emplace_back(id);
+ String checksum = HashValue(kv.second);
+ argChksms.emplace_back(JsonEncode(new Dictionary({{"checksum", checksum}})));
+
+ if (runtimeUpdate) {
+ values->Set("checksum", checksum);
+ AddObjectDataToRuntimeUpdates(runtimeUpdates, id, m_PrefixConfigObject + typeName + ":argument", values);
+ }
+ }
+ }
+
+ Dictionary::Ptr envvars = command->GetEnv();
+ if (envvars) {
+ ObjectLock envvarsLock(envvars);
+ Array::Ptr envvarIds(new Array);
+ auto& typeVars (hMSets[m_PrefixConfigObject + typeName + ":envvar"]);
+ auto& varChksms (hMSets[m_PrefixConfigCheckSum + typeName + ":envvar"]);
+
+ envvarIds->Reserve(envvars->GetLength());
+
+ for (auto& kv : envvars) {
+ Dictionary::Ptr values;
+ if (kv.second.IsObjectType<Dictionary>()) {
+ values = kv.second;
+ values = values->ShallowClone();
+ } else if (kv.second.IsObjectType<Array>()) {
+ values = new Dictionary({{"value", JsonEncode(kv.second)}});
+ } else {
+ values = new Dictionary({{"value", kv.second}});
+ }
+
+ {
+ Value value;
+
+ // JsonEncode() the value if it's set.
+ if (values->Get("value", &value)) {
+ values->Set("value", JsonEncode(value));
+ }
+ }
+
+ values->Set(objectKeyName, objectKey);
+ values->Set("envvar_key", kv.first);
+ values->Set("environment_id", m_EnvironmentId);
+
+ String id = HashValue(new Array({m_EnvironmentId, kv.first, object->GetName()}));
+
+ typeVars.emplace_back(id);
+ typeVars.emplace_back(JsonEncode(values));
+
+ varChksms.emplace_back(id);
+ String checksum = HashValue(kv.second);
+ varChksms.emplace_back(JsonEncode(new Dictionary({{"checksum", checksum}})));
+
+ if (runtimeUpdate) {
+ values->Set("checksum", checksum);
+ AddObjectDataToRuntimeUpdates(runtimeUpdates, id, m_PrefixConfigObject + typeName + ":envvar", values);
+ }
+ }
+ }
+
+ return;
+ }
+}
+
+/**
+ * Update the state information of a checkable in Redis.
+ *
+ * What is updated exactly depends on the mode parameter:
+ * - Volatile: Update the volatile state information stored in icinga:host:state or icinga:service:state as well as
+ * the corresponding checksum stored in icinga:checksum:host:state or icinga:checksum:service:state.
+ * - RuntimeOnly: Write a runtime update to the icinga:runtime:state stream. It is up to the caller to ensure that
+ * identical volatile state information was already written before to avoid inconsistencies. This mode is only
+ * useful to upgrade a previous Volatile to a Full operation, otherwise Full should be used.
+ * - Full: Perform an update of all state information in Redis, that is, updating the volatile information and sending
+ * a corresponding runtime update so that this state update gets written through to the persistent database by a
+ * running icingadb process.
+ *
+ * @param checkable The checkable whose state is updated in Redis
+ * @param mode Mode of operation (StateUpdate::Volatile, StateUpdate::RuntimeOnly, or StateUpdate::Full)
+ */
+void IcingaDB::UpdateState(const Checkable::Ptr& checkable, StateUpdate mode)
+{
+ if (!m_Rcon || !m_Rcon->IsConnected())
+ return;
+
+ String objectType = GetLowerCaseTypeNameDB(checkable);
+ String objectKey = GetObjectIdentifier(checkable);
+
+ Dictionary::Ptr stateAttrs = SerializeState(checkable);
+
+ String redisStateKey = m_PrefixConfigObject + objectType + ":state";
+ String redisChecksumKey = m_PrefixConfigCheckSum + objectType + ":state";
+ String checksum = HashValue(stateAttrs);
+
+ if (mode & StateUpdate::Volatile) {
+ m_Rcon->FireAndForgetQueries({
+ {"HSET", redisStateKey, objectKey, JsonEncode(stateAttrs)},
+ {"HSET", redisChecksumKey, objectKey, JsonEncode(new Dictionary({{"checksum", checksum}}))},
+ }, Prio::RuntimeStateSync);
+ }
+
+ if (mode & StateUpdate::RuntimeOnly) {
+ ObjectLock olock(stateAttrs);
+
+ std::vector<String> streamadd({
+ "XADD", "icinga:runtime:state", "MAXLEN", "~", "1000000", "*",
+ "runtime_type", "upsert",
+ "redis_key", redisStateKey,
+ "checksum", checksum,
+ });
+
+ for (const Dictionary::Pair& kv : stateAttrs) {
+ streamadd.emplace_back(kv.first);
+ streamadd.emplace_back(IcingaToStreamValue(kv.second));
+ }
+
+ m_Rcon->FireAndForgetQuery(std::move(streamadd), Prio::RuntimeStateStream, {0, 1});
+ }
+}
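+
+// Illustrative usage sketch (hypothetical call sites): callers choose the mode depending on which parts of Redis
+// need updating, e.g.
+//   UpdateState(checkable, StateUpdate::Volatile);     // refresh the volatile state hash and its checksum only
+//   UpdateState(checkable, StateUpdate::RuntimeOnly);  // upgrade an earlier Volatile write to a Full one
+//   UpdateState(checkable, StateUpdate::Full);         // volatile update plus a runtime update for the persistent database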
+
+// Updates a single object in Redis; used for runtime updates
+void IcingaDB::SendConfigUpdate(const ConfigObject::Ptr& object, bool runtimeUpdate)
+{
+ if (!m_Rcon || !m_Rcon->IsConnected())
+ return;
+
+ String typeName = GetLowerCaseTypeNameDB(object);
+
+ std::map<String, std::vector<String>> hMSets;
+ std::vector<Dictionary::Ptr> runtimeUpdates;
+
+ CreateConfigUpdate(object, typeName, hMSets, runtimeUpdates, runtimeUpdate);
+ Checkable::Ptr checkable = dynamic_pointer_cast<Checkable>(object);
+ if (checkable) {
+ UpdateState(checkable, runtimeUpdate ? StateUpdate::Full : StateUpdate::Volatile);
+ }
+
+ std::vector<std::vector<String> > transaction = {{"MULTI"}};
+
+ for (auto& kv : hMSets) {
+ if (!kv.second.empty()) {
+ kv.second.insert(kv.second.begin(), {"HMSET", kv.first});
+ transaction.emplace_back(std::move(kv.second));
+ }
+ }
+
+ for (auto& objectAttributes : runtimeUpdates) {
+ std::vector<String> xAdd({"XADD", "icinga:runtime", "MAXLEN", "~", "1000000", "*"});
+ ObjectLock olock(objectAttributes);
+
+ for (const Dictionary::Pair& kv : objectAttributes) {
+ String value = IcingaToStreamValue(kv.second);
+ if (!value.IsEmpty()) {
+ xAdd.emplace_back(kv.first);
+ xAdd.emplace_back(value);
+ }
+ }
+
+ transaction.emplace_back(std::move(xAdd));
+ }
+
+ if (transaction.size() > 1) {
+ transaction.push_back({"EXEC"});
+ m_Rcon->FireAndForgetQueries(std::move(transaction), Prio::Config, {1});
+ }
+
+ if (checkable) {
+ SendNextUpdate(checkable);
+ }
+}
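+
+// Rough shape of the resulting Redis transaction (illustrative, values abbreviated):
+//   MULTI
+//   HMSET <prefix><type> <object id> <attrs JSON> ...           (one HMSET per non-empty hMSets entry)
+//   XADD icinga:runtime MAXLEN ~ 1000000 * <field> <value> ...  (one XADD per runtime update)
+//   EXEC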
+
+void IcingaDB::AddObjectDataToRuntimeUpdates(std::vector<Dictionary::Ptr>& runtimeUpdates, const String& objectKey,
+ const String& redisKey, const Dictionary::Ptr& data)
+{
+ Dictionary::Ptr dataClone = data->ShallowClone();
+ dataClone->Set("id", objectKey);
+ dataClone->Set("redis_key", redisKey);
+ dataClone->Set("runtime_type", "upsert");
+ runtimeUpdates.emplace_back(dataClone);
+}
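+
+// Illustrative result: given data {"environment_id": <id>, ...}, the queued runtime update is the shallow clone
+// {"environment_id": <id>, ..., "id": <objectKey>, "redis_key": <redisKey>, "runtime_type": "upsert"}.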
+
+// Takes an object, collects the attributes relevant for IcingaDB and computes checksums. Returns whether the object is
+// relevant for IcingaDB.
+bool IcingaDB::PrepareObject(const ConfigObject::Ptr& object, Dictionary::Ptr& attributes, Dictionary::Ptr& checksums)
+{
+ auto originalAttrs (object->GetOriginalAttributes());
+
+ if (originalAttrs) {
+ originalAttrs = originalAttrs->ShallowClone();
+ }
+
+ attributes->Set("name_checksum", SHA1(object->GetName()));
+ attributes->Set("environment_id", m_EnvironmentId);
+ attributes->Set("name", object->GetName());
+ attributes->Set("original_attributes", originalAttrs);
+
+ Zone::Ptr ObjectsZone;
+ Type::Ptr type = object->GetReflectionType();
+
+ if (type == Endpoint::TypeInstance) {
+ ObjectsZone = static_cast<Endpoint*>(object.get())->GetZone();
+ } else {
+ ObjectsZone = static_pointer_cast<Zone>(object->GetZone());
+ }
+
+ if (ObjectsZone) {
+ attributes->Set("zone_id", GetObjectIdentifier(ObjectsZone));
+ attributes->Set("zone_name", ObjectsZone->GetName());
+ }
+
+ if (type == Endpoint::TypeInstance) {
+ return true;
+ }
+
+ if (type == Zone::TypeInstance) {
+ Zone::Ptr zone = static_pointer_cast<Zone>(object);
+
+ attributes->Set("is_global", zone->GetGlobal());
+
+ Zone::Ptr parent = zone->GetParent();
+ if (parent) {
+ attributes->Set("parent_id", GetObjectIdentifier(parent));
+ }
+
+ auto parentsRaw (zone->GetAllParentsRaw());
+ attributes->Set("depth", parentsRaw.size());
+
+ return true;
+ }
+
+ if (type == Host::TypeInstance || type == Service::TypeInstance) {
+ Checkable::Ptr checkable = static_pointer_cast<Checkable>(object);
+ auto checkTimeout (checkable->GetCheckTimeout());
+
+ attributes->Set("checkcommand_name", checkable->GetCheckCommand()->GetName());
+ attributes->Set("max_check_attempts", checkable->GetMaxCheckAttempts());
+ attributes->Set("check_timeout", checkTimeout.IsEmpty() ? checkable->GetCheckCommand()->GetTimeout() : (double)checkTimeout);
+ attributes->Set("check_interval", checkable->GetCheckInterval());
+ attributes->Set("check_retry_interval", checkable->GetRetryInterval());
+ attributes->Set("active_checks_enabled", checkable->GetEnableActiveChecks());
+ attributes->Set("passive_checks_enabled", checkable->GetEnablePassiveChecks());
+ attributes->Set("event_handler_enabled", checkable->GetEnableEventHandler());
+ attributes->Set("notifications_enabled", checkable->GetEnableNotifications());
+ attributes->Set("flapping_enabled", checkable->GetEnableFlapping());
+ attributes->Set("flapping_threshold_low", checkable->GetFlappingThresholdLow());
+ attributes->Set("flapping_threshold_high", checkable->GetFlappingThresholdHigh());
+ attributes->Set("perfdata_enabled", checkable->GetEnablePerfdata());
+ attributes->Set("is_volatile", checkable->GetVolatile());
+ attributes->Set("notes", checkable->GetNotes());
+ attributes->Set("icon_image_alt", checkable->GetIconImageAlt());
+
+ attributes->Set("checkcommand_id", GetObjectIdentifier(checkable->GetCheckCommand()));
+
+ Endpoint::Ptr commandEndpoint = checkable->GetCommandEndpoint();
+ if (commandEndpoint) {
+ attributes->Set("command_endpoint_id", GetObjectIdentifier(commandEndpoint));
+ attributes->Set("command_endpoint_name", commandEndpoint->GetName());
+ }
+
+ TimePeriod::Ptr timePeriod = checkable->GetCheckPeriod();
+ if (timePeriod) {
+ attributes->Set("check_timeperiod_id", GetObjectIdentifier(timePeriod));
+ attributes->Set("check_timeperiod_name", timePeriod->GetName());
+ }
+
+ EventCommand::Ptr eventCommand = checkable->GetEventCommand();
+ if (eventCommand) {
+ attributes->Set("eventcommand_id", GetObjectIdentifier(eventCommand));
+ attributes->Set("eventcommand_name", eventCommand->GetName());
+ }
+
+ String actionUrl = checkable->GetActionUrl();
+ String notesUrl = checkable->GetNotesUrl();
+ String iconImage = checkable->GetIconImage();
+ if (!actionUrl.IsEmpty())
+ attributes->Set("action_url_id", HashValue(new Array({m_EnvironmentId, actionUrl})));
+ if (!notesUrl.IsEmpty())
+ attributes->Set("notes_url_id", HashValue(new Array({m_EnvironmentId, notesUrl})));
+ if (!iconImage.IsEmpty())
+ attributes->Set("icon_image_id", HashValue(new Array({m_EnvironmentId, iconImage})));
+
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ if (service) {
+ attributes->Set("host_id", GetObjectIdentifier(service->GetHost()));
+ attributes->Set("display_name", service->GetDisplayName());
+
+			// Overwrite the name here: `object->name` is 'HostName!ServiceName', but we only want the name of the Service
+ attributes->Set("name", service->GetShortName());
+ } else {
+ attributes->Set("display_name", host->GetDisplayName());
+ attributes->Set("address", host->GetAddress());
+ attributes->Set("address6", host->GetAddress6());
+ }
+
+ return true;
+ }
+
+ if (type == User::TypeInstance) {
+ User::Ptr user = static_pointer_cast<User>(object);
+
+ attributes->Set("display_name", user->GetDisplayName());
+ attributes->Set("email", user->GetEmail());
+ attributes->Set("pager", user->GetPager());
+ attributes->Set("notifications_enabled", user->GetEnableNotifications());
+ attributes->Set("states", user->GetStates());
+ attributes->Set("types", user->GetTypes());
+
+ if (user->GetPeriod())
+ attributes->Set("timeperiod_id", GetObjectIdentifier(user->GetPeriod()));
+
+ return true;
+ }
+
+ if (type == TimePeriod::TypeInstance) {
+ TimePeriod::Ptr timeperiod = static_pointer_cast<TimePeriod>(object);
+
+ attributes->Set("display_name", timeperiod->GetDisplayName());
+ attributes->Set("prefer_includes", timeperiod->GetPreferIncludes());
+ return true;
+ }
+
+ if (type == Notification::TypeInstance) {
+ Notification::Ptr notification = static_pointer_cast<Notification>(object);
+
+ Host::Ptr host;
+ Service::Ptr service;
+
+ tie(host, service) = GetHostService(notification->GetCheckable());
+
+ attributes->Set("notificationcommand_id", GetObjectIdentifier(notification->GetCommand()));
+
+ attributes->Set("host_id", GetObjectIdentifier(host));
+ if (service)
+ attributes->Set("service_id", GetObjectIdentifier(service));
+
+ TimePeriod::Ptr timeperiod = notification->GetPeriod();
+ if (timeperiod)
+ attributes->Set("timeperiod_id", GetObjectIdentifier(timeperiod));
+
+ if (notification->GetTimes()) {
+ auto begin (notification->GetTimes()->Get("begin"));
+ auto end (notification->GetTimes()->Get("end"));
+
+ if (begin != Empty && (double)begin >= 0) {
+ attributes->Set("times_begin", std::round((double)begin));
+ }
+
+ if (end != Empty && (double)end >= 0) {
+ attributes->Set("times_end", std::round((double)end));
+ }
+ }
+
+ attributes->Set("notification_interval", std::max(0.0, std::round(notification->GetInterval())));
+ attributes->Set("states", notification->GetStates());
+ attributes->Set("types", notification->GetTypes());
+
+ return true;
+ }
+
+ if (type == Comment::TypeInstance) {
+ Comment::Ptr comment = static_pointer_cast<Comment>(object);
+
+ attributes->Set("author", comment->GetAuthor());
+ attributes->Set("text", comment->GetText());
+ attributes->Set("entry_type", comment->GetEntryType());
+ attributes->Set("entry_time", TimestampToMilliseconds(comment->GetEntryTime()));
+ attributes->Set("is_persistent", comment->GetPersistent());
+ attributes->Set("is_sticky", comment->GetSticky());
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(comment->GetCheckable());
+
+ attributes->Set("host_id", GetObjectIdentifier(host));
+ if (service) {
+ attributes->Set("object_type", "service");
+ attributes->Set("service_id", GetObjectIdentifier(service));
+ } else
+ attributes->Set("object_type", "host");
+
+ auto expireTime (comment->GetExpireTime());
+
+ if (expireTime > 0) {
+ attributes->Set("expire_time", TimestampToMilliseconds(expireTime));
+ }
+
+ return true;
+ }
+
+ if (type == Downtime::TypeInstance) {
+ Downtime::Ptr downtime = static_pointer_cast<Downtime>(object);
+
+ attributes->Set("author", downtime->GetAuthor());
+ attributes->Set("comment", downtime->GetComment());
+ attributes->Set("entry_time", TimestampToMilliseconds(downtime->GetEntryTime()));
+ attributes->Set("scheduled_start_time", TimestampToMilliseconds(downtime->GetStartTime()));
+ attributes->Set("scheduled_end_time", TimestampToMilliseconds(downtime->GetEndTime()));
+ attributes->Set("scheduled_duration", TimestampToMilliseconds(std::max(0.0, downtime->GetEndTime() - downtime->GetStartTime())));
+ attributes->Set("flexible_duration", TimestampToMilliseconds(std::max(0.0, downtime->GetDuration())));
+ attributes->Set("is_flexible", !downtime->GetFixed());
+ attributes->Set("is_in_effect", downtime->IsInEffect());
+ if (downtime->IsInEffect()) {
+ attributes->Set("start_time", TimestampToMilliseconds(downtime->GetTriggerTime()));
+
+ attributes->Set("end_time", TimestampToMilliseconds(
+ downtime->GetFixed() ? downtime->GetEndTime() : (downtime->GetTriggerTime() + std::max(0.0, downtime->GetDuration()))
+ ));
+ }
+
+ auto duration = downtime->GetDuration();
+ if (downtime->GetFixed()) {
+ duration = downtime->GetEndTime() - downtime->GetStartTime();
+ }
+ attributes->Set("duration", TimestampToMilliseconds(std::max(0.0, duration)));
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(downtime->GetCheckable());
+
+ attributes->Set("host_id", GetObjectIdentifier(host));
+ if (service) {
+ attributes->Set("object_type", "service");
+ attributes->Set("service_id", GetObjectIdentifier(service));
+ } else
+ attributes->Set("object_type", "host");
+
+ auto triggeredBy (Downtime::GetByName(downtime->GetTriggeredBy()));
+ if (triggeredBy) {
+ attributes->Set("triggered_by_id", GetObjectIdentifier(triggeredBy));
+ }
+
+ auto scheduledBy (downtime->GetScheduledBy());
+ if (!scheduledBy.IsEmpty()) {
+ attributes->Set("scheduled_by", scheduledBy);
+ }
+
+ auto parent (Downtime::GetByName(downtime->GetParent()));
+ if (parent) {
+ attributes->Set("parent_id", GetObjectIdentifier(parent));
+ }
+
+ return true;
+ }
+
+ if (type == UserGroup::TypeInstance) {
+ UserGroup::Ptr userGroup = static_pointer_cast<UserGroup>(object);
+
+ attributes->Set("display_name", userGroup->GetDisplayName());
+
+ return true;
+ }
+
+ if (type == HostGroup::TypeInstance) {
+ HostGroup::Ptr hostGroup = static_pointer_cast<HostGroup>(object);
+
+ attributes->Set("display_name", hostGroup->GetDisplayName());
+
+ return true;
+ }
+
+ if (type == ServiceGroup::TypeInstance) {
+ ServiceGroup::Ptr serviceGroup = static_pointer_cast<ServiceGroup>(object);
+
+ attributes->Set("display_name", serviceGroup->GetDisplayName());
+
+ return true;
+ }
+
+ if (type == CheckCommand::TypeInstance || type == NotificationCommand::TypeInstance || type == EventCommand::TypeInstance) {
+ Command::Ptr command = static_pointer_cast<Command>(object);
+
+ attributes->Set("command", JsonEncode(command->GetCommandLine()));
+ attributes->Set("timeout", std::max(0, command->GetTimeout()));
+
+ return true;
+ }
+
+ return false;
+}
+
+/* Creates a config update with computed checksums etc.
+ * Writes attributes, customVars and checksums into the respective supplied vectors. Adds two values to each vector
+ * (if applicable): first the key, then the value. To use these in a Redis command, the command (e.g. HMSET) and the
+ * key (e.g. icinga:config:object:downtime) need to be prepended. There is nothing to indicate success or failure.
+ */
+void
+IcingaDB::CreateConfigUpdate(const ConfigObject::Ptr& object, const String typeName, std::map<String, std::vector<String>>& hMSets,
+ std::vector<Dictionary::Ptr>& runtimeUpdates, bool runtimeUpdate)
+{
+	/* TODO: This isn't entirely correct as we don't keep track of config objects ourselves. Doing so would avoid duplicated config updates at startup.
+ if (!runtimeUpdate && m_ConfigDumpInProgress)
+ return;
+ */
+
+ if (m_Rcon == nullptr)
+ return;
+
+ Dictionary::Ptr attr = new Dictionary;
+ Dictionary::Ptr chksm = new Dictionary;
+
+ if (!PrepareObject(object, attr, chksm))
+ return;
+
+ InsertObjectDependencies(object, typeName, hMSets, runtimeUpdates, runtimeUpdate);
+
+ String objectKey = GetObjectIdentifier(object);
+ auto& attrs (hMSets[m_PrefixConfigObject + typeName]);
+ auto& chksms (hMSets[m_PrefixConfigCheckSum + typeName]);
+
+ attrs.emplace_back(objectKey);
+ attrs.emplace_back(JsonEncode(attr));
+
+ String checksum = HashValue(attr);
+ chksms.emplace_back(objectKey);
+ chksms.emplace_back(JsonEncode(new Dictionary({{"checksum", checksum}})));
+
+ /* Send an update event to subscribers. */
+ if (runtimeUpdate) {
+ attr->Set("checksum", checksum);
+ AddObjectDataToRuntimeUpdates(runtimeUpdates, objectKey, m_PrefixConfigObject + typeName, attr);
+ }
+}
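+
+// Illustrative example (abbreviated): for a downtime object this fills hMSets roughly as
+//   hMSets["<config object prefix>downtime"]   += { <object id>, <attrs JSON> }
+//   hMSets["<config checksum prefix>downtime"] += { <object id>, {"checksum": <checksum>} JSON }
+// The caller (e.g. SendConfigUpdate() above) prepends "HMSET" and the map key to build the Redis command.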
+
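+// On deletion the object is removed from its config and checksum hashes and a "runtime_type" = "delete" event is added
+// to the icinga:runtime stream; type-specific relationships (groups, time period ranges/includes/excludes, notification
+// users/usergroups, command arguments/env vars) are cleaned up below.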
+void IcingaDB::SendConfigDelete(const ConfigObject::Ptr& object)
+{
+ if (!m_Rcon || !m_Rcon->IsConnected())
+ return;
+
+ Type::Ptr type = object->GetReflectionType();
+ String typeName = type->GetName().ToLower();
+ String objectKey = GetObjectIdentifier(object);
+
+ m_Rcon->FireAndForgetQueries({
+ {"HDEL", m_PrefixConfigObject + typeName, objectKey},
+ {"HDEL", m_PrefixConfigCheckSum + typeName, objectKey},
+ {
+ "XADD", "icinga:runtime", "MAXLEN", "~", "1000000", "*",
+ "redis_key", m_PrefixConfigObject + typeName, "id", objectKey, "runtime_type", "delete"
+ }
+ }, Prio::Config);
+
+ CustomVarObject::Ptr customVarObject = dynamic_pointer_cast<CustomVarObject>(object);
+
+ if (customVarObject) {
+ Dictionary::Ptr vars = customVarObject->GetVars();
+ SendCustomVarsChanged(object, vars, nullptr);
+ }
+
+ if (type == Host::TypeInstance || type == Service::TypeInstance) {
+ Checkable::Ptr checkable = static_pointer_cast<Checkable>(object);
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ m_Rcon->FireAndForgetQuery({
+ "ZREM",
+ service ? "icinga:nextupdate:service" : "icinga:nextupdate:host",
+ GetObjectIdentifier(checkable)
+ }, Prio::CheckResult);
+
+ m_Rcon->FireAndForgetQueries({
+ {"HDEL", m_PrefixConfigObject + typeName + ":state", objectKey},
+ {"HDEL", m_PrefixConfigCheckSum + typeName + ":state", objectKey}
+ }, Prio::RuntimeStateSync);
+
+ if (service) {
+ SendGroupsChanged<ServiceGroup>(checkable, service->GetGroups(), nullptr);
+ } else {
+ SendGroupsChanged<HostGroup>(checkable, host->GetGroups(), nullptr);
+ }
+
+ return;
+ }
+
+ if (type == TimePeriod::TypeInstance) {
+ TimePeriod::Ptr timeperiod = static_pointer_cast<TimePeriod>(object);
+ SendTimePeriodRangesChanged(timeperiod, timeperiod->GetRanges(), nullptr);
+ SendTimePeriodIncludesChanged(timeperiod, timeperiod->GetIncludes(), nullptr);
+ SendTimePeriodExcludesChanged(timeperiod, timeperiod->GetExcludes(), nullptr);
+ return;
+ }
+
+ if (type == User::TypeInstance) {
+ User::Ptr user = static_pointer_cast<User>(object);
+ SendGroupsChanged<UserGroup>(user, user->GetGroups(), nullptr);
+ return;
+ }
+
+ if (type == Notification::TypeInstance) {
+ Notification::Ptr notification = static_pointer_cast<Notification>(object);
+ SendNotificationUsersChanged(notification, notification->GetUsersRaw(), nullptr);
+ SendNotificationUserGroupsChanged(notification, notification->GetUserGroupsRaw(), nullptr);
+ return;
+ }
+
+ if (type == CheckCommand::TypeInstance || type == NotificationCommand::TypeInstance || type == EventCommand::TypeInstance) {
+ Command::Ptr command = static_pointer_cast<Command>(object);
+ SendCommandArgumentsChanged(command, command->GetArguments(), nullptr);
+ SendCommandEnvChanged(command, command->GetEnv(), nullptr);
+ return;
+ }
+}
+
+static inline
+unsigned short GetPreviousState(const Checkable::Ptr& checkable, const Service::Ptr& service, StateType type)
+{
+ auto phs ((type == StateTypeHard ? checkable->GetLastHardStatesRaw() : checkable->GetLastSoftStatesRaw()) % 100u);
+
+ if (service) {
+ return phs;
+ } else {
+ return phs == 99 ? phs : Host::CalculateState(ServiceState(phs));
+ }
+}
+
+void IcingaDB::SendStateChange(const ConfigObject::Ptr& object, const CheckResult::Ptr& cr, StateType type)
+{
+ if (!GetActive()) {
+ return;
+ }
+
+ Checkable::Ptr checkable = dynamic_pointer_cast<Checkable>(object);
+ if (!checkable)
+ return;
+
+ if (!cr)
+ return;
+
+ Host::Ptr host;
+ Service::Ptr service;
+
+ tie(host, service) = GetHostService(checkable);
+
+ UpdateState(checkable, StateUpdate::RuntimeOnly);
+
+ int hard_state;
+ if (!cr) {
+ hard_state = 99;
+ } else {
+ hard_state = service ? Convert::ToLong(service->GetLastHardState()) : Convert::ToLong(host->GetLastHardState());
+ }
+
+ auto eventTime (cr->GetExecutionEnd());
+ auto eventTs (TimestampToMilliseconds(eventTime));
+
+ Array::Ptr rawId = new Array({m_EnvironmentId, object->GetName()});
+ rawId->Add(eventTs);
+
+ std::vector<String> xAdd ({
+ "XADD", "icinga:history:stream:state", "*",
+ "id", HashValue(rawId),
+ "environment_id", m_EnvironmentId,
+ "host_id", GetObjectIdentifier(host),
+ "state_type", Convert::ToString(type),
+ "soft_state", Convert::ToString(cr ? service ? Convert::ToLong(cr->GetState()) : Convert::ToLong(Host::CalculateState(cr->GetState())) : 99),
+ "hard_state", Convert::ToString(hard_state),
+ "check_attempt", Convert::ToString(checkable->GetCheckAttempt()),
+ "previous_soft_state", Convert::ToString(GetPreviousState(checkable, service, StateTypeSoft)),
+ "previous_hard_state", Convert::ToString(GetPreviousState(checkable, service, StateTypeHard)),
+ "max_check_attempts", Convert::ToString(checkable->GetMaxCheckAttempts()),
+ "event_time", Convert::ToString(eventTs),
+ "event_id", CalcEventID("state_change", object, eventTime),
+ "event_type", "state_change"
+ });
+
+ if (cr) {
+ auto output (cr->GetOutput());
+ auto pos (output.Find("\n"));
+
+ if (pos != String::NPos) {
+ auto longOutput (output.SubStr(pos + 1u));
+ output.erase(output.Begin() + pos, output.End());
+
+ xAdd.emplace_back("long_output");
+ xAdd.emplace_back(Utility::ValidateUTF8(std::move(longOutput)));
+ }
+
+ xAdd.emplace_back("output");
+ xAdd.emplace_back(Utility::ValidateUTF8(std::move(output)));
+ xAdd.emplace_back("check_source");
+ xAdd.emplace_back(cr->GetCheckSource());
+ xAdd.emplace_back("scheduling_source");
+ xAdd.emplace_back(cr->GetSchedulingSource());
+ }
+
+ if (service) {
+ xAdd.emplace_back("object_type");
+ xAdd.emplace_back("service");
+ xAdd.emplace_back("service_id");
+ xAdd.emplace_back(GetObjectIdentifier(checkable));
+ } else {
+ xAdd.emplace_back("object_type");
+ xAdd.emplace_back("host");
+ }
+
+ auto endpoint (Endpoint::GetLocalEndpoint());
+
+ if (endpoint) {
+ xAdd.emplace_back("endpoint_id");
+ xAdd.emplace_back(GetObjectIdentifier(endpoint));
+ }
+
+ m_HistoryBulker.ProduceOne(std::move(xAdd));
+}
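+
+// Rough shape of the produced history entry (illustrative, values abbreviated):
+//   XADD icinga:history:stream:state * id <hash> environment_id <id> host_id <id> state_type <0|1>
+//        soft_state <n> hard_state <n> ... output <text> event_id <hash> event_type state_change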
+
+void IcingaDB::SendSentNotification(
+ const Notification::Ptr& notification, const Checkable::Ptr& checkable, const std::set<User::Ptr>& users,
+ NotificationType type, const CheckResult::Ptr& cr, const String& author, const String& text, double sendTime
+)
+{
+ if (!GetActive()) {
+ return;
+ }
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ auto finalText = text;
+ if (finalText == "" && cr) {
+ finalText = cr->GetOutput();
+ }
+
+ auto usersAmount (users.size());
+ auto sendTs (TimestampToMilliseconds(sendTime));
+
+ Array::Ptr rawId = new Array({m_EnvironmentId, notification->GetName()});
+ rawId->Add(GetNotificationTypeByEnum(type));
+ rawId->Add(sendTs);
+
+ auto notificationHistoryId (HashValue(rawId));
+
+ std::vector<String> xAdd ({
+ "XADD", "icinga:history:stream:notification", "*",
+ "id", notificationHistoryId,
+ "environment_id", m_EnvironmentId,
+ "notification_id", GetObjectIdentifier(notification),
+ "host_id", GetObjectIdentifier(host),
+ "type", Convert::ToString(type),
+ "state", Convert::ToString(cr ? service ? Convert::ToLong(cr->GetState()) : Convert::ToLong(Host::CalculateState(cr->GetState())) : 99),
+ "previous_hard_state", Convert::ToString(cr ? Convert::ToLong(service ? cr->GetPreviousHardState() : Host::CalculateState(cr->GetPreviousHardState())) : 99),
+ "author", Utility::ValidateUTF8(author),
+ "text", Utility::ValidateUTF8(finalText),
+ "users_notified", Convert::ToString(usersAmount),
+ "send_time", Convert::ToString(sendTs),
+ "event_id", CalcEventID("notification", notification, sendTime, type),
+ "event_type", "notification"
+ });
+
+ if (service) {
+ xAdd.emplace_back("object_type");
+ xAdd.emplace_back("service");
+ xAdd.emplace_back("service_id");
+ xAdd.emplace_back(GetObjectIdentifier(checkable));
+ } else {
+ xAdd.emplace_back("object_type");
+ xAdd.emplace_back("host");
+ }
+
+ auto endpoint (Endpoint::GetLocalEndpoint());
+
+ if (endpoint) {
+ xAdd.emplace_back("endpoint_id");
+ xAdd.emplace_back(GetObjectIdentifier(endpoint));
+ }
+
+ if (!users.empty()) {
+ Array::Ptr users_notified = new Array();
+ for (const User::Ptr& user : users) {
+ users_notified->Add(GetObjectIdentifier(user));
+ }
+ xAdd.emplace_back("users_notified_ids");
+ xAdd.emplace_back(JsonEncode(users_notified));
+ }
+
+ m_HistoryBulker.ProduceOne(std::move(xAdd));
+}
+
+void IcingaDB::SendStartedDowntime(const Downtime::Ptr& downtime)
+{
+ if (!GetActive()) {
+ return;
+ }
+
+ SendConfigUpdate(downtime, true);
+
+ auto checkable (downtime->GetCheckable());
+ auto triggeredBy (Downtime::GetByName(downtime->GetTriggeredBy()));
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ /* Update checkable state as in_downtime may have changed. */
+ UpdateState(checkable, StateUpdate::Full);
+
+ std::vector<String> xAdd ({
+ "XADD", "icinga:history:stream:downtime", "*",
+ "downtime_id", GetObjectIdentifier(downtime),
+ "environment_id", m_EnvironmentId,
+ "host_id", GetObjectIdentifier(host),
+ "entry_time", Convert::ToString(TimestampToMilliseconds(downtime->GetEntryTime())),
+ "author", Utility::ValidateUTF8(downtime->GetAuthor()),
+ "comment", Utility::ValidateUTF8(downtime->GetComment()),
+ "is_flexible", Convert::ToString((unsigned short)!downtime->GetFixed()),
+ "flexible_duration", Convert::ToString(TimestampToMilliseconds(std::max(0.0, downtime->GetDuration()))),
+ "scheduled_start_time", Convert::ToString(TimestampToMilliseconds(downtime->GetStartTime())),
+ "scheduled_end_time", Convert::ToString(TimestampToMilliseconds(downtime->GetEndTime())),
+ "has_been_cancelled", Convert::ToString((unsigned short)downtime->GetWasCancelled()),
+ "trigger_time", Convert::ToString(TimestampToMilliseconds(downtime->GetTriggerTime())),
+ "cancel_time", Convert::ToString(TimestampToMilliseconds(downtime->GetRemoveTime())),
+ "event_id", CalcEventID("downtime_start", downtime),
+ "event_type", "downtime_start"
+ });
+
+ if (service) {
+ xAdd.emplace_back("object_type");
+ xAdd.emplace_back("service");
+ xAdd.emplace_back("service_id");
+ xAdd.emplace_back(GetObjectIdentifier(checkable));
+ } else {
+ xAdd.emplace_back("object_type");
+ xAdd.emplace_back("host");
+ }
+
+ if (triggeredBy) {
+ xAdd.emplace_back("triggered_by_id");
+ xAdd.emplace_back(GetObjectIdentifier(triggeredBy));
+ }
+
+ if (downtime->GetFixed()) {
+ xAdd.emplace_back("start_time");
+ xAdd.emplace_back(Convert::ToString(TimestampToMilliseconds(downtime->GetStartTime())));
+ xAdd.emplace_back("end_time");
+ xAdd.emplace_back(Convert::ToString(TimestampToMilliseconds(downtime->GetEndTime())));
+ } else {
+ xAdd.emplace_back("start_time");
+ xAdd.emplace_back(Convert::ToString(TimestampToMilliseconds(downtime->GetTriggerTime())));
+ xAdd.emplace_back("end_time");
+ xAdd.emplace_back(Convert::ToString(TimestampToMilliseconds(downtime->GetTriggerTime() + std::max(0.0, downtime->GetDuration()))));
+ }
+
+ auto endpoint (Endpoint::GetLocalEndpoint());
+
+ if (endpoint) {
+ xAdd.emplace_back("endpoint_id");
+ xAdd.emplace_back(GetObjectIdentifier(endpoint));
+ }
+
+ auto parent (Downtime::GetByName(downtime->GetParent()));
+
+ if (parent) {
+ xAdd.emplace_back("parent_id");
+ xAdd.emplace_back(GetObjectIdentifier(parent));
+ }
+
+ auto scheduledBy (downtime->GetScheduledBy());
+
+ if (!scheduledBy.IsEmpty()) {
+ xAdd.emplace_back("scheduled_by");
+ xAdd.emplace_back(scheduledBy);
+ }
+
+ m_HistoryBulker.ProduceOne(std::move(xAdd));
+}
+
+void IcingaDB::SendRemovedDowntime(const Downtime::Ptr& downtime)
+{
+ if (!GetActive()) {
+ return;
+ }
+
+ auto checkable (downtime->GetCheckable());
+ auto triggeredBy (Downtime::GetByName(downtime->GetTriggeredBy()));
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ // Downtime never got triggered (didn't send "downtime_start") so we don't want to send "downtime_end"
+ if (downtime->GetTriggerTime() == 0)
+ return;
+
+ /* Update checkable state as in_downtime may have changed. */
+ UpdateState(checkable, StateUpdate::Full);
+
+ std::vector<String> xAdd ({
+ "XADD", "icinga:history:stream:downtime", "*",
+ "downtime_id", GetObjectIdentifier(downtime),
+ "environment_id", m_EnvironmentId,
+ "host_id", GetObjectIdentifier(host),
+ "entry_time", Convert::ToString(TimestampToMilliseconds(downtime->GetEntryTime())),
+ "author", Utility::ValidateUTF8(downtime->GetAuthor()),
+ "cancelled_by", Utility::ValidateUTF8(downtime->GetRemovedBy()),
+ "comment", Utility::ValidateUTF8(downtime->GetComment()),
+ "is_flexible", Convert::ToString((unsigned short)!downtime->GetFixed()),
+ "flexible_duration", Convert::ToString(TimestampToMilliseconds(std::max(0.0, downtime->GetDuration()))),
+ "scheduled_start_time", Convert::ToString(TimestampToMilliseconds(downtime->GetStartTime())),
+ "scheduled_end_time", Convert::ToString(TimestampToMilliseconds(downtime->GetEndTime())),
+ "has_been_cancelled", Convert::ToString((unsigned short)downtime->GetWasCancelled()),
+ "trigger_time", Convert::ToString(TimestampToMilliseconds(downtime->GetTriggerTime())),
+ "cancel_time", Convert::ToString(TimestampToMilliseconds(downtime->GetRemoveTime())),
+ "event_id", CalcEventID("downtime_end", downtime),
+ "event_type", "downtime_end"
+ });
+
+ if (service) {
+ xAdd.emplace_back("object_type");
+ xAdd.emplace_back("service");
+ xAdd.emplace_back("service_id");
+ xAdd.emplace_back(GetObjectIdentifier(checkable));
+ } else {
+ xAdd.emplace_back("object_type");
+ xAdd.emplace_back("host");
+ }
+
+ if (triggeredBy) {
+ xAdd.emplace_back("triggered_by_id");
+ xAdd.emplace_back(GetObjectIdentifier(triggeredBy));
+ }
+
+ if (downtime->GetFixed()) {
+ xAdd.emplace_back("start_time");
+ xAdd.emplace_back(Convert::ToString(TimestampToMilliseconds(downtime->GetStartTime())));
+ xAdd.emplace_back("end_time");
+ xAdd.emplace_back(Convert::ToString(TimestampToMilliseconds(downtime->GetEndTime())));
+ } else {
+ xAdd.emplace_back("start_time");
+ xAdd.emplace_back(Convert::ToString(TimestampToMilliseconds(downtime->GetTriggerTime())));
+ xAdd.emplace_back("end_time");
+ xAdd.emplace_back(Convert::ToString(TimestampToMilliseconds(downtime->GetTriggerTime() + std::max(0.0, downtime->GetDuration()))));
+ }
+
+ auto endpoint (Endpoint::GetLocalEndpoint());
+
+ if (endpoint) {
+ xAdd.emplace_back("endpoint_id");
+ xAdd.emplace_back(GetObjectIdentifier(endpoint));
+ }
+
+ auto parent (Downtime::GetByName(downtime->GetParent()));
+
+ if (parent) {
+ xAdd.emplace_back("parent_id");
+ xAdd.emplace_back(GetObjectIdentifier(parent));
+ }
+
+ auto scheduledBy (downtime->GetScheduledBy());
+
+ if (!scheduledBy.IsEmpty()) {
+ xAdd.emplace_back("scheduled_by");
+ xAdd.emplace_back(scheduledBy);
+ }
+
+ m_HistoryBulker.ProduceOne(std::move(xAdd));
+}
+
+void IcingaDB::SendAddedComment(const Comment::Ptr& comment)
+{
+ if (comment->GetEntryType() != CommentUser || !GetActive())
+ return;
+
+ auto checkable (comment->GetCheckable());
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ std::vector<String> xAdd ({
+ "XADD", "icinga:history:stream:comment", "*",
+ "comment_id", GetObjectIdentifier(comment),
+ "environment_id", m_EnvironmentId,
+ "host_id", GetObjectIdentifier(host),
+ "entry_time", Convert::ToString(TimestampToMilliseconds(comment->GetEntryTime())),
+ "author", Utility::ValidateUTF8(comment->GetAuthor()),
+ "comment", Utility::ValidateUTF8(comment->GetText()),
+ "entry_type", Convert::ToString(comment->GetEntryType()),
+ "is_persistent", Convert::ToString((unsigned short)comment->GetPersistent()),
+ "is_sticky", Convert::ToString((unsigned short)comment->GetSticky()),
+ "event_id", CalcEventID("comment_add", comment),
+ "event_type", "comment_add"
+ });
+
+ if (service) {
+ xAdd.emplace_back("object_type");
+ xAdd.emplace_back("service");
+ xAdd.emplace_back("service_id");
+ xAdd.emplace_back(GetObjectIdentifier(checkable));
+ } else {
+ xAdd.emplace_back("object_type");
+ xAdd.emplace_back("host");
+ }
+
+ auto endpoint (Endpoint::GetLocalEndpoint());
+
+ if (endpoint) {
+ xAdd.emplace_back("endpoint_id");
+ xAdd.emplace_back(GetObjectIdentifier(endpoint));
+ }
+
+ {
+ auto expireTime (comment->GetExpireTime());
+
+ if (expireTime > 0) {
+ xAdd.emplace_back("expire_time");
+ xAdd.emplace_back(Convert::ToString(TimestampToMilliseconds(expireTime)));
+ }
+ }
+
+ m_HistoryBulker.ProduceOne(std::move(xAdd));
+ UpdateState(checkable, StateUpdate::Full);
+}
+
+void IcingaDB::SendRemovedComment(const Comment::Ptr& comment)
+{
+ if (comment->GetEntryType() != CommentUser || !GetActive()) {
+ return;
+ }
+
+ double removeTime = comment->GetRemoveTime();
+ bool wasRemoved = removeTime > 0;
+
+ double expireTime = comment->GetExpireTime();
+ bool hasExpireTime = expireTime > 0;
+ bool isExpired = hasExpireTime && expireTime <= Utility::GetTime();
+
+ if (!wasRemoved && !isExpired) {
+ /* The comment object disappeared for no apparent reason, most likely because it simply was deleted instead
+ * of using the proper remove-comment API action. In this case, information that should normally be set is
+ * missing and a proper history event cannot be generated.
+ */
+ return;
+ }
+
+ auto checkable (comment->GetCheckable());
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ std::vector<String> xAdd ({
+ "XADD", "icinga:history:stream:comment", "*",
+ "comment_id", GetObjectIdentifier(comment),
+ "environment_id", m_EnvironmentId,
+ "host_id", GetObjectIdentifier(host),
+ "entry_time", Convert::ToString(TimestampToMilliseconds(comment->GetEntryTime())),
+ "author", Utility::ValidateUTF8(comment->GetAuthor()),
+ "comment", Utility::ValidateUTF8(comment->GetText()),
+ "entry_type", Convert::ToString(comment->GetEntryType()),
+ "is_persistent", Convert::ToString((unsigned short)comment->GetPersistent()),
+ "is_sticky", Convert::ToString((unsigned short)comment->GetSticky()),
+ "event_id", CalcEventID("comment_remove", comment),
+ "event_type", "comment_remove"
+ });
+
+ if (service) {
+ xAdd.emplace_back("object_type");
+ xAdd.emplace_back("service");
+ xAdd.emplace_back("service_id");
+ xAdd.emplace_back(GetObjectIdentifier(checkable));
+ } else {
+ xAdd.emplace_back("object_type");
+ xAdd.emplace_back("host");
+ }
+
+ auto endpoint (Endpoint::GetLocalEndpoint());
+
+ if (endpoint) {
+ xAdd.emplace_back("endpoint_id");
+ xAdd.emplace_back(GetObjectIdentifier(endpoint));
+ }
+
+ if (wasRemoved) {
+ xAdd.emplace_back("remove_time");
+ xAdd.emplace_back(Convert::ToString(TimestampToMilliseconds(removeTime)));
+ xAdd.emplace_back("has_been_removed");
+ xAdd.emplace_back("1");
+ xAdd.emplace_back("removed_by");
+ xAdd.emplace_back(Utility::ValidateUTF8(comment->GetRemovedBy()));
+ } else {
+ xAdd.emplace_back("has_been_removed");
+ xAdd.emplace_back("0");
+ }
+
+ if (hasExpireTime) {
+ xAdd.emplace_back("expire_time");
+ xAdd.emplace_back(Convert::ToString(TimestampToMilliseconds(expireTime)));
+ }
+
+ m_HistoryBulker.ProduceOne(std::move(xAdd));
+ UpdateState(checkable, StateUpdate::Full);
+}
+
+void IcingaDB::SendFlappingChange(const Checkable::Ptr& checkable, double changeTime, double flappingLastChange)
+{
+ if (!GetActive()) {
+ return;
+ }
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ std::vector<String> xAdd ({
+ "XADD", "icinga:history:stream:flapping", "*",
+ "environment_id", m_EnvironmentId,
+ "host_id", GetObjectIdentifier(host),
+ "flapping_threshold_low", Convert::ToString(checkable->GetFlappingThresholdLow()),
+ "flapping_threshold_high", Convert::ToString(checkable->GetFlappingThresholdHigh())
+ });
+
+ if (service) {
+ xAdd.emplace_back("object_type");
+ xAdd.emplace_back("service");
+ xAdd.emplace_back("service_id");
+ xAdd.emplace_back(GetObjectIdentifier(checkable));
+ } else {
+ xAdd.emplace_back("object_type");
+ xAdd.emplace_back("host");
+ }
+
+ auto endpoint (Endpoint::GetLocalEndpoint());
+
+ if (endpoint) {
+ xAdd.emplace_back("endpoint_id");
+ xAdd.emplace_back(GetObjectIdentifier(endpoint));
+ }
+
+ long long startTime;
+
+ if (checkable->IsFlapping()) {
+ startTime = TimestampToMilliseconds(changeTime);
+
+ xAdd.emplace_back("event_type");
+ xAdd.emplace_back("flapping_start");
+ xAdd.emplace_back("percent_state_change_start");
+ xAdd.emplace_back(Convert::ToString(checkable->GetFlappingCurrent()));
+ } else {
+ startTime = TimestampToMilliseconds(flappingLastChange);
+
+ xAdd.emplace_back("event_type");
+ xAdd.emplace_back("flapping_end");
+ xAdd.emplace_back("end_time");
+ xAdd.emplace_back(Convert::ToString(TimestampToMilliseconds(changeTime)));
+ xAdd.emplace_back("percent_state_change_end");
+ xAdd.emplace_back(Convert::ToString(checkable->GetFlappingCurrent()));
+ }
+
+ xAdd.emplace_back("start_time");
+ xAdd.emplace_back(Convert::ToString(startTime));
+ xAdd.emplace_back("event_id");
+ xAdd.emplace_back(CalcEventID(checkable->IsFlapping() ? "flapping_start" : "flapping_end", checkable, startTime));
+ xAdd.emplace_back("id");
+ xAdd.emplace_back(HashValue(new Array({m_EnvironmentId, checkable->GetName(), startTime})));
+
+ m_HistoryBulker.ProduceOne(std::move(xAdd));
+}
+
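+// Keeps the icinga:nextupdate:host / icinga:nextupdate:service sorted sets in sync: while active checks are enabled the
+// checkable is (re-)scored with its next update timestamp (ZADD), otherwise it is removed (ZREM).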
+void IcingaDB::SendNextUpdate(const Checkable::Ptr& checkable)
+{
+ if (!m_Rcon || !m_Rcon->IsConnected())
+ return;
+
+ if (checkable->GetEnableActiveChecks()) {
+ m_Rcon->FireAndForgetQuery(
+ {
+ "ZADD",
+ dynamic_pointer_cast<Service>(checkable) ? "icinga:nextupdate:service" : "icinga:nextupdate:host",
+ Convert::ToString(checkable->GetNextUpdate()),
+ GetObjectIdentifier(checkable)
+ },
+ Prio::CheckResult
+ );
+ } else {
+ m_Rcon->FireAndForgetQuery(
+ {
+ "ZREM",
+ dynamic_pointer_cast<Service>(checkable) ? "icinga:nextupdate:service" : "icinga:nextupdate:host",
+ GetObjectIdentifier(checkable)
+ },
+ Prio::CheckResult
+ );
+ }
+}
+
+void IcingaDB::SendAcknowledgementSet(const Checkable::Ptr& checkable, const String& author, const String& comment, AcknowledgementType type, bool persistent, double changeTime, double expiry)
+{
+ if (!GetActive()) {
+ return;
+ }
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ /* Update checkable state as is_acknowledged may have changed. */
+ UpdateState(checkable, StateUpdate::Full);
+
+ std::vector<String> xAdd ({
+ "XADD", "icinga:history:stream:acknowledgement", "*",
+ "environment_id", m_EnvironmentId,
+ "host_id", GetObjectIdentifier(host),
+ "event_type", "ack_set",
+ "author", author,
+ "comment", comment,
+ "is_sticky", Convert::ToString((unsigned short)(type == AcknowledgementSticky)),
+ "is_persistent", Convert::ToString((unsigned short)persistent)
+ });
+
+ if (service) {
+ xAdd.emplace_back("object_type");
+ xAdd.emplace_back("service");
+ xAdd.emplace_back("service_id");
+ xAdd.emplace_back(GetObjectIdentifier(checkable));
+ } else {
+ xAdd.emplace_back("object_type");
+ xAdd.emplace_back("host");
+ }
+
+ auto endpoint (Endpoint::GetLocalEndpoint());
+
+ if (endpoint) {
+ xAdd.emplace_back("endpoint_id");
+ xAdd.emplace_back(GetObjectIdentifier(endpoint));
+ }
+
+ if (expiry > 0) {
+ xAdd.emplace_back("expire_time");
+ xAdd.emplace_back(Convert::ToString(TimestampToMilliseconds(expiry)));
+ }
+
+ long long setTime = TimestampToMilliseconds(changeTime);
+
+ xAdd.emplace_back("set_time");
+ xAdd.emplace_back(Convert::ToString(setTime));
+ xAdd.emplace_back("event_id");
+ xAdd.emplace_back(CalcEventID("ack_set", checkable, setTime));
+ xAdd.emplace_back("id");
+ xAdd.emplace_back(HashValue(new Array({m_EnvironmentId, checkable->GetName(), setTime})));
+
+ m_HistoryBulker.ProduceOne(std::move(xAdd));
+}
+
+void IcingaDB::SendAcknowledgementCleared(const Checkable::Ptr& checkable, const String& removedBy, double changeTime, double ackLastChange)
+{
+ if (!GetActive()) {
+ return;
+ }
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ /* Update checkable state as is_acknowledged may have changed. */
+ UpdateState(checkable, StateUpdate::Full);
+
+ std::vector<String> xAdd ({
+ "XADD", "icinga:history:stream:acknowledgement", "*",
+ "environment_id", m_EnvironmentId,
+ "host_id", GetObjectIdentifier(host),
+ "clear_time", Convert::ToString(TimestampToMilliseconds(changeTime)),
+ "event_type", "ack_clear"
+ });
+
+ if (service) {
+ xAdd.emplace_back("object_type");
+ xAdd.emplace_back("service");
+ xAdd.emplace_back("service_id");
+ xAdd.emplace_back(GetObjectIdentifier(checkable));
+ } else {
+ xAdd.emplace_back("object_type");
+ xAdd.emplace_back("host");
+ }
+
+ auto endpoint (Endpoint::GetLocalEndpoint());
+
+ if (endpoint) {
+ xAdd.emplace_back("endpoint_id");
+ xAdd.emplace_back(GetObjectIdentifier(endpoint));
+ }
+
+ long long setTime = TimestampToMilliseconds(ackLastChange);
+
+ xAdd.emplace_back("set_time");
+ xAdd.emplace_back(Convert::ToString(setTime));
+ xAdd.emplace_back("event_id");
+ xAdd.emplace_back(CalcEventID("ack_clear", checkable, setTime));
+ xAdd.emplace_back("id");
+ xAdd.emplace_back(HashValue(new Array({m_EnvironmentId, checkable->GetName(), setTime})));
+
+ if (!removedBy.IsEmpty()) {
+ xAdd.emplace_back("cleared_by");
+ xAdd.emplace_back(removedBy);
+ }
+
+ m_HistoryBulker.ProduceOne(std::move(xAdd));
+}
+
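+// Drains m_HistoryBulker in batches: each batch of history XADD queries is sent to Redis, retrying every 2 seconds while
+// the connection is unavailable; the pending backlog is logged periodically and remaining queries are discarded on shutdown.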
+void IcingaDB::ForwardHistoryEntries()
+{
+ using clock = std::chrono::steady_clock;
+
+ const std::chrono::seconds logInterval (10);
+ auto nextLog (clock::now() + logInterval);
+
+ auto logPeriodically ([this, logInterval, &nextLog]() {
+ if (clock::now() > nextLog) {
+ nextLog += logInterval;
+
+ auto size (m_HistoryBulker.Size());
+
+ Log(size > m_HistoryBulker.GetBulkSize() ? LogInformation : LogNotice, "IcingaDB")
+ << "Pending history queries: " << size;
+ }
+ });
+
+ for (;;) {
+ logPeriodically();
+
+ auto haystack (m_HistoryBulker.ConsumeMany());
+
+ if (haystack.empty()) {
+ if (!GetActive()) {
+ break;
+ }
+
+ continue;
+ }
+
+ uintmax_t attempts = 0;
+
+ auto logFailure ([&haystack, &attempts](const char* err = nullptr) {
+ Log msg (LogNotice, "IcingaDB");
+
+ msg << "history: " << haystack.size() << " queries failed temporarily (attempt #" << ++attempts << ")";
+
+ if (err) {
+ msg << ": " << err;
+ }
+ });
+
+ for (;;) {
+ logPeriodically();
+
+ if (m_Rcon && m_Rcon->IsConnected()) {
+ try {
+ m_Rcon->GetResultsOfQueries(haystack, Prio::History, {0, 0, haystack.size()});
+ break;
+ } catch (const std::exception& ex) {
+ logFailure(ex.what());
+ } catch (...) {
+ logFailure();
+ }
+ } else {
+ logFailure("not connected to Redis");
+ }
+
+ if (!GetActive()) {
+ Log(LogCritical, "IcingaDB") << "history: " << haystack.size() << " queries failed (attempt #" << attempts
+ << ") while we're about to shut down. Giving up and discarding additional "
+ << m_HistoryBulker.Size() << " queued history queries.";
+
+ return;
+ }
+
+ Utility::Sleep(2);
+ }
+ }
+}
+
+void IcingaDB::SendNotificationUsersChanged(const Notification::Ptr& notification, const Array::Ptr& oldValues, const Array::Ptr& newValues) {
+ if (!m_Rcon || !m_Rcon->IsConnected() || oldValues == newValues) {
+ return;
+ }
+
+ std::vector<Value> deletedUsers = GetArrayDeletedValues(oldValues, newValues);
+
+ for (const auto& userName : deletedUsers) {
+ String id = HashValue(new Array({m_EnvironmentId, "user", userName, notification->GetName()}));
+ DeleteRelationship(id, "notification:user");
+ DeleteRelationship(id, "notification:recipient");
+ }
+}
+
+void IcingaDB::SendNotificationUserGroupsChanged(const Notification::Ptr& notification, const Array::Ptr& oldValues, const Array::Ptr& newValues) {
+ if (!m_Rcon || !m_Rcon->IsConnected() || oldValues == newValues) {
+ return;
+ }
+
+ std::vector<Value> deletedUserGroups = GetArrayDeletedValues(oldValues, newValues);
+
+ for (const auto& userGroupName : deletedUserGroups) {
+ UserGroup::Ptr userGroup = UserGroup::GetByName(userGroupName);
+ String id = HashValue(new Array({m_EnvironmentId, "usergroup", userGroupName, notification->GetName()}));
+ DeleteRelationship(id, "notification:usergroup");
+ DeleteRelationship(id, "notification:recipient");
+
+ for (const User::Ptr& user : userGroup->GetMembers()) {
+ String userId = HashValue(new Array({m_EnvironmentId, "usergroupuser", user->GetName(), userGroupName, notification->GetName()}));
+ DeleteRelationship(userId, "notification:recipient");
+ }
+ }
+}
+
+void IcingaDB::SendTimePeriodRangesChanged(const TimePeriod::Ptr& timeperiod, const Dictionary::Ptr& oldValues, const Dictionary::Ptr& newValues) {
+ if (!m_Rcon || !m_Rcon->IsConnected() || oldValues == newValues) {
+ return;
+ }
+
+ std::vector<String> deletedKeys = GetDictionaryDeletedKeys(oldValues, newValues);
+ String typeName = GetLowerCaseTypeNameDB(timeperiod);
+
+ for (const auto& rangeKey : deletedKeys) {
+ String id = HashValue(new Array({m_EnvironmentId, rangeKey, oldValues->Get(rangeKey), timeperiod->GetName()}));
+ DeleteRelationship(id, "timeperiod:range");
+ }
+}
+
+void IcingaDB::SendTimePeriodIncludesChanged(const TimePeriod::Ptr& timeperiod, const Array::Ptr& oldValues, const Array::Ptr& newValues) {
+ if (!m_Rcon || !m_Rcon->IsConnected() || oldValues == newValues) {
+ return;
+ }
+
+ std::vector<Value> deletedIncludes = GetArrayDeletedValues(oldValues, newValues);
+
+ for (const auto& includeName : deletedIncludes) {
+ String id = HashValue(new Array({m_EnvironmentId, includeName, timeperiod->GetName()}));
+ DeleteRelationship(id, "timeperiod:override:include");
+ }
+}
+
+void IcingaDB::SendTimePeriodExcludesChanged(const TimePeriod::Ptr& timeperiod, const Array::Ptr& oldValues, const Array::Ptr& newValues) {
+ if (!m_Rcon || !m_Rcon->IsConnected() || oldValues == newValues) {
+ return;
+ }
+
+ std::vector<Value> deletedExcludes = GetArrayDeletedValues(oldValues, newValues);
+
+ for (const auto& excludeName : deletedExcludes) {
+ String id = HashValue(new Array({m_EnvironmentId, excludeName, timeperiod->GetName()}));
+ DeleteRelationship(id, "timeperiod:override:exclude");
+ }
+}
+
+template<typename T>
+void IcingaDB::SendGroupsChanged(const ConfigObject::Ptr& object, const Array::Ptr& oldValues, const Array::Ptr& newValues) {
+ if (!m_Rcon || !m_Rcon->IsConnected() || oldValues == newValues) {
+ return;
+ }
+
+ std::vector<Value> deletedGroups = GetArrayDeletedValues(oldValues, newValues);
+ String typeName = GetLowerCaseTypeNameDB(object);
+
+ for (const auto& groupName : deletedGroups) {
+ typename T::Ptr group = ConfigObject::GetObject<T>(groupName);
+ String id = HashValue(new Array({m_EnvironmentId, group->GetName(), object->GetName()}));
+ DeleteRelationship(id, typeName + "group:member");
+
+ if (std::is_same<T, UserGroup>::value) {
+ UserGroup::Ptr userGroup = dynamic_pointer_cast<UserGroup>(group);
+
+ for (const auto& notification : userGroup->GetNotifications()) {
+ String userId = HashValue(new Array({m_EnvironmentId, "usergroupuser", object->GetName(), groupName, notification->GetName()}));
+ DeleteRelationship(userId, "notification:recipient");
+ }
+ }
+ }
+}
+
+void IcingaDB::SendCommandEnvChanged(const ConfigObject::Ptr& command, const Dictionary::Ptr& oldValues, const Dictionary::Ptr& newValues) {
+ if (!m_Rcon || !m_Rcon->IsConnected() || oldValues == newValues) {
+ return;
+ }
+
+ std::vector<String> deletedKeys = GetDictionaryDeletedKeys(oldValues, newValues);
+ String typeName = GetLowerCaseTypeNameDB(command);
+
+ for (const auto& envvarKey : deletedKeys) {
+ String id = HashValue(new Array({m_EnvironmentId, envvarKey, command->GetName()}));
+ DeleteRelationship(id, typeName + ":envvar", true);
+ }
+}
+
+void IcingaDB::SendCommandArgumentsChanged(const ConfigObject::Ptr& command, const Dictionary::Ptr& oldValues, const Dictionary::Ptr& newValues) {
+ if (!m_Rcon || !m_Rcon->IsConnected() || oldValues == newValues) {
+ return;
+ }
+
+ std::vector<String> deletedKeys = GetDictionaryDeletedKeys(oldValues, newValues);
+ String typeName = GetLowerCaseTypeNameDB(command);
+
+ for (const auto& argumentKey : deletedKeys) {
+ String id = HashValue(new Array({m_EnvironmentId, argumentKey, command->GetName()}));
+ DeleteRelationship(id, typeName + ":argument", true);
+ }
+}
+
+void IcingaDB::SendCustomVarsChanged(const ConfigObject::Ptr& object, const Dictionary::Ptr& oldValues, const Dictionary::Ptr& newValues) {
+ if (m_IndexedTypes.find(object->GetReflectionType().get()) == m_IndexedTypes.end()) {
+ return;
+ }
+
+ if (!m_Rcon || !m_Rcon->IsConnected() || oldValues == newValues) {
+ return;
+ }
+
+ Dictionary::Ptr oldVars = SerializeVars(oldValues);
+ Dictionary::Ptr newVars = SerializeVars(newValues);
+
+ std::vector<String> deletedVars = GetDictionaryDeletedKeys(oldVars, newVars);
+ String typeName = GetLowerCaseTypeNameDB(object);
+
+ for (const auto& varId : deletedVars) {
+ String id = HashValue(new Array({m_EnvironmentId, varId, object->GetName()}));
+ DeleteRelationship(id, typeName + ":customvar");
+ }
+}
+
+Dictionary::Ptr IcingaDB::SerializeState(const Checkable::Ptr& checkable)
+{
+ Dictionary::Ptr attrs = new Dictionary();
+
+ Host::Ptr host;
+ Service::Ptr service;
+
+ tie(host, service) = GetHostService(checkable);
+
+ String id = GetObjectIdentifier(checkable);
+
+ /*
+ * As there is a 1:1 relationship between host and host state, the host ID ('host_id')
+	 * is also used as the host state ID ('id'). These are duplicated to 1) avoid having
+	 * special handling for this in Icinga DB and 2) have both a primary key and a foreign key
+	 * in the SQL database in the end. In the database, 'host_id' ends up as the foreign key 'host_state.host_id'
+	 * referring to 'host.id', while 'id' ends up as the primary key 'host_state.id'. The same applies to services.
+ */
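+	// For example (illustrative): a host state gets id == host_id == <host id>, while a service state gets
+	// id == service_id == <service id> plus host_id referring to its host.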
+ attrs->Set("id", id);
+ attrs->Set("environment_id", m_EnvironmentId);
+ attrs->Set("state_type", checkable->HasBeenChecked() ? checkable->GetStateType() : StateTypeHard);
+
+ // TODO: last_hard/soft_state should be "previous".
+ if (service) {
+ attrs->Set("service_id", id);
+ auto state = service->HasBeenChecked() ? service->GetState() : 99;
+ attrs->Set("soft_state", state);
+ attrs->Set("hard_state", service->HasBeenChecked() ? service->GetLastHardState() : 99);
+ attrs->Set("severity", service->GetSeverity());
+ attrs->Set("host_id", GetObjectIdentifier(host));
+ } else {
+ attrs->Set("host_id", id);
+ auto state = host->HasBeenChecked() ? host->GetState() : 99;
+ attrs->Set("soft_state", state);
+ attrs->Set("hard_state", host->HasBeenChecked() ? host->GetLastHardState() : 99);
+ attrs->Set("severity", host->GetSeverity());
+ }
+
+ attrs->Set("previous_soft_state", GetPreviousState(checkable, service, StateTypeSoft));
+ attrs->Set("previous_hard_state", GetPreviousState(checkable, service, StateTypeHard));
+ attrs->Set("check_attempt", checkable->GetCheckAttempt());
+
+ attrs->Set("is_active", checkable->IsActive());
+
+ CheckResult::Ptr cr = checkable->GetLastCheckResult();
+
+ if (cr) {
+ String rawOutput = cr->GetOutput();
+ if (!rawOutput.IsEmpty()) {
+ size_t lineBreak = rawOutput.Find("\n");
+ String output = rawOutput.SubStr(0, lineBreak);
+ if (!output.IsEmpty())
+ attrs->Set("output", rawOutput.SubStr(0, lineBreak));
+
+ if (lineBreak > 0 && lineBreak != String::NPos) {
+ String longOutput = rawOutput.SubStr(lineBreak+1, rawOutput.GetLength());
+ if (!longOutput.IsEmpty())
+ attrs->Set("long_output", longOutput);
+ }
+ }
+
+ String perfData = PluginUtility::FormatPerfdata(cr->GetPerformanceData());
+ if (!perfData.IsEmpty())
+ attrs->Set("performance_data", perfData);
+
+ String normedPerfData = PluginUtility::FormatPerfdata(cr->GetPerformanceData(), true);
+ if (!normedPerfData.IsEmpty())
+ attrs->Set("normalized_performance_data", normedPerfData);
+
+ if (!cr->GetCommand().IsEmpty())
+ attrs->Set("check_commandline", FormatCommandLine(cr->GetCommand()));
+ attrs->Set("execution_time", TimestampToMilliseconds(fmax(0.0, cr->CalculateExecutionTime())));
+ attrs->Set("latency", TimestampToMilliseconds(cr->CalculateLatency()));
+ attrs->Set("check_source", cr->GetCheckSource());
+ attrs->Set("scheduling_source", cr->GetSchedulingSource());
+ }
+
+ attrs->Set("is_problem", checkable->GetProblem());
+ attrs->Set("is_handled", checkable->GetHandled());
+ attrs->Set("is_reachable", checkable->IsReachable());
+ attrs->Set("is_flapping", checkable->IsFlapping());
+
+ attrs->Set("is_acknowledged", checkable->GetAcknowledgement());
+ if (checkable->IsAcknowledged()) {
+ Timestamp entry = 0;
+ Comment::Ptr AckComment;
+ for (const Comment::Ptr& c : checkable->GetComments()) {
+ if (c->GetEntryType() == CommentAcknowledgement) {
+ if (c->GetEntryTime() > entry) {
+ entry = c->GetEntryTime();
+ AckComment = c;
+ }
+ }
+ }
+ if (AckComment != nullptr) {
+ attrs->Set("acknowledgement_comment_id", GetObjectIdentifier(AckComment));
+ }
+ }
+
+ {
+ auto lastComment (checkable->GetLastComment());
+
+ if (lastComment) {
+ attrs->Set("last_comment_id", GetObjectIdentifier(lastComment));
+ }
+ }
+
+ attrs->Set("in_downtime", checkable->IsInDowntime());
+
+ if (checkable->GetCheckTimeout().IsEmpty())
+ attrs->Set("check_timeout", TimestampToMilliseconds(checkable->GetCheckCommand()->GetTimeout()));
+ else
+ attrs->Set("check_timeout", TimestampToMilliseconds(checkable->GetCheckTimeout()));
+
+ long long lastCheck = TimestampToMilliseconds(checkable->GetLastCheck());
+ if (lastCheck > 0)
+ attrs->Set("last_update", lastCheck);
+
+ attrs->Set("last_state_change", TimestampToMilliseconds(checkable->GetLastStateChange()));
+ attrs->Set("next_check", TimestampToMilliseconds(checkable->GetNextCheck()));
+ attrs->Set("next_update", TimestampToMilliseconds(checkable->GetNextUpdate()));
+
+ return attrs;
+}
+
+std::vector<String>
+IcingaDB::UpdateObjectAttrs(const ConfigObject::Ptr& object, int fieldType,
+ const String& typeNameOverride)
+{
+ Type::Ptr type = object->GetReflectionType();
+ Dictionary::Ptr attrs(new Dictionary);
+
+ for (int fid = 0; fid < type->GetFieldCount(); fid++) {
+ Field field = type->GetFieldInfo(fid);
+
+ if ((field.Attributes & fieldType) == 0)
+ continue;
+
+ Value val = object->GetField(fid);
+
+ /* hide attributes which shouldn't be user-visible */
+ if (field.Attributes & FANoUserView)
+ continue;
+
+ /* hide internal navigation fields */
+ if (field.Attributes & FANavigation && !(field.Attributes & (FAConfig | FAState)))
+ continue;
+
+ attrs->Set(field.Name, Serialize(val));
+ }
+
+ /* Downtimes require in_effect, which is not an attribute */
+ Downtime::Ptr downtime = dynamic_pointer_cast<Downtime>(object);
+ if (downtime) {
+ attrs->Set("in_effect", Serialize(downtime->IsInEffect()));
+ attrs->Set("trigger_time", Serialize(TimestampToMilliseconds(downtime->GetTriggerTime())));
+ }
+
+ /* Use the name checksum as unique key. */
+ String typeName = type->GetName().ToLower();
+ if (!typeNameOverride.IsEmpty())
+ typeName = typeNameOverride.ToLower();
+
+ return {GetObjectIdentifier(object), JsonEncode(attrs)};
+ //m_Rcon->FireAndForgetQuery({"HSET", keyPrefix + typeName, GetObjectIdentifier(object), JsonEncode(attrs)});
+}
+
+void IcingaDB::StateChangeHandler(const ConfigObject::Ptr& object, const CheckResult::Ptr& cr, StateType type)
+{
+ for (const IcingaDB::Ptr& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ rw->SendStateChange(object, cr, type);
+ }
+}
+
+void IcingaDB::ReachabilityChangeHandler(const std::set<Checkable::Ptr>& children)
+{
+ for (const IcingaDB::Ptr& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ for (auto& checkable : children) {
+ rw->UpdateState(checkable, StateUpdate::Full);
+ }
+ }
+}
+
+void IcingaDB::VersionChangedHandler(const ConfigObject::Ptr& object)
+{
+ Type::Ptr type = object->GetReflectionType();
+
+ if (m_IndexedTypes.find(type.get()) == m_IndexedTypes.end()) {
+ return;
+ }
+
+ if (object->IsActive()) {
+ // Create or update the object config
+ for (const IcingaDB::Ptr& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ if (rw)
+ rw->SendConfigUpdate(object, true);
+ }
+ } else if (!object->IsActive() &&
+ object->GetExtension("ConfigObjectDeleted")) { // same as in apilistener-configsync.cpp
+ // Delete object config
+ for (const IcingaDB::Ptr& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ if (rw)
+ rw->SendConfigDelete(object);
+ }
+ }
+}
+
+void IcingaDB::DowntimeStartedHandler(const Downtime::Ptr& downtime)
+{
+ for (auto& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ rw->SendStartedDowntime(downtime);
+ }
+}
+
+void IcingaDB::DowntimeRemovedHandler(const Downtime::Ptr& downtime)
+{
+ for (auto& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ rw->SendRemovedDowntime(downtime);
+ }
+}
+
+void IcingaDB::NotificationSentToAllUsersHandler(
+ const Notification::Ptr& notification, const Checkable::Ptr& checkable, const std::set<User::Ptr>& users,
+ NotificationType type, const CheckResult::Ptr& cr, const String& author, const String& text
+)
+{
+ auto rws (ConfigType::GetObjectsByType<IcingaDB>());
+ auto sendTime (notification->GetLastNotification());
+
+ if (!rws.empty()) {
+ for (auto& rw : rws) {
+ rw->SendSentNotification(notification, checkable, users, type, cr, author, text, sendTime);
+ }
+ }
+}
+
+void IcingaDB::CommentAddedHandler(const Comment::Ptr& comment)
+{
+ for (auto& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ rw->SendAddedComment(comment);
+ }
+}
+
+void IcingaDB::CommentRemovedHandler(const Comment::Ptr& comment)
+{
+ for (auto& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ rw->SendRemovedComment(comment);
+ }
+}
+
+void IcingaDB::FlappingChangeHandler(const Checkable::Ptr& checkable, double changeTime)
+{
+ auto flappingLastChange (checkable->GetFlappingLastChange());
+
+ for (auto& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ rw->SendFlappingChange(checkable, changeTime, flappingLastChange);
+ }
+}
+
+void IcingaDB::NewCheckResultHandler(const Checkable::Ptr& checkable)
+{
+ for (auto& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ rw->UpdateState(checkable, StateUpdate::Volatile);
+ rw->SendNextUpdate(checkable);
+ }
+}
+
+void IcingaDB::NextCheckUpdatedHandler(const Checkable::Ptr& checkable)
+{
+ for (auto& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ rw->UpdateState(checkable, StateUpdate::Volatile);
+ rw->SendNextUpdate(checkable);
+ }
+}
+
+void IcingaDB::HostProblemChangedHandler(const Service::Ptr& service) {
+ for (auto& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ /* Host state changes affect is_handled and severity of services. */
+ rw->UpdateState(service, StateUpdate::Full);
+ }
+}
+
+void IcingaDB::AcknowledgementSetHandler(const Checkable::Ptr& checkable, const String& author, const String& comment, AcknowledgementType type, bool persistent, double changeTime, double expiry)
+{
+ auto rws (ConfigType::GetObjectsByType<IcingaDB>());
+
+ if (!rws.empty()) {
+ for (auto& rw : rws) {
+ rw->SendAcknowledgementSet(checkable, author, comment, type, persistent, changeTime, expiry);
+ }
+ }
+}
+
+void IcingaDB::AcknowledgementClearedHandler(const Checkable::Ptr& checkable, const String& removedBy, double changeTime)
+{
+ auto rws (ConfigType::GetObjectsByType<IcingaDB>());
+
+ if (!rws.empty()) {
+ auto rb (Shared<String>::Make(removedBy));
+ auto ackLastChange (checkable->GetAcknowledgementLastChange());
+
+ for (auto& rw : rws) {
+ rw->SendAcknowledgementCleared(checkable, *rb, changeTime, ackLastChange);
+ }
+ }
+}
+
+void IcingaDB::NotificationUsersChangedHandler(const Notification::Ptr& notification, const Array::Ptr& oldValues, const Array::Ptr& newValues) {
+ for (const IcingaDB::Ptr& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ rw->SendNotificationUsersChanged(notification, oldValues, newValues);
+ }
+}
+
+void IcingaDB::NotificationUserGroupsChangedHandler(const Notification::Ptr& notification, const Array::Ptr& oldValues, const Array::Ptr& newValues) {
+ for (const IcingaDB::Ptr& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ rw->SendNotificationUserGroupsChanged(notification, oldValues, newValues);
+ }
+}
+
+void IcingaDB::TimePeriodRangesChangedHandler(const TimePeriod::Ptr& timeperiod, const Dictionary::Ptr& oldValues, const Dictionary::Ptr& newValues) {
+ for (const IcingaDB::Ptr& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ rw->SendTimePeriodRangesChanged(timeperiod, oldValues, newValues);
+ }
+}
+
+void IcingaDB::TimePeriodIncludesChangedHandler(const TimePeriod::Ptr& timeperiod, const Array::Ptr& oldValues, const Array::Ptr& newValues) {
+ for (const IcingaDB::Ptr& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ rw->SendTimePeriodIncludesChanged(timeperiod, oldValues, newValues);
+ }
+}
+
+void IcingaDB::TimePeriodExcludesChangedHandler(const TimePeriod::Ptr& timeperiod, const Array::Ptr& oldValues, const Array::Ptr& newValues) {
+ for (const IcingaDB::Ptr& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ rw->SendTimePeriodExcludesChanged(timeperiod, oldValues, newValues);
+ }
+}
+
+void IcingaDB::UserGroupsChangedHandler(const User::Ptr& user, const Array::Ptr& oldValues, const Array::Ptr& newValues) {
+ for (const IcingaDB::Ptr& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ rw->SendGroupsChanged<UserGroup>(user, oldValues, newValues);
+ }
+}
+
+void IcingaDB::HostGroupsChangedHandler(const Host::Ptr& host, const Array::Ptr& oldValues, const Array::Ptr& newValues) {
+ for (const IcingaDB::Ptr& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ rw->SendGroupsChanged<HostGroup>(host, oldValues, newValues);
+ }
+}
+
+void IcingaDB::ServiceGroupsChangedHandler(const Service::Ptr& service, const Array::Ptr& oldValues, const Array::Ptr& newValues) {
+ for (const IcingaDB::Ptr& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ rw->SendGroupsChanged<ServiceGroup>(service, oldValues, newValues);
+ }
+}
+
+void IcingaDB::CommandEnvChangedHandler(const ConfigObject::Ptr& command, const Dictionary::Ptr& oldValues, const Dictionary::Ptr& newValues) {
+ for (const IcingaDB::Ptr& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ rw->SendCommandEnvChanged(command, oldValues, newValues);
+ }
+}
+
+void IcingaDB::CommandArgumentsChangedHandler(const ConfigObject::Ptr& command, const Dictionary::Ptr& oldValues, const Dictionary::Ptr& newValues) {
+ for (const IcingaDB::Ptr& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ rw->SendCommandArgumentsChanged(command, oldValues, newValues);
+ }
+}
+
+void IcingaDB::CustomVarsChangedHandler(const ConfigObject::Ptr& object, const Dictionary::Ptr& oldValues, const Dictionary::Ptr& newValues) {
+ for (const IcingaDB::Ptr& rw : ConfigType::GetObjectsByType<IcingaDB>()) {
+ rw->SendCustomVarsChanged(object, oldValues, newValues);
+ }
+}
+
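+// Removes a relationship entry (e.g. a "host:customvar" mapping) from its Redis hash and
+// announces the deletion on the icinga:runtime stream. With the prefixes set in the
+// constructor this results in queries like
+//   HDEL icinga:host:customvar <id>
+//   XADD icinga:runtime MAXLEN ~ 1000000 * redis_key icinga:host:customvar id <id> runtime_type delete
+// plus an additional HDEL on the icinga:checksum: hash when hasChecksum is set.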
+void IcingaDB::DeleteRelationship(const String& id, const String& redisKeyWithoutPrefix, bool hasChecksum) {
+	Log(LogNotice, "IcingaDB") << "Deleting relationship '" << redisKeyWithoutPrefix << "' -> '" << id << "'";
+
+ String redisKey = m_PrefixConfigObject + redisKeyWithoutPrefix;
+
+ std::vector<std::vector<String>> queries;
+
+ if (hasChecksum) {
+ queries.push_back({"HDEL", m_PrefixConfigCheckSum + redisKeyWithoutPrefix, id});
+ }
+
+ queries.push_back({"HDEL", redisKey, id});
+ queries.push_back({
+ "XADD", "icinga:runtime", "MAXLEN", "~", "1000000", "*",
+ "redis_key", redisKey, "id", id, "runtime_type", "delete"
+ });
+
+ m_Rcon->FireAndForgetQueries(queries, Prio::Config);
+}
diff --git a/lib/icingadb/icingadb-stats.cpp b/lib/icingadb/icingadb-stats.cpp
new file mode 100644
index 0000000..476756b
--- /dev/null
+++ b/lib/icingadb/icingadb-stats.cpp
@@ -0,0 +1,54 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icingadb/icingadb.hpp"
+#include "base/application.hpp"
+#include "base/json.hpp"
+#include "base/logger.hpp"
+#include "base/serializer.hpp"
+#include "base/statsfunction.hpp"
+#include "base/convert.hpp"
+
+using namespace icinga;
+
+Dictionary::Ptr IcingaDB::GetStats()
+{
+ Dictionary::Ptr stats = new Dictionary();
+
+ //TODO: Figure out if more stats can be useful here.
+ Namespace::Ptr statsFunctions = ScriptGlobal::Get("StatsFunctions", &Empty);
+
+ if (!statsFunctions)
+		return Dictionary::Ptr();
+
+ ObjectLock olock(statsFunctions);
+
+ for (auto& kv : statsFunctions)
+ {
+ Function::Ptr func = kv.second.Val;
+
+ if (!func)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid status function name."));
+
+ Dictionary::Ptr status = new Dictionary();
+ Array::Ptr perfdata = new Array();
+ func->Invoke({ status, perfdata });
+
+ stats->Set(kv.first, new Dictionary({
+ { "status", status },
+ { "perfdata", Serialize(perfdata, FAState) }
+ }));
+ }
+
+ typedef Dictionary::Ptr DP;
+ DP app = DP(DP(DP(stats->Get("IcingaApplication"))->Get("status"))->Get("icingaapplication"))->Get("app");
+
+ app->Set("program_start", TimestampToMilliseconds(Application::GetStartTime()));
+
+ auto localEndpoint (Endpoint::GetLocalEndpoint());
+ if (localEndpoint) {
+ app->Set("endpoint_id", GetObjectIdentifier(localEndpoint));
+ }
+
+ return stats;
+}
+
diff --git a/lib/icingadb/icingadb-utility.cpp b/lib/icingadb/icingadb-utility.cpp
new file mode 100644
index 0000000..b247ed8
--- /dev/null
+++ b/lib/icingadb/icingadb-utility.cpp
@@ -0,0 +1,319 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icingadb/icingadb.hpp"
+#include "base/configtype.hpp"
+#include "base/object-packer.hpp"
+#include "base/logger.hpp"
+#include "base/serializer.hpp"
+#include "base/tlsutility.hpp"
+#include "base/initialize.hpp"
+#include "base/objectlock.hpp"
+#include "base/array.hpp"
+#include "base/scriptglobal.hpp"
+#include "base/convert.hpp"
+#include "base/json.hpp"
+#include "icinga/customvarobject.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/notificationcommand.hpp"
+#include "icinga/eventcommand.hpp"
+#include "icinga/host.hpp"
+#include <boost/algorithm/string.hpp>
+#include <map>
+#include <utility>
+#include <vector>
+
+using namespace icinga;
+
+String IcingaDB::FormatCheckSumBinary(const String& str)
+{
+ char output[20*2+1];
+ for (int i = 0; i < 20; i++)
+ sprintf(output + 2 * i, "%02x", str[i]);
+
+ return output;
+}
+
+String IcingaDB::FormatCommandLine(const Value& commandLine)
+{
+ String result;
+ if (commandLine.IsObjectType<Array>()) {
+ Array::Ptr args = commandLine;
+ bool first = true;
+
+ ObjectLock olock(args);
+ for (const Value& arg : args) {
+ String token = "'" + Convert::ToString(arg) + "'";
+
+ if (first)
+ first = false;
+ else
+ result += String(1, ' ');
+
+ result += token;
+ }
+ } else if (!commandLine.IsEmpty()) {
+ result = commandLine;
+ boost::algorithm::replace_all(result, "\'", "\\'");
+ result = "'" + result + "'";
+ }
+
+ return result;
+}
+
+String IcingaDB::GetObjectIdentifier(const ConfigObject::Ptr& object)
+{
+ String identifier = object->GetIcingadbIdentifier();
+ if (identifier.IsEmpty()) {
+ identifier = HashValue(new Array({m_EnvironmentId, object->GetName()}));
+ object->SetIcingadbIdentifier(identifier);
+ }
+
+ return identifier;
+}
+
+/**
+ * Calculates a deterministic history event ID like SHA1(env, eventType, x...[, nt][, eventTime])
+ *
+ * Where SHA1(env, x...) = GetObjectIdentifier(object)
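+ *
+ * For example, with eventTime = 1700000000.5 and no notification type given, the hashed
+ * array is [m_EnvironmentId, eventType, object->GetName(), 1700000000500].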
+ */
+String IcingaDB::CalcEventID(const char* eventType, const ConfigObject::Ptr& object, double eventTime, NotificationType nt)
+{
+ Array::Ptr rawId = new Array({object->GetName()});
+ rawId->Insert(0, m_EnvironmentId);
+ rawId->Insert(1, eventType);
+
+ if (nt) {
+ rawId->Add(GetNotificationTypeByEnum(nt));
+ }
+
+ if (eventTime) {
+ rawId->Add(TimestampToMilliseconds(eventTime));
+ }
+
+ return HashValue(std::move(rawId));
+}
+
+static const std::set<String> metadataWhitelist ({"package", "source_location", "templates"});
+
+/**
+ * Prepare custom vars for being written to Redis
+ *
+ * object.vars = {
+ * "disks": {
+ * "disk": {},
+ * "disk /": {
+ * "disk_partitions": "/"
+ * }
+ * }
+ * }
+ *
+ * return {
+ * SHA1(PackObject([
+ * EnvironmentId,
+ * "disks",
+ * {
+ * "disk": {},
+ * "disk /": {
+ * "disk_partitions": "/"
+ * }
+ * }
+ * ])): {
+ * "environment_id": EnvironmentId,
+ * "name_checksum": SHA1("disks"),
+ * "name": "disks",
+ * "value": {
+ * "disk": {},
+ * "disk /": {
+ * "disk_partitions": "/"
+ * }
+ * }
+ * }
+ * }
+ *
+ * @param Dictionary Config object with custom vars
+ *
+ * @return JSON-like data structure for Redis
+ */
+Dictionary::Ptr IcingaDB::SerializeVars(const Dictionary::Ptr& vars)
+{
+ if (!vars)
+ return nullptr;
+
+ Dictionary::Ptr res = new Dictionary();
+
+ ObjectLock olock(vars);
+
+ for (auto& kv : vars) {
+ res->Set(
+ SHA1(PackObject((Array::Ptr)new Array({m_EnvironmentId, kv.first, kv.second}))),
+ (Dictionary::Ptr)new Dictionary({
+ {"environment_id", m_EnvironmentId},
+ {"name_checksum", SHA1(kv.first)},
+ {"name", kv.first},
+ {"value", JsonEncode(kv.second)},
+ })
+ );
+ }
+
+ return res;
+}
+
+const char* IcingaDB::GetNotificationTypeByEnum(NotificationType type)
+{
+ switch (type) {
+ case NotificationDowntimeStart:
+ return "downtime_start";
+ case NotificationDowntimeEnd:
+ return "downtime_end";
+ case NotificationDowntimeRemoved:
+ return "downtime_removed";
+ case NotificationCustom:
+ return "custom";
+ case NotificationAcknowledgement:
+ return "acknowledgement";
+ case NotificationProblem:
+ return "problem";
+ case NotificationRecovery:
+ return "recovery";
+ case NotificationFlappingStart:
+ return "flapping_start";
+ case NotificationFlappingEnd:
+ return "flapping_end";
+ }
+
+ VERIFY(!"Invalid notification type.");
+}
+
+static const std::set<String> propertiesBlacklistEmpty;
+
+String IcingaDB::HashValue(const Value& value)
+{
+ return HashValue(value, propertiesBlacklistEmpty);
+}
+
+String IcingaDB::HashValue(const Value& value, const std::set<String>& propertiesBlacklist, bool propertiesWhitelist)
+{
+ Value temp;
+ bool mutabl;
+
+ Type::Ptr type = value.GetReflectionType();
+
+ if (ConfigObject::TypeInstance->IsAssignableFrom(type)) {
+ temp = Serialize(value, FAConfig);
+ mutabl = true;
+ } else {
+ temp = value;
+ mutabl = false;
+ }
+
+ if (propertiesBlacklist.size() && temp.IsObject()) {
+ Dictionary::Ptr dict = dynamic_pointer_cast<Dictionary>((Object::Ptr)temp);
+
+ if (dict) {
+ if (!mutabl)
+ dict = dict->ShallowClone();
+
+ ObjectLock olock(dict);
+
+ if (propertiesWhitelist) {
+ auto current = dict->Begin();
+ auto propertiesBlacklistEnd = propertiesBlacklist.end();
+
+ while (current != dict->End()) {
+ if (propertiesBlacklist.find(current->first) == propertiesBlacklistEnd) {
+ dict->Remove(current++);
+ } else {
+ ++current;
+ }
+ }
+ } else {
+ for (auto& property : propertiesBlacklist)
+ dict->Remove(property);
+ }
+
+ if (!mutabl)
+ temp = dict;
+ }
+ }
+
+ return SHA1(PackObject(temp));
+}
+
+String IcingaDB::GetLowerCaseTypeNameDB(const ConfigObject::Ptr& obj)
+{
+ return obj->GetReflectionType()->GetName().ToLower();
+}
+
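+// Converts a timestamp in seconds to whole milliseconds, e.g. TimestampToMilliseconds(3.5) == 3500.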
+long long IcingaDB::TimestampToMilliseconds(double timestamp) {
+ return static_cast<long long>(timestamp * 1000);
+}
+
+String IcingaDB::IcingaToStreamValue(const Value& value)
+{
+ switch (value.GetType()) {
+ case ValueBoolean:
+ return Convert::ToString(int(value));
+ case ValueString:
+ return Utility::ValidateUTF8(value);
+ case ValueNumber:
+ case ValueEmpty:
+ return Convert::ToString(value);
+ default:
+ return JsonEncode(value);
+ }
+}
+
+// Returns the items that exist in "arrayOld" but not in "arrayNew"
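+// For example, arrayOld = [3, 1, 2, 2] and arrayNew = [3] yields [1, 2];
+// values are de-duplicated and returned in sorted order, not in the original array order.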
+std::vector<Value> IcingaDB::GetArrayDeletedValues(const Array::Ptr& arrayOld, const Array::Ptr& arrayNew) {
+ std::vector<Value> deletedValues;
+
+ if (!arrayOld) {
+ return deletedValues;
+ }
+
+ if (!arrayNew) {
+ ObjectLock olock (arrayOld);
+ return std::vector<Value>(arrayOld->Begin(), arrayOld->End());
+ }
+
+ std::vector<Value> vectorOld;
+ {
+ ObjectLock olock (arrayOld);
+ vectorOld.assign(arrayOld->Begin(), arrayOld->End());
+ }
+ std::sort(vectorOld.begin(), vectorOld.end());
+ vectorOld.erase(std::unique(vectorOld.begin(), vectorOld.end()), vectorOld.end());
+
+ std::vector<Value> vectorNew;
+ {
+ ObjectLock olock (arrayNew);
+ vectorNew.assign(arrayNew->Begin(), arrayNew->End());
+ }
+ std::sort(vectorNew.begin(), vectorNew.end());
+ vectorNew.erase(std::unique(vectorNew.begin(), vectorNew.end()), vectorNew.end());
+
+ std::set_difference(vectorOld.begin(), vectorOld.end(), vectorNew.begin(), vectorNew.end(), std::back_inserter(deletedValues));
+
+ return deletedValues;
+}
+
+// Returns the keys that exist in "dictOld" but not in "dictNew"
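+// For example, dictOld with keys {"a", "b", "c"} and dictNew with keys {"a", "c"} yields {"b"}.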
+std::vector<String> IcingaDB::GetDictionaryDeletedKeys(const Dictionary::Ptr& dictOld, const Dictionary::Ptr& dictNew) {
+ std::vector<String> deletedKeys;
+
+ if (!dictOld) {
+ return deletedKeys;
+ }
+
+ std::vector<String> oldKeys = dictOld->GetKeys();
+
+ if (!dictNew) {
+ return oldKeys;
+ }
+
+ std::vector<String> newKeys = dictNew->GetKeys();
+
+ std::set_difference(oldKeys.begin(), oldKeys.end(), newKeys.begin(), newKeys.end(), std::back_inserter(deletedKeys));
+
+ return deletedKeys;
+}
diff --git a/lib/icingadb/icingadb.cpp b/lib/icingadb/icingadb.cpp
new file mode 100644
index 0000000..6d5ded9
--- /dev/null
+++ b/lib/icingadb/icingadb.cpp
@@ -0,0 +1,311 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icingadb/icingadb.hpp"
+#include "icingadb/icingadb-ti.cpp"
+#include "icingadb/redisconnection.hpp"
+#include "remote/apilistener.hpp"
+#include "remote/eventqueue.hpp"
+#include "base/configuration.hpp"
+#include "base/json.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/statsfunction.hpp"
+#include "base/tlsutility.hpp"
+#include "base/utility.hpp"
+#include "icinga/checkable.hpp"
+#include "icinga/host.hpp"
+#include <boost/algorithm/string.hpp>
+#include <fstream>
+#include <memory>
+#include <utility>
+
+using namespace icinga;
+
+#define MAX_EVENTS_DEFAULT 5000
+
+using Prio = RedisConnection::QueryPriority;
+
+String IcingaDB::m_EnvironmentId;
+std::mutex IcingaDB::m_EnvironmentIdInitMutex;
+
+REGISTER_TYPE(IcingaDB);
+
+IcingaDB::IcingaDB()
+ : m_Rcon(nullptr)
+{
+ m_RconLocked.store(nullptr);
+
+ m_WorkQueue.SetName("IcingaDB");
+
+ m_PrefixConfigObject = "icinga:";
+ m_PrefixConfigCheckSum = "icinga:checksum:";
+}
+
+void IcingaDB::Validate(int types, const ValidationUtils& utils)
+{
+ ObjectImpl<IcingaDB>::Validate(types, utils);
+
+ if (!(types & FAConfig))
+ return;
+
+ if (GetEnableTls() && GetCertPath().IsEmpty() != GetKeyPath().IsEmpty()) {
+ BOOST_THROW_EXCEPTION(ValidationError(this, std::vector<String>(), "Validation failed: Either both a client certificate (cert_path) and its private key (key_path) or none of them must be given."));
+ }
+
+ try {
+ InitEnvironmentId();
+ } catch (const std::exception& e) {
+ BOOST_THROW_EXCEPTION(ValidationError(this, std::vector<String>(),
+ String("Validation failed: ") + e.what()));
+ }
+}
+
+/**
+ * Starts the component.
+ */
+void IcingaDB::Start(bool runtimeCreated)
+{
+ ObjectImpl<IcingaDB>::Start(runtimeCreated);
+
+ VERIFY(!m_EnvironmentId.IsEmpty());
+ PersistEnvironmentId();
+
+ Log(LogInformation, "IcingaDB")
+ << "'" << GetName() << "' started.";
+
+ m_ConfigDumpInProgress = false;
+ m_ConfigDumpDone = false;
+
+ m_WorkQueue.SetExceptionCallback([this](boost::exception_ptr exp) { ExceptionHandler(std::move(exp)); });
+
+ m_Rcon = new RedisConnection(GetHost(), GetPort(), GetPath(), GetPassword(), GetDbIndex(),
+ GetEnableTls(), GetInsecureNoverify(), GetCertPath(), GetKeyPath(), GetCaPath(), GetCrlPath(),
+ GetTlsProtocolmin(), GetCipherList(), GetConnectTimeout(), GetDebugInfo());
+ m_RconLocked.store(m_Rcon);
+
+ for (const Type::Ptr& type : GetTypes()) {
+ auto ctype (dynamic_cast<ConfigType*>(type.get()));
+ if (!ctype)
+ continue;
+
+ RedisConnection::Ptr con = new RedisConnection(GetHost(), GetPort(), GetPath(), GetPassword(), GetDbIndex(),
+ GetEnableTls(), GetInsecureNoverify(), GetCertPath(), GetKeyPath(), GetCaPath(), GetCrlPath(),
+ GetTlsProtocolmin(), GetCipherList(), GetConnectTimeout(), GetDebugInfo(), m_Rcon);
+
+ con->SetConnectedCallback([this, con](boost::asio::yield_context& yc) {
+ con->SetConnectedCallback(nullptr);
+
+ size_t pending = --m_PendingRcons;
+ Log(LogDebug, "IcingaDB") << pending << " pending child connections remaining";
+ if (pending == 0) {
+ m_WorkQueue.Enqueue([this]() { OnConnectedHandler(); });
+ }
+ });
+
+ m_Rcons[ctype] = std::move(con);
+ }
+
+ m_PendingRcons = m_Rcons.size();
+
+ m_Rcon->SetConnectedCallback([this](boost::asio::yield_context& yc) {
+ m_Rcon->SetConnectedCallback(nullptr);
+
+ for (auto& kv : m_Rcons) {
+ kv.second->Start();
+ }
+ });
+ m_Rcon->Start();
+
+ m_StatsTimer = Timer::Create();
+ m_StatsTimer->SetInterval(1);
+ m_StatsTimer->OnTimerExpired.connect([this](const Timer * const&) { PublishStatsTimerHandler(); });
+ m_StatsTimer->Start();
+
+ m_WorkQueue.SetName("IcingaDB");
+
+ m_Rcon->SuppressQueryKind(Prio::CheckResult);
+ m_Rcon->SuppressQueryKind(Prio::RuntimeStateSync);
+
+ Ptr keepAlive (this);
+
+ m_HistoryThread = std::async(std::launch::async, [this, keepAlive]() { ForwardHistoryEntries(); });
+}
+
+void IcingaDB::ExceptionHandler(boost::exception_ptr exp)
+{
+ Log(LogCritical, "IcingaDB", "Exception during redis query. Verify that Redis is operational.");
+
+ Log(LogDebug, "IcingaDB")
+ << "Exception during redis operation: " << DiagnosticInformation(exp);
+}
+
+void IcingaDB::OnConnectedHandler()
+{
+ AssertOnWorkQueue();
+
+ if (m_ConfigDumpInProgress || m_ConfigDumpDone)
+ return;
+
+ /* Config dump */
+ m_ConfigDumpInProgress = true;
+ PublishStats();
+
+ UpdateAllConfigObjects();
+
+ m_ConfigDumpDone = true;
+
+ m_ConfigDumpInProgress = false;
+}
+
+void IcingaDB::PublishStatsTimerHandler(void)
+{
+ PublishStats();
+}
+
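+// Publishes the current stats as a single entry on the icinga:stats stream:
+//   XADD icinga:stats MAXLEN 1 * <field> <JSON-encoded value> ...
+// MAXLEN 1 means only the most recent stats entry is kept in the stream.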
+void IcingaDB::PublishStats()
+{
+ if (!m_Rcon || !m_Rcon->IsConnected())
+ return;
+
+ Dictionary::Ptr status = GetStats();
+ status->Set("config_dump_in_progress", m_ConfigDumpInProgress);
+ status->Set("timestamp", TimestampToMilliseconds(Utility::GetTime()));
+ status->Set("icingadb_environment", m_EnvironmentId);
+
+ std::vector<String> query {"XADD", "icinga:stats", "MAXLEN", "1", "*"};
+
+ {
+ ObjectLock statusLock (status);
+ for (auto& kv : status) {
+ query.emplace_back(kv.first);
+ query.emplace_back(JsonEncode(kv.second));
+ }
+ }
+
+ m_Rcon->FireAndForgetQuery(std::move(query), Prio::Heartbeat);
+}
+
+void IcingaDB::Stop(bool runtimeRemoved)
+{
+ Log(LogInformation, "IcingaDB")
+ << "Flushing history data buffer to Redis.";
+
+ if (m_HistoryThread.wait_for(std::chrono::minutes(1)) == std::future_status::timeout) {
+ Log(LogCritical, "IcingaDB")
+ << "Flushing takes more than one minute (while we're about to shut down). Giving up and discarding "
+ << m_HistoryBulker.Size() << " queued history queries.";
+ }
+
+ m_StatsTimer->Stop(true);
+
+ Log(LogInformation, "IcingaDB")
+ << "'" << GetName() << "' stopped.";
+
+ ObjectImpl<IcingaDB>::Stop(runtimeRemoved);
+}
+
+void IcingaDB::ValidateTlsProtocolmin(const Lazy<String>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<IcingaDB>::ValidateTlsProtocolmin(lvalue, utils);
+
+ try {
+ ResolveTlsProtocolVersion(lvalue());
+ } catch (const std::exception& ex) {
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "tls_protocolmin" }, ex.what()));
+ }
+}
+
+void IcingaDB::ValidateConnectTimeout(const Lazy<double>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<IcingaDB>::ValidateConnectTimeout(lvalue, utils);
+
+ if (lvalue() <= 0) {
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "connect_timeout" }, "Value must be greater than 0."));
+ }
+}
+
+void IcingaDB::AssertOnWorkQueue()
+{
+ ASSERT(m_WorkQueue.IsWorkerThread());
+}
+
+void IcingaDB::DumpedGlobals::Reset()
+{
+ std::lock_guard<std::mutex> l (m_Mutex);
+ m_Ids.clear();
+}
+
+String IcingaDB::GetEnvironmentId() const {
+ return m_EnvironmentId;
+}
+
+bool IcingaDB::DumpedGlobals::IsNew(const String& id)
+{
+ std::lock_guard<std::mutex> l (m_Mutex);
+ return m_Ids.emplace(id).second;
+}
+
+/**
+ * Initializes the m_EnvironmentId attribute or throws an exception on failure to do so. Can be called concurrently.
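+ *
+ * The ID always ends up as a 40 character lowercase hex string: either the value previously
+ * persisted in <DataDir>/icingadb.env or the SHA-1 digest of the CA certificate's public key.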
+ */
+void IcingaDB::InitEnvironmentId()
+{
+ // Initialize m_EnvironmentId once across all IcingaDB objects. In theory, this could be done using
+ // std::call_once, however, due to a bug in libstdc++ (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66146),
+ // this can result in a deadlock when an exception is thrown (which is explicitly allowed by the standard).
+ std::unique_lock<std::mutex> lock (m_EnvironmentIdInitMutex);
+
+ if (m_EnvironmentId.IsEmpty()) {
+ String path = Configuration::DataDir + "/icingadb.env";
+ String envId;
+
+ if (Utility::PathExists(path)) {
+ envId = Utility::LoadJsonFile(path);
+
+ if (envId.GetLength() != 2*SHA_DIGEST_LENGTH) {
+ throw std::runtime_error("environment ID stored at " + path + " is corrupt: wrong length.");
+ }
+
+ for (unsigned char c : envId) {
+ if (!std::isxdigit(c)) {
+ throw std::runtime_error("environment ID stored at " + path + " is corrupt: invalid hex string.");
+ }
+ }
+ } else {
+ String caPath = ApiListener::GetDefaultCaPath();
+
+ if (!Utility::PathExists(caPath)) {
+ throw std::runtime_error("Cannot find the CA certificate at '" + caPath + "'. "
+ "Please ensure the ApiListener is enabled first using 'icinga2 api setup'.");
+ }
+
+ std::shared_ptr<X509> cert = GetX509Certificate(caPath);
+
+ unsigned int n;
+ unsigned char digest[EVP_MAX_MD_SIZE];
+ if (X509_pubkey_digest(cert.get(), EVP_sha1(), digest, &n) != 1) {
+ BOOST_THROW_EXCEPTION(openssl_error()
+ << boost::errinfo_api_function("X509_pubkey_digest")
+ << errinfo_openssl_error(ERR_peek_error()));
+ }
+
+ envId = BinaryToHex(digest, n);
+ }
+
+ m_EnvironmentId = envId.ToLower();
+ }
+}
+
+/**
+ * Ensures that the environment ID is persisted on disk or throws an exception on failure to do so.
+ * Can be called concurrently.
+ */
+void IcingaDB::PersistEnvironmentId()
+{
+ String path = Configuration::DataDir + "/icingadb.env";
+
+ std::unique_lock<std::mutex> lock (m_EnvironmentIdInitMutex);
+
+ if (!Utility::PathExists(path)) {
+ Utility::SaveJsonFile(path, 0600, m_EnvironmentId);
+ }
+}
diff --git a/lib/icingadb/icingadb.hpp b/lib/icingadb/icingadb.hpp
new file mode 100644
index 0000000..6652d9c
--- /dev/null
+++ b/lib/icingadb/icingadb.hpp
@@ -0,0 +1,241 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef ICINGADB_H
+#define ICINGADB_H
+
+#include "icingadb/icingadb-ti.hpp"
+#include "icingadb/redisconnection.hpp"
+#include "base/atomic.hpp"
+#include "base/bulker.hpp"
+#include "base/timer.hpp"
+#include "base/workqueue.hpp"
+#include "icinga/customvarobject.hpp"
+#include "icinga/checkable.hpp"
+#include "icinga/service.hpp"
+#include "icinga/downtime.hpp"
+#include "remote/messageorigin.hpp"
+#include <atomic>
+#include <chrono>
+#include <future>
+#include <memory>
+#include <mutex>
+#include <set>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+
+namespace icinga
+{
+
+/**
+ * @ingroup icingadb
+ */
+class IcingaDB : public ObjectImpl<IcingaDB>
+{
+public:
+ DECLARE_OBJECT(IcingaDB);
+ DECLARE_OBJECTNAME(IcingaDB);
+
+ IcingaDB();
+
+ static void ConfigStaticInitialize();
+
+ void Validate(int types, const ValidationUtils& utils) override;
+ virtual void Start(bool runtimeCreated) override;
+ virtual void Stop(bool runtimeRemoved) override;
+
+ String GetEnvironmentId() const override;
+
+ inline RedisConnection::Ptr GetConnection()
+ {
+ return m_RconLocked.load();
+ }
+
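+	// Turns a flat key/value reply (as returned by Redis) into a map, e.g.
+	// ["a", 1, "b", 2] becomes {"a": 1, "b": 2}. A trailing key without a value is dropped.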
+ template<class T>
+ static void AddKvsToMap(const Array::Ptr& kvs, T& map)
+ {
+ Value* key = nullptr;
+ ObjectLock oLock (kvs);
+
+ for (auto& kv : kvs) {
+ if (key) {
+ map.emplace(std::move(*key), std::move(kv));
+ key = nullptr;
+ } else {
+ key = &kv;
+ }
+ }
+ }
+
+protected:
+ void ValidateTlsProtocolmin(const Lazy<String>& lvalue, const ValidationUtils& utils) override;
+ void ValidateConnectTimeout(const Lazy<double>& lvalue, const ValidationUtils& utils) override;
+
+private:
+ class DumpedGlobals
+ {
+ public:
+ void Reset();
+ bool IsNew(const String& id);
+
+ private:
+ std::set<String> m_Ids;
+ std::mutex m_Mutex;
+ };
+
+ enum StateUpdate
+ {
+ Volatile = 1ull << 0,
+ RuntimeOnly = 1ull << 1,
+ Full = Volatile | RuntimeOnly,
+ };
+
+ void OnConnectedHandler();
+
+ void PublishStatsTimerHandler();
+ void PublishStats();
+
+ /* config & status dump */
+ void UpdateAllConfigObjects();
+ std::vector<std::vector<intrusive_ptr<ConfigObject>>> ChunkObjects(std::vector<intrusive_ptr<ConfigObject>> objects, size_t chunkSize);
+ void DeleteKeys(const RedisConnection::Ptr& conn, const std::vector<String>& keys, RedisConnection::QueryPriority priority);
+ std::vector<String> GetTypeOverwriteKeys(const String& type);
+ std::vector<String> GetTypeDumpSignalKeys(const Type::Ptr& type);
+ void InsertObjectDependencies(const ConfigObject::Ptr& object, const String typeName, std::map<String, std::vector<String>>& hMSets,
+ std::vector<Dictionary::Ptr>& runtimeUpdates, bool runtimeUpdate);
+ void UpdateState(const Checkable::Ptr& checkable, StateUpdate mode);
+ void SendConfigUpdate(const ConfigObject::Ptr& object, bool runtimeUpdate);
+ void CreateConfigUpdate(const ConfigObject::Ptr& object, const String type, std::map<String, std::vector<String>>& hMSets,
+ std::vector<Dictionary::Ptr>& runtimeUpdates, bool runtimeUpdate);
+ void SendConfigDelete(const ConfigObject::Ptr& object);
+ void SendStateChange(const ConfigObject::Ptr& object, const CheckResult::Ptr& cr, StateType type);
+ void AddObjectDataToRuntimeUpdates(std::vector<Dictionary::Ptr>& runtimeUpdates, const String& objectKey,
+ const String& redisKey, const Dictionary::Ptr& data);
+ void DeleteRelationship(const String& id, const String& redisKeyWithoutPrefix, bool hasChecksum = false);
+
+ void SendSentNotification(
+ const Notification::Ptr& notification, const Checkable::Ptr& checkable, const std::set<User::Ptr>& users,
+ NotificationType type, const CheckResult::Ptr& cr, const String& author, const String& text, double sendTime
+ );
+
+ void SendStartedDowntime(const Downtime::Ptr& downtime);
+ void SendRemovedDowntime(const Downtime::Ptr& downtime);
+ void SendAddedComment(const Comment::Ptr& comment);
+ void SendRemovedComment(const Comment::Ptr& comment);
+ void SendFlappingChange(const Checkable::Ptr& checkable, double changeTime, double flappingLastChange);
+ void SendNextUpdate(const Checkable::Ptr& checkable);
+ void SendAcknowledgementSet(const Checkable::Ptr& checkable, const String& author, const String& comment, AcknowledgementType type, bool persistent, double changeTime, double expiry);
+ void SendAcknowledgementCleared(const Checkable::Ptr& checkable, const String& removedBy, double changeTime, double ackLastChange);
+ void SendNotificationUsersChanged(const Notification::Ptr& notification, const Array::Ptr& oldValues, const Array::Ptr& newValues);
+ void SendNotificationUserGroupsChanged(const Notification::Ptr& notification, const Array::Ptr& oldValues, const Array::Ptr& newValues);
+ void SendTimePeriodRangesChanged(const TimePeriod::Ptr& timeperiod, const Dictionary::Ptr& oldValues, const Dictionary::Ptr& newValues);
+ void SendTimePeriodIncludesChanged(const TimePeriod::Ptr& timeperiod, const Array::Ptr& oldValues, const Array::Ptr& newValues);
+ void SendTimePeriodExcludesChanged(const TimePeriod::Ptr& timeperiod, const Array::Ptr& oldValues, const Array::Ptr& newValues);
+ template<class T>
+ void SendGroupsChanged(const ConfigObject::Ptr& command, const Array::Ptr& oldValues, const Array::Ptr& newValues);
+ void SendCommandEnvChanged(const ConfigObject::Ptr& command, const Dictionary::Ptr& oldValues, const Dictionary::Ptr& newValues);
+ void SendCommandArgumentsChanged(const ConfigObject::Ptr& command, const Dictionary::Ptr& oldValues, const Dictionary::Ptr& newValues);
+ void SendCustomVarsChanged(const ConfigObject::Ptr& object, const Dictionary::Ptr& oldValues, const Dictionary::Ptr& newValues);
+
+ void ForwardHistoryEntries();
+
+ std::vector<String> UpdateObjectAttrs(const ConfigObject::Ptr& object, int fieldType, const String& typeNameOverride);
+ Dictionary::Ptr SerializeState(const Checkable::Ptr& checkable);
+
+ /* Stats */
+ Dictionary::Ptr GetStats();
+
+ /* utilities */
+ static String FormatCheckSumBinary(const String& str);
+ static String FormatCommandLine(const Value& commandLine);
+ static long long TimestampToMilliseconds(double timestamp);
+ static String IcingaToStreamValue(const Value& value);
+ static std::vector<Value> GetArrayDeletedValues(const Array::Ptr& arrayOld, const Array::Ptr& arrayNew);
+ static std::vector<String> GetDictionaryDeletedKeys(const Dictionary::Ptr& dictOld, const Dictionary::Ptr& dictNew);
+
+ static String GetObjectIdentifier(const ConfigObject::Ptr& object);
+ static String CalcEventID(const char* eventType, const ConfigObject::Ptr& object, double eventTime = 0, NotificationType nt = NotificationType(0));
+ static const char* GetNotificationTypeByEnum(NotificationType type);
+ static Dictionary::Ptr SerializeVars(const Dictionary::Ptr& vars);
+
+ static String HashValue(const Value& value);
+ static String HashValue(const Value& value, const std::set<String>& propertiesBlacklist, bool propertiesWhitelist = false);
+
+ static String GetLowerCaseTypeNameDB(const ConfigObject::Ptr& obj);
+ static bool PrepareObject(const ConfigObject::Ptr& object, Dictionary::Ptr& attributes, Dictionary::Ptr& checkSums);
+
+ static void ReachabilityChangeHandler(const std::set<Checkable::Ptr>& children);
+ static void StateChangeHandler(const ConfigObject::Ptr& object, const CheckResult::Ptr& cr, StateType type);
+ static void VersionChangedHandler(const ConfigObject::Ptr& object);
+ static void DowntimeStartedHandler(const Downtime::Ptr& downtime);
+ static void DowntimeRemovedHandler(const Downtime::Ptr& downtime);
+
+ static void NotificationSentToAllUsersHandler(
+ const Notification::Ptr& notification, const Checkable::Ptr& checkable, const std::set<User::Ptr>& users,
+ NotificationType type, const CheckResult::Ptr& cr, const String& author, const String& text
+ );
+
+ static void CommentAddedHandler(const Comment::Ptr& comment);
+ static void CommentRemovedHandler(const Comment::Ptr& comment);
+ static void FlappingChangeHandler(const Checkable::Ptr& checkable, double changeTime);
+ static void NewCheckResultHandler(const Checkable::Ptr& checkable);
+ static void NextCheckUpdatedHandler(const Checkable::Ptr& checkable);
+ static void HostProblemChangedHandler(const Service::Ptr& service);
+ static void AcknowledgementSetHandler(const Checkable::Ptr& checkable, const String& author, const String& comment, AcknowledgementType type, bool persistent, double changeTime, double expiry);
+ static void AcknowledgementClearedHandler(const Checkable::Ptr& checkable, const String& removedBy, double changeTime);
+ static void NotificationUsersChangedHandler(const Notification::Ptr& notification, const Array::Ptr& oldValues, const Array::Ptr& newValues);
+ static void NotificationUserGroupsChangedHandler(const Notification::Ptr& notification, const Array::Ptr& oldValues, const Array::Ptr& newValues);
+ static void TimePeriodRangesChangedHandler(const TimePeriod::Ptr& timeperiod, const Dictionary::Ptr& oldValues, const Dictionary::Ptr& newValues);
+ static void TimePeriodIncludesChangedHandler(const TimePeriod::Ptr& timeperiod, const Array::Ptr& oldValues, const Array::Ptr& newValues);
+ static void TimePeriodExcludesChangedHandler(const TimePeriod::Ptr& timeperiod, const Array::Ptr& oldValues, const Array::Ptr& newValues);
+ static void UserGroupsChangedHandler(const User::Ptr& user, const Array::Ptr&, const Array::Ptr& newValues);
+ static void HostGroupsChangedHandler(const Host::Ptr& host, const Array::Ptr& oldValues, const Array::Ptr& newValues);
+ static void ServiceGroupsChangedHandler(const Service::Ptr& service, const Array::Ptr& oldValues, const Array::Ptr& newValues);
+ static void CommandEnvChangedHandler(const ConfigObject::Ptr& command, const Dictionary::Ptr& oldValues, const Dictionary::Ptr& newValues);
+ static void CommandArgumentsChangedHandler(const ConfigObject::Ptr& command, const Dictionary::Ptr& oldValues, const Dictionary::Ptr& newValues);
+ static void CustomVarsChangedHandler(const ConfigObject::Ptr& object, const Dictionary::Ptr& oldValues, const Dictionary::Ptr& newValues);
+
+ void AssertOnWorkQueue();
+
+ void ExceptionHandler(boost::exception_ptr exp);
+
+ static std::vector<Type::Ptr> GetTypes();
+
+ static void InitEnvironmentId();
+ static void PersistEnvironmentId();
+
+ Timer::Ptr m_StatsTimer;
+ WorkQueue m_WorkQueue{0, 1, LogNotice};
+
+ std::future<void> m_HistoryThread;
+ Bulker<RedisConnection::Query> m_HistoryBulker {4096, std::chrono::milliseconds(250)};
+
+ String m_PrefixConfigObject;
+ String m_PrefixConfigCheckSum;
+
+ bool m_ConfigDumpInProgress;
+ bool m_ConfigDumpDone;
+
+ RedisConnection::Ptr m_Rcon;
+	// m_RconLocked contains a copy of the value in m_Rcon where all accesses are guarded by a mutex to allow safe
+	// concurrent access, e.g. from the icingadb check command. It's a copy to still allow fast access without additional
+	// synchronization to m_Rcon within the IcingaDB feature itself.
+ Locked<RedisConnection::Ptr> m_RconLocked;
+ std::unordered_map<ConfigType*, RedisConnection::Ptr> m_Rcons;
+ std::atomic_size_t m_PendingRcons;
+
+ struct {
+ DumpedGlobals CustomVar, ActionUrl, NotesUrl, IconImage;
+ } m_DumpedGlobals;
+
+ // m_EnvironmentId is shared across all IcingaDB objects (typically there is at most one, but it is perfectly fine
+ // to have multiple ones). It is initialized once (synchronized using m_EnvironmentIdInitMutex). After successful
+ // initialization, the value is read-only and can be accessed without further synchronization.
+ static String m_EnvironmentId;
+ static std::mutex m_EnvironmentIdInitMutex;
+
+ static std::unordered_set<Type*> m_IndexedTypes;
+};
+}
+
+#endif /* ICINGADB_H */
diff --git a/lib/icingadb/icingadb.ti b/lib/icingadb/icingadb.ti
new file mode 100644
index 0000000..1c649c8
--- /dev/null
+++ b/lib/icingadb/icingadb.ti
@@ -0,0 +1,63 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+#include "base/tlsutility.hpp"
+
+library icingadb;
+
+namespace icinga
+{
+
+class IcingaDB : ConfigObject
+{
+ activation_priority 100;
+
+ [config] String host {
+ default {{{ return "127.0.0.1"; }}}
+ };
+ [config] int port {
+ default {{{ return 6380; }}}
+ };
+ [config] String path;
+ [config, no_user_view, no_user_modify] String password;
+ [config] int db_index;
+
+ [config] bool enable_tls {
+ default {{{ return false; }}}
+ };
+
+ [config] bool insecure_noverify {
+ default {{{ return false; }}}
+ };
+
+ [config] String cert_path;
+ [config] String key_path;
+ [config] String ca_path;
+ [config] String crl_path;
+ [config] String cipher_list {
+ default {{{ return DEFAULT_TLS_CIPHERS; }}}
+ };
+ [config] String tls_protocolmin {
+ default {{{ return DEFAULT_TLS_PROTOCOLMIN; }}}
+ };
+
+ [config] double connect_timeout {
+ default {{{ return DEFAULT_CONNECT_TIMEOUT; }}}
+ };
+
+ [no_storage] String environment_id {
+ get;
+ };
+
+ [set_protected] double ongoing_dump_start {
+ default {{{ return 0; }}}
+ };
+ [state, set_protected] double lastdump_end {
+ default {{{ return 0; }}}
+ };
+ [state, set_protected] double lastdump_took {
+ default {{{ return 0; }}}
+ };
+};
+
+}
diff --git a/lib/icingadb/icingadbchecktask.cpp b/lib/icingadb/icingadbchecktask.cpp
new file mode 100644
index 0000000..f7c5964
--- /dev/null
+++ b/lib/icingadb/icingadbchecktask.cpp
@@ -0,0 +1,513 @@
+/* Icinga 2 | (c) 2022 Icinga GmbH | GPLv2+ */
+
+#include "icingadb/icingadbchecktask.hpp"
+#include "icinga/host.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/macroprocessor.hpp"
+#include "icinga/pluginutility.hpp"
+#include "base/function.hpp"
+#include "base/utility.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/convert.hpp"
+#include <utility>
+
+using namespace icinga;
+
+REGISTER_FUNCTION_NONCONST(Internal, IcingadbCheck, &IcingadbCheckTask::ScriptFunc, "checkable:cr:resolvedMacros:useResolvedMacros");
+
+static void ReportIcingadbCheck(
+ const Checkable::Ptr& checkable, const CheckCommand::Ptr& commandObj,
+ const CheckResult::Ptr& cr, String output, ServiceState state)
+{
+ if (Checkable::ExecuteCommandProcessFinishedHandler) {
+ double now = Utility::GetTime();
+ ProcessResult pr;
+ pr.PID = -1;
+ pr.Output = std::move(output);
+ pr.ExecutionStart = now;
+ pr.ExecutionEnd = now;
+ pr.ExitStatus = state;
+
+ Checkable::ExecuteCommandProcessFinishedHandler(commandObj->GetName(), pr);
+ } else {
+ cr->SetState(state);
+ cr->SetOutput(output);
+ checkable->ProcessCheckResult(cr);
+ }
+}
+
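+// Returns the timestamp (in seconds) encoded in a Redis stream entry,
+// e.g. an entry ID like "1700000000123-0" yields 1700000000.123.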
+static inline
+double GetXMessageTs(const Array::Ptr& xMessage)
+{
+ return Convert::ToLong(String(xMessage->Get(0)).Split("-")[0]) / 1000.0;
+}
+
+void IcingadbCheckTask::ScriptFunc(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
+{
+ CheckCommand::Ptr commandObj = CheckCommand::ExecuteOverride ? CheckCommand::ExecuteOverride : checkable->GetCheckCommand();
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ MacroProcessor::ResolverList resolvers;
+ String silenceMissingMacroWarning;
+
+ if (MacroResolver::OverrideMacros)
+ resolvers.emplace_back("override", MacroResolver::OverrideMacros);
+
+ if (service)
+ resolvers.emplace_back("service", service);
+ resolvers.emplace_back("host", host);
+ resolvers.emplace_back("command", commandObj);
+
+ auto resolve ([&](const String& macro) {
+ return MacroProcessor::ResolveMacros(macro, resolvers, checkable->GetLastCheckResult(),
+ &silenceMissingMacroWarning, MacroProcessor::EscapeCallback(), resolvedMacros, useResolvedMacros);
+ });
+
+ struct Thresholds
+ {
+ Value Warning, Critical;
+ };
+
+ auto resolveThresholds ([&resolve](const String& wmacro, const String& cmacro) {
+ return Thresholds{resolve(wmacro), resolve(cmacro)};
+ });
+
+ String icingadbName = resolve("$icingadb_name$");
+
+ auto dumpTakesThresholds (resolveThresholds("$icingadb_full_dump_duration_warning$", "$icingadb_full_dump_duration_critical$"));
+ auto syncTakesThresholds (resolveThresholds("$icingadb_full_sync_duration_warning$", "$icingadb_full_sync_duration_critical$"));
+ auto icingaBacklogThresholds (resolveThresholds("$icingadb_redis_backlog_warning$", "$icingadb_redis_backlog_critical$"));
+ auto icingadbBacklogThresholds (resolveThresholds("$icingadb_database_backlog_warning$", "$icingadb_database_backlog_critical$"));
+
+ if (resolvedMacros && !useResolvedMacros)
+ return;
+
+ if (icingadbName.IsEmpty()) {
+ ReportIcingadbCheck(checkable, commandObj, cr, "Icinga DB UNKNOWN: Attribute 'icingadb_name' must be set.", ServiceUnknown);
+ return;
+ }
+
+ auto conn (IcingaDB::GetByName(icingadbName));
+
+ if (!conn) {
+ ReportIcingadbCheck(checkable, commandObj, cr, "Icinga DB UNKNOWN: Icinga DB connection '" + icingadbName + "' does not exist.", ServiceUnknown);
+ return;
+ }
+
+ auto redis (conn->GetConnection());
+
+ if (!redis || !redis->GetConnected()) {
+ ReportIcingadbCheck(checkable, commandObj, cr, "Icinga DB CRITICAL: Not connected to Redis.", ServiceCritical);
+ return;
+ }
+
+ auto now (Utility::GetTime());
+ Array::Ptr redisTime, xReadHeartbeat, xReadStats, xReadRuntimeBacklog, xReadHistoryBacklog;
+
+ try {
+ auto replies (redis->GetResultsOfQueries(
+ {
+ {"TIME"},
+ {"XREAD", "STREAMS", "icingadb:telemetry:heartbeat", "0-0"},
+ {"XREAD", "STREAMS", "icingadb:telemetry:stats", "0-0"},
+ {"XREAD", "COUNT", "1", "STREAMS", "icinga:runtime", "icinga:runtime:state", "0-0", "0-0"},
+ {
+ "XREAD", "COUNT", "1", "STREAMS",
+ "icinga:history:stream:acknowledgement",
+ "icinga:history:stream:comment",
+ "icinga:history:stream:downtime",
+ "icinga:history:stream:flapping",
+ "icinga:history:stream:notification",
+ "icinga:history:stream:state",
+ "0-0", "0-0", "0-0", "0-0", "0-0", "0-0",
+ }
+ },
+ RedisConnection::QueryPriority::Heartbeat
+ ));
+
+ redisTime = std::move(replies.at(0));
+ xReadHeartbeat = std::move(replies.at(1));
+ xReadStats = std::move(replies.at(2));
+ xReadRuntimeBacklog = std::move(replies.at(3));
+ xReadHistoryBacklog = std::move(replies.at(4));
+ } catch (const std::exception& ex) {
+ ReportIcingadbCheck(
+ checkable, commandObj, cr,
+ String("Icinga DB CRITICAL: Could not query Redis: ") + ex.what(), ServiceCritical
+ );
+ return;
+ }
+
+ if (!xReadHeartbeat) {
+ ReportIcingadbCheck(
+ checkable, commandObj, cr,
+ "Icinga DB CRITICAL: The Icinga DB daemon seems to have never run. (Missing heartbeat)",
+ ServiceCritical
+ );
+
+ return;
+ }
+
+ auto redisOldestPending (redis->GetOldestPendingQueryTs());
+ auto ongoingDumpStart (conn->GetOngoingDumpStart());
+ auto dumpWhen (conn->GetLastdumpEnd());
+ auto dumpTook (conn->GetLastdumpTook());
+
+ auto redisNow (Convert::ToLong(redisTime->Get(0)) + Convert::ToLong(redisTime->Get(1)) / 1000000.0);
+ Array::Ptr heartbeatMessage = Array::Ptr(Array::Ptr(xReadHeartbeat->Get(0))->Get(1))->Get(0);
+ auto heartbeatTime (GetXMessageTs(heartbeatMessage));
+ std::map<String, String> heartbeatData;
+
+ IcingaDB::AddKvsToMap(heartbeatMessage->Get(1), heartbeatData);
+
+ String version = heartbeatData.at("version");
+ auto icingadbNow (Convert::ToLong(heartbeatData.at("time")) / 1000.0 + (redisNow - heartbeatTime));
+ auto icingadbStartTime (Convert::ToLong(heartbeatData.at("start-time")) / 1000.0);
+ String errMsg (heartbeatData.at("error"));
+ auto errSince (Convert::ToLong(heartbeatData.at("error-since")) / 1000.0);
+ String perfdataFromRedis = heartbeatData.at("performance-data");
+ auto heartbeatLastReceived (Convert::ToLong(heartbeatData.at("last-heartbeat-received")) / 1000.0);
+ bool weResponsible = Convert::ToLong(heartbeatData.at("ha-responsible"));
+ auto weResponsibleTs (Convert::ToLong(heartbeatData.at("ha-responsible-ts")) / 1000.0);
+ bool otherResponsible = Convert::ToLong(heartbeatData.at("ha-other-responsible"));
+ auto syncOngoingSince (Convert::ToLong(heartbeatData.at("sync-ongoing-since")) / 1000.0);
+ auto syncSuccessWhen (Convert::ToLong(heartbeatData.at("sync-success-finish")) / 1000.0);
+ auto syncSuccessTook (Convert::ToLong(heartbeatData.at("sync-success-duration")) / 1000.0);
+
+ std::ostringstream i2okmsgs, idbokmsgs, warnmsgs, critmsgs;
+ Array::Ptr perfdata = new Array();
+
+ i2okmsgs << std::fixed << std::setprecision(3);
+ idbokmsgs << std::fixed << std::setprecision(3);
+ warnmsgs << std::fixed << std::setprecision(3);
+ critmsgs << std::fixed << std::setprecision(3);
+
+ const auto downForCritical (10);
+ auto downFor (redisNow - heartbeatTime);
+ bool down = false;
+
+ if (downFor > downForCritical) {
+ down = true;
+
+ critmsgs << " Last seen " << Utility::FormatDuration(downFor)
+ << " ago, greater than CRITICAL threshold (" << Utility::FormatDuration(downForCritical) << ")!";
+ } else {
+ idbokmsgs << "\n* Last seen: " << Utility::FormatDuration(downFor) << " ago";
+ }
+
+ perfdata->Add(new PerfdataValue("icingadb_heartbeat_age", downFor, false, "seconds", Empty, downForCritical, 0));
+
+ const auto errForCritical (10);
+ auto err (!errMsg.IsEmpty());
+ auto errFor (icingadbNow - errSince);
+
+ if (err) {
+ if (errFor > errForCritical) {
+ critmsgs << " ERROR: " << errMsg << "!";
+ }
+
+ perfdata->Add(new PerfdataValue("error_for", errFor * (err ? 1 : -1), false, "seconds", Empty, errForCritical, 0));
+ }
+
+ if (!down) {
+ const auto heartbeatLagWarning (3/* Icinga DB read freq. */ + 1/* Icinga DB write freq. */ + 2/* threshold */);
+ auto heartbeatLag (fmin(icingadbNow - heartbeatLastReceived, 10 * 60));
+
+ if (!heartbeatLastReceived) {
+ critmsgs << " Lost Icinga 2 heartbeat!";
+ } else if (heartbeatLag > heartbeatLagWarning) {
+ warnmsgs << " Icinga 2 heartbeat lag: " << Utility::FormatDuration(heartbeatLag)
+ << ", greater than WARNING threshold (" << Utility::FormatDuration(heartbeatLagWarning) << ").";
+ }
+
+ perfdata->Add(new PerfdataValue("icinga2_heartbeat_age", heartbeatLag, false, "seconds", heartbeatLagWarning, Empty, 0));
+ }
+
+ if (weResponsible) {
+ idbokmsgs << "\n* Responsible";
+ } else if (otherResponsible) {
+ idbokmsgs << "\n* Not responsible, but another instance is";
+ } else {
+ critmsgs << " No instance is responsible!";
+ }
+
+ perfdata->Add(new PerfdataValue("icingadb_responsible_instances", int(weResponsible || otherResponsible), false, "", Empty, Empty, 0, 1));
+
+ const auto clockDriftWarning (5);
+ const auto clockDriftCritical (30);
+ auto clockDrift (std::max({
+ fabs(now - redisNow),
+ fabs(redisNow - icingadbNow),
+ fabs(icingadbNow - now),
+ }));
+
+ if (clockDrift > clockDriftCritical) {
+ critmsgs << " Icinga 2/Redis/Icinga DB clock drift: " << Utility::FormatDuration(clockDrift)
+ << ", greater than CRITICAL threshold (" << Utility::FormatDuration(clockDriftCritical) << ")!";
+ } else if (clockDrift > clockDriftWarning) {
+ warnmsgs << " Icinga 2/Redis/Icinga DB clock drift: " << Utility::FormatDuration(clockDrift)
+ << ", greater than WARNING threshold (" << Utility::FormatDuration(clockDriftWarning) << ").";
+ }
+
+ perfdata->Add(new PerfdataValue("clock_drift", clockDrift, false, "seconds", clockDriftWarning, clockDriftCritical, 0));
+
+ if (ongoingDumpStart) {
+ auto ongoingDumpTakes (now - ongoingDumpStart);
+
+ if (!dumpTakesThresholds.Critical.IsEmpty() && ongoingDumpTakes > dumpTakesThresholds.Critical) {
+ critmsgs << " Current Icinga 2 full dump already takes " << Utility::FormatDuration(ongoingDumpTakes)
+ << ", greater than CRITICAL threshold (" << Utility::FormatDuration(dumpTakesThresholds.Critical) << ")!";
+ } else if (!dumpTakesThresholds.Warning.IsEmpty() && ongoingDumpTakes > dumpTakesThresholds.Warning) {
+ warnmsgs << " Current Icinga 2 full dump already takes " << Utility::FormatDuration(ongoingDumpTakes)
+ << ", greater than WARNING threshold (" << Utility::FormatDuration(dumpTakesThresholds.Warning) << ").";
+ } else {
+ i2okmsgs << "\n* Current full dump running for " << Utility::FormatDuration(ongoingDumpTakes);
+ }
+
+ perfdata->Add(new PerfdataValue("icinga2_current_full_dump_duration", ongoingDumpTakes, false, "seconds",
+ dumpTakesThresholds.Warning, dumpTakesThresholds.Critical, 0));
+ }
+
+ if (!down && syncOngoingSince) {
+ auto ongoingSyncTakes (icingadbNow - syncOngoingSince);
+
+ if (!syncTakesThresholds.Critical.IsEmpty() && ongoingSyncTakes > syncTakesThresholds.Critical) {
+ critmsgs << " Current full sync already takes " << Utility::FormatDuration(ongoingSyncTakes)
+ << ", greater than CRITICAL threshold (" << Utility::FormatDuration(syncTakesThresholds.Critical) << ")!";
+ } else if (!syncTakesThresholds.Warning.IsEmpty() && ongoingSyncTakes > syncTakesThresholds.Warning) {
+ warnmsgs << " Current full sync already takes " << Utility::FormatDuration(ongoingSyncTakes)
+ << ", greater than WARNING threshold (" << Utility::FormatDuration(syncTakesThresholds.Warning) << ").";
+ } else {
+ idbokmsgs << "\n* Current full sync running for " << Utility::FormatDuration(ongoingSyncTakes);
+ }
+
+ perfdata->Add(new PerfdataValue("icingadb_current_full_sync_duration", ongoingSyncTakes, false, "seconds",
+ syncTakesThresholds.Warning, syncTakesThresholds.Critical, 0));
+ }
+
+ auto redisBacklog (now - redisOldestPending);
+
+ if (!redisOldestPending) {
+ redisBacklog = 0;
+ }
+
+ if (!icingaBacklogThresholds.Critical.IsEmpty() && redisBacklog > icingaBacklogThresholds.Critical) {
+ critmsgs << " Icinga 2 Redis query backlog: " << Utility::FormatDuration(redisBacklog)
+ << ", greater than CRITICAL threshold (" << Utility::FormatDuration(icingaBacklogThresholds.Critical) << ")!";
+ } else if (!icingaBacklogThresholds.Warning.IsEmpty() && redisBacklog > icingaBacklogThresholds.Warning) {
+ warnmsgs << " Icinga 2 Redis query backlog: " << Utility::FormatDuration(redisBacklog)
+ << ", greater than WARNING threshold (" << Utility::FormatDuration(icingaBacklogThresholds.Warning) << ").";
+ }
+
+ perfdata->Add(new PerfdataValue("icinga2_redis_query_backlog", redisBacklog, false, "seconds",
+ icingaBacklogThresholds.Warning, icingaBacklogThresholds.Critical, 0));
+
+ if (!down) {
+ auto getBacklog = [redisNow](const Array::Ptr& streams) -> double {
+ if (!streams) {
+ return 0;
+ }
+
+ double minTs = 0;
+ ObjectLock lock (streams);
+
+ for (Array::Ptr stream : streams) {
+ auto ts (GetXMessageTs(Array::Ptr(stream->Get(1))->Get(0)));
+
+ if (minTs == 0 || ts < minTs) {
+ minTs = ts;
+ }
+ }
+
+ if (minTs > 0) {
+ return redisNow - minTs;
+ } else {
+ return 0;
+ }
+ };
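+
+		// Assumed XREAD reply layout (standard Redis, shown for illustration): each element of `streams`
+		// looks like ["<stream name>", [["<ms>-<seq>", ["<field>", "<value>", ...]], ...]], so Get(1)
+		// yields the entry list and Get(0) its oldest entry, whose ID carries the timestamp read by
+		// GetXMessageTs().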
+
+ double historyBacklog = getBacklog(xReadHistoryBacklog);
+
+ if (!icingadbBacklogThresholds.Critical.IsEmpty() && historyBacklog > icingadbBacklogThresholds.Critical) {
+ critmsgs << " History backlog: " << Utility::FormatDuration(historyBacklog)
+ << ", greater than CRITICAL threshold (" << Utility::FormatDuration(icingadbBacklogThresholds.Critical) << ")!";
+ } else if (!icingadbBacklogThresholds.Warning.IsEmpty() && historyBacklog > icingadbBacklogThresholds.Warning) {
+ warnmsgs << " History backlog: " << Utility::FormatDuration(historyBacklog)
+ << ", greater than WARNING threshold (" << Utility::FormatDuration(icingadbBacklogThresholds.Warning) << ").";
+ }
+
+ perfdata->Add(new PerfdataValue("icingadb_history_backlog", historyBacklog, false, "seconds",
+ icingadbBacklogThresholds.Warning, icingadbBacklogThresholds.Critical, 0));
+
+ double runtimeBacklog = 0;
+
+ if (weResponsible && !syncOngoingSince) {
+			// These streams are only processed by the responsible instance after the full sync has finished;
+			// otherwise it's fine for some backlog to exist.
+ runtimeBacklog = getBacklog(xReadRuntimeBacklog);
+
+ if (!icingadbBacklogThresholds.Critical.IsEmpty() && runtimeBacklog > icingadbBacklogThresholds.Critical) {
+ critmsgs << " Runtime update backlog: " << Utility::FormatDuration(runtimeBacklog)
+ << ", greater than CRITICAL threshold (" << Utility::FormatDuration(icingadbBacklogThresholds.Critical) << ")!";
+ } else if (!icingadbBacklogThresholds.Warning.IsEmpty() && runtimeBacklog > icingadbBacklogThresholds.Warning) {
+ warnmsgs << " Runtime update backlog: " << Utility::FormatDuration(runtimeBacklog)
+ << ", greater than WARNING threshold (" << Utility::FormatDuration(icingadbBacklogThresholds.Warning) << ").";
+ }
+ }
+
+ // Also report the perfdata value on the standby instance or during a full sync (as 0 in this case).
+ perfdata->Add(new PerfdataValue("icingadb_runtime_update_backlog", runtimeBacklog, false, "seconds",
+ icingadbBacklogThresholds.Warning, icingadbBacklogThresholds.Critical, 0));
+ }
+
+ auto dumpAgo (now - dumpWhen);
+
+ if (dumpWhen) {
+ perfdata->Add(new PerfdataValue("icinga2_last_full_dump_ago", dumpAgo, false, "seconds", Empty, Empty, 0));
+ }
+
+ if (dumpTook) {
+ perfdata->Add(new PerfdataValue("icinga2_last_full_dump_duration", dumpTook, false, "seconds", Empty, Empty, 0));
+ }
+
+ if (dumpWhen && dumpTook) {
+ i2okmsgs << "\n* Last full dump: " << Utility::FormatDuration(dumpAgo)
+ << " ago, took " << Utility::FormatDuration(dumpTook);
+ }
+
+ auto icingadbUptime (icingadbNow - icingadbStartTime);
+
+ if (!down) {
+ perfdata->Add(new PerfdataValue("icingadb_uptime", icingadbUptime, false, "seconds", Empty, Empty, 0));
+ }
+
+ {
+ Array::Ptr values = PluginUtility::SplitPerfdata(perfdataFromRedis);
+ ObjectLock lock (values);
+
+ for (auto& v : values) {
+ perfdata->Add(PerfdataValue::Parse(v));
+ }
+ }
+
+ if (weResponsibleTs) {
+ perfdata->Add(new PerfdataValue("icingadb_responsible_for",
+ (weResponsible ? 1 : -1) * (icingadbNow - weResponsibleTs), false, "seconds"));
+ }
+
+ auto syncAgo (icingadbNow - syncSuccessWhen);
+
+ if (syncSuccessWhen) {
+ perfdata->Add(new PerfdataValue("icingadb_last_full_sync_ago", syncAgo, false, "seconds", Empty, Empty, 0));
+ }
+
+ if (syncSuccessTook) {
+ perfdata->Add(new PerfdataValue("icingadb_last_full_sync_duration", syncSuccessTook, false, "seconds", Empty, Empty, 0));
+ }
+
+ if (syncSuccessWhen && syncSuccessTook) {
+ idbokmsgs << "\n* Last full sync: " << Utility::FormatDuration(syncAgo)
+ << " ago, took " << Utility::FormatDuration(syncSuccessTook);
+ }
+
+ std::map<String, RingBuffer> statsPerOp;
+
+ const char * const icingadbKnownStats[] = {
+ "config_sync", "state_sync", "history_sync", "overdue_sync", "history_cleanup"
+ };
+
+ for (auto metric : icingadbKnownStats) {
+ statsPerOp.emplace(std::piecewise_construct, std::forward_as_tuple(metric), std::forward_as_tuple(15 * 60));
+ }
+
+ if (xReadStats) {
+ Array::Ptr messages = Array::Ptr(xReadStats->Get(0))->Get(1);
+ ObjectLock lock (messages);
+
+ for (Array::Ptr message : messages) {
+ auto ts (GetXMessageTs(message));
+ std::map<String, String> opsPerSec;
+
+ IcingaDB::AddKvsToMap(message->Get(1), opsPerSec);
+
+ for (auto& kv : opsPerSec) {
+ auto buf (statsPerOp.find(kv.first));
+
+ if (buf == statsPerOp.end()) {
+ buf = statsPerOp.emplace(
+ std::piecewise_construct,
+ std::forward_as_tuple(kv.first), std::forward_as_tuple(15 * 60)
+ ).first;
+ }
+
+ buf->second.InsertValue(ts, Convert::ToLong(kv.second));
+ }
+ }
+ }
+
+ for (auto& kv : statsPerOp) {
+ perfdata->Add(new PerfdataValue("icingadb_" + kv.first + "_items_1min", kv.second.UpdateAndGetValues(now, 60), false, "", Empty, Empty, 0));
+ perfdata->Add(new PerfdataValue("icingadb_" + kv.first + "_items_5mins", kv.second.UpdateAndGetValues(now, 5 * 60), false, "", Empty, Empty, 0));
+ perfdata->Add(new PerfdataValue("icingadb_" + kv.first + "_items_15mins", kv.second.UpdateAndGetValues(now, 15 * 60), false, "", Empty, Empty, 0));
+ }
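+
+	// E.g. the "config_sync" counter yields the labels icingadb_config_sync_items_1min,
+	// icingadb_config_sync_items_5mins and icingadb_config_sync_items_15mins.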
+
+ perfdata->Add(new PerfdataValue("icinga2_redis_queries_1min", redis->GetQueryCount(60), false, "", Empty, Empty, 0));
+ perfdata->Add(new PerfdataValue("icinga2_redis_queries_5mins", redis->GetQueryCount(5 * 60), false, "", Empty, Empty, 0));
+ perfdata->Add(new PerfdataValue("icinga2_redis_queries_15mins", redis->GetQueryCount(15 * 60), false, "", Empty, Empty, 0));
+
+ perfdata->Add(new PerfdataValue("icinga2_redis_pending_queries", redis->GetPendingQueryCount(), false, "", Empty, Empty, 0));
+
+ struct {
+ const char * Name;
+ int (RedisConnection::* Getter)(RingBuffer::SizeType span, RingBuffer::SizeType tv);
+ } const icingaWriteSubjects[] = {
+ {"config_dump", &RedisConnection::GetWrittenConfigFor},
+ {"state_dump", &RedisConnection::GetWrittenStateFor},
+ {"history_dump", &RedisConnection::GetWrittenHistoryFor}
+ };
+
+ for (auto subject : icingaWriteSubjects) {
+ perfdata->Add(new PerfdataValue(String("icinga2_") + subject.Name + "_items_1min", (redis.get()->*subject.Getter)(60, now), false, "", Empty, Empty, 0));
+ perfdata->Add(new PerfdataValue(String("icinga2_") + subject.Name + "_items_5mins", (redis.get()->*subject.Getter)(5 * 60, now), false, "", Empty, Empty, 0));
+ perfdata->Add(new PerfdataValue(String("icinga2_") + subject.Name + "_items_15mins", (redis.get()->*subject.Getter)(15 * 60, now), false, "", Empty, Empty, 0));
+ }
+
+ ServiceState state;
+ std::ostringstream msgbuf;
+ auto i2okmsg (i2okmsgs.str());
+ auto idbokmsg (idbokmsgs.str());
+ auto warnmsg (warnmsgs.str());
+ auto critmsg (critmsgs.str());
+
+ msgbuf << "Icinga DB ";
+
+ if (!critmsg.empty()) {
+ state = ServiceCritical;
+ msgbuf << "CRITICAL:" << critmsg;
+
+ if (!warnmsg.empty()) {
+ msgbuf << "\n\nWARNING:" << warnmsg;
+ }
+ } else if (!warnmsg.empty()) {
+ state = ServiceWarning;
+ msgbuf << "WARNING:" << warnmsg;
+ } else {
+ state = ServiceOK;
+ msgbuf << "OK: Uptime: " << Utility::FormatDuration(icingadbUptime) << ". Version: " << version << ".";
+ }
+
+ if (!i2okmsg.empty()) {
+ msgbuf << "\n\nIcinga 2:\n" << i2okmsg;
+ }
+
+ if (!idbokmsg.empty()) {
+ msgbuf << "\n\nIcinga DB:\n" << idbokmsg;
+ }
+
+ cr->SetPerformanceData(perfdata);
+ ReportIcingadbCheck(checkable, commandObj, cr, msgbuf.str(), state);
+}
diff --git a/lib/icingadb/icingadbchecktask.hpp b/lib/icingadb/icingadbchecktask.hpp
new file mode 100644
index 0000000..ba7d61b
--- /dev/null
+++ b/lib/icingadb/icingadbchecktask.hpp
@@ -0,0 +1,29 @@
+/* Icinga 2 | (c) 2022 Icinga GmbH | GPLv2+ */
+
+#ifndef ICINGADBCHECKTASK_H
+#define ICINGADBCHECKTASK_H
+
+#include "icingadb/icingadb.hpp"
+#include "icinga/checkable.hpp"
+
+namespace icinga
+{
+
+/**
+ * Icinga DB check.
+ *
+ * @ingroup icingadb
+ */
+class IcingadbCheckTask
+{
+public:
+ static void ScriptFunc(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros);
+
+private:
+ IcingadbCheckTask();
+};
+
+}
+
+#endif /* ICINGADBCHECKTASK_H */
diff --git a/lib/icingadb/redisconnection.cpp b/lib/icingadb/redisconnection.cpp
new file mode 100644
index 0000000..798a827
--- /dev/null
+++ b/lib/icingadb/redisconnection.cpp
@@ -0,0 +1,773 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icingadb/redisconnection.hpp"
+#include "base/array.hpp"
+#include "base/convert.hpp"
+#include "base/defer.hpp"
+#include "base/exception.hpp"
+#include "base/io-engine.hpp"
+#include "base/logger.hpp"
+#include "base/objectlock.hpp"
+#include "base/string.hpp"
+#include "base/tcpsocket.hpp"
+#include "base/tlsutility.hpp"
+#include "base/utility.hpp"
+#include <boost/asio.hpp>
+#include <boost/coroutine/exceptions.hpp>
+#include <boost/date_time/posix_time/posix_time_duration.hpp>
+#include <boost/utility/string_view.hpp>
+#include <boost/variant/get.hpp>
+#include <exception>
+#include <future>
+#include <iterator>
+#include <memory>
+#include <openssl/ssl.h>
+#include <openssl/x509_vfy.h>
+#include <utility>
+
+using namespace icinga;
+namespace asio = boost::asio;
+
+boost::regex RedisConnection::m_ErrAuth ("\\AERR AUTH ");
+
+RedisConnection::RedisConnection(const String& host, int port, const String& path, const String& password, int db,
+ bool useTls, bool insecure, const String& certPath, const String& keyPath, const String& caPath, const String& crlPath,
+ const String& tlsProtocolmin, const String& cipherList, double connectTimeout, DebugInfo di, const RedisConnection::Ptr& parent)
+ : RedisConnection(IoEngine::Get().GetIoContext(), host, port, path, password, db,
+ useTls, insecure, certPath, keyPath, caPath, crlPath, tlsProtocolmin, cipherList, connectTimeout, std::move(di), parent)
+{
+}
+
+RedisConnection::RedisConnection(boost::asio::io_context& io, String host, int port, String path, String password,
+ int db, bool useTls, bool insecure, String certPath, String keyPath, String caPath, String crlPath,
+ String tlsProtocolmin, String cipherList, double connectTimeout, DebugInfo di, const RedisConnection::Ptr& parent)
+ : m_Host(std::move(host)), m_Port(port), m_Path(std::move(path)), m_Password(std::move(password)),
+ m_DbIndex(db), m_CertPath(std::move(certPath)), m_KeyPath(std::move(keyPath)), m_Insecure(insecure),
+ m_CaPath(std::move(caPath)), m_CrlPath(std::move(crlPath)), m_TlsProtocolmin(std::move(tlsProtocolmin)),
+ m_CipherList(std::move(cipherList)), m_ConnectTimeout(connectTimeout), m_DebugInfo(std::move(di)), m_Connecting(false), m_Connected(false),
+ m_Started(false), m_Strand(io), m_QueuedWrites(io), m_QueuedReads(io), m_LogStatsTimer(io), m_Parent(parent)
+{
+ if (useTls && m_Path.IsEmpty()) {
+ UpdateTLSContext();
+ }
+}
+
+void RedisConnection::UpdateTLSContext()
+{
+ m_TLSContext = SetupSslContext(m_CertPath, m_KeyPath, m_CaPath,
+ m_CrlPath, m_CipherList, m_TlsProtocolmin, m_DebugInfo);
+}
+
+void RedisConnection::Start()
+{
+ if (!m_Started.exchange(true)) {
+ Ptr keepAlive (this);
+
+ IoEngine::SpawnCoroutine(m_Strand, [this, keepAlive](asio::yield_context yc) { ReadLoop(yc); });
+ IoEngine::SpawnCoroutine(m_Strand, [this, keepAlive](asio::yield_context yc) { WriteLoop(yc); });
+
+ if (!m_Parent) {
+ IoEngine::SpawnCoroutine(m_Strand, [this, keepAlive](asio::yield_context yc) { LogStats(yc); });
+ }
+ }
+
+ if (!m_Connecting.exchange(true)) {
+ Ptr keepAlive (this);
+
+ IoEngine::SpawnCoroutine(m_Strand, [this, keepAlive](asio::yield_context yc) { Connect(yc); });
+ }
+}
+
+bool RedisConnection::IsConnected() {
+ return m_Connected.load();
+}
+
+/**
+ * Append a Redis query to a log message
+ *
+ * @param query Redis query
+ * @param msg Log message
+ */
+static inline
+void LogQuery(RedisConnection::Query& query, Log& msg)
+{
+ int i = 0;
+
+ for (auto& arg : query) {
+ if (++i == 8) {
+ msg << " ...";
+ break;
+ }
+
+ if (arg.GetLength() > 64) {
+ msg << " '" << arg.SubStr(0, 61) << "...'";
+ } else {
+ msg << " '" << arg << '\'';
+ }
+ }
+}
+
+/**
+ * Queue a Redis query for sending
+ *
+ * @param query Redis query
+ * @param priority The query's priority
+ */
+void RedisConnection::FireAndForgetQuery(RedisConnection::Query query, RedisConnection::QueryPriority priority, QueryAffects affects)
+{
+ if (LogDebug >= Logger::GetMinLogSeverity()) {
+ Log msg (LogDebug, "IcingaDB", "Firing and forgetting query:");
+ LogQuery(query, msg);
+ }
+
+ auto item (Shared<Query>::Make(std::move(query)));
+ auto ctime (Utility::GetTime());
+
+ asio::post(m_Strand, [this, item, priority, ctime, affects]() {
+ m_Queues.Writes[priority].emplace(WriteQueueItem{item, nullptr, nullptr, nullptr, nullptr, ctime, affects});
+ m_QueuedWrites.Set();
+ IncreasePendingQueries(1);
+ });
+}
+
+/**
+ * Queue Redis queries for sending
+ *
+ * @param queries Redis queries
+ * @param priority The queries' priority
+ */
+void RedisConnection::FireAndForgetQueries(RedisConnection::Queries queries, RedisConnection::QueryPriority priority, QueryAffects affects)
+{
+ if (LogDebug >= Logger::GetMinLogSeverity()) {
+ for (auto& query : queries) {
+ Log msg(LogDebug, "IcingaDB", "Firing and forgetting query:");
+ LogQuery(query, msg);
+ }
+ }
+
+ auto item (Shared<Queries>::Make(std::move(queries)));
+ auto ctime (Utility::GetTime());
+
+ asio::post(m_Strand, [this, item, priority, ctime, affects]() {
+ m_Queues.Writes[priority].emplace(WriteQueueItem{nullptr, item, nullptr, nullptr, nullptr, ctime, affects});
+ m_QueuedWrites.Set();
+ IncreasePendingQueries(item->size());
+ });
+}
+
+/**
+ * Queue a Redis query for sending, wait for the response and return (or throw) it
+ *
+ * @param query Redis query
+ * @param priority The query's priority
+ *
+ * @return The response
+ */
+RedisConnection::Reply RedisConnection::GetResultOfQuery(RedisConnection::Query query, RedisConnection::QueryPriority priority, QueryAffects affects)
+{
+ if (LogDebug >= Logger::GetMinLogSeverity()) {
+ Log msg (LogDebug, "IcingaDB", "Executing query:");
+ LogQuery(query, msg);
+ }
+
+ std::promise<Reply> promise;
+ auto future (promise.get_future());
+ auto item (Shared<std::pair<Query, std::promise<Reply>>>::Make(std::move(query), std::move(promise)));
+ auto ctime (Utility::GetTime());
+
+ asio::post(m_Strand, [this, item, priority, ctime, affects]() {
+ m_Queues.Writes[priority].emplace(WriteQueueItem{nullptr, nullptr, item, nullptr, nullptr, ctime, affects});
+ m_QueuedWrites.Set();
+ IncreasePendingQueries(1);
+ });
+
+ item = nullptr;
+ future.wait();
+ return future.get();
+}
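+
+// Illustrative usage only (the key and the `connection` variable below are hypothetical):
+//
+//     RedisConnection::Reply reply (connection->GetResultOfQuery(
+//         {"GET", "some:key"}, RedisConnection::QueryPriority::Config));
+//
+// The call blocks the calling thread until the reply arrives and rethrows any failure from the
+// write/read path.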
+
+/**
+ * Queue Redis queries for sending, wait for the responses and return (or throw) them
+ *
+ * @param queries Redis queries
+ * @param priority The queries' priority
+ *
+ * @return The responses
+ */
+RedisConnection::Replies RedisConnection::GetResultsOfQueries(RedisConnection::Queries queries, RedisConnection::QueryPriority priority, QueryAffects affects)
+{
+ if (LogDebug >= Logger::GetMinLogSeverity()) {
+ for (auto& query : queries) {
+ Log msg(LogDebug, "IcingaDB", "Executing query:");
+ LogQuery(query, msg);
+ }
+ }
+
+ std::promise<Replies> promise;
+ auto future (promise.get_future());
+ auto item (Shared<std::pair<Queries, std::promise<Replies>>>::Make(std::move(queries), std::move(promise)));
+ auto ctime (Utility::GetTime());
+
+ asio::post(m_Strand, [this, item, priority, ctime, affects]() {
+ m_Queues.Writes[priority].emplace(WriteQueueItem{nullptr, nullptr, nullptr, item, nullptr, ctime, affects});
+ m_QueuedWrites.Set();
+ IncreasePendingQueries(item->first.size());
+ });
+
+ item = nullptr;
+ future.wait();
+ return future.get();
+}
+
+void RedisConnection::EnqueueCallback(const std::function<void(boost::asio::yield_context&)>& callback, RedisConnection::QueryPriority priority)
+{
+ auto ctime (Utility::GetTime());
+
+ asio::post(m_Strand, [this, callback, priority, ctime]() {
+ m_Queues.Writes[priority].emplace(WriteQueueItem{nullptr, nullptr, nullptr, nullptr, callback, ctime});
+ m_QueuedWrites.Set();
+ });
+}
+
+/**
+ * Put a no-op command with a result at the end of the queue and wait for the result,
+ * i.e. for everything enqueued to be processed by the server.
+ *
+ * @ingroup icingadb
+ */
+void RedisConnection::Sync()
+{
+ GetResultOfQuery({"PING"}, RedisConnection::QueryPriority::SyncConnection);
+}
+
+/**
+ * Get the enqueue time of the oldest still queued Redis query
+ *
+ * @return *nix timestamp or 0
+ */
+double RedisConnection::GetOldestPendingQueryTs()
+{
+ auto promise (Shared<std::promise<double>>::Make());
+ auto future (promise->get_future());
+
+ asio::post(m_Strand, [this, promise]() {
+ double oldest = 0;
+
+ for (auto& queue : m_Queues.Writes) {
+ if (m_SuppressedQueryKinds.find(queue.first) == m_SuppressedQueryKinds.end() && !queue.second.empty()) {
+ auto ctime (queue.second.front().CTime);
+
+ if (ctime < oldest || oldest == 0) {
+ oldest = ctime;
+ }
+ }
+ }
+
+ promise->set_value(oldest);
+ });
+
+ future.wait();
+ return future.get();
+}
+
+/**
+ * Mark queries of the given kind as not to be sent yet
+ *
+ * @param kind Query kind
+ */
+void RedisConnection::SuppressQueryKind(RedisConnection::QueryPriority kind)
+{
+ asio::post(m_Strand, [this, kind]() { m_SuppressedQueryKinds.emplace(kind); });
+}
+
+/**
+ * Unmark the given kind, allowing its queries to be sent again
+ *
+ * @param kind Query kind
+ */
+void RedisConnection::UnsuppressQueryKind(RedisConnection::QueryPriority kind)
+{
+ asio::post(m_Strand, [this, kind]() {
+ m_SuppressedQueryKinds.erase(kind);
+ m_QueuedWrites.Set();
+ });
+}
+
+/**
+ * Try to connect to Redis
+ */
+void RedisConnection::Connect(asio::yield_context& yc)
+{
+ Defer notConnecting ([this]() { m_Connecting.store(m_Connected.load()); });
+
+ boost::asio::deadline_timer timer (m_Strand.context());
+
+ auto waitForReadLoop ([this, &yc]() {
+ while (!m_Queues.FutureResponseActions.empty()) {
+ IoEngine::YieldCurrentCoroutine(yc);
+ }
+ });
+
+ for (;;) {
+ try {
+ if (m_Path.IsEmpty()) {
+ if (m_TLSContext) {
+ Log(m_Parent ? LogNotice : LogInformation, "IcingaDB")
+ << "Trying to connect to Redis server (async, TLS) on host '" << m_Host << ":" << m_Port << "'";
+
+ auto conn (Shared<AsioTlsStream>::Make(m_Strand.context(), *m_TLSContext, m_Host));
+ auto& tlsConn (conn->next_layer());
+ auto connectTimeout (MakeTimeout(conn));
+ Defer cancelTimeout ([&connectTimeout]() { connectTimeout->Cancel(); });
+
+ icinga::Connect(conn->lowest_layer(), m_Host, Convert::ToString(m_Port), yc);
+ tlsConn.async_handshake(tlsConn.client, yc);
+
+ if (!m_Insecure) {
+ std::shared_ptr<X509> cert (tlsConn.GetPeerCertificate());
+
+ if (!cert) {
+ BOOST_THROW_EXCEPTION(std::runtime_error(
+ "Redis didn't present any TLS certificate."
+ ));
+ }
+
+ if (!tlsConn.IsVerifyOK()) {
+ BOOST_THROW_EXCEPTION(std::runtime_error(
+ "TLS certificate validation failed: " + std::string(tlsConn.GetVerifyError())
+ ));
+ }
+ }
+
+ Handshake(conn, yc);
+ waitForReadLoop();
+ m_TlsConn = std::move(conn);
+ } else {
+ Log(m_Parent ? LogNotice : LogInformation, "IcingaDB")
+ << "Trying to connect to Redis server (async) on host '" << m_Host << ":" << m_Port << "'";
+
+ auto conn (Shared<TcpConn>::Make(m_Strand.context()));
+ auto connectTimeout (MakeTimeout(conn));
+ Defer cancelTimeout ([&connectTimeout]() { connectTimeout->Cancel(); });
+
+ icinga::Connect(conn->next_layer(), m_Host, Convert::ToString(m_Port), yc);
+ Handshake(conn, yc);
+ waitForReadLoop();
+ m_TcpConn = std::move(conn);
+ }
+ } else {
+ Log(LogInformation, "IcingaDB")
+ << "Trying to connect to Redis server (async) on unix socket path '" << m_Path << "'";
+
+ auto conn (Shared<UnixConn>::Make(m_Strand.context()));
+ auto connectTimeout (MakeTimeout(conn));
+ Defer cancelTimeout ([&connectTimeout]() { connectTimeout->Cancel(); });
+
+ conn->next_layer().async_connect(Unix::endpoint(m_Path.CStr()), yc);
+ Handshake(conn, yc);
+ waitForReadLoop();
+ m_UnixConn = std::move(conn);
+ }
+
+ m_Connected.store(true);
+
+ Log(m_Parent ? LogNotice : LogInformation, "IcingaDB", "Connected to Redis server");
+
+ // Operate on a copy so that the callback can set a new callback without destroying itself while running.
+ auto callback (m_ConnectedCallback);
+ if (callback) {
+ callback(yc);
+ }
+
+ break;
+ } catch (const boost::coroutines::detail::forced_unwind&) {
+ throw;
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "IcingaDB")
+ << "Cannot connect to " << m_Host << ":" << m_Port << ": " << ex.what();
+ }
+
+ timer.expires_from_now(boost::posix_time::seconds(5));
+ timer.async_wait(yc);
+ }
+
+}
+
+/**
+ * Actually receive the responses to the Redis queries sent by WriteItem() and handle them
+ */
+void RedisConnection::ReadLoop(asio::yield_context& yc)
+{
+ for (;;) {
+ m_QueuedReads.Wait(yc);
+
+ while (!m_Queues.FutureResponseActions.empty()) {
+ auto item (std::move(m_Queues.FutureResponseActions.front()));
+ m_Queues.FutureResponseActions.pop();
+
+ switch (item.Action) {
+ case ResponseAction::Ignore:
+ try {
+ for (auto i (item.Amount); i; --i) {
+ ReadOne(yc);
+ }
+ } catch (const boost::coroutines::detail::forced_unwind&) {
+ throw;
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "IcingaDB")
+ << "Error during receiving the response to a query which has been fired and forgotten: " << ex.what();
+
+ continue;
+ } catch (...) {
+ Log(LogCritical, "IcingaDB")
+ << "Error during receiving the response to a query which has been fired and forgotten";
+
+ continue;
+ }
+
+ break;
+ case ResponseAction::Deliver:
+ for (auto i (item.Amount); i; --i) {
+ auto promise (std::move(m_Queues.ReplyPromises.front()));
+ m_Queues.ReplyPromises.pop();
+
+ Reply reply;
+
+ try {
+ reply = ReadOne(yc);
+ } catch (const boost::coroutines::detail::forced_unwind&) {
+ throw;
+ } catch (...) {
+ promise.set_exception(std::current_exception());
+
+ continue;
+ }
+
+ promise.set_value(std::move(reply));
+ }
+
+ break;
+ case ResponseAction::DeliverBulk:
+ {
+ auto promise (std::move(m_Queues.RepliesPromises.front()));
+ m_Queues.RepliesPromises.pop();
+
+ Replies replies;
+ replies.reserve(item.Amount);
+
+ for (auto i (item.Amount); i; --i) {
+ try {
+ replies.emplace_back(ReadOne(yc));
+ } catch (const boost::coroutines::detail::forced_unwind&) {
+ throw;
+ } catch (...) {
+ promise.set_exception(std::current_exception());
+ break;
+ }
+ }
+
+ try {
+ promise.set_value(std::move(replies));
+ } catch (const std::future_error&) {
+							// promise.set_exception() was already called in the loop above,
+							// so set_value() legitimately fails here and the error can be ignored.
+ }
+ }
+ }
+ }
+
+ m_QueuedReads.Clear();
+ }
+}
+
+/**
+ * Actually send the Redis queries queued by {FireAndForget,GetResultsOf}{Query,Queries}()
+ */
+void RedisConnection::WriteLoop(asio::yield_context& yc)
+{
+ for (;;) {
+ m_QueuedWrites.Wait(yc);
+
+ WriteFirstOfHighestPrio:
+ for (auto& queue : m_Queues.Writes) {
+ if (m_SuppressedQueryKinds.find(queue.first) != m_SuppressedQueryKinds.end() || queue.second.empty()) {
+ continue;
+ }
+
+ auto next (std::move(queue.second.front()));
+ queue.second.pop();
+
+ WriteItem(yc, std::move(next));
+
+ goto WriteFirstOfHighestPrio;
+ }
+
+ m_QueuedWrites.Clear();
+ }
+}
+
+/**
+ * Periodically log current query performance
+ */
+void RedisConnection::LogStats(asio::yield_context& yc)
+{
+ double lastMessage = 0;
+
+ m_LogStatsTimer.expires_from_now(boost::posix_time::seconds(10));
+
+ for (;;) {
+ m_LogStatsTimer.async_wait(yc);
+ m_LogStatsTimer.expires_from_now(boost::posix_time::seconds(10));
+
+ if (!IsConnected())
+ continue;
+
+ auto now (Utility::GetTime());
+ bool timeoutReached = now - lastMessage >= 5 * 60;
+
+ if (m_PendingQueries < 1 && !timeoutReached)
+ continue;
+
+ auto output (round(m_OutputQueries.CalculateRate(now, 10)));
+
+ if (m_PendingQueries < output * 5 && !timeoutReached)
+ continue;
+
+ Log(LogInformation, "IcingaDB")
+ << "Pending queries: " << m_PendingQueries << " (Input: "
+ << round(m_InputQueries.CalculateRate(now, 10)) << "/s; Output: " << output << "/s)";
+
+ lastMessage = now;
+ }
+}
+
+/**
+ * Send the next queued item and schedule receiving the response(s)
+ *
+ * @param next Queued item to send
+ */
+void RedisConnection::WriteItem(boost::asio::yield_context& yc, RedisConnection::WriteQueueItem next)
+{
+ if (next.FireAndForgetQuery) {
+ auto& item (*next.FireAndForgetQuery);
+ DecreasePendingQueries(1);
+
+ try {
+ WriteOne(item, yc);
+ } catch (const boost::coroutines::detail::forced_unwind&) {
+ throw;
+ } catch (const std::exception& ex) {
+ Log msg (LogCritical, "IcingaDB", "Error during sending query");
+ LogQuery(item, msg);
+ msg << " which has been fired and forgotten: " << ex.what();
+
+ return;
+ } catch (...) {
+ Log msg (LogCritical, "IcingaDB", "Error during sending query");
+ LogQuery(item, msg);
+ msg << " which has been fired and forgotten";
+
+ return;
+ }
+
+ if (m_Queues.FutureResponseActions.empty() || m_Queues.FutureResponseActions.back().Action != ResponseAction::Ignore) {
+ m_Queues.FutureResponseActions.emplace(FutureResponseAction{1, ResponseAction::Ignore});
+ } else {
+ ++m_Queues.FutureResponseActions.back().Amount;
+ }
+
+ m_QueuedReads.Set();
+ }
+
+ if (next.FireAndForgetQueries) {
+ auto& item (*next.FireAndForgetQueries);
+ size_t i = 0;
+
+ DecreasePendingQueries(item.size());
+
+ try {
+ for (auto& query : item) {
+ WriteOne(query, yc);
+ ++i;
+ }
+ } catch (const boost::coroutines::detail::forced_unwind&) {
+ throw;
+ } catch (const std::exception& ex) {
+ Log msg (LogCritical, "IcingaDB", "Error during sending query");
+ LogQuery(item[i], msg);
+ msg << " which has been fired and forgotten: " << ex.what();
+
+ return;
+ } catch (...) {
+ Log msg (LogCritical, "IcingaDB", "Error during sending query");
+ LogQuery(item[i], msg);
+ msg << " which has been fired and forgotten";
+
+ return;
+ }
+
+ if (m_Queues.FutureResponseActions.empty() || m_Queues.FutureResponseActions.back().Action != ResponseAction::Ignore) {
+ m_Queues.FutureResponseActions.emplace(FutureResponseAction{item.size(), ResponseAction::Ignore});
+ } else {
+ m_Queues.FutureResponseActions.back().Amount += item.size();
+ }
+
+ m_QueuedReads.Set();
+ }
+
+ if (next.GetResultOfQuery) {
+ auto& item (*next.GetResultOfQuery);
+ DecreasePendingQueries(1);
+
+ try {
+ WriteOne(item.first, yc);
+ } catch (const boost::coroutines::detail::forced_unwind&) {
+ throw;
+ } catch (...) {
+ item.second.set_exception(std::current_exception());
+
+ return;
+ }
+
+ m_Queues.ReplyPromises.emplace(std::move(item.second));
+
+ if (m_Queues.FutureResponseActions.empty() || m_Queues.FutureResponseActions.back().Action != ResponseAction::Deliver) {
+ m_Queues.FutureResponseActions.emplace(FutureResponseAction{1, ResponseAction::Deliver});
+ } else {
+ ++m_Queues.FutureResponseActions.back().Amount;
+ }
+
+ m_QueuedReads.Set();
+ }
+
+ if (next.GetResultsOfQueries) {
+ auto& item (*next.GetResultsOfQueries);
+ DecreasePendingQueries(item.first.size());
+
+ try {
+ for (auto& query : item.first) {
+ WriteOne(query, yc);
+ }
+ } catch (const boost::coroutines::detail::forced_unwind&) {
+ throw;
+ } catch (...) {
+ item.second.set_exception(std::current_exception());
+
+ return;
+ }
+
+ m_Queues.RepliesPromises.emplace(std::move(item.second));
+ m_Queues.FutureResponseActions.emplace(FutureResponseAction{item.first.size(), ResponseAction::DeliverBulk});
+
+ m_QueuedReads.Set();
+ }
+
+ if (next.Callback) {
+ next.Callback(yc);
+ }
+
+ RecordAffected(next.Affects, Utility::GetTime());
+}
+
+/**
+ * Receive the response to a Redis query
+ *
+ * @return The response
+ */
+RedisConnection::Reply RedisConnection::ReadOne(boost::asio::yield_context& yc)
+{
+ if (m_Path.IsEmpty()) {
+ if (m_TLSContext) {
+ return ReadOne(m_TlsConn, yc);
+ } else {
+ return ReadOne(m_TcpConn, yc);
+ }
+ } else {
+ return ReadOne(m_UnixConn, yc);
+ }
+}
+
+/**
+ * Send query
+ *
+ * @param query Redis query
+ */
+void RedisConnection::WriteOne(RedisConnection::Query& query, asio::yield_context& yc)
+{
+ if (m_Path.IsEmpty()) {
+ if (m_TLSContext) {
+ WriteOne(m_TlsConn, query, yc);
+ } else {
+ WriteOne(m_TcpConn, query, yc);
+ }
+ } else {
+ WriteOne(m_UnixConn, query, yc);
+ }
+}
+
+/**
+ * Specify a callback that is run each time a connection is successfully established
+ *
+ * The callback is executed from a Boost.Asio coroutine and should therefore not perform blocking operations.
+ *
+ * @param callback Callback to execute
+ */
+void RedisConnection::SetConnectedCallback(std::function<void(asio::yield_context& yc)> callback) {
+ m_ConnectedCallback = std::move(callback);
+}
+
+int RedisConnection::GetQueryCount(RingBuffer::SizeType span)
+{
+ return m_OutputQueries.UpdateAndGetValues(Utility::GetTime(), span);
+}
+
+void RedisConnection::IncreasePendingQueries(int count)
+{
+ if (m_Parent) {
+ auto parent (m_Parent);
+
+ asio::post(parent->m_Strand, [parent, count]() {
+ parent->IncreasePendingQueries(count);
+ });
+ } else {
+ m_PendingQueries += count;
+ m_InputQueries.InsertValue(Utility::GetTime(), count);
+ }
+}
+
+void RedisConnection::DecreasePendingQueries(int count)
+{
+ if (m_Parent) {
+ auto parent (m_Parent);
+
+ asio::post(parent->m_Strand, [parent, count]() {
+ parent->DecreasePendingQueries(count);
+ });
+ } else {
+ m_PendingQueries -= count;
+ m_OutputQueries.InsertValue(Utility::GetTime(), count);
+ }
+}
+
+void RedisConnection::RecordAffected(RedisConnection::QueryAffects affected, double when)
+{
+ if (m_Parent) {
+ auto parent (m_Parent);
+
+ asio::post(parent->m_Strand, [parent, affected, when]() {
+ parent->RecordAffected(affected, when);
+ });
+ } else {
+ if (affected.Config) {
+ m_WrittenConfig.InsertValue(when, affected.Config);
+ }
+
+ if (affected.State) {
+ m_WrittenState.InsertValue(when, affected.State);
+ }
+
+ if (affected.History) {
+ m_WrittenHistory.InsertValue(when, affected.History);
+ }
+ }
+}
diff --git a/lib/icingadb/redisconnection.hpp b/lib/icingadb/redisconnection.hpp
new file mode 100644
index 0000000..f346ba2
--- /dev/null
+++ b/lib/icingadb/redisconnection.hpp
@@ -0,0 +1,678 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef REDISCONNECTION_H
+#define REDISCONNECTION_H
+
+#include "base/array.hpp"
+#include "base/atomic.hpp"
+#include "base/convert.hpp"
+#include "base/io-engine.hpp"
+#include "base/object.hpp"
+#include "base/ringbuffer.hpp"
+#include "base/shared.hpp"
+#include "base/string.hpp"
+#include "base/tlsstream.hpp"
+#include "base/value.hpp"
+#include <boost/asio/buffer.hpp>
+#include <boost/asio/buffered_stream.hpp>
+#include <boost/asio/deadline_timer.hpp>
+#include <boost/asio/io_context.hpp>
+#include <boost/asio/io_context_strand.hpp>
+#include <boost/asio/ip/tcp.hpp>
+#include <boost/asio/local/stream_protocol.hpp>
+#include <boost/asio/read.hpp>
+#include <boost/asio/read_until.hpp>
+#include <boost/asio/ssl/context.hpp>
+#include <boost/asio/streambuf.hpp>
+#include <boost/asio/write.hpp>
+#include <boost/lexical_cast.hpp>
+#include <boost/regex.hpp>
+#include <boost/utility/string_view.hpp>
+#include <cstddef>
+#include <cstdint>
+#include <cstdio>
+#include <cstring>
+#include <future>
+#include <map>
+#include <memory>
+#include <queue>
+#include <set>
+#include <stdexcept>
+#include <utility>
+#include <vector>
+
+namespace icinga
+{
+/**
+ * An asynchronous Redis connection.
+ *
+ * @ingroup icingadb
+ */
+ class RedisConnection final : public Object
+ {
+ public:
+ DECLARE_PTR_TYPEDEFS(RedisConnection);
+
+ typedef std::vector<String> Query;
+ typedef std::vector<Query> Queries;
+ typedef Value Reply;
+ typedef std::vector<Reply> Replies;
+
+ /**
+ * Redis query priorities, highest first.
+ *
+ * @ingroup icingadb
+ */
+ enum class QueryPriority : unsigned char
+ {
+ Heartbeat,
+ RuntimeStateStream, // runtime state updates, doesn't affect initially synced states
+ Config, // includes initially synced states
+ RuntimeStateSync, // updates initially synced states at runtime, in parallel to config dump, therefore must be < Config
+ History,
+ CheckResult,
+ SyncConnection = 255
+ };
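+
+		// Note: m_Queues.Writes is a std::map keyed by this enum, so WriteLoop() drains the queues in
+		// ascending enum order, e.g. Heartbeat before Config and Config before History.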
+
+ struct QueryAffects
+ {
+ size_t Config;
+ size_t State;
+ size_t History;
+
+ QueryAffects(size_t config = 0, size_t state = 0, size_t history = 0)
+ : Config(config), State(state), History(history) { }
+ };
+
+ RedisConnection(const String& host, int port, const String& path, const String& password, int db,
+ bool useTls, bool insecure, const String& certPath, const String& keyPath, const String& caPath, const String& crlPath,
+ const String& tlsProtocolmin, const String& cipherList, double connectTimeout, DebugInfo di, const Ptr& parent = nullptr);
+
+ void UpdateTLSContext();
+
+ void Start();
+
+ bool IsConnected();
+
+ void FireAndForgetQuery(Query query, QueryPriority priority, QueryAffects affects = {});
+ void FireAndForgetQueries(Queries queries, QueryPriority priority, QueryAffects affects = {});
+
+ Reply GetResultOfQuery(Query query, QueryPriority priority, QueryAffects affects = {});
+ Replies GetResultsOfQueries(Queries queries, QueryPriority priority, QueryAffects affects = {});
+
+ void EnqueueCallback(const std::function<void(boost::asio::yield_context&)>& callback, QueryPriority priority);
+ void Sync();
+ double GetOldestPendingQueryTs();
+
+ void SuppressQueryKind(QueryPriority kind);
+ void UnsuppressQueryKind(QueryPriority kind);
+
+ void SetConnectedCallback(std::function<void(boost::asio::yield_context& yc)> callback);
+
+ inline bool GetConnected()
+ {
+ return m_Connected.load();
+ }
+
+ int GetQueryCount(RingBuffer::SizeType span);
+
+ inline int GetPendingQueryCount()
+ {
+ return m_PendingQueries;
+ }
+
+ inline int GetWrittenConfigFor(RingBuffer::SizeType span, RingBuffer::SizeType tv = Utility::GetTime())
+ {
+ return m_WrittenConfig.UpdateAndGetValues(tv, span);
+ }
+
+ inline int GetWrittenStateFor(RingBuffer::SizeType span, RingBuffer::SizeType tv = Utility::GetTime())
+ {
+ return m_WrittenState.UpdateAndGetValues(tv, span);
+ }
+
+ inline int GetWrittenHistoryFor(RingBuffer::SizeType span, RingBuffer::SizeType tv = Utility::GetTime())
+ {
+ return m_WrittenHistory.UpdateAndGetValues(tv, span);
+ }
+
+ private:
+ /**
+ * What to do with the responses to Redis queries.
+ *
+ * @ingroup icingadb
+ */
+ enum class ResponseAction : unsigned char
+ {
+ Ignore, // discard
+ Deliver, // submit to the requestor
+ DeliverBulk // submit multiple responses to the requestor at once
+ };
+
+ /**
+	 * How many responses to Redis queries to handle and what to do with them.
+ *
+ * @ingroup icingadb
+ */
+ struct FutureResponseAction
+ {
+ size_t Amount;
+ ResponseAction Action;
+ };
+
+ /**
+	 * Something to be sent to Redis.
+ *
+ * @ingroup icingadb
+ */
+ struct WriteQueueItem
+ {
+ Shared<Query>::Ptr FireAndForgetQuery;
+ Shared<Queries>::Ptr FireAndForgetQueries;
+ Shared<std::pair<Query, std::promise<Reply>>>::Ptr GetResultOfQuery;
+ Shared<std::pair<Queries, std::promise<Replies>>>::Ptr GetResultsOfQueries;
+ std::function<void(boost::asio::yield_context&)> Callback;
+
+ double CTime;
+ QueryAffects Affects;
+ };
+
+ typedef boost::asio::ip::tcp Tcp;
+ typedef boost::asio::local::stream_protocol Unix;
+
+ typedef boost::asio::buffered_stream<Tcp::socket> TcpConn;
+ typedef boost::asio::buffered_stream<Unix::socket> UnixConn;
+
+ Shared<boost::asio::ssl::context>::Ptr m_TLSContext;
+
+ template<class AsyncReadStream>
+ static Value ReadRESP(AsyncReadStream& stream, boost::asio::yield_context& yc);
+
+ template<class AsyncReadStream>
+ static std::vector<char> ReadLine(AsyncReadStream& stream, boost::asio::yield_context& yc, size_t hint = 0);
+
+ template<class AsyncWriteStream>
+ static void WriteRESP(AsyncWriteStream& stream, const Query& query, boost::asio::yield_context& yc);
+
+ static boost::regex m_ErrAuth;
+
+ RedisConnection(boost::asio::io_context& io, String host, int port, String path, String password,
+ int db, bool useTls, bool insecure, String certPath, String keyPath, String caPath, String crlPath,
+ String tlsProtocolmin, String cipherList, double connectTimeout, DebugInfo di, const Ptr& parent);
+
+ void Connect(boost::asio::yield_context& yc);
+ void ReadLoop(boost::asio::yield_context& yc);
+ void WriteLoop(boost::asio::yield_context& yc);
+ void LogStats(boost::asio::yield_context& yc);
+ void WriteItem(boost::asio::yield_context& yc, WriteQueueItem item);
+ Reply ReadOne(boost::asio::yield_context& yc);
+ void WriteOne(Query& query, boost::asio::yield_context& yc);
+
+ template<class StreamPtr>
+ Reply ReadOne(StreamPtr& stream, boost::asio::yield_context& yc);
+
+ template<class StreamPtr>
+ void WriteOne(StreamPtr& stream, Query& query, boost::asio::yield_context& yc);
+
+ void IncreasePendingQueries(int count);
+ void DecreasePendingQueries(int count);
+ void RecordAffected(QueryAffects affected, double when);
+
+ template<class StreamPtr>
+ void Handshake(StreamPtr& stream, boost::asio::yield_context& yc);
+
+ template<class StreamPtr>
+ Timeout::Ptr MakeTimeout(StreamPtr& stream);
+
+ String m_Path;
+ String m_Host;
+ int m_Port;
+ String m_Password;
+ int m_DbIndex;
+
+ String m_CertPath;
+ String m_KeyPath;
+ bool m_Insecure;
+ String m_CaPath;
+ String m_CrlPath;
+ String m_TlsProtocolmin;
+ String m_CipherList;
+ double m_ConnectTimeout;
+ DebugInfo m_DebugInfo;
+
+ boost::asio::io_context::strand m_Strand;
+ Shared<TcpConn>::Ptr m_TcpConn;
+ Shared<UnixConn>::Ptr m_UnixConn;
+ Shared<AsioTlsStream>::Ptr m_TlsConn;
+ Atomic<bool> m_Connecting, m_Connected, m_Started;
+
+ struct {
+			// Items to be sent to Redis
+ std::map<QueryPriority, std::queue<WriteQueueItem>> Writes;
+ // Requestors, each waiting for a single response
+ std::queue<std::promise<Reply>> ReplyPromises;
+ // Requestors, each waiting for multiple responses at once
+ std::queue<std::promise<Replies>> RepliesPromises;
+ // Metadata about all of the above
+ std::queue<FutureResponseAction> FutureResponseActions;
+ } m_Queues;
+
+ // Kinds of queries not to actually send yet
+ std::set<QueryPriority> m_SuppressedQueryKinds;
+
+ // Indicate that there's something to send/receive
+ AsioConditionVariable m_QueuedWrites, m_QueuedReads;
+
+ std::function<void(boost::asio::yield_context& yc)> m_ConnectedCallback;
+
+ // Stats
+ RingBuffer m_InputQueries{10};
+ RingBuffer m_OutputQueries{15 * 60};
+ RingBuffer m_WrittenConfig{15 * 60};
+ RingBuffer m_WrittenState{15 * 60};
+ RingBuffer m_WrittenHistory{15 * 60};
+ int m_PendingQueries{0};
+ boost::asio::deadline_timer m_LogStatsTimer;
+ Ptr m_Parent;
+ };
+
+/**
+ * An error response from the Redis server.
+ *
+ * @ingroup icingadb
+ */
+class RedisError final : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(RedisError);
+
+ inline RedisError(String message) : m_Message(std::move(message))
+ {
+ }
+
+ inline const String& GetMessage()
+ {
+ return m_Message;
+ }
+
+private:
+ String m_Message;
+};
+
+/**
+ * Thrown if the connection to the Redis server has already been lost.
+ *
+ * @ingroup icingadb
+ */
+class RedisDisconnected : public std::runtime_error
+{
+public:
+ inline RedisDisconnected() : runtime_error("")
+ {
+ }
+};
+
+/**
+ * Thrown on malformed Redis server responses.
+ *
+ * @ingroup icingadb
+ */
+class RedisProtocolError : public std::runtime_error
+{
+protected:
+ inline RedisProtocolError() : runtime_error("")
+ {
+ }
+};
+
+/**
+ * Thrown on malformed types in Redis server responses.
+ *
+ * @ingroup icingadb
+ */
+class BadRedisType : public RedisProtocolError
+{
+public:
+ inline BadRedisType(char type) : m_What{type, 0}
+ {
+ }
+
+ virtual const char * what() const noexcept override
+ {
+ return m_What;
+ }
+
+private:
+ char m_What[2];
+};
+
+/**
+ * Thrown on malformed ints in Redis server responses.
+ *
+ * @ingroup icingadb
+ */
+class BadRedisInt : public RedisProtocolError
+{
+public:
+ inline BadRedisInt(std::vector<char> intStr) : m_What(std::move(intStr))
+ {
+ m_What.emplace_back(0);
+ }
+
+ virtual const char * what() const noexcept override
+ {
+ return m_What.data();
+ }
+
+private:
+ std::vector<char> m_What;
+};
+
+/**
+ * Read a Redis server response from stream
+ *
+ * @param stream Redis server connection
+ *
+ * @return The response
+ */
+template<class StreamPtr>
+RedisConnection::Reply RedisConnection::ReadOne(StreamPtr& stream, boost::asio::yield_context& yc)
+{
+ namespace asio = boost::asio;
+
+ if (!stream) {
+ throw RedisDisconnected();
+ }
+
+ auto strm (stream);
+
+ try {
+ return ReadRESP(*strm, yc);
+ } catch (const boost::coroutines::detail::forced_unwind&) {
+ throw;
+ } catch (...) {
+ if (m_Connecting.exchange(false)) {
+ m_Connected.store(false);
+ stream = nullptr;
+
+ if (!m_Connecting.exchange(true)) {
+ Ptr keepAlive (this);
+
+ IoEngine::SpawnCoroutine(m_Strand, [this, keepAlive](asio::yield_context yc) { Connect(yc); });
+ }
+ }
+
+ throw;
+ }
+}
+
+/**
+ * Write a Redis query to stream
+ *
+ * @param stream Redis server connection
+ * @param query Redis query
+ */
+template<class StreamPtr>
+void RedisConnection::WriteOne(StreamPtr& stream, RedisConnection::Query& query, boost::asio::yield_context& yc)
+{
+ namespace asio = boost::asio;
+
+ if (!stream) {
+ throw RedisDisconnected();
+ }
+
+ auto strm (stream);
+
+ try {
+ WriteRESP(*strm, query, yc);
+ strm->async_flush(yc);
+ } catch (const boost::coroutines::detail::forced_unwind&) {
+ throw;
+ } catch (...) {
+ if (m_Connecting.exchange(false)) {
+ m_Connected.store(false);
+ stream = nullptr;
+
+ if (!m_Connecting.exchange(true)) {
+ Ptr keepAlive (this);
+
+ IoEngine::SpawnCoroutine(m_Strand, [this, keepAlive](asio::yield_context yc) { Connect(yc); });
+ }
+ }
+
+ throw;
+ }
+}
+
+/**
+ * Perform the initial handshake (AUTH and/or SELECT, as configured) on a Redis stream
+ *
+ * @param strm Redis server connection
+ */
+template<class StreamPtr>
+void RedisConnection::Handshake(StreamPtr& strm, boost::asio::yield_context& yc)
+{
+ if (m_Password.IsEmpty() && !m_DbIndex) {
+ // Trigger NOAUTH
+ WriteRESP(*strm, {"PING"}, yc);
+ } else {
+ if (!m_Password.IsEmpty()) {
+ WriteRESP(*strm, {"AUTH", m_Password}, yc);
+ }
+
+ if (m_DbIndex) {
+ WriteRESP(*strm, {"SELECT", Convert::ToString(m_DbIndex)}, yc);
+ }
+ }
+
+ strm->async_flush(yc);
+
+ if (m_Password.IsEmpty() && !m_DbIndex) {
+ Reply pong (ReadRESP(*strm, yc));
+
+ if (pong.IsObjectType<RedisError>()) {
+ // Likely NOAUTH
+ BOOST_THROW_EXCEPTION(std::runtime_error(RedisError::Ptr(pong)->GetMessage()));
+ }
+ } else {
+ if (!m_Password.IsEmpty()) {
+ Reply auth (ReadRESP(*strm, yc));
+
+ if (auth.IsObjectType<RedisError>()) {
+ auto& authErr (RedisError::Ptr(auth)->GetMessage().GetData());
+ boost::smatch what;
+
+ if (boost::regex_search(authErr, what, m_ErrAuth)) {
+ Log(LogWarning, "IcingaDB") << authErr;
+ } else {
+ // Likely WRONGPASS
+ BOOST_THROW_EXCEPTION(std::runtime_error(authErr));
+ }
+ }
+ }
+
+ if (m_DbIndex) {
+ Reply select (ReadRESP(*strm, yc));
+
+ if (select.IsObjectType<RedisError>()) {
+ // Likely NOAUTH or ERR DB
+ BOOST_THROW_EXCEPTION(std::runtime_error(RedisError::Ptr(select)->GetMessage()));
+ }
+ }
+ }
+}
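+
+// For illustration only (password and DB index are hypothetical): with m_Password = "secret" and
+// m_DbIndex = 2 the handshake above pipelines
+//
+//     *2\r\n$4\r\nAUTH\r\n$6\r\nsecret\r\n
+//     *2\r\n$6\r\nSELECT\r\n$1\r\n2\r\n
+//
+// and then reads both replies; with neither configured it just sends PING so that a potential
+// NOAUTH error surfaces early.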
+
+/**
+ * Creates a Timeout which cancels the stream's I/O once m_ConnectTimeout has passed
+ *
+ * @param stream Redis server connection
+ */
+template<class StreamPtr>
+Timeout::Ptr RedisConnection::MakeTimeout(StreamPtr& stream)
+{
+ Ptr keepAlive (this);
+
+ return new Timeout(
+ m_Strand.context(),
+ m_Strand,
+ boost::posix_time::microseconds(intmax_t(m_ConnectTimeout * 1000000)),
+ [keepAlive, stream](boost::asio::yield_context yc) {
+ boost::system::error_code ec;
+ stream->lowest_layer().cancel(ec);
+ }
+ );
+}
+
+/**
+ * Read a Redis protocol value from stream
+ *
+ * @param stream Redis server connection
+ *
+ * @return The value
+ */
+template<class AsyncReadStream>
+Value RedisConnection::ReadRESP(AsyncReadStream& stream, boost::asio::yield_context& yc)
+{
+ namespace asio = boost::asio;
+
+ char type = 0;
+ asio::async_read(stream, asio::mutable_buffer(&type, 1), yc);
+
+ switch (type) {
+ case '+':
+ {
+ auto buf (ReadLine(stream, yc));
+ return String(buf.begin(), buf.end());
+ }
+ case '-':
+ {
+ auto buf (ReadLine(stream, yc));
+ return new RedisError(String(buf.begin(), buf.end()));
+ }
+ case ':':
+ {
+ auto buf (ReadLine(stream, yc, 21));
+ intmax_t i = 0;
+
+ try {
+ i = boost::lexical_cast<intmax_t>(boost::string_view(buf.data(), buf.size()));
+ } catch (...) {
+ throw BadRedisInt(std::move(buf));
+ }
+
+ return (double)i;
+ }
+ case '$':
+ {
+ auto buf (ReadLine(stream, yc, 21));
+ intmax_t i = 0;
+
+ try {
+ i = boost::lexical_cast<intmax_t>(boost::string_view(buf.data(), buf.size()));
+ } catch (...) {
+ throw BadRedisInt(std::move(buf));
+ }
+
+ if (i < 0) {
+ return Value();
+ }
+
+ buf.clear();
+ buf.insert(buf.end(), i, 0);
+ asio::async_read(stream, asio::mutable_buffer(buf.data(), buf.size()), yc);
+
+ {
+ char crlf[2];
+ asio::async_read(stream, asio::mutable_buffer(crlf, 2), yc);
+ }
+
+ return String(buf.begin(), buf.end());
+ }
+ case '*':
+ {
+ auto buf (ReadLine(stream, yc, 21));
+ intmax_t i = 0;
+
+ try {
+ i = boost::lexical_cast<intmax_t>(boost::string_view(buf.data(), buf.size()));
+ } catch (...) {
+ throw BadRedisInt(std::move(buf));
+ }
+
+ if (i < 0) {
+ return Empty;
+ }
+
+ Array::Ptr arr = new Array();
+
+ arr->Reserve(i);
+
+ for (; i; --i) {
+ arr->Add(ReadRESP(stream, yc));
+ }
+
+ return arr;
+ }
+ default:
+ throw BadRedisType(type);
+ }
+}
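+
+// Examples of the RESP type prefixes handled above (standard Redis protocol, for illustration):
+//
+//     +OK\r\n             -> String "OK"
+//     -ERR unknown\r\n    -> RedisError("ERR unknown")
+//     :42\r\n             -> 42 (as double)
+//     $3\r\nfoo\r\n       -> String "foo"     ($-1\r\n -> empty Value)
+//     *2\r\n:1\r\n:2\r\n  -> Array [1, 2]     (*-1\r\n -> Empty)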
+
+/**
+ * Read from stream until \r\n
+ *
+ * @param stream Redis server connection
+ * @param hint Expected amount of data
+ *
+ * @return The data read, excluding the trailing \r\n
+ */
+template<class AsyncReadStream>
+std::vector<char> RedisConnection::ReadLine(AsyncReadStream& stream, boost::asio::yield_context& yc, size_t hint)
+{
+ namespace asio = boost::asio;
+
+ std::vector<char> line;
+ line.reserve(hint);
+
+ char next = 0;
+ asio::mutable_buffer buf (&next, 1);
+
+ for (;;) {
+ asio::async_read(stream, buf, yc);
+
+ if (next == '\r') {
+ asio::async_read(stream, buf, yc);
+ return line;
+ }
+
+ line.emplace_back(next);
+ }
+}
+
+/**
+ * Write a Redis protocol value to stream
+ *
+ * @param stream Redis server connection
+ * @param query Redis protocol value
+ */
+template<class AsyncWriteStream>
+void RedisConnection::WriteRESP(AsyncWriteStream& stream, const Query& query, boost::asio::yield_context& yc)
+{
+ namespace asio = boost::asio;
+
+ asio::streambuf writeBuffer;
+ std::ostream msg(&writeBuffer);
+
+ msg << "*" << query.size() << "\r\n";
+
+ for (auto& arg : query) {
+ msg << "$" << arg.GetLength() << "\r\n" << arg << "\r\n";
+ }
+
+ asio::async_write(stream, writeBuffer, yc);
+}
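+
+// For illustration, the (hypothetical) query {"HSET", "key", "field", "value"} is serialized as
+//
+//     *4\r\n$4\r\nHSET\r\n$3\r\nkey\r\n$5\r\nfield\r\n$5\r\nvalue\r\n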
+
+}
+
+#endif //REDISCONNECTION_H
diff --git a/lib/livestatus/CMakeLists.txt b/lib/livestatus/CMakeLists.txt
new file mode 100644
index 0000000..d49f9f5
--- /dev/null
+++ b/lib/livestatus/CMakeLists.txt
@@ -0,0 +1,65 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+mkclass_target(livestatuslistener.ti livestatuslistener-ti.cpp livestatuslistener-ti.hpp)
+
+set(livestatus_SOURCES
+ i2-livestatus.hpp
+ aggregator.cpp aggregator.hpp
+ andfilter.cpp andfilter.hpp
+ attributefilter.cpp attributefilter.hpp
+ avgaggregator.cpp avgaggregator.hpp
+ column.cpp column.hpp
+ combinerfilter.cpp combinerfilter.hpp
+ commandstable.cpp commandstable.hpp
+ commentstable.cpp commentstable.hpp
+ contactgroupstable.cpp contactgroupstable.hpp
+ contactstable.cpp contactstable.hpp
+ countaggregator.cpp countaggregator.hpp
+ downtimestable.cpp downtimestable.hpp
+ endpointstable.cpp endpointstable.hpp
+ filter.hpp
+ historytable.hpp
+ hostgroupstable.cpp hostgroupstable.hpp
+ hoststable.cpp hoststable.hpp
+ invavgaggregator.cpp invavgaggregator.hpp
+ invsumaggregator.cpp invsumaggregator.hpp
+ livestatuslistener.cpp livestatuslistener.hpp livestatuslistener-ti.hpp
+ livestatuslogutility.cpp livestatuslogutility.hpp
+ livestatusquery.cpp livestatusquery.hpp
+ logtable.cpp logtable.hpp
+ maxaggregator.cpp maxaggregator.hpp
+ minaggregator.cpp minaggregator.hpp
+ negatefilter.cpp negatefilter.hpp
+ orfilter.cpp orfilter.hpp
+ servicegroupstable.cpp servicegroupstable.hpp
+ servicestable.cpp servicestable.hpp
+ statehisttable.cpp statehisttable.hpp
+ statustable.cpp statustable.hpp
+ stdaggregator.cpp stdaggregator.hpp
+ sumaggregator.cpp sumaggregator.hpp
+ table.cpp table.hpp
+ timeperiodstable.cpp timeperiodstable.hpp
+ zonestable.cpp zonestable.hpp
+)
+
+if(ICINGA2_UNITY_BUILD)
+ mkunity_target(livestatus livestatus livestatus_SOURCES)
+endif()
+
+add_library(livestatus OBJECT ${livestatus_SOURCES})
+
+add_dependencies(livestatus base config icinga remote)
+
+set_target_properties (
+ livestatus PROPERTIES
+ FOLDER Components
+)
+
+install_if_not_exists(
+ ${PROJECT_SOURCE_DIR}/etc/icinga2/features-available/livestatus.conf
+ ${ICINGA2_CONFIGDIR}/features-available
+)
+
+install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_INITRUNDIR}/cmd\")")
+
+set(CPACK_NSIS_EXTRA_INSTALL_COMMANDS "${CPACK_NSIS_EXTRA_INSTALL_COMMANDS}" PARENT_SCOPE)
diff --git a/lib/livestatus/aggregator.cpp b/lib/livestatus/aggregator.cpp
new file mode 100644
index 0000000..a809b07
--- /dev/null
+++ b/lib/livestatus/aggregator.cpp
@@ -0,0 +1,18 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/aggregator.hpp"
+
+using namespace icinga;
+
+void Aggregator::SetFilter(const Filter::Ptr& filter)
+{
+ m_Filter = filter;
+}
+
+Filter::Ptr Aggregator::GetFilter() const
+{
+ return m_Filter;
+}
+
+AggregatorState::~AggregatorState()
+{ }
diff --git a/lib/livestatus/aggregator.hpp b/lib/livestatus/aggregator.hpp
new file mode 100644
index 0000000..1c0f778
--- /dev/null
+++ b/lib/livestatus/aggregator.hpp
@@ -0,0 +1,44 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef AGGREGATOR_H
+#define AGGREGATOR_H
+
+#include "livestatus/i2-livestatus.hpp"
+#include "livestatus/table.hpp"
+#include "livestatus/filter.hpp"
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+struct AggregatorState
+{
+ virtual ~AggregatorState();
+};
+
+/**
+ * @ingroup livestatus
+ */
+class Aggregator : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(Aggregator);
+
+ virtual void Apply(const Table::Ptr& table, const Value& row, AggregatorState **state) = 0;
+ virtual double GetResultAndFreeState(AggregatorState *state) const = 0;
+ void SetFilter(const Filter::Ptr& filter);
+
+protected:
+ Aggregator() = default;
+
+ Filter::Ptr GetFilter() const;
+
+private:
+ Filter::Ptr m_Filter;
+};
+
+}
+
+#endif /* AGGREGATOR_H */
diff --git a/lib/livestatus/andfilter.cpp b/lib/livestatus/andfilter.cpp
new file mode 100644
index 0000000..9852580
--- /dev/null
+++ b/lib/livestatus/andfilter.cpp
@@ -0,0 +1,15 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/andfilter.hpp"
+
+using namespace icinga;
+
+bool AndFilter::Apply(const Table::Ptr& table, const Value& row)
+{
+ for (const Filter::Ptr& filter : m_Filters) {
+ if (!filter->Apply(table, row))
+ return false;
+ }
+
+ return true;
+}
diff --git a/lib/livestatus/andfilter.hpp b/lib/livestatus/andfilter.hpp
new file mode 100644
index 0000000..8192bf7
--- /dev/null
+++ b/lib/livestatus/andfilter.hpp
@@ -0,0 +1,26 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef ANDFILTER_H
+#define ANDFILTER_H
+
+#include "livestatus/combinerfilter.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class AndFilter final : public CombinerFilter
+{
+public:
+ DECLARE_PTR_TYPEDEFS(AndFilter);
+
+ bool Apply(const Table::Ptr& table, const Value& row) override;
+};
+
+}
+
+#endif /* ANDFILTER_H */
diff --git a/lib/livestatus/attributefilter.cpp b/lib/livestatus/attributefilter.cpp
new file mode 100644
index 0000000..50d7244
--- /dev/null
+++ b/lib/livestatus/attributefilter.cpp
@@ -0,0 +1,121 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/attributefilter.hpp"
+#include "base/convert.hpp"
+#include "base/array.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+#include <boost/regex.hpp>
+#include <boost/algorithm/string/predicate.hpp>
+
+using namespace icinga;
+
+AttributeFilter::AttributeFilter(String column, String op, String operand)
+ : m_Column(std::move(column)), m_Operator(std::move(op)), m_Operand(std::move(operand))
+{ }
+
+bool AttributeFilter::Apply(const Table::Ptr& table, const Value& row)
+{
+ Column column = table->GetColumn(m_Column);
+
+ Value value = column.ExtractValue(row);
+
+ if (value.IsObjectType<Array>()) {
+ Array::Ptr array = value;
+
+ if (m_Operator == ">=" || m_Operator == "<") {
+ bool negate = (m_Operator == "<");
+
+ ObjectLock olock(array);
+ for (const String& item : array) {
+ if (item == m_Operand)
+ return !negate; /* Item found in list. */
+ }
+
+ return negate; /* Item not found in list. */
+ } else if (m_Operator == "=") {
+ return (array->GetLength() == 0);
+ } else {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid operator for column '" + m_Column + "': " + m_Operator + " (expected '>=' or '=')."));
+ }
+ } else {
+ if (m_Operator == "=") {
+ if (value.GetType() == ValueNumber || value.GetType() == ValueBoolean)
+ return (static_cast<double>(value) == Convert::ToDouble(m_Operand));
+ else
+ return (static_cast<String>(value) == m_Operand);
+ } else if (m_Operator == "~") {
+ bool ret;
+ try {
+ boost::regex expr(m_Operand.GetData());
+ String operand = value;
+ boost::smatch what;
+ ret = boost::regex_search(operand.GetData(), what, expr);
+ } catch (boost::exception&) {
+ Log(LogWarning, "AttributeFilter")
+ << "Regex '" << m_Operand << " " << m_Operator << " " << value << "' error.";
+ ret = false;
+ }
+
+ //Log(LogDebug, "LivestatusListener/AttributeFilter")
+ // << "Attribute filter '" << m_Operand + " " << m_Operator << " "
+ // << value << "' " << (ret ? "matches" : "doesn't match") << ".";
+
+ return ret;
+ } else if (m_Operator == "=~") {
+ bool ret;
+ try {
+ String operand = value;
+ ret = boost::iequals(operand, m_Operand.GetData());
+ } catch (boost::exception&) {
+ Log(LogWarning, "AttributeFilter")
+ << "Case-insensitive equality '" << m_Operand << " " << m_Operator << " " << value << "' error.";
+ ret = false;
+ }
+
+ return ret;
+ } else if (m_Operator == "~~") {
+ bool ret;
+ try {
+ boost::regex expr(m_Operand.GetData(), boost::regex::icase);
+ String operand = value;
+ boost::smatch what;
+ ret = boost::regex_search(operand.GetData(), what, expr);
+ } catch (boost::exception&) {
+ Log(LogWarning, "AttributeFilter")
+ << "Regex '" << m_Operand << " " << m_Operator << " " << value << "' error.";
+ ret = false;
+ }
+
+ //Log(LogDebug, "LivestatusListener/AttributeFilter")
+ // << "Attribute filter '" << m_Operand << " " << m_Operator << " "
+ // << value << "' " << (ret ? "matches" : "doesn't match") << ".";
+
+ return ret;
+ } else if (m_Operator == "<") {
+ if (value.GetType() == ValueNumber)
+ return (static_cast<double>(value) < Convert::ToDouble(m_Operand));
+ else
+ return (static_cast<String>(value) < m_Operand);
+ } else if (m_Operator == ">") {
+ if (value.GetType() == ValueNumber)
+ return (static_cast<double>(value) > Convert::ToDouble(m_Operand));
+ else
+ return (static_cast<String>(value) > m_Operand);
+ } else if (m_Operator == "<=") {
+ if (value.GetType() == ValueNumber)
+ return (static_cast<double>(value) <= Convert::ToDouble(m_Operand));
+ else
+ return (static_cast<String>(value) <= m_Operand);
+ } else if (m_Operator == ">=") {
+ if (value.GetType() == ValueNumber)
+ return (static_cast<double>(value) >= Convert::ToDouble(m_Operand));
+ else
+ return (static_cast<String>(value) >= m_Operand);
+ } else {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Unknown operator for column '" + m_Column + "': " + m_Operator));
+ }
+ }
+
+ return false;
+}
diff --git a/lib/livestatus/attributefilter.hpp b/lib/livestatus/attributefilter.hpp
new file mode 100644
index 0000000..18bd843
--- /dev/null
+++ b/lib/livestatus/attributefilter.hpp
@@ -0,0 +1,33 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef ATTRIBUTEFILTER_H
+#define ATTRIBUTEFILTER_H
+
+#include "livestatus/filter.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class AttributeFilter final : public Filter
+{
+public:
+ DECLARE_PTR_TYPEDEFS(AttributeFilter);
+
+ AttributeFilter(String column, String op, String operand);
+
+ bool Apply(const Table::Ptr& table, const Value& row) override;
+
+protected:
+ String m_Column;
+ String m_Operator;
+ String m_Operand;
+};
+
+}
+
+#endif /* ATTRIBUTEFILTER_H */
diff --git a/lib/livestatus/avgaggregator.cpp b/lib/livestatus/avgaggregator.cpp
new file mode 100644
index 0000000..35701f3
--- /dev/null
+++ b/lib/livestatus/avgaggregator.cpp
@@ -0,0 +1,38 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/avgaggregator.hpp"
+
+using namespace icinga;
+
+AvgAggregator::AvgAggregator(String attr)
+ : m_AvgAttr(std::move(attr))
+{ }
+
+AvgAggregatorState *AvgAggregator::EnsureState(AggregatorState **state)
+{
+ if (!*state)
+ *state = new AvgAggregatorState();
+
+ return static_cast<AvgAggregatorState *>(*state);
+}
+
+void AvgAggregator::Apply(const Table::Ptr& table, const Value& row, AggregatorState **state)
+{
+ Column column = table->GetColumn(m_AvgAttr);
+
+ Value value = column.ExtractValue(row);
+
+ AvgAggregatorState *pstate = EnsureState(state);
+
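+ /* Despite its name, Avg accumulates the running sum; GetResultAndFreeState() divides by AvgCount to obtain the mean. */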
+ pstate->Avg += value;
+ pstate->AvgCount++;
+}
+
+double AvgAggregator::GetResultAndFreeState(AggregatorState *state) const
+{
+ AvgAggregatorState *pstate = EnsureState(&state);
+ double result = pstate->Avg / pstate->AvgCount;
+ delete pstate;
+
+ return result;
+}
diff --git a/lib/livestatus/avgaggregator.hpp b/lib/livestatus/avgaggregator.hpp
new file mode 100644
index 0000000..11bd9f3
--- /dev/null
+++ b/lib/livestatus/avgaggregator.hpp
@@ -0,0 +1,42 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef AVGAGGREGATOR_H
+#define AVGAGGREGATOR_H
+
+#include "livestatus/table.hpp"
+#include "livestatus/aggregator.hpp"
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+struct AvgAggregatorState final : public AggregatorState
+{
+ double Avg{0};
+ double AvgCount{0};
+};
+
+/**
+ * @ingroup livestatus
+ */
+class AvgAggregator final : public Aggregator
+{
+public:
+ DECLARE_PTR_TYPEDEFS(AvgAggregator);
+
+ AvgAggregator(String attr);
+
+ void Apply(const Table::Ptr& table, const Value& row, AggregatorState **state) override;
+ double GetResultAndFreeState(AggregatorState *state) const override;
+
+private:
+ String m_AvgAttr;
+
+ static AvgAggregatorState *EnsureState(AggregatorState **state);
+};
+
+}
+
+#endif /* AVGAGGREGATOR_H */
diff --git a/lib/livestatus/column.cpp b/lib/livestatus/column.cpp
new file mode 100644
index 0000000..c915b3d
--- /dev/null
+++ b/lib/livestatus/column.cpp
@@ -0,0 +1,21 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/column.hpp"
+
+using namespace icinga;
+
+Column::Column(ValueAccessor valueAccessor, ObjectAccessor objectAccessor)
+ : m_ValueAccessor(std::move(valueAccessor)), m_ObjectAccessor(std::move(objectAccessor))
+{ }
+
+Value Column::ExtractValue(const Value& urow, LivestatusGroupByType groupByType, const Object::Ptr& groupByObject) const
+{
+ Value row;
+
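+ /* An object accessor (used e.g. for joined host_/service_ columns) first resolves the raw row to the related object; the value accessor then reads from that object. */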
+ if (m_ObjectAccessor)
+ row = m_ObjectAccessor(urow, groupByType, groupByObject);
+ else
+ row = urow;
+
+ return m_ValueAccessor(row);
+}
diff --git a/lib/livestatus/column.hpp b/lib/livestatus/column.hpp
new file mode 100644
index 0000000..264cca7
--- /dev/null
+++ b/lib/livestatus/column.hpp
@@ -0,0 +1,37 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef COLUMN_H
+#define COLUMN_H
+
+#include "livestatus/i2-livestatus.hpp"
+#include "base/value.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+enum LivestatusGroupByType {
+ LivestatusGroupByNone,
+ LivestatusGroupByHostGroup,
+ LivestatusGroupByServiceGroup
+};
+
+class Column
+{
+public:
+ typedef std::function<Value (const Value&)> ValueAccessor;
+ typedef std::function<Value (const Value&, LivestatusGroupByType, const Object::Ptr&)> ObjectAccessor;
+
+ Column(ValueAccessor valueAccessor, ObjectAccessor objectAccessor);
+
+ Value ExtractValue(const Value& urow, LivestatusGroupByType groupByType = LivestatusGroupByNone, const Object::Ptr& groupByObject = Empty) const;
+
+private:
+ ValueAccessor m_ValueAccessor;
+ ObjectAccessor m_ObjectAccessor;
+};
+
+}
+
+#endif /* COLUMN_H */
diff --git a/lib/livestatus/combinerfilter.cpp b/lib/livestatus/combinerfilter.cpp
new file mode 100644
index 0000000..36a8328
--- /dev/null
+++ b/lib/livestatus/combinerfilter.cpp
@@ -0,0 +1,10 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/combinerfilter.hpp"
+
+using namespace icinga;
+
+void CombinerFilter::AddSubFilter(const Filter::Ptr& filter)
+{
+ m_Filters.push_back(filter);
+}
diff --git a/lib/livestatus/combinerfilter.hpp b/lib/livestatus/combinerfilter.hpp
new file mode 100644
index 0000000..49b8b61
--- /dev/null
+++ b/lib/livestatus/combinerfilter.hpp
@@ -0,0 +1,31 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef COMBINERFILTER_H
+#define COMBINERFILTER_H
+
+#include "livestatus/filter.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class CombinerFilter : public Filter
+{
+public:
+ DECLARE_PTR_TYPEDEFS(CombinerFilter);
+
+ void AddSubFilter(const Filter::Ptr& filter);
+
+protected:
+ std::vector<Filter::Ptr> m_Filters;
+
+ CombinerFilter() = default;
+};
+
+}
+
+#endif /* COMBINERFILTER_H */
diff --git a/lib/livestatus/commandstable.cpp b/lib/livestatus/commandstable.cpp
new file mode 100644
index 0000000..3a777d2
--- /dev/null
+++ b/lib/livestatus/commandstable.cpp
@@ -0,0 +1,142 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/commandstable.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/eventcommand.hpp"
+#include "icinga/notificationcommand.hpp"
+#include "icinga/compatutility.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/convert.hpp"
+#include <boost/algorithm/string/replace.hpp>
+
+using namespace icinga;
+
+CommandsTable::CommandsTable()
+{
+ AddColumns(this);
+}
+
+void CommandsTable::AddColumns(Table *table, const String& prefix,
+ const Column::ObjectAccessor& objectAccessor)
+{
+ table->AddColumn(prefix + "name", Column(&CommandsTable::NameAccessor, objectAccessor));
+ table->AddColumn(prefix + "line", Column(&CommandsTable::LineAccessor, objectAccessor));
+ table->AddColumn(prefix + "custom_variable_names", Column(&CommandsTable::CustomVariableNamesAccessor, objectAccessor));
+ table->AddColumn(prefix + "custom_variable_values", Column(&CommandsTable::CustomVariableValuesAccessor, objectAccessor));
+ table->AddColumn(prefix + "custom_variables", Column(&CommandsTable::CustomVariablesAccessor, objectAccessor));
+ table->AddColumn(prefix + "modified_attributes", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "modified_attributes_list", Column(&Table::ZeroAccessor, objectAccessor));
+}
+
+String CommandsTable::GetName() const
+{
+ return "commands";
+}
+
+String CommandsTable::GetPrefix() const
+{
+ return "command";
+}
+
+void CommandsTable::FetchRows(const AddRowFunction& addRowFn)
+{
+ for (const ConfigObject::Ptr& object : ConfigType::GetObjectsByType<CheckCommand>()) {
+ if (!addRowFn(object, LivestatusGroupByNone, Empty))
+ return;
+ }
+
+ for (const ConfigObject::Ptr& object : ConfigType::GetObjectsByType<EventCommand>()) {
+ if (!addRowFn(object, LivestatusGroupByNone, Empty))
+ return;
+ }
+
+ for (const ConfigObject::Ptr& object : ConfigType::GetObjectsByType<NotificationCommand>()) {
+ if (!addRowFn(object, LivestatusGroupByNone, Empty))
+ return;
+ }
+}
+
+Value CommandsTable::NameAccessor(const Value& row)
+{
+ Command::Ptr command = static_cast<Command::Ptr>(row);
+
+ return CompatUtility::GetCommandName(command);
+}
+
+Value CommandsTable::LineAccessor(const Value& row)
+{
+ Command::Ptr command = static_cast<Command::Ptr>(row);
+
+ if (!command)
+ return Empty;
+
+ return CompatUtility::GetCommandLine(command);
+}
+
+Value CommandsTable::CustomVariableNamesAccessor(const Value& row)
+{
+ Command::Ptr command = static_cast<Command::Ptr>(row);
+
+ if (!command)
+ return Empty;
+
+ Dictionary::Ptr vars = command->GetVars();
+
+ ArrayData keys;
+
+ if (vars) {
+ ObjectLock xlock(vars);
+ for (const auto& kv : vars) {
+ keys.push_back(kv.first);
+ }
+ }
+
+ return new Array(std::move(keys));
+}
+
+Value CommandsTable::CustomVariableValuesAccessor(const Value& row)
+{
+ Command::Ptr command = static_cast<Command::Ptr>(row);
+
+ if (!command)
+ return Empty;
+
+ Dictionary::Ptr vars = command->GetVars();
+
+ ArrayData keys;
+
+ if (vars) {
+ ObjectLock xlock(vars);
+ for (const auto& kv : vars) {
+ keys.push_back(kv.second);
+ }
+ }
+
+ return new Array(std::move(keys));
+}
+
+Value CommandsTable::CustomVariablesAccessor(const Value& row)
+{
+ Command::Ptr command = static_cast<Command::Ptr>(row);
+
+ if (!command)
+ return Empty;
+
+ Dictionary::Ptr vars = command->GetVars();
+
+ ArrayData result;
+
+ if (vars) {
+ ObjectLock xlock(vars);
+ for (const auto& kv : vars) {
+ result.push_back(new Array({
+ kv.first,
+ kv.second
+ }));
+ }
+ }
+
+ return new Array(std::move(result));
+}
diff --git a/lib/livestatus/commandstable.hpp b/lib/livestatus/commandstable.hpp
new file mode 100644
index 0000000..cd2d915
--- /dev/null
+++ b/lib/livestatus/commandstable.hpp
@@ -0,0 +1,41 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef COMMANDSTABLE_H
+#define COMMANDSTABLE_H
+
+#include "livestatus/table.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class CommandsTable final : public Table
+{
+public:
+ DECLARE_PTR_TYPEDEFS(CommandsTable);
+
+ CommandsTable();
+
+ static void AddColumns(Table *table, const String& prefix = String(),
+ const Column::ObjectAccessor& objectAccessor = Column::ObjectAccessor());
+
+ String GetName() const override;
+ String GetPrefix() const override;
+
+protected:
+ void FetchRows(const AddRowFunction& addRowFn) override;
+
+ static Value NameAccessor(const Value& row);
+ static Value LineAccessor(const Value& row);
+ static Value CustomVariableNamesAccessor(const Value& row);
+ static Value CustomVariableValuesAccessor(const Value& row);
+ static Value CustomVariablesAccessor(const Value& row);
+};
+
+}
+
+#endif /* COMMANDSTABLE_H */
diff --git a/lib/livestatus/commentstable.cpp b/lib/livestatus/commentstable.cpp
new file mode 100644
index 0000000..40bffad
--- /dev/null
+++ b/lib/livestatus/commentstable.cpp
@@ -0,0 +1,178 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/commentstable.hpp"
+#include "livestatus/hoststable.hpp"
+#include "livestatus/servicestable.hpp"
+#include "icinga/service.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+
+using namespace icinga;
+
+CommentsTable::CommentsTable()
+{
+ AddColumns(this);
+}
+
+void CommentsTable::AddColumns(Table *table, const String& prefix,
+ const Column::ObjectAccessor& objectAccessor)
+{
+ table->AddColumn(prefix + "author", Column(&CommentsTable::AuthorAccessor, objectAccessor));
+ table->AddColumn(prefix + "comment", Column(&CommentsTable::CommentAccessor, objectAccessor));
+ table->AddColumn(prefix + "id", Column(&CommentsTable::IdAccessor, objectAccessor));
+ table->AddColumn(prefix + "entry_time", Column(&CommentsTable::EntryTimeAccessor, objectAccessor));
+ table->AddColumn(prefix + "type", Column(&CommentsTable::TypeAccessor, objectAccessor));
+ table->AddColumn(prefix + "is_service", Column(&CommentsTable::IsServiceAccessor, objectAccessor));
+ table->AddColumn(prefix + "persistent", Column(&Table::OneAccessor, objectAccessor));
+ table->AddColumn(prefix + "source", Column(&Table::OneAccessor, objectAccessor));
+ table->AddColumn(prefix + "entry_type", Column(&CommentsTable::EntryTypeAccessor, objectAccessor));
+ table->AddColumn(prefix + "expires", Column(&CommentsTable::ExpiresAccessor, objectAccessor));
+ table->AddColumn(prefix + "expire_time", Column(&CommentsTable::ExpireTimeAccessor, objectAccessor));
+
+ /* order is important - host w/o services must not be empty */
+ ServicesTable::AddColumns(table, "service_", [objectAccessor](const Value& row, LivestatusGroupByType, const Object::Ptr&) -> Value {
+ return ServiceAccessor(row, objectAccessor);
+ });
+ HostsTable::AddColumns(table, "host_", [objectAccessor](const Value& row, LivestatusGroupByType, const Object::Ptr&) -> Value {
+ return HostAccessor(row, objectAccessor);
+ });
+}
+
+String CommentsTable::GetName() const
+{
+ return "comments";
+}
+
+String CommentsTable::GetPrefix() const
+{
+ return "comment";
+}
+
+void CommentsTable::FetchRows(const AddRowFunction& addRowFn)
+{
+ for (const Comment::Ptr& comment : ConfigType::GetObjectsByType<Comment>()) {
+ if (!addRowFn(comment, LivestatusGroupByNone, Empty))
+ return;
+ }
+}
+
+Object::Ptr CommentsTable::HostAccessor(const Value& row, const Column::ObjectAccessor&)
+{
+ Comment::Ptr comment = static_cast<Comment::Ptr>(row);
+
+ Checkable::Ptr checkable = comment->GetCheckable();
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ return host;
+}
+
+Object::Ptr CommentsTable::ServiceAccessor(const Value& row, const Column::ObjectAccessor&)
+{
+ Comment::Ptr comment = static_cast<Comment::Ptr>(row);
+
+ Checkable::Ptr checkable = comment->GetCheckable();
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ return service;
+}
+
+Value CommentsTable::AuthorAccessor(const Value& row)
+{
+ Comment::Ptr comment = static_cast<Comment::Ptr>(row);
+
+ if (!comment)
+ return Empty;
+
+ return comment->GetAuthor();
+}
+
+Value CommentsTable::CommentAccessor(const Value& row)
+{
+ Comment::Ptr comment = static_cast<Comment::Ptr>(row);
+
+ if (!comment)
+ return Empty;
+
+ return comment->GetText();
+}
+
+Value CommentsTable::IdAccessor(const Value& row)
+{
+ Comment::Ptr comment = static_cast<Comment::Ptr>(row);
+
+ if (!comment)
+ return Empty;
+
+ return comment->GetLegacyId();
+}
+
+Value CommentsTable::EntryTimeAccessor(const Value& row)
+{
+ Comment::Ptr comment = static_cast<Comment::Ptr>(row);
+
+ if (!comment)
+ return Empty;
+
+ return static_cast<int>(comment->GetEntryTime());
+}
+
+Value CommentsTable::TypeAccessor(const Value& row)
+{
+ Comment::Ptr comment = static_cast<Comment::Ptr>(row);
+ Checkable::Ptr checkable = comment->GetCheckable();
+
+ if (!checkable)
+ return Empty;
+
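+ /* Livestatus comment types: 1 = host comment, 2 = service comment. */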
+ if (dynamic_pointer_cast<Host>(checkable))
+ return 1;
+ else
+ return 2;
+}
+
+Value CommentsTable::IsServiceAccessor(const Value& row)
+{
+ Comment::Ptr comment = static_cast<Comment::Ptr>(row);
+ Checkable::Ptr checkable = comment->GetCheckable();
+
+ if (!checkable)
+ return Empty;
+
+ return (dynamic_pointer_cast<Host>(checkable) ? 0 : 1);
+}
+
+Value CommentsTable::EntryTypeAccessor(const Value& row)
+{
+ Comment::Ptr comment = static_cast<Comment::Ptr>(row);
+
+ if (!comment)
+ return Empty;
+
+ return comment->GetEntryType();
+}
+
+Value CommentsTable::ExpiresAccessor(const Value& row)
+{
+ Comment::Ptr comment = static_cast<Comment::Ptr>(row);
+
+ if (!comment)
+ return Empty;
+
+ return comment->GetExpireTime() != 0;
+}
+
+Value CommentsTable::ExpireTimeAccessor(const Value& row)
+{
+ Comment::Ptr comment = static_cast<Comment::Ptr>(row);
+
+ if (!comment)
+ return Empty;
+
+ return static_cast<int>(comment->GetExpireTime());
+}
diff --git a/lib/livestatus/commentstable.hpp b/lib/livestatus/commentstable.hpp
new file mode 100644
index 0000000..b46e155
--- /dev/null
+++ b/lib/livestatus/commentstable.hpp
@@ -0,0 +1,49 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef COMMENTSTABLE_H
+#define COMMENTSTABLE_H
+
+#include "livestatus/table.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class CommentsTable final : public Table
+{
+public:
+ DECLARE_PTR_TYPEDEFS(CommentsTable);
+
+ CommentsTable();
+
+ static void AddColumns(Table *table, const String& prefix = String(),
+ const Column::ObjectAccessor& objectAccessor = Column::ObjectAccessor());
+
+ String GetName() const override;
+ String GetPrefix() const override;
+
+protected:
+ void FetchRows(const AddRowFunction& addRowFn) override;
+
+private:
+ static Object::Ptr HostAccessor(const Value& row, const Column::ObjectAccessor& parentObjectAccessor);
+ static Object::Ptr ServiceAccessor(const Value& row, const Column::ObjectAccessor& parentObjectAccessor);
+
+ static Value AuthorAccessor(const Value& row);
+ static Value CommentAccessor(const Value& row);
+ static Value IdAccessor(const Value& row);
+ static Value EntryTimeAccessor(const Value& row);
+ static Value TypeAccessor(const Value& row);
+ static Value IsServiceAccessor(const Value& row);
+ static Value EntryTypeAccessor(const Value& row);
+ static Value ExpiresAccessor(const Value& row);
+ static Value ExpireTimeAccessor(const Value& row);
+};
+
+}
+
+#endif /* COMMENTSTABLE_H */
diff --git a/lib/livestatus/contactgroupstable.cpp b/lib/livestatus/contactgroupstable.cpp
new file mode 100644
index 0000000..b4d6853
--- /dev/null
+++ b/lib/livestatus/contactgroupstable.cpp
@@ -0,0 +1,74 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/contactgroupstable.hpp"
+#include "icinga/usergroup.hpp"
+#include "base/configtype.hpp"
+
+using namespace icinga;
+
+ContactGroupsTable::ContactGroupsTable()
+{
+ AddColumns(this);
+}
+
+void ContactGroupsTable::AddColumns(Table *table, const String& prefix,
+ const Column::ObjectAccessor& objectAccessor)
+{
+ table->AddColumn(prefix + "name", Column(&ContactGroupsTable::NameAccessor, objectAccessor));
+ table->AddColumn(prefix + "alias", Column(&ContactGroupsTable::AliasAccessor, objectAccessor));
+ table->AddColumn(prefix + "members", Column(&ContactGroupsTable::MembersAccessor, objectAccessor));
+}
+
+String ContactGroupsTable::GetName() const
+{
+ return "contactgroups";
+}
+
+String ContactGroupsTable::GetPrefix() const
+{
+ return "contactgroup";
+}
+
+void ContactGroupsTable::FetchRows(const AddRowFunction& addRowFn)
+{
+ for (const UserGroup::Ptr& ug : ConfigType::GetObjectsByType<UserGroup>()) {
+ if (!addRowFn(ug, LivestatusGroupByNone, Empty))
+ return;
+ }
+}
+
+Value ContactGroupsTable::NameAccessor(const Value& row)
+{
+ UserGroup::Ptr user_group = static_cast<UserGroup::Ptr>(row);
+
+ if (!user_group)
+ return Empty;
+
+ return user_group->GetName();
+}
+
+Value ContactGroupsTable::AliasAccessor(const Value& row)
+{
+ UserGroup::Ptr user_group = static_cast<UserGroup::Ptr>(row);
+
+ if (!user_group)
+ return Empty;
+
+ return user_group->GetName();
+}
+
+Value ContactGroupsTable::MembersAccessor(const Value& row)
+{
+ UserGroup::Ptr user_group = static_cast<UserGroup::Ptr>(row);
+
+ if (!user_group)
+ return Empty;
+
+ ArrayData result;
+
+ for (const User::Ptr& user : user_group->GetMembers()) {
+ result.push_back(user->GetName());
+ }
+
+ return new Array(std::move(result));
+}
diff --git a/lib/livestatus/contactgroupstable.hpp b/lib/livestatus/contactgroupstable.hpp
new file mode 100644
index 0000000..a57f5c3
--- /dev/null
+++ b/lib/livestatus/contactgroupstable.hpp
@@ -0,0 +1,39 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CONTACTGROUPSTABLE_H
+#define CONTACTGROUPSTABLE_H
+
+#include "livestatus/table.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class ContactGroupsTable final : public Table
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ContactGroupsTable);
+
+ ContactGroupsTable();
+
+ static void AddColumns(Table *table, const String& prefix = String(),
+ const Column::ObjectAccessor& objectAccessor = Column::ObjectAccessor());
+
+ String GetName() const override;
+ String GetPrefix() const override;
+
+protected:
+ void FetchRows(const AddRowFunction& addRowFn) override;
+
+ static Value NameAccessor(const Value& row);
+ static Value AliasAccessor(const Value& row);
+ static Value MembersAccessor(const Value& row);
+};
+
+}
+
+#endif /* CONTACTGROUPSTABLE_H */
diff --git a/lib/livestatus/contactstable.cpp b/lib/livestatus/contactstable.cpp
new file mode 100644
index 0000000..d6a04c4
--- /dev/null
+++ b/lib/livestatus/contactstable.cpp
@@ -0,0 +1,278 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/contactstable.hpp"
+#include "icinga/user.hpp"
+#include "icinga/timeperiod.hpp"
+#include "icinga/compatutility.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/json.hpp"
+#include "base/utility.hpp"
+
+using namespace icinga;
+
+ContactsTable::ContactsTable()
+{
+ AddColumns(this);
+}
+
+void ContactsTable::AddColumns(Table *table, const String& prefix,
+ const Column::ObjectAccessor& objectAccessor)
+{
+ table->AddColumn(prefix + "name", Column(&ContactsTable::NameAccessor, objectAccessor));
+ table->AddColumn(prefix + "alias", Column(&ContactsTable::NameAccessor, objectAccessor));
+ table->AddColumn(prefix + "email", Column(&ContactsTable::EmailAccessor, objectAccessor));
+ table->AddColumn(prefix + "pager", Column(&ContactsTable::PagerAccessor, objectAccessor));
+ table->AddColumn(prefix + "host_notification_period", Column(&ContactsTable::HostNotificationPeriodAccessor, objectAccessor));
+ table->AddColumn(prefix + "service_notification_period", Column(&ContactsTable::ServiceNotificationPeriodAccessor, objectAccessor));
+ table->AddColumn(prefix + "can_submit_commands", Column(&Table::OneAccessor, objectAccessor));
+ table->AddColumn(prefix + "host_notifications_enabled", Column(&ContactsTable::HostNotificationsEnabledAccessor, objectAccessor));
+ table->AddColumn(prefix + "service_notifications_enabled", Column(&ContactsTable::ServiceNotificationsEnabledAccessor, objectAccessor));
+ table->AddColumn(prefix + "in_host_notification_period", Column(&ContactsTable::InHostNotificationPeriodAccessor, objectAccessor));
+ table->AddColumn(prefix + "in_service_notification_period", Column(&ContactsTable::InServiceNotificationPeriodAccessor, objectAccessor));
+ table->AddColumn(prefix + "vars_variable_names", Column(&ContactsTable::CustomVariableNamesAccessor, objectAccessor));
+ table->AddColumn(prefix + "vars_variable_values", Column(&ContactsTable::CustomVariableValuesAccessor, objectAccessor));
+ table->AddColumn(prefix + "vars_variables", Column(&ContactsTable::CustomVariablesAccessor, objectAccessor));
+ table->AddColumn(prefix + "modified_attributes", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "modified_attributes_list", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "cv_is_json", Column(&ContactsTable::CVIsJsonAccessor, objectAccessor));
+
+}
+
+String ContactsTable::GetName() const
+{
+ return "contacts";
+}
+
+String ContactsTable::GetPrefix() const
+{
+ return "contact";
+}
+
+void ContactsTable::FetchRows(const AddRowFunction& addRowFn)
+{
+ for (const User::Ptr& user : ConfigType::GetObjectsByType<User>()) {
+ if (!addRowFn(user, LivestatusGroupByNone, Empty))
+ return;
+ }
+}
+
+Value ContactsTable::NameAccessor(const Value& row)
+{
+ User::Ptr user = static_cast<User::Ptr>(row);
+
+ if (!user)
+ return Empty;
+
+ return user->GetName();
+}
+
+Value ContactsTable::AliasAccessor(const Value& row)
+{
+ User::Ptr user = static_cast<User::Ptr>(row);
+
+ if (!user)
+ return Empty;
+
+ return user->GetDisplayName();
+}
+
+Value ContactsTable::EmailAccessor(const Value& row)
+{
+ User::Ptr user = static_cast<User::Ptr>(row);
+
+ if (!user)
+ return Empty;
+
+ return user->GetEmail();
+}
+
+Value ContactsTable::PagerAccessor(const Value& row)
+{
+ User::Ptr user = static_cast<User::Ptr>(row);
+
+ if (!user)
+ return Empty;
+
+ return user->GetPager();
+}
+
+Value ContactsTable::HostNotificationPeriodAccessor(const Value& row)
+{
+ User::Ptr user = static_cast<User::Ptr>(row);
+
+ if (!user)
+ return Empty;
+
+ /* same timeperiod as for service notifications - users have a single notification period */
+ TimePeriod::Ptr timeperiod = user->GetPeriod();
+
+ if (!timeperiod)
+ return Empty;
+
+ return timeperiod->GetName();
+}
+
+Value ContactsTable::ServiceNotificationPeriodAccessor(const Value& row)
+{
+ User::Ptr user = static_cast<User::Ptr>(row);
+
+ if (!user)
+ return Empty;
+
+ TimePeriod::Ptr timeperiod = user->GetPeriod();
+
+ if (!timeperiod)
+ return Empty;
+
+ return timeperiod->GetName();
+}
+
+Value ContactsTable::HostNotificationsEnabledAccessor(const Value& row)
+{
+ User::Ptr user = static_cast<User::Ptr>(row);
+
+ if (!user)
+ return Empty;
+
+ return (user->GetEnableNotifications() ? 1 : 0);
+}
+
+Value ContactsTable::ServiceNotificationsEnabledAccessor(const Value& row)
+{
+ User::Ptr user = static_cast<User::Ptr>(row);
+
+ if (!user)
+ return Empty;
+
+ return (user->GetEnableNotifications() ? 1 : 0);
+}
+
+Value ContactsTable::InHostNotificationPeriodAccessor(const Value& row)
+{
+ User::Ptr user = static_cast<User::Ptr>(row);
+
+ if (!user)
+ return Empty;
+
+ TimePeriod::Ptr timeperiod = user->GetPeriod();
+
+ if (!timeperiod)
+ return Empty;
+
+ return (timeperiod->IsInside(Utility::GetTime()) ? 1 : 0);
+}
+
+Value ContactsTable::InServiceNotificationPeriodAccessor(const Value& row)
+{
+ User::Ptr user = static_cast<User::Ptr>(row);
+
+ if (!user)
+ return Empty;
+
+ TimePeriod::Ptr timeperiod = user->GetPeriod();
+
+ if (!timeperiod)
+ return Empty;
+
+ return (timeperiod->IsInside(Utility::GetTime()) ? 1 : 0);
+}
+
+Value ContactsTable::CustomVariableNamesAccessor(const Value& row)
+{
+ User::Ptr user = static_cast<User::Ptr>(row);
+
+ if (!user)
+ return Empty;
+
+ Dictionary::Ptr vars = user->GetVars();
+
+ ArrayData result;
+
+ if (vars) {
+ ObjectLock olock(vars);
+ for (const Dictionary::Pair& kv : vars) {
+ result.push_back(kv.first);
+ }
+ }
+
+ return new Array(std::move(result));
+}
+
+Value ContactsTable::CustomVariableValuesAccessor(const Value& row)
+{
+ User::Ptr user = static_cast<User::Ptr>(row);
+
+ if (!user)
+ return Empty;
+
+ Dictionary::Ptr vars = user->GetVars();
+
+ ArrayData result;
+
+ if (vars) {
+ ObjectLock olock(vars);
+ for (const Dictionary::Pair& kv : vars) {
+ if (kv.second.IsObjectType<Array>() || kv.second.IsObjectType<Dictionary>())
+ result.push_back(JsonEncode(kv.second));
+ else
+ result.push_back(kv.second);
+ }
+ }
+
+ return new Array(std::move(result));
+}
+
+Value ContactsTable::CustomVariablesAccessor(const Value& row)
+{
+ User::Ptr user = static_cast<User::Ptr>(row);
+
+ if (!user)
+ return Empty;
+
+ Dictionary::Ptr vars = user->GetVars();
+
+ ArrayData result;
+
+ if (vars) {
+ ObjectLock olock(vars);
+ for (const Dictionary::Pair& kv : vars) {
+ Value val;
+
+ if (kv.second.IsObjectType<Array>() || kv.second.IsObjectType<Dictionary>())
+ val = JsonEncode(kv.second);
+ else
+ val = kv.second;
+
+ result.push_back(new Array({
+ kv.first,
+ val
+ }));
+ }
+ }
+
+ return new Array(std::move(result));
+}
+
+Value ContactsTable::CVIsJsonAccessor(const Value& row)
+{
+ User::Ptr user = static_cast<User::Ptr>(row);
+
+ if (!user)
+ return Empty;
+
+ Dictionary::Ptr vars = user->GetVars();
+
+ if (!vars)
+ return Empty;
+
+ bool cv_is_json = false;
+
+ ObjectLock olock(vars);
+ for (const Dictionary::Pair& kv : vars) {
+ if (kv.second.IsObjectType<Array>() || kv.second.IsObjectType<Dictionary>())
+ cv_is_json = true;
+ }
+
+ return cv_is_json;
+}
diff --git a/lib/livestatus/contactstable.hpp b/lib/livestatus/contactstable.hpp
new file mode 100644
index 0000000..0bd2679
--- /dev/null
+++ b/lib/livestatus/contactstable.hpp
@@ -0,0 +1,50 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CONTACTSTABLE_H
+#define CONTACTSTABLE_H
+
+#include "livestatus/table.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class ContactsTable final : public Table
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ContactsTable);
+
+ ContactsTable();
+
+ static void AddColumns(Table *table, const String& prefix = String(),
+ const Column::ObjectAccessor& objectAccessor = Column::ObjectAccessor());
+
+ String GetName() const override;
+ String GetPrefix() const override;
+
+protected:
+ void FetchRows(const AddRowFunction& addRowFn) override;
+
+ static Value NameAccessor(const Value& row);
+ static Value AliasAccessor(const Value& row);
+ static Value EmailAccessor(const Value& row);
+ static Value PagerAccessor(const Value& row);
+ static Value HostNotificationPeriodAccessor(const Value& row);
+ static Value ServiceNotificationPeriodAccessor(const Value& row);
+ static Value HostNotificationsEnabledAccessor(const Value& row);
+ static Value ServiceNotificationsEnabledAccessor(const Value& row);
+ static Value InHostNotificationPeriodAccessor(const Value& row);
+ static Value InServiceNotificationPeriodAccessor(const Value& row);
+ static Value CustomVariableNamesAccessor(const Value& row);
+ static Value CustomVariableValuesAccessor(const Value& row);
+ static Value CustomVariablesAccessor(const Value& row);
+ static Value CVIsJsonAccessor(const Value& row);
+};
+
+}
+
+#endif /* CONTACTSTABLE_H */
diff --git a/lib/livestatus/countaggregator.cpp b/lib/livestatus/countaggregator.cpp
new file mode 100644
index 0000000..b8a7238
--- /dev/null
+++ b/lib/livestatus/countaggregator.cpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/countaggregator.hpp"
+
+using namespace icinga;
+
+CountAggregatorState *CountAggregator::EnsureState(AggregatorState **state)
+{
+ if (!*state)
+ *state = new CountAggregatorState();
+
+ return static_cast<CountAggregatorState *>(*state);
+}
+
+void CountAggregator::Apply(const Table::Ptr& table, const Value& row, AggregatorState **state)
+{
+ CountAggregatorState *pstate = EnsureState(state);
+
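+ /* Only rows that match the aggregator's filter contribute to the count. */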
+ if (GetFilter()->Apply(table, row))
+ pstate->Count++;
+}
+
+double CountAggregator::GetResultAndFreeState(AggregatorState *state) const
+{
+ CountAggregatorState *pstate = EnsureState(&state);
+ double result = pstate->Count;
+ delete pstate;
+
+ return result;
+}
diff --git a/lib/livestatus/countaggregator.hpp b/lib/livestatus/countaggregator.hpp
new file mode 100644
index 0000000..22d4983
--- /dev/null
+++ b/lib/livestatus/countaggregator.hpp
@@ -0,0 +1,37 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef COUNTAGGREGATOR_H
+#define COUNTAGGREGATOR_H
+
+#include "livestatus/table.hpp"
+#include "livestatus/aggregator.hpp"
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+struct CountAggregatorState final : public AggregatorState
+{
+ int Count{0};
+};
+
+/**
+ * @ingroup livestatus
+ */
+class CountAggregator final : public Aggregator
+{
+public:
+ DECLARE_PTR_TYPEDEFS(CountAggregator);
+
+ void Apply(const Table::Ptr& table, const Value& row, AggregatorState **) override;
+ double GetResultAndFreeState(AggregatorState *state) const override;
+
+private:
+ static CountAggregatorState *EnsureState(AggregatorState **state);
+};
+
+}
+
+#endif /* COUNTAGGREGATOR_H */
diff --git a/lib/livestatus/downtimestable.cpp b/lib/livestatus/downtimestable.cpp
new file mode 100644
index 0000000..09c111e
--- /dev/null
+++ b/lib/livestatus/downtimestable.cpp
@@ -0,0 +1,168 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/downtimestable.hpp"
+#include "livestatus/hoststable.hpp"
+#include "livestatus/servicestable.hpp"
+#include "icinga/service.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+
+using namespace icinga;
+
+DowntimesTable::DowntimesTable()
+{
+ AddColumns(this);
+}
+
+void DowntimesTable::AddColumns(Table *table, const String& prefix,
+ const Column::ObjectAccessor& objectAccessor)
+{
+ table->AddColumn(prefix + "author", Column(&DowntimesTable::AuthorAccessor, objectAccessor));
+ table->AddColumn(prefix + "comment", Column(&DowntimesTable::CommentAccessor, objectAccessor));
+ table->AddColumn(prefix + "id", Column(&DowntimesTable::IdAccessor, objectAccessor));
+ table->AddColumn(prefix + "entry_time", Column(&DowntimesTable::EntryTimeAccessor, objectAccessor));
+ table->AddColumn(prefix + "type", Column(&DowntimesTable::TypeAccessor, objectAccessor));
+ table->AddColumn(prefix + "is_service", Column(&DowntimesTable::IsServiceAccessor, objectAccessor));
+ table->AddColumn(prefix + "start_time", Column(&DowntimesTable::StartTimeAccessor, objectAccessor));
+ table->AddColumn(prefix + "end_time", Column(&DowntimesTable::EndTimeAccessor, objectAccessor));
+ table->AddColumn(prefix + "fixed", Column(&DowntimesTable::FixedAccessor, objectAccessor));
+ table->AddColumn(prefix + "duration", Column(&DowntimesTable::DurationAccessor, objectAccessor));
+ table->AddColumn(prefix + "triggered_by", Column(&DowntimesTable::TriggeredByAccessor, objectAccessor));
+
+ /* order is important - host w/o services must not be empty */
+ ServicesTable::AddColumns(table, "service_", [objectAccessor](const Value& row, LivestatusGroupByType, const Object::Ptr&) -> Value {
+ return ServiceAccessor(row, objectAccessor);
+ });
+ HostsTable::AddColumns(table, "host_", [objectAccessor](const Value& row, LivestatusGroupByType, const Object::Ptr&) -> Value {
+ return HostAccessor(row, objectAccessor);
+ });
+}
+
+String DowntimesTable::GetName() const
+{
+ return "downtimes";
+}
+
+String DowntimesTable::GetPrefix() const
+{
+ return "downtime";
+}
+
+void DowntimesTable::FetchRows(const AddRowFunction& addRowFn)
+{
+ for (const Downtime::Ptr& downtime : ConfigType::GetObjectsByType<Downtime>()) {
+ if (!addRowFn(downtime, LivestatusGroupByNone, Empty))
+ return;
+ }
+}
+
+Object::Ptr DowntimesTable::HostAccessor(const Value& row, const Column::ObjectAccessor&)
+{
+ Downtime::Ptr downtime = static_cast<Downtime::Ptr>(row);
+
+ Checkable::Ptr checkable = downtime->GetCheckable();
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ return host;
+}
+
+Object::Ptr DowntimesTable::ServiceAccessor(const Value& row, const Column::ObjectAccessor&)
+{
+ Downtime::Ptr downtime = static_cast<Downtime::Ptr>(row);
+
+ Checkable::Ptr checkable = downtime->GetCheckable();
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ return service;
+}
+
+Value DowntimesTable::AuthorAccessor(const Value& row)
+{
+ Downtime::Ptr downtime = static_cast<Downtime::Ptr>(row);
+
+ return downtime->GetAuthor();
+}
+
+Value DowntimesTable::CommentAccessor(const Value& row)
+{
+ Downtime::Ptr downtime = static_cast<Downtime::Ptr>(row);
+
+ return downtime->GetComment();
+}
+
+Value DowntimesTable::IdAccessor(const Value& row)
+{
+ Downtime::Ptr downtime = static_cast<Downtime::Ptr>(row);
+
+ return downtime->GetLegacyId();
+}
+
+Value DowntimesTable::EntryTimeAccessor(const Value& row)
+{
+ Downtime::Ptr downtime = static_cast<Downtime::Ptr>(row);
+
+ return static_cast<int>(downtime->GetEntryTime());
+}
+
+Value DowntimesTable::TypeAccessor(const Value& row)
+{
+ Downtime::Ptr downtime = static_cast<Downtime::Ptr>(row);
+ // 1 .. active, 0 .. pending
+ return (downtime->IsInEffect() ? 1 : 0);
+}
+
+Value DowntimesTable::IsServiceAccessor(const Value& row)
+{
+ Downtime::Ptr downtime = static_cast<Downtime::Ptr>(row);
+ Checkable::Ptr checkable = downtime->GetCheckable();
+
+ return (dynamic_pointer_cast<Host>(checkable) ? 0 : 1);
+}
+
+Value DowntimesTable::StartTimeAccessor(const Value& row)
+{
+ Downtime::Ptr downtime = static_cast<Downtime::Ptr>(row);
+
+ return static_cast<int>(downtime->GetStartTime());
+}
+
+Value DowntimesTable::EndTimeAccessor(const Value& row)
+{
+ Downtime::Ptr downtime = static_cast<Downtime::Ptr>(row);
+
+ return static_cast<int>(downtime->GetEndTime());
+}
+
+Value DowntimesTable::FixedAccessor(const Value& row)
+{
+ Downtime::Ptr downtime = static_cast<Downtime::Ptr>(row);
+
+ return downtime->GetFixed();
+}
+
+Value DowntimesTable::DurationAccessor(const Value& row)
+{
+ Downtime::Ptr downtime = static_cast<Downtime::Ptr>(row);
+
+ return downtime->GetDuration();
+}
+
+Value DowntimesTable::TriggeredByAccessor(const Value& row)
+{
+ Downtime::Ptr downtime = static_cast<Downtime::Ptr>(row);
+
+ String triggerDowntimeName = downtime->GetTriggeredBy();
+
+ Downtime::Ptr triggerDowntime = Downtime::GetByName(triggerDowntimeName);
+
+ if (triggerDowntime)
+ return triggerDowntime->GetLegacyId();
+
+ return Empty;
+}
diff --git a/lib/livestatus/downtimestable.hpp b/lib/livestatus/downtimestable.hpp
new file mode 100644
index 0000000..4b5c909
--- /dev/null
+++ b/lib/livestatus/downtimestable.hpp
@@ -0,0 +1,51 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef DOWNTIMESTABLE_H
+#define DOWNTIMESTABLE_H
+
+#include "livestatus/table.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class DowntimesTable final : public Table
+{
+public:
+ DECLARE_PTR_TYPEDEFS(DowntimesTable);
+
+ DowntimesTable();
+
+ static void AddColumns(Table *table, const String& prefix = String(),
+ const Column::ObjectAccessor& objectAccessor = Column::ObjectAccessor());
+
+ String GetName() const override;
+ String GetPrefix() const override;
+
+protected:
+ void FetchRows(const AddRowFunction& addRowFn) override;
+
+private:
+ static Object::Ptr HostAccessor(const Value& row, const Column::ObjectAccessor& parentObjectAccessor);
+ static Object::Ptr ServiceAccessor(const Value& row, const Column::ObjectAccessor& parentObjectAccessor);
+
+ static Value AuthorAccessor(const Value& row);
+ static Value CommentAccessor(const Value& row);
+ static Value IdAccessor(const Value& row);
+ static Value EntryTimeAccessor(const Value& row);
+ static Value TypeAccessor(const Value& row);
+ static Value IsServiceAccessor(const Value& row);
+ static Value StartTimeAccessor(const Value& row);
+ static Value EndTimeAccessor(const Value& row);
+ static Value FixedAccessor(const Value& row);
+ static Value DurationAccessor(const Value& row);
+ static Value TriggeredByAccessor(const Value& row);
+};
+
+}
+
+#endif /* DOWNTIMESTABLE_H */
diff --git a/lib/livestatus/endpointstable.cpp b/lib/livestatus/endpointstable.cpp
new file mode 100644
index 0000000..3d407eb
--- /dev/null
+++ b/lib/livestatus/endpointstable.cpp
@@ -0,0 +1,109 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/endpointstable.hpp"
+#include "icinga/host.hpp"
+#include "icinga/service.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "remote/endpoint.hpp"
+#include "remote/zone.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/convert.hpp"
+#include "base/utility.hpp"
+#include <boost/algorithm/string/replace.hpp>
+
+using namespace icinga;
+
+EndpointsTable::EndpointsTable()
+{
+ AddColumns(this);
+}
+
+void EndpointsTable::AddColumns(Table *table, const String& prefix,
+ const Column::ObjectAccessor& objectAccessor)
+{
+ table->AddColumn(prefix + "name", Column(&EndpointsTable::NameAccessor, objectAccessor));
+ table->AddColumn(prefix + "identity", Column(&EndpointsTable::IdentityAccessor, objectAccessor));
+ table->AddColumn(prefix + "node", Column(&EndpointsTable::NodeAccessor, objectAccessor));
+ table->AddColumn(prefix + "is_connected", Column(&EndpointsTable::IsConnectedAccessor, objectAccessor));
+ table->AddColumn(prefix + "zone", Column(&EndpointsTable::ZoneAccessor, objectAccessor));
+}
+
+String EndpointsTable::GetName() const
+{
+ return "endpoints";
+}
+
+String EndpointsTable::GetPrefix() const
+{
+ return "endpoint";
+}
+
+void EndpointsTable::FetchRows(const AddRowFunction& addRowFn)
+{
+ for (const Endpoint::Ptr& endpoint : ConfigType::GetObjectsByType<Endpoint>()) {
+ if (!addRowFn(endpoint, LivestatusGroupByNone, Empty))
+ return;
+ }
+}
+
+Value EndpointsTable::NameAccessor(const Value& row)
+{
+ Endpoint::Ptr endpoint = static_cast<Endpoint::Ptr>(row);
+
+ if (!endpoint)
+ return Empty;
+
+ return endpoint->GetName();
+}
+
+Value EndpointsTable::IdentityAccessor(const Value& row)
+{
+ Endpoint::Ptr endpoint = static_cast<Endpoint::Ptr>(row);
+
+ if (!endpoint)
+ return Empty;
+
+ return endpoint->GetName();
+}
+
+Value EndpointsTable::NodeAccessor(const Value& row)
+{
+ Endpoint::Ptr endpoint = static_cast<Endpoint::Ptr>(row);
+
+ if (!endpoint)
+ return Empty;
+
+ return IcingaApplication::GetInstance()->GetNodeName();
+}
+
+Value EndpointsTable::IsConnectedAccessor(const Value& row)
+{
+ Endpoint::Ptr endpoint = static_cast<Endpoint::Ptr>(row);
+
+ if (!endpoint)
+ return Empty;
+
+ unsigned int is_connected = endpoint->GetConnected() ? 1 : 0;
+
+ /* if identity is equal to node, fake is_connected */
+ if (endpoint->GetName() == IcingaApplication::GetInstance()->GetNodeName())
+ is_connected = 1;
+
+ return is_connected;
+}
+
+Value EndpointsTable::ZoneAccessor(const Value& row)
+{
+ Endpoint::Ptr endpoint = static_cast<Endpoint::Ptr>(row);
+
+ if (!endpoint)
+ return Empty;
+
+ Zone::Ptr zone = endpoint->GetZone();
+
+ if (!zone)
+ return Empty;
+
+ return zone->GetName();
+}
diff --git a/lib/livestatus/endpointstable.hpp b/lib/livestatus/endpointstable.hpp
new file mode 100644
index 0000000..7d011ef
--- /dev/null
+++ b/lib/livestatus/endpointstable.hpp
@@ -0,0 +1,41 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef ENDPOINTSTABLE_H
+#define ENDPOINTSTABLE_H
+
+#include "livestatus/table.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class EndpointsTable final : public Table
+{
+public:
+ DECLARE_PTR_TYPEDEFS(EndpointsTable);
+
+ EndpointsTable();
+
+ static void AddColumns(Table *table, const String& prefix = String(),
+ const Column::ObjectAccessor& objectAccessor = Column::ObjectAccessor());
+
+ String GetName() const override;
+ String GetPrefix() const override;
+
+protected:
+ void FetchRows(const AddRowFunction& addRowFn) override;
+
+ static Value NameAccessor(const Value& row);
+ static Value IdentityAccessor(const Value& row);
+ static Value NodeAccessor(const Value& row);
+ static Value IsConnectedAccessor(const Value& row);
+ static Value ZoneAccessor(const Value& row);
+};
+
+}
+
+#endif /* ENDPOINTSTABLE_H */
diff --git a/lib/livestatus/filter.hpp b/lib/livestatus/filter.hpp
new file mode 100644
index 0000000..b9a01c8
--- /dev/null
+++ b/lib/livestatus/filter.hpp
@@ -0,0 +1,28 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef FILTER_H
+#define FILTER_H
+
+#include "livestatus/i2-livestatus.hpp"
+#include "livestatus/table.hpp"
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class Filter : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(Filter);
+
+ virtual bool Apply(const Table::Ptr& table, const Value& row) = 0;
+
+protected:
+ Filter() = default;
+};
+
+}
+
+#endif /* FILTER_H */
diff --git a/lib/livestatus/historytable.hpp b/lib/livestatus/historytable.hpp
new file mode 100644
index 0000000..f117857
--- /dev/null
+++ b/lib/livestatus/historytable.hpp
@@ -0,0 +1,24 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef HISTORYTABLE_H
+#define HISTORYTABLE_H
+
+#include "livestatus/table.hpp"
+#include "base/dictionary.hpp"
+
+namespace icinga
+{
+
+
+/**
+ * @ingroup livestatus
+ */
+class HistoryTable : public Table
+{
+public:
+ virtual void UpdateLogEntries(const Dictionary::Ptr& bag, int line_count, int lineno, const AddRowFunction& addRowFn) = 0;
+};
+
+}
+
+#endif /* HISTORYTABLE_H */
diff --git a/lib/livestatus/hostgroupstable.cpp b/lib/livestatus/hostgroupstable.cpp
new file mode 100644
index 0000000..984eddb
--- /dev/null
+++ b/lib/livestatus/hostgroupstable.cpp
@@ -0,0 +1,473 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/hostgroupstable.hpp"
+#include "icinga/hostgroup.hpp"
+#include "icinga/host.hpp"
+#include "icinga/service.hpp"
+#include "base/configtype.hpp"
+
+using namespace icinga;
+
+HostGroupsTable::HostGroupsTable()
+{
+ AddColumns(this);
+}
+
+void HostGroupsTable::AddColumns(Table *table, const String& prefix,
+ const Column::ObjectAccessor& objectAccessor)
+{
+ table->AddColumn(prefix + "name", Column(&HostGroupsTable::NameAccessor, objectAccessor));
+ table->AddColumn(prefix + "alias", Column(&HostGroupsTable::AliasAccessor, objectAccessor));
+ table->AddColumn(prefix + "notes", Column(&HostGroupsTable::NotesAccessor, objectAccessor));
+ table->AddColumn(prefix + "notes_url", Column(&HostGroupsTable::NotesUrlAccessor, objectAccessor));
+ table->AddColumn(prefix + "action_url", Column(&HostGroupsTable::ActionUrlAccessor, objectAccessor));
+ table->AddColumn(prefix + "members", Column(&HostGroupsTable::MembersAccessor, objectAccessor));
+ table->AddColumn(prefix + "members_with_state", Column(&HostGroupsTable::MembersWithStateAccessor, objectAccessor));
+ table->AddColumn(prefix + "worst_host_state", Column(&HostGroupsTable::WorstHostStateAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_hosts", Column(&HostGroupsTable::NumHostsAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_hosts_pending", Column(&HostGroupsTable::NumHostsPendingAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_hosts_up", Column(&HostGroupsTable::NumHostsUpAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_hosts_down", Column(&HostGroupsTable::NumHostsDownAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_hosts_unreach", Column(&HostGroupsTable::NumHostsUnreachAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services", Column(&HostGroupsTable::NumServicesAccessor, objectAccessor));
+ table->AddColumn(prefix + "worst_service_state", Column(&HostGroupsTable::WorstServiceStateAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_pending", Column(&HostGroupsTable::NumServicesPendingAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_ok", Column(&HostGroupsTable::NumServicesOkAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_warn", Column(&HostGroupsTable::NumServicesWarnAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_crit", Column(&HostGroupsTable::NumServicesCritAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_unknown", Column(&HostGroupsTable::NumServicesUnknownAccessor, objectAccessor));
+ table->AddColumn(prefix + "worst_service_hard_state", Column(&HostGroupsTable::WorstServiceHardStateAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_hard_ok", Column(&HostGroupsTable::NumServicesHardOkAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_hard_warn", Column(&HostGroupsTable::NumServicesHardWarnAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_hard_crit", Column(&HostGroupsTable::NumServicesHardCritAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_hard_unknown", Column(&HostGroupsTable::NumServicesHardUnknownAccessor, objectAccessor));
+}
+
+String HostGroupsTable::GetName() const
+{
+ return "hostgroups";
+}
+
+String HostGroupsTable::GetPrefix() const
+{
+ return "hostgroup";
+}
+
+void HostGroupsTable::FetchRows(const AddRowFunction& addRowFn)
+{
+ for (const HostGroup::Ptr& hg : ConfigType::GetObjectsByType<HostGroup>()) {
+ if (!addRowFn(hg, LivestatusGroupByNone, Empty))
+ return;
+ }
+}
+
+Value HostGroupsTable::NameAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ return hg->GetName();
+}
+
+Value HostGroupsTable::AliasAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ return hg->GetDisplayName();
+}
+
+Value HostGroupsTable::NotesAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ return hg->GetNotes();
+}
+
+Value HostGroupsTable::NotesUrlAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ return hg->GetNotesUrl();
+}
+
+Value HostGroupsTable::ActionUrlAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ return hg->GetActionUrl();
+}
+
+Value HostGroupsTable::MembersAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ ArrayData members;
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ members.push_back(host->GetName());
+ }
+
+ return new Array(std::move(members));
+}
+
+Value HostGroupsTable::MembersWithStateAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ ArrayData members;
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ members.push_back(new Array({
+ host->GetName(),
+ host->GetState()
+ }));
+ }
+
+ return new Array(std::move(members));
+}
+
+Value HostGroupsTable::WorstHostStateAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
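+ /* Host states are ordered numerically (Up = 0, Down = 1), so the highest value is the worst. */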
+ int worst_host = HostUp;
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ if (host->GetState() > worst_host)
+ worst_host = host->GetState();
+ }
+
+ return worst_host;
+}
+
+Value HostGroupsTable::NumHostsAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ return hg->GetMembers().size();
+}
+
+Value HostGroupsTable::NumHostsPendingAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ int num_hosts = 0;
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ /* no checkresult */
+ if (!host->GetLastCheckResult())
+ num_hosts++;
+ }
+
+ return num_hosts;
+}
+
+Value HostGroupsTable::NumHostsUpAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ int num_hosts = 0;
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ if (host->GetState() == HostUp)
+ num_hosts++;
+ }
+
+ return num_hosts;
+}
+
+Value HostGroupsTable::NumHostsDownAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ int num_hosts = 0;
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ if (host->GetState() == HostDown)
+ num_hosts++;
+ }
+
+ return num_hosts;
+}
+
+Value HostGroupsTable::NumHostsUnreachAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ int num_hosts = 0;
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ if (!host->IsReachable())
+ num_hosts++;
+ }
+
+ return num_hosts;
+}
+
+Value HostGroupsTable::NumServicesAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ int num_services = 0;
+
+ if (hg->GetMembers().size() == 0)
+ return 0;
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ num_services += host->GetServices().size();
+ }
+
+ return num_services;
+}
+
+Value HostGroupsTable::WorstServiceStateAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ Value worst_service = ServiceOK;
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (service->GetState() > worst_service)
+ worst_service = service->GetState();
+ }
+ }
+
+ return worst_service;
+}
+
+Value HostGroupsTable::NumServicesPendingAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (!service->GetLastCheckResult())
+ num_services++;
+ }
+ }
+
+ return num_services;
+}
+
+Value HostGroupsTable::NumServicesOkAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (service->GetState() == ServiceOK)
+ num_services++;
+ }
+ }
+
+ return num_services;
+}
+
+Value HostGroupsTable::NumServicesWarnAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (service->GetState() == ServiceWarning)
+ num_services++;
+ }
+ }
+
+ return num_services;
+}
+
+Value HostGroupsTable::NumServicesCritAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (service->GetState() == ServiceCritical)
+ num_services++;
+ }
+ }
+
+ return num_services;
+}
+
+Value HostGroupsTable::NumServicesUnknownAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (service->GetState() == ServiceUnknown)
+ num_services++;
+ }
+ }
+
+ return num_services;
+}
+
+Value HostGroupsTable::WorstServiceHardStateAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ Value worst_service = ServiceOK;
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (service->GetStateType() == StateTypeHard) {
+ if (service->GetState() > worst_service)
+ worst_service = service->GetState();
+ }
+ }
+ }
+
+ return worst_service;
+}
+
+Value HostGroupsTable::NumServicesHardOkAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (service->GetStateType() == StateTypeHard && service->GetState() == ServiceOK)
+ num_services++;
+ }
+ }
+
+ return num_services;
+}
+
+Value HostGroupsTable::NumServicesHardWarnAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (service->GetStateType() == StateTypeHard && service->GetState() == ServiceWarning)
+ num_services++;
+ }
+ }
+
+ return num_services;
+}
+
+Value HostGroupsTable::NumServicesHardCritAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (service->GetStateType() == StateTypeHard && service->GetState() == ServiceCritical)
+ num_services++;
+ }
+ }
+
+ return num_services;
+}
+
+Value HostGroupsTable::NumServicesHardUnknownAccessor(const Value& row)
+{
+ HostGroup::Ptr hg = static_cast<HostGroup::Ptr>(row);
+
+ if (!hg)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (service->GetStateType() == StateTypeHard && service->GetState() == ServiceUnknown)
+ num_services++;
+ }
+ }
+
+ return num_services;
+}
diff --git a/lib/livestatus/hostgroupstable.hpp b/lib/livestatus/hostgroupstable.hpp
new file mode 100644
index 0000000..cc5039f
--- /dev/null
+++ b/lib/livestatus/hostgroupstable.hpp
@@ -0,0 +1,61 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef HOSTGROUPSTABLE_H
+#define HOSTGROUPSTABLE_H
+
+#include "livestatus/table.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class HostGroupsTable final : public Table
+{
+public:
+ DECLARE_PTR_TYPEDEFS(HostGroupsTable);
+
+ HostGroupsTable();
+
+ static void AddColumns(Table *table, const String& prefix = String(),
+ const Column::ObjectAccessor& objectAccessor = Column::ObjectAccessor());
+
+ String GetName() const override;
+ String GetPrefix() const override;
+
+protected:
+ void FetchRows(const AddRowFunction& addRowFn) override;
+
+ static Value NameAccessor(const Value& row);
+ static Value AliasAccessor(const Value& row);
+ static Value NotesAccessor(const Value& row);
+ static Value NotesUrlAccessor(const Value& row);
+ static Value ActionUrlAccessor(const Value& row);
+ static Value MembersAccessor(const Value& row);
+ static Value MembersWithStateAccessor(const Value& row);
+ static Value WorstHostStateAccessor(const Value& row);
+ static Value NumHostsAccessor(const Value& row);
+ static Value NumHostsPendingAccessor(const Value& row);
+ static Value NumHostsUpAccessor(const Value& row);
+ static Value NumHostsDownAccessor(const Value& row);
+ static Value NumHostsUnreachAccessor(const Value& row);
+ static Value NumServicesAccessor(const Value& row);
+ static Value WorstServiceStateAccessor(const Value& row);
+ static Value NumServicesPendingAccessor(const Value& row);
+ static Value NumServicesOkAccessor(const Value& row);
+ static Value NumServicesWarnAccessor(const Value& row);
+ static Value NumServicesCritAccessor(const Value& row);
+ static Value NumServicesUnknownAccessor(const Value& row);
+ static Value WorstServiceHardStateAccessor(const Value& row);
+ static Value NumServicesHardOkAccessor(const Value& row);
+ static Value NumServicesHardWarnAccessor(const Value& row);
+ static Value NumServicesHardCritAccessor(const Value& row);
+ static Value NumServicesHardUnknownAccessor(const Value& row);
+};
+
+}
+
+#endif /* HOSTGROUPSTABLE_H */
diff --git a/lib/livestatus/hoststable.cpp b/lib/livestatus/hoststable.cpp
new file mode 100644
index 0000000..d90f4a5
--- /dev/null
+++ b/lib/livestatus/hoststable.cpp
@@ -0,0 +1,1517 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/hoststable.hpp"
+#include "livestatus/hostgroupstable.hpp"
+#include "livestatus/endpointstable.hpp"
+#include "icinga/host.hpp"
+#include "icinga/service.hpp"
+#include "icinga/hostgroup.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/eventcommand.hpp"
+#include "icinga/timeperiod.hpp"
+#include "icinga/macroprocessor.hpp"
+#include "icinga/compatutility.hpp"
+#include "icinga/pluginutility.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/json.hpp"
+#include "base/convert.hpp"
+#include "base/utility.hpp"
+#include <boost/algorithm/string/replace.hpp>
+
+using namespace icinga;
+
+HostsTable::HostsTable(LivestatusGroupByType type)
+ :Table(type)
+{
+ AddColumns(this);
+}
+
+void HostsTable::AddColumns(Table *table, const String& prefix,
+ const Column::ObjectAccessor& objectAccessor)
+{
+ table->AddColumn(prefix + "name", Column(&HostsTable::NameAccessor, objectAccessor));
+ table->AddColumn(prefix + "host_name", Column(&HostsTable::NameAccessor, objectAccessor)); //ugly compatibility hack
+ table->AddColumn(prefix + "display_name", Column(&HostsTable::DisplayNameAccessor, objectAccessor));
+ table->AddColumn(prefix + "alias", Column(&HostsTable::DisplayNameAccessor, objectAccessor));
+ table->AddColumn(prefix + "address", Column(&HostsTable::AddressAccessor, objectAccessor));
+ table->AddColumn(prefix + "address6", Column(&HostsTable::Address6Accessor, objectAccessor));
+ table->AddColumn(prefix + "check_command", Column(&HostsTable::CheckCommandAccessor, objectAccessor));
+ table->AddColumn(prefix + "check_command_expanded", Column(&HostsTable::CheckCommandExpandedAccessor, objectAccessor));
+ table->AddColumn(prefix + "event_handler", Column(&HostsTable::EventHandlerAccessor, objectAccessor));
+ table->AddColumn(prefix + "notification_period", Column(&Table::EmptyStringAccessor, objectAccessor));
+ table->AddColumn(prefix + "check_period", Column(&HostsTable::CheckPeriodAccessor, objectAccessor));
+ table->AddColumn(prefix + "notes", Column(&HostsTable::NotesAccessor, objectAccessor));
+ table->AddColumn(prefix + "notes_expanded", Column(&HostsTable::NotesExpandedAccessor, objectAccessor));
+ table->AddColumn(prefix + "notes_url", Column(&HostsTable::NotesUrlAccessor, objectAccessor));
+ table->AddColumn(prefix + "notes_url_expanded", Column(&HostsTable::NotesUrlExpandedAccessor, objectAccessor));
+ table->AddColumn(prefix + "action_url", Column(&HostsTable::ActionUrlAccessor, objectAccessor));
+ table->AddColumn(prefix + "action_url_expanded", Column(&HostsTable::ActionUrlExpandedAccessor, objectAccessor));
+ table->AddColumn(prefix + "plugin_output", Column(&HostsTable::PluginOutputAccessor, objectAccessor));
+ table->AddColumn(prefix + "perf_data", Column(&HostsTable::PerfDataAccessor, objectAccessor));
+ table->AddColumn(prefix + "icon_image", Column(&HostsTable::IconImageAccessor, objectAccessor));
+ table->AddColumn(prefix + "icon_image_expanded", Column(&HostsTable::IconImageExpandedAccessor, objectAccessor));
+ table->AddColumn(prefix + "icon_image_alt", Column(&HostsTable::IconImageAltAccessor, objectAccessor));
+ table->AddColumn(prefix + "statusmap_image", Column(&Table::EmptyStringAccessor, objectAccessor));
+ table->AddColumn(prefix + "long_plugin_output", Column(&HostsTable::LongPluginOutputAccessor, objectAccessor));
+ table->AddColumn(prefix + "initial_state", Column(&Table::EmptyStringAccessor, objectAccessor));
+ table->AddColumn(prefix + "max_check_attempts", Column(&HostsTable::MaxCheckAttemptsAccessor, objectAccessor));
+ table->AddColumn(prefix + "flap_detection_enabled", Column(&HostsTable::FlapDetectionEnabledAccessor, objectAccessor));
+ table->AddColumn(prefix + "check_freshness", Column(&Table::OneAccessor, objectAccessor));
+ table->AddColumn(prefix + "process_performance_data", Column(&HostsTable::ProcessPerformanceDataAccessor, objectAccessor));
+ table->AddColumn(prefix + "accept_passive_checks", Column(&HostsTable::AcceptPassiveChecksAccessor, objectAccessor));
+ table->AddColumn(prefix + "event_handler_enabled", Column(&HostsTable::EventHandlerEnabledAccessor, objectAccessor));
+ table->AddColumn(prefix + "acknowledgement_type", Column(&HostsTable::AcknowledgementTypeAccessor, objectAccessor));
+ table->AddColumn(prefix + "check_type", Column(&HostsTable::CheckTypeAccessor, objectAccessor));
+ table->AddColumn(prefix + "last_state", Column(&HostsTable::LastStateAccessor, objectAccessor));
+ table->AddColumn(prefix + "last_hard_state", Column(&HostsTable::LastHardStateAccessor, objectAccessor));
+ table->AddColumn(prefix + "current_attempt", Column(&HostsTable::CurrentAttemptAccessor, objectAccessor));
+ table->AddColumn(prefix + "last_notification", Column(&HostsTable::LastNotificationAccessor, objectAccessor));
+ table->AddColumn(prefix + "next_notification", Column(&HostsTable::NextNotificationAccessor, objectAccessor));
+ table->AddColumn(prefix + "next_check", Column(&HostsTable::NextCheckAccessor, objectAccessor));
+ table->AddColumn(prefix + "last_hard_state_change", Column(&HostsTable::LastHardStateChangeAccessor, objectAccessor));
+ table->AddColumn(prefix + "has_been_checked", Column(&HostsTable::HasBeenCheckedAccessor, objectAccessor));
+ table->AddColumn(prefix + "current_notification_number", Column(&HostsTable::CurrentNotificationNumberAccessor, objectAccessor));
+ table->AddColumn(prefix + "pending_flex_downtime", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "total_services", Column(&HostsTable::TotalServicesAccessor, objectAccessor));
+ table->AddColumn(prefix + "checks_enabled", Column(&HostsTable::ChecksEnabledAccessor, objectAccessor));
+ table->AddColumn(prefix + "notifications_enabled", Column(&HostsTable::NotificationsEnabledAccessor, objectAccessor));
+ table->AddColumn(prefix + "acknowledged", Column(&HostsTable::AcknowledgedAccessor, objectAccessor));
+ table->AddColumn(prefix + "state", Column(&HostsTable::StateAccessor, objectAccessor));
+ table->AddColumn(prefix + "state_type", Column(&HostsTable::StateTypeAccessor, objectAccessor));
+ table->AddColumn(prefix + "no_more_notifications", Column(&HostsTable::NoMoreNotificationsAccessor, objectAccessor));
+ table->AddColumn(prefix + "check_flapping_recovery_notification", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "last_check", Column(&HostsTable::LastCheckAccessor, objectAccessor));
+ table->AddColumn(prefix + "last_state_change", Column(&HostsTable::LastStateChangeAccessor, objectAccessor));
+ table->AddColumn(prefix + "last_time_up", Column(&HostsTable::LastTimeUpAccessor, objectAccessor));
+ table->AddColumn(prefix + "last_time_down", Column(&HostsTable::LastTimeDownAccessor, objectAccessor));
+ table->AddColumn(prefix + "last_time_unreachable", Column(&HostsTable::LastTimeUnreachableAccessor, objectAccessor));
+ table->AddColumn(prefix + "is_flapping", Column(&HostsTable::IsFlappingAccessor, objectAccessor));
+ table->AddColumn(prefix + "scheduled_downtime_depth", Column(&HostsTable::ScheduledDowntimeDepthAccessor, objectAccessor));
+ table->AddColumn(prefix + "is_executing", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "active_checks_enabled", Column(&HostsTable::ActiveChecksEnabledAccessor, objectAccessor));
+ table->AddColumn(prefix + "check_options", Column(&Table::EmptyStringAccessor, objectAccessor));
+ table->AddColumn(prefix + "obsess_over_host", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "modified_attributes", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "modified_attributes_list", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "check_interval", Column(&HostsTable::CheckIntervalAccessor, objectAccessor));
+ table->AddColumn(prefix + "retry_interval", Column(&HostsTable::RetryIntervalAccessor, objectAccessor));
+ table->AddColumn(prefix + "notification_interval", Column(&HostsTable::NotificationIntervalAccessor, objectAccessor));
+ table->AddColumn(prefix + "first_notification_delay", Column(&Table::EmptyStringAccessor, objectAccessor));
+ table->AddColumn(prefix + "low_flap_threshold", Column(&HostsTable::LowFlapThresholdAccessor, objectAccessor));
+ table->AddColumn(prefix + "high_flap_threshold", Column(&HostsTable::HighFlapThresholdAccessor, objectAccessor));
+ table->AddColumn(prefix + "x_3d", Column(&EmptyStringAccessor, objectAccessor));
+ table->AddColumn(prefix + "y_3d", Column(&EmptyStringAccessor, objectAccessor));
+ table->AddColumn(prefix + "z_3d", Column(&EmptyStringAccessor, objectAccessor));
+ table->AddColumn(prefix + "x_2d", Column(&Table::EmptyStringAccessor, objectAccessor));
+ table->AddColumn(prefix + "y_2d", Column(&Table::EmptyStringAccessor, objectAccessor));
+ table->AddColumn(prefix + "latency", Column(&HostsTable::LatencyAccessor, objectAccessor));
+ table->AddColumn(prefix + "execution_time", Column(&HostsTable::ExecutionTimeAccessor, objectAccessor));
+ table->AddColumn(prefix + "percent_state_change", Column(&HostsTable::PercentStateChangeAccessor, objectAccessor));
+ table->AddColumn(prefix + "in_notification_period", Column(&HostsTable::InNotificationPeriodAccessor, objectAccessor));
+ table->AddColumn(prefix + "in_check_period", Column(&HostsTable::InCheckPeriodAccessor, objectAccessor));
+ table->AddColumn(prefix + "contacts", Column(&HostsTable::ContactsAccessor, objectAccessor));
+ table->AddColumn(prefix + "downtimes", Column(&HostsTable::DowntimesAccessor, objectAccessor));
+ table->AddColumn(prefix + "downtimes_with_info", Column(&HostsTable::DowntimesWithInfoAccessor, objectAccessor));
+ table->AddColumn(prefix + "comments", Column(&HostsTable::CommentsAccessor, objectAccessor));
+ table->AddColumn(prefix + "comments_with_info", Column(&HostsTable::CommentsWithInfoAccessor, objectAccessor));
+ table->AddColumn(prefix + "comments_with_extra_info", Column(&HostsTable::CommentsWithExtraInfoAccessor, objectAccessor));
+ table->AddColumn(prefix + "custom_variable_names", Column(&HostsTable::CustomVariableNamesAccessor, objectAccessor));
+ table->AddColumn(prefix + "custom_variable_values", Column(&HostsTable::CustomVariableValuesAccessor, objectAccessor));
+ table->AddColumn(prefix + "custom_variables", Column(&HostsTable::CustomVariablesAccessor, objectAccessor));
+ table->AddColumn(prefix + "filename", Column(&Table::EmptyStringAccessor, objectAccessor));
+ table->AddColumn(prefix + "parents", Column(&HostsTable::ParentsAccessor, objectAccessor));
+ table->AddColumn(prefix + "childs", Column(&HostsTable::ChildsAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services", Column(&HostsTable::NumServicesAccessor, objectAccessor));
+ table->AddColumn(prefix + "worst_service_state", Column(&HostsTable::WorstServiceStateAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_ok", Column(&HostsTable::NumServicesOkAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_warn", Column(&HostsTable::NumServicesWarnAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_crit", Column(&HostsTable::NumServicesCritAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_unknown", Column(&HostsTable::NumServicesUnknownAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_pending", Column(&HostsTable::NumServicesPendingAccessor, objectAccessor));
+ table->AddColumn(prefix + "worst_service_hard_state", Column(&HostsTable::WorstServiceHardStateAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_hard_ok", Column(&HostsTable::NumServicesHardOkAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_hard_warn", Column(&HostsTable::NumServicesHardWarnAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_hard_crit", Column(&HostsTable::NumServicesHardCritAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_hard_unknown", Column(&HostsTable::NumServicesHardUnknownAccessor, objectAccessor));
+ table->AddColumn(prefix + "hard_state", Column(&HostsTable::HardStateAccessor, objectAccessor));
+ table->AddColumn(prefix + "pnpgraph_present", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "staleness", Column(&HostsTable::StalenessAccessor, objectAccessor));
+ table->AddColumn(prefix + "groups", Column(&HostsTable::GroupsAccessor, objectAccessor));
+ table->AddColumn(prefix + "contact_groups", Column(&HostsTable::ContactGroupsAccessor, objectAccessor));
+ table->AddColumn(prefix + "services", Column(&HostsTable::ServicesAccessor, objectAccessor));
+ table->AddColumn(prefix + "services_with_state", Column(&HostsTable::ServicesWithStateAccessor, objectAccessor));
+ table->AddColumn(prefix + "services_with_info", Column(&HostsTable::ServicesWithInfoAccessor, objectAccessor));
+ table->AddColumn(prefix + "check_source", Column(&HostsTable::CheckSourceAccessor, objectAccessor));
+ table->AddColumn(prefix + "is_reachable", Column(&HostsTable::IsReachableAccessor, objectAccessor));
+ table->AddColumn(prefix + "cv_is_json", Column(&HostsTable::CVIsJsonAccessor, objectAccessor));
+ table->AddColumn(prefix + "original_attributes", Column(&HostsTable::OriginalAttributesAccessor, objectAccessor));
+
+ /* add additional group by values received through the object accessor */
+ if (table->GetGroupByType() == LivestatusGroupByHostGroup) {
+ /* _1 = row, _2 = groupByType, _3 = groupByObject */
+ Log(LogDebug, "Livestatus")
+ << "Processing hosts group by hostgroup table.";
+ HostGroupsTable::AddColumns(table, "hostgroup_", [](const Value& row, LivestatusGroupByType groupByType, const Object::Ptr& groupByObject) -> Value {
+ return HostGroupAccessor(row, groupByType, groupByObject);
+ });
+ }
+}
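+
+/* Illustrative sketch, not part of the upstream patch: with LivestatusGroupByHostGroup
+ * each host row is joined with its hostgroup, so the "hostgroup_"-prefixed columns added
+ * above can be requested alongside the host columns. A client query against such a
+ * grouped table could look roughly like this (table and group names are assumptions):
+ *
+ *   GET hostsbygroup
+ *   Columns: hostgroup_name name state
+ *   Filter: hostgroup_name = linux-servers
+ */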
+
+String HostsTable::GetName() const
+{
+ return "hosts";
+}
+
+String HostsTable::GetPrefix() const
+{
+ return "host";
+}
+
+void HostsTable::FetchRows(const AddRowFunction& addRowFn)
+{
+ if (GetGroupByType() == LivestatusGroupByHostGroup) {
+ for (const HostGroup::Ptr& hg : ConfigType::GetObjectsByType<HostGroup>()) {
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ /* the caller must know which groupby type and value are set for this row */
+ if (!addRowFn(host, LivestatusGroupByHostGroup, hg))
+ return;
+ }
+ }
+ } else {
+ for (const Host::Ptr& host : ConfigType::GetObjectsByType<Host>()) {
+ if (!addRowFn(host, LivestatusGroupByNone, Empty))
+ return;
+ }
+ }
+}
+
+Object::Ptr HostsTable::HostGroupAccessor(const Value& row, LivestatusGroupByType groupByType, const Object::Ptr& groupByObject)
+{
+ /* Return the current group-by value set from within FetchRows().
+ * This is the hostgroup object used for the table join in AddColumns().
+ */
+ if (groupByType == LivestatusGroupByHostGroup)
+ return groupByObject;
+
+ return nullptr;
+}
+
+Value HostsTable::NameAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetName();
+}
+
+Value HostsTable::DisplayNameAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetDisplayName();
+}
+
+Value HostsTable::AddressAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetAddress();
+}
+
+Value HostsTable::Address6Accessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetAddress6();
+}
+
+Value HostsTable::CheckCommandAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ CheckCommand::Ptr checkcommand = host->GetCheckCommand();
+ if (checkcommand)
+ return CompatUtility::GetCommandName(checkcommand) + "!" + CompatUtility::GetCheckableCommandArgs(host);
+
+ return Empty;
+}
+
+Value HostsTable::CheckCommandExpandedAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ CheckCommand::Ptr checkcommand = host->GetCheckCommand();
+ if (checkcommand)
+ return CompatUtility::GetCommandName(checkcommand) + "!" + CompatUtility::GetCheckableCommandArgs(host);
+
+ return Empty;
+}
+
+Value HostsTable::EventHandlerAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ EventCommand::Ptr eventcommand = host->GetEventCommand();
+ if (eventcommand)
+ return CompatUtility::GetCommandName(eventcommand);
+
+ return Empty;
+}
+
+Value HostsTable::CheckPeriodAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ TimePeriod::Ptr checkPeriod = host->GetCheckPeriod();
+
+ if (!checkPeriod)
+ return Empty;
+
+ return checkPeriod->GetName();
+}
+
+Value HostsTable::NotesAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetNotes();
+}
+
+Value HostsTable::NotesExpandedAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ MacroProcessor::ResolverList resolvers {
+ { "host", host },
+ };
+
+ return MacroProcessor::ResolveMacros(host->GetNotes(), resolvers);
+}
+
+Value HostsTable::NotesUrlAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetNotesUrl();
+}
+
+Value HostsTable::NotesUrlExpandedAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ MacroProcessor::ResolverList resolvers {
+ { "host", host },
+ };
+
+ return MacroProcessor::ResolveMacros(host->GetNotesUrl(), resolvers);
+}
+
+Value HostsTable::ActionUrlAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetActionUrl();
+}
+
+Value HostsTable::ActionUrlExpandedAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ MacroProcessor::ResolverList resolvers {
+ { "host", host },
+ };
+
+ return MacroProcessor::ResolveMacros(host->GetActionUrl(), resolvers);
+}
+
+Value HostsTable::PluginOutputAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ String output;
+ CheckResult::Ptr cr = host->GetLastCheckResult();
+
+ if (cr)
+ output = CompatUtility::GetCheckResultOutput(cr);
+
+ return output;
+}
+
+Value HostsTable::PerfDataAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ CheckResult::Ptr cr = host->GetLastCheckResult();
+
+ if (!cr)
+ return Empty;
+
+ return PluginUtility::FormatPerfdata(cr->GetPerformanceData());
+}
+
+Value HostsTable::IconImageAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetIconImage();
+}
+
+Value HostsTable::IconImageExpandedAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ MacroProcessor::ResolverList resolvers {
+ { "host", host },
+ };
+
+ return MacroProcessor::ResolveMacros(host->GetIconImage(), resolvers);
+}
+
+Value HostsTable::IconImageAltAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetIconImageAlt();
+}
+
+Value HostsTable::LongPluginOutputAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ String long_output;
+ CheckResult::Ptr cr = host->GetLastCheckResult();
+
+ if (cr)
+ long_output = CompatUtility::GetCheckResultLongOutput(cr);
+
+ return long_output;
+}
+
+Value HostsTable::MaxCheckAttemptsAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetMaxCheckAttempts();
+}
+
+Value HostsTable::FlapDetectionEnabledAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return Convert::ToLong(host->GetEnableFlapping());
+}
+
+Value HostsTable::AcceptPassiveChecksAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return Convert::ToLong(host->GetEnablePassiveChecks());
+}
+
+Value HostsTable::EventHandlerEnabledAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return Convert::ToLong(host->GetEnableEventHandler());
+}
+
+Value HostsTable::AcknowledgementTypeAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ ObjectLock olock(host);
+ return host->GetAcknowledgement();
+}
+
+Value HostsTable::CheckTypeAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return (host->GetEnableActiveChecks() ? 0 : 1); /* 0 .. active, 1 .. passive */
+}
+
+Value HostsTable::LastStateAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetLastState();
+}
+
+Value HostsTable::LastHardStateAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetLastHardState();
+}
+
+Value HostsTable::CurrentAttemptAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetCheckAttempt();
+}
+
+Value HostsTable::LastNotificationAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return CompatUtility::GetCheckableNotificationLastNotification(host);
+}
+
+Value HostsTable::NextNotificationAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return CompatUtility::GetCheckableNotificationNextNotification(host);
+}
+
+Value HostsTable::NextCheckAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return static_cast<int>(host->GetNextCheck());
+}
+
+Value HostsTable::LastHardStateChangeAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return static_cast<int>(host->GetLastHardStateChange());
+}
+
+Value HostsTable::HasBeenCheckedAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return Convert::ToLong(host->HasBeenChecked());
+}
+
+Value HostsTable::CurrentNotificationNumberAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return CompatUtility::GetCheckableNotificationNotificationNumber(host);
+}
+
+Value HostsTable::TotalServicesAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetTotalServices();
+}
+
+Value HostsTable::ChecksEnabledAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return Convert::ToLong(host->GetEnableActiveChecks());
+}
+
+Value HostsTable::NotificationsEnabledAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return Convert::ToLong(host->GetEnableNotifications());
+}
+
+Value HostsTable::ProcessPerformanceDataAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return Convert::ToLong(host->GetEnablePerfdata());
+}
+
+Value HostsTable::AcknowledgedAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ ObjectLock olock(host);
+ return host->IsAcknowledged();
+}
+
+Value HostsTable::StateAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->IsReachable() ? host->GetState() : 2;
+}
+
+Value HostsTable::StateTypeAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetStateType();
+}
+
+Value HostsTable::NoMoreNotificationsAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return (CompatUtility::GetCheckableNotificationNotificationInterval(host) == 0 && !host->GetVolatile()) ? 1 : 0;
+}
+
+Value HostsTable::LastCheckAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return static_cast<int>(host->GetLastCheck());
+}
+
+Value HostsTable::LastStateChangeAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return static_cast<int>(host->GetLastStateChange());
+}
+
+Value HostsTable::LastTimeUpAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return static_cast<int>(host->GetLastStateUp());
+}
+
+Value HostsTable::LastTimeDownAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return static_cast<int>(host->GetLastStateDown());
+}
+
+Value HostsTable::LastTimeUnreachableAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return static_cast<int>(host->GetLastStateUnreachable());
+}
+
+Value HostsTable::IsFlappingAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->IsFlapping();
+}
+
+Value HostsTable::ScheduledDowntimeDepthAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetDowntimeDepth();
+}
+
+Value HostsTable::ActiveChecksEnabledAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return Convert::ToLong(host->GetEnableActiveChecks());
+}
+
+Value HostsTable::CheckIntervalAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetCheckInterval() / LIVESTATUS_INTERVAL_LENGTH;
+}
+
+Value HostsTable::RetryIntervalAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetRetryInterval() / LIVESTATUS_INTERVAL_LENGTH;
+}
+
+Value HostsTable::NotificationIntervalAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return CompatUtility::GetCheckableNotificationNotificationInterval(host);
+}
+
+Value HostsTable::LowFlapThresholdAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetFlappingThresholdLow();
+}
+
+Value HostsTable::HighFlapThresholdAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetFlappingThresholdHigh();
+}
+
+Value HostsTable::LatencyAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ CheckResult::Ptr cr = host->GetLastCheckResult();
+
+ if (!cr)
+ return Empty;
+
+ return cr->CalculateLatency();
+}
+
+Value HostsTable::ExecutionTimeAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ CheckResult::Ptr cr = host->GetLastCheckResult();
+
+ if (!cr)
+ return Empty;
+
+ return cr->CalculateExecutionTime();
+}
+
+Value HostsTable::PercentStateChangeAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetFlappingCurrent();
+}
+
+Value HostsTable::InNotificationPeriodAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ for (const Notification::Ptr& notification : host->GetNotifications()) {
+ TimePeriod::Ptr timeperiod = notification->GetPeriod();
+
+ if (!timeperiod || timeperiod->IsInside(Utility::GetTime()))
+ return 1;
+ }
+
+ return 0;
+}
+
+Value HostsTable::InCheckPeriodAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ TimePeriod::Ptr timeperiod = host->GetCheckPeriod();
+
+ /* none set means always checked */
+ if (!timeperiod)
+ return 1;
+
+ return Convert::ToLong(timeperiod->IsInside(Utility::GetTime()));
+}
+
+Value HostsTable::ContactsAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ ArrayData result;
+
+ for (const User::Ptr& user : CompatUtility::GetCheckableNotificationUsers(host)) {
+ result.push_back(user->GetName());
+ }
+
+ return new Array(std::move(result));
+}
+
+Value HostsTable::DowntimesAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ ArrayData result;
+
+ for (const Downtime::Ptr& downtime : host->GetDowntimes()) {
+ if (downtime->IsExpired())
+ continue;
+
+ result.push_back(downtime->GetLegacyId());
+ }
+
+ return new Array(std::move(result));
+}
+
+Value HostsTable::DowntimesWithInfoAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ ArrayData result;
+
+ for (const Downtime::Ptr& downtime : host->GetDowntimes()) {
+ if (downtime->IsExpired())
+ continue;
+
+ result.push_back(new Array({
+ downtime->GetLegacyId(),
+ downtime->GetAuthor(),
+ downtime->GetComment()
+ }));
+ }
+
+ return new Array(std::move(result));
+}
+
+Value HostsTable::CommentsAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ ArrayData result;
+
+ for (const Comment::Ptr& comment : host->GetComments()) {
+ if (comment->IsExpired())
+ continue;
+
+ result.push_back(comment->GetLegacyId());
+ }
+
+ return new Array(std::move(result));
+}
+
+Value HostsTable::CommentsWithInfoAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ ArrayData result;
+
+ for (const Comment::Ptr& comment : host->GetComments()) {
+ if (comment->IsExpired())
+ continue;
+
+ result.push_back(new Array({
+ comment->GetLegacyId(),
+ comment->GetAuthor(),
+ comment->GetText()
+ }));
+ }
+
+ return new Array(std::move(result));
+}
+
+Value HostsTable::CommentsWithExtraInfoAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ ArrayData result;
+
+ for (const Comment::Ptr& comment : host->GetComments()) {
+ if (comment->IsExpired())
+ continue;
+
+ result.push_back(new Array({
+ comment->GetLegacyId(),
+ comment->GetAuthor(),
+ comment->GetText(),
+ comment->GetEntryType(),
+ static_cast<int>(comment->GetEntryTime())
+ }));
+ }
+
+ return new Array(std::move(result));
+}
+
+Value HostsTable::CustomVariableNamesAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ Dictionary::Ptr vars = host->GetVars();
+
+ ArrayData result;
+
+ if (vars) {
+ ObjectLock olock(vars);
+ for (const Dictionary::Pair& kv : vars) {
+ result.push_back(kv.first);
+ }
+ }
+
+ return new Array(std::move(result));
+}
+
+Value HostsTable::CustomVariableValuesAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ Dictionary::Ptr vars = host->GetVars();
+
+ ArrayData result;
+
+ if (vars) {
+ ObjectLock olock(vars);
+ for (const Dictionary::Pair& kv : vars) {
+ if (kv.second.IsObjectType<Array>() || kv.second.IsObjectType<Dictionary>())
+ result.push_back(JsonEncode(kv.second));
+ else
+ result.push_back(kv.second);
+ }
+ }
+
+ return new Array(std::move(result));
+}
+
+Value HostsTable::CustomVariablesAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ Dictionary::Ptr vars = host->GetVars();
+
+ ArrayData result;
+
+ if (vars) {
+ ObjectLock olock(vars);
+ for (const Dictionary::Pair& kv : vars) {
+ Value val;
+
+ if (kv.second.IsObjectType<Array>() || kv.second.IsObjectType<Dictionary>())
+ val = JsonEncode(kv.second);
+ else
+ val = kv.second;
+
+ result.push_back(new Array({
+ kv.first,
+ val
+ }));
+ }
+ }
+
+ return new Array(std::move(result));
+}
+
+Value HostsTable::CVIsJsonAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ Dictionary::Ptr vars = host->GetVars();
+
+ if (!vars)
+ return Empty;
+
+ bool cv_is_json = false;
+
+ ObjectLock olock(vars);
+ for (const Dictionary::Pair& kv : vars) {
+ if (kv.second.IsObjectType<Array>() || kv.second.IsObjectType<Dictionary>())
+ cv_is_json = true;
+ }
+
+ return cv_is_json;
+}
+
+Value HostsTable::ParentsAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ ArrayData result;
+
+ for (const Checkable::Ptr& parent : host->GetParents()) {
+ Host::Ptr parent_host = dynamic_pointer_cast<Host>(parent);
+
+ if (!parent_host)
+ continue;
+
+ result.push_back(parent_host->GetName());
+ }
+
+ return new Array(std::move(result));
+}
+
+Value HostsTable::ChildsAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ ArrayData result;
+
+ for (const Checkable::Ptr& child : host->GetChildren()) {
+ Host::Ptr child_host = dynamic_pointer_cast<Host>(child);
+
+ if (!child_host)
+ continue;
+
+ result.push_back(child_host->GetName());
+ }
+
+ return new Array(std::move(result));
+}
+
+Value HostsTable::NumServicesAccessor(const Value& row)
+{
+ /* duplicate of TotalServices */
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->GetTotalServices();
+}
+
+Value HostsTable::WorstServiceStateAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ Value worst_service = ServiceOK;
+
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (service->GetState() > worst_service)
+ worst_service = service->GetState();
+ }
+
+ return worst_service;
+}
+
+Value HostsTable::NumServicesOkAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (service->GetState() == ServiceOK)
+ num_services++;
+ }
+
+ return num_services;
+}
+
+Value HostsTable::NumServicesWarnAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (service->GetState() == ServiceWarning)
+ num_services++;
+ }
+
+ return num_services;
+}
+
+Value HostsTable::NumServicesCritAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (service->GetState() == ServiceCritical)
+ num_services++;
+ }
+
+ return num_services;
+}
+
+Value HostsTable::NumServicesUnknownAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (service->GetState() == ServiceUnknown)
+ num_services++;
+ }
+
+ return num_services;
+}
+
+Value HostsTable::NumServicesPendingAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (!service->GetLastCheckResult())
+ num_services++;
+ }
+
+ return num_services;
+}
+
+Value HostsTable::WorstServiceHardStateAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ Value worst_service = ServiceOK;
+
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (service->GetStateType() == StateTypeHard) {
+ if (service->GetState() > worst_service)
+ worst_service = service->GetState();
+ }
+ }
+
+ return worst_service;
+}
+
+Value HostsTable::NumServicesHardOkAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (service->GetStateType() == StateTypeHard && service->GetState() == ServiceOK)
+ num_services++;
+ }
+
+ return num_services;
+}
+
+Value HostsTable::NumServicesHardWarnAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (service->GetStateType() == StateTypeHard && service->GetState() == ServiceWarning)
+ num_services++;
+ }
+
+ return num_services;
+}
+
+Value HostsTable::NumServicesHardCritAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (service->GetStateType() == StateTypeHard && service->GetState() == ServiceCritical)
+ num_services++;
+ }
+
+ return num_services;
+}
+
+Value HostsTable::NumServicesHardUnknownAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Service::Ptr& service : host->GetServices()) {
+ if (service->GetStateType() == StateTypeHard && service->GetState() == ServiceUnknown)
+ num_services++;
+ }
+
+ return num_services;
+}
+
+Value HostsTable::HardStateAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ if (host->GetState() == HostUp)
+ return HostUp;
+ else if (host->GetStateType() == StateTypeHard)
+ return host->GetState();
+
+ return host->GetLastHardState();
+}
+
+Value HostsTable::StalenessAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ if (host->HasBeenChecked() && host->GetLastCheck() > 0)
+ return (Utility::GetTime() - host->GetLastCheck()) / (host->GetCheckInterval() * 3600);
+
+ return 0.0;
+}
+
+Value HostsTable::GroupsAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ Array::Ptr groups = host->GetGroups();
+
+ if (!groups)
+ return Empty;
+
+ return groups;
+}
+
+Value HostsTable::ContactGroupsAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ ArrayData result;
+
+ for (const UserGroup::Ptr& usergroup : CompatUtility::GetCheckableNotificationUserGroups(host)) {
+ result.push_back(usergroup->GetName());
+ }
+
+ return new Array(std::move(result));
+}
+
+Value HostsTable::ServicesAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ std::vector<Service::Ptr> rservices = host->GetServices();
+
+ ArrayData result;
+ result.reserve(rservices.size());
+
+ for (const Service::Ptr& service : rservices) {
+ result.push_back(service->GetShortName());
+ }
+
+ return new Array(std::move(result));
+}
+
+Value HostsTable::ServicesWithStateAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ std::vector<Service::Ptr> rservices = host->GetServices();
+
+ ArrayData result;
+ result.reserve(rservices.size());
+
+ for (const Service::Ptr& service : rservices) {
+ result.push_back(new Array({
+ service->GetShortName(),
+ service->GetState(),
+ service->HasBeenChecked() ? 1 : 0
+ }));
+ }
+
+ return new Array(std::move(result));
+}
+
+Value HostsTable::ServicesWithInfoAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ std::vector<Service::Ptr> rservices = host->GetServices();
+
+ ArrayData result;
+ result.reserve(rservices.size());
+
+ for (const Service::Ptr& service : rservices) {
+ String output;
+ CheckResult::Ptr cr = service->GetLastCheckResult();
+
+ if (cr)
+ output = CompatUtility::GetCheckResultOutput(cr);
+
+ result.push_back(new Array({
+ service->GetShortName(),
+ service->GetState(),
+ service->HasBeenChecked() ? 1 : 0,
+ output
+ }));
+ }
+
+ return new Array(std::move(result));
+}
+
+Value HostsTable::CheckSourceAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ CheckResult::Ptr cr = host->GetLastCheckResult();
+
+ if (cr)
+ return cr->GetCheckSource();
+
+ return Empty;
+}
+
+Value HostsTable::IsReachableAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return host->IsReachable();
+}
+
+Value HostsTable::OriginalAttributesAccessor(const Value& row)
+{
+ Host::Ptr host = static_cast<Host::Ptr>(row);
+
+ if (!host)
+ return Empty;
+
+ return JsonEncode(host->GetOriginalAttributes());
+}
diff --git a/lib/livestatus/hoststable.hpp b/lib/livestatus/hoststable.hpp
new file mode 100644
index 0000000..9386183
--- /dev/null
+++ b/lib/livestatus/hoststable.hpp
@@ -0,0 +1,133 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef HOSTSTABLE_H
+#define HOSTSTABLE_H
+
+#include "livestatus/table.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class HostsTable final : public Table
+{
+public:
+ DECLARE_PTR_TYPEDEFS(HostsTable);
+
+ HostsTable(LivestatusGroupByType type = LivestatusGroupByNone);
+
+ static void AddColumns(Table *table, const String& prefix = String(),
+ const Column::ObjectAccessor& objectAccessor = Column::ObjectAccessor());
+
+ String GetName() const override;
+ String GetPrefix() const override;
+
+protected:
+ void FetchRows(const AddRowFunction& addRowFn) override;
+
+ static Object::Ptr HostGroupAccessor(const Value& row, LivestatusGroupByType groupByType, const Object::Ptr& groupByObject);
+
+ static Value NameAccessor(const Value& row);
+ static Value DisplayNameAccessor(const Value& row);
+ static Value AddressAccessor(const Value& row);
+ static Value Address6Accessor(const Value& row);
+ static Value CheckCommandAccessor(const Value& row);
+ static Value CheckCommandExpandedAccessor(const Value& row);
+ static Value EventHandlerAccessor(const Value& row);
+ static Value CheckPeriodAccessor(const Value& row);
+ static Value NotesAccessor(const Value& row);
+ static Value NotesExpandedAccessor(const Value& row);
+ static Value NotesUrlAccessor(const Value& row);
+ static Value NotesUrlExpandedAccessor(const Value& row);
+ static Value ActionUrlAccessor(const Value& row);
+ static Value ActionUrlExpandedAccessor(const Value& row);
+ static Value PluginOutputAccessor(const Value& row);
+ static Value PerfDataAccessor(const Value& row);
+ static Value IconImageAccessor(const Value& row);
+ static Value IconImageExpandedAccessor(const Value& row);
+ static Value IconImageAltAccessor(const Value& row);
+ static Value LongPluginOutputAccessor(const Value& row);
+ static Value MaxCheckAttemptsAccessor(const Value& row);
+ static Value FlapDetectionEnabledAccessor(const Value& row);
+ static Value ProcessPerformanceDataAccessor(const Value& row);
+ static Value AcceptPassiveChecksAccessor(const Value& row);
+ static Value EventHandlerEnabledAccessor(const Value& row);
+ static Value AcknowledgementTypeAccessor(const Value& row);
+ static Value CheckTypeAccessor(const Value& row);
+ static Value LastStateAccessor(const Value& row);
+ static Value LastHardStateAccessor(const Value& row);
+ static Value CurrentAttemptAccessor(const Value& row);
+ static Value LastNotificationAccessor(const Value& row);
+ static Value NextNotificationAccessor(const Value& row);
+ static Value NextCheckAccessor(const Value& row);
+ static Value LastHardStateChangeAccessor(const Value& row);
+ static Value HasBeenCheckedAccessor(const Value& row);
+ static Value CurrentNotificationNumberAccessor(const Value& row);
+ static Value TotalServicesAccessor(const Value& row);
+ static Value ChecksEnabledAccessor(const Value& row);
+ static Value NotificationsEnabledAccessor(const Value& row);
+ static Value AcknowledgedAccessor(const Value& row);
+ static Value StateAccessor(const Value& row);
+ static Value StateTypeAccessor(const Value& row);
+ static Value NoMoreNotificationsAccessor(const Value& row);
+ static Value LastCheckAccessor(const Value& row);
+ static Value LastStateChangeAccessor(const Value& row);
+ static Value LastTimeUpAccessor(const Value& row);
+ static Value LastTimeDownAccessor(const Value& row);
+ static Value LastTimeUnreachableAccessor(const Value& row);
+ static Value IsFlappingAccessor(const Value& row);
+ static Value ScheduledDowntimeDepthAccessor(const Value& row);
+ static Value ActiveChecksEnabledAccessor(const Value& row);
+ static Value CheckIntervalAccessor(const Value& row);
+ static Value RetryIntervalAccessor(const Value& row);
+ static Value NotificationIntervalAccessor(const Value& row);
+ static Value LowFlapThresholdAccessor(const Value& row);
+ static Value HighFlapThresholdAccessor(const Value& row);
+ static Value LatencyAccessor(const Value& row);
+ static Value ExecutionTimeAccessor(const Value& row);
+ static Value PercentStateChangeAccessor(const Value& row);
+ static Value InNotificationPeriodAccessor(const Value& row);
+ static Value InCheckPeriodAccessor(const Value& row);
+ static Value ContactsAccessor(const Value& row);
+ static Value DowntimesAccessor(const Value& row);
+ static Value DowntimesWithInfoAccessor(const Value& row);
+ static Value CommentsAccessor(const Value& row);
+ static Value CommentsWithInfoAccessor(const Value& row);
+ static Value CommentsWithExtraInfoAccessor(const Value& row);
+ static Value CustomVariableNamesAccessor(const Value& row);
+ static Value CustomVariableValuesAccessor(const Value& row);
+ static Value CustomVariablesAccessor(const Value& row);
+ static Value ParentsAccessor(const Value& row);
+ static Value ChildsAccessor(const Value& row);
+ static Value NumServicesAccessor(const Value& row);
+ static Value WorstServiceStateAccessor(const Value& row);
+ static Value NumServicesOkAccessor(const Value& row);
+ static Value NumServicesWarnAccessor(const Value& row);
+ static Value NumServicesCritAccessor(const Value& row);
+ static Value NumServicesUnknownAccessor(const Value& row);
+ static Value NumServicesPendingAccessor(const Value& row);
+ static Value WorstServiceHardStateAccessor(const Value& row);
+ static Value NumServicesHardOkAccessor(const Value& row);
+ static Value NumServicesHardWarnAccessor(const Value& row);
+ static Value NumServicesHardCritAccessor(const Value& row);
+ static Value NumServicesHardUnknownAccessor(const Value& row);
+ static Value HardStateAccessor(const Value& row);
+ static Value StalenessAccessor(const Value& row);
+ static Value GroupsAccessor(const Value& row);
+ static Value ContactGroupsAccessor(const Value& row);
+ static Value ServicesAccessor(const Value& row);
+ static Value ServicesWithStateAccessor(const Value& row);
+ static Value ServicesWithInfoAccessor(const Value& row);
+ static Value CheckSourceAccessor(const Value& row);
+ static Value IsReachableAccessor(const Value& row);
+ static Value CVIsJsonAccessor(const Value& row);
+ static Value OriginalAttributesAccessor(const Value& row);
+};
+
+}
+
+#endif /* HOSTSTABLE_H */
diff --git a/lib/livestatus/i2-livestatus.hpp b/lib/livestatus/i2-livestatus.hpp
new file mode 100644
index 0000000..3375d97
--- /dev/null
+++ b/lib/livestatus/i2-livestatus.hpp
@@ -0,0 +1,14 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef I2LIVESTATUS_H
+#define I2LIVESTATUS_H
+
+/**
+ * @defgroup livestatus Livestatus
+ *
+ * The Livestatus library implements the Livestatus protocol for Icinga.
+ */
+
+#include "base/i2-base.hpp"
+
+#endif /* I2LIVESTATUS_H */
diff --git a/lib/livestatus/invavgaggregator.cpp b/lib/livestatus/invavgaggregator.cpp
new file mode 100644
index 0000000..33cf85c
--- /dev/null
+++ b/lib/livestatus/invavgaggregator.cpp
@@ -0,0 +1,38 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/invavgaggregator.hpp"
+
+using namespace icinga;
+
+InvAvgAggregator::InvAvgAggregator(String attr)
+ : m_InvAvgAttr(std::move(attr))
+{ }
+
+InvAvgAggregatorState *InvAvgAggregator::EnsureState(AggregatorState **state)
+{
+ if (!*state)
+ *state = new InvAvgAggregatorState();
+
+ return static_cast<InvAvgAggregatorState *>(*state);
+}
+
+void InvAvgAggregator::Apply(const Table::Ptr& table, const Value& row, AggregatorState **state)
+{
+ Column column = table->GetColumn(m_InvAvgAttr);
+
+ Value value = column.ExtractValue(row);
+
+ InvAvgAggregatorState *pstate = EnsureState(state);
+
+ pstate->InvAvg += (1.0 / value);
+ pstate->InvAvgCount++;
+}
+
+double InvAvgAggregator::GetResultAndFreeState(AggregatorState *state) const
+{
+ InvAvgAggregatorState *pstate = EnsureState(&state);
+ double result = pstate->InvAvg / pstate->InvAvgCount;
+ delete pstate;
+
+ return result;
+}
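+
+/* Illustrative note, not part of the upstream patch: Apply() accumulates 1/x for every
+ * extracted column value and counts the rows, so GetResultAndFreeState() returns the
+ * average of the inverses:
+ *
+ *   result = (1/x_1 + 1/x_2 + ... + 1/x_n) / n
+ *
+ * e.g. the values 2, 4 and 4 yield (0.5 + 0.25 + 0.25) / 3 = 1/3.
+ */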
diff --git a/lib/livestatus/invavgaggregator.hpp b/lib/livestatus/invavgaggregator.hpp
new file mode 100644
index 0000000..9282b37
--- /dev/null
+++ b/lib/livestatus/invavgaggregator.hpp
@@ -0,0 +1,42 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef INVAVGAGGREGATOR_H
+#define INVAVGAGGREGATOR_H
+
+#include "livestatus/table.hpp"
+#include "livestatus/aggregator.hpp"
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+struct InvAvgAggregatorState final : public AggregatorState
+{
+ double InvAvg{0};
+ double InvAvgCount{0};
+};
+
+/**
+ * @ingroup livestatus
+ */
+class InvAvgAggregator final : public Aggregator
+{
+public:
+ DECLARE_PTR_TYPEDEFS(InvAvgAggregator);
+
+ InvAvgAggregator(String attr);
+
+ void Apply(const Table::Ptr& table, const Value& row, AggregatorState **state) override;
+ double GetResultAndFreeState(AggregatorState *state) const override;
+
+private:
+ String m_InvAvgAttr;
+
+ static InvAvgAggregatorState *EnsureState(AggregatorState **state);
+};
+
+}
+
+#endif /* INVAVGAGGREGATOR_H */
diff --git a/lib/livestatus/invsumaggregator.cpp b/lib/livestatus/invsumaggregator.cpp
new file mode 100644
index 0000000..c955667
--- /dev/null
+++ b/lib/livestatus/invsumaggregator.cpp
@@ -0,0 +1,37 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/invsumaggregator.hpp"
+
+using namespace icinga;
+
+InvSumAggregator::InvSumAggregator(String attr)
+ : m_InvSumAttr(std::move(attr))
+{ }
+
+InvSumAggregatorState *InvSumAggregator::EnsureState(AggregatorState **state)
+{
+ if (!*state)
+ *state = new InvSumAggregatorState();
+
+ return static_cast<InvSumAggregatorState *>(*state);
+}
+
+void InvSumAggregator::Apply(const Table::Ptr& table, const Value& row, AggregatorState **state)
+{
+ Column column = table->GetColumn(m_InvSumAttr);
+
+ Value value = column.ExtractValue(row);
+
+ InvSumAggregatorState *pstate = EnsureState(state);
+
+ pstate->InvSum += (1.0 / value);
+}
+
+double InvSumAggregator::GetResultAndFreeState(AggregatorState *state) const
+{
+ InvSumAggregatorState *pstate = EnsureState(&state);
+ double result = pstate->InvSum;
+ delete pstate;
+
+ return result;
+}
diff --git a/lib/livestatus/invsumaggregator.hpp b/lib/livestatus/invsumaggregator.hpp
new file mode 100644
index 0000000..f7de7be
--- /dev/null
+++ b/lib/livestatus/invsumaggregator.hpp
@@ -0,0 +1,41 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef INVSUMAGGREGATOR_H
+#define INVSUMAGGREGATOR_H
+
+#include "livestatus/table.hpp"
+#include "livestatus/aggregator.hpp"
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+struct InvSumAggregatorState final : public AggregatorState
+{
+ double InvSum{0};
+};
+
+/**
+ * @ingroup livestatus
+ */
+class InvSumAggregator final : public Aggregator
+{
+public:
+ DECLARE_PTR_TYPEDEFS(InvSumAggregator);
+
+ InvSumAggregator(String attr);
+
+ void Apply(const Table::Ptr& table, const Value& row, AggregatorState **state) override;
+ double GetResultAndFreeState(AggregatorState *state) const override;
+
+private:
+ String m_InvSumAttr;
+
+ static InvSumAggregatorState *EnsureState(AggregatorState **state);
+};
+
+}
+
+#endif /* INVSUMAGGREGATOR_H */
diff --git a/lib/livestatus/livestatuslistener.cpp b/lib/livestatus/livestatuslistener.cpp
new file mode 100644
index 0000000..e44650b
--- /dev/null
+++ b/lib/livestatus/livestatuslistener.cpp
@@ -0,0 +1,211 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/livestatuslistener.hpp"
+#include "livestatus/livestatuslistener-ti.cpp"
+#include "base/utility.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/objectlock.hpp"
+#include "base/configtype.hpp"
+#include "base/logger.hpp"
+#include "base/exception.hpp"
+#include "base/tcpsocket.hpp"
+#include "base/unixsocket.hpp"
+#include "base/networkstream.hpp"
+#include "base/application.hpp"
+#include "base/function.hpp"
+#include "base/statsfunction.hpp"
+#include "base/convert.hpp"
+
+using namespace icinga;
+
+REGISTER_TYPE(LivestatusListener);
+
+static int l_ClientsConnected = 0;
+static int l_Connections = 0;
+static std::mutex l_ComponentMutex;
+
+REGISTER_STATSFUNCTION(LivestatusListener, &LivestatusListener::StatsFunc);
+
+void LivestatusListener::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata)
+{
+ DictionaryData nodes;
+
+ for (const LivestatusListener::Ptr& livestatuslistener : ConfigType::GetObjectsByType<LivestatusListener>()) {
+ nodes.emplace_back(livestatuslistener->GetName(), new Dictionary({
+ { "connections", l_Connections }
+ }));
+
+ perfdata->Add(new PerfdataValue("livestatuslistener_" + livestatuslistener->GetName() + "_connections", l_Connections));
+ }
+
+ status->Set("livestatuslistener", new Dictionary(std::move(nodes)));
+}
+
+/**
+ * Starts the component.
+ */
+void LivestatusListener::Start(bool runtimeCreated)
+{
+ ObjectImpl<LivestatusListener>::Start(runtimeCreated);
+
+ Log(LogInformation, "LivestatusListener")
+ << "'" << GetName() << "' started.";
+
+ if (GetSocketType() == "tcp") {
+ TcpSocket::Ptr socket = new TcpSocket();
+
+ try {
+ socket->Bind(GetBindHost(), GetBindPort(), AF_UNSPEC);
+ } catch (std::exception&) {
+ Log(LogCritical, "LivestatusListener")
+ << "Cannot bind TCP socket on host '" << GetBindHost() << "' port '" << GetBindPort() << "'.";
+ return;
+ }
+
+ m_Listener = socket;
+
+ m_Thread = std::thread([this]() { ServerThreadProc(); });
+
+ Log(LogInformation, "LivestatusListener")
+ << "Created TCP socket listening on host '" << GetBindHost() << "' port '" << GetBindPort() << "'.";
+ }
+ else if (GetSocketType() == "unix") {
+#ifndef _WIN32
+ UnixSocket::Ptr socket = new UnixSocket();
+
+ try {
+ socket->Bind(GetSocketPath());
+ } catch (std::exception&) {
+ Log(LogCritical, "LivestatusListener")
+ << "Cannot bind UNIX socket to '" << GetSocketPath() << "'.";
+ return;
+ }
+
+ /* group must be able to write */
+ mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
+
+ if (chmod(GetSocketPath().CStr(), mode) < 0) {
+ Log(LogCritical, "LivestatusListener")
+ << "chmod() on unix socket '" << GetSocketPath() << "' failed with error code " << errno << ", \"" << Utility::FormatErrorNumber(errno) << "\"";
+ return;
+ }
+
+ m_Listener = socket;
+
+ m_Thread = std::thread([this]() { ServerThreadProc(); });
+
+ Log(LogInformation, "LivestatusListener")
+ << "Created UNIX socket in '" << GetSocketPath() << "'.";
+#else
+ /* no UNIX sockets on Windows */
+ Log(LogCritical, "LivestatusListener", "Unix sockets are not supported on Windows.");
+ return;
+#endif
+ }
+}
+
+void LivestatusListener::Stop(bool runtimeRemoved)
+{
+ ObjectImpl<LivestatusListener>::Stop(runtimeRemoved);
+
+ Log(LogInformation, "LivestatusListener")
+ << "'" << GetName() << "' stopped.";
+
+ m_Listener->Close();
+
+ if (m_Thread.joinable())
+ m_Thread.join();
+}
+
+int LivestatusListener::GetClientsConnected()
+{
+ std::unique_lock<std::mutex> lock(l_ComponentMutex);
+
+ return l_ClientsConnected;
+}
+
+int LivestatusListener::GetConnections()
+{
+ std::unique_lock<std::mutex> lock(l_ComponentMutex);
+
+ return l_Connections;
+}
+
+void LivestatusListener::ServerThreadProc()
+{
+ m_Listener->Listen();
+
+ try {
+ for (;;) {
+ timeval tv = { 0, 500000 };
+
+ if (m_Listener->Poll(true, false, &tv)) {
+ Socket::Ptr client = m_Listener->Accept();
+ Log(LogNotice, "LivestatusListener", "Client connected");
+ Utility::QueueAsyncCallback([this, client]() { ClientHandler(client); }, LowLatencyScheduler);
+ }
+
+ if (!IsActive())
+ break;
+ }
+ } catch (std::exception&) {
+ Log(LogCritical, "LivestatusListener", "Cannot accept new connection.");
+ }
+
+ m_Listener->Close();
+}
+
+void LivestatusListener::ClientHandler(const Socket::Ptr& client)
+{
+ {
+ std::unique_lock<std::mutex> lock(l_ComponentMutex);
+ l_ClientsConnected++;
+ l_Connections++;
+ }
+
+ Stream::Ptr stream = new NetworkStream(client);
+
+ StreamReadContext context;
+
+ for (;;) {
+ String line;
+
+ std::vector<String> lines;
+
+ for (;;) {
+ StreamReadStatus srs = stream->ReadLine(&line, context);
+
+ if (srs == StatusEof)
+ break;
+
+ if (srs != StatusNewItem)
+ continue;
+
+ if (line.GetLength() > 0)
+ lines.push_back(line);
+ else
+ break;
+ }
+
+ if (lines.empty())
+ break;
+
+ LivestatusQuery::Ptr query = new LivestatusQuery(lines, GetCompatLogPath());
+ if (!query->Execute(stream))
+ break;
+ }
+
+ {
+ std::unique_lock<std::mutex> lock(l_ComponentMutex);
+ l_ClientsConnected--;
+ }
+}
+
+
+void LivestatusListener::ValidateSocketType(const Lazy<String>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<LivestatusListener>::ValidateSocketType(lvalue, utils);
+
+ if (lvalue() != "unix" && lvalue() != "tcp")
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "socket_type" }, "Socket type '" + lvalue() + "' is invalid."));
+}
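Putting the listener together: Start() binds either a TCP or a UNIX socket, ServerThreadProc() polls it every 0.5 seconds and hands each accepted client to ClientHandler() on a low-latency worker, and ClientHandler() reads header lines until a blank line before executing the request. The core of that last step is equivalent to the following sketch (table and column names are illustrative only, not taken from this patch):

    // Minimal sketch of a single request, mirroring ClientHandler() above.
    std::vector<String> lines = {
        "GET services",                  // illustrative table name
        "Columns: host_name state",      // illustrative column names
        "OutputFormat: json",
        "ResponseHeader: fixed16"
    };
    LivestatusQuery::Ptr query = new LivestatusQuery(lines, GetCompatLogPath());
    bool keepAlive = query->Execute(stream);  // false means the query closed the stream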
diff --git a/lib/livestatus/livestatuslistener.hpp b/lib/livestatus/livestatuslistener.hpp
new file mode 100644
index 0000000..dc739f6
--- /dev/null
+++ b/lib/livestatus/livestatuslistener.hpp
@@ -0,0 +1,47 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef LIVESTATUSLISTENER_H
+#define LIVESTATUSLISTENER_H
+
+#include "livestatus/i2-livestatus.hpp"
+#include "livestatus/livestatuslistener-ti.hpp"
+#include "livestatus/livestatusquery.hpp"
+#include "base/socket.hpp"
+#include <thread>
+
+using namespace icinga;
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class LivestatusListener final : public ObjectImpl<LivestatusListener>
+{
+public:
+ DECLARE_OBJECT(LivestatusListener);
+ DECLARE_OBJECTNAME(LivestatusListener);
+
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+
+ static int GetClientsConnected();
+ static int GetConnections();
+
+ void ValidateSocketType(const Lazy<String>& lvalue, const ValidationUtils& utils) override;
+
+protected:
+ void Start(bool runtimeCreated) override;
+ void Stop(bool runtimeRemoved) override;
+
+private:
+ void ServerThreadProc();
+ void ClientHandler(const Socket::Ptr& client);
+
+ Socket::Ptr m_Listener;
+ std::thread m_Thread;
+};
+
+}
+
+#endif /* LIVESTATUSLISTENER_H */
diff --git a/lib/livestatus/livestatuslistener.ti b/lib/livestatus/livestatuslistener.ti
new file mode 100644
index 0000000..31482cf
--- /dev/null
+++ b/lib/livestatus/livestatuslistener.ti
@@ -0,0 +1,31 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+#include "base/application.hpp"
+
+library livestatus;
+
+namespace icinga
+{
+
+class LivestatusListener : ConfigObject {
+ activation_priority 100;
+
+ [config] String socket_type {
+ default {{{ return "unix"; }}}
+ };
+ [config] String socket_path {
+ default {{{ return Configuration::InitRunDir + "/cmd/livestatus"; }}}
+ };
+ [config] String bind_host {
+ default {{{ return "127.0.0.1"; }}}
+ };
+ [config] String bind_port {
+ default {{{ return "6558"; }}}
+ };
+ [config] String compat_log_path {
+ default {{{ return Configuration::LogDir + "/compat"; }}}
+ };
+};
+
+}
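The class above only declares the configurable attributes; in an Icinga 2 configuration they are used as in this sketch (all attributes are optional and fall back to the defaults defined in the .ti file, e.g. socket_type "unix" and bind_port "6558"):

    object LivestatusListener "livestatus-tcp" {
      socket_type = "tcp"
      bind_host = "127.0.0.1"
      bind_port = "6558"
    }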
diff --git a/lib/livestatus/livestatuslogutility.cpp b/lib/livestatus/livestatuslogutility.cpp
new file mode 100644
index 0000000..565c2ca
--- /dev/null
+++ b/lib/livestatus/livestatuslogutility.cpp
@@ -0,0 +1,321 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/livestatuslogutility.hpp"
+#include "icinga/service.hpp"
+#include "icinga/host.hpp"
+#include "icinga/user.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/eventcommand.hpp"
+#include "icinga/notificationcommand.hpp"
+#include "base/utility.hpp"
+#include "base/convert.hpp"
+#include "base/logger.hpp"
+#include <boost/algorithm/string.hpp>
+#include <boost/algorithm/string/replace.hpp>
+#include <boost/algorithm/string/predicate.hpp>
+#include <fstream>
+
+using namespace icinga;
+
+void LivestatusLogUtility::CreateLogIndex(const String& path, std::map<time_t, String>& index)
+{
+ Utility::Glob(path + "/icinga.log", [&index](const String& newPath) { CreateLogIndexFileHandler(newPath, index); }, GlobFile);
+ Utility::Glob(path + "/archives/*.log", [&index](const String& newPath) { CreateLogIndexFileHandler(newPath, index); }, GlobFile);
+}
+
+void LivestatusLogUtility::CreateLogIndexFileHandler(const String& path, std::map<time_t, String>& index)
+{
+ std::ifstream stream;
+ stream.open(path.CStr(), std::ifstream::in);
+
+ if (!stream)
+ BOOST_THROW_EXCEPTION(std::runtime_error("Could not open log file: " + path));
+
+ /* read the first 12 bytes to get the timestamp: [1234567890] */
+ char buffer[12];
+
+ stream.read(buffer, 12);
+
+ if (buffer[0] != '[' || buffer[11] != ']') {
+ /* this can happen for directories too, silently ignore them */
+ return;
+ }
+
+ /* extract timestamp */
+ buffer[11] = 0;
+ time_t ts_start = atoi(buffer+1);
+
+ stream.close();
+
+ Log(LogDebug, "LivestatusLogUtility")
+ << "Indexing log file: '" << path << "' with timestamp start: '" << ts_start << "'.";
+
+ index[ts_start] = path;
+}
+
+void LivestatusLogUtility::CreateLogCache(std::map<time_t, String> index, HistoryTable *table,
+ time_t from, time_t until, const AddRowFunction& addRowFn)
+{
+ ASSERT(table);
+
+ /* the index map tells which log files are involved, ordered by their start timestamp */
+ unsigned long line_count = 0;
+ for (const auto& kv : index) {
+ unsigned int ts = kv.first;
+
+ /* skip log files not in range (performance optimization) */
+ if (ts < from || ts > until)
+ continue;
+
+ String log_file = index[ts];
+ int lineno = 0;
+
+ std::ifstream fp;
+ fp.exceptions(std::ifstream::badbit);
+ fp.open(log_file.CStr(), std::ifstream::in);
+
+ while (fp.good()) {
+ std::string line;
+ std::getline(fp, line);
+
+ if (line.empty())
+ continue; /* Ignore empty lines */
+
+ Dictionary::Ptr log_entry_attrs = LivestatusLogUtility::GetAttributes(line);
+
+ /* no attributes available - invalid log line */
+ if (!log_entry_attrs) {
+ Log(LogDebug, "LivestatusLogUtility")
+ << "Skipping invalid log line: '" << line << "'.";
+ continue;
+ }
+
+ table->UpdateLogEntries(log_entry_attrs, line_count, lineno, addRowFn);
+
+ line_count++;
+ lineno++;
+ }
+
+ fp.close();
+ }
+}
+
+Dictionary::Ptr LivestatusLogUtility::GetAttributes(const String& text)
+{
+ Dictionary::Ptr bag = new Dictionary();
+
+ /*
+ * [1379025342] SERVICE NOTIFICATION: contactname;hostname;servicedesc;WARNING;true;foo output
+ */
+ unsigned long time = atoi(text.SubStr(1, 11).CStr());
+
+ Log(LogDebug, "LivestatusLogUtility")
+ << "Processing log line: '" << text << "'.";
+ bag->Set("time", time);
+
+ size_t colon = text.FindFirstOf(':');
+ size_t colon_offset = colon - 13;
+
+ String type = String(text.SubStr(13, colon_offset)).Trim();
+ String options = String(text.SubStr(colon + 1)).Trim();
+
+ bag->Set("type", type);
+ bag->Set("options", options);
+
+ std::vector<String> tokens = options.Split(";");
+
+ /* set default values */
+ bag->Set("class", LogEntryClassInfo);
+ bag->Set("log_type", 0);
+ bag->Set("state", 0);
+ bag->Set("attempt", 0);
+ bag->Set("message", text); /* used as 'message' in log table, and 'log_output' in statehist table */
+
+ if (type.Contains("INITIAL HOST STATE") ||
+ type.Contains("CURRENT HOST STATE") ||
+ type.Contains("HOST ALERT")) {
+ if (tokens.size() < 5)
+ return bag;
+
+ bag->Set("host_name", tokens[0]);
+ bag->Set("state", Host::StateFromString(tokens[1]));
+ bag->Set("state_type", tokens[2]);
+ bag->Set("attempt", atoi(tokens[3].CStr()));
+ bag->Set("plugin_output", tokens[4]);
+
+ if (type.Contains("INITIAL HOST STATE")) {
+ bag->Set("class", LogEntryClassState);
+ bag->Set("log_type", LogEntryTypeHostInitialState);
+ }
+ else if (type.Contains("CURRENT HOST STATE")) {
+ bag->Set("class", LogEntryClassState);
+ bag->Set("log_type", LogEntryTypeHostCurrentState);
+ }
+ else {
+ bag->Set("class", LogEntryClassAlert);
+ bag->Set("log_type", LogEntryTypeHostAlert);
+ }
+
+ return bag;
+ } else if (type.Contains("HOST DOWNTIME ALERT") || type.Contains("HOST FLAPPING ALERT")) {
+ if (tokens.size() < 3)
+ return bag;
+
+ bag->Set("host_name", tokens[0]);
+ bag->Set("state_type", tokens[1]);
+ bag->Set("comment", tokens[2]);
+
+ if (type.Contains("HOST FLAPPING ALERT")) {
+ bag->Set("class", LogEntryClassAlert);
+ bag->Set("log_type", LogEntryTypeHostFlapping);
+ } else {
+ bag->Set("class", LogEntryClassAlert);
+ bag->Set("log_type", LogEntryTypeHostDowntimeAlert);
+ }
+
+ return bag;
+ } else if (type.Contains("INITIAL SERVICE STATE") ||
+ type.Contains("CURRENT SERVICE STATE") ||
+ type.Contains("SERVICE ALERT")) {
+ if (tokens.size() < 6)
+ return bag;
+
+ bag->Set("host_name", tokens[0]);
+ bag->Set("service_description", tokens[1]);
+ bag->Set("state", Service::StateFromString(tokens[2]));
+ bag->Set("state_type", tokens[3]);
+ bag->Set("attempt", atoi(tokens[4].CStr()));
+ bag->Set("plugin_output", tokens[5]);
+
+ if (type.Contains("INITIAL SERVICE STATE")) {
+ bag->Set("class", LogEntryClassState);
+ bag->Set("log_type", LogEntryTypeServiceInitialState);
+ }
+ else if (type.Contains("CURRENT SERVICE STATE")) {
+ bag->Set("class", LogEntryClassState);
+ bag->Set("log_type", LogEntryTypeServiceCurrentState);
+ }
+ else {
+ bag->Set("class", LogEntryClassAlert);
+ bag->Set("log_type", LogEntryTypeServiceAlert);
+ }
+
+ return bag;
+ } else if (type.Contains("SERVICE DOWNTIME ALERT") ||
+ type.Contains("SERVICE FLAPPING ALERT")) {
+ if (tokens.size() < 4)
+ return bag;
+
+ bag->Set("host_name", tokens[0]);
+ bag->Set("service_description", tokens[1]);
+ bag->Set("state_type", tokens[2]);
+ bag->Set("comment", tokens[3]);
+
+ if (type.Contains("SERVICE FLAPPING ALERT")) {
+ bag->Set("class", LogEntryClassAlert);
+ bag->Set("log_type", LogEntryTypeServiceFlapping);
+ } else {
+ bag->Set("class", LogEntryClassAlert);
+ bag->Set("log_type", LogEntryTypeServiceDowntimeAlert);
+ }
+
+ return bag;
+ } else if (type.Contains("TIMEPERIOD TRANSITION")) {
+ if (tokens.size() < 4)
+ return bag;
+
+ bag->Set("class", LogEntryClassState);
+ bag->Set("log_type", LogEntryTypeTimeperiodTransition);
+
+ bag->Set("host_name", tokens[0]);
+ bag->Set("service_description", tokens[1]);
+ bag->Set("state_type", tokens[2]);
+ bag->Set("comment", tokens[3]);
+ } else if (type.Contains("HOST NOTIFICATION")) {
+ if (tokens.size() < 6)
+ return bag;
+
+ bag->Set("contact_name", tokens[0]);
+ bag->Set("host_name", tokens[1]);
+ bag->Set("state_type", tokens[2].CStr());
+ bag->Set("state", Service::StateFromString(tokens[3]));
+ bag->Set("command_name", tokens[4]);
+ bag->Set("plugin_output", tokens[5]);
+
+ bag->Set("class", LogEntryClassNotification);
+ bag->Set("log_type", LogEntryTypeHostNotification);
+
+ return bag;
+ } else if (type.Contains("SERVICE NOTIFICATION")) {
+ if (tokens.size() < 7)
+ return bag;
+
+ bag->Set("contact_name", tokens[0]);
+ bag->Set("host_name", tokens[1]);
+ bag->Set("service_description", tokens[2]);
+ bag->Set("state_type", tokens[3].CStr());
+ bag->Set("state", Service::StateFromString(tokens[4]));
+ bag->Set("command_name", tokens[5]);
+ bag->Set("plugin_output", tokens[6]);
+
+ bag->Set("class", LogEntryClassNotification);
+ bag->Set("log_type", LogEntryTypeServiceNotification);
+
+ return bag;
+ } else if (type.Contains("PASSIVE HOST CHECK")) {
+ if (tokens.size() < 3)
+ return bag;
+
+ bag->Set("host_name", tokens[0]);
+ bag->Set("state", Host::StateFromString(tokens[1]));
+ bag->Set("plugin_output", tokens[2]);
+
+ bag->Set("class", LogEntryClassPassive);
+
+ return bag;
+ } else if (type.Contains("PASSIVE SERVICE CHECK")) {
+ if (tokens.size() < 4)
+ return bag;
+
+ bag->Set("host_name", tokens[0]);
+ bag->Set("service_description", tokens[1]);
+ bag->Set("state", Host::StateFromString(tokens[2]));
+ bag->Set("plugin_output", tokens[3]);
+
+ bag->Set("class", LogEntryClassPassive);
+
+ return bag;
+ } else if (type.Contains("EXTERNAL COMMAND")) {
+ bag->Set("class", LogEntryClassCommand);
+ /* string processing not implemented in 1.x */
+
+ return bag;
+ } else if (type.Contains("LOG VERSION")) {
+ bag->Set("class", LogEntryClassProgram);
+ bag->Set("log_type", LogEntryTypeVersion);
+
+ return bag;
+ } else if (type.Contains("logging initial states")) {
+ bag->Set("class", LogEntryClassProgram);
+ bag->Set("log_type", LogEntryTypeInitialStates);
+
+ return bag;
+ } else if (type.Contains("starting... (PID=")) {
+ bag->Set("class", LogEntryClassProgram);
+ bag->Set("log_type", LogEntryTypeProgramStarting);
+
+ return bag;
+ }
+ /* program */
+ else if (type.Contains("restarting...") ||
+ type.Contains("shutting down...") ||
+ type.Contains("Bailing out") ||
+ type.Contains("active mode...") ||
+ type.Contains("standby mode...")) {
+ bag->Set("class", LogEntryClassProgram);
+
+ return bag;
+ }
+
+ return bag;
+}
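As a worked example of GetAttributes(): the line below satisfies the HOST ALERT branch (five semicolon-separated fields after the colon), so the parser fills the state-related keys instead of returning only the defaults.

    // Sketch: parsing one compat log line with the utility above.
    Dictionary::Ptr attrs = LivestatusLogUtility::GetAttributes(
        "[1379025342] HOST ALERT: myhost;DOWN;HARD;3;PING CRITICAL");
    // attrs: time=1379025342, type="HOST ALERT", host_name="myhost",
    //        state=Host::StateFromString("DOWN"), state_type="HARD", attempt=3,
    //        plugin_output="PING CRITICAL", class=LogEntryClassAlert,
    //        log_type=LogEntryTypeHostAlert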
diff --git a/lib/livestatus/livestatuslogutility.hpp b/lib/livestatus/livestatuslogutility.hpp
new file mode 100644
index 0000000..66d1154
--- /dev/null
+++ b/lib/livestatus/livestatuslogutility.hpp
@@ -0,0 +1,60 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef LIVESTATUSLOGUTILITY_H
+#define LIVESTATUSLOGUTILITY_H
+
+#include "livestatus/historytable.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+enum LogEntryType {
+ LogEntryTypeHostAlert,
+ LogEntryTypeHostDowntimeAlert,
+ LogEntryTypeHostFlapping,
+ LogEntryTypeHostNotification,
+ LogEntryTypeHostInitialState,
+ LogEntryTypeHostCurrentState,
+ LogEntryTypeServiceAlert,
+ LogEntryTypeServiceDowntimeAlert,
+ LogEntryTypeServiceFlapping,
+ LogEntryTypeServiceNotification,
+ LogEntryTypeServiceInitialState,
+ LogEntryTypeServiceCurrentState,
+ LogEntryTypeTimeperiodTransition,
+ LogEntryTypeVersion,
+ LogEntryTypeInitialStates,
+ LogEntryTypeProgramStarting
+};
+
+enum LogEntryClass {
+ LogEntryClassInfo = 0,
+ LogEntryClassAlert = 1,
+ LogEntryClassProgram = 2,
+ LogEntryClassNotification = 3,
+ LogEntryClassPassive = 4,
+ LogEntryClassCommand = 5,
+ LogEntryClassState = 6,
+ LogEntryClassText = 7
+};
+
+/**
+ * @ingroup livestatus
+ */
+class LivestatusLogUtility
+{
+public:
+ static void CreateLogIndex(const String& path, std::map<time_t, String>& index);
+ static void CreateLogIndexFileHandler(const String& path, std::map<time_t, String>& index);
+ static void CreateLogCache(std::map<time_t, String> index, HistoryTable *table, time_t from, time_t until, const AddRowFunction& addRowFn);
+ static Dictionary::Ptr GetAttributes(const String& text);
+
+private:
+ LivestatusLogUtility();
+};
+
+}
+
+#endif /* LIVESTATUSLOGUTILITY_H */
diff --git a/lib/livestatus/livestatusquery.cpp b/lib/livestatus/livestatusquery.cpp
new file mode 100644
index 0000000..0f9b3da
--- /dev/null
+++ b/lib/livestatus/livestatusquery.cpp
@@ -0,0 +1,648 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/livestatusquery.hpp"
+#include "livestatus/countaggregator.hpp"
+#include "livestatus/sumaggregator.hpp"
+#include "livestatus/minaggregator.hpp"
+#include "livestatus/maxaggregator.hpp"
+#include "livestatus/avgaggregator.hpp"
+#include "livestatus/stdaggregator.hpp"
+#include "livestatus/invsumaggregator.hpp"
+#include "livestatus/invavgaggregator.hpp"
+#include "livestatus/attributefilter.hpp"
+#include "livestatus/negatefilter.hpp"
+#include "livestatus/orfilter.hpp"
+#include "livestatus/andfilter.hpp"
+#include "icinga/externalcommandprocessor.hpp"
+#include "base/debug.hpp"
+#include "base/convert.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+#include "base/exception.hpp"
+#include "base/utility.hpp"
+#include "base/json.hpp"
+#include "base/serializer.hpp"
+#include "base/timer.hpp"
+#include "base/initialize.hpp"
+#include <boost/algorithm/string/replace.hpp>
+#include <boost/algorithm/string/join.hpp>
+
+using namespace icinga;
+
+static int l_ExternalCommands = 0;
+static std::mutex l_QueryMutex;
+
+LivestatusQuery::LivestatusQuery(const std::vector<String>& lines, const String& compat_log_path)
+ : m_KeepAlive(false), m_OutputFormat("csv"), m_ColumnHeaders(true), m_Limit(-1), m_ErrorCode(0),
+ m_LogTimeFrom(0), m_LogTimeUntil(static_cast<long>(Utility::GetTime()))
+{
+ if (lines.size() == 0) {
+ m_Verb = "ERROR";
+ m_ErrorCode = LivestatusErrorQuery;
+ m_ErrorMessage = "Empty Query. Aborting.";
+ return;
+ }
+
+ String msg;
+ for (const String& line : lines) {
+ msg += line + "\n";
+ }
+ Log(LogDebug, "LivestatusQuery", msg);
+
+ m_CompatLogPath = compat_log_path;
+
+ /* default separators */
+ m_Separators.emplace_back("\n");
+ m_Separators.emplace_back(";");
+ m_Separators.emplace_back(",");
+ m_Separators.emplace_back("|");
+
+ String line = lines[0];
+
+ size_t sp_index = line.FindFirstOf(" ");
+
+ if (sp_index == String::NPos)
+ BOOST_THROW_EXCEPTION(std::runtime_error("Livestatus header must contain a verb."));
+
+ String verb = line.SubStr(0, sp_index);
+ String target = line.SubStr(sp_index + 1);
+
+ m_Verb = verb;
+
+ if (m_Verb == "COMMAND") {
+ m_KeepAlive = true;
+ m_Command = target;
+ } else if (m_Verb == "GET") {
+ m_Table = target;
+ } else {
+ m_Verb = "ERROR";
+ m_ErrorCode = LivestatusErrorQuery;
+ m_ErrorMessage = "Unknown livestatus verb: " + m_Verb;
+ return;
+ }
+
+ std::deque<Filter::Ptr> filters, stats;
+ std::deque<Aggregator::Ptr> aggregators;
+
+ for (unsigned int i = 1; i < lines.size(); i++) {
+ line = lines[i];
+
+ size_t col_index = line.FindFirstOf(":");
+ String header = line.SubStr(0, col_index);
+ String params;
+
+ // accept both "OutputFormat:json" and "OutputFormat: json"
+ if (line.GetLength() > col_index + 1)
+ params = line.SubStr(col_index + 1).Trim();
+
+ if (header == "ResponseHeader")
+ m_ResponseHeader = params;
+ else if (header == "OutputFormat")
+ m_OutputFormat = params;
+ else if (header == "KeepAlive")
+ m_KeepAlive = (params == "on");
+ else if (header == "Columns") {
+ m_ColumnHeaders = false; // Might be explicitly re-enabled later on
+ m_Columns = params.Split(" ");
+ } else if (header == "Separators") {
+ std::vector<String> separators = params.Split(" ");
+
+ /* the separators arrive as ASCII codes; convert each to a single character */
+ if (separators.size() > 0)
+ m_Separators[0] = String(1, static_cast<char>(Convert::ToLong(separators[0])));
+ if (separators.size() > 1)
+ m_Separators[1] = String(1, static_cast<char>(Convert::ToLong(separators[1])));
+ if (separators.size() > 2)
+ m_Separators[2] = String(1, static_cast<char>(Convert::ToLong(separators[2])));
+ if (separators.size() > 3)
+ m_Separators[3] = String(1, static_cast<char>(Convert::ToLong(separators[3])));
+ } else if (header == "ColumnHeaders")
+ m_ColumnHeaders = (params == "on");
+ else if (header == "Limit")
+ m_Limit = Convert::ToLong(params);
+ else if (header == "Filter") {
+ Filter::Ptr filter = ParseFilter(params, m_LogTimeFrom, m_LogTimeUntil);
+
+ if (!filter) {
+ m_Verb = "ERROR";
+ m_ErrorCode = LivestatusErrorQuery;
+ m_ErrorMessage = "Invalid filter specification: " + line;
+ return;
+ }
+
+ filters.push_back(filter);
+ } else if (header == "Stats") {
+ m_ColumnHeaders = false; // Might be explicitly re-enabled later on
+
+ std::vector<String> tokens = params.Split(" ");
+
+ if (tokens.size() < 2) {
+ m_Verb = "ERROR";
+ m_ErrorCode = LivestatusErrorQuery;
+ m_ErrorMessage = "Missing aggregator column name: " + line;
+ return;
+ }
+
+ String aggregate_arg = tokens[0];
+ String aggregate_attr = tokens[1];
+
+ Aggregator::Ptr aggregator;
+ Filter::Ptr filter;
+
+ if (aggregate_arg == "sum") {
+ aggregator = new SumAggregator(aggregate_attr);
+ } else if (aggregate_arg == "min") {
+ aggregator = new MinAggregator(aggregate_attr);
+ } else if (aggregate_arg == "max") {
+ aggregator = new MaxAggregator(aggregate_attr);
+ } else if (aggregate_arg == "avg") {
+ aggregator = new AvgAggregator(aggregate_attr);
+ } else if (aggregate_arg == "std") {
+ aggregator = new StdAggregator(aggregate_attr);
+ } else if (aggregate_arg == "suminv") {
+ aggregator = new InvSumAggregator(aggregate_attr);
+ } else if (aggregate_arg == "avginv") {
+ aggregator = new InvAvgAggregator(aggregate_attr);
+ } else {
+ filter = ParseFilter(params, m_LogTimeFrom, m_LogTimeUntil);
+
+ if (!filter) {
+ m_Verb = "ERROR";
+ m_ErrorCode = LivestatusErrorQuery;
+ m_ErrorMessage = "Invalid filter specification: " + line;
+ return;
+ }
+
+ aggregator = new CountAggregator();
+ }
+
+ aggregator->SetFilter(filter);
+ aggregators.push_back(aggregator);
+
+ stats.push_back(filter);
+ } else if (header == "Or" || header == "And" || header == "StatsOr" || header == "StatsAnd") {
+ std::deque<Filter::Ptr>& deq = (header == "Or" || header == "And") ? filters : stats;
+
+ unsigned int num = Convert::ToLong(params);
+ CombinerFilter::Ptr filter;
+
+ if (header == "Or" || header == "StatsOr") {
+ filter = new OrFilter();
+ Log(LogDebug, "LivestatusQuery")
+ << "Add OR filter for " << params << " column(s). " << deq.size() << " filters available.";
+ } else {
+ filter = new AndFilter();
+ Log(LogDebug, "LivestatusQuery")
+ << "Add AND filter for " << params << " column(s). " << deq.size() << " filters available.";
+ }
+
+ if (num > deq.size()) {
+ m_Verb = "ERROR";
+ m_ErrorCode = 451;
+ m_ErrorMessage = "Or/StatsOr is referencing " + Convert::ToString(num) + " filters; stack only contains " + Convert::ToString(static_cast<long>(deq.size())) + " filters";
+ return;
+ }
+
+ while (num > 0 && num--) {
+ filter->AddSubFilter(deq.back());
+ Log(LogDebug, "LivestatusQuery")
+ << "Add " << num << " filter.";
+ deq.pop_back();
+ if (&deq == &stats)
+ aggregators.pop_back();
+ }
+
+ deq.emplace_back(filter);
+ if (&deq == &stats) {
+ Aggregator::Ptr aggregator = new CountAggregator();
+ aggregator->SetFilter(filter);
+ aggregators.push_back(aggregator);
+ }
+ } else if (header == "Negate" || header == "StatsNegate") {
+ std::deque<Filter::Ptr>& deq = (header == "Negate") ? filters : stats;
+
+ if (deq.empty()) {
+ m_Verb = "ERROR";
+ m_ErrorCode = 451;
+ m_ErrorMessage = "Negate/StatsNegate used, however the filter stack is empty";
+ return;
+ }
+
+ Filter::Ptr filter = deq.back();
+ deq.pop_back();
+
+ if (!filter) {
+ m_Verb = "ERROR";
+ m_ErrorCode = 451;
+ m_ErrorMessage = "Negate/StatsNegate used, however last stats doesn't have a filter";
+ return;
+ }
+
+ deq.push_back(new NegateFilter(filter));
+
+ if (deq == stats) {
+ Aggregator::Ptr aggregator = aggregators.back();
+ aggregator->SetFilter(filter);
+ }
+ }
+ }
+
+ /* Combine all top-level filters into a single filter. */
+ AndFilter::Ptr top_filter = new AndFilter();
+
+ for (const Filter::Ptr& filter : filters) {
+ top_filter->AddSubFilter(filter);
+ }
+
+ m_Filter = top_filter;
+ m_Aggregators.swap(aggregators);
+}
+
+int LivestatusQuery::GetExternalCommands()
+{
+ std::unique_lock<std::mutex> lock(l_QueryMutex);
+
+ return l_ExternalCommands;
+}
+
+Filter::Ptr LivestatusQuery::ParseFilter(const String& params, unsigned long& from, unsigned long& until)
+{
+ /*
+ * time >= 1382696656
+ * type = SERVICE FLAPPING ALERT
+ */
+ std::vector<String> tokens;
+ size_t sp_index;
+ String temp_buffer = params;
+
+ /* extract attr and op */
+ for (int i = 0; i < 2; i++) {
+ sp_index = temp_buffer.FindFirstOf(" ");
+
+ /* check if this is the last argument */
+ if (sp_index == String::NPos) {
+ /* 'attr op' or 'attr op val' is valid */
+ if (i < 1)
+ BOOST_THROW_EXCEPTION(std::runtime_error("Livestatus filter '" + params + "' does not contain all required fields."));
+
+ break;
+ }
+
+ tokens.emplace_back(temp_buffer.SubStr(0, sp_index));
+ temp_buffer = temp_buffer.SubStr(sp_index + 1);
+ }
+
+ /* add the rest as value */
+ tokens.emplace_back(std::move(temp_buffer));
+
+ if (tokens.size() == 2)
+ tokens.emplace_back("");
+
+ if (tokens.size() < 3)
+ return nullptr;
+
+ bool negate = false;
+ String attr = tokens[0];
+ String op = tokens[1];
+ String val = tokens[2];
+
+ if (op == "!=") {
+ op = "=";
+ negate = true;
+ } else if (op == "!~") {
+ op = "~";
+ negate = true;
+ } else if (op == "!=~") {
+ op = "=~";
+ negate = true;
+ } else if (op == "!~~") {
+ op = "~~";
+ negate = true;
+ }
+
+ Filter::Ptr filter = new AttributeFilter(attr, op, val);
+
+ if (negate)
+ filter = new NegateFilter(filter);
+
+ /* pre-filter log time duration */
+ if (attr == "time") {
+ if (op == "<" || op == "<=") {
+ until = Convert::ToLong(val);
+ } else if (op == ">" || op == ">=") {
+ from = Convert::ToLong(val);
+ }
+ }
+
+ Log(LogDebug, "LivestatusQuery")
+ << "Parsed filter with attr: '" << attr << "' op: '" << op << "' val: '" << val << "'.";
+
+ return filter;
+}
+
+void LivestatusQuery::BeginResultSet(std::ostream& fp) const
+{
+ if (m_OutputFormat == "json" || m_OutputFormat == "python")
+ fp << "[";
+}
+
+void LivestatusQuery::EndResultSet(std::ostream& fp) const
+{
+ if (m_OutputFormat == "json" || m_OutputFormat == "python")
+ fp << "]";
+}
+
+void LivestatusQuery::AppendResultRow(std::ostream& fp, const Array::Ptr& row, bool& first_row) const
+{
+ if (m_OutputFormat == "csv") {
+ bool first = true;
+
+ ObjectLock rlock(row);
+ for (const Value& value : row) {
+ if (first)
+ first = false;
+ else
+ fp << m_Separators[1];
+
+ if (value.IsObjectType<Array>())
+ PrintCsvArray(fp, value, 0);
+ else
+ fp << value;
+ }
+
+ fp << m_Separators[0];
+ } else if (m_OutputFormat == "json") {
+ if (!first_row)
+ fp << ", ";
+
+ fp << JsonEncode(row);
+ } else if (m_OutputFormat == "python") {
+ if (!first_row)
+ fp << ", ";
+
+ PrintPythonArray(fp, row);
+ }
+
+ first_row = false;
+}
+
+void LivestatusQuery::PrintCsvArray(std::ostream& fp, const Array::Ptr& array, int level) const
+{
+ bool first = true;
+
+ ObjectLock olock(array);
+ for (const Value& value : array) {
+ if (first)
+ first = false;
+ else
+ fp << ((level == 0) ? m_Separators[2] : m_Separators[3]);
+
+ if (value.IsObjectType<Array>())
+ PrintCsvArray(fp, value, level + 1);
+ else if (value.IsBoolean())
+ fp << Convert::ToLong(value);
+ else
+ fp << value;
+ }
+}
+
+void LivestatusQuery::PrintPythonArray(std::ostream& fp, const Array::Ptr& rs) const
+{
+ fp << "[ ";
+
+ bool first = true;
+
+ for (const Value& value : rs) {
+ if (first)
+ first = false;
+ else
+ fp << ", ";
+
+ if (value.IsObjectType<Array>())
+ PrintPythonArray(fp, value);
+ else if (value.IsNumber())
+ fp << value;
+ else
+ fp << QuoteStringPython(value);
+ }
+
+ fp << " ]";
+}
+
+String LivestatusQuery::QuoteStringPython(const String& str) {
+ String result = str;
+ boost::algorithm::replace_all(result, "\"", "\\\"");
+ return "r\"" + result + "\"";
+}
+
+void LivestatusQuery::ExecuteGetHelper(const Stream::Ptr& stream)
+{
+ Log(LogNotice, "LivestatusQuery")
+ << "Table: " << m_Table;
+
+ Table::Ptr table = Table::GetByName(m_Table, m_CompatLogPath, m_LogTimeFrom, m_LogTimeUntil);
+
+ if (!table) {
+ SendResponse(stream, LivestatusErrorNotFound, "Table '" + m_Table + "' does not exist.");
+
+ return;
+ }
+
+ std::vector<LivestatusRowValue> objects = table->FilterRows(m_Filter, m_Limit);
+ std::vector<String> columns;
+
+ if (m_Columns.size() > 0)
+ columns = m_Columns;
+ else
+ columns = table->GetColumnNames();
+
+ std::ostringstream result;
+ bool first_row = true;
+ BeginResultSet(result);
+
+ if (m_Aggregators.empty()) {
+ typedef std::pair<String, Column> ColumnPair;
+
+ std::vector<ColumnPair> column_objs;
+ column_objs.reserve(columns.size());
+
+ for (const String& columnName : columns)
+ column_objs.emplace_back(columnName, table->GetColumn(columnName));
+
+ ArrayData header;
+
+ for (const LivestatusRowValue& object : objects) {
+ ArrayData row;
+
+ row.reserve(column_objs.size());
+
+ for (const ColumnPair& cv : column_objs) {
+ if (m_ColumnHeaders)
+ header.push_back(cv.first);
+
+ row.push_back(cv.second.ExtractValue(object.Row, object.GroupByType, object.GroupByObject));
+ }
+
+ if (m_ColumnHeaders) {
+ AppendResultRow(result, new Array(std::move(header)), first_row);
+ m_ColumnHeaders = false;
+ }
+
+ AppendResultRow(result, new Array(std::move(row)), first_row);
+ }
+ } else {
+ std::map<std::vector<Value>, std::vector<AggregatorState *> > allStats;
+
+ /* add aggregated stats */
+ for (const LivestatusRowValue& object : objects) {
+ std::vector<Value> statsKey;
+
+ for (const String& columnName : m_Columns) {
+ Column column = table->GetColumn(columnName);
+ statsKey.emplace_back(column.ExtractValue(object.Row, object.GroupByType, object.GroupByObject));
+ }
+
+ auto it = allStats.find(statsKey);
+
+ if (it == allStats.end()) {
+ std::vector<AggregatorState *> newStats(m_Aggregators.size(), nullptr);
+ it = allStats.insert(std::make_pair(statsKey, newStats)).first;
+ }
+
+ auto& stats = it->second;
+
+ int index = 0;
+
+ for (const Aggregator::Ptr& aggregator : m_Aggregators) {
+ aggregator->Apply(table, object.Row, &stats[index]);
+ index++;
+ }
+ }
+
+ /* add column headers both for raw and aggregated data */
+ if (m_ColumnHeaders) {
+ ArrayData header;
+
+ for (const String& columnName : m_Columns) {
+ header.push_back(columnName);
+ }
+
+ for (size_t i = 1; i <= m_Aggregators.size(); i++) {
+ header.push_back("stats_" + Convert::ToString(i));
+ }
+
+ AppendResultRow(result, new Array(std::move(header)), first_row);
+ }
+
+ for (const auto& kv : allStats) {
+ ArrayData row;
+
+ row.reserve(m_Columns.size() + m_Aggregators.size());
+
+ for (const Value& keyPart : kv.first) {
+ row.push_back(keyPart);
+ }
+
+ auto& stats = kv.second;
+
+ for (size_t i = 0; i < m_Aggregators.size(); i++)
+ row.push_back(m_Aggregators[i]->GetResultAndFreeState(stats[i]));
+
+ AppendResultRow(result, new Array(std::move(row)), first_row);
+ }
+
+ /* add a bogus zero-value row if the aggregated result set is empty */
+ if (allStats.empty()) {
+ ArrayData row;
+
+ row.reserve(m_Aggregators.size());
+
+ for (size_t i = 1; i <= m_Aggregators.size(); i++) {
+ row.push_back(0);
+ }
+
+ AppendResultRow(result, new Array(std::move(row)), first_row);
+ }
+ }
+
+ EndResultSet(result);
+
+ SendResponse(stream, LivestatusErrorOK, result.str());
+}
+
+void LivestatusQuery::ExecuteCommandHelper(const Stream::Ptr& stream)
+{
+ {
+ std::unique_lock<std::mutex> lock(l_QueryMutex);
+
+ l_ExternalCommands++;
+ }
+
+ Log(LogNotice, "LivestatusQuery")
+ << "Executing command: " << m_Command;
+ ExternalCommandProcessor::Execute(m_Command);
+ SendResponse(stream, LivestatusErrorOK, "");
+}
+
+void LivestatusQuery::ExecuteErrorHelper(const Stream::Ptr& stream)
+{
+ Log(LogDebug, "LivestatusQuery")
+ << "ERROR: Code: '" << m_ErrorCode << "' Message: '" << m_ErrorMessage << "'.";
+ SendResponse(stream, m_ErrorCode, m_ErrorMessage);
+}
+
+void LivestatusQuery::SendResponse(const Stream::Ptr& stream, int code, const String& data)
+{
+ if (m_ResponseHeader == "fixed16")
+ PrintFixed16(stream, code, data);
+
+ if (m_ResponseHeader == "fixed16" || code == LivestatusErrorOK) {
+ try {
+ stream->Write(data.CStr(), data.GetLength());
+ } catch (const std::exception&) {
+ Log(LogCritical, "LivestatusQuery", "Cannot write query response to socket.");
+ }
+ }
+}
+
+void LivestatusQuery::PrintFixed16(const Stream::Ptr& stream, int code, const String& data)
+{
+ ASSERT(code >= 100 && code <= 999);
+
+ String sCode = Convert::ToString(code);
+ String sLength = Convert::ToString(static_cast<long>(data.GetLength()));
+
+ String header = sCode + String(16 - 3 - sLength.GetLength() - 1, ' ') + sLength + m_Separators[0];
+
+ try {
+ stream->Write(header.CStr(), header.GetLength());
+ } catch (const std::exception&) {
+ Log(LogCritical, "LivestatusQuery", "Cannot write to TCP socket.");
+ }
+}
+
+bool LivestatusQuery::Execute(const Stream::Ptr& stream)
+{
+ try {
+ Log(LogNotice, "LivestatusQuery")
+ << "Executing livestatus query: " << m_Verb;
+
+ if (m_Verb == "GET")
+ ExecuteGetHelper(stream);
+ else if (m_Verb == "COMMAND")
+ ExecuteCommandHelper(stream);
+ else if (m_Verb == "ERROR")
+ ExecuteErrorHelper(stream);
+ else
+ BOOST_THROW_EXCEPTION(std::runtime_error("Invalid livestatus query verb."));
+ } catch (const std::exception& ex) {
+ SendResponse(stream, LivestatusErrorQuery, DiagnosticInformation(ex));
+ }
+
+ if (!m_KeepAlive) {
+ stream->Close();
+ return false;
+ }
+
+ return true;
+}
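When a client asks for "ResponseHeader: fixed16", SendResponse() prefixes every reply with a fixed 16-byte header: a 3-digit status code, space padding, the payload length and the line separator. The framing can be reproduced from PrintFixed16() as in this sketch (payload chosen only for illustration):

    // Sketch of the fixed16 framing computed by PrintFixed16() above.
    int code = 200;
    String data = "[[\"host1\",0]]\n";   // 14-byte example payload
    String sLength = Convert::ToString(static_cast<long>(data.GetLength()));
    String header = Convert::ToString(code)
        + String(16 - 3 - sLength.GetLength() - 1, ' ')  // pad to 16 bytes total
        + sLength + "\n";
    // header is "200", ten spaces, "14" and a newline; the payload follows immediately.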
diff --git a/lib/livestatus/livestatusquery.hpp b/lib/livestatus/livestatusquery.hpp
new file mode 100644
index 0000000..910cc16
--- /dev/null
+++ b/lib/livestatus/livestatusquery.hpp
@@ -0,0 +1,90 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef LIVESTATUSQUERY_H
+#define LIVESTATUSQUERY_H
+
+#include "livestatus/filter.hpp"
+#include "livestatus/aggregator.hpp"
+#include "base/object.hpp"
+#include "base/array.hpp"
+#include "base/stream.hpp"
+#include "base/scriptframe.hpp"
+#include <deque>
+
+using namespace icinga;
+
+namespace icinga
+{
+
+enum LivestatusError
+{
+ LivestatusErrorOK = 200,
+ LivestatusErrorNotFound = 404,
+ LivestatusErrorQuery = 452
+};
+
+/**
+ * @ingroup livestatus
+ */
+class LivestatusQuery final : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(LivestatusQuery);
+
+ LivestatusQuery(const std::vector<String>& lines, const String& compat_log_path);
+
+ bool Execute(const Stream::Ptr& stream);
+
+ static int GetExternalCommands();
+
+private:
+ String m_Verb;
+
+ bool m_KeepAlive;
+
+ /* Parameters for GET queries. */
+ String m_Table;
+ std::vector<String> m_Columns;
+ std::vector<String> m_Separators;
+
+ Filter::Ptr m_Filter;
+ std::deque<Aggregator::Ptr> m_Aggregators;
+
+ String m_OutputFormat;
+ bool m_ColumnHeaders;
+ int m_Limit;
+
+ String m_ResponseHeader;
+
+ /* Parameters for COMMAND/SCRIPT queries. */
+ String m_Command;
+ String m_Session;
+
+ /* Parameters for invalid queries. */
+ int m_ErrorCode;
+ String m_ErrorMessage;
+
+ unsigned long m_LogTimeFrom;
+ unsigned long m_LogTimeUntil;
+ String m_CompatLogPath;
+
+ void BeginResultSet(std::ostream& fp) const;
+ void EndResultSet(std::ostream& fp) const;
+ void AppendResultRow(std::ostream& fp, const Array::Ptr& row, bool& first_row) const;
+ void PrintCsvArray(std::ostream& fp, const Array::Ptr& array, int level) const;
+ void PrintPythonArray(std::ostream& fp, const Array::Ptr& array) const;
+ static String QuoteStringPython(const String& str);
+
+ void ExecuteGetHelper(const Stream::Ptr& stream);
+ void ExecuteCommandHelper(const Stream::Ptr& stream);
+ void ExecuteErrorHelper(const Stream::Ptr& stream);
+
+ void SendResponse(const Stream::Ptr& stream, int code, const String& data);
+ void PrintFixed16(const Stream::Ptr& stream, int code, const String& data);
+
+ static Filter::Ptr ParseFilter(const String& params, unsigned long& from, unsigned long& until);
+};
+
+}
+
+#endif /* LIVESTATUSQUERY_H */
diff --git a/lib/livestatus/logtable.cpp b/lib/livestatus/logtable.cpp
new file mode 100644
index 0000000..c1358dd
--- /dev/null
+++ b/lib/livestatus/logtable.cpp
@@ -0,0 +1,229 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/logtable.hpp"
+#include "livestatus/livestatuslogutility.hpp"
+#include "livestatus/hoststable.hpp"
+#include "livestatus/servicestable.hpp"
+#include "livestatus/contactstable.hpp"
+#include "livestatus/commandstable.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "icinga/cib.hpp"
+#include "icinga/service.hpp"
+#include "icinga/host.hpp"
+#include "icinga/user.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/eventcommand.hpp"
+#include "icinga/notificationcommand.hpp"
+#include "base/convert.hpp"
+#include "base/utility.hpp"
+#include "base/logger.hpp"
+#include "base/application.hpp"
+#include "base/objectlock.hpp"
+#include <boost/algorithm/string.hpp>
+#include <boost/algorithm/string/replace.hpp>
+#include <boost/algorithm/string/predicate.hpp>
+#include <fstream>
+
+using namespace icinga;
+
+LogTable::LogTable(const String& compat_log_path, time_t from, time_t until)
+{
+ /* store attributes for FetchRows */
+ m_TimeFrom = from;
+ m_TimeUntil = until;
+ m_CompatLogPath = compat_log_path;
+
+ AddColumns(this);
+}
+
+void LogTable::AddColumns(Table *table, const String& prefix,
+ const Column::ObjectAccessor& objectAccessor)
+{
+ table->AddColumn(prefix + "time", Column(&LogTable::TimeAccessor, objectAccessor));
+ table->AddColumn(prefix + "lineno", Column(&LogTable::LinenoAccessor, objectAccessor));
+ table->AddColumn(prefix + "class", Column(&LogTable::ClassAccessor, objectAccessor));
+ table->AddColumn(prefix + "message", Column(&LogTable::MessageAccessor, objectAccessor));
+ table->AddColumn(prefix + "type", Column(&LogTable::TypeAccessor, objectAccessor));
+ table->AddColumn(prefix + "options", Column(&LogTable::OptionsAccessor, objectAccessor));
+ table->AddColumn(prefix + "comment", Column(&LogTable::CommentAccessor, objectAccessor));
+ table->AddColumn(prefix + "plugin_output", Column(&LogTable::PluginOutputAccessor, objectAccessor));
+ table->AddColumn(prefix + "state", Column(&LogTable::StateAccessor, objectAccessor));
+ table->AddColumn(prefix + "state_type", Column(&LogTable::StateTypeAccessor, objectAccessor));
+ table->AddColumn(prefix + "attempt", Column(&LogTable::AttemptAccessor, objectAccessor));
+ table->AddColumn(prefix + "service_description", Column(&LogTable::ServiceDescriptionAccessor, objectAccessor));
+ table->AddColumn(prefix + "host_name", Column(&LogTable::HostNameAccessor, objectAccessor));
+ table->AddColumn(prefix + "contact_name", Column(&LogTable::ContactNameAccessor, objectAccessor));
+ table->AddColumn(prefix + "command_name", Column(&LogTable::CommandNameAccessor, objectAccessor));
+
+ HostsTable::AddColumns(table, "current_host_", [objectAccessor](const Value& row, LivestatusGroupByType, const Object::Ptr&) -> Value {
+ return HostAccessor(row, objectAccessor);
+ });
+ ServicesTable::AddColumns(table, "current_service_", [objectAccessor](const Value& row, LivestatusGroupByType, const Object::Ptr&) -> Value {
+ return ServiceAccessor(row, objectAccessor);
+ });
+ ContactsTable::AddColumns(table, "current_contact_", [objectAccessor](const Value& row, LivestatusGroupByType, const Object::Ptr&) -> Value {
+ return ContactAccessor(row, objectAccessor);
+ });
+ CommandsTable::AddColumns(table, "current_command_", [objectAccessor](const Value& row, LivestatusGroupByType, const Object::Ptr&) -> Value {
+ return CommandAccessor(row, objectAccessor);
+ });
+}
+
+String LogTable::GetName() const
+{
+ return "log";
+}
+
+String LogTable::GetPrefix() const
+{
+ return "log";
+}
+
+void LogTable::FetchRows(const AddRowFunction& addRowFn)
+{
+ Log(LogDebug, "LogTable")
+ << "Pre-selecting log file from " << m_TimeFrom << " until " << m_TimeUntil;
+
+ /* create log file index */
+ LivestatusLogUtility::CreateLogIndex(m_CompatLogPath, m_LogFileIndex);
+
+ /* generate log cache */
+ LivestatusLogUtility::CreateLogCache(m_LogFileIndex, this, m_TimeFrom, m_TimeUntil, addRowFn);
+}
+
+/* gets called in LivestatusLogUtility::CreateLogCache */
+void LogTable::UpdateLogEntries(const Dictionary::Ptr& log_entry_attrs, int line_count, int lineno, const AddRowFunction& addRowFn)
+{
+ /* additional attributes only for log table */
+ log_entry_attrs->Set("lineno", lineno);
+
+ addRowFn(log_entry_attrs, LivestatusGroupByNone, Empty);
+}
+
+Object::Ptr LogTable::HostAccessor(const Value& row, const Column::ObjectAccessor&)
+{
+ String host_name = static_cast<Dictionary::Ptr>(row)->Get("host_name");
+
+ if (host_name.IsEmpty())
+ return nullptr;
+
+ return Host::GetByName(host_name);
+}
+
+Object::Ptr LogTable::ServiceAccessor(const Value& row, const Column::ObjectAccessor&)
+{
+ String host_name = static_cast<Dictionary::Ptr>(row)->Get("host_name");
+ String service_description = static_cast<Dictionary::Ptr>(row)->Get("service_description");
+
+ if (service_description.IsEmpty() || host_name.IsEmpty())
+ return nullptr;
+
+ return Service::GetByNamePair(host_name, service_description);
+}
+
+Object::Ptr LogTable::ContactAccessor(const Value& row, const Column::ObjectAccessor&)
+{
+ String contact_name = static_cast<Dictionary::Ptr>(row)->Get("contact_name");
+
+ if (contact_name.IsEmpty())
+ return nullptr;
+
+ return User::GetByName(contact_name);
+}
+
+Object::Ptr LogTable::CommandAccessor(const Value& row, const Column::ObjectAccessor&)
+{
+ String command_name = static_cast<Dictionary::Ptr>(row)->Get("command_name");
+
+ if (command_name.IsEmpty())
+ return nullptr;
+
+ CheckCommand::Ptr check_command = CheckCommand::GetByName(command_name);
+ if (!check_command) {
+ EventCommand::Ptr event_command = EventCommand::GetByName(command_name);
+ if (!event_command) {
+ NotificationCommand::Ptr notification_command = NotificationCommand::GetByName(command_name);
+ if (!notification_command)
+ return nullptr;
+ else
+ return notification_command;
+ } else
+ return event_command;
+ } else
+ return check_command;
+}
+
+Value LogTable::TimeAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("time");
+}
+
+Value LogTable::LinenoAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("lineno");
+}
+
+Value LogTable::ClassAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("class");
+}
+
+Value LogTable::MessageAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("message");
+}
+
+Value LogTable::TypeAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("type");
+}
+
+Value LogTable::OptionsAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("options");
+}
+
+Value LogTable::CommentAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("comment");
+}
+
+Value LogTable::PluginOutputAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("plugin_output");
+}
+
+Value LogTable::StateAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("state");
+}
+
+Value LogTable::StateTypeAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("state_type");
+}
+
+Value LogTable::AttemptAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("attempt");
+}
+
+Value LogTable::ServiceDescriptionAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("service_description");
+}
+
+Value LogTable::HostNameAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("host_name");
+}
+
+Value LogTable::ContactNameAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("contact_name");
+}
+
+Value LogTable::CommandNameAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("command_name");
+}
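LogTable never keeps the whole history in memory: FetchRows() first indexes the compat log files by their start timestamp and then streams only the files overlapping the requested window, which ParseFilter() derived from the time filters of the query. A typical request against this table looks like the following (timestamps are placeholders):

    GET log
    Filter: time >= 1700000000
    Filter: time <= 1700003600
    Columns: time type host_name message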
diff --git a/lib/livestatus/logtable.hpp b/lib/livestatus/logtable.hpp
new file mode 100644
index 0000000..7a89310
--- /dev/null
+++ b/lib/livestatus/logtable.hpp
@@ -0,0 +1,65 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef LOGTABLE_H
+#define LOGTABLE_H
+
+#include "livestatus/historytable.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class LogTable final : public HistoryTable
+{
+public:
+ DECLARE_PTR_TYPEDEFS(LogTable);
+
+ LogTable(const String& compat_log_path, time_t from, time_t until);
+
+ static void AddColumns(Table *table, const String& prefix = String(),
+ const Column::ObjectAccessor& objectAccessor = Column::ObjectAccessor());
+
+ String GetName() const override;
+ String GetPrefix() const override;
+
+ void UpdateLogEntries(const Dictionary::Ptr& log_entry_attrs, int line_count, int lineno, const AddRowFunction& addRowFn) override;
+
+protected:
+ void FetchRows(const AddRowFunction& addRowFn) override;
+
+ static Object::Ptr HostAccessor(const Value& row, const Column::ObjectAccessor& parentObjectAccessor);
+ static Object::Ptr ServiceAccessor(const Value& row, const Column::ObjectAccessor& parentObjectAccessor);
+ static Object::Ptr ContactAccessor(const Value& row, const Column::ObjectAccessor& parentObjectAccessor);
+ static Object::Ptr CommandAccessor(const Value& row, const Column::ObjectAccessor& parentObjectAccessor);
+
+ static Value TimeAccessor(const Value& row);
+ static Value LinenoAccessor(const Value& row);
+ static Value ClassAccessor(const Value& row);
+ static Value MessageAccessor(const Value& row);
+ static Value TypeAccessor(const Value& row);
+ static Value OptionsAccessor(const Value& row);
+ static Value CommentAccessor(const Value& row);
+ static Value PluginOutputAccessor(const Value& row);
+ static Value StateAccessor(const Value& row);
+ static Value StateTypeAccessor(const Value& row);
+ static Value AttemptAccessor(const Value& row);
+ static Value ServiceDescriptionAccessor(const Value& row);
+ static Value HostNameAccessor(const Value& row);
+ static Value ContactNameAccessor(const Value& row);
+ static Value CommandNameAccessor(const Value& row);
+
+private:
+ std::map<time_t, String> m_LogFileIndex;
+ std::map<time_t, Dictionary::Ptr> m_RowsCache;
+ time_t m_TimeFrom;
+ time_t m_TimeUntil;
+ String m_CompatLogPath;
+};
+
+}
+
+#endif /* LOGTABLE_H */
diff --git a/lib/livestatus/maxaggregator.cpp b/lib/livestatus/maxaggregator.cpp
new file mode 100644
index 0000000..375d24b
--- /dev/null
+++ b/lib/livestatus/maxaggregator.cpp
@@ -0,0 +1,38 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/maxaggregator.hpp"
+
+using namespace icinga;
+
+MaxAggregator::MaxAggregator(String attr)
+ : m_MaxAttr(std::move(attr))
+{ }
+
+MaxAggregatorState *MaxAggregator::EnsureState(AggregatorState **state)
+{
+ if (!*state)
+ *state = new MaxAggregatorState();
+
+ return static_cast<MaxAggregatorState *>(*state);
+}
+
+void MaxAggregator::Apply(const Table::Ptr& table, const Value& row, AggregatorState **state)
+{
+ Column column = table->GetColumn(m_MaxAttr);
+
+ Value value = column.ExtractValue(row);
+
+ MaxAggregatorState *pstate = EnsureState(state);
+
+ if (value > pstate->Max)
+ pstate->Max = value;
+}
+
+double MaxAggregator::GetResultAndFreeState(AggregatorState *state) const
+{
+ MaxAggregatorState *pstate = EnsureState(&state);
+ double result = pstate->Max;
+ delete pstate;
+
+ return result;
+}
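All aggregators follow the same state pattern: Apply() lazily allocates the per-group state through EnsureState() and folds one row into it, and GetResultAndFreeState() returns the final value and deletes the state. ExecuteGetHelper() drives them roughly like this (the column name is an assumption for illustration):

    AggregatorState *state = nullptr;
    Aggregator::Ptr agg = new MaxAggregator("execution_time");  // example column
    for (const LivestatusRowValue& object : objects)
        agg->Apply(table, object.Row, &state);          // first call allocates the state
    double result = agg->GetResultAndFreeState(state);  // maximum seen; state is freed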
diff --git a/lib/livestatus/maxaggregator.hpp b/lib/livestatus/maxaggregator.hpp
new file mode 100644
index 0000000..5bff5f9
--- /dev/null
+++ b/lib/livestatus/maxaggregator.hpp
@@ -0,0 +1,41 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef MAXAGGREGATOR_H
+#define MAXAGGREGATOR_H
+
+#include "livestatus/table.hpp"
+#include "livestatus/aggregator.hpp"
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+struct MaxAggregatorState final : public AggregatorState
+{
+ double Max{0};
+};
+
+/**
+ * @ingroup livestatus
+ */
+class MaxAggregator final : public Aggregator
+{
+public:
+ DECLARE_PTR_TYPEDEFS(MaxAggregator);
+
+ MaxAggregator(String attr);
+
+ void Apply(const Table::Ptr& table, const Value& row, AggregatorState **state) override;
+ double GetResultAndFreeState(AggregatorState *state) const override;
+
+private:
+ String m_MaxAttr;
+
+ static MaxAggregatorState *EnsureState(AggregatorState **state);
+};
+
+}
+
+#endif /* MAXAGGREGATOR_H */
diff --git a/lib/livestatus/minaggregator.cpp b/lib/livestatus/minaggregator.cpp
new file mode 100644
index 0000000..06cb76e
--- /dev/null
+++ b/lib/livestatus/minaggregator.cpp
@@ -0,0 +1,45 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/minaggregator.hpp"
+
+using namespace icinga;
+
+MinAggregator::MinAggregator(String attr)
+ : m_MinAttr(std::move(attr))
+{ }
+
+MinAggregatorState *MinAggregator::EnsureState(AggregatorState **state)
+{
+ if (!*state)
+ *state = new MinAggregatorState();
+
+ return static_cast<MinAggregatorState *>(*state);
+}
+
+void MinAggregator::Apply(const Table::Ptr& table, const Value& row, AggregatorState **state)
+{
+ Column column = table->GetColumn(m_MinAttr);
+
+ Value value = column.ExtractValue(row);
+
+ MinAggregatorState *pstate = EnsureState(state);
+
+ if (value < pstate->Min)
+ pstate->Min = value;
+}
+
+double MinAggregator::GetResultAndFreeState(AggregatorState *state) const
+{
+ MinAggregatorState *pstate = EnsureState(&state);
+
+ double result;
+
+ if (pstate->Min == DBL_MAX)
+ result = 0;
+ else
+ result = pstate->Min;
+
+ delete pstate;
+
+ return result;
+}
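Unlike MaxAggregator, whose state starts at 0, MinAggregator seeds its state with DBL_MAX and maps an untouched state back to 0 in GetResultAndFreeState(), so a "Stats: min ..." over an empty result set still yields 0 rather than DBL_MAX.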
diff --git a/lib/livestatus/minaggregator.hpp b/lib/livestatus/minaggregator.hpp
new file mode 100644
index 0000000..71a9d89
--- /dev/null
+++ b/lib/livestatus/minaggregator.hpp
@@ -0,0 +1,42 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef MINAGGREGATOR_H
+#define MINAGGREGATOR_H
+
+#include "livestatus/table.hpp"
+#include "livestatus/aggregator.hpp"
+#include <cfloat>
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+struct MinAggregatorState final : public AggregatorState
+{
+ double Min{DBL_MAX};
+};
+
+/**
+ * @ingroup livestatus
+ */
+class MinAggregator final : public Aggregator
+{
+public:
+ DECLARE_PTR_TYPEDEFS(MinAggregator);
+
+ MinAggregator(String attr);
+
+ void Apply(const Table::Ptr& table, const Value& row, AggregatorState **state) override;
+ double GetResultAndFreeState(AggregatorState *state) const override;
+
+private:
+ String m_MinAttr;
+
+ static MinAggregatorState *EnsureState(AggregatorState **state);
+};
+
+}
+
+#endif /* MINAGGREGATOR_H */
diff --git a/lib/livestatus/negatefilter.cpp b/lib/livestatus/negatefilter.cpp
new file mode 100644
index 0000000..60202b4
--- /dev/null
+++ b/lib/livestatus/negatefilter.cpp
@@ -0,0 +1,14 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/negatefilter.hpp"
+
+using namespace icinga;
+
+NegateFilter::NegateFilter(Filter::Ptr inner)
+ : m_Inner(std::move(inner))
+{ }
+
+bool NegateFilter::Apply(const Table::Ptr& table, const Value& row)
+{
+ return !m_Inner->Apply(table, row);
+}
diff --git a/lib/livestatus/negatefilter.hpp b/lib/livestatus/negatefilter.hpp
new file mode 100644
index 0000000..c08943c
--- /dev/null
+++ b/lib/livestatus/negatefilter.hpp
@@ -0,0 +1,31 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef NEGATEFILTER_H
+#define NEGATEFILTER_H
+
+#include "livestatus/filter.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class NegateFilter final : public Filter
+{
+public:
+ DECLARE_PTR_TYPEDEFS(NegateFilter);
+
+ NegateFilter(Filter::Ptr inner);
+
+ bool Apply(const Table::Ptr& table, const Value& row) override;
+
+private:
+ Filter::Ptr m_Inner;
+};
+
+}
+
+#endif /* NEGATEFILTER_H */
diff --git a/lib/livestatus/orfilter.cpp b/lib/livestatus/orfilter.cpp
new file mode 100644
index 0000000..6cc446c
--- /dev/null
+++ b/lib/livestatus/orfilter.cpp
@@ -0,0 +1,18 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/orfilter.hpp"
+
+using namespace icinga;
+
+bool OrFilter::Apply(const Table::Ptr& table, const Value& row)
+{
+ if (m_Filters.empty())
+ return true;
+
+ for (const Filter::Ptr& filter : m_Filters) {
+ if (filter->Apply(table, row))
+ return true;
+ }
+
+ return false;
+}
diff --git a/lib/livestatus/orfilter.hpp b/lib/livestatus/orfilter.hpp
new file mode 100644
index 0000000..df855c1
--- /dev/null
+++ b/lib/livestatus/orfilter.hpp
@@ -0,0 +1,26 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef ORFILTER_H
+#define ORFILTER_H
+
+#include "livestatus/combinerfilter.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class OrFilter final : public CombinerFilter
+{
+public:
+ DECLARE_PTR_TYPEDEFS(OrFilter);
+
+ bool Apply(const Table::Ptr& table, const Value& row) override;
+};
+
+}
+
+#endif /* ORFILTER_H */
diff --git a/lib/livestatus/servicegroupstable.cpp b/lib/livestatus/servicegroupstable.cpp
new file mode 100644
index 0000000..38d6d05
--- /dev/null
+++ b/lib/livestatus/servicegroupstable.cpp
@@ -0,0 +1,323 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/servicegroupstable.hpp"
+#include "icinga/servicegroup.hpp"
+#include "base/configtype.hpp"
+
+using namespace icinga;
+
+ServiceGroupsTable::ServiceGroupsTable()
+{
+ AddColumns(this);
+}
+
+void ServiceGroupsTable::AddColumns(Table *table, const String& prefix,
+ const Column::ObjectAccessor& objectAccessor)
+{
+ table->AddColumn(prefix + "name", Column(&ServiceGroupsTable::NameAccessor, objectAccessor));
+ table->AddColumn(prefix + "alias", Column(&ServiceGroupsTable::AliasAccessor, objectAccessor));
+ table->AddColumn(prefix + "notes", Column(&ServiceGroupsTable::NotesAccessor, objectAccessor));
+ table->AddColumn(prefix + "notes_url", Column(&ServiceGroupsTable::NotesUrlAccessor, objectAccessor));
+ table->AddColumn(prefix + "action_url", Column(&ServiceGroupsTable::ActionUrlAccessor, objectAccessor));
+ table->AddColumn(prefix + "members", Column(&ServiceGroupsTable::MembersAccessor, objectAccessor));
+ table->AddColumn(prefix + "members_with_state", Column(&ServiceGroupsTable::MembersWithStateAccessor, objectAccessor));
+ table->AddColumn(prefix + "worst_service_state", Column(&ServiceGroupsTable::WorstServiceStateAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services", Column(&ServiceGroupsTable::NumServicesAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_ok", Column(&ServiceGroupsTable::NumServicesOkAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_warn", Column(&ServiceGroupsTable::NumServicesWarnAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_crit", Column(&ServiceGroupsTable::NumServicesCritAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_unknown", Column(&ServiceGroupsTable::NumServicesUnknownAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_pending", Column(&ServiceGroupsTable::NumServicesPendingAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_hard_ok", Column(&ServiceGroupsTable::NumServicesHardOkAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_hard_warn", Column(&ServiceGroupsTable::NumServicesHardWarnAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_hard_crit", Column(&ServiceGroupsTable::NumServicesHardCritAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services_hard_unknown", Column(&ServiceGroupsTable::NumServicesHardUnknownAccessor, objectAccessor));
+}
+
+String ServiceGroupsTable::GetName() const
+{
+ return "servicegroups";
+}
+
+String ServiceGroupsTable::GetPrefix() const
+{
+ return "servicegroup";
+}
+
+void ServiceGroupsTable::FetchRows(const AddRowFunction& addRowFn)
+{
+ for (const ServiceGroup::Ptr& sg : ConfigType::GetObjectsByType<ServiceGroup>()) {
+ if (!addRowFn(sg, LivestatusGroupByNone, Empty))
+ return;
+ }
+}
+
+Value ServiceGroupsTable::NameAccessor(const Value& row)
+{
+ ServiceGroup::Ptr sg = static_cast<ServiceGroup::Ptr>(row);
+
+ if (!sg)
+ return Empty;
+
+ return sg->GetName();
+}
+
+Value ServiceGroupsTable::AliasAccessor(const Value& row)
+{
+ ServiceGroup::Ptr sg = static_cast<ServiceGroup::Ptr>(row);
+
+ if (!sg)
+ return Empty;
+
+ return sg->GetDisplayName();
+}
+
+Value ServiceGroupsTable::NotesAccessor(const Value& row)
+{
+ ServiceGroup::Ptr sg = static_cast<ServiceGroup::Ptr>(row);
+
+ if (!sg)
+ return Empty;
+
+ return sg->GetNotes();
+}
+
+Value ServiceGroupsTable::NotesUrlAccessor(const Value& row)
+{
+ ServiceGroup::Ptr sg = static_cast<ServiceGroup::Ptr>(row);
+
+ if (!sg)
+ return Empty;
+
+ return sg->GetNotesUrl();
+}
+
+Value ServiceGroupsTable::ActionUrlAccessor(const Value& row)
+{
+ ServiceGroup::Ptr sg = static_cast<ServiceGroup::Ptr>(row);
+
+ if (!sg)
+ return Empty;
+
+ return sg->GetActionUrl();
+}
+
+Value ServiceGroupsTable::MembersAccessor(const Value& row)
+{
+ ServiceGroup::Ptr sg = static_cast<ServiceGroup::Ptr>(row);
+
+ if (!sg)
+ return Empty;
+
+ ArrayData result;
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ result.push_back(new Array({
+ service->GetHost()->GetName(),
+ service->GetShortName()
+ }));
+ }
+
+ return new Array(std::move(result));
+}
+
+Value ServiceGroupsTable::MembersWithStateAccessor(const Value& row)
+{
+ ServiceGroup::Ptr sg = static_cast<ServiceGroup::Ptr>(row);
+
+ if (!sg)
+ return Empty;
+
+ ArrayData result;
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ result.push_back(new Array({
+ service->GetHost()->GetName(),
+ service->GetShortName(),
+ service->GetHost()->GetState(),
+ service->GetState()
+ }));
+ }
+
+ return new Array(std::move(result));
+}
+
+Value ServiceGroupsTable::WorstServiceStateAccessor(const Value& row)
+{
+ ServiceGroup::Ptr sg = static_cast<ServiceGroup::Ptr>(row);
+
+ if (!sg)
+ return Empty;
+
+ Value worst_service = ServiceOK;
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ if (service->GetState() > worst_service)
+ worst_service = service->GetState();
+ }
+
+ return worst_service;
+}
+
+Value ServiceGroupsTable::NumServicesAccessor(const Value& row)
+{
+ ServiceGroup::Ptr sg = static_cast<ServiceGroup::Ptr>(row);
+
+ if (!sg)
+ return Empty;
+
+ return sg->GetMembers().size();
+}
+
+Value ServiceGroupsTable::NumServicesOkAccessor(const Value& row)
+{
+ ServiceGroup::Ptr sg = static_cast<ServiceGroup::Ptr>(row);
+
+ if (!sg)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ if (service->GetState() == ServiceOK)
+ num_services++;
+ }
+
+ return num_services;
+}
+
+Value ServiceGroupsTable::NumServicesWarnAccessor(const Value& row)
+{
+ ServiceGroup::Ptr sg = static_cast<ServiceGroup::Ptr>(row);
+
+ if (!sg)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ if (service->GetState() == ServiceWarning)
+ num_services++;
+ }
+
+ return num_services;
+}
+
+Value ServiceGroupsTable::NumServicesCritAccessor(const Value& row)
+{
+ ServiceGroup::Ptr sg = static_cast<ServiceGroup::Ptr>(row);
+
+ if (!sg)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ if (service->GetState() == ServiceCritical)
+ num_services++;
+ }
+
+ return num_services;
+}
+
+Value ServiceGroupsTable::NumServicesUnknownAccessor(const Value& row)
+{
+ ServiceGroup::Ptr sg = static_cast<ServiceGroup::Ptr>(row);
+
+ if (!sg)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ if (service->GetState() == ServiceUnknown)
+ num_services++;
+ }
+
+ return num_services;
+}
+
+Value ServiceGroupsTable::NumServicesPendingAccessor(const Value& row)
+{
+ ServiceGroup::Ptr sg = static_cast<ServiceGroup::Ptr>(row);
+
+ if (!sg)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ if (!service->GetLastCheckResult())
+ num_services++;
+ }
+
+ return num_services;
+}
+
+Value ServiceGroupsTable::NumServicesHardOkAccessor(const Value& row)
+{
+ ServiceGroup::Ptr sg = static_cast<ServiceGroup::Ptr>(row);
+
+ if (!sg)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ if (service->GetStateType() == StateTypeHard && service->GetState() == ServiceOK)
+ num_services++;
+ }
+
+ return num_services;
+}
+
+Value ServiceGroupsTable::NumServicesHardWarnAccessor(const Value& row)
+{
+ ServiceGroup::Ptr sg = static_cast<ServiceGroup::Ptr>(row);
+
+ if (!sg)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ if (service->GetStateType() == StateTypeHard && service->GetState() == ServiceWarning)
+ num_services++;
+ }
+
+ return num_services;
+}
+
+Value ServiceGroupsTable::NumServicesHardCritAccessor(const Value& row)
+{
+ ServiceGroup::Ptr sg = static_cast<ServiceGroup::Ptr>(row);
+
+ if (!sg)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ if (service->GetStateType() == StateTypeHard && service->GetState() == ServiceCritical)
+ num_services++;
+ }
+
+ return num_services;
+}
+
+Value ServiceGroupsTable::NumServicesHardUnknownAccessor(const Value& row)
+{
+ ServiceGroup::Ptr sg = static_cast<ServiceGroup::Ptr>(row);
+
+ if (!sg)
+ return Empty;
+
+ int num_services = 0;
+
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ if (service->GetStateType() == StateTypeHard && service->GetState() == ServiceUnknown)
+ num_services++;
+ }
+
+ return num_services;
+}
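
Editor's illustrative aside: the accessors above all share one shape -- cast the row to a ServiceGroup::Ptr, return Empty if the cast fails, then aggregate over GetMembers(). The stand-alone program below mirrors that aggregation with simplified stand-in types; it is a sketch of the pattern only, not the Icinga classes.

    // Minimal sketch of the aggregation done by the servicegroups accessors.
    // FakeService is a stand-in type, not icinga::Service.
    #include <algorithm>
    #include <iostream>
    #include <vector>

    enum ServiceState { ServiceOK = 0, ServiceWarning = 1, ServiceCritical = 2, ServiceUnknown = 3 };
    enum StateType { StateTypeSoft = 0, StateTypeHard = 1 };

    struct FakeService {
        ServiceState State;
        StateType Type;
    };

    // Mirrors WorstServiceStateAccessor: "worst" is simply the numeric maximum.
    ServiceState WorstState(const std::vector<FakeService>& members)
    {
        ServiceState worst = ServiceOK;
        for (const FakeService& s : members)
            worst = std::max(worst, s.State);
        return worst;
    }

    // Mirrors NumServicesHardCritAccessor: count members in a hard CRITICAL state.
    int NumHardCritical(const std::vector<FakeService>& members)
    {
        int num = 0;
        for (const FakeService& s : members)
            if (s.Type == StateTypeHard && s.State == ServiceCritical)
                num++;
        return num;
    }

    int main()
    {
        std::vector<FakeService> members{
            { ServiceOK, StateTypeHard },
            { ServiceCritical, StateTypeHard },
            { ServiceWarning, StateTypeSoft }
        };

        std::cout << WorstState(members) << " " << NumHardCritical(members) << "\n"; // prints "2 1"
    }
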
diff --git a/lib/livestatus/servicegroupstable.hpp b/lib/livestatus/servicegroupstable.hpp
new file mode 100644
index 0000000..b3c60c4
--- /dev/null
+++ b/lib/livestatus/servicegroupstable.hpp
@@ -0,0 +1,54 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef SERVICEGROUPSTABLE_H
+#define SERVICEGROUPSTABLE_H
+
+#include "livestatus/table.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class ServiceGroupsTable final : public Table
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ServiceGroupsTable);
+
+ ServiceGroupsTable();
+
+ static void AddColumns(Table *table, const String& prefix = String(),
+ const Column::ObjectAccessor& objectAccessor = Column::ObjectAccessor());
+
+ String GetName() const override;
+ String GetPrefix() const override;
+
+protected:
+ void FetchRows(const AddRowFunction& addRowFn) override;
+
+ static Value NameAccessor(const Value& row);
+ static Value AliasAccessor(const Value& row);
+ static Value NotesAccessor(const Value& row);
+ static Value NotesUrlAccessor(const Value& row);
+ static Value ActionUrlAccessor(const Value& row);
+ static Value MembersAccessor(const Value& row);
+ static Value MembersWithStateAccessor(const Value& row);
+ static Value WorstServiceStateAccessor(const Value& row);
+ static Value NumServicesAccessor(const Value& row);
+ static Value NumServicesOkAccessor(const Value& row);
+ static Value NumServicesWarnAccessor(const Value& row);
+ static Value NumServicesCritAccessor(const Value& row);
+ static Value NumServicesUnknownAccessor(const Value& row);
+ static Value NumServicesPendingAccessor(const Value& row);
+ static Value NumServicesHardOkAccessor(const Value& row);
+ static Value NumServicesHardWarnAccessor(const Value& row);
+ static Value NumServicesHardCritAccessor(const Value& row);
+ static Value NumServicesHardUnknownAccessor(const Value& row);
+};
+
+}
+
+#endif /* SERVICEGROUPSTABLE_H */
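
Editor's illustrative aside: the header shows why AddColumns() is static and takes a prefix plus an object accessor -- the same column set can be registered on the table itself under bare names, or joined into another table under a prefix such as "servicegroup_". The toy registry below sketches that idea with stand-in types (a plain map of name to accessor), not the real Table/Column classes.

    // Toy sketch of the prefixed column-registry pattern. Row and Accessor are
    // simplified stand-ins for Value and Column.
    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>

    using Row = std::string;
    using Accessor = std::function<std::string(const Row&)>;

    struct ToyTable {
        std::map<std::string, Accessor> Columns;

        void AddColumn(const std::string& name, Accessor accessor) {
            Columns[name] = std::move(accessor);
        }
    };

    // Registers the same columns under an arbitrary prefix, analogous to
    // ServiceGroupsTable::AddColumns(table, "servicegroup_", ...).
    void AddGroupColumns(ToyTable& table, const std::string& prefix)
    {
        table.AddColumn(prefix + "name",  [](const Row& row) { return row; });
        table.AddColumn(prefix + "alias", [](const Row& row) { return "alias of " + row; });
    }

    int main()
    {
        ToyTable servicegroups, services;
        AddGroupColumns(servicegroups, "");            // native table: bare column names
        AddGroupColumns(services, "servicegroup_");    // joined into another table: prefixed names

        std::cout << services.Columns["servicegroup_alias"]("web") << "\n"; // "alias of web"
    }
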
diff --git a/lib/livestatus/servicestable.cpp b/lib/livestatus/servicestable.cpp
new file mode 100644
index 0000000..681445a
--- /dev/null
+++ b/lib/livestatus/servicestable.cpp
@@ -0,0 +1,1200 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/servicestable.hpp"
+#include "livestatus/hoststable.hpp"
+#include "livestatus/servicegroupstable.hpp"
+#include "livestatus/hostgroupstable.hpp"
+#include "livestatus/endpointstable.hpp"
+#include "icinga/service.hpp"
+#include "icinga/servicegroup.hpp"
+#include "icinga/hostgroup.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/eventcommand.hpp"
+#include "icinga/timeperiod.hpp"
+#include "icinga/macroprocessor.hpp"
+#include "icinga/compatutility.hpp"
+#include "icinga/pluginutility.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/json.hpp"
+#include "base/convert.hpp"
+#include "base/utility.hpp"
+#include <boost/algorithm/string/replace.hpp>
+
+using namespace icinga;
+
+ServicesTable::ServicesTable(LivestatusGroupByType type)
+ : Table(type)
+{
+ AddColumns(this);
+}
+
+void ServicesTable::AddColumns(Table *table, const String& prefix,
+ const Column::ObjectAccessor& objectAccessor)
+{
+ table->AddColumn(prefix + "description", Column(&ServicesTable::ShortNameAccessor, objectAccessor));
+ table->AddColumn(prefix + "service_description", Column(&ServicesTable::ShortNameAccessor, objectAccessor)); //ugly compatibility hack
+ table->AddColumn(prefix + "display_name", Column(&ServicesTable::DisplayNameAccessor, objectAccessor));
+ table->AddColumn(prefix + "check_command", Column(&ServicesTable::CheckCommandAccessor, objectAccessor));
+ table->AddColumn(prefix + "check_command_expanded", Column(&ServicesTable::CheckCommandExpandedAccessor, objectAccessor));
+ table->AddColumn(prefix + "event_handler", Column(&ServicesTable::EventHandlerAccessor, objectAccessor));
+ table->AddColumn(prefix + "plugin_output", Column(&ServicesTable::PluginOutputAccessor, objectAccessor));
+ table->AddColumn(prefix + "long_plugin_output", Column(&ServicesTable::LongPluginOutputAccessor, objectAccessor));
+ table->AddColumn(prefix + "perf_data", Column(&ServicesTable::PerfDataAccessor, objectAccessor));
+ table->AddColumn(prefix + "notification_period", Column(&Table::EmptyStringAccessor, objectAccessor));
+ table->AddColumn(prefix + "check_period", Column(&ServicesTable::CheckPeriodAccessor, objectAccessor));
+ table->AddColumn(prefix + "notes", Column(&ServicesTable::NotesAccessor, objectAccessor));
+ table->AddColumn(prefix + "notes_expanded", Column(&ServicesTable::NotesExpandedAccessor, objectAccessor));
+ table->AddColumn(prefix + "notes_url", Column(&ServicesTable::NotesUrlAccessor, objectAccessor));
+ table->AddColumn(prefix + "notes_url_expanded", Column(&ServicesTable::NotesUrlExpandedAccessor, objectAccessor));
+ table->AddColumn(prefix + "action_url", Column(&ServicesTable::ActionUrlAccessor, objectAccessor));
+ table->AddColumn(prefix + "action_url_expanded", Column(&ServicesTable::ActionUrlExpandedAccessor, objectAccessor));
+ table->AddColumn(prefix + "icon_image", Column(&ServicesTable::IconImageAccessor, objectAccessor));
+ table->AddColumn(prefix + "icon_image_expanded", Column(&ServicesTable::IconImageExpandedAccessor, objectAccessor));
+ table->AddColumn(prefix + "icon_image_alt", Column(&ServicesTable::IconImageAltAccessor, objectAccessor));
+ table->AddColumn(prefix + "initial_state", Column(&Table::EmptyStringAccessor, objectAccessor));
+ table->AddColumn(prefix + "max_check_attempts", Column(&ServicesTable::MaxCheckAttemptsAccessor, objectAccessor));
+ table->AddColumn(prefix + "current_attempt", Column(&ServicesTable::CurrentAttemptAccessor, objectAccessor));
+ table->AddColumn(prefix + "state", Column(&ServicesTable::StateAccessor, objectAccessor));
+ table->AddColumn(prefix + "has_been_checked", Column(&ServicesTable::HasBeenCheckedAccessor, objectAccessor));
+ table->AddColumn(prefix + "last_state", Column(&ServicesTable::LastStateAccessor, objectAccessor));
+ table->AddColumn(prefix + "last_hard_state", Column(&ServicesTable::LastHardStateAccessor, objectAccessor));
+ table->AddColumn(prefix + "state_type", Column(&ServicesTable::StateTypeAccessor, objectAccessor));
+ table->AddColumn(prefix + "check_type", Column(&ServicesTable::CheckTypeAccessor, objectAccessor));
+ table->AddColumn(prefix + "acknowledged", Column(&ServicesTable::AcknowledgedAccessor, objectAccessor));
+ table->AddColumn(prefix + "acknowledgement_type", Column(&ServicesTable::AcknowledgementTypeAccessor, objectAccessor));
+ table->AddColumn(prefix + "no_more_notifications", Column(&ServicesTable::NoMoreNotificationsAccessor, objectAccessor));
+ table->AddColumn(prefix + "last_time_ok", Column(&ServicesTable::LastTimeOkAccessor, objectAccessor));
+ table->AddColumn(prefix + "last_time_warning", Column(&ServicesTable::LastTimeWarningAccessor, objectAccessor));
+ table->AddColumn(prefix + "last_time_critical", Column(&ServicesTable::LastTimeCriticalAccessor, objectAccessor));
+ table->AddColumn(prefix + "last_time_unknown", Column(&ServicesTable::LastTimeUnknownAccessor, objectAccessor));
+ table->AddColumn(prefix + "last_check", Column(&ServicesTable::LastCheckAccessor, objectAccessor));
+ table->AddColumn(prefix + "next_check", Column(&ServicesTable::NextCheckAccessor, objectAccessor));
+ table->AddColumn(prefix + "last_notification", Column(&ServicesTable::LastNotificationAccessor, objectAccessor));
+ table->AddColumn(prefix + "next_notification", Column(&ServicesTable::NextNotificationAccessor, objectAccessor));
+ table->AddColumn(prefix + "current_notification_number", Column(&ServicesTable::CurrentNotificationNumberAccessor, objectAccessor));
+ table->AddColumn(prefix + "last_state_change", Column(&ServicesTable::LastStateChangeAccessor, objectAccessor));
+ table->AddColumn(prefix + "last_hard_state_change", Column(&ServicesTable::LastHardStateChangeAccessor, objectAccessor));
+ table->AddColumn(prefix + "scheduled_downtime_depth", Column(&ServicesTable::ScheduledDowntimeDepthAccessor, objectAccessor));
+ table->AddColumn(prefix + "is_flapping", Column(&ServicesTable::IsFlappingAccessor, objectAccessor));
+ table->AddColumn(prefix + "checks_enabled", Column(&ServicesTable::ChecksEnabledAccessor, objectAccessor));
+ table->AddColumn(prefix + "accept_passive_checks", Column(&ServicesTable::AcceptPassiveChecksAccessor, objectAccessor));
+ table->AddColumn(prefix + "event_handler_enabled", Column(&ServicesTable::EventHandlerEnabledAccessor, objectAccessor));
+ table->AddColumn(prefix + "notifications_enabled", Column(&ServicesTable::NotificationsEnabledAccessor, objectAccessor));
+ table->AddColumn(prefix + "process_performance_data", Column(&ServicesTable::ProcessPerformanceDataAccessor, objectAccessor));
+ table->AddColumn(prefix + "is_executing", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "active_checks_enabled", Column(&ServicesTable::ActiveChecksEnabledAccessor, objectAccessor));
+ table->AddColumn(prefix + "check_options", Column(&Table::EmptyStringAccessor, objectAccessor));
+ table->AddColumn(prefix + "flap_detection_enabled", Column(&ServicesTable::FlapDetectionEnabledAccessor, objectAccessor));
+ table->AddColumn(prefix + "check_freshness", Column(&Table::OneAccessor, objectAccessor));
+ table->AddColumn(prefix + "obsess_over_service", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "modified_attributes", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "modified_attributes_list", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "pnpgraph_present", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "staleness", Column(&ServicesTable::StalenessAccessor, objectAccessor));
+ table->AddColumn(prefix + "check_interval", Column(&ServicesTable::CheckIntervalAccessor, objectAccessor));
+ table->AddColumn(prefix + "retry_interval", Column(&ServicesTable::RetryIntervalAccessor, objectAccessor));
+ table->AddColumn(prefix + "notification_interval", Column(&ServicesTable::NotificationIntervalAccessor, objectAccessor));
+ table->AddColumn(prefix + "first_notification_delay", Column(&Table::EmptyStringAccessor, objectAccessor));
+ table->AddColumn(prefix + "low_flap_threshold", Column(&ServicesTable::LowFlapThresholdAccessor, objectAccessor));
+ table->AddColumn(prefix + "high_flap_threshold", Column(&ServicesTable::HighFlapThresholdAccessor, objectAccessor));
+ table->AddColumn(prefix + "latency", Column(&ServicesTable::LatencyAccessor, objectAccessor));
+ table->AddColumn(prefix + "execution_time", Column(&ServicesTable::ExecutionTimeAccessor, objectAccessor));
+ table->AddColumn(prefix + "percent_state_change", Column(&ServicesTable::PercentStateChangeAccessor, objectAccessor));
+ table->AddColumn(prefix + "in_check_period", Column(&ServicesTable::InCheckPeriodAccessor, objectAccessor));
+ table->AddColumn(prefix + "in_notification_period", Column(&ServicesTable::InNotificationPeriodAccessor, objectAccessor));
+ table->AddColumn(prefix + "contacts", Column(&ServicesTable::ContactsAccessor, objectAccessor));
+ table->AddColumn(prefix + "downtimes", Column(&ServicesTable::DowntimesAccessor, objectAccessor));
+ table->AddColumn(prefix + "downtimes_with_info", Column(&ServicesTable::DowntimesWithInfoAccessor, objectAccessor));
+ table->AddColumn(prefix + "comments", Column(&ServicesTable::CommentsAccessor, objectAccessor));
+ table->AddColumn(prefix + "comments_with_info", Column(&ServicesTable::CommentsWithInfoAccessor, objectAccessor));
+ table->AddColumn(prefix + "comments_with_extra_info", Column(&ServicesTable::CommentsWithExtraInfoAccessor, objectAccessor));
+ table->AddColumn(prefix + "custom_variable_names", Column(&ServicesTable::CustomVariableNamesAccessor, objectAccessor));
+ table->AddColumn(prefix + "custom_variable_values", Column(&ServicesTable::CustomVariableValuesAccessor, objectAccessor));
+ table->AddColumn(prefix + "custom_variables", Column(&ServicesTable::CustomVariablesAccessor, objectAccessor));
+ table->AddColumn(prefix + "groups", Column(&ServicesTable::GroupsAccessor, objectAccessor));
+ table->AddColumn(prefix + "contact_groups", Column(&ServicesTable::ContactGroupsAccessor, objectAccessor));
+ table->AddColumn(prefix + "check_source", Column(&ServicesTable::CheckSourceAccessor, objectAccessor));
+ table->AddColumn(prefix + "is_reachable", Column(&ServicesTable::IsReachableAccessor, objectAccessor));
+ table->AddColumn(prefix + "cv_is_json", Column(&ServicesTable::CVIsJsonAccessor, objectAccessor));
+ table->AddColumn(prefix + "original_attributes", Column(&ServicesTable::OriginalAttributesAccessor, objectAccessor));
+
+ HostsTable::AddColumns(table, "host_", [objectAccessor](const Value& row, LivestatusGroupByType, const Object::Ptr&) -> Value {
+ return HostAccessor(row, objectAccessor);
+ });
+
+ /* add additional group by values received through the object accessor */
+ if (table->GetGroupByType() == LivestatusGroupByServiceGroup) {
+ /* lambda arguments: row, groupByType, groupByObject */
+ Log(LogDebug, "Livestatus")
+ << "Processing services group by servicegroup table.";
+ ServiceGroupsTable::AddColumns(table, "servicegroup_", [](const Value& row, LivestatusGroupByType groupByType, const Object::Ptr& groupByObject) -> Value {
+ return ServiceGroupAccessor(row, groupByType, groupByObject);
+ });
+ } else if (table->GetGroupByType() == LivestatusGroupByHostGroup) {
+ /* lambda arguments: row, groupByType, groupByObject */
+ Log(LogDebug, "Livestatus")
+ << "Processing services group by hostgroup table.";
+ HostGroupsTable::AddColumns(table, "hostgroup_", [](const Value& row, LivestatusGroupByType groupByType, const Object::Ptr& groupByObject) -> Value {
+ return HostGroupAccessor(row, groupByType, groupByObject);
+ });
+ }
+}
+
+String ServicesTable::GetName() const
+{
+ return "services";
+}
+
+String ServicesTable::GetPrefix() const
+{
+ return "service";
+}
+
+void ServicesTable::FetchRows(const AddRowFunction& addRowFn)
+{
+ if (GetGroupByType() == LivestatusGroupByServiceGroup) {
+ for (const ServiceGroup::Ptr& sg : ConfigType::GetObjectsByType<ServiceGroup>()) {
+ for (const Service::Ptr& service : sg->GetMembers()) {
+ /* the caller must know which groupby type and value are set for this row */
+ if (!addRowFn(service, LivestatusGroupByServiceGroup, sg))
+ return;
+ }
+ }
+ } else if (GetGroupByType() == LivestatusGroupByHostGroup) {
+ for (const HostGroup::Ptr& hg : ConfigType::GetObjectsByType<HostGroup>()) {
+ ObjectLock ylock(hg);
+ for (const Host::Ptr& host : hg->GetMembers()) {
+ ObjectLock ylock(host);
+ for (const Service::Ptr& service : host->GetServices()) {
+ /* the caller must know which groupby type and value are set for this row */
+ if (!addRowFn(service, LivestatusGroupByHostGroup, hg))
+ return;
+ }
+ }
+ }
+ } else {
+ for (const Service::Ptr& service : ConfigType::GetObjectsByType<Service>()) {
+ if (!addRowFn(service, LivestatusGroupByNone, Empty))
+ return;
+ }
+ }
+}
+
+Object::Ptr ServicesTable::HostAccessor(const Value& row, const Column::ObjectAccessor& parentObjectAccessor)
+{
+ Value service;
+
+ if (parentObjectAccessor)
+ service = parentObjectAccessor(row, LivestatusGroupByNone, Empty);
+ else
+ service = row;
+
+ Service::Ptr svc = static_cast<Service::Ptr>(service);
+
+ if (!svc)
+ return nullptr;
+
+ return svc->GetHost();
+}
+
+Object::Ptr ServicesTable::ServiceGroupAccessor(const Value& row, LivestatusGroupByType groupByType, const Object::Ptr& groupByObject)
+{
+ /* return the current group-by value set from within FetchRows();
+ * this is the servicegroup object used for the table join
+ * in AddColumns()
+ */
+ if (groupByType == LivestatusGroupByServiceGroup)
+ return groupByObject;
+
+ return nullptr;
+}
+
+Object::Ptr ServicesTable::HostGroupAccessor(const Value& row, LivestatusGroupByType groupByType, const Object::Ptr& groupByObject)
+{
+ /* return the current group-by value set from within FetchRows();
+ * this is the hostgroup object used for the table join
+ * in AddColumns()
+ */
+ if (groupByType == LivestatusGroupByHostGroup)
+ return groupByObject;
+
+ return nullptr;
+}
+
+Value ServicesTable::ShortNameAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return service->GetShortName();
+}
+
+Value ServicesTable::DisplayNameAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return service->GetDisplayName();
+}
+
+Value ServicesTable::CheckCommandAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ CheckCommand::Ptr checkcommand = service->GetCheckCommand();
+
+ if (checkcommand)
+ return CompatUtility::GetCommandName(checkcommand) + "!" + CompatUtility::GetCheckableCommandArgs(service);
+
+ return Empty;
+}
+
+Value ServicesTable::CheckCommandExpandedAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ CheckCommand::Ptr checkcommand = service->GetCheckCommand();
+
+ if (checkcommand)
+ return CompatUtility::GetCommandName(checkcommand) + "!" + CompatUtility::GetCheckableCommandArgs(service);
+
+ return Empty;
+}
+
+Value ServicesTable::EventHandlerAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ EventCommand::Ptr eventcommand = service->GetEventCommand();
+
+ if (eventcommand)
+ return CompatUtility::GetCommandName(eventcommand);
+
+ return Empty;
+}
+
+Value ServicesTable::PluginOutputAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ String output;
+ CheckResult::Ptr cr = service->GetLastCheckResult();
+
+ if (cr)
+ output = CompatUtility::GetCheckResultOutput(cr);
+
+ return output;
+}
+
+Value ServicesTable::LongPluginOutputAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ String long_output;
+ CheckResult::Ptr cr = service->GetLastCheckResult();
+
+ if (cr)
+ long_output = CompatUtility::GetCheckResultLongOutput(cr);
+
+ return long_output;
+}
+
+Value ServicesTable::PerfDataAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ String perfdata;
+ CheckResult::Ptr cr = service->GetLastCheckResult();
+
+ if (!cr)
+ return Empty;
+
+ return PluginUtility::FormatPerfdata(cr->GetPerformanceData());
+}
+
+Value ServicesTable::CheckPeriodAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ TimePeriod::Ptr checkPeriod = service->GetCheckPeriod();
+
+ if (!checkPeriod)
+ return Empty;
+
+ return checkPeriod->GetName();
+}
+
+Value ServicesTable::NotesAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return service->GetNotes();
+}
+
+Value ServicesTable::NotesExpandedAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ MacroProcessor::ResolverList resolvers {
+ { "service", service },
+ { "host", service->GetHost() },
+ };
+
+ return MacroProcessor::ResolveMacros(service->GetNotes(), resolvers);
+}
+
+Value ServicesTable::NotesUrlAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return service->GetNotesUrl();
+}
+
+Value ServicesTable::NotesUrlExpandedAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ MacroProcessor::ResolverList resolvers {
+ { "service", service },
+ { "host", service->GetHost() },
+ };
+
+ return MacroProcessor::ResolveMacros(service->GetNotesUrl(), resolvers);
+}
+
+Value ServicesTable::ActionUrlAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return service->GetActionUrl();
+}
+
+Value ServicesTable::ActionUrlExpandedAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ MacroProcessor::ResolverList resolvers {
+ { "service", service },
+ { "host", service->GetHost() },
+ };
+
+ return MacroProcessor::ResolveMacros(service->GetActionUrl(), resolvers);
+}
+
+Value ServicesTable::IconImageAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return service->GetIconImage();
+}
+
+Value ServicesTable::IconImageExpandedAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ MacroProcessor::ResolverList resolvers {
+ { "service", service },
+ { "host", service->GetHost() },
+ };
+
+ return MacroProcessor::ResolveMacros(service->GetIconImage(), resolvers);
+}
+
+Value ServicesTable::IconImageAltAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return service->GetIconImageAlt();
+}
+
+Value ServicesTable::MaxCheckAttemptsAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return service->GetMaxCheckAttempts();
+}
+
+Value ServicesTable::CurrentAttemptAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return service->GetCheckAttempt();
+}
+
+Value ServicesTable::StateAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return service->GetState();
+}
+
+Value ServicesTable::HasBeenCheckedAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return Convert::ToLong(service->HasBeenChecked());
+}
+
+Value ServicesTable::LastStateAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return service->GetLastState();
+}
+
+Value ServicesTable::LastHardStateAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return service->GetLastHardState();
+}
+
+Value ServicesTable::StateTypeAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return service->GetStateType();
+}
+
+Value ServicesTable::CheckTypeAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return (service->GetEnableActiveChecks() ? 0 : 1); /* 0 .. active, 1 .. passive */
+}
+
+Value ServicesTable::AcknowledgedAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ ObjectLock olock(service);
+ return service->IsAcknowledged();
+}
+
+Value ServicesTable::AcknowledgementTypeAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ ObjectLock olock(service);
+ return service->GetAcknowledgement();
+}
+
+Value ServicesTable::NoMoreNotificationsAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return (CompatUtility::GetCheckableNotificationNotificationInterval(service) == 0 && !service->GetVolatile()) ? 1 : 0;
+}
+
+Value ServicesTable::LastTimeOkAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return static_cast<int>(service->GetLastStateOK());
+}
+
+Value ServicesTable::LastTimeWarningAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return static_cast<int>(service->GetLastStateWarning());
+}
+
+Value ServicesTable::LastTimeCriticalAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return static_cast<int>(service->GetLastStateCritical());
+}
+
+Value ServicesTable::LastTimeUnknownAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return static_cast<int>(service->GetLastStateUnknown());
+}
+
+Value ServicesTable::LastCheckAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return static_cast<int>(service->GetLastCheck());
+}
+
+Value ServicesTable::NextCheckAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return static_cast<int>(service->GetNextCheck());
+}
+
+Value ServicesTable::LastNotificationAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return CompatUtility::GetCheckableNotificationLastNotification(service);
+}
+
+Value ServicesTable::NextNotificationAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return CompatUtility::GetCheckableNotificationNextNotification(service);
+}
+
+Value ServicesTable::CurrentNotificationNumberAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return CompatUtility::GetCheckableNotificationNotificationNumber(service);
+}
+
+Value ServicesTable::LastStateChangeAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return static_cast<int>(service->GetLastStateChange());
+}
+
+Value ServicesTable::LastHardStateChangeAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return static_cast<int>(service->GetLastHardStateChange());
+}
+
+Value ServicesTable::ScheduledDowntimeDepthAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return service->GetDowntimeDepth();
+}
+
+Value ServicesTable::IsFlappingAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return service->IsFlapping();
+}
+
+Value ServicesTable::ChecksEnabledAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return Convert::ToLong(service->GetEnableActiveChecks());
+}
+
+Value ServicesTable::AcceptPassiveChecksAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return Convert::ToLong(service->GetEnablePassiveChecks());
+}
+
+Value ServicesTable::EventHandlerEnabledAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return Convert::ToLong(service->GetEnableEventHandler());
+}
+
+Value ServicesTable::NotificationsEnabledAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return Convert::ToLong(service->GetEnableNotifications());
+}
+
+Value ServicesTable::ProcessPerformanceDataAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return Convert::ToLong(service->GetEnablePerfdata());
+}
+
+Value ServicesTable::ActiveChecksEnabledAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return Convert::ToLong(service->GetEnableActiveChecks());
+}
+
+Value ServicesTable::FlapDetectionEnabledAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return Convert::ToLong(service->GetEnableFlapping());
+}
+
+Value ServicesTable::StalenessAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ if (service->HasBeenChecked() && service->GetLastCheck() > 0)
+ return (Utility::GetTime() - service->GetLastCheck()) / (service->GetCheckInterval() * 3600);
+
+ return 0.0;
+}
+
+Value ServicesTable::CheckIntervalAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return service->GetCheckInterval() / LIVESTATUS_INTERVAL_LENGTH;
+}
+
+Value ServicesTable::RetryIntervalAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return service->GetRetryInterval() / LIVESTATUS_INTERVAL_LENGTH;
+}
+
+Value ServicesTable::NotificationIntervalAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return CompatUtility::GetCheckableNotificationNotificationInterval(service);
+}
+
+Value ServicesTable::LowFlapThresholdAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return service->GetFlappingThresholdLow();
+}
+
+Value ServicesTable::HighFlapThresholdAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return service->GetFlappingThresholdHigh();
+}
+
+Value ServicesTable::LatencyAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ CheckResult::Ptr cr = service->GetLastCheckResult();
+
+ if (!cr)
+ return Empty;
+
+ return cr->CalculateLatency();
+}
+
+Value ServicesTable::ExecutionTimeAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ CheckResult::Ptr cr = service->GetLastCheckResult();
+
+ if (!cr)
+ return Empty;
+
+ return cr->CalculateExecutionTime();
+}
+
+Value ServicesTable::PercentStateChangeAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return service->GetFlappingCurrent();
+}
+
+Value ServicesTable::InCheckPeriodAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ TimePeriod::Ptr timeperiod = service->GetCheckPeriod();
+
+ /* none set means always checked */
+ if (!timeperiod)
+ return 1;
+
+ return Convert::ToLong(timeperiod->IsInside(Utility::GetTime()));
+}
+
+Value ServicesTable::InNotificationPeriodAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ for (const Notification::Ptr& notification : service->GetNotifications()) {
+ TimePeriod::Ptr timeperiod = notification->GetPeriod();
+
+ if (!timeperiod || timeperiod->IsInside(Utility::GetTime()))
+ return 1;
+ }
+
+ return 0;
+}
+
+Value ServicesTable::ContactsAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ ArrayData result;
+
+ for (const User::Ptr& user : CompatUtility::GetCheckableNotificationUsers(service)) {
+ result.push_back(user->GetName());
+ }
+
+ return new Array(std::move(result));
+}
+
+Value ServicesTable::DowntimesAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ ArrayData result;
+
+ for (const Downtime::Ptr& downtime : service->GetDowntimes()) {
+ if (downtime->IsExpired())
+ continue;
+
+ result.push_back(downtime->GetLegacyId());
+ }
+
+ return new Array(std::move(result));
+}
+
+Value ServicesTable::DowntimesWithInfoAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ ArrayData result;
+
+ for (const Downtime::Ptr& downtime : service->GetDowntimes()) {
+ if (downtime->IsExpired())
+ continue;
+
+ result.push_back(new Array({
+ downtime->GetLegacyId(),
+ downtime->GetAuthor(),
+ downtime->GetComment()
+ }));
+ }
+
+ return new Array(std::move(result));
+}
+
+Value ServicesTable::CommentsAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ ArrayData result;
+
+ for (const Comment::Ptr& comment : service->GetComments()) {
+ if (comment->IsExpired())
+ continue;
+
+ result.push_back(comment->GetLegacyId());
+ }
+
+ return new Array(std::move(result));
+}
+
+Value ServicesTable::CommentsWithInfoAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ ArrayData result;
+
+ for (const Comment::Ptr& comment : service->GetComments()) {
+ if (comment->IsExpired())
+ continue;
+
+ result.push_back(new Array({
+ comment->GetLegacyId(),
+ comment->GetAuthor(),
+ comment->GetText()
+ }));
+ }
+
+ return new Array(std::move(result));
+}
+
+Value ServicesTable::CommentsWithExtraInfoAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ ArrayData result;
+
+ for (const Comment::Ptr& comment : service->GetComments()) {
+ if (comment->IsExpired())
+ continue;
+
+ result.push_back(new Array({
+ comment->GetLegacyId(),
+ comment->GetAuthor(),
+ comment->GetText(),
+ comment->GetEntryType(),
+ static_cast<int>(comment->GetEntryTime())
+ }));
+ }
+
+ return new Array(std::move(result));
+}
+
+Value ServicesTable::CustomVariableNamesAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ Dictionary::Ptr vars = service->GetVars();
+
+ ArrayData result;
+
+ if (vars) {
+ ObjectLock olock(vars);
+ for (const Dictionary::Pair& kv : vars) {
+ result.push_back(kv.first);
+ }
+ }
+
+ return new Array(std::move(result));
+}
+
+Value ServicesTable::CustomVariableValuesAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ Dictionary::Ptr vars = service->GetVars();
+
+ ArrayData result;
+
+ if (vars) {
+ ObjectLock olock(vars);
+ for (const Dictionary::Pair& kv : vars) {
+ if (kv.second.IsObjectType<Array>() || kv.second.IsObjectType<Dictionary>())
+ result.push_back(JsonEncode(kv.second));
+ else
+ result.push_back(kv.second);
+ }
+ }
+
+ return new Array(std::move(result));
+}
+
+Value ServicesTable::CustomVariablesAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ Dictionary::Ptr vars = service->GetVars();
+
+ ArrayData result;
+
+ if (vars) {
+ ObjectLock olock(vars);
+ for (const Dictionary::Pair& kv : vars) {
+ Value val;
+
+ if (kv.second.IsObjectType<Array>() || kv.second.IsObjectType<Dictionary>())
+ val = JsonEncode(kv.second);
+ else
+ val = kv.second;
+
+ result.push_back(new Array({
+ kv.first,
+ val
+ }));
+ }
+ }
+
+ return new Array(std::move(result));
+}
+
+Value ServicesTable::CVIsJsonAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ Dictionary::Ptr vars = service->GetVars();
+
+ if (!vars)
+ return Empty;
+
+ bool cv_is_json = false;
+
+ ObjectLock olock(vars);
+ for (const Dictionary::Pair& kv : vars) {
+ if (kv.second.IsObjectType<Array>() || kv.second.IsObjectType<Dictionary>())
+ cv_is_json = true;
+ }
+
+ return cv_is_json;
+}
+
+Value ServicesTable::GroupsAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ Array::Ptr groups = service->GetGroups();
+
+ if (!groups)
+ return Empty;
+
+ return groups;
+}
+
+Value ServicesTable::ContactGroupsAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ ArrayData result;
+
+ for (const UserGroup::Ptr& usergroup : CompatUtility::GetCheckableNotificationUserGroups(service)) {
+ result.push_back(usergroup->GetName());
+ }
+
+ return new Array(std::move(result));
+}
+
+Value ServicesTable::CheckSourceAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ CheckResult::Ptr cr = service->GetLastCheckResult();
+
+ if (cr)
+ return cr->GetCheckSource();
+
+ return Empty;
+}
+
+Value ServicesTable::IsReachableAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return service->IsReachable();
+}
+
+Value ServicesTable::OriginalAttributesAccessor(const Value& row)
+{
+ Service::Ptr service = static_cast<Service::Ptr>(row);
+
+ if (!service)
+ return Empty;
+
+ return JsonEncode(service->GetOriginalAttributes());
+}
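
Editor's illustrative aside: under a group-by query, ServicesTable::FetchRows() above emits each service once per group it belongs to and passes the group object along with the row; ServiceGroupAccessor/HostGroupAccessor then simply hand that object back so the prefixed group columns can be resolved. The stand-alone sketch below reproduces that fan-out with simplified stand-in types, not the Icinga classes.

    // Sketch of the group-by row fan-out used by FetchRows().
    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    struct FakeGroup {
        std::string Name;
        std::vector<std::string> Members; // service names
    };

    // service row, owning group -> keep iterating?
    using AddRowFn = std::function<bool(const std::string&, const FakeGroup*)>;

    void FetchRowsGroupedByGroup(const std::vector<FakeGroup>& groups, const AddRowFn& addRow)
    {
        for (const FakeGroup& group : groups)
            for (const std::string& service : group.Members)
                if (!addRow(service, &group))   // the caller sees which group this row belongs to
                    return;
    }

    int main()
    {
        std::vector<FakeGroup> groups{
            { "web", { "http", "https" } },
            { "db",  { "mysql", "https" } }, // "https" is in both groups -> emitted twice
        };

        FetchRowsGroupedByGroup(groups, [](const std::string& service, const FakeGroup* group) {
            std::cout << group->Name << " -> " << service << "\n";
            return true;
        });
    }
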
diff --git a/lib/livestatus/servicestable.hpp b/lib/livestatus/servicestable.hpp
new file mode 100644
index 0000000..56d5ae5
--- /dev/null
+++ b/lib/livestatus/servicestable.hpp
@@ -0,0 +1,115 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef SERVICESTABLE_H
+#define SERVICESTABLE_H
+
+#include "livestatus/table.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class ServicesTable final : public Table
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ServicesTable);
+
+ ServicesTable(LivestatusGroupByType type = LivestatusGroupByNone);
+
+ static void AddColumns(Table *table, const String& prefix = String(),
+ const Column::ObjectAccessor& objectAccessor = Column::ObjectAccessor());
+
+ String GetName() const override;
+ String GetPrefix() const override;
+
+protected:
+ void FetchRows(const AddRowFunction& addRowFn) override;
+
+ static Object::Ptr HostAccessor(const Value& row, const Column::ObjectAccessor& parentObjectAccessor);
+ static Object::Ptr ServiceGroupAccessor(const Value& row, LivestatusGroupByType groupByType, const Object::Ptr& groupByObject);
+ static Object::Ptr HostGroupAccessor(const Value& row, LivestatusGroupByType groupByType, const Object::Ptr& groupByObject);
+
+ static Value ShortNameAccessor(const Value& row);
+ static Value DisplayNameAccessor(const Value& row);
+ static Value CheckCommandAccessor(const Value& row);
+ static Value CheckCommandExpandedAccessor(const Value& row);
+ static Value EventHandlerAccessor(const Value& row);
+ static Value PluginOutputAccessor(const Value& row);
+ static Value LongPluginOutputAccessor(const Value& row);
+ static Value PerfDataAccessor(const Value& row);
+ static Value CheckPeriodAccessor(const Value& row);
+ static Value NotesAccessor(const Value& row);
+ static Value NotesExpandedAccessor(const Value& row);
+ static Value NotesUrlAccessor(const Value& row);
+ static Value NotesUrlExpandedAccessor(const Value& row);
+ static Value ActionUrlAccessor(const Value& row);
+ static Value ActionUrlExpandedAccessor(const Value& row);
+ static Value IconImageAccessor(const Value& row);
+ static Value IconImageExpandedAccessor(const Value& row);
+ static Value IconImageAltAccessor(const Value& row);
+ static Value MaxCheckAttemptsAccessor(const Value& row);
+ static Value CurrentAttemptAccessor(const Value& row);
+ static Value StateAccessor(const Value& row);
+ static Value HasBeenCheckedAccessor(const Value& row);
+ static Value LastStateAccessor(const Value& row);
+ static Value LastHardStateAccessor(const Value& row);
+ static Value StateTypeAccessor(const Value& row);
+ static Value CheckTypeAccessor(const Value& row);
+ static Value AcknowledgedAccessor(const Value& row);
+ static Value AcknowledgementTypeAccessor(const Value& row);
+ static Value NoMoreNotificationsAccessor(const Value& row);
+ static Value LastTimeOkAccessor(const Value& row);
+ static Value LastTimeWarningAccessor(const Value& row);
+ static Value LastTimeCriticalAccessor(const Value& row);
+ static Value LastTimeUnknownAccessor(const Value& row);
+ static Value LastCheckAccessor(const Value& row);
+ static Value NextCheckAccessor(const Value& row);
+ static Value LastNotificationAccessor(const Value& row);
+ static Value NextNotificationAccessor(const Value& row);
+ static Value CurrentNotificationNumberAccessor(const Value& row);
+ static Value LastStateChangeAccessor(const Value& row);
+ static Value LastHardStateChangeAccessor(const Value& row);
+ static Value ScheduledDowntimeDepthAccessor(const Value& row);
+ static Value IsFlappingAccessor(const Value& row);
+ static Value ChecksEnabledAccessor(const Value& row);
+ static Value AcceptPassiveChecksAccessor(const Value& row);
+ static Value EventHandlerEnabledAccessor(const Value& row);
+ static Value NotificationsEnabledAccessor(const Value& row);
+ static Value ProcessPerformanceDataAccessor(const Value& row);
+ static Value ActiveChecksEnabledAccessor(const Value& row);
+ static Value FlapDetectionEnabledAccessor(const Value& row);
+ static Value StalenessAccessor(const Value& row);
+ static Value CheckIntervalAccessor(const Value& row);
+ static Value RetryIntervalAccessor(const Value& row);
+ static Value NotificationIntervalAccessor(const Value& row);
+ static Value LowFlapThresholdAccessor(const Value& row);
+ static Value HighFlapThresholdAccessor(const Value& row);
+ static Value LatencyAccessor(const Value& row);
+ static Value ExecutionTimeAccessor(const Value& row);
+ static Value PercentStateChangeAccessor(const Value& row);
+ static Value InCheckPeriodAccessor(const Value& row);
+ static Value InNotificationPeriodAccessor(const Value& row);
+ static Value ContactsAccessor(const Value& row);
+ static Value DowntimesAccessor(const Value& row);
+ static Value DowntimesWithInfoAccessor(const Value& row);
+ static Value CommentsAccessor(const Value& row);
+ static Value CommentsWithInfoAccessor(const Value& row);
+ static Value CommentsWithExtraInfoAccessor(const Value& row);
+ static Value CustomVariableNamesAccessor(const Value& row);
+ static Value CustomVariableValuesAccessor(const Value& row);
+ static Value CustomVariablesAccessor(const Value& row);
+ static Value GroupsAccessor(const Value& row);
+ static Value ContactGroupsAccessor(const Value& row);
+ static Value CheckSourceAccessor(const Value& row);
+ static Value IsReachableAccessor(const Value& row);
+ static Value CVIsJsonAccessor(const Value& row);
+ static Value OriginalAttributesAccessor(const Value& row);
+};
+
+}
+
+#endif /* SERVICESTABLE_H */
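
Editor's illustrative aside: the accessor names declared in this header correspond to the column names a Livestatus client addresses in its queries, including the "host_" columns joined in from HostsTable. The query text below is an example put together for illustration; it is not taken from this patch.

    // Example Livestatus query against the services table declared above.
    #include <iostream>

    int main()
    {
        const char* query =
            "GET services\n"
            "Columns: host_name description state plugin_output\n"
            "Filter: state = 2\n"   // 2 = CRITICAL, matching the ServiceCritical convention used in servicestable.cpp
            "\n";

        std::cout << query;
    }
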
diff --git a/lib/livestatus/statehisttable.cpp b/lib/livestatus/statehisttable.cpp
new file mode 100644
index 0000000..2d7e49b
--- /dev/null
+++ b/lib/livestatus/statehisttable.cpp
@@ -0,0 +1,466 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/statehisttable.hpp"
+#include "livestatus/livestatuslogutility.hpp"
+#include "livestatus/hoststable.hpp"
+#include "livestatus/servicestable.hpp"
+#include "livestatus/contactstable.hpp"
+#include "livestatus/commandstable.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "icinga/cib.hpp"
+#include "icinga/service.hpp"
+#include "icinga/host.hpp"
+#include "icinga/user.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/eventcommand.hpp"
+#include "icinga/notificationcommand.hpp"
+#include "base/convert.hpp"
+#include "base/utility.hpp"
+#include "base/logger.hpp"
+#include "base/application.hpp"
+#include "base/objectlock.hpp"
+#include <boost/algorithm/string.hpp>
+#include <boost/algorithm/string/replace.hpp>
+#include <boost/algorithm/string/predicate.hpp>
+#include <fstream>
+
+using namespace icinga;
+
+StateHistTable::StateHistTable(const String& compat_log_path, time_t from, time_t until)
+{
+ /* store attributes for FetchRows */
+ m_TimeFrom = from;
+ m_TimeUntil = until;
+ m_CompatLogPath = compat_log_path;
+
+ AddColumns(this);
+}
+
+void StateHistTable::UpdateLogEntries(const Dictionary::Ptr& log_entry_attrs, int line_count, int lineno, const AddRowFunction& addRowFn)
+{
+ unsigned int time = log_entry_attrs->Get("time");
+ String host_name = log_entry_attrs->Get("host_name");
+ String service_description = log_entry_attrs->Get("service_description");
+ unsigned long state = log_entry_attrs->Get("state");
+ int log_type = log_entry_attrs->Get("log_type");
+ String state_type = log_entry_attrs->Get("state_type"); //SOFT, HARD, STARTED, STOPPED, ...
+ String log_line = log_entry_attrs->Get("message"); /* use message from log table */
+
+ Checkable::Ptr checkable;
+
+ if (service_description.IsEmpty())
+ checkable = Host::GetByName(host_name);
+ else
+ checkable = Service::GetByNamePair(host_name, service_description);
+
+ /* invalid log line for state history */
+ if (!checkable)
+ return;
+
+ Array::Ptr state_hist_service_states;
+ Dictionary::Ptr state_hist_bag;
+ unsigned long query_part = m_TimeUntil - m_TimeFrom;
+
+ /* insert a new state history array for this checkable if it is not cached yet */
+ if (m_CheckablesCache.find(checkable) == m_CheckablesCache.end()) {
+
+ /* create new values */
+ state_hist_service_states = new Array();
+ state_hist_bag = new Dictionary();
+
+ Service::Ptr service = dynamic_pointer_cast<Service>(checkable);
+ Host::Ptr host;
+
+ if (service)
+ host = service->GetHost();
+ else
+ host = static_pointer_cast<Host>(checkable);
+
+ state_hist_bag->Set("host_name", host->GetName());
+
+ if (service)
+ state_hist_bag->Set("service_description", service->GetShortName());
+
+ state_hist_bag->Set("state", state);
+ state_hist_bag->Set("in_downtime", 0);
+ state_hist_bag->Set("in_host_downtime", 0);
+ state_hist_bag->Set("in_notification_period", 1); // assume "always"
+ state_hist_bag->Set("is_flapping", 0);
+ state_hist_bag->Set("time", time);
+ state_hist_bag->Set("lineno", lineno);
+ state_hist_bag->Set("log_output", log_line); /* complete line */
+ state_hist_bag->Set("from", time); /* starting at current timestamp */
+ state_hist_bag->Set("until", time); /* will be updated later on state change */
+ state_hist_bag->Set("query_part", query_part); /* required for _part calculations */
+
+ state_hist_service_states->Add(state_hist_bag);
+
+ Log(LogDebug, "StateHistTable")
+ << "statehist: Adding new object '" << checkable->GetName() << "' to services cache.";
+ } else {
+ state_hist_service_states = m_CheckablesCache[checkable];
+ state_hist_bag = state_hist_service_states->Get(state_hist_service_states->GetLength()-1); /* fetch latest state from history */
+
+ /* state duration */
+
+ /* determine the notification_period of the checkable's notifications and compare it against the log timestamp */
+ bool in_notification_period = true;
+ String notification_period_name;
+ for (const Notification::Ptr& notification : checkable->GetNotifications()) {
+ TimePeriod::Ptr notification_period = notification->GetPeriod();
+
+ if (notification_period) {
+ if (notification_period->IsInside(static_cast<double>(time)))
+ in_notification_period = true;
+ else
+ in_notification_period = false;
+
+ notification_period_name = notification_period->GetName(); // last one wins
+ } else
+ in_notification_period = true; // assume "always"
+ }
+
+ /* check for state changes, flapping & downtime start/end */
+ switch (log_type) {
+ case LogEntryTypeHostAlert:
+ case LogEntryTypeHostInitialState:
+ case LogEntryTypeHostCurrentState:
+ case LogEntryTypeServiceAlert:
+ case LogEntryTypeServiceInitialState:
+ case LogEntryTypeServiceCurrentState:
+ if (state != state_hist_bag->Get("state")) {
+ /* 1. seal old state_hist_bag */
+ state_hist_bag->Set("until", time); /* add until record for duration calculation */
+
+ /* 2. add new state_hist_bag */
+ Dictionary::Ptr state_hist_bag_new = new Dictionary();
+
+ state_hist_bag_new->Set("host_name", state_hist_bag->Get("host_name"));
+ state_hist_bag_new->Set("service_description", state_hist_bag->Get("service_description"));
+ state_hist_bag_new->Set("state", state);
+ state_hist_bag_new->Set("in_downtime", state_hist_bag->Get("in_downtime")); // keep value from previous state!
+ state_hist_bag_new->Set("in_host_downtime", state_hist_bag->Get("in_host_downtime")); // keep value from previous state!
+ state_hist_bag_new->Set("in_notification_period", (in_notification_period ? 1 : 0));
+ state_hist_bag_new->Set("notification_period", notification_period_name);
+ state_hist_bag_new->Set("is_flapping", state_hist_bag->Get("is_flapping")); // keep value from previous state!
+ state_hist_bag_new->Set("time", time);
+ state_hist_bag_new->Set("lineno", lineno);
+ state_hist_bag_new->Set("log_output", log_line); /* complete line */
+ state_hist_bag_new->Set("from", time); /* starting at current timestamp */
+ state_hist_bag_new->Set("until", time + 1); /* will be updated later */
+ state_hist_bag_new->Set("query_part", query_part);
+
+ state_hist_service_states->Add(state_hist_bag_new);
+
+ Log(LogDebug, "StateHistTable")
+ << "statehist: State change detected for object '" << checkable->GetName() << "' in '" << log_line << "'.";
+ }
+ break;
+ case LogEntryTypeHostFlapping:
+ case LogEntryTypeServiceFlapping:
+ if (state_type == "STARTED")
+ state_hist_bag->Set("is_flapping", 1);
+ else if (state_type == "STOPPED" || state_type == "DISABLED")
+ state_hist_bag->Set("is_flapping", 0);
+ break;
+ case LogEntryTypeHostDowntimeAlert:
+ case LogEntryTypeServiceDowntimeAlert:
+ if (state_type == "STARTED") {
+ state_hist_bag->Set("in_downtime", 1);
+ if (log_type == LogEntryTypeHostDowntimeAlert)
+ state_hist_bag->Set("in_host_downtime", 1);
+ }
+ else if (state_type == "STOPPED" || state_type == "CANCELLED") {
+ state_hist_bag->Set("in_downtime", 0);
+ if (log_type == LogEntryTypeHostDowntimeAlert)
+ state_hist_bag->Set("in_host_downtime", 0);
+ }
+ break;
+ default:
+ //nothing to update
+ break;
+ }
+
+ }
+
+ m_CheckablesCache[checkable] = state_hist_service_states;
+
+ /* TODO find a way to directly call addRowFn() - right now m_CheckablesCache depends on historical lines ("already seen checkable") */
+}
+
+void StateHistTable::AddColumns(Table *table, const String& prefix,
+ const Column::ObjectAccessor& objectAccessor)
+{
+ table->AddColumn(prefix + "time", Column(&StateHistTable::TimeAccessor, objectAccessor));
+ table->AddColumn(prefix + "lineno", Column(&StateHistTable::LinenoAccessor, objectAccessor));
+ table->AddColumn(prefix + "from", Column(&StateHistTable::FromAccessor, objectAccessor));
+ table->AddColumn(prefix + "until", Column(&StateHistTable::UntilAccessor, objectAccessor));
+ table->AddColumn(prefix + "duration", Column(&StateHistTable::DurationAccessor, objectAccessor));
+ table->AddColumn(prefix + "duration_part", Column(&StateHistTable::DurationPartAccessor, objectAccessor));
+ table->AddColumn(prefix + "state", Column(&StateHistTable::StateAccessor, objectAccessor));
+ table->AddColumn(prefix + "host_down", Column(&StateHistTable::HostDownAccessor, objectAccessor));
+ table->AddColumn(prefix + "in_downtime", Column(&StateHistTable::InDowntimeAccessor, objectAccessor));
+ table->AddColumn(prefix + "in_host_downtime", Column(&StateHistTable::InHostDowntimeAccessor, objectAccessor));
+ table->AddColumn(prefix + "is_flapping", Column(&StateHistTable::IsFlappingAccessor, objectAccessor));
+ table->AddColumn(prefix + "in_notification_period", Column(&StateHistTable::InNotificationPeriodAccessor, objectAccessor));
+ table->AddColumn(prefix + "notification_period", Column(&StateHistTable::NotificationPeriodAccessor, objectAccessor));
+ table->AddColumn(prefix + "debug_info", Column(&Table::EmptyStringAccessor, objectAccessor));
+ table->AddColumn(prefix + "host_name", Column(&StateHistTable::HostNameAccessor, objectAccessor));
+ table->AddColumn(prefix + "service_description", Column(&StateHistTable::ServiceDescriptionAccessor, objectAccessor));
+ table->AddColumn(prefix + "log_output", Column(&StateHistTable::LogOutputAccessor, objectAccessor));
+ table->AddColumn(prefix + "duration_ok", Column(&StateHistTable::DurationOkAccessor, objectAccessor));
+ table->AddColumn(prefix + "duration_part_ok", Column(&StateHistTable::DurationPartOkAccessor, objectAccessor));
+ table->AddColumn(prefix + "duration_warning", Column(&StateHistTable::DurationWarningAccessor, objectAccessor));
+ table->AddColumn(prefix + "duration_part_warning", Column(&StateHistTable::DurationPartWarningAccessor, objectAccessor));
+ table->AddColumn(prefix + "duration_critical", Column(&StateHistTable::DurationCriticalAccessor, objectAccessor));
+ table->AddColumn(prefix + "duration_part_critical", Column(&StateHistTable::DurationPartCriticalAccessor, objectAccessor));
+ table->AddColumn(prefix + "duration_unknown", Column(&StateHistTable::DurationUnknownAccessor, objectAccessor));
+ table->AddColumn(prefix + "duration_part_unknown", Column(&StateHistTable::DurationPartUnknownAccessor, objectAccessor));
+ table->AddColumn(prefix + "duration_unmonitored", Column(&StateHistTable::DurationUnmonitoredAccessor, objectAccessor));
+ table->AddColumn(prefix + "duration_part_unmonitored", Column(&StateHistTable::DurationPartUnmonitoredAccessor, objectAccessor));
+
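+	/* Join the live Host/Service objects so the "current_host_*" and "current_service_*" columns can be resolved for each history row. */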
+ HostsTable::AddColumns(table, "current_host_", [objectAccessor](const Value& row, LivestatusGroupByType, const Object::Ptr&) -> Value {
+ return HostAccessor(row, objectAccessor);
+ });
+ ServicesTable::AddColumns(table, "current_service_", [objectAccessor](const Value& row, LivestatusGroupByType, const Object::Ptr&) -> Value {
+ return ServiceAccessor(row, objectAccessor);
+ });
+}
+
+String StateHistTable::GetName() const
+{
+ return "log";
+}
+
+String StateHistTable::GetPrefix() const
+{
+ return "log";
+}
+
+void StateHistTable::FetchRows(const AddRowFunction& addRowFn)
+{
+ Log(LogDebug, "StateHistTable")
+ << "Pre-selecting log file from " << m_TimeFrom << " until " << m_TimeUntil;
+
+ /* create log file index */
+ LivestatusLogUtility::CreateLogIndex(m_CompatLogPath, m_LogFileIndex);
+
+ /* generate log cache */
+ LivestatusLogUtility::CreateLogCache(m_LogFileIndex, this, m_TimeFrom, m_TimeUntil, addRowFn);
+
+ Checkable::Ptr checkable;
+
+ for (const auto& kv : m_CheckablesCache) {
+ for (const Dictionary::Ptr& state_hist_bag : kv.second) {
+ /* pass a dictionary from state history array */
+ if (!addRowFn(state_hist_bag, LivestatusGroupByNone, Empty))
+ return;
+ }
+ }
+}
+
+Object::Ptr StateHistTable::HostAccessor(const Value& row, const Column::ObjectAccessor&)
+{
+ String host_name = static_cast<Dictionary::Ptr>(row)->Get("host_name");
+
+ if (host_name.IsEmpty())
+ return nullptr;
+
+ return Host::GetByName(host_name);
+}
+
+Object::Ptr StateHistTable::ServiceAccessor(const Value& row, const Column::ObjectAccessor&)
+{
+ String host_name = static_cast<Dictionary::Ptr>(row)->Get("host_name");
+ String service_description = static_cast<Dictionary::Ptr>(row)->Get("service_description");
+
+ if (service_description.IsEmpty() || host_name.IsEmpty())
+ return nullptr;
+
+ return Service::GetByNamePair(host_name, service_description);
+}
+
+Value StateHistTable::TimeAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("time");
+}
+
+Value StateHistTable::LinenoAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("lineno");
+}
+
+Value StateHistTable::FromAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("from");
+}
+
+Value StateHistTable::UntilAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("until");
+}
+
+Value StateHistTable::DurationAccessor(const Value& row)
+{
+ Dictionary::Ptr state_hist_bag = static_cast<Dictionary::Ptr>(row);
+
+ return (state_hist_bag->Get("until") - state_hist_bag->Get("from"));
+}
+
+Value StateHistTable::DurationPartAccessor(const Value& row)
+{
+ Dictionary::Ptr state_hist_bag = static_cast<Dictionary::Ptr>(row);
+
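+	/* "query_part" is set when the row is built (presumably the length of the queried time range), so this yields the share of that range spent in this state. */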
+ return (state_hist_bag->Get("until") - state_hist_bag->Get("from")) / state_hist_bag->Get("query_part");
+}
+
+Value StateHistTable::StateAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("state");
+}
+
+Value StateHistTable::HostDownAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("host_down");
+}
+
+Value StateHistTable::InDowntimeAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("in_downtime");
+}
+
+Value StateHistTable::InHostDowntimeAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("in_host_downtime");
+}
+
+Value StateHistTable::IsFlappingAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("is_flapping");
+}
+
+Value StateHistTable::InNotificationPeriodAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("in_notification_period");
+}
+
+Value StateHistTable::NotificationPeriodAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("notification_period");
+}
+
+Value StateHistTable::HostNameAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("host_name");
+}
+
+Value StateHistTable::ServiceDescriptionAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("service_description");
+}
+
+Value StateHistTable::LogOutputAccessor(const Value& row)
+{
+ return static_cast<Dictionary::Ptr>(row)->Get("log_output");
+}
+
+Value StateHistTable::DurationOkAccessor(const Value& row)
+{
+ Dictionary::Ptr state_hist_bag = static_cast<Dictionary::Ptr>(row);
+
+ if (state_hist_bag->Get("state") == ServiceOK)
+ return (state_hist_bag->Get("until") - state_hist_bag->Get("from"));
+
+ return 0;
+}
+
+Value StateHistTable::DurationPartOkAccessor(const Value& row)
+{
+ Dictionary::Ptr state_hist_bag = static_cast<Dictionary::Ptr>(row);
+
+ if (state_hist_bag->Get("state") == ServiceOK)
+ return (state_hist_bag->Get("until") - state_hist_bag->Get("from")) / state_hist_bag->Get("query_part");
+
+ return 0;
+}
+
+Value StateHistTable::DurationWarningAccessor(const Value& row)
+{
+ Dictionary::Ptr state_hist_bag = static_cast<Dictionary::Ptr>(row);
+
+ if (state_hist_bag->Get("state") == ServiceWarning)
+ return (state_hist_bag->Get("until") - state_hist_bag->Get("from"));
+
+ return 0;
+}
+
+Value StateHistTable::DurationPartWarningAccessor(const Value& row)
+{
+ Dictionary::Ptr state_hist_bag = static_cast<Dictionary::Ptr>(row);
+
+ if (state_hist_bag->Get("state") == ServiceWarning)
+ return (state_hist_bag->Get("until") - state_hist_bag->Get("from")) / state_hist_bag->Get("query_part");
+
+ return 0;
+}
+
+Value StateHistTable::DurationCriticalAccessor(const Value& row)
+{
+ Dictionary::Ptr state_hist_bag = static_cast<Dictionary::Ptr>(row);
+
+ if (state_hist_bag->Get("state") == ServiceCritical)
+ return (state_hist_bag->Get("until") - state_hist_bag->Get("from"));
+
+ return 0;
+}
+
+Value StateHistTable::DurationPartCriticalAccessor(const Value& row)
+{
+ Dictionary::Ptr state_hist_bag = static_cast<Dictionary::Ptr>(row);
+
+ if (state_hist_bag->Get("state") == ServiceCritical)
+ return (state_hist_bag->Get("until") - state_hist_bag->Get("from")) / state_hist_bag->Get("query_part");
+
+ return 0;
+}
+
+Value StateHistTable::DurationUnknownAccessor(const Value& row)
+{
+ Dictionary::Ptr state_hist_bag = static_cast<Dictionary::Ptr>(row);
+
+ if (state_hist_bag->Get("state") == ServiceUnknown)
+ return (state_hist_bag->Get("until") - state_hist_bag->Get("from"));
+
+ return 0;
+}
+
+Value StateHistTable::DurationPartUnknownAccessor(const Value& row)
+{
+ Dictionary::Ptr state_hist_bag = static_cast<Dictionary::Ptr>(row);
+
+ if (state_hist_bag->Get("state") == ServiceUnknown)
+ return (state_hist_bag->Get("until") - state_hist_bag->Get("from")) / state_hist_bag->Get("query_part");
+
+ return 0;
+}
+
+Value StateHistTable::DurationUnmonitoredAccessor(const Value& row)
+{
+ Dictionary::Ptr state_hist_bag = static_cast<Dictionary::Ptr>(row);
+
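+	/* A state of -1 marks periods in which the checkable was not monitored. */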
+ if (state_hist_bag->Get("state") == -1)
+ return (state_hist_bag->Get("until") - state_hist_bag->Get("from"));
+
+ return 0;
+}
+
+Value StateHistTable::DurationPartUnmonitoredAccessor(const Value& row)
+{
+ Dictionary::Ptr state_hist_bag = static_cast<Dictionary::Ptr>(row);
+
+ if (state_hist_bag->Get("state") == -1)
+ return (state_hist_bag->Get("until") - state_hist_bag->Get("from")) / state_hist_bag->Get("query_part");
+
+ return 0;
+}
diff --git a/lib/livestatus/statehisttable.hpp b/lib/livestatus/statehisttable.hpp
new file mode 100644
index 0000000..db00615
--- /dev/null
+++ b/lib/livestatus/statehisttable.hpp
@@ -0,0 +1,75 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef STATEHISTTABLE_H
+#define STATEHISTTABLE_H
+
+#include "icinga/service.hpp"
+#include "livestatus/historytable.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class StateHistTable final : public HistoryTable
+{
+public:
+ DECLARE_PTR_TYPEDEFS(StateHistTable);
+
+ StateHistTable(const String& compat_log_path, time_t from, time_t until);
+
+ static void AddColumns(Table *table, const String& prefix = String(),
+ const Column::ObjectAccessor& objectAccessor = Column::ObjectAccessor());
+
+ String GetName() const override;
+ String GetPrefix() const override;
+
+ void UpdateLogEntries(const Dictionary::Ptr& log_entry_attrs, int line_count, int lineno, const AddRowFunction& addRowFn) override;
+
+protected:
+ void FetchRows(const AddRowFunction& addRowFn) override;
+
+ static Object::Ptr HostAccessor(const Value& row, const Column::ObjectAccessor& parentObjectAccessor);
+ static Object::Ptr ServiceAccessor(const Value& row, const Column::ObjectAccessor& parentObjectAccessor);
+
+ static Value TimeAccessor(const Value& row);
+ static Value LinenoAccessor(const Value& row);
+ static Value FromAccessor(const Value& row);
+ static Value UntilAccessor(const Value& row);
+ static Value DurationAccessor(const Value& row);
+ static Value DurationPartAccessor(const Value& row);
+ static Value StateAccessor(const Value& row);
+ static Value HostDownAccessor(const Value& row);
+ static Value InDowntimeAccessor(const Value& row);
+ static Value InHostDowntimeAccessor(const Value& row);
+ static Value IsFlappingAccessor(const Value& row);
+ static Value InNotificationPeriodAccessor(const Value& row);
+ static Value NotificationPeriodAccessor(const Value& row);
+ static Value HostNameAccessor(const Value& row);
+ static Value ServiceDescriptionAccessor(const Value& row);
+ static Value LogOutputAccessor(const Value& row);
+ static Value DurationOkAccessor(const Value& row);
+ static Value DurationPartOkAccessor(const Value& row);
+ static Value DurationWarningAccessor(const Value& row);
+ static Value DurationPartWarningAccessor(const Value& row);
+ static Value DurationCriticalAccessor(const Value& row);
+ static Value DurationPartCriticalAccessor(const Value& row);
+ static Value DurationUnknownAccessor(const Value& row);
+ static Value DurationPartUnknownAccessor(const Value& row);
+ static Value DurationUnmonitoredAccessor(const Value& row);
+ static Value DurationPartUnmonitoredAccessor(const Value& row);
+
+private:
+ std::map<time_t, String> m_LogFileIndex;
+ std::map<Checkable::Ptr, Array::Ptr> m_CheckablesCache;
+ time_t m_TimeFrom;
+ time_t m_TimeUntil;
+ String m_CompatLogPath;
+};
+
+}
+
+#endif /* STATEHISTTABLE_H */
diff --git a/lib/livestatus/statustable.cpp b/lib/livestatus/statustable.cpp
new file mode 100644
index 0000000..ae0a736
--- /dev/null
+++ b/lib/livestatus/statustable.cpp
@@ -0,0 +1,269 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/statustable.hpp"
+#include "livestatus/livestatuslistener.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "icinga/cib.hpp"
+#include "icinga/host.hpp"
+#include "icinga/service.hpp"
+#include "base/configtype.hpp"
+#include "base/utility.hpp"
+#include "base/application.hpp"
+
+using namespace icinga;
+
+StatusTable::StatusTable()
+{
+ AddColumns(this);
+}
+
+void StatusTable::AddColumns(Table *table, const String& prefix,
+ const Column::ObjectAccessor& objectAccessor)
+{
+ table->AddColumn(prefix + "neb_callbacks", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "neb_callbacks_rate", Column(&Table::ZeroAccessor, objectAccessor));
+
+ table->AddColumn(prefix + "requests", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "requests_rate", Column(&Table::ZeroAccessor, objectAccessor));
+
+ table->AddColumn(prefix + "connections", Column(&StatusTable::ConnectionsAccessor, objectAccessor));
+ table->AddColumn(prefix + "connections_rate", Column(&StatusTable::ConnectionsRateAccessor, objectAccessor));
+
+ table->AddColumn(prefix + "service_checks", Column(&StatusTable::ServiceChecksAccessor, objectAccessor));
+ table->AddColumn(prefix + "service_checks_rate", Column(&StatusTable::ServiceChecksRateAccessor, objectAccessor));
+
+ table->AddColumn(prefix + "host_checks", Column(&StatusTable::HostChecksAccessor, objectAccessor));
+ table->AddColumn(prefix + "host_checks_rate", Column(&StatusTable::HostChecksRateAccessor, objectAccessor));
+
+ table->AddColumn(prefix + "forks", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "forks_rate", Column(&Table::ZeroAccessor, objectAccessor));
+
+ table->AddColumn(prefix + "log_messages", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "log_messages_rate", Column(&Table::ZeroAccessor, objectAccessor));
+
+ table->AddColumn(prefix + "external_commands", Column(&StatusTable::ExternalCommandsAccessor, objectAccessor));
+ table->AddColumn(prefix + "external_commands_rate", Column(&StatusTable::ExternalCommandsRateAccessor, objectAccessor));
+
+ table->AddColumn(prefix + "livechecks", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "livechecks_rate", Column(&Table::ZeroAccessor, objectAccessor));
+
+ table->AddColumn(prefix + "livecheck_overflows", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "livecheck_overflows_rate", Column(&Table::ZeroAccessor, objectAccessor));
+
+ table->AddColumn(prefix + "nagios_pid", Column(&StatusTable::NagiosPidAccessor, objectAccessor));
+ table->AddColumn(prefix + "enable_notifications", Column(&StatusTable::EnableNotificationsAccessor, objectAccessor));
+ table->AddColumn(prefix + "execute_service_checks", Column(&StatusTable::ExecuteServiceChecksAccessor, objectAccessor));
+ table->AddColumn(prefix + "accept_passive_service_checks", Column(&Table::OneAccessor, objectAccessor));
+ table->AddColumn(prefix + "execute_host_checks", Column(&StatusTable::ExecuteHostChecksAccessor, objectAccessor));
+ table->AddColumn(prefix + "accept_passive_host_checks", Column(&Table::OneAccessor, objectAccessor));
+ table->AddColumn(prefix + "enable_event_handlers", Column(&StatusTable::EnableEventHandlersAccessor, objectAccessor));
+ table->AddColumn(prefix + "obsess_over_services", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "obsess_over_hosts", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "check_service_freshness", Column(&Table::OneAccessor, objectAccessor));
+ table->AddColumn(prefix + "check_host_freshness", Column(&Table::OneAccessor, objectAccessor));
+ table->AddColumn(prefix + "enable_flap_detection", Column(&StatusTable::EnableFlapDetectionAccessor, objectAccessor));
+ table->AddColumn(prefix + "process_performance_data", Column(&StatusTable::ProcessPerformanceDataAccessor, objectAccessor));
+ table->AddColumn(prefix + "check_external_commands", Column(&Table::OneAccessor, objectAccessor));
+ table->AddColumn(prefix + "program_start", Column(&StatusTable::ProgramStartAccessor, objectAccessor));
+ table->AddColumn(prefix + "last_command_check", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "last_log_rotation", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "interval_length", Column(&StatusTable::IntervalLengthAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_hosts", Column(&StatusTable::NumHostsAccessor, objectAccessor));
+ table->AddColumn(prefix + "num_services", Column(&StatusTable::NumServicesAccessor, objectAccessor));
+ table->AddColumn(prefix + "program_version", Column(&StatusTable::ProgramVersionAccessor, objectAccessor));
+ table->AddColumn(prefix + "external_command_buffer_slots", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "external_command_buffer_usage", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "external_command_buffer_max", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "cached_log_messages", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "livestatus_version", Column(&StatusTable::LivestatusVersionAccessor, objectAccessor));
+ table->AddColumn(prefix + "livestatus_active_connections", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "livestatus_queued_connections", Column(&Table::ZeroAccessor, objectAccessor));
+ table->AddColumn(prefix + "livestatus_threads", Column(&Table::ZeroAccessor, objectAccessor));
+
+ table->AddColumn(prefix + "custom_variable_names", Column(&StatusTable::CustomVariableNamesAccessor, objectAccessor));
+ table->AddColumn(prefix + "custom_variable_values", Column(&StatusTable::CustomVariableValuesAccessor, objectAccessor));
+ table->AddColumn(prefix + "custom_variables", Column(&StatusTable::CustomVariablesAccessor, objectAccessor));
+}
+
+String StatusTable::GetName() const
+{
+ return "status";
+}
+
+String StatusTable::GetPrefix() const
+{
+ return "status";
+}
+
+void StatusTable::FetchRows(const AddRowFunction& addRowFn)
+{
+ Object::Ptr obj = new Object();
+
+ /* Return a fake row. */
+ addRowFn(obj, LivestatusGroupByNone, Empty);
+}
+
+Value StatusTable::ConnectionsAccessor(const Value&)
+{
+ return LivestatusListener::GetConnections();
+}
+
+Value StatusTable::ConnectionsRateAccessor(const Value&)
+{
+ return (LivestatusListener::GetConnections() / (Utility::GetTime() - Application::GetStartTime()));
+}
+
+Value StatusTable::HostChecksAccessor(const Value&)
+{
+ auto timespan = static_cast<long>(Utility::GetTime() - Application::GetStartTime());
+ return CIB::GetActiveHostChecksStatistics(timespan);
+}
+
+Value StatusTable::HostChecksRateAccessor(const Value&)
+{
+ auto timespan = static_cast<long>(Utility::GetTime() - Application::GetStartTime());
+ return (CIB::GetActiveHostChecksStatistics(timespan) / (Utility::GetTime() - Application::GetStartTime()));
+}
+
+Value StatusTable::ServiceChecksAccessor(const Value&)
+{
+ auto timespan = static_cast<long>(Utility::GetTime() - Application::GetStartTime());
+ return CIB::GetActiveServiceChecksStatistics(timespan);
+}
+
+Value StatusTable::ServiceChecksRateAccessor(const Value&)
+{
+ auto timespan = static_cast<long>(Utility::GetTime() - Application::GetStartTime());
+ return (CIB::GetActiveServiceChecksStatistics(timespan) / (Utility::GetTime() - Application::GetStartTime()));
+}
+
+Value StatusTable::ExternalCommandsAccessor(const Value&)
+{
+ return LivestatusQuery::GetExternalCommands();
+}
+
+Value StatusTable::ExternalCommandsRateAccessor(const Value&)
+{
+ return (LivestatusQuery::GetExternalCommands() / (Utility::GetTime() - Application::GetStartTime()));
+}
+
+Value StatusTable::NagiosPidAccessor(const Value&)
+{
+ return Utility::GetPid();
+}
+
+Value StatusTable::EnableNotificationsAccessor(const Value&)
+{
+ return (IcingaApplication::GetInstance()->GetEnableNotifications() ? 1 : 0);
+}
+
+Value StatusTable::ExecuteServiceChecksAccessor(const Value&)
+{
+ return (IcingaApplication::GetInstance()->GetEnableServiceChecks() ? 1 : 0);
+}
+
+Value StatusTable::ExecuteHostChecksAccessor(const Value&)
+{
+ return (IcingaApplication::GetInstance()->GetEnableHostChecks() ? 1 : 0);
+}
+
+Value StatusTable::EnableEventHandlersAccessor(const Value&)
+{
+ return (IcingaApplication::GetInstance()->GetEnableEventHandlers() ? 1 : 0);
+}
+
+Value StatusTable::EnableFlapDetectionAccessor(const Value&)
+{
+ return (IcingaApplication::GetInstance()->GetEnableFlapping() ? 1 : 0);
+}
+
+Value StatusTable::ProcessPerformanceDataAccessor(const Value&)
+{
+ return (IcingaApplication::GetInstance()->GetEnablePerfdata() ? 1 : 0);
+}
+
+Value StatusTable::ProgramStartAccessor(const Value&)
+{
+ return static_cast<long>(Application::GetStartTime());
+}
+
+Value StatusTable::IntervalLengthAccessor(const Value&)
+{
+ return LIVESTATUS_INTERVAL_LENGTH;
+}
+
+Value StatusTable::NumHostsAccessor(const Value&)
+{
+ return ConfigType::Get<Host>()->GetObjectCount();
+}
+
+Value StatusTable::NumServicesAccessor(const Value&)
+{
+ return ConfigType::Get<Service>()->GetObjectCount();
+}
+
+Value StatusTable::ProgramVersionAccessor(const Value&)
+{
+ return Application::GetAppVersion() + "-icinga2";
+}
+
+Value StatusTable::LivestatusVersionAccessor(const Value&)
+{
+ return Application::GetAppVersion();
+}
+
+Value StatusTable::LivestatusActiveConnectionsAccessor(const Value&)
+{
+ return LivestatusListener::GetClientsConnected();
+}
+
+Value StatusTable::CustomVariableNamesAccessor(const Value&)
+{
+ Dictionary::Ptr vars = IcingaApplication::GetInstance()->GetVars();
+
+ ArrayData result;
+
+ if (vars) {
+ ObjectLock olock(vars);
+ for (const auto& kv : vars) {
+ result.push_back(kv.first);
+ }
+ }
+
+ return new Array(std::move(result));
+}
+
+Value StatusTable::CustomVariableValuesAccessor(const Value&)
+{
+ Dictionary::Ptr vars = IcingaApplication::GetInstance()->GetVars();
+
+ ArrayData result;
+
+ if (vars) {
+ ObjectLock olock(vars);
+ for (const auto& kv : vars) {
+ result.push_back(kv.second);
+ }
+ }
+
+ return new Array(std::move(result));
+}
+
+Value StatusTable::CustomVariablesAccessor(const Value&)
+{
+ Dictionary::Ptr vars = IcingaApplication::GetInstance()->GetVars();
+
+ ArrayData result;
+
+ if (vars) {
+ ObjectLock olock(vars);
+ for (const auto& kv : vars) {
+ result.push_back(new Array({
+ kv.first,
+ kv.second
+ }));
+ }
+ }
+
+ return new Array(std::move(result));
+}
diff --git a/lib/livestatus/statustable.hpp b/lib/livestatus/statustable.hpp
new file mode 100644
index 0000000..2fba249
--- /dev/null
+++ b/lib/livestatus/statustable.hpp
@@ -0,0 +1,61 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef STATUSTABLE_H
+#define STATUSTABLE_H
+
+#include "livestatus/table.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class StatusTable final : public Table
+{
+public:
+ DECLARE_PTR_TYPEDEFS(StatusTable);
+
+ StatusTable();
+
+ static void AddColumns(Table *table, const String& prefix = String(),
+ const Column::ObjectAccessor& objectAccessor = Column::ObjectAccessor());
+
+ String GetName() const override;
+ String GetPrefix() const override;
+
+protected:
+ void FetchRows(const AddRowFunction& addRowFn) override;
+
+ static Value ConnectionsAccessor(const Value& row);
+ static Value ConnectionsRateAccessor(const Value& row);
+ static Value ServiceChecksAccessor(const Value& row);
+ static Value ServiceChecksRateAccessor(const Value& row);
+ static Value HostChecksAccessor(const Value& row);
+ static Value HostChecksRateAccessor(const Value& row);
+ static Value ExternalCommandsAccessor(const Value& row);
+ static Value ExternalCommandsRateAccessor(const Value& row);
+ static Value NagiosPidAccessor(const Value& row);
+ static Value EnableNotificationsAccessor(const Value& row);
+ static Value ExecuteServiceChecksAccessor(const Value& row);
+ static Value ExecuteHostChecksAccessor(const Value& row);
+ static Value EnableEventHandlersAccessor(const Value& row);
+ static Value EnableFlapDetectionAccessor(const Value& row);
+ static Value ProcessPerformanceDataAccessor(const Value& row);
+ static Value ProgramStartAccessor(const Value& row);
+ static Value IntervalLengthAccessor(const Value& row);
+ static Value NumHostsAccessor(const Value& row);
+ static Value NumServicesAccessor(const Value& row);
+ static Value ProgramVersionAccessor(const Value& row);
+ static Value LivestatusVersionAccessor(const Value& row);
+ static Value LivestatusActiveConnectionsAccessor(const Value& row);
+ static Value CustomVariableNamesAccessor(const Value& row);
+ static Value CustomVariableValuesAccessor(const Value& row);
+ static Value CustomVariablesAccessor(const Value& row);
+};
+
+}
+
+#endif /* STATUSTABLE_H */
diff --git a/lib/livestatus/stdaggregator.cpp b/lib/livestatus/stdaggregator.cpp
new file mode 100644
index 0000000..99c3a8e
--- /dev/null
+++ b/lib/livestatus/stdaggregator.cpp
@@ -0,0 +1,40 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/stdaggregator.hpp"
+#include <math.h>
+
+using namespace icinga;
+
+StdAggregator::StdAggregator(String attr)
+ : m_StdAttr(std::move(attr))
+{ }
+
+StdAggregatorState *StdAggregator::EnsureState(AggregatorState **state)
+{
+ if (!*state)
+ *state = new StdAggregatorState();
+
+ return static_cast<StdAggregatorState *>(*state);
+}
+
+void StdAggregator::Apply(const Table::Ptr& table, const Value& row, AggregatorState **state)
+{
+ Column column = table->GetColumn(m_StdAttr);
+
+ Value value = column.ExtractValue(row);
+
+ StdAggregatorState *pstate = EnsureState(state);
+
+ pstate->StdSum += value;
+ pstate->StdQSum += pow(value, 2);
+ pstate->StdCount++;
+}
+
+double StdAggregator::GetResultAndFreeState(AggregatorState *state) const
+{
+ StdAggregatorState *pstate = EnsureState(&state);
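+	/* Sample standard deviation via the shortcut formula sqrt((sum(x^2) - (sum(x))^2 / n) / (n - 1)). */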
+ double result = sqrt((pstate->StdQSum - (1 / pstate->StdCount) * pow(pstate->StdSum, 2)) / (pstate->StdCount - 1));
+ delete pstate;
+
+ return result;
+}
diff --git a/lib/livestatus/stdaggregator.hpp b/lib/livestatus/stdaggregator.hpp
new file mode 100644
index 0000000..3680fe7
--- /dev/null
+++ b/lib/livestatus/stdaggregator.hpp
@@ -0,0 +1,43 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef STDAGGREGATOR_H
+#define STDAGGREGATOR_H
+
+#include "livestatus/table.hpp"
+#include "livestatus/aggregator.hpp"
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+struct StdAggregatorState final : public AggregatorState
+{
+ double StdSum{0};
+ double StdQSum{0};
+ double StdCount{0};
+};
+
+/**
+ * @ingroup livestatus
+ */
+class StdAggregator final : public Aggregator
+{
+public:
+ DECLARE_PTR_TYPEDEFS(StdAggregator);
+
+ StdAggregator(String attr);
+
+ void Apply(const Table::Ptr& table, const Value& row, AggregatorState **state) override;
+ double GetResultAndFreeState(AggregatorState *state) const override;
+
+private:
+ String m_StdAttr;
+
+ static StdAggregatorState *EnsureState(AggregatorState **state);
+};
+
+}
+
+#endif /* STDAGGREGATOR_H */
diff --git a/lib/livestatus/sumaggregator.cpp b/lib/livestatus/sumaggregator.cpp
new file mode 100644
index 0000000..fc4b62e
--- /dev/null
+++ b/lib/livestatus/sumaggregator.cpp
@@ -0,0 +1,37 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/sumaggregator.hpp"
+
+using namespace icinga;
+
+SumAggregator::SumAggregator(String attr)
+ : m_SumAttr(std::move(attr))
+{ }
+
+SumAggregatorState *SumAggregator::EnsureState(AggregatorState **state)
+{
+ if (!*state)
+ *state = new SumAggregatorState();
+
+ return static_cast<SumAggregatorState *>(*state);
+}
+
+void SumAggregator::Apply(const Table::Ptr& table, const Value& row, AggregatorState **state)
+{
+ Column column = table->GetColumn(m_SumAttr);
+
+ Value value = column.ExtractValue(row);
+
+ SumAggregatorState *pstate = EnsureState(state);
+
+ pstate->Sum += value;
+}
+
+double SumAggregator::GetResultAndFreeState(AggregatorState *state) const
+{
+ SumAggregatorState *pstate = EnsureState(&state);
+ double result = pstate->Sum;
+ delete pstate;
+
+ return result;
+}
diff --git a/lib/livestatus/sumaggregator.hpp b/lib/livestatus/sumaggregator.hpp
new file mode 100644
index 0000000..23f22fb
--- /dev/null
+++ b/lib/livestatus/sumaggregator.hpp
@@ -0,0 +1,41 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef SUMAGGREGATOR_H
+#define SUMAGGREGATOR_H
+
+#include "livestatus/table.hpp"
+#include "livestatus/aggregator.hpp"
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+struct SumAggregatorState final : public AggregatorState
+{
+ double Sum{0};
+};
+
+/**
+ * @ingroup livestatus
+ */
+class SumAggregator final : public Aggregator
+{
+public:
+ DECLARE_PTR_TYPEDEFS(SumAggregator);
+
+ SumAggregator(String attr);
+
+ void Apply(const Table::Ptr& table, const Value& row, AggregatorState **state) override;
+ double GetResultAndFreeState(AggregatorState *state) const override;
+
+private:
+ String m_SumAttr;
+
+ static SumAggregatorState *EnsureState(AggregatorState **state);
+};
+
+}
+
+#endif /* SUMAGGREGATOR_H */
diff --git a/lib/livestatus/table.cpp b/lib/livestatus/table.cpp
new file mode 100644
index 0000000..8e5d85a
--- /dev/null
+++ b/lib/livestatus/table.cpp
@@ -0,0 +1,165 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/table.hpp"
+#include "livestatus/statustable.hpp"
+#include "livestatus/contactgroupstable.hpp"
+#include "livestatus/contactstable.hpp"
+#include "livestatus/hostgroupstable.hpp"
+#include "livestatus/hoststable.hpp"
+#include "livestatus/servicegroupstable.hpp"
+#include "livestatus/servicestable.hpp"
+#include "livestatus/commandstable.hpp"
+#include "livestatus/commentstable.hpp"
+#include "livestatus/downtimestable.hpp"
+#include "livestatus/endpointstable.hpp"
+#include "livestatus/zonestable.hpp"
+#include "livestatus/timeperiodstable.hpp"
+#include "livestatus/logtable.hpp"
+#include "livestatus/statehisttable.hpp"
+#include "livestatus/filter.hpp"
+#include "base/array.hpp"
+#include "base/dictionary.hpp"
+#include <boost/algorithm/string/case_conv.hpp>
+
+using namespace icinga;
+
+Table::Table(LivestatusGroupByType type)
+ : m_GroupByType(type), m_GroupByObject(Empty)
+{ }
+
+Table::Ptr Table::GetByName(const String& name, const String& compat_log_path, const unsigned long& from, const unsigned long& until)
+{
+ if (name == "status")
+ return new StatusTable();
+ else if (name == "contactgroups")
+ return new ContactGroupsTable();
+ else if (name == "contacts")
+ return new ContactsTable();
+ else if (name == "hostgroups")
+ return new HostGroupsTable();
+ else if (name == "hosts")
+ return new HostsTable();
+ else if (name == "hostsbygroup")
+ return new HostsTable(LivestatusGroupByHostGroup);
+ else if (name == "servicegroups")
+ return new ServiceGroupsTable();
+ else if (name == "services")
+ return new ServicesTable();
+ else if (name == "servicesbygroup")
+ return new ServicesTable(LivestatusGroupByServiceGroup);
+ else if (name == "servicesbyhostgroup")
+ return new ServicesTable(LivestatusGroupByHostGroup);
+ else if (name == "commands")
+ return new CommandsTable();
+ else if (name == "comments")
+ return new CommentsTable();
+ else if (name == "downtimes")
+ return new DowntimesTable();
+ else if (name == "timeperiods")
+ return new TimePeriodsTable();
+ else if (name == "log")
+ return new LogTable(compat_log_path, from, until);
+ else if (name == "statehist")
+ return new StateHistTable(compat_log_path, from, until);
+ else if (name == "endpoints")
+ return new EndpointsTable();
+ else if (name == "zones")
+ return new ZonesTable();
+
+ return nullptr;
+}
+
+void Table::AddColumn(const String& name, const Column& column)
+{
+ std::pair<String, Column> item = std::make_pair(name, column);
+
+ auto ret = m_Columns.insert(item);
+
+ if (!ret.second)
+ ret.first->second = column;
+}
+
+Column Table::GetColumn(const String& name) const
+{
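+	/* Strip the table prefix ("<prefix>_") so columns can be addressed with or without it. */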
+ String dname = name;
+ String prefix = GetPrefix() + "_";
+
+ if (dname.Find(prefix) == 0)
+ dname = dname.SubStr(prefix.GetLength());
+
+ auto it = m_Columns.find(dname);
+
+ if (it == m_Columns.end())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Column '" + dname + "' does not exist in table '" + GetName() + "'."));
+
+ return it->second;
+}
+
+std::vector<String> Table::GetColumnNames() const
+{
+ std::vector<String> names;
+
+ for (const auto& kv : m_Columns) {
+ names.push_back(kv.first);
+ }
+
+ return names;
+}
+
+std::vector<LivestatusRowValue> Table::FilterRows(const Filter::Ptr& filter, int limit)
+{
+ std::vector<LivestatusRowValue> rs;
+
+ FetchRows([this, filter, limit, &rs](const Value& row, LivestatusGroupByType groupByType, const Object::Ptr& groupByObject) {
+ return FilteredAddRow(rs, filter, limit, row, groupByType, groupByObject);
+ });
+
+ return rs;
+}
+
+bool Table::FilteredAddRow(std::vector<LivestatusRowValue>& rs, const Filter::Ptr& filter, int limit, const Value& row, LivestatusGroupByType groupByType, const Object::Ptr& groupByObject)
+{
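+	/* Returning false tells FetchRows() to stop iterating once the row limit has been reached. */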
+ if (limit != -1 && static_cast<int>(rs.size()) == limit)
+ return false;
+
+ if (!filter || filter->Apply(this, row)) {
+ LivestatusRowValue rval;
+ rval.Row = row;
+ rval.GroupByType = groupByType;
+ rval.GroupByObject = groupByObject;
+
+ rs.emplace_back(std::move(rval));
+ }
+
+ return true;
+}
+
+Value Table::ZeroAccessor(const Value&)
+{
+ return 0;
+}
+
+Value Table::OneAccessor(const Value&)
+{
+ return 1;
+}
+
+Value Table::EmptyStringAccessor(const Value&)
+{
+ return "";
+}
+
+Value Table::EmptyArrayAccessor(const Value&)
+{
+ return new Array();
+}
+
+Value Table::EmptyDictionaryAccessor(const Value&)
+{
+ return new Dictionary();
+}
+
+LivestatusGroupByType Table::GetGroupByType() const
+{
+ return m_GroupByType;
+}
diff --git a/lib/livestatus/table.hpp b/lib/livestatus/table.hpp
new file mode 100644
index 0000000..fa3fc2a
--- /dev/null
+++ b/lib/livestatus/table.hpp
@@ -0,0 +1,73 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef TABLE_H
+#define TABLE_H
+
+#include "livestatus/column.hpp"
+#include "base/object.hpp"
+#include "base/dictionary.hpp"
+#include "base/array.hpp"
+#include <vector>
+
+namespace icinga
+{
+
+// Well, don't ask.
+#define LIVESTATUS_INTERVAL_LENGTH 60.0
+
+struct LivestatusRowValue {
+ Value Row;
+ LivestatusGroupByType GroupByType;
+ Value GroupByObject;
+};
+
+typedef std::function<bool (const Value&, LivestatusGroupByType, const Object::Ptr&)> AddRowFunction;
+
+class Filter;
+
+/**
+ * @ingroup livestatus
+ */
+class Table : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(Table);
+
+ static Table::Ptr GetByName(const String& name, const String& compat_log_path = "", const unsigned long& from = 0, const unsigned long& until = 0);
+
+ virtual String GetName() const = 0;
+ virtual String GetPrefix() const = 0;
+
+ std::vector<LivestatusRowValue> FilterRows(const intrusive_ptr<Filter>& filter, int limit = -1);
+
+ void AddColumn(const String& name, const Column& column);
+ Column GetColumn(const String& name) const;
+ std::vector<String> GetColumnNames() const;
+
+ LivestatusGroupByType GetGroupByType() const;
+
+protected:
+ Table(LivestatusGroupByType type = LivestatusGroupByNone);
+
+ virtual void FetchRows(const AddRowFunction& addRowFn) = 0;
+
+ static Value ZeroAccessor(const Value&);
+ static Value OneAccessor(const Value&);
+ static Value EmptyStringAccessor(const Value&);
+ static Value EmptyArrayAccessor(const Value&);
+ static Value EmptyDictionaryAccessor(const Value&);
+
+ LivestatusGroupByType m_GroupByType;
+ Value m_GroupByObject;
+
+private:
+ std::map<String, Column> m_Columns;
+
+ bool FilteredAddRow(std::vector<LivestatusRowValue>& rs, const intrusive_ptr<Filter>& filter, int limit, const Value& row, LivestatusGroupByType groupByType, const Object::Ptr& groupByObject);
+};
+
+}
+
+#endif /* TABLE_H */
+
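+/* filter.hpp is included after the guard; Filter is only forward-declared above, presumably to avoid a circular include between table.hpp and filter.hpp. */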
+#include "livestatus/filter.hpp"
diff --git a/lib/livestatus/timeperiodstable.cpp b/lib/livestatus/timeperiodstable.cpp
new file mode 100644
index 0000000..5797d93
--- /dev/null
+++ b/lib/livestatus/timeperiodstable.cpp
@@ -0,0 +1,58 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/timeperiodstable.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "icinga/timeperiod.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/convert.hpp"
+#include "base/utility.hpp"
+#include <boost/algorithm/string/replace.hpp>
+
+using namespace icinga;
+
+TimePeriodsTable::TimePeriodsTable()
+{
+ AddColumns(this);
+}
+
+void TimePeriodsTable::AddColumns(Table *table, const String& prefix,
+ const Column::ObjectAccessor& objectAccessor)
+{
+ table->AddColumn(prefix + "name", Column(&TimePeriodsTable::NameAccessor, objectAccessor));
+ table->AddColumn(prefix + "alias", Column(&TimePeriodsTable::AliasAccessor, objectAccessor));
+ table->AddColumn(prefix + "in", Column(&TimePeriodsTable::InAccessor, objectAccessor));
+}
+
+String TimePeriodsTable::GetName() const
+{
+ return "timeperiod";
+}
+
+String TimePeriodsTable::GetPrefix() const
+{
+ return "timeperiod";
+}
+
+void TimePeriodsTable::FetchRows(const AddRowFunction& addRowFn)
+{
+ for (const TimePeriod::Ptr& tp : ConfigType::GetObjectsByType<TimePeriod>()) {
+ if (!addRowFn(tp, LivestatusGroupByNone, Empty))
+ return;
+ }
+}
+
+Value TimePeriodsTable::NameAccessor(const Value& row)
+{
+ return static_cast<TimePeriod::Ptr>(row)->GetName();
+}
+
+Value TimePeriodsTable::AliasAccessor(const Value& row)
+{
+ return static_cast<TimePeriod::Ptr>(row)->GetDisplayName();
+}
+
+Value TimePeriodsTable::InAccessor(const Value& row)
+{
+ return (static_cast<TimePeriod::Ptr>(row)->IsInside(Utility::GetTime()) ? 1 : 0);
+}
diff --git a/lib/livestatus/timeperiodstable.hpp b/lib/livestatus/timeperiodstable.hpp
new file mode 100644
index 0000000..31cef93
--- /dev/null
+++ b/lib/livestatus/timeperiodstable.hpp
@@ -0,0 +1,39 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef TIMEPERIODSTABLE_H
+#define TIMEPERIODSTABLE_H
+
+#include "livestatus/table.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class TimePeriodsTable final : public Table
+{
+public:
+ DECLARE_PTR_TYPEDEFS(TimePeriodsTable);
+
+ TimePeriodsTable();
+
+ static void AddColumns(Table *table, const String& prefix = String(),
+ const Column::ObjectAccessor& objectAccessor = Column::ObjectAccessor());
+
+ String GetName() const override;
+ String GetPrefix() const override;
+
+protected:
+ void FetchRows(const AddRowFunction& addRowFn) override;
+
+ static Value NameAccessor(const Value& row);
+ static Value AliasAccessor(const Value& row);
+ static Value InAccessor(const Value& row);
+};
+
+}
+
+#endif /* TIMEPERIODSTABLE_H */
diff --git a/lib/livestatus/zonestable.cpp b/lib/livestatus/zonestable.cpp
new file mode 100644
index 0000000..d86cc72
--- /dev/null
+++ b/lib/livestatus/zonestable.cpp
@@ -0,0 +1,92 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/zonestable.hpp"
+#include "remote/zone.hpp"
+#include "base/configtype.hpp"
+
+using namespace icinga;
+
+ZonesTable::ZonesTable()
+{
+ AddColumns(this);
+}
+
+void ZonesTable::AddColumns(Table *table, const String& prefix,
+ const Column::ObjectAccessor& objectAccessor)
+{
+ table->AddColumn(prefix + "name", Column(&ZonesTable::NameAccessor, objectAccessor));
+ table->AddColumn(prefix + "parent", Column(&ZonesTable::ParentAccessor, objectAccessor));
+ table->AddColumn(prefix + "endpoints", Column(&ZonesTable::EndpointsAccessor, objectAccessor));
+ table->AddColumn(prefix + "global", Column(&ZonesTable::GlobalAccessor, objectAccessor));
+}
+
+String ZonesTable::GetName() const
+{
+ return "zones";
+}
+
+String ZonesTable::GetPrefix() const
+{
+ return "zone";
+}
+
+void ZonesTable::FetchRows(const AddRowFunction& addRowFn)
+{
+ for (const Zone::Ptr& zone : ConfigType::GetObjectsByType<Zone>()) {
+ if (!addRowFn(zone, LivestatusGroupByNone, Empty))
+ return;
+ }
+}
+
+Value ZonesTable::NameAccessor(const Value& row)
+{
+ Zone::Ptr zone = static_cast<Zone::Ptr>(row);
+
+ if (!zone)
+ return Empty;
+
+ return zone->GetName();
+}
+
+Value ZonesTable::ParentAccessor(const Value& row)
+{
+ Zone::Ptr zone = static_cast<Zone::Ptr>(row);
+
+ if (!zone)
+ return Empty;
+
+ Zone::Ptr parent_zone = zone->GetParent();
+
+ if (!parent_zone)
+ return Empty;
+
+ return parent_zone->GetName();
+}
+
+Value ZonesTable::EndpointsAccessor(const Value& row)
+{
+ Zone::Ptr zone = static_cast<Zone::Ptr>(row);
+
+ if (!zone)
+ return Empty;
+
+ std::set<Endpoint::Ptr> endpoints = zone->GetEndpoints();
+
+ ArrayData result;
+
+ for (const Endpoint::Ptr& endpoint : endpoints) {
+ result.push_back(endpoint->GetName());
+ }
+
+ return new Array(std::move(result));
+}
+
+Value ZonesTable::GlobalAccessor(const Value& row)
+{
+ Zone::Ptr zone = static_cast<Zone::Ptr>(row);
+
+ if (!zone)
+ return Empty;
+
+ return zone->GetGlobal() ? 1 : 0;
+}
diff --git a/lib/livestatus/zonestable.hpp b/lib/livestatus/zonestable.hpp
new file mode 100644
index 0000000..fb03488
--- /dev/null
+++ b/lib/livestatus/zonestable.hpp
@@ -0,0 +1,40 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef ZONESTABLE_H
+#define ZONESTABLE_H
+
+#include "livestatus/table.hpp"
+
+using namespace icinga;
+
+namespace icinga
+{
+
+/**
+ * @ingroup livestatus
+ */
+class ZonesTable final : public Table
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ZonesTable);
+
+ ZonesTable();
+
+ static void AddColumns(Table *table, const String& prefix = String(),
+ const Column::ObjectAccessor& objectAccessor = Column::ObjectAccessor());
+
+ String GetName() const override;
+ String GetPrefix() const override;
+
+protected:
+ void FetchRows(const AddRowFunction& addRowFn) override;
+
+ static Value NameAccessor(const Value& row);
+ static Value ParentAccessor(const Value& row);
+ static Value EndpointsAccessor(const Value& row);
+ static Value GlobalAccessor(const Value& row);
+};
+
+}
+
+#endif /* ZONESTABLE_H */
diff --git a/lib/methods/CMakeLists.txt b/lib/methods/CMakeLists.txt
new file mode 100644
index 0000000..a7c3090
--- /dev/null
+++ b/lib/methods/CMakeLists.txt
@@ -0,0 +1,34 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+mkembedconfig_target(methods-itl.conf methods-itl.cpp)
+
+set(methods_SOURCES
+ i2-methods.hpp methods-itl.cpp
+ clusterchecktask.cpp clusterchecktask.hpp
+ clusterzonechecktask.cpp clusterzonechecktask.hpp
+ dummychecktask.cpp dummychecktask.hpp
+ exceptionchecktask.cpp exceptionchecktask.hpp
+ icingachecktask.cpp icingachecktask.hpp
+ ifwapichecktask.cpp ifwapichecktask.hpp
+ nullchecktask.cpp nullchecktask.hpp
+ nulleventtask.cpp nulleventtask.hpp
+ pluginchecktask.cpp pluginchecktask.hpp
+ plugineventtask.cpp plugineventtask.hpp
+ pluginnotificationtask.cpp pluginnotificationtask.hpp
+ randomchecktask.cpp randomchecktask.hpp
+ timeperiodtask.cpp timeperiodtask.hpp
+ sleepchecktask.cpp sleepchecktask.hpp
+)
+
+if(ICINGA2_UNITY_BUILD)
+ mkunity_target(methods methods methods_SOURCES)
+endif()
+
+add_library(methods OBJECT ${methods_SOURCES})
+
+add_dependencies(methods base config icinga)
+
+set_target_properties (
+ methods PROPERTIES
+ FOLDER Lib
+)
diff --git a/lib/methods/clusterchecktask.cpp b/lib/methods/clusterchecktask.cpp
new file mode 100644
index 0000000..6ce28ca
--- /dev/null
+++ b/lib/methods/clusterchecktask.cpp
@@ -0,0 +1,117 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "methods/clusterchecktask.hpp"
+#include "remote/apilistener.hpp"
+#include "remote/endpoint.hpp"
+#include "icinga/cib.hpp"
+#include "icinga/service.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "icinga/checkcommand.hpp"
+#include "base/application.hpp"
+#include "base/objectlock.hpp"
+#include "base/convert.hpp"
+#include "base/utility.hpp"
+#include "base/function.hpp"
+#include "base/configtype.hpp"
+#include <boost/algorithm/string/join.hpp>
+
+using namespace icinga;
+
+REGISTER_FUNCTION_NONCONST(Internal, ClusterCheck, &ClusterCheckTask::ScriptFunc, "checkable:cr:resolvedMacros:useResolvedMacros");
+
+void ClusterCheckTask::ScriptFunc(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
+{
+ REQUIRE_NOT_NULL(checkable);
+ REQUIRE_NOT_NULL(cr);
+
+ if (resolvedMacros && !useResolvedMacros)
+ return;
+
+ CheckCommand::Ptr command = CheckCommand::ExecuteOverride ? CheckCommand::ExecuteOverride : checkable->GetCheckCommand();
+ String commandName = command->GetName();
+
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+ if (!listener) {
+ String output = "No API listener is configured for this instance.";
+
+ if (Checkable::ExecuteCommandProcessFinishedHandler) {
+ double now = Utility::GetTime();
+ ProcessResult pr;
+ pr.PID = -1;
+ pr.ExecutionStart = now;
+ pr.ExecutionEnd = now;
+ pr.ExitStatus = 126;
+ pr.Output = output;
+ Checkable::ExecuteCommandProcessFinishedHandler(commandName, pr);
+ } else {
+ cr->SetOutput(output);
+ cr->SetState(ServiceUnknown);
+ checkable->ProcessCheckResult(cr);
+ }
+
+ return;
+ }
+
+ std::pair<Dictionary::Ptr, Dictionary::Ptr> stats = listener->GetStatus();
+ Dictionary::Ptr status = stats.first;
+ int numConnEndpoints = status->Get("num_conn_endpoints");
+ int numNotConnEndpoints = status->Get("num_not_conn_endpoints");
+
+ ServiceState state;
+ String output = "Icinga 2 Cluster";
+
+ if (numNotConnEndpoints > 0) {
+ output += " Problem: " + Convert::ToString(numNotConnEndpoints) + " endpoints are not connected.";
+ output += "\n(" + FormatArray(status->Get("not_conn_endpoints")) + ")";
+
+ state = ServiceCritical;
+ } else {
+ output += " OK: " + Convert::ToString(numConnEndpoints) + " endpoints are connected.";
+ output += "\n(" + FormatArray(status->Get("conn_endpoints")) + ")";
+
+ state = ServiceOK;
+ }
+
+ if (Checkable::ExecuteCommandProcessFinishedHandler) {
+ double now = Utility::GetTime();
+ ProcessResult pr;
+ pr.PID = -1;
+ pr.Output = output;
+ pr.ExecutionStart = now;
+ pr.ExecutionEnd = now;
+ pr.ExitStatus = state;
+
+ Checkable::ExecuteCommandProcessFinishedHandler(commandName, pr);
+ } else {
+ /* use feature stats perfdata */
+ std::pair<Dictionary::Ptr, Array::Ptr> feature_stats = CIB::GetFeatureStats();
+ cr->SetPerformanceData(feature_stats.second);
+
+ cr->SetCommand(commandName);
+ cr->SetState(state);
+ cr->SetOutput(output);
+
+ checkable->ProcessCheckResult(cr);
+ }
+}
+
+String ClusterCheckTask::FormatArray(const Array::Ptr& arr)
+{
+ bool first = true;
+ String str;
+
+ if (arr) {
+ ObjectLock olock(arr);
+ for (const Value& value : arr) {
+ if (first)
+ first = false;
+ else
+ str += ", ";
+
+ str += Convert::ToString(value);
+ }
+ }
+
+ return str;
+}
diff --git a/lib/methods/clusterchecktask.hpp b/lib/methods/clusterchecktask.hpp
new file mode 100644
index 0000000..16ee8a5
--- /dev/null
+++ b/lib/methods/clusterchecktask.hpp
@@ -0,0 +1,29 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CLUSTERCHECKTASK_H
+#define CLUSTERCHECKTASK_H
+
+#include "icinga/service.hpp"
+
+namespace icinga
+{
+
+/**
+ * Cluster check type.
+ *
+ * @ingroup methods
+ */
+class ClusterCheckTask
+{
+public:
+ static void ScriptFunc(const Checkable::Ptr& service, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros);
+
+private:
+ ClusterCheckTask();
+ static String FormatArray(const Array::Ptr& arr);
+};
+
+}
+
+#endif /* CLUSTERCHECKTASK_H */
diff --git a/lib/methods/clusterzonechecktask.cpp b/lib/methods/clusterzonechecktask.cpp
new file mode 100644
index 0000000..fd52534
--- /dev/null
+++ b/lib/methods/clusterzonechecktask.cpp
@@ -0,0 +1,218 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "methods/clusterzonechecktask.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/macroprocessor.hpp"
+#include "remote/apilistener.hpp"
+#include "remote/endpoint.hpp"
+#include "remote/zone.hpp"
+#include "base/function.hpp"
+#include "base/utility.hpp"
+#include "base/perfdatavalue.hpp"
+
+using namespace icinga;
+
+REGISTER_FUNCTION_NONCONST(Internal, ClusterZoneCheck, &ClusterZoneCheckTask::ScriptFunc, "checkable:cr:resolvedMacros:useResolvedMacros");
+
+void ClusterZoneCheckTask::ScriptFunc(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
+{
+ REQUIRE_NOT_NULL(checkable);
+ REQUIRE_NOT_NULL(cr);
+
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+ CheckCommand::Ptr command = CheckCommand::ExecuteOverride ? CheckCommand::ExecuteOverride : checkable->GetCheckCommand();
+ String commandName = command->GetName();
+
+ if (!listener) {
+ String output = "No API listener is configured for this instance.";
+ ServiceState state = ServiceUnknown;
+
+ if (Checkable::ExecuteCommandProcessFinishedHandler) {
+ double now = Utility::GetTime();
+ ProcessResult pr;
+ pr.PID = -1;
+ pr.Output = output;
+ pr.ExecutionStart = now;
+ pr.ExecutionEnd = now;
+ pr.ExitStatus = state;
+
+ Checkable::ExecuteCommandProcessFinishedHandler(commandName, pr);
+ } else {
+ cr->SetCommand(commandName);
+ cr->SetOutput(output);
+ cr->SetState(state);
+ checkable->ProcessCheckResult(cr);
+ }
+
+ return;
+ }
+
+ Value raw_command = command->GetCommandLine();
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ MacroProcessor::ResolverList resolvers;
+
+ if (MacroResolver::OverrideMacros)
+ resolvers.emplace_back("override", MacroResolver::OverrideMacros);
+
+ if (service)
+ resolvers.emplace_back("service", service);
+ resolvers.emplace_back("host", host);
+ resolvers.emplace_back("command", command);
+
+ String zoneName = MacroProcessor::ResolveMacros("$cluster_zone$", resolvers, checkable->GetLastCheckResult(),
+ nullptr, MacroProcessor::EscapeCallback(), resolvedMacros, useResolvedMacros);
+
+ String missingLagWarning;
+ String missingLagCritical;
+
+ double lagWarning = MacroProcessor::ResolveMacros("$cluster_lag_warning$", resolvers, checkable->GetLastCheckResult(),
+ &missingLagWarning, MacroProcessor::EscapeCallback(), resolvedMacros, useResolvedMacros);
+
+ double lagCritical = MacroProcessor::ResolveMacros("$cluster_lag_critical$", resolvers, checkable->GetLastCheckResult(),
+ &missingLagCritical, MacroProcessor::EscapeCallback(), resolvedMacros, useResolvedMacros);
+
+ if (resolvedMacros && !useResolvedMacros)
+ return;
+
+ if (zoneName.IsEmpty()) {
+ String output = "Macro 'cluster_zone' must be set.";
+ ServiceState state = ServiceUnknown;
+
+ if (Checkable::ExecuteCommandProcessFinishedHandler) {
+ double now = Utility::GetTime();
+ ProcessResult pr;
+ pr.PID = -1;
+ pr.Output = output;
+ pr.ExecutionStart = now;
+ pr.ExecutionEnd = now;
+ pr.ExitStatus = state;
+
+ Checkable::ExecuteCommandProcessFinishedHandler(commandName, pr);
+ } else {
+ cr->SetCommand(commandName);
+ cr->SetOutput(output);
+ cr->SetState(state);
+ checkable->ProcessCheckResult(cr);
+ }
+
+ return;
+ }
+
+ Zone::Ptr zone = Zone::GetByName(zoneName);
+
+ if (!zone) {
+ String output = "Zone '" + zoneName + "' does not exist.";
+ ServiceState state = ServiceUnknown;
+
+ if (Checkable::ExecuteCommandProcessFinishedHandler) {
+ double now = Utility::GetTime();
+ ProcessResult pr;
+ pr.PID = -1;
+ pr.Output = output;
+ pr.ExecutionStart = now;
+ pr.ExecutionEnd = now;
+ pr.ExitStatus = state;
+
+ Checkable::ExecuteCommandProcessFinishedHandler(commandName, pr);
+ } else {
+ cr->SetCommand(commandName);
+ cr->SetOutput(output);
+ cr->SetState(state);
+ checkable->ProcessCheckResult(cr);
+ }
+ return;
+ }
+
+ bool connected = false;
+ double zoneLag = 0;
+
+ double lastMessageSent = 0;
+ double lastMessageReceived = 0;
+ double messagesSentPerSecond = 0;
+ double messagesReceivedPerSecond = 0;
+ double bytesSentPerSecond = 0;
+ double bytesReceivedPerSecond = 0;
+
+ {
+ auto endpoints (zone->GetEndpoints());
+
+ for (const Endpoint::Ptr& endpoint : endpoints) {
+ if (endpoint->GetConnected())
+ connected = true;
+
+ double eplag = ApiListener::CalculateZoneLag(endpoint);
+
+ if (eplag > 0 && eplag > zoneLag)
+ zoneLag = eplag;
+
+ if (endpoint->GetLastMessageSent() > lastMessageSent)
+ lastMessageSent = endpoint->GetLastMessageSent();
+
+ if (endpoint->GetLastMessageReceived() > lastMessageReceived)
+ lastMessageReceived = endpoint->GetLastMessageReceived();
+
+ messagesSentPerSecond += endpoint->GetMessagesSentPerSecond();
+ messagesReceivedPerSecond += endpoint->GetMessagesReceivedPerSecond();
+ bytesSentPerSecond += endpoint->GetBytesSentPerSecond();
+ bytesReceivedPerSecond += endpoint->GetBytesReceivedPerSecond();
+ }
+
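+		/* A zone whose only endpoint is the local node itself is considered connected. */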
+ if (!connected && endpoints.size() == 1u && *endpoints.begin() == Endpoint::GetLocalEndpoint()) {
+ connected = true;
+ }
+ }
+
+ ServiceState state;
+ String output;
+
+ if (connected) {
+ state = ServiceOK;
+ output = "Zone '" + zoneName + "' is connected. Log lag: " + Utility::FormatDuration(zoneLag);
+
+ /* Check whether the thresholds have been resolved and compare them */
+ if (missingLagCritical.IsEmpty() && zoneLag > lagCritical) {
+ state = ServiceCritical;
+ output = "Zone '" + zoneName + "' is connected. Log lag: " + Utility::FormatDuration(zoneLag)
+ + " greater than critical threshold: " + Utility::FormatDuration(lagCritical);
+ } else if (missingLagWarning.IsEmpty() && zoneLag > lagWarning) {
+ state = ServiceWarning;
+ output = "Zone '" + zoneName + "' is connected. Log lag: " + Utility::FormatDuration(zoneLag)
+ + " greater than warning threshold: " + Utility::FormatDuration(lagWarning);
+ }
+ } else {
+ state = ServiceCritical;
+ output = "Zone '" + zoneName + "' is not connected. Log lag: " + Utility::FormatDuration(zoneLag);
+ }
+
+ if (Checkable::ExecuteCommandProcessFinishedHandler) {
+ double now = Utility::GetTime();
+ ProcessResult pr;
+ pr.PID = -1;
+ pr.Output = output;
+ pr.ExecutionStart = now;
+ pr.ExecutionEnd = now;
+ pr.ExitStatus = state;
+
+ Checkable::ExecuteCommandProcessFinishedHandler(commandName, pr);
+ } else {
+ cr->SetCommand(commandName);
+ cr->SetState(state);
+ cr->SetOutput(output);
+ cr->SetPerformanceData(new Array({
+ new PerfdataValue("slave_lag", zoneLag, false, "s", lagWarning, lagCritical),
+ new PerfdataValue("last_messages_sent", lastMessageSent),
+ new PerfdataValue("last_messages_received", lastMessageReceived),
+ new PerfdataValue("sum_messages_sent_per_second", messagesSentPerSecond),
+ new PerfdataValue("sum_messages_received_per_second", messagesReceivedPerSecond),
+ new PerfdataValue("sum_bytes_sent_per_second", bytesSentPerSecond),
+ new PerfdataValue("sum_bytes_received_per_second", bytesReceivedPerSecond)
+ }));
+
+ checkable->ProcessCheckResult(cr);
+ }
+}
diff --git a/lib/methods/clusterzonechecktask.hpp b/lib/methods/clusterzonechecktask.hpp
new file mode 100644
index 0000000..2af442c
--- /dev/null
+++ b/lib/methods/clusterzonechecktask.hpp
@@ -0,0 +1,28 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CLUSTERZONECHECKTASK_H
+#define CLUSTERZONECHECKTASK_H
+
+#include "icinga/service.hpp"
+
+namespace icinga
+{
+
+/**
+ * Cluster zone check type.
+ *
+ * @ingroup methods
+ */
+class ClusterZoneCheckTask
+{
+public:
+ static void ScriptFunc(const Checkable::Ptr& service, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros);
+
+private:
+ ClusterZoneCheckTask();
+};
+
+}
+
+#endif /* CLUSTERZONECHECKTASK_H */
diff --git a/lib/methods/dummychecktask.cpp b/lib/methods/dummychecktask.cpp
new file mode 100644
index 0000000..905a022
--- /dev/null
+++ b/lib/methods/dummychecktask.cpp
@@ -0,0 +1,75 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef _WIN32
+# include <stdlib.h>
+#endif /* _WIN32 */
+#include "methods/dummychecktask.hpp"
+#include "icinga/pluginutility.hpp"
+#include "base/utility.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/convert.hpp"
+#include "base/function.hpp"
+#include "base/logger.hpp"
+
+using namespace icinga;
+
+REGISTER_FUNCTION_NONCONST(Internal, DummyCheck, &DummyCheckTask::ScriptFunc, "checkable:cr:resolvedMacros:useResolvedMacros");
+
+void DummyCheckTask::ScriptFunc(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
+{
+ REQUIRE_NOT_NULL(checkable);
+ REQUIRE_NOT_NULL(cr);
+
+ CheckCommand::Ptr command = CheckCommand::ExecuteOverride ? CheckCommand::ExecuteOverride : checkable->GetCheckCommand();
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ MacroProcessor::ResolverList resolvers;
+
+ if (MacroResolver::OverrideMacros)
+ resolvers.emplace_back("override", MacroResolver::OverrideMacros);
+
+ if (service)
+ resolvers.emplace_back("service", service);
+ resolvers.emplace_back("host", host);
+ resolvers.emplace_back("command", command);
+
+ int dummyState = MacroProcessor::ResolveMacros("$dummy_state$", resolvers, checkable->GetLastCheckResult(),
+ nullptr, MacroProcessor::EscapeCallback(), resolvedMacros, useResolvedMacros);
+
+ String dummyText = MacroProcessor::ResolveMacros("$dummy_text$", resolvers, checkable->GetLastCheckResult(),
+ nullptr, MacroProcessor::EscapeCallback(), resolvedMacros, useResolvedMacros);
+
+ if (resolvedMacros && !useResolvedMacros)
+ return;
+
+ /* Parse output and performance data. */
+ std::pair<String, String> co = PluginUtility::ParseCheckOutput(dummyText);
+
+ double now = Utility::GetTime();
+ String commandName = command->GetName();
+
+ if (Checkable::ExecuteCommandProcessFinishedHandler) {
+ ProcessResult pr;
+ pr.PID = -1;
+ pr.Output = dummyText;
+ pr.ExecutionStart = now;
+ pr.ExecutionEnd = now;
+ pr.ExitStatus = dummyState;
+
+ Checkable::ExecuteCommandProcessFinishedHandler(commandName, pr);
+ } else {
+ cr->SetOutput(co.first);
+ cr->SetPerformanceData(PluginUtility::SplitPerfdata(co.second));
+ cr->SetState(PluginUtility::ExitStatusToState(dummyState));
+ cr->SetExitStatus(dummyState);
+ cr->SetExecutionStart(now);
+ cr->SetExecutionEnd(now);
+ cr->SetCommand(commandName);
+
+ checkable->ProcessCheckResult(cr);
+ }
+}
diff --git a/lib/methods/dummychecktask.hpp b/lib/methods/dummychecktask.hpp
new file mode 100644
index 0000000..621bbfb
--- /dev/null
+++ b/lib/methods/dummychecktask.hpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef DUMMYCHECKTASK_H
+#define DUMMYCHECKTASK_H
+
+#include "methods/i2-methods.hpp"
+#include "icinga/service.hpp"
+#include "base/dictionary.hpp"
+
+namespace icinga
+{
+
+/**
+ * Test class for additional check types. Implements the "dummy" check type.
+ *
+ * @ingroup methods
+ */
+class DummyCheckTask
+{
+public:
+ static void ScriptFunc(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros);
+
+private:
+ DummyCheckTask();
+};
+
+}
+
+#endif /* DUMMYCHECKTASK_H */
diff --git a/lib/methods/exceptionchecktask.cpp b/lib/methods/exceptionchecktask.cpp
new file mode 100644
index 0000000..47707f2
--- /dev/null
+++ b/lib/methods/exceptionchecktask.cpp
@@ -0,0 +1,41 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef _WIN32
+# include <stdlib.h>
+#endif /* _WIN32 */
+#include "methods/exceptionchecktask.hpp"
+#include "base/utility.hpp"
+#include "base/convert.hpp"
+#include "base/function.hpp"
+#include "base/logger.hpp"
+#include "base/exception.hpp"
+
+using namespace icinga;
+
+REGISTER_FUNCTION_NONCONST(Internal, ExceptionCheck, &ExceptionCheckTask::ScriptFunc, "checkable:cr:resolvedMacros:useResolvedMacros");
+
+void ExceptionCheckTask::ScriptFunc(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
+{
+ REQUIRE_NOT_NULL(checkable);
+ REQUIRE_NOT_NULL(cr);
+
+ if (resolvedMacros && !useResolvedMacros)
+ return;
+
+ ScriptError scriptError = ScriptError("Test") << boost::errinfo_api_function("Test");
+
+ if (Checkable::ExecuteCommandProcessFinishedHandler) {
+ double now = Utility::GetTime();
+ ProcessResult pr;
+ pr.PID = -1;
+ pr.Output = scriptError.what();
+ pr.ExecutionStart = now;
+ pr.ExecutionEnd = now;
+ pr.ExitStatus = 3;
+
+ Checkable::ExecuteCommandProcessFinishedHandler("", pr);
+ } else {
+ BOOST_THROW_EXCEPTION(ScriptError("Test") << boost::errinfo_api_function("Test"));
+ }
+}
diff --git a/lib/methods/exceptionchecktask.hpp b/lib/methods/exceptionchecktask.hpp
new file mode 100644
index 0000000..09db104
--- /dev/null
+++ b/lib/methods/exceptionchecktask.hpp
@@ -0,0 +1,29 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef EXCEPTIONCHECKTASK_H
+#define EXCEPTIONCHECKTASK_H
+
+#include "icinga/service.hpp"
+#include "base/dictionary.hpp"
+
+namespace icinga
+{
+
+/**
+ * Test class for additional check types. Implements the "exception" check type.
+ *
+ * @ingroup methods
+ */
+class ExceptionCheckTask
+{
+public:
+ static void ScriptFunc(const Checkable::Ptr& service, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros);
+
+private:
+ ExceptionCheckTask();
+};
+
+}
+
+#endif /* EXCEPTIONCHECKTASK_H */
diff --git a/lib/methods/i2-methods.hpp b/lib/methods/i2-methods.hpp
new file mode 100644
index 0000000..ffd8002
--- /dev/null
+++ b/lib/methods/i2-methods.hpp
@@ -0,0 +1,15 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef I2METHODS_H
+#define I2METHODS_H
+
+/**
+ * @defgroup methods Icinga methods
+ *
+ * The methods library implements methods for various tasks (e.g. checks, event
+ * handlers).
+ */
+
+#include "base/i2-base.hpp"
+
+#endif /* I2METHODS_H */
diff --git a/lib/methods/icingachecktask.cpp b/lib/methods/icingachecktask.cpp
new file mode 100644
index 0000000..d3eae1f
--- /dev/null
+++ b/lib/methods/icingachecktask.cpp
@@ -0,0 +1,209 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "methods/icingachecktask.hpp"
+#include "icinga/cib.hpp"
+#include "icinga/service.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/macroprocessor.hpp"
+#include "icinga/clusterevents.hpp"
+#include "icinga/checkable.hpp"
+#include "remote/apilistener.hpp"
+#include "base/application.hpp"
+#include "base/objectlock.hpp"
+#include "base/utility.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/function.hpp"
+#include "base/configtype.hpp"
+
+using namespace icinga;
+
+REGISTER_FUNCTION_NONCONST(Internal, IcingaCheck, &IcingaCheckTask::ScriptFunc, "checkable:cr:resolvedMacros:useResolvedMacros");
+
+void IcingaCheckTask::ScriptFunc(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
+{
+ REQUIRE_NOT_NULL(checkable);
+ REQUIRE_NOT_NULL(cr);
+
+ CheckCommand::Ptr command = CheckCommand::ExecuteOverride ? CheckCommand::ExecuteOverride : checkable->GetCheckCommand();
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ MacroProcessor::ResolverList resolvers;
+
+ if (MacroResolver::OverrideMacros)
+ resolvers.emplace_back("override", MacroResolver::OverrideMacros);
+
+ if (service)
+ resolvers.emplace_back("service", service);
+ resolvers.emplace_back("host", host);
+ resolvers.emplace_back("command", command);
+
+ String missingIcingaMinVersion;
+
+ String icingaMinVersion = MacroProcessor::ResolveMacros("$icinga_min_version$", resolvers, checkable->GetLastCheckResult(),
+ &missingIcingaMinVersion, MacroProcessor::EscapeCallback(), resolvedMacros, useResolvedMacros);
+
+ if (resolvedMacros && !useResolvedMacros)
+ return;
+
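+	/* Compute per-second check rates over the time since start-up, capped at a 60 second window. */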
+ double interval = Utility::GetTime() - Application::GetStartTime();
+
+ if (interval > 60)
+ interval = 60;
+
+ /* use feature stats perfdata */
+ std::pair<Dictionary::Ptr, Array::Ptr> feature_stats = CIB::GetFeatureStats();
+
+ Array::Ptr perfdata = feature_stats.second;
+
+ perfdata->Add(new PerfdataValue("active_host_checks", CIB::GetActiveHostChecksStatistics(interval) / interval));
+ perfdata->Add(new PerfdataValue("passive_host_checks", CIB::GetPassiveHostChecksStatistics(interval) / interval));
+ perfdata->Add(new PerfdataValue("active_host_checks_1min", CIB::GetActiveHostChecksStatistics(60)));
+ perfdata->Add(new PerfdataValue("passive_host_checks_1min", CIB::GetPassiveHostChecksStatistics(60)));
+ perfdata->Add(new PerfdataValue("active_host_checks_5min", CIB::GetActiveHostChecksStatistics(60 * 5)));
+ perfdata->Add(new PerfdataValue("passive_host_checks_5min", CIB::GetPassiveHostChecksStatistics(60 * 5)));
+ perfdata->Add(new PerfdataValue("active_host_checks_15min", CIB::GetActiveHostChecksStatistics(60 * 15)));
+ perfdata->Add(new PerfdataValue("passive_host_checks_15min", CIB::GetPassiveHostChecksStatistics(60 * 15)));
+
+ perfdata->Add(new PerfdataValue("active_service_checks", CIB::GetActiveServiceChecksStatistics(interval) / interval));
+ perfdata->Add(new PerfdataValue("passive_service_checks", CIB::GetPassiveServiceChecksStatistics(interval) / interval));
+ perfdata->Add(new PerfdataValue("active_service_checks_1min", CIB::GetActiveServiceChecksStatistics(60)));
+ perfdata->Add(new PerfdataValue("passive_service_checks_1min", CIB::GetPassiveServiceChecksStatistics(60)));
+ perfdata->Add(new PerfdataValue("active_service_checks_5min", CIB::GetActiveServiceChecksStatistics(60 * 5)));
+ perfdata->Add(new PerfdataValue("passive_service_checks_5min", CIB::GetPassiveServiceChecksStatistics(60 * 5)));
+ perfdata->Add(new PerfdataValue("active_service_checks_15min", CIB::GetActiveServiceChecksStatistics(60 * 15)));
+ perfdata->Add(new PerfdataValue("passive_service_checks_15min", CIB::GetPassiveServiceChecksStatistics(60 * 15)));
+
+ perfdata->Add(new PerfdataValue("current_pending_callbacks", Application::GetTP().GetPending()));
+ perfdata->Add(new PerfdataValue("current_concurrent_checks", Checkable::CurrentConcurrentChecks.load()));
+ perfdata->Add(new PerfdataValue("remote_check_queue", ClusterEvents::GetCheckRequestQueueSize()));
+
+ CheckableCheckStatistics scs = CIB::CalculateServiceCheckStats();
+
+ perfdata->Add(new PerfdataValue("min_latency", scs.min_latency));
+ perfdata->Add(new PerfdataValue("max_latency", scs.max_latency));
+ perfdata->Add(new PerfdataValue("avg_latency", scs.avg_latency));
+ perfdata->Add(new PerfdataValue("min_execution_time", scs.min_execution_time));
+ perfdata->Add(new PerfdataValue("max_execution_time", scs.max_execution_time));
+ perfdata->Add(new PerfdataValue("avg_execution_time", scs.avg_execution_time));
+
+ ServiceStatistics ss = CIB::CalculateServiceStats();
+
+ perfdata->Add(new PerfdataValue("num_services_ok", ss.services_ok));
+ perfdata->Add(new PerfdataValue("num_services_warning", ss.services_warning));
+ perfdata->Add(new PerfdataValue("num_services_critical", ss.services_critical));
+ perfdata->Add(new PerfdataValue("num_services_unknown", ss.services_unknown));
+ perfdata->Add(new PerfdataValue("num_services_pending", ss.services_pending));
+ perfdata->Add(new PerfdataValue("num_services_unreachable", ss.services_unreachable));
+ perfdata->Add(new PerfdataValue("num_services_flapping", ss.services_flapping));
+ perfdata->Add(new PerfdataValue("num_services_in_downtime", ss.services_in_downtime));
+ perfdata->Add(new PerfdataValue("num_services_acknowledged", ss.services_acknowledged));
+ perfdata->Add(new PerfdataValue("num_services_handled", ss.services_handled));
+ perfdata->Add(new PerfdataValue("num_services_problem", ss.services_problem));
+
+ double uptime = Application::GetUptime();
+ perfdata->Add(new PerfdataValue("uptime", uptime));
+
+ HostStatistics hs = CIB::CalculateHostStats();
+
+ perfdata->Add(new PerfdataValue("num_hosts_up", hs.hosts_up));
+ perfdata->Add(new PerfdataValue("num_hosts_down", hs.hosts_down));
+ perfdata->Add(new PerfdataValue("num_hosts_pending", hs.hosts_pending));
+ perfdata->Add(new PerfdataValue("num_hosts_unreachable", hs.hosts_unreachable));
+ perfdata->Add(new PerfdataValue("num_hosts_flapping", hs.hosts_flapping));
+ perfdata->Add(new PerfdataValue("num_hosts_in_downtime", hs.hosts_in_downtime));
+ perfdata->Add(new PerfdataValue("num_hosts_acknowledged", hs.hosts_acknowledged));
+ perfdata->Add(new PerfdataValue("num_hosts_handled", hs.hosts_handled));
+ perfdata->Add(new PerfdataValue("num_hosts_problem", hs.hosts_problem));
+
+ std::vector<Endpoint::Ptr> endpoints = ConfigType::GetObjectsByType<Endpoint>();
+
+ double lastMessageSent = 0;
+ double lastMessageReceived = 0;
+ double messagesSentPerSecond = 0;
+ double messagesReceivedPerSecond = 0;
+ double bytesSentPerSecond = 0;
+ double bytesReceivedPerSecond = 0;
+
+ for (const Endpoint::Ptr& endpoint : endpoints)
+ {
+ if (endpoint->GetLastMessageSent() > lastMessageSent)
+ lastMessageSent = endpoint->GetLastMessageSent();
+
+ if (endpoint->GetLastMessageReceived() > lastMessageReceived)
+ lastMessageReceived = endpoint->GetLastMessageReceived();
+
+ messagesSentPerSecond += endpoint->GetMessagesSentPerSecond();
+ messagesReceivedPerSecond += endpoint->GetMessagesReceivedPerSecond();
+ bytesSentPerSecond += endpoint->GetBytesSentPerSecond();
+ bytesReceivedPerSecond += endpoint->GetBytesReceivedPerSecond();
+ }
+
+ perfdata->Add(new PerfdataValue("last_messages_sent", lastMessageSent));
+ perfdata->Add(new PerfdataValue("last_messages_received", lastMessageReceived));
+ perfdata->Add(new PerfdataValue("sum_messages_sent_per_second", messagesSentPerSecond));
+ perfdata->Add(new PerfdataValue("sum_messages_received_per_second", messagesReceivedPerSecond));
+ perfdata->Add(new PerfdataValue("sum_bytes_sent_per_second", bytesSentPerSecond));
+ perfdata->Add(new PerfdataValue("sum_bytes_received_per_second", bytesReceivedPerSecond));
+
+ cr->SetPerformanceData(perfdata);
+ ServiceState state = ServiceOK;
+
+ String appVersion = Application::GetAppVersion();
+
+ String output = "Icinga 2 has been running for " + Utility::FormatDuration(uptime) +
+ ". Version: " + appVersion;
+
+ /* Indicate a warning if the last reload failed. */
+ double lastReloadFailed = Application::GetLastReloadFailed();
+
+ if (lastReloadFailed > 0) {
+ output += "; Last reload attempt failed at " + Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", lastReloadFailed);
+		state = ServiceWarning;
+ }
+
+ /* Indicate a warning when the last synced config caused a stage validation error. */
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (listener) {
+ Dictionary::Ptr validationResult = listener->GetLastFailedZonesStageValidation();
+
+ if (validationResult) {
+ output += "; Last zone sync stage validation failed at "
+ + Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", validationResult->Get("ts"));
+
+ state = ServiceWarning;
+ }
+ }
+
+ String parsedAppVersion = Utility::ParseVersion(appVersion);
+
+ /* Return an error if the version is less than specified (optional). */
+ if (missingIcingaMinVersion.IsEmpty() && !icingaMinVersion.IsEmpty() && Utility::CompareVersion(icingaMinVersion, parsedAppVersion) < 0) {
+ output += "; Minimum version " + icingaMinVersion + " is not installed.";
+ state = ServiceCritical;
+ }
+
+ String commandName = command->GetName();
+
+ if (Checkable::ExecuteCommandProcessFinishedHandler) {
+ double now = Utility::GetTime();
+ ProcessResult pr;
+ pr.PID = -1;
+ pr.Output = output;
+ pr.ExecutionStart = now;
+ pr.ExecutionEnd = now;
+ pr.ExitStatus = state;
+
+ Checkable::ExecuteCommandProcessFinishedHandler(commandName, pr);
+ } else {
+ cr->SetState(state);
+ cr->SetOutput(output);
+ cr->SetCommand(commandName);
+
+ checkable->ProcessCheckResult(cr);
+ }
+}
diff --git a/lib/methods/icingachecktask.hpp b/lib/methods/icingachecktask.hpp
new file mode 100644
index 0000000..93def62
--- /dev/null
+++ b/lib/methods/icingachecktask.hpp
@@ -0,0 +1,29 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef ICINGACHECKTASK_H
+#define ICINGACHECKTASK_H
+
+#include "methods/i2-methods.hpp"
+#include "icinga/service.hpp"
+
+namespace icinga
+{
+
+/**
+ * Icinga check type.
+ *
+ * @ingroup methods
+ */
+class IcingaCheckTask
+{
+public:
+ static void ScriptFunc(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros);
+
+private:
+ IcingaCheckTask();
+};
+
+}
+
+#endif /* ICINGACHECKTASK_H */
diff --git a/lib/methods/ifwapichecktask.cpp b/lib/methods/ifwapichecktask.cpp
new file mode 100644
index 0000000..8516d70
--- /dev/null
+++ b/lib/methods/ifwapichecktask.cpp
@@ -0,0 +1,531 @@
+/* Icinga 2 | (c) 2023 Icinga GmbH | GPLv2+ */
+
+#ifndef _WIN32
+# include <stdlib.h>
+#endif /* _WIN32 */
+#include "methods/ifwapichecktask.hpp"
+#include "methods/pluginchecktask.hpp"
+#include "icinga/checkresult-ti.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "icinga/pluginutility.hpp"
+#include "base/base64.hpp"
+#include "base/defer.hpp"
+#include "base/utility.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/convert.hpp"
+#include "base/function.hpp"
+#include "base/io-engine.hpp"
+#include "base/json.hpp"
+#include "base/logger.hpp"
+#include "base/shared.hpp"
+#include "base/tcpsocket.hpp"
+#include "base/tlsstream.hpp"
+#include "remote/apilistener.hpp"
+#include "remote/url.hpp"
+#include <boost/asio.hpp>
+#include <boost/beast/core.hpp>
+#include <boost/beast/http.hpp>
+#include <boost/system/system_error.hpp>
+#include <exception>
+#include <set>
+
+using namespace icinga;
+
+REGISTER_FUNCTION_NONCONST(Internal, IfwApiCheck, &IfwApiCheckTask::ScriptFunc, "checkable:cr:resolvedMacros:useResolvedMacros");
+
+static void ReportIfwCheckResult(
+ const Checkable::Ptr& checkable, const Value& cmdLine, const CheckResult::Ptr& cr,
+ const String& output, double start, double end, int exitcode = 3, const Array::Ptr& perfdata = nullptr
+)
+{
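+	/* If a process-finished handler is registered (e.g. when the check is executed on behalf of
+	 * another endpoint), hand the outcome over as a ProcessResult. Otherwise fill in the check
+	 * result and process it for this checkable. */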
+ if (Checkable::ExecuteCommandProcessFinishedHandler) {
+ ProcessResult pr;
+ pr.PID = -1;
+ pr.Output = perfdata ? output + " |" + String(perfdata->Join(" ")) : output;
+ pr.ExecutionStart = start;
+ pr.ExecutionEnd = end;
+ pr.ExitStatus = exitcode;
+
+ Checkable::ExecuteCommandProcessFinishedHandler(cmdLine, pr);
+ } else {
+ auto splittedPerfdata (perfdata);
+
+ if (perfdata) {
+ splittedPerfdata = new Array();
+ ObjectLock oLock (perfdata);
+
+ for (String pv : perfdata) {
+ PluginUtility::SplitPerfdata(pv)->CopyTo(splittedPerfdata);
+ }
+ }
+
+ cr->SetOutput(output);
+ cr->SetPerformanceData(splittedPerfdata);
+ cr->SetState((ServiceState)exitcode);
+ cr->SetExitStatus(exitcode);
+ cr->SetExecutionStart(start);
+ cr->SetExecutionEnd(end);
+ cr->SetCommand(cmdLine);
+
+ checkable->ProcessCheckResult(cr);
+ }
+}
+
+static void ReportIfwCheckResult(
+ boost::asio::yield_context yc, const Checkable::Ptr& checkable, const Value& cmdLine,
+ const CheckResult::Ptr& cr, const String& output, double start
+)
+{
+ double end = Utility::GetTime();
+ CpuBoundWork cbw (yc);
+
+ ReportIfwCheckResult(checkable, cmdLine, cr, output, start, end);
+}
+
+static const char* GetUnderstandableError(const std::exception& ex)
+{
+ auto se (dynamic_cast<const boost::system::system_error*>(&ex));
+
+ if (se && se->code() == boost::asio::error::operation_aborted) {
+ return "Timeout exceeded";
+ }
+
+ return ex.what();
+}
+
+static void DoIfwNetIo(
+ boost::asio::yield_context yc, const Checkable::Ptr& checkable, const Array::Ptr& cmdLine,
+ const CheckResult::Ptr& cr, const String& psCommand, const String& psHost, const String& san, const String& psPort,
+ AsioTlsStream& conn, boost::beast::http::request<boost::beast::http::string_body>& req, double start
+)
+{
+ namespace http = boost::beast::http;
+
+ boost::beast::flat_buffer buf;
+ http::response<http::string_body> resp;
+
+ try {
+ Connect(conn.lowest_layer(), psHost, psPort, yc);
+ } catch (const std::exception& ex) {
+ ReportIfwCheckResult(
+ yc, checkable, cmdLine, cr,
+ "Can't connect to IfW API on host '" + psHost + "' port '" + psPort + "': " + GetUnderstandableError(ex),
+ start
+ );
+ return;
+ }
+
+ auto& sslConn (conn.next_layer());
+
+ try {
+ sslConn.async_handshake(conn.next_layer().client, yc);
+ } catch (const std::exception& ex) {
+ ReportIfwCheckResult(
+ yc, checkable, cmdLine, cr,
+ "TLS handshake with IfW API on host '" + psHost + "' (SNI: '" + san
+ + "') port '" + psPort + "' failed: " + GetUnderstandableError(ex),
+ start
+ );
+ return;
+ }
+
+ if (!sslConn.IsVerifyOK()) {
+ auto cert (sslConn.GetPeerCertificate());
+ Value cn;
+
+ try {
+ cn = GetCertificateCN(cert);
+ } catch (const std::exception&) {
+ }
+
+ ReportIfwCheckResult(
+ yc, checkable, cmdLine, cr,
+ "Certificate validation failed for IfW API on host '" + psHost + "' (SNI: '" + san + "'; CN: "
+ + (cn.IsString() ? "'" + cn + "'" : "N/A") + ") port '" + psPort + "': " + sslConn.GetVerifyError(),
+ start
+ );
+ return;
+ }
+
+ try {
+ http::async_write(conn, req, yc);
+ conn.async_flush(yc);
+ } catch (const std::exception& ex) {
+ ReportIfwCheckResult(
+ yc, checkable, cmdLine, cr,
+ "Can't send HTTP request to IfW API on host '" + psHost + "' port '" + psPort + "': " + GetUnderstandableError(ex),
+ start
+ );
+ return;
+ }
+
+ try {
+ http::async_read(conn, buf, resp, yc);
+ } catch (const std::exception& ex) {
+ ReportIfwCheckResult(
+ yc, checkable, cmdLine, cr,
+ "Can't read HTTP response from IfW API on host '" + psHost + "' port '" + psPort + "': " + GetUnderstandableError(ex),
+ start
+ );
+ return;
+ }
+
+ double end = Utility::GetTime();
+
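+	// Shut the TLS connection down gracefully; shutdown errors are deliberately ignored.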
+ {
+ boost::system::error_code ec;
+ sslConn.async_shutdown(yc[ec]);
+ }
+
+ CpuBoundWork cbw (yc);
+ Value jsonRoot;
+
+ try {
+ jsonRoot = JsonDecode(resp.body());
+ } catch (const std::exception& ex) {
+ ReportIfwCheckResult(
+ checkable, cmdLine, cr,
+ "Got bad JSON from IfW API on host '" + psHost + "' port '" + psPort + "': " + ex.what(), start, end
+ );
+ return;
+ }
+
+ if (!jsonRoot.IsObjectType<Dictionary>()) {
+ ReportIfwCheckResult(
+ checkable, cmdLine, cr,
+ "Got JSON, but not an object, from IfW API on host '"
+ + psHost + "' port '" + psPort + "': " + JsonEncode(jsonRoot),
+ start, end
+ );
+ return;
+ }
+
+ Value jsonBranch;
+
+ if (!Dictionary::Ptr(jsonRoot)->Get(psCommand, &jsonBranch)) {
+ ReportIfwCheckResult(
+ checkable, cmdLine, cr,
+ "Missing ." + psCommand + " in JSON object from IfW API on host '"
+ + psHost + "' port '" + psPort + "': " + JsonEncode(jsonRoot),
+ start, end
+ );
+ return;
+ }
+
+ if (!jsonBranch.IsObjectType<Dictionary>()) {
+ ReportIfwCheckResult(
+ checkable, cmdLine, cr,
+ "." + psCommand + " in JSON from IfW API on host '"
+ + psHost + "' port '" + psPort + "' is not an object: " + JsonEncode(jsonBranch),
+ start, end
+ );
+ return;
+ }
+
+ Dictionary::Ptr result = jsonBranch;
+
+ Value exitcode;
+
+ if (!result->Get("exitcode", &exitcode)) {
+ ReportIfwCheckResult(
+ checkable, cmdLine, cr,
+ "Missing ." + psCommand + ".exitcode in JSON object from IfW API on host '"
+ + psHost + "' port '" + psPort + "': " + JsonEncode(result),
+ start, end
+ );
+ return;
+ }
+
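+	// Only the standard plugin exit codes (0-3) are accepted from the IfW API.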
+ static const std::set<double> exitcodes {ServiceOK, ServiceWarning, ServiceCritical, ServiceUnknown};
+ static const auto exitcodeList (Array::FromSet(exitcodes)->Join(", "));
+
+ if (!exitcode.IsNumber() || exitcodes.find(exitcode) == exitcodes.end()) {
+ ReportIfwCheckResult(
+ checkable, cmdLine, cr,
+ "Got bad exitcode " + JsonEncode(exitcode) + " from IfW API on host '" + psHost + "' port '" + psPort
+ + "', expected one of: " + exitcodeList,
+ start, end
+ );
+ return;
+ }
+
+ auto perfdataVal (result->Get("perfdata"));
+ Array::Ptr perfdata;
+
+ try {
+ perfdata = perfdataVal;
+ } catch (const std::exception&) {
+ ReportIfwCheckResult(
+ checkable, cmdLine, cr,
+ "Got bad perfdata " + JsonEncode(perfdataVal) + " from IfW API on host '"
+ + psHost + "' port '" + psPort + "', expected an array",
+ start, end
+ );
+ return;
+ }
+
+ if (perfdata) {
+ ObjectLock oLock (perfdata);
+
+ for (auto& pv : perfdata) {
+ if (!pv.IsString()) {
+ ReportIfwCheckResult(
+ checkable, cmdLine, cr,
+ "Got bad perfdata value " + JsonEncode(perfdata) + " from IfW API on host '"
+ + psHost + "' port '" + psPort + "', expected an array of strings",
+ start, end
+ );
+ return;
+ }
+ }
+ }
+
+ ReportIfwCheckResult(checkable, cmdLine, cr, result->Get("checkresult"), start, end, exitcode, perfdata);
+}
+
+void IfwApiCheckTask::ScriptFunc(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
+{
+ namespace asio = boost::asio;
+ namespace http = boost::beast::http;
+ using http::field;
+
+ REQUIRE_NOT_NULL(checkable);
+ REQUIRE_NOT_NULL(cr);
+
+	// We are only resolving macros here; the actual check execution happens elsewhere
+ if (resolvedMacros && !useResolvedMacros) {
+ auto commandEndpoint (checkable->GetCommandEndpoint());
+
+ // There's indeed a command endpoint, obviously for the actual check execution
+ if (commandEndpoint) {
+ // But it doesn't have this function, yet ("ifw-api-check-command")
+ if (!(commandEndpoint->GetCapabilities() & (uint_fast64_t)ApiCapabilities::IfwApiCheckCommand)) {
+				// Assume "ifw-api-check-command" has been imported into a check command which can also work
+				// based on "plugin-check-command", so delegate to that implementation as a best effort
+ PluginCheckTask::ScriptFunc(checkable, cr, resolvedMacros, useResolvedMacros);
+ return;
+ }
+ }
+ }
+
+ CheckCommand::Ptr command = CheckCommand::ExecuteOverride ? CheckCommand::ExecuteOverride : checkable->GetCheckCommand();
+ auto lcr (checkable->GetLastCheckResult());
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ MacroProcessor::ResolverList resolvers;
+
+ if (MacroResolver::OverrideMacros)
+ resolvers.emplace_back("override", MacroResolver::OverrideMacros);
+
+ if (service)
+ resolvers.emplace_back("service", service);
+ resolvers.emplace_back("host", host);
+ resolvers.emplace_back("command", command);
+
+ auto resolveMacros ([&resolvers, &lcr, &resolvedMacros, useResolvedMacros](const char* macros) -> Value {
+ return MacroProcessor::ResolveMacros(
+ macros, resolvers, lcr, nullptr, MacroProcessor::EscapeCallback(), resolvedMacros, useResolvedMacros
+ );
+ });
+
+ String psCommand = resolveMacros("$ifw_api_command$");
+ Dictionary::Ptr arguments = resolveMacros("$ifw_api_arguments$");
+ String psHost = resolveMacros("$ifw_api_host$");
+ String psPort = resolveMacros("$ifw_api_port$");
+ String expectedSan = resolveMacros("$ifw_api_expected_san$");
+ String cert = resolveMacros("$ifw_api_cert$");
+ String key = resolveMacros("$ifw_api_key$");
+ String ca = resolveMacros("$ifw_api_ca$");
+ String crl = resolveMacros("$ifw_api_crl$");
+ String username = resolveMacros("$ifw_api_username$");
+ String password = resolveMacros("$ifw_api_password$");
+
+ Dictionary::Ptr params = new Dictionary();
+
+ if (arguments) {
+ ObjectLock oLock (arguments);
+ Array::Ptr emptyCmd = new Array();
+
+ for (auto& kv : arguments) {
+ Dictionary::Ptr argSpec;
+
+ if (kv.second.IsObjectType<Dictionary>()) {
+ argSpec = Dictionary::Ptr(kv.second)->ShallowClone();
+ } else {
+ argSpec = new Dictionary({{ "value", kv.second }});
+ }
+
+			// See the default branch of the switch below
+ argSpec->Set("repeat_key", false);
+
+ {
+ ObjectLock oLock (argSpec);
+
+ for (auto& kv : argSpec) {
+ if (kv.second.GetType() == ValueObject) {
+ auto now (Utility::GetTime());
+
+ ReportIfwCheckResult(
+ checkable, command->GetName(), cr,
+ "$ifw_api_arguments$ may not directly contain objects (especially functions).", now, now
+ );
+
+ return;
+ }
+ }
+ }
+
+ /* MacroProcessor::ResolveArguments() converts
+ *
+ * [ "check_example" ]
+ * and
+ * {
+ * "-f" = { set_if = "$example_flag$" }
+ * "-a" = "$example_arg$"
+ * }
+ *
+ * to
+ *
+ * [ "check_example", "-f", "-a", "X" ]
+ *
+ * but we need the args one-by-one like [ "-f" ] or [ "-a", "X" ].
+ */
+ Array::Ptr arg = MacroProcessor::ResolveArguments(
+ emptyCmd, new Dictionary({{kv.first, argSpec}}), resolvers, lcr, resolvedMacros, useResolvedMacros
+ );
+
+ switch (arg ? arg->GetLength() : 0) {
+ case 0:
+ break;
+ case 1: // [ "-f" ]
+ params->Set(arg->Get(0), true);
+ break;
+ case 2: // [ "-a", "X" ]
+ params->Set(arg->Get(0), arg->Get(1));
+ break;
+ default: { // [ "-a", "X", "Y" ]
+ auto k (arg->Get(0));
+
+ arg->Remove(0);
+ params->Set(k, arg);
+ }
+ }
+ }
+ }
+
+ auto checkTimeout (command->GetTimeout());
+ auto checkableTimeout (checkable->GetCheckTimeout());
+
+ if (!checkableTimeout.IsEmpty())
+ checkTimeout = checkableTimeout;
+
+ if (resolvedMacros && !useResolvedMacros)
+ return;
+
+ if (psHost.IsEmpty()) {
+ psHost = "localhost";
+ }
+
+ if (expectedSan.IsEmpty()) {
+ expectedSan = IcingaApplication::GetInstance()->GetNodeName();
+ }
+
+ if (cert.IsEmpty()) {
+ cert = ApiListener::GetDefaultCertPath();
+ }
+
+ if (key.IsEmpty()) {
+ key = ApiListener::GetDefaultKeyPath();
+ }
+
+ if (ca.IsEmpty()) {
+ ca = ApiListener::GetDefaultCaPath();
+ }
+
+ Url::Ptr uri = new Url();
+
+ uri->SetPath({ "v1", "checker" });
+ uri->SetQuery({{ "command", psCommand }});
+
+ static const auto userAgent ("Icinga/" + Application::GetAppVersion());
+ auto relative (uri->Format());
+ auto body (JsonEncode(params));
+ auto req (Shared<http::request<http::string_body>>::Make());
+
+ req->method(http::verb::post);
+ req->target(relative);
+ req->set(field::accept, "application/json");
+ req->set(field::content_type, "application/json");
+ req->set(field::host, expectedSan + ":" + psPort);
+ req->set(field::user_agent, userAgent);
+ req->body() = body;
+ req->content_length(req->body().size());
+
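+	/* Build an equivalent curl command line for reporting purposes only (e.g. as the check
+	 * result's command); the actual HTTP request is performed in-process below. */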
+ static const auto curlTlsMinVersion ((String("--") + DEFAULT_TLS_PROTOCOLMIN).ToLower());
+
+ Array::Ptr cmdLine = new Array({
+ "curl", "--verbose", curlTlsMinVersion, "--fail-with-body",
+ "--connect-to", expectedSan + ":" + psPort + ":" + psHost + ":" + psPort,
+ "--ciphers", DEFAULT_TLS_CIPHERS,
+ "--cert", cert,
+ "--key", key,
+ "--cacert", ca,
+ "--request", "POST",
+ "--url", "https://" + expectedSan + ":" + psPort + relative,
+ "--user-agent", userAgent,
+ "--header", "Accept: application/json",
+ "--header", "Content-Type: application/json",
+ "--data-raw", body
+ });
+
+ if (!crl.IsEmpty()) {
+ cmdLine->Add("--crlfile");
+ cmdLine->Add(crl);
+ }
+
+ if (!username.IsEmpty() && !password.IsEmpty()) {
+ auto authn (username + ":" + password);
+
+ req->set(field::authorization, "Basic " + Base64::Encode(authn));
+ cmdLine->Add("--user");
+ cmdLine->Add(authn);
+ }
+
+ auto& io (IoEngine::Get().GetIoContext());
+ auto strand (Shared<asio::io_context::strand>::Make(io));
+ Shared<asio::ssl::context>::Ptr ctx;
+ double start = Utility::GetTime();
+
+ try {
+ ctx = SetupSslContext(cert, key, ca, crl, DEFAULT_TLS_CIPHERS, DEFAULT_TLS_PROTOCOLMIN, DebugInfo());
+ } catch (const std::exception& ex) {
+ ReportIfwCheckResult(checkable, cmdLine, cr, ex.what(), start, Utility::GetTime());
+ return;
+ }
+
+ auto conn (Shared<AsioTlsStream>::Make(io, *ctx, expectedSan));
+
+ IoEngine::SpawnCoroutine(
+ *strand,
+ [strand, checkable, cmdLine, cr, psCommand, psHost, expectedSan, psPort, conn, req, start, checkTimeout](asio::yield_context yc) {
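+			// Cancel the connection once the check timeout expires; pending async operations then
+			// fail with operation_aborted, which GetUnderstandableError() reports as "Timeout exceeded".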
+ Timeout::Ptr timeout = new Timeout(strand->context(), *strand, boost::posix_time::microseconds(int64_t(checkTimeout * 1e6)),
+ [&conn, &checkable](boost::asio::yield_context yc) {
+ Log(LogNotice, "IfwApiCheckTask")
+ << "Timeout while checking " << checkable->GetReflectionType()->GetName()
+ << " '" << checkable->GetName() << "', cancelling attempt";
+
+ boost::system::error_code ec;
+ conn->lowest_layer().cancel(ec);
+ }
+ );
+
+ Defer cancelTimeout ([&timeout]() { timeout->Cancel(); });
+
+ DoIfwNetIo(yc, checkable, cmdLine, cr, psCommand, psHost, expectedSan, psPort, *conn, *req, start);
+ }
+ );
+}
diff --git a/lib/methods/ifwapichecktask.hpp b/lib/methods/ifwapichecktask.hpp
new file mode 100644
index 0000000..3932733
--- /dev/null
+++ b/lib/methods/ifwapichecktask.hpp
@@ -0,0 +1,27 @@
+/* Icinga 2 | (c) 2023 Icinga GmbH | GPLv2+ */
+
+#pragma once
+
+#include "methods/i2-methods.hpp"
+#include "icinga/service.hpp"
+#include "base/dictionary.hpp"
+
+namespace icinga
+{
+
+/**
+ * Executes checks via the Icinga for Windows (IfW) API.
+ *
+ * @ingroup methods
+ */
+class IfwApiCheckTask
+{
+public:
+ static void ScriptFunc(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros);
+
+private:
+ IfwApiCheckTask();
+};
+
+}
diff --git a/lib/methods/methods-itl.conf b/lib/methods/methods-itl.conf
new file mode 100644
index 0000000..6249692
--- /dev/null
+++ b/lib/methods/methods-itl.conf
@@ -0,0 +1,90 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+System.assert(Internal.run_with_activation_context(function() {
+ template CheckCommand "icinga-check-command" use (IcingaCheck = Internal.IcingaCheck) {
+ execute = IcingaCheck
+
+ vars.icinga_min_version = ""
+ }
+
+ template CheckCommand "cluster-check-command" use (ClusterCheck = Internal.ClusterCheck) {
+ execute = ClusterCheck
+ }
+
+ template CheckCommand "cluster-zone-check-command" use (ClusterZoneCheck = Internal.ClusterZoneCheck) {
+ execute = ClusterZoneCheck
+ }
+
+ template CheckCommand "plugin-check-command" use (PluginCheck = Internal.PluginCheck) default {
+ execute = PluginCheck
+ }
+
+ template NotificationCommand "plugin-notification-command" use (PluginNotification = Internal.PluginNotification) default {
+ execute = PluginNotification
+ }
+
+ template EventCommand "plugin-event-command" use (PluginEvent = Internal.PluginEvent) default {
+ execute = PluginEvent
+ }
+
+ template CheckCommand "dummy-check-command" use (DummyCheck = Internal.DummyCheck) {
+ execute = DummyCheck
+ }
+
+ template CheckCommand "random-check-command" use (RandomCheck = Internal.RandomCheck) {
+ execute = RandomCheck
+ }
+
+ template CheckCommand "exception-check-command" use (ExceptionCheck = Internal.ExceptionCheck) {
+ execute = ExceptionCheck
+ }
+
+ template CheckCommand "null-check-command" use (NullCheck = Internal.NullCheck) {
+ execute = NullCheck
+ }
+
+ template CheckCommand "ifw-api-check-command" use (IfwApiCheck = Internal.IfwApiCheck) {
+ execute = IfwApiCheck
+ }
+
+ template EventCommand "null-event-command" use (NullEvent = Internal.NullEvent) {
+ execute = NullEvent
+ }
+
+ template TimePeriod "empty-timeperiod" use (EmptyTimePeriod = Internal.EmptyTimePeriod) {
+ update = EmptyTimePeriod
+ }
+
+ template TimePeriod "even-minutes-timeperiod" use (EvenMinutesTimePeriod = Internal.EvenMinutesTimePeriod) {
+ update = EvenMinutesTimePeriod
+ }
+
+ template CheckCommand "sleep-check-command" use (SleepCheck = Internal.SleepCheck) {
+ execute = SleepCheck
+
+ vars.sleep_time = 1s
+ }
+}))
+
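+/* The templates above capture the functions they need via use(); afterwards the helpers are
+ * removed from the Internal namespace so they cannot be called directly from user configuration. */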
+var methods = [
+ "IcingaCheck",
+ "IfwApiCheck",
+ "ClusterCheck",
+ "ClusterZoneCheck",
+ "PluginCheck",
+ "ClrCheck",
+ "PluginNotification",
+ "PluginEvent",
+ "DummyCheck",
+ "RandomCheck",
+ "ExceptionCheck",
+ "NullCheck",
+ "NullEvent",
+ "EmptyTimePeriod",
+ "EvenMinutesTimePeriod",
+ "SleepCheck"
+]
+
+for (method in methods) {
+ Internal.remove(method)
+}
diff --git a/lib/methods/nullchecktask.cpp b/lib/methods/nullchecktask.cpp
new file mode 100644
index 0000000..ee66029
--- /dev/null
+++ b/lib/methods/nullchecktask.cpp
@@ -0,0 +1,50 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef _WIN32
+# include <stdlib.h>
+#endif /* _WIN32 */
+#include "methods/nullchecktask.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "base/utility.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/convert.hpp"
+#include "base/function.hpp"
+#include "base/logger.hpp"
+
+using namespace icinga;
+
+REGISTER_FUNCTION_NONCONST(Internal, NullCheck, &NullCheckTask::ScriptFunc, "checkable:cr:resolvedMacros:useResolvedMacros");
+
+void NullCheckTask::ScriptFunc(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
+{
+ REQUIRE_NOT_NULL(checkable);
+ REQUIRE_NOT_NULL(cr);
+
+ if (resolvedMacros && !useResolvedMacros)
+ return;
+
+ String output = "Hello from ";
+ output += IcingaApplication::GetInstance()->GetNodeName();
+ ServiceState state = ServiceOK;
+
+ if (Checkable::ExecuteCommandProcessFinishedHandler) {
+ double now = Utility::GetTime();
+ ProcessResult pr;
+ pr.PID = -1;
+ pr.Output = output;
+ pr.ExecutionStart = now;
+ pr.ExecutionEnd = now;
+ pr.ExitStatus = state;
+
+ Checkable::ExecuteCommandProcessFinishedHandler("", pr);
+ } else {
+ cr->SetOutput(output);
+ cr->SetPerformanceData(new Array({
+ new PerfdataValue("time", Convert::ToDouble(Utility::GetTime()))
+ }));
+ cr->SetState(state);
+
+ checkable->ProcessCheckResult(cr);
+ }
+}
diff --git a/lib/methods/nullchecktask.hpp b/lib/methods/nullchecktask.hpp
new file mode 100644
index 0000000..954cf8d
--- /dev/null
+++ b/lib/methods/nullchecktask.hpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef NULLCHECKTASK_H
+#define NULLCHECKTASK_H
+
+#include "methods/i2-methods.hpp"
+#include "icinga/service.hpp"
+#include "base/dictionary.hpp"
+
+namespace icinga
+{
+
+/**
+ * Test class for additional check types. Implements the "null" check type.
+ *
+ * @ingroup methods
+ */
+class NullCheckTask
+{
+public:
+ static void ScriptFunc(const Checkable::Ptr& service, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros);
+
+private:
+ NullCheckTask();
+};
+
+}
+
+#endif /* NULLCHECKTASK_H */
diff --git a/lib/methods/nulleventtask.cpp b/lib/methods/nulleventtask.cpp
new file mode 100644
index 0000000..3c02f23
--- /dev/null
+++ b/lib/methods/nulleventtask.cpp
@@ -0,0 +1,26 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "methods/nulleventtask.hpp"
+#include "base/function.hpp"
+#include "base/logger.hpp"
+
+using namespace icinga;
+
+REGISTER_FUNCTION_NONCONST(Internal, NullEvent, &NullEventTask::ScriptFunc, "checkable:resolvedMacros:useResolvedMacros");
+
+void NullEventTask::ScriptFunc(const Checkable::Ptr& checkable, const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
+{
+ REQUIRE_NOT_NULL(checkable);
+
+ if (Checkable::ExecuteCommandProcessFinishedHandler) {
+ double now = Utility::GetTime();
+ ProcessResult pr;
+ pr.PID = -1;
+ pr.Output = "";
+ pr.ExecutionStart = now;
+ pr.ExecutionEnd = now;
+ pr.ExitStatus = 0;
+
+ Checkable::ExecuteCommandProcessFinishedHandler("", pr);
+ }
+}
diff --git a/lib/methods/nulleventtask.hpp b/lib/methods/nulleventtask.hpp
new file mode 100644
index 0000000..153470f
--- /dev/null
+++ b/lib/methods/nulleventtask.hpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef NULLEVENTTASK_H
+#define NULLEVENTTASK_H
+
+#include "methods/i2-methods.hpp"
+#include "icinga/service.hpp"
+#include "base/dictionary.hpp"
+
+namespace icinga
+{
+
+/**
+ * Test class for additional event handler types. Implements the "null" event handler type.
+ *
+ * @ingroup methods
+ */
+class NullEventTask
+{
+public:
+ static void ScriptFunc(const Checkable::Ptr& service,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros);
+
+private:
+ NullEventTask();
+};
+
+}
+
+#endif /* NULLEVENTTASK_H */
diff --git a/lib/methods/pluginchecktask.cpp b/lib/methods/pluginchecktask.cpp
new file mode 100644
index 0000000..b4749fb
--- /dev/null
+++ b/lib/methods/pluginchecktask.cpp
@@ -0,0 +1,89 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "methods/pluginchecktask.hpp"
+#include "icinga/pluginutility.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/macroprocessor.hpp"
+#include "base/configtype.hpp"
+#include "base/logger.hpp"
+#include "base/function.hpp"
+#include "base/utility.hpp"
+#include "base/process.hpp"
+#include "base/convert.hpp"
+
+using namespace icinga;
+
+REGISTER_FUNCTION_NONCONST(Internal, PluginCheck, &PluginCheckTask::ScriptFunc, "checkable:cr:resolvedMacros:useResolvedMacros");
+
+void PluginCheckTask::ScriptFunc(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
+{
+ REQUIRE_NOT_NULL(checkable);
+ REQUIRE_NOT_NULL(cr);
+
+ CheckCommand::Ptr commandObj = CheckCommand::ExecuteOverride ? CheckCommand::ExecuteOverride : checkable->GetCheckCommand();
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ MacroProcessor::ResolverList resolvers;
+
+ if (MacroResolver::OverrideMacros)
+ resolvers.emplace_back("override", MacroResolver::OverrideMacros);
+
+ if (service)
+ resolvers.emplace_back("service", service);
+ resolvers.emplace_back("host", host);
+ resolvers.emplace_back("command", commandObj);
+
+ int timeout = commandObj->GetTimeout();
+
+ if (!checkable->GetCheckTimeout().IsEmpty())
+ timeout = checkable->GetCheckTimeout();
+
+ std::function<void(const Value& commandLine, const ProcessResult&)> callback;
+
+ if (Checkable::ExecuteCommandProcessFinishedHandler) {
+ callback = Checkable::ExecuteCommandProcessFinishedHandler;
+ } else {
+ callback = [checkable, cr](const Value& commandLine, const ProcessResult& pr) {
+ ProcessFinishedHandler(checkable, cr, commandLine, pr);
+ };
+ }
+
+ PluginUtility::ExecuteCommand(commandObj, checkable, checkable->GetLastCheckResult(),
+ resolvers, resolvedMacros, useResolvedMacros, timeout, callback);
+
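+	/* Count the check as pending/concurrent only if it is actually executed,
+	 * i.e. not when macros are only being resolved (e.g. for execution on another endpoint). */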
+ if (!resolvedMacros || useResolvedMacros) {
+ Checkable::CurrentConcurrentChecks.fetch_add(1);
+ Checkable::IncreasePendingChecks();
+ }
+}
+
+void PluginCheckTask::ProcessFinishedHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, const Value& commandLine, const ProcessResult& pr)
+{
+ Checkable::CurrentConcurrentChecks.fetch_sub(1);
+ Checkable::DecreasePendingChecks();
+
+ if (pr.ExitStatus > 3) {
+ Process::Arguments parguments = Process::PrepareCommand(commandLine);
+ Log(LogWarning, "PluginCheckTask")
+ << "Check command for object '" << checkable->GetName() << "' (PID: " << pr.PID
+ << ", arguments: " << Process::PrettyPrintArguments(parguments) << ") terminated with exit code "
+ << pr.ExitStatus << ", output: " << pr.Output;
+ }
+
+ String output = pr.Output.Trim();
+
+ std::pair<String, String> co = PluginUtility::ParseCheckOutput(output);
+ cr->SetCommand(commandLine);
+ cr->SetOutput(co.first);
+ cr->SetPerformanceData(PluginUtility::SplitPerfdata(co.second));
+ cr->SetState(PluginUtility::ExitStatusToState(pr.ExitStatus));
+ cr->SetExitStatus(pr.ExitStatus);
+ cr->SetExecutionStart(pr.ExecutionStart);
+ cr->SetExecutionEnd(pr.ExecutionEnd);
+
+ checkable->ProcessCheckResult(cr);
+}
diff --git a/lib/methods/pluginchecktask.hpp b/lib/methods/pluginchecktask.hpp
new file mode 100644
index 0000000..a4fc3a3
--- /dev/null
+++ b/lib/methods/pluginchecktask.hpp
@@ -0,0 +1,33 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef PLUGINCHECKTASK_H
+#define PLUGINCHECKTASK_H
+
+#include "methods/i2-methods.hpp"
+#include "base/process.hpp"
+#include "icinga/service.hpp"
+
+namespace icinga
+{
+
+/**
+ * Implements service checks based on external plugins.
+ *
+ * @ingroup methods
+ */
+class PluginCheckTask
+{
+public:
+ static void ScriptFunc(const Checkable::Ptr& service, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros);
+
+private:
+ PluginCheckTask();
+
+ static void ProcessFinishedHandler(const Checkable::Ptr& service,
+ const CheckResult::Ptr& cr, const Value& commandLine, const ProcessResult& pr);
+};
+
+}
+
+#endif /* PLUGINCHECKTASK_H */
diff --git a/lib/methods/plugineventtask.cpp b/lib/methods/plugineventtask.cpp
new file mode 100644
index 0000000..00efb6c
--- /dev/null
+++ b/lib/methods/plugineventtask.cpp
@@ -0,0 +1,61 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "methods/plugineventtask.hpp"
+#include "icinga/eventcommand.hpp"
+#include "icinga/macroprocessor.hpp"
+#include "icinga/pluginutility.hpp"
+#include "base/configtype.hpp"
+#include "base/logger.hpp"
+#include "base/function.hpp"
+#include "base/utility.hpp"
+#include "base/process.hpp"
+#include "base/convert.hpp"
+
+using namespace icinga;
+
+REGISTER_FUNCTION_NONCONST(Internal, PluginEvent, &PluginEventTask::ScriptFunc, "checkable:resolvedMacros:useResolvedMacros");
+
+void PluginEventTask::ScriptFunc(const Checkable::Ptr& checkable,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
+{
+ REQUIRE_NOT_NULL(checkable);
+
+ EventCommand::Ptr commandObj = EventCommand::ExecuteOverride ? EventCommand::ExecuteOverride : checkable->GetEventCommand();
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ MacroProcessor::ResolverList resolvers;
+
+ if (MacroResolver::OverrideMacros)
+ resolvers.emplace_back("override", MacroResolver::OverrideMacros);
+
+ if (service)
+ resolvers.emplace_back("service", service);
+ resolvers.emplace_back("host", host);
+ resolvers.emplace_back("command", commandObj);
+
+ int timeout = commandObj->GetTimeout();
+ std::function<void(const Value& commandLine, const ProcessResult&)> callback;
+
+ if (Checkable::ExecuteCommandProcessFinishedHandler) {
+ callback = Checkable::ExecuteCommandProcessFinishedHandler;
+ } else {
+ callback = [checkable](const Value& commandLine, const ProcessResult& pr) { ProcessFinishedHandler(checkable, commandLine, pr); };
+ }
+
+ PluginUtility::ExecuteCommand(commandObj, checkable, checkable->GetLastCheckResult(),
+ resolvers, resolvedMacros, useResolvedMacros, timeout, callback);
+}
+
+void PluginEventTask::ProcessFinishedHandler(const Checkable::Ptr& checkable, const Value& commandLine, const ProcessResult& pr)
+{
+ if (pr.ExitStatus != 0) {
+ Process::Arguments parguments = Process::PrepareCommand(commandLine);
+ Log(LogWarning, "PluginEventTask")
+ << "Event command for object '" << checkable->GetName() << "' (PID: " << pr.PID
+ << ", arguments: " << Process::PrettyPrintArguments(parguments) << ") terminated with exit code "
+ << pr.ExitStatus << ", output: " << pr.Output;
+ }
+}
diff --git a/lib/methods/plugineventtask.hpp b/lib/methods/plugineventtask.hpp
new file mode 100644
index 0000000..8908a82
--- /dev/null
+++ b/lib/methods/plugineventtask.hpp
@@ -0,0 +1,33 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef PLUGINEVENTTASK_H
+#define PLUGINEVENTTASK_H
+
+#include "methods/i2-methods.hpp"
+#include "icinga/service.hpp"
+#include "base/process.hpp"
+
+namespace icinga
+{
+
+/**
+ * Implements event handlers based on external plugins.
+ *
+ * @ingroup methods
+ */
+class PluginEventTask
+{
+public:
+ static void ScriptFunc(const Checkable::Ptr& service,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros);
+
+private:
+ PluginEventTask();
+
+ static void ProcessFinishedHandler(const Checkable::Ptr& checkable,
+ const Value& commandLine, const ProcessResult& pr);
+};
+
+}
+
+#endif /* PLUGINEVENTTASK_H */
diff --git a/lib/methods/pluginnotificationtask.cpp b/lib/methods/pluginnotificationtask.cpp
new file mode 100644
index 0000000..95911fa
--- /dev/null
+++ b/lib/methods/pluginnotificationtask.cpp
@@ -0,0 +1,123 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "methods/pluginnotificationtask.hpp"
+#include "icinga/notification.hpp"
+#include "icinga/notificationcommand.hpp"
+#include "icinga/pluginutility.hpp"
+#include "icinga/service.hpp"
+#include "icinga/macroprocessor.hpp"
+#include "base/function.hpp"
+#include "base/logger.hpp"
+#include "base/utility.hpp"
+#include "base/process.hpp"
+#include "base/convert.hpp"
+
+#ifdef __linux__
+# include <linux/binfmts.h>
+# include <unistd.h>
+
+# ifndef PAGE_SIZE
+// MAX_ARG_STRLEN is defined as a multiple of PAGE_SIZE, which itself may be missing here
+# define PAGE_SIZE getpagesize()
+# endif /* PAGE_SIZE */
+
+// Truncate e.g. $host.output$ by an extra 10% to leave enough room for a prefix
+// such as --host-output= (as in --host-output=$host.output$) without risking an int overflow
+const static auto l_MaxOutLen = MAX_ARG_STRLEN - MAX_ARG_STRLEN / 10u;
+#endif /* __linux__ */
+
+using namespace icinga;
+
+REGISTER_FUNCTION_NONCONST(Internal, PluginNotification, &PluginNotificationTask::ScriptFunc, "notification:user:cr:itype:author:comment:resolvedMacros:useResolvedMacros");
+
+void PluginNotificationTask::ScriptFunc(const Notification::Ptr& notification,
+ const User::Ptr& user, const CheckResult::Ptr& cr, int itype,
+ const String& author, const String& comment, const Dictionary::Ptr& resolvedMacros,
+ bool useResolvedMacros)
+{
+ REQUIRE_NOT_NULL(notification);
+ REQUIRE_NOT_NULL(user);
+
+ NotificationCommand::Ptr commandObj = NotificationCommand::ExecuteOverride ? NotificationCommand::ExecuteOverride : notification->GetCommand();
+
+ auto type = static_cast<NotificationType>(itype);
+
+ Checkable::Ptr checkable = notification->GetCheckable();
+
+ Dictionary::Ptr notificationExtra = new Dictionary({
+ { "type", Notification::NotificationTypeToStringCompat(type) }, //TODO: Change that to our types.
+ { "author", author },
+#ifdef __linux__
+ { "comment", comment.SubStr(0, l_MaxOutLen) }
+#else /* __linux__ */
+ { "comment", comment }
+#endif /* __linux__ */
+ });
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ MacroProcessor::ResolverList resolvers;
+
+ if (MacroResolver::OverrideMacros)
+ resolvers.emplace_back("override", MacroResolver::OverrideMacros);
+
+ resolvers.emplace_back("user", user);
+ resolvers.emplace_back("notification", notificationExtra);
+ resolvers.emplace_back("notification", notification);
+
+ if (service) {
+#ifdef __linux__
+ auto cr (service->GetLastCheckResult());
+
+ if (cr) {
+ auto output (cr->GetOutput());
+
+ if (output.GetLength() > l_MaxOutLen) {
+ resolvers.emplace_back("service", new Dictionary({{"output", output.SubStr(0, l_MaxOutLen)}}));
+ }
+ }
+#endif /* __linux__ */
+
+ resolvers.emplace_back("service", service);
+ }
+
+#ifdef __linux__
+ auto hcr (host->GetLastCheckResult());
+
+ if (hcr) {
+ auto output (hcr->GetOutput());
+
+ if (output.GetLength() > l_MaxOutLen) {
+ resolvers.emplace_back("host", new Dictionary({{"output", output.SubStr(0, l_MaxOutLen)}}));
+ }
+ }
+#endif /* __linux__ */
+
+ resolvers.emplace_back("host", host);
+ resolvers.emplace_back("command", commandObj);
+
+ int timeout = commandObj->GetTimeout();
+ std::function<void(const Value& commandLine, const ProcessResult&)> callback;
+
+ if (Checkable::ExecuteCommandProcessFinishedHandler) {
+ callback = Checkable::ExecuteCommandProcessFinishedHandler;
+ } else {
+ callback = [checkable](const Value& commandline, const ProcessResult& pr) { ProcessFinishedHandler(checkable, commandline, pr); };
+ }
+
+ PluginUtility::ExecuteCommand(commandObj, checkable, cr, resolvers,
+ resolvedMacros, useResolvedMacros, timeout, callback);
+}
+
+void PluginNotificationTask::ProcessFinishedHandler(const Checkable::Ptr& checkable, const Value& commandLine, const ProcessResult& pr)
+{
+ if (pr.ExitStatus != 0) {
+ Process::Arguments parguments = Process::PrepareCommand(commandLine);
+ Log(LogWarning, "PluginNotificationTask")
+ << "Notification command for object '" << checkable->GetName() << "' (PID: " << pr.PID
+ << ", arguments: " << Process::PrettyPrintArguments(parguments) << ") terminated with exit code "
+ << pr.ExitStatus << ", output: " << pr.Output;
+ }
+}
diff --git a/lib/methods/pluginnotificationtask.hpp b/lib/methods/pluginnotificationtask.hpp
new file mode 100644
index 0000000..66d6539
--- /dev/null
+++ b/lib/methods/pluginnotificationtask.hpp
@@ -0,0 +1,36 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef PLUGINNOTIFICATIONTASK_H
+#define PLUGINNOTIFICATIONTASK_H
+
+#include "methods/i2-methods.hpp"
+#include "icinga/notification.hpp"
+#include "icinga/service.hpp"
+#include "base/process.hpp"
+
+namespace icinga
+{
+
+/**
+ * Implements sending notifications based on external plugins.
+ *
+ * @ingroup methods
+ */
+class PluginNotificationTask
+{
+public:
+ static void ScriptFunc(const Notification::Ptr& notification,
+ const User::Ptr& user, const CheckResult::Ptr& cr, int itype,
+ const String& author, const String& comment,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros);
+
+private:
+ PluginNotificationTask();
+
+ static void ProcessFinishedHandler(const Checkable::Ptr& checkable,
+ const Value& commandLine, const ProcessResult& pr);
+};
+
+}
+
+#endif /* PLUGINNOTIFICATIONTASK_H */
diff --git a/lib/methods/randomchecktask.cpp b/lib/methods/randomchecktask.cpp
new file mode 100644
index 0000000..9b133ef
--- /dev/null
+++ b/lib/methods/randomchecktask.cpp
@@ -0,0 +1,65 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef _WIN32
+# include <stdlib.h>
+#endif /* _WIN32 */
+#include "methods/randomchecktask.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "icinga/checkcommand.hpp"
+#include "base/utility.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/function.hpp"
+#include "base/logger.hpp"
+
+using namespace icinga;
+
+REGISTER_FUNCTION_NONCONST(Internal, RandomCheck, &RandomCheckTask::ScriptFunc, "checkable:cr:resolvedMacros:useResolvedMacros");
+
+void RandomCheckTask::ScriptFunc(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
+{
+ REQUIRE_NOT_NULL(checkable);
+ REQUIRE_NOT_NULL(cr);
+
+ if (resolvedMacros && !useResolvedMacros)
+ return;
+
+ double now = Utility::GetTime();
+ double uptime = Application::GetUptime();
+
+ String output = "Hello from " + IcingaApplication::GetInstance()->GetNodeName()
+ + ". Icinga 2 has been running for " + Utility::FormatDuration(uptime)
+ + ". Version: " + Application::GetAppVersion();
+
+ CheckCommand::Ptr command = CheckCommand::ExecuteOverride ? CheckCommand::ExecuteOverride : checkable->GetCheckCommand();
+ String commandName = command->GetName();
+ ServiceState state = static_cast<ServiceState>(Utility::Random() % 4);
+
+ if (Checkable::ExecuteCommandProcessFinishedHandler) {
+ ProcessResult pr;
+ pr.PID = -1;
+ pr.Output = output;
+ pr.ExecutionStart = now;
+ pr.ExecutionEnd = now;
+ pr.ExitStatus = state;
+
+ Checkable::ExecuteCommandProcessFinishedHandler(commandName, pr);
+ } else {
+ cr->SetOutput(output);
+
+ double random = Utility::Random() % 1000;
+ cr->SetPerformanceData(new Array({
+ new PerfdataValue("time", now),
+ new PerfdataValue("value", random),
+ new PerfdataValue("value_1m", random * 0.9),
+ new PerfdataValue("value_5m", random * 0.8),
+ new PerfdataValue("uptime", uptime),
+ }));
+
+ cr->SetState(state);
+ cr->SetCommand(commandName);
+
+ checkable->ProcessCheckResult(cr);
+ }
+}
diff --git a/lib/methods/randomchecktask.hpp b/lib/methods/randomchecktask.hpp
new file mode 100644
index 0000000..00ce663
--- /dev/null
+++ b/lib/methods/randomchecktask.hpp
@@ -0,0 +1,29 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef RANDOMCHECKTASK_H
+#define RANDOMCHECKTASK_H
+
+#include "icinga/service.hpp"
+#include "base/dictionary.hpp"
+
+namespace icinga
+{
+
+/**
+ * Test class for additional check types. Implements the "random" check type.
+ *
+ * @ingroup methods
+ */
+class RandomCheckTask
+{
+public:
+ static void ScriptFunc(const Checkable::Ptr& service, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros);
+
+private:
+ RandomCheckTask();
+};
+
+}
+
+#endif /* RANDOMCHECKTASK_H */
diff --git a/lib/methods/sleepchecktask.cpp b/lib/methods/sleepchecktask.cpp
new file mode 100644
index 0000000..af6b063
--- /dev/null
+++ b/lib/methods/sleepchecktask.cpp
@@ -0,0 +1,67 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "methods/sleepchecktask.hpp"
+#include "icinga/pluginutility.hpp"
+#include "base/utility.hpp"
+#include "base/convert.hpp"
+#include "base/function.hpp"
+#include "base/logger.hpp"
+
+using namespace icinga;
+
+REGISTER_FUNCTION_NONCONST(Internal, SleepCheck, &SleepCheckTask::ScriptFunc, "checkable:cr:resolvedMacros:useResolvedMacros");
+
+void SleepCheckTask::ScriptFunc(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros)
+{
+ REQUIRE_NOT_NULL(checkable);
+ REQUIRE_NOT_NULL(cr);
+
+ CheckCommand::Ptr commandObj = CheckCommand::ExecuteOverride ? CheckCommand::ExecuteOverride : checkable->GetCheckCommand();
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ MacroProcessor::ResolverList resolvers;
+
+ if (MacroResolver::OverrideMacros)
+ resolvers.emplace_back("override", MacroResolver::OverrideMacros);
+
+ if (service)
+ resolvers.emplace_back("service", service);
+ resolvers.emplace_back("host", host);
+ resolvers.emplace_back("command", commandObj);
+
+ double sleepTime = MacroProcessor::ResolveMacros("$sleep_time$", resolvers, checkable->GetLastCheckResult(),
+ nullptr, MacroProcessor::EscapeCallback(), resolvedMacros, useResolvedMacros);
+
+ if (resolvedMacros && !useResolvedMacros)
+ return;
+
+ Utility::Sleep(sleepTime);
+
+ String output = "Slept for " + Convert::ToString(sleepTime) + " seconds.";
+
+ double now = Utility::GetTime();
+	String commandName = commandObj->GetName();
+
+ if (Checkable::ExecuteCommandProcessFinishedHandler) {
+ ProcessResult pr;
+ pr.PID = -1;
+ pr.Output = output;
+ pr.ExecutionStart = now - sleepTime;
+ pr.ExecutionEnd = now;
+ pr.ExitStatus = 0;
+
+ Checkable::ExecuteCommandProcessFinishedHandler("", pr);
+ } else {
+ cr->SetOutput(output);
+ cr->SetExecutionStart(now);
+ cr->SetExecutionEnd(now);
+ cr->SetCommand(commandName);
+
+ checkable->ProcessCheckResult(cr);
+ }
+} \ No newline at end of file
diff --git a/lib/methods/sleepchecktask.hpp b/lib/methods/sleepchecktask.hpp
new file mode 100644
index 0000000..b104f60
--- /dev/null
+++ b/lib/methods/sleepchecktask.hpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef SLEEPCHECKTASK_H
+#define SLEEPCHECKTASK_H
+
+#include "methods/i2-methods.hpp"
+#include "icinga/service.hpp"
+#include "base/dictionary.hpp"
+
+namespace icinga
+{
+
+/**
+ * Test class for additional check types. Implements the "sleep" check type.
+ *
+ * @ingroup methods
+ */
+class SleepCheckTask
+{
+public:
+ static void ScriptFunc(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr,
+ const Dictionary::Ptr& resolvedMacros, bool useResolvedMacros);
+
+private:
+ SleepCheckTask();
+};
+
+}
+
+#endif /* SLEEPCHECKTASK_H */ \ No newline at end of file
diff --git a/lib/methods/timeperiodtask.cpp b/lib/methods/timeperiodtask.cpp
new file mode 100644
index 0000000..bb3f1bb
--- /dev/null
+++ b/lib/methods/timeperiodtask.cpp
@@ -0,0 +1,35 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "methods/timeperiodtask.hpp"
+#include "base/function.hpp"
+
+using namespace icinga;
+
+REGISTER_FUNCTION_NONCONST(Internal, EmptyTimePeriod, &TimePeriodTask::EmptyTimePeriodUpdate, "tp:begin:end");
+REGISTER_FUNCTION_NONCONST(Internal, EvenMinutesTimePeriod, &TimePeriodTask::EvenMinutesTimePeriodUpdate, "tp:begin:end");
+
+Array::Ptr TimePeriodTask::EmptyTimePeriodUpdate(const TimePeriod::Ptr& tp, double, double)
+{
+ REQUIRE_NOT_NULL(tp);
+
+ Array::Ptr segments = new Array();
+ return segments;
+}
+
+Array::Ptr TimePeriodTask::EvenMinutesTimePeriodUpdate(const TimePeriod::Ptr& tp, double begin, double end)
+{
+ REQUIRE_NOT_NULL(tp);
+
+ ArrayData segments;
+
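+	/* Cover every even-numbered minute between begin and end with a one-minute
+	 * segment; starting one minute early ensures a partially elapsed minute is
+	 * still included. */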
+ for (long t = begin / 60 - 1; t * 60 < end; t++) {
+ if ((t % 2) == 0) {
+ segments.push_back(new Dictionary({
+ { "begin", t * 60 },
+ { "end", (t + 1) * 60 }
+ }));
+ }
+ }
+
+ return new Array(std::move(segments));
+}
diff --git a/lib/methods/timeperiodtask.hpp b/lib/methods/timeperiodtask.hpp
new file mode 100644
index 0000000..0dff1c6
--- /dev/null
+++ b/lib/methods/timeperiodtask.hpp
@@ -0,0 +1,28 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef TIMEPERIODTASK_H
+#define TIMEPERIODTASK_H
+
+#include "icinga/timeperiod.hpp"
+
+namespace icinga
+{
+
+/**
+ * Test timeperiod functions.
+ *
+ * @ingroup methods
+ */
+class TimePeriodTask
+{
+public:
+ static Array::Ptr EmptyTimePeriodUpdate(const TimePeriod::Ptr& tp, double begin, double end);
+ static Array::Ptr EvenMinutesTimePeriodUpdate(const TimePeriod::Ptr& tp, double begin, double end);
+
+private:
+ TimePeriodTask();
+};
+
+}
+
+#endif /* TIMEPERIODTASK_H */
diff --git a/lib/mysql_shim/CMakeLists.txt b/lib/mysql_shim/CMakeLists.txt
new file mode 100644
index 0000000..fc7dbee
--- /dev/null
+++ b/lib/mysql_shim/CMakeLists.txt
@@ -0,0 +1,31 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+include_directories(${MYSQL_INCLUDE_DIR})
+
+set(mysql_shim_SOURCES
+ mysql_shim.def
+ mysqlinterface.cpp mysqlinterface.hpp
+)
+
+if(ICINGA2_UNITY_BUILD)
+ mkunity_target(mysql_shim mysql_shim mysql_shim_SOURCES)
+endif()
+
+add_library(mysql_shim SHARED ${mysql_shim_SOURCES})
+
+include(GenerateExportHeader)
+generate_export_header(mysql_shim)
+
+target_link_libraries(mysql_shim ${MYSQL_LIB})
+
+set_target_properties (
+ mysql_shim PROPERTIES
+ FOLDER Lib
+ VERSION ${SPEC_VERSION}
+)
+
+install(
+ TARGETS mysql_shim
+ RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR}
+ LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}/icinga2
+)
diff --git a/lib/mysql_shim/mysql_shim.def b/lib/mysql_shim/mysql_shim.def
new file mode 100644
index 0000000..ae36765
--- /dev/null
+++ b/lib/mysql_shim/mysql_shim.def
@@ -0,0 +1,3 @@
+LIBRARY mysql_shim
+EXPORTS
+ create_mysql_shim
diff --git a/lib/mysql_shim/mysqlinterface.cpp b/lib/mysql_shim/mysqlinterface.cpp
new file mode 100644
index 0000000..43e50e8
--- /dev/null
+++ b/lib/mysql_shim/mysqlinterface.cpp
@@ -0,0 +1,119 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "mysql_shim/mysqlinterface.hpp"
+
+using namespace icinga;
+
+struct MysqlInterfaceImpl final : public MysqlInterface
+{
+ void Destroy() override
+ {
+ delete this;
+ }
+
+ my_ulonglong affected_rows(MYSQL *mysql) const override
+ {
+ return mysql_affected_rows(mysql);
+ }
+
+ void close(MYSQL *sock) const override
+ {
+ return mysql_close(sock);
+ }
+
+ const char *error(MYSQL *mysql) const override
+ {
+ return mysql_error(mysql);
+ }
+
+ MYSQL_FIELD *fetch_field(MYSQL_RES *result) const override
+ {
+ return mysql_fetch_field(result);
+ }
+
+ unsigned long *fetch_lengths(MYSQL_RES *result) const override
+ {
+ return mysql_fetch_lengths(result);
+ }
+
+ MYSQL_ROW fetch_row(MYSQL_RES *result) const override
+ {
+ return mysql_fetch_row(result);
+ }
+
+ unsigned int field_count(MYSQL *mysql) const override
+ {
+ return mysql_field_count(mysql);
+ }
+
+ MYSQL_FIELD_OFFSET field_seek(MYSQL_RES *result, MYSQL_FIELD_OFFSET offset) const override
+ {
+ return mysql_field_seek(result, offset);
+ }
+
+ void free_result(MYSQL_RES *result) const override
+ {
+ mysql_free_result(result);
+ }
+
+ MYSQL *init(MYSQL *mysql) const override
+ {
+ return mysql_init(mysql);
+ }
+
+ my_ulonglong insert_id(MYSQL *mysql) const override
+ {
+ return mysql_insert_id(mysql);
+ }
+
+ int next_result(MYSQL *mysql) const override
+ {
+ return mysql_next_result(mysql);
+ }
+
+ int ping(MYSQL *mysql) const override
+ {
+ return mysql_ping(mysql);
+ }
+
+ int query(MYSQL *mysql, const char *q) const override
+ {
+ return mysql_query(mysql, q);
+ }
+
+ MYSQL *real_connect(MYSQL *mysql, const char *host, const char *user, const char *passwd,
+ const char *db, unsigned int port, const char *unix_socket, unsigned long clientflag) const override
+ {
+ return mysql_real_connect(mysql, host, user, passwd, db, port, unix_socket, clientflag);
+ }
+
+ unsigned long real_escape_string(MYSQL *mysql, char *to, const char *from, unsigned long length) const override
+ {
+ return mysql_real_escape_string(mysql, to, from, length);
+ }
+
+ int options(MYSQL *mysql, mysql_option option, const void *arg) const override
+ {
+ return mysql_options(mysql, option, arg);
+ }
+
+ bool ssl_set(MYSQL *mysql, const char *key, const char *cert, const char *ca, const char *capath, const char *cipher) const override
+ {
+ return mysql_ssl_set(mysql, key, cert, ca, capath, cipher);
+ }
+
+ MYSQL_RES *store_result(MYSQL *mysql) const override
+ {
+ return mysql_store_result(mysql);
+ }
+
+ unsigned int thread_safe() const override
+ {
+ return mysql_thread_safe();
+ }
+};
+
+MysqlInterface *create_mysql_shim()
+{
+ return new MysqlInterfaceImpl();
+}
diff --git a/lib/mysql_shim/mysqlinterface.hpp b/lib/mysql_shim/mysqlinterface.hpp
new file mode 100644
index 0000000..04dfc30
--- /dev/null
+++ b/lib/mysql_shim/mysqlinterface.hpp
@@ -0,0 +1,65 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef MYSQLINTERFACE_H
+#define MYSQLINTERFACE_H
+
+#include "mysql_shim/mysql_shim_export.h"
+#include <memory>
+#include <mysql.h>
+
+namespace icinga
+{
+
+struct MysqlInterface
+{
+ MysqlInterface(const MysqlInterface&) = delete;
+ MysqlInterface& operator=(MysqlInterface&) = delete;
+
+ virtual void Destroy() = 0;
+
+ virtual my_ulonglong affected_rows(MYSQL *mysql) const = 0;
+ virtual void close(MYSQL *sock) const = 0;
+ virtual const char *error(MYSQL *mysql) const = 0;
+ virtual MYSQL_FIELD *fetch_field(MYSQL_RES *result) const = 0;
+ virtual unsigned long *fetch_lengths(MYSQL_RES *result) const = 0;
+ virtual MYSQL_ROW fetch_row(MYSQL_RES *result) const = 0;
+ virtual unsigned int field_count(MYSQL *mysql) const = 0;
+ virtual MYSQL_FIELD_OFFSET field_seek(MYSQL_RES *result,
+ MYSQL_FIELD_OFFSET offset) const = 0;
+ virtual void free_result(MYSQL_RES *result) const = 0;
+ virtual MYSQL *init(MYSQL *mysql) const = 0;
+ virtual my_ulonglong insert_id(MYSQL *mysql) const = 0;
+ virtual int next_result(MYSQL *mysql) const = 0;
+ virtual int ping(MYSQL *mysql) const = 0;
+ virtual int query(MYSQL *mysql, const char *q) const = 0;
+ virtual MYSQL *real_connect(MYSQL *mysql, const char *host, const char *user, const char *passwd,
+ const char *db, unsigned int port, const char *unix_socket, unsigned long clientflag) const = 0;
+ virtual unsigned long real_escape_string(MYSQL *mysql, char *to, const char *from, unsigned long length) const = 0;
+ virtual int options(MYSQL *mysql, mysql_option option, const void *arg) const = 0;
+ virtual bool ssl_set(MYSQL *mysql, const char *key, const char *cert, const char *ca, const char *capath, const char *cipher) const = 0;
+ virtual MYSQL_RES *store_result(MYSQL *mysql) const = 0;
+ virtual unsigned int thread_safe() const = 0;
+
+protected:
+ MysqlInterface() = default;
+ ~MysqlInterface() = default;
+};
+
+struct MysqlInterfaceDeleter
+{
+ void operator()(MysqlInterface *ifc) const
+ {
+ ifc->Destroy();
+ }
+};
+
+}
+
+extern "C"
+{
+ MYSQL_SHIM_EXPORT icinga::MysqlInterface *create_mysql_shim();
+}
+
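+/* Usage sketch (assumption: the consumer, e.g. the IDO MySQL library, loads this
+ * shim at runtime and resolves "create_mysql_shim" into a create_mysql_shim_ptr;
+ * "handle" below is a hypothetical dlopen()/LoadLibrary() handle):
+ *
+ *   auto create = (create_mysql_shim_ptr)dlsym(handle, "create_mysql_shim");
+ *   std::unique_ptr<icinga::MysqlInterface, icinga::MysqlInterfaceDeleter> mysql(create());
+ *   MYSQL *conn = mysql->init(nullptr);
+ */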
+typedef icinga::MysqlInterface *(*create_mysql_shim_ptr)();
+
+#endif /* MYSQLINTERFACE_H */
diff --git a/lib/notification/CMakeLists.txt b/lib/notification/CMakeLists.txt
new file mode 100644
index 0000000..783b4fa
--- /dev/null
+++ b/lib/notification/CMakeLists.txt
@@ -0,0 +1,34 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+mkclass_target(notificationcomponent.ti notificationcomponent-ti.cpp notificationcomponent-ti.hpp)
+
+set(notification_SOURCES
+ notificationcomponent.cpp notificationcomponent.hpp notificationcomponent-ti.hpp
+)
+
+if(ICINGA2_UNITY_BUILD)
+ mkunity_target(notification notification notification_SOURCES)
+endif()
+
+add_library(notification OBJECT ${notification_SOURCES})
+
+add_dependencies(notification base config icinga)
+
+set_target_properties (
+ notification PROPERTIES
+ FOLDER Components
+)
+
+install_if_not_exists(
+ ${PROJECT_SOURCE_DIR}/etc/icinga2/features-available/notification.conf
+ ${ICINGA2_CONFIGDIR}/features-available
+)
+
+if(NOT WIN32)
+ install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_CONFIGDIR}/features-enabled\")")
+ install(CODE "execute_process(COMMAND \"${CMAKE_COMMAND}\" -E create_symlink ../features-available/notification.conf \"\$ENV{DESTDIR}${ICINGA2_FULL_CONFIGDIR}/features-enabled/notification.conf\")")
+else()
+ install_if_not_exists(${PROJECT_SOURCE_DIR}/etc/icinga2/features-enabled/notification.conf ${ICINGA2_CONFIGDIR}/features-enabled)
+endif()
+
+set(CPACK_NSIS_EXTRA_INSTALL_COMMANDS "${CPACK_NSIS_EXTRA_INSTALL_COMMANDS}" PARENT_SCOPE)
diff --git a/lib/notification/notificationcomponent.cpp b/lib/notification/notificationcomponent.cpp
new file mode 100644
index 0000000..982a838
--- /dev/null
+++ b/lib/notification/notificationcomponent.cpp
@@ -0,0 +1,271 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "notification/notificationcomponent.hpp"
+#include "notification/notificationcomponent-ti.cpp"
+#include "icinga/service.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+#include "base/utility.hpp"
+#include "base/exception.hpp"
+#include "base/statsfunction.hpp"
+#include "remote/apilistener.hpp"
+
+using namespace icinga;
+
+REGISTER_TYPE(NotificationComponent);
+
+REGISTER_STATSFUNCTION(NotificationComponent, &NotificationComponent::StatsFunc);
+
+void NotificationComponent::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr&)
+{
+ DictionaryData nodes;
+
+ for (const NotificationComponent::Ptr& notification_component : ConfigType::GetObjectsByType<NotificationComponent>()) {
+ nodes.emplace_back(notification_component->GetName(), 1); //add more stats
+ }
+
+ status->Set("notificationcomponent", new Dictionary(std::move(nodes)));
+}
+
+/**
+ * Starts the component.
+ */
+void NotificationComponent::Start(bool runtimeCreated)
+{
+ ObjectImpl<NotificationComponent>::Start(runtimeCreated);
+
+ Log(LogInformation, "NotificationComponent")
+ << "'" << GetName() << "' started.";
+
+ Checkable::OnNotificationsRequested.connect([this](const Checkable::Ptr& checkable, NotificationType type, const CheckResult::Ptr& cr,
+ const String& author, const String& text, const MessageOrigin::Ptr&) {
+ SendNotificationsHandler(checkable, type, cr, author, text);
+ });
+
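+	/* A short, fixed interval: the handler itself decides per notification whether
+	 * anything is due (reminders, stashed or previously suppressed notifications). */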
+ m_NotificationTimer = Timer::Create();
+ m_NotificationTimer->SetInterval(5);
+ m_NotificationTimer->OnTimerExpired.connect([this](const Timer * const&) { NotificationTimerHandler(); });
+ m_NotificationTimer->Start();
+}
+
+void NotificationComponent::Stop(bool runtimeRemoved)
+{
+ m_NotificationTimer->Stop(true);
+
+ Log(LogInformation, "NotificationComponent")
+ << "'" << GetName() << "' stopped.";
+
+ ObjectImpl<NotificationComponent>::Stop(runtimeRemoved);
+}
+
+static inline
+void SubtractSuppressedNotificationTypes(const Notification::Ptr& notification, int types)
+{
+ ObjectLock olock (notification);
+
+ int suppressedTypesBefore (notification->GetSuppressedNotifications());
+ int suppressedTypesAfter (suppressedTypesBefore & ~types);
+
+ if (suppressedTypesAfter != suppressedTypesBefore) {
+ notification->SetSuppressedNotifications(suppressedTypesAfter);
+ }
+}
+
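+/**
+ * Handles notification types that were suppressed earlier (e.g. during a downtime
+ * or while outside the notification's time period): types whose reason no longer
+ * applies are dropped, the remaining ones are re-sent once the time period matches,
+ * the suppression no longer holds and no fresh check result is imminent.
+ */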
+static inline
+void FireSuppressedNotifications(const Notification::Ptr& notification)
+{
+ int suppressedTypes (notification->GetSuppressedNotifications());
+ if (!suppressedTypes)
+ return;
+
+ int subtract = 0;
+ auto checkable (notification->GetCheckable());
+
+ for (auto type : {NotificationProblem, NotificationRecovery, NotificationFlappingStart, NotificationFlappingEnd}) {
+ if ((suppressedTypes & type) && !checkable->NotificationReasonApplies(type)) {
+ subtract |= type;
+ suppressedTypes &= ~type;
+ }
+ }
+
+ if (suppressedTypes) {
+ auto tp (notification->GetPeriod());
+
+ if ((!tp || tp->IsInside(Utility::GetTime())) && !checkable->IsLikelyToBeCheckedSoon()) {
+ for (auto type : {NotificationProblem, NotificationRecovery, NotificationFlappingStart, NotificationFlappingEnd}) {
+ if (!(suppressedTypes & type) || checkable->NotificationReasonSuppressed(type))
+ continue;
+
+ auto notificationName (notification->GetName());
+
+ Log(LogNotice, "NotificationComponent")
+ << "Attempting to re-send previously suppressed notification '" << notificationName << "'.";
+
+ subtract |= type;
+ SubtractSuppressedNotificationTypes(notification, subtract);
+ subtract = 0;
+
+ try {
+ notification->BeginExecuteNotification(type, checkable->GetLastCheckResult(), false, false);
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "NotificationComponent")
+ << "Exception occurred during notification for object '"
+ << notificationName << "': " << DiagnosticInformation(ex, false);
+ }
+ }
+ }
+ }
+
+ if (subtract) {
+ SubtractSuppressedNotificationTypes(notification, subtract);
+ }
+}
+
+/**
+ * Timer callback: periodically sends reminder notifications and re-sends
+ * stashed or previously suppressed notifications.
+ */
+void NotificationComponent::NotificationTimerHandler()
+{
+ double now = Utility::GetTime();
+
+	/* GetLocalEndpoint() already checks whether the 'api' feature is enabled. */
+ Endpoint::Ptr myEndpoint = Endpoint::GetLocalEndpoint();
+
+ for (const Notification::Ptr& notification : ConfigType::GetObjectsByType<Notification>()) {
+ if (!notification->IsActive())
+ continue;
+
+ String notificationName = notification->GetName();
+ bool updatedObjectAuthority = ApiListener::UpdatedObjectAuthority();
+
+		/* Skip the notification if this endpoint is paused, i.e. in an HA-enabled
+		 * cluster zone where another endpoint holds the object authority. */
+ if (notification->IsPaused()) {
+ if (updatedObjectAuthority) {
+ auto stashedNotifications (notification->GetStashedNotifications());
+ ObjectLock olock(stashedNotifications);
+
+ if (stashedNotifications->GetLength()) {
+ Log(LogNotice, "NotificationComponent")
+ << "Notification '" << notificationName << "': HA cluster active, this endpoint does not have the authority. Dropping all stashed notifications.";
+
+ stashedNotifications->Clear();
+ }
+ }
+
+ if (myEndpoint && GetEnableHA()) {
+ Log(LogNotice, "NotificationComponent")
+ << "Reminder notification '" << notificationName << "': HA cluster active, this endpoint does not have the authority (paused=true). Skipping.";
+ continue;
+ }
+ }
+
+ Checkable::Ptr checkable = notification->GetCheckable();
+
+ if (!IcingaApplication::GetInstance()->GetEnableNotifications() || !checkable->GetEnableNotifications())
+ continue;
+
+ bool reachable = checkable->IsReachable(DependencyNotification);
+
+ if (reachable) {
+ {
+ Array::Ptr unstashedNotifications = new Array();
+
+ {
+ auto stashedNotifications (notification->GetStashedNotifications());
+ ObjectLock olock(stashedNotifications);
+
+ stashedNotifications->CopyTo(unstashedNotifications);
+ stashedNotifications->Clear();
+ }
+
+ ObjectLock olock(unstashedNotifications);
+
+ for (Dictionary::Ptr unstashedNotification : unstashedNotifications) {
+ if (!unstashedNotification)
+ continue;
+
+ try {
+ Log(LogNotice, "NotificationComponent")
+ << "Attempting to send stashed notification '" << notificationName << "'.";
+
+ notification->BeginExecuteNotification(
+ (NotificationType)(int)unstashedNotification->Get("notification_type"),
+ (CheckResult::Ptr)unstashedNotification->Get("cr"),
+ (bool)unstashedNotification->Get("force"),
+ (bool)unstashedNotification->Get("reminder"),
+ (String)unstashedNotification->Get("author"),
+ (String)unstashedNotification->Get("text")
+ );
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "NotificationComponent")
+ << "Exception occurred during notification for object '"
+ << notificationName << "': " << DiagnosticInformation(ex, false);
+ }
+ }
+ }
+
+ FireSuppressedNotifications(notification);
+ }
+
+ if (notification->GetInterval() <= 0 && notification->GetNoMoreNotifications()) {
+ Log(LogNotice, "NotificationComponent")
+ << "Reminder notification '" << notificationName << "': Notification was sent out once and interval=0 disables reminder notifications.";
+ continue;
+ }
+
+ if (notification->GetNextNotification() > now)
+ continue;
+
+ {
+ ObjectLock olock(notification);
+ notification->SetNextNotification(Utility::GetTime() + notification->GetInterval());
+ }
+
+ {
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ ObjectLock olock(checkable);
+
+ if (checkable->GetStateType() == StateTypeSoft)
+ continue;
+
+ /* Don't send reminder notifications for OK/Up states. */
+ if ((service && service->GetState() == ServiceOK) || (!service && host->GetState() == HostUp))
+ continue;
+
+ /* Don't send reminder notifications before initial ones. */
+ if (checkable->GetSuppressedNotifications() & NotificationProblem || notification->GetSuppressedNotifications() & NotificationProblem)
+ continue;
+
+ /* Skip in runtime filters. */
+ if (!reachable || checkable->IsInDowntime() || checkable->IsAcknowledged() || checkable->IsFlapping())
+ continue;
+ }
+
+ try {
+ Log(LogNotice, "NotificationComponent")
+ << "Attempting to send reminder notification '" << notificationName << "'.";
+
+ notification->BeginExecuteNotification(NotificationProblem, checkable->GetLastCheckResult(), false, true);
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "NotificationComponent")
+ << "Exception occurred during notification for object '"
+ << notificationName << "': " << DiagnosticInformation(ex, false);
+ }
+ }
+}
+
+/**
+ * Processes icinga::SendNotifications messages.
+ */
+void NotificationComponent::SendNotificationsHandler(const Checkable::Ptr& checkable, NotificationType type,
+ const CheckResult::Ptr& cr, const String& author, const String& text)
+{
+ checkable->SendNotifications(type, cr, author, text);
+}
diff --git a/lib/notification/notificationcomponent.hpp b/lib/notification/notificationcomponent.hpp
new file mode 100644
index 0000000..09434e2
--- /dev/null
+++ b/lib/notification/notificationcomponent.hpp
@@ -0,0 +1,38 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef NOTIFICATIONCOMPONENT_H
+#define NOTIFICATIONCOMPONENT_H
+
+#include "notification/notificationcomponent-ti.hpp"
+#include "icinga/service.hpp"
+#include "base/configobject.hpp"
+#include "base/timer.hpp"
+
+namespace icinga
+{
+
+/**
+ * @ingroup notification
+ */
+class NotificationComponent final : public ObjectImpl<NotificationComponent>
+{
+public:
+ DECLARE_OBJECT(NotificationComponent);
+ DECLARE_OBJECTNAME(NotificationComponent);
+
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+
+ void Start(bool runtimeCreated) override;
+ void Stop(bool runtimeRemoved) override;
+
+private:
+ Timer::Ptr m_NotificationTimer;
+
+ void NotificationTimerHandler();
+ void SendNotificationsHandler(const Checkable::Ptr& checkable, NotificationType type,
+ const CheckResult::Ptr& cr, const String& author, const String& text);
+};
+
+}
+
+#endif /* NOTIFICATIONCOMPONENT_H */
diff --git a/lib/notification/notificationcomponent.ti b/lib/notification/notificationcomponent.ti
new file mode 100644
index 0000000..13af136
--- /dev/null
+++ b/lib/notification/notificationcomponent.ti
@@ -0,0 +1,19 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+
+library notification;
+
+namespace icinga
+{
+
+class NotificationComponent : ConfigObject
+{
+ activation_priority 200;
+
+ [config] bool enable_ha (EnableHA) {
+ default {{{ return true; }}}
+ };
+};
+
+}
diff --git a/lib/perfdata/CMakeLists.txt b/lib/perfdata/CMakeLists.txt
new file mode 100644
index 0000000..168938c
--- /dev/null
+++ b/lib/perfdata/CMakeLists.txt
@@ -0,0 +1,74 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+mkclass_target(gelfwriter.ti gelfwriter-ti.cpp gelfwriter-ti.hpp)
+mkclass_target(graphitewriter.ti graphitewriter-ti.cpp graphitewriter-ti.hpp)
+mkclass_target(influxdbcommonwriter.ti influxdbcommonwriter-ti.cpp influxdbcommonwriter-ti.hpp)
+mkclass_target(influxdbwriter.ti influxdbwriter-ti.cpp influxdbwriter-ti.hpp)
+mkclass_target(influxdb2writer.ti influxdb2writer-ti.cpp influxdb2writer-ti.hpp)
+mkclass_target(elasticsearchwriter.ti elasticsearchwriter-ti.cpp elasticsearchwriter-ti.hpp)
+mkclass_target(opentsdbwriter.ti opentsdbwriter-ti.cpp opentsdbwriter-ti.hpp)
+mkclass_target(perfdatawriter.ti perfdatawriter-ti.cpp perfdatawriter-ti.hpp)
+
+set(perfdata_SOURCES
+ elasticsearchwriter.cpp elasticsearchwriter.hpp elasticsearchwriter-ti.hpp
+ gelfwriter.cpp gelfwriter.hpp gelfwriter-ti.hpp
+ graphitewriter.cpp graphitewriter.hpp graphitewriter-ti.hpp
+ influxdbcommonwriter.cpp influxdbcommonwriter.hpp influxdbcommonwriter-ti.hpp
+ influxdbwriter.cpp influxdbwriter.hpp influxdbwriter-ti.hpp
+ influxdb2writer.cpp influxdb2writer.hpp influxdb2writer-ti.hpp
+ opentsdbwriter.cpp opentsdbwriter.hpp opentsdbwriter-ti.hpp
+ perfdatawriter.cpp perfdatawriter.hpp perfdatawriter-ti.hpp
+)
+
+if(ICINGA2_UNITY_BUILD)
+ mkunity_target(perfdata perfdata perfdata_SOURCES)
+endif()
+
+add_library(perfdata OBJECT ${perfdata_SOURCES})
+
+add_dependencies(perfdata base config icinga)
+
+set_target_properties (
+ perfdata PROPERTIES
+ FOLDER Components
+)
+
+install_if_not_exists(
+ ${PROJECT_SOURCE_DIR}/etc/icinga2/features-available/gelf.conf
+ ${ICINGA2_CONFIGDIR}/features-available
+)
+
+install_if_not_exists(
+ ${PROJECT_SOURCE_DIR}/etc/icinga2/features-available/graphite.conf
+ ${ICINGA2_CONFIGDIR}/features-available
+)
+
+install_if_not_exists(
+ ${PROJECT_SOURCE_DIR}/etc/icinga2/features-available/influxdb.conf
+ ${ICINGA2_CONFIGDIR}/features-available
+)
+
+install_if_not_exists(
+ ${PROJECT_SOURCE_DIR}/etc/icinga2/features-available/influxdb2.conf
+ ${ICINGA2_CONFIGDIR}/features-available
+)
+
+install_if_not_exists(
+ ${PROJECT_SOURCE_DIR}/etc/icinga2/features-available/elasticsearch.conf
+ ${ICINGA2_CONFIGDIR}/features-available
+)
+
+install_if_not_exists(
+ ${PROJECT_SOURCE_DIR}/etc/icinga2/features-available/opentsdb.conf
+ ${ICINGA2_CONFIGDIR}/features-available
+)
+
+install_if_not_exists(
+ ${PROJECT_SOURCE_DIR}/etc/icinga2/features-available/perfdata.conf
+ ${ICINGA2_CONFIGDIR}/features-available
+)
+
+install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_SPOOLDIR}/perfdata\")")
+install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_SPOOLDIR}/tmp\")")
+
+set(CPACK_NSIS_EXTRA_INSTALL_COMMANDS "${CPACK_NSIS_EXTRA_INSTALL_COMMANDS}" PARENT_SCOPE)
diff --git a/lib/perfdata/elasticsearchwriter.cpp b/lib/perfdata/elasticsearchwriter.cpp
new file mode 100644
index 0000000..9fb2aa9
--- /dev/null
+++ b/lib/perfdata/elasticsearchwriter.cpp
@@ -0,0 +1,685 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "perfdata/elasticsearchwriter.hpp"
+#include "perfdata/elasticsearchwriter-ti.cpp"
+#include "remote/url.hpp"
+#include "icinga/compatutility.hpp"
+#include "icinga/service.hpp"
+#include "icinga/checkcommand.hpp"
+#include "base/application.hpp"
+#include "base/defer.hpp"
+#include "base/io-engine.hpp"
+#include "base/tcpsocket.hpp"
+#include "base/stream.hpp"
+#include "base/base64.hpp"
+#include "base/json.hpp"
+#include "base/utility.hpp"
+#include "base/networkstream.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/exception.hpp"
+#include "base/statsfunction.hpp"
+#include <boost/algorithm/string.hpp>
+#include <boost/asio/ssl/context.hpp>
+#include <boost/beast/core/flat_buffer.hpp>
+#include <boost/beast/http/field.hpp>
+#include <boost/beast/http/message.hpp>
+#include <boost/beast/http/parser.hpp>
+#include <boost/beast/http/read.hpp>
+#include <boost/beast/http/status.hpp>
+#include <boost/beast/http/string_body.hpp>
+#include <boost/beast/http/verb.hpp>
+#include <boost/beast/http/write.hpp>
+#include <boost/scoped_array.hpp>
+#include <memory>
+#include <string>
+#include <utility>
+
+using namespace icinga;
+
+REGISTER_TYPE(ElasticsearchWriter);
+
+REGISTER_STATSFUNCTION(ElasticsearchWriter, &ElasticsearchWriter::StatsFunc);
+
+void ElasticsearchWriter::OnConfigLoaded()
+{
+ ObjectImpl<ElasticsearchWriter>::OnConfigLoaded();
+
+ m_WorkQueue.SetName("ElasticsearchWriter, " + GetName());
+
+ if (!GetEnableHa()) {
+ Log(LogDebug, "ElasticsearchWriter")
+ << "HA functionality disabled. Won't pause connection: " << GetName();
+
+ SetHAMode(HARunEverywhere);
+ } else {
+ SetHAMode(HARunOnce);
+ }
+}
+
+void ElasticsearchWriter::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata)
+{
+ DictionaryData nodes;
+
+ for (const ElasticsearchWriter::Ptr& elasticsearchwriter : ConfigType::GetObjectsByType<ElasticsearchWriter>()) {
+ size_t workQueueItems = elasticsearchwriter->m_WorkQueue.GetLength();
+ double workQueueItemRate = elasticsearchwriter->m_WorkQueue.GetTaskCount(60) / 60.0;
+
+ nodes.emplace_back(elasticsearchwriter->GetName(), new Dictionary({
+ { "work_queue_items", workQueueItems },
+ { "work_queue_item_rate", workQueueItemRate }
+ }));
+
+ perfdata->Add(new PerfdataValue("elasticsearchwriter_" + elasticsearchwriter->GetName() + "_work_queue_items", workQueueItems));
+ perfdata->Add(new PerfdataValue("elasticsearchwriter_" + elasticsearchwriter->GetName() + "_work_queue_item_rate", workQueueItemRate));
+ }
+
+ status->Set("elasticsearchwriter", new Dictionary(std::move(nodes)));
+}
+
+void ElasticsearchWriter::Resume()
+{
+ ObjectImpl<ElasticsearchWriter>::Resume();
+
+ m_EventPrefix = "icinga2.event.";
+
+ Log(LogInformation, "ElasticsearchWriter")
+ << "'" << GetName() << "' resumed.";
+
+ m_WorkQueue.SetExceptionCallback([this](boost::exception_ptr exp) { ExceptionHandler(std::move(exp)); });
+
+ /* Setup timer for periodically flushing m_DataBuffer */
+ m_FlushTimer = Timer::Create();
+ m_FlushTimer->SetInterval(GetFlushInterval());
+ m_FlushTimer->OnTimerExpired.connect([this](const Timer * const&) { FlushTimeout(); });
+ m_FlushTimer->Start();
+ m_FlushTimer->Reschedule(0);
+
+ /* Register for new metrics. */
+ m_HandleCheckResults = Checkable::OnNewCheckResult.connect([this](const Checkable::Ptr& checkable,
+ const CheckResult::Ptr& cr, const MessageOrigin::Ptr&) {
+ CheckResultHandler(checkable, cr);
+ });
+ m_HandleStateChanges = Checkable::OnStateChange.connect([this](const Checkable::Ptr& checkable,
+ const CheckResult::Ptr& cr, StateType type, const MessageOrigin::Ptr&) {
+ StateChangeHandler(checkable, cr, type);
+ });
+ m_HandleNotifications = Checkable::OnNotificationSentToAllUsers.connect([this](const Notification::Ptr& notification,
+ const Checkable::Ptr& checkable, const std::set<User::Ptr>& users, const NotificationType& type,
+ const CheckResult::Ptr& cr, const String& author, const String& text, const MessageOrigin::Ptr&) {
+ NotificationSentToAllUsersHandler(notification, checkable, users, type, cr, author, text);
+ });
+}
+
+/* Pause is equivalent to Stop, but with HA capabilities to resume at runtime. */
+void ElasticsearchWriter::Pause()
+{
+ m_HandleCheckResults.disconnect();
+ m_HandleStateChanges.disconnect();
+ m_HandleNotifications.disconnect();
+
+ m_FlushTimer->Stop(true);
+ m_WorkQueue.Join();
+
+ {
+ std::unique_lock<std::mutex> lock (m_DataBufferMutex);
+ Flush();
+ }
+
+ Log(LogInformation, "ElasticsearchWriter")
+ << "'" << GetName() << "' paused.";
+
+ ObjectImpl<ElasticsearchWriter>::Pause();
+}
+
+void ElasticsearchWriter::AddCheckResult(const Dictionary::Ptr& fields, const Checkable::Ptr& checkable, const CheckResult::Ptr& cr)
+{
+ String prefix = "check_result.";
+
+ fields->Set(prefix + "output", cr->GetOutput());
+ fields->Set(prefix + "check_source", cr->GetCheckSource());
+ fields->Set(prefix + "exit_status", cr->GetExitStatus());
+ fields->Set(prefix + "command", cr->GetCommand());
+ fields->Set(prefix + "state", cr->GetState());
+ fields->Set(prefix + "vars_before", cr->GetVarsBefore());
+ fields->Set(prefix + "vars_after", cr->GetVarsAfter());
+
+ fields->Set(prefix + "execution_start", FormatTimestamp(cr->GetExecutionStart()));
+ fields->Set(prefix + "execution_end", FormatTimestamp(cr->GetExecutionEnd()));
+ fields->Set(prefix + "schedule_start", FormatTimestamp(cr->GetScheduleStart()));
+ fields->Set(prefix + "schedule_end", FormatTimestamp(cr->GetScheduleEnd()));
+
+ /* Add extra calculated field. */
+ fields->Set(prefix + "latency", cr->CalculateLatency());
+ fields->Set(prefix + "execution_time", cr->CalculateExecutionTime());
+
+ if (!GetEnableSendPerfdata())
+ return;
+
+ Array::Ptr perfdata = cr->GetPerformanceData();
+
+ CheckCommand::Ptr checkCommand = checkable->GetCheckCommand();
+
+ if (perfdata) {
+ ObjectLock olock(perfdata);
+ for (const Value& val : perfdata) {
+ PerfdataValue::Ptr pdv;
+
+ if (val.IsObjectType<PerfdataValue>())
+ pdv = val;
+ else {
+ try {
+ pdv = PerfdataValue::Parse(val);
+ } catch (const std::exception&) {
+ Log(LogWarning, "ElasticsearchWriter")
+ << "Ignoring invalid perfdata for checkable '"
+ << checkable->GetName() << "' and command '"
+ << checkCommand->GetName() << "' with value: " << val;
+ continue;
+ }
+ }
+
+ String escapedKey = pdv->GetLabel();
+ boost::replace_all(escapedKey, " ", "_");
+ boost::replace_all(escapedKey, ".", "_");
+ boost::replace_all(escapedKey, "\\", "_");
+ boost::algorithm::replace_all(escapedKey, "::", ".");
+
+ String perfdataPrefix = prefix + "perfdata." + escapedKey;
+
+ fields->Set(perfdataPrefix + ".value", pdv->GetValue());
+
+ if (!pdv->GetMin().IsEmpty())
+ fields->Set(perfdataPrefix + ".min", pdv->GetMin());
+ if (!pdv->GetMax().IsEmpty())
+ fields->Set(perfdataPrefix + ".max", pdv->GetMax());
+ if (!pdv->GetWarn().IsEmpty())
+ fields->Set(perfdataPrefix + ".warn", pdv->GetWarn());
+ if (!pdv->GetCrit().IsEmpty())
+ fields->Set(perfdataPrefix + ".crit", pdv->GetCrit());
+
+ if (!pdv->GetUnit().IsEmpty())
+ fields->Set(perfdataPrefix + ".unit", pdv->GetUnit());
+ }
+ }
+}
+
+void ElasticsearchWriter::CheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr)
+{
+ if (IsPaused())
+ return;
+
+ m_WorkQueue.Enqueue([this, checkable, cr]() { InternalCheckResultHandler(checkable, cr); });
+}
+
+void ElasticsearchWriter::InternalCheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr)
+{
+ AssertOnWorkQueue();
+
+ CONTEXT("Elasticwriter processing check result for '" << checkable->GetName() << "'");
+
+ if (!IcingaApplication::GetInstance()->GetEnablePerfdata() || !checkable->GetEnablePerfdata())
+ return;
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr fields = new Dictionary();
+
+ if (service) {
+ fields->Set("service", service->GetShortName());
+ fields->Set("state", service->GetState());
+ fields->Set("last_state", service->GetLastState());
+ fields->Set("last_hard_state", service->GetLastHardState());
+ } else {
+ fields->Set("state", host->GetState());
+ fields->Set("last_state", host->GetLastState());
+ fields->Set("last_hard_state", host->GetLastHardState());
+ }
+
+ fields->Set("host", host->GetName());
+ fields->Set("state_type", checkable->GetStateType());
+
+ fields->Set("current_check_attempt", checkable->GetCheckAttempt());
+ fields->Set("max_check_attempts", checkable->GetMaxCheckAttempts());
+
+ fields->Set("reachable", checkable->IsReachable());
+
+ CheckCommand::Ptr commandObj = checkable->GetCheckCommand();
+
+ if (commandObj)
+ fields->Set("check_command", commandObj->GetName());
+
+ double ts = Utility::GetTime();
+
+ if (cr) {
+ AddCheckResult(fields, checkable, cr);
+ ts = cr->GetExecutionEnd();
+ }
+
+ Enqueue(checkable, "checkresult", fields, ts);
+}
+
+void ElasticsearchWriter::StateChangeHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, StateType type)
+{
+ if (IsPaused())
+ return;
+
+ m_WorkQueue.Enqueue([this, checkable, cr, type]() { StateChangeHandlerInternal(checkable, cr, type); });
+}
+
+void ElasticsearchWriter::StateChangeHandlerInternal(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, StateType type)
+{
+ AssertOnWorkQueue();
+
+ CONTEXT("Elasticwriter processing state change '" << checkable->GetName() << "'");
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr fields = new Dictionary();
+
+ fields->Set("current_check_attempt", checkable->GetCheckAttempt());
+ fields->Set("max_check_attempts", checkable->GetMaxCheckAttempts());
+ fields->Set("host", host->GetName());
+
+ if (service) {
+ fields->Set("service", service->GetShortName());
+ fields->Set("state", service->GetState());
+ fields->Set("last_state", service->GetLastState());
+ fields->Set("last_hard_state", service->GetLastHardState());
+ } else {
+ fields->Set("state", host->GetState());
+ fields->Set("last_state", host->GetLastState());
+ fields->Set("last_hard_state", host->GetLastHardState());
+ }
+
+ CheckCommand::Ptr commandObj = checkable->GetCheckCommand();
+
+ if (commandObj)
+ fields->Set("check_command", commandObj->GetName());
+
+ double ts = Utility::GetTime();
+
+ if (cr) {
+ AddCheckResult(fields, checkable, cr);
+ ts = cr->GetExecutionEnd();
+ }
+
+ Enqueue(checkable, "statechange", fields, ts);
+}
+
+void ElasticsearchWriter::NotificationSentToAllUsersHandler(const Notification::Ptr& notification,
+ const Checkable::Ptr& checkable, const std::set<User::Ptr>& users, NotificationType type,
+ const CheckResult::Ptr& cr, const String& author, const String& text)
+{
+ if (IsPaused())
+ return;
+
+ m_WorkQueue.Enqueue([this, notification, checkable, users, type, cr, author, text]() {
+ NotificationSentToAllUsersHandlerInternal(notification, checkable, users, type, cr, author, text);
+ });
+}
+
+void ElasticsearchWriter::NotificationSentToAllUsersHandlerInternal(const Notification::Ptr& notification,
+ const Checkable::Ptr& checkable, const std::set<User::Ptr>& users, NotificationType type,
+ const CheckResult::Ptr& cr, const String& author, const String& text)
+{
+ AssertOnWorkQueue();
+
+ CONTEXT("Elasticwriter processing notification to all users '" << checkable->GetName() << "'");
+
+ Log(LogDebug, "ElasticsearchWriter")
+ << "Processing notification for '" << checkable->GetName() << "'";
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ String notificationTypeString = Notification::NotificationTypeToStringCompat(type); //TODO: Change that to our own types.
+
+ Dictionary::Ptr fields = new Dictionary();
+
+ if (service) {
+ fields->Set("service", service->GetShortName());
+ fields->Set("state", service->GetState());
+ fields->Set("last_state", service->GetLastState());
+ fields->Set("last_hard_state", service->GetLastHardState());
+ } else {
+ fields->Set("state", host->GetState());
+ fields->Set("last_state", host->GetLastState());
+ fields->Set("last_hard_state", host->GetLastHardState());
+ }
+
+ fields->Set("host", host->GetName());
+
+ ArrayData userNames;
+
+ for (const User::Ptr& user : users) {
+ userNames.push_back(user->GetName());
+ }
+
+ fields->Set("users", new Array(std::move(userNames)));
+ fields->Set("notification_type", notificationTypeString);
+ fields->Set("author", author);
+ fields->Set("text", text);
+
+ CheckCommand::Ptr commandObj = checkable->GetCheckCommand();
+
+ if (commandObj)
+ fields->Set("check_command", commandObj->GetName());
+
+ double ts = Utility::GetTime();
+
+ if (cr) {
+ AddCheckResult(fields, checkable, cr);
+ ts = cr->GetExecutionEnd();
+ }
+
+ Enqueue(checkable, "notification", fields, ts);
+}
+
+void ElasticsearchWriter::Enqueue(const Checkable::Ptr& checkable, const String& type,
+ const Dictionary::Ptr& fields, double ts)
+{
+ /* Atomically buffer the data point. */
+ std::unique_lock<std::mutex> lock(m_DataBufferMutex);
+
+ /* Format the timestamps to dynamically select the date datatype inside the index. */
+ fields->Set("@timestamp", FormatTimestamp(ts));
+ fields->Set("timestamp", FormatTimestamp(ts));
+
+ String eventType = m_EventPrefix + type;
+ fields->Set("type", eventType);
+
+ /* Every payload needs a line describing the index.
+ * We do it this way to avoid problems with a near full queue.
+ */
+ String indexBody = "{\"index\": {} }\n";
+ String fieldsBody = JsonEncode(fields);
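+	/* Illustration (field values are hypothetical): each buffered entry is an
+	 * NDJSON pair such as
+	 *   {"index": {} }
+	 *   {"@timestamp":"2017-09-11T10:56:21.463+0200","type":"icinga2.event.checkresult", ...}
+	 * which Flush()/SendRequest() later joins into a single _bulk request body. */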
+
+ Log(LogDebug, "ElasticsearchWriter")
+ << "Checkable '" << checkable->GetName() << "' adds to metric list: '" << fieldsBody << "'.";
+
+ m_DataBuffer.emplace_back(indexBody + fieldsBody);
+
+ /* Flush if we've buffered too much to prevent excessive memory use. */
+ if (static_cast<int>(m_DataBuffer.size()) >= GetFlushThreshold()) {
+ Log(LogDebug, "ElasticsearchWriter")
+ << "Data buffer overflow writing " << m_DataBuffer.size() << " data points";
+ Flush();
+ }
+}
+
+void ElasticsearchWriter::FlushTimeout()
+{
+	/* Prevent new data points from being added to the array; otherwise there is
+	 * a race condition where they could go missing.
+	 */
+ std::unique_lock<std::mutex> lock(m_DataBufferMutex);
+
+ /* Flush if there are any data available. */
+ if (m_DataBuffer.size() > 0) {
+ Log(LogDebug, "ElasticsearchWriter")
+ << "Timer expired writing " << m_DataBuffer.size() << " data points";
+ Flush();
+ }
+}
+
+void ElasticsearchWriter::Flush()
+{
+ /* Flush can be called from 1) Timeout 2) Threshold 3) on shutdown/reload. */
+ if (m_DataBuffer.empty())
+ return;
+
+ /* Ensure you hold a lock against m_DataBuffer so that things
+ * don't go missing after creating the body and clearing the buffer.
+ */
+ String body = boost::algorithm::join(m_DataBuffer, "\n");
+ m_DataBuffer.clear();
+
+	/* Elasticsearch 6.x requires a trailing newline; this is also compatible with 5.x.
+	 * Tested with 6.0.0 and 5.6.4.
+	 */
+ body += "\n";
+
+ SendRequest(body);
+}
+
+void ElasticsearchWriter::SendRequest(const String& body)
+{
+ namespace beast = boost::beast;
+ namespace http = beast::http;
+
+ Url::Ptr url = new Url();
+
+ url->SetScheme(GetEnableTls() ? "https" : "http");
+ url->SetHost(GetHost());
+ url->SetPort(GetPort());
+
+ std::vector<String> path;
+
+ /* Specify the index path. Best practice is a daily rotation.
+ * Example: http://localhost:9200/icinga2-2017.09.11?pretty=1
+ */
+ path.emplace_back(GetIndex() + "-" + Utility::FormatDateTime("%Y.%m.%d", Utility::GetTime()));
+
+ /* Use the bulk message format. */
+ path.emplace_back("_bulk");
+
+ url->SetPath(path);
+
+ OptionalTlsStream stream;
+
+ try {
+ stream = Connect();
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "ElasticsearchWriter")
+ << "Flush failed, cannot connect to Elasticsearch: " << DiagnosticInformation(ex, false);
+ return;
+ }
+
+ Defer s ([&stream]() {
+ if (stream.first) {
+ stream.first->next_layer().shutdown();
+ }
+ });
+
+ http::request<http::string_body> request (http::verb::post, std::string(url->Format(true)), 10);
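+	/* Note: Boost.Beast encodes the HTTP version as major * 10 + minor, so 10 means HTTP/1.0. */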
+
+ request.set(http::field::user_agent, "Icinga/" + Application::GetAppVersion());
+ request.set(http::field::host, url->GetHost() + ":" + url->GetPort());
+
+ /* Specify required headers by Elasticsearch. */
+ request.set(http::field::accept, "application/json");
+
+ /* Use application/x-ndjson for bulk streams. While ES
+ * is able to handle application/json, the newline separator
+ * causes problems with Logstash (#6609).
+ */
+ request.set(http::field::content_type, "application/x-ndjson");
+
+ /* Send authentication if configured. */
+ String username = GetUsername();
+ String password = GetPassword();
+
+ if (!username.IsEmpty() && !password.IsEmpty())
+ request.set(http::field::authorization, "Basic " + Base64::Encode(username + ":" + password));
+
+ request.body() = body;
+ request.content_length(request.body().size());
+
+ /* Don't log the request body to debug log, this is already done above. */
+ Log(LogDebug, "ElasticsearchWriter")
+ << "Sending " << request.method_string() << " request" << ((!username.IsEmpty() && !password.IsEmpty()) ? " with basic auth" : "" )
+ << " to '" << url->Format() << "'.";
+
+ try {
+ if (stream.first) {
+ http::write(*stream.first, request);
+ stream.first->flush();
+ } else {
+ http::write(*stream.second, request);
+ stream.second->flush();
+ }
+ } catch (const std::exception&) {
+ Log(LogWarning, "ElasticsearchWriter")
+ << "Cannot write to HTTP API on host '" << GetHost() << "' port '" << GetPort() << "'.";
+ throw;
+ }
+
+ http::parser<false, http::string_body> parser;
+ beast::flat_buffer buf;
+
+ try {
+ if (stream.first) {
+ http::read(*stream.first, buf, parser);
+ } else {
+ http::read(*stream.second, buf, parser);
+ }
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "ElasticsearchWriter")
+ << "Failed to parse HTTP response from host '" << GetHost() << "' port '" << GetPort() << "': " << DiagnosticInformation(ex, false);
+ throw;
+ }
+
+ auto& response (parser.get());
+
+ if (response.result_int() > 299) {
+ if (response.result() == http::status::unauthorized) {
+			/* Log a more verbose hint; Elasticsearch may be hidden behind an authenticating proxy. */
+ if (!username.IsEmpty() && !password.IsEmpty()) {
+ Log(LogCritical, "ElasticsearchWriter")
+ << "401 Unauthorized. Please ensure that the user '" << username
+ << "' is able to authenticate against the HTTP API/Proxy.";
+ } else {
+ Log(LogCritical, "ElasticsearchWriter")
+ << "401 Unauthorized. The HTTP API requires authentication but no username/password has been configured.";
+ }
+
+ return;
+ }
+
+ std::ostringstream msgbuf;
+ msgbuf << "Unexpected response code " << response.result_int() << " from URL '" << url->Format() << "'";
+
+ auto& contentType (response[http::field::content_type]);
+
+ if (contentType != "application/json" && contentType != "application/json; charset=utf-8") {
+ msgbuf << "; Unexpected Content-Type: '" << contentType << "'";
+ }
+
+ auto& body (response.body());
+
+#ifdef I2_DEBUG
+ msgbuf << "; Response body: '" << body << "'";
+#endif /* I2_DEBUG */
+
+ Dictionary::Ptr jsonResponse;
+
+ try {
+ jsonResponse = JsonDecode(body);
+ } catch (...) {
+ Log(LogWarning, "ElasticsearchWriter")
+ << "Unable to parse JSON response:\n" << body;
+ return;
+ }
+
+ String error = jsonResponse->Get("error");
+
+ Log(LogCritical, "ElasticsearchWriter")
+ << "Error: '" << error << "'. " << msgbuf.str();
+ }
+}
+
+OptionalTlsStream ElasticsearchWriter::Connect()
+{
+ Log(LogNotice, "ElasticsearchWriter")
+ << "Connecting to Elasticsearch on host '" << GetHost() << "' port '" << GetPort() << "'.";
+
+ OptionalTlsStream stream;
+ bool tls = GetEnableTls();
+
+ if (tls) {
+ Shared<boost::asio::ssl::context>::Ptr sslContext;
+
+ try {
+ sslContext = MakeAsioSslContext(GetCertPath(), GetKeyPath(), GetCaPath());
+ } catch (const std::exception&) {
+ Log(LogWarning, "ElasticsearchWriter")
+ << "Unable to create SSL context.";
+ throw;
+ }
+
+ stream.first = Shared<AsioTlsStream>::Make(IoEngine::Get().GetIoContext(), *sslContext, GetHost());
+
+ } else {
+ stream.second = Shared<AsioTcpStream>::Make(IoEngine::Get().GetIoContext());
+ }
+
+ try {
+ icinga::Connect(tls ? stream.first->lowest_layer() : stream.second->lowest_layer(), GetHost(), GetPort());
+ } catch (const std::exception&) {
+ Log(LogWarning, "ElasticsearchWriter")
+ << "Can't connect to Elasticsearch on host '" << GetHost() << "' port '" << GetPort() << "'.";
+ throw;
+ }
+
+ if (tls) {
+ auto& tlsStream (stream.first->next_layer());
+
+ try {
+ tlsStream.handshake(tlsStream.client);
+ } catch (const std::exception&) {
+ Log(LogWarning, "ElasticsearchWriter")
+ << "TLS handshake with host '" << GetHost() << "' on port " << GetPort() << " failed.";
+ throw;
+ }
+
+ if (!GetInsecureNoverify()) {
+ if (!tlsStream.GetPeerCertificate()) {
+ BOOST_THROW_EXCEPTION(std::runtime_error("Elasticsearch didn't present any TLS certificate."));
+ }
+
+ if (!tlsStream.IsVerifyOK()) {
+ BOOST_THROW_EXCEPTION(std::runtime_error(
+ "TLS certificate validation failed: " + std::string(tlsStream.GetVerifyError())
+ ));
+ }
+ }
+ }
+
+ return stream;
+}
+
+void ElasticsearchWriter::AssertOnWorkQueue()
+{
+ ASSERT(m_WorkQueue.IsWorkerThread());
+}
+
+void ElasticsearchWriter::ExceptionHandler(boost::exception_ptr exp)
+{
+ Log(LogCritical, "ElasticsearchWriter", "Exception during Elastic operation: Verify that your backend is operational!");
+
+ Log(LogDebug, "ElasticsearchWriter")
+ << "Exception during Elasticsearch operation: " << DiagnosticInformation(std::move(exp));
+}
+
+String ElasticsearchWriter::FormatTimestamp(double ts)
+{
+ /* The date format must match the default dynamic date detection
+ * pattern in indexes. This enables applications like Kibana to
+ * detect a qualified timestamp index for time-series data.
+ *
+ * Example: 2017-09-11T10:56:21.463+0200
+ *
+ * References:
+ * https://www.elastic.co/guide/en/elasticsearch/reference/current/dynamic-field-mapping.html#date-detection
+ * https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-date-format.html
+ * https://www.elastic.co/guide/en/elasticsearch/reference/current/date.html
+ */
+ auto milliSeconds = static_cast<int>((ts - static_cast<int>(ts)) * 1000);
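+	/* e.g. ts = 1505120181.5 formats as "2017-09-11T10:56:21.500+0200" on a host
+	 * in the Europe/Berlin time zone (CEST, UTC+2). */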
+
+ return Utility::FormatDateTime("%Y-%m-%dT%H:%M:%S", ts) + "." + Convert::ToString(milliSeconds) + Utility::FormatDateTime("%z", ts);
+}
diff --git a/lib/perfdata/elasticsearchwriter.hpp b/lib/perfdata/elasticsearchwriter.hpp
new file mode 100644
index 0000000..a988094
--- /dev/null
+++ b/lib/perfdata/elasticsearchwriter.hpp
@@ -0,0 +1,65 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef ELASTICSEARCHWRITER_H
+#define ELASTICSEARCHWRITER_H
+
+#include "perfdata/elasticsearchwriter-ti.hpp"
+#include "icinga/service.hpp"
+#include "base/configobject.hpp"
+#include "base/workqueue.hpp"
+#include "base/timer.hpp"
+#include "base/tlsstream.hpp"
+
+namespace icinga
+{
+
+class ElasticsearchWriter final : public ObjectImpl<ElasticsearchWriter>
+{
+public:
+ DECLARE_OBJECT(ElasticsearchWriter);
+ DECLARE_OBJECTNAME(ElasticsearchWriter);
+
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+
+ static String FormatTimestamp(double ts);
+
+protected:
+ void OnConfigLoaded() override;
+ void Resume() override;
+ void Pause() override;
+
+private:
+ String m_EventPrefix;
+ WorkQueue m_WorkQueue{10000000, 1};
+ boost::signals2::connection m_HandleCheckResults, m_HandleStateChanges, m_HandleNotifications;
+ Timer::Ptr m_FlushTimer;
+ std::vector<String> m_DataBuffer;
+ std::mutex m_DataBufferMutex;
+
+ void AddCheckResult(const Dictionary::Ptr& fields, const Checkable::Ptr& checkable, const CheckResult::Ptr& cr);
+
+ void StateChangeHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, StateType type);
+ void StateChangeHandlerInternal(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, StateType type);
+ void CheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr);
+ void InternalCheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr);
+ void NotificationSentToAllUsersHandler(const Notification::Ptr& notification,
+ const Checkable::Ptr& checkable, const std::set<User::Ptr>& users, NotificationType type,
+ const CheckResult::Ptr& cr, const String& author, const String& text);
+ void NotificationSentToAllUsersHandlerInternal(const Notification::Ptr& notification,
+ const Checkable::Ptr& checkable, const std::set<User::Ptr>& users, NotificationType type,
+ const CheckResult::Ptr& cr, const String& author, const String& text);
+
+ void Enqueue(const Checkable::Ptr& checkable, const String& type,
+ const Dictionary::Ptr& fields, double ts);
+
+ OptionalTlsStream Connect();
+ void AssertOnWorkQueue();
+ void ExceptionHandler(boost::exception_ptr exp);
+ void FlushTimeout();
+ void Flush();
+ void SendRequest(const String& body);
+};
+
+}
+
+#endif /* ELASTICSEARCHWRITER_H */
diff --git a/lib/perfdata/elasticsearchwriter.ti b/lib/perfdata/elasticsearchwriter.ti
new file mode 100644
index 0000000..e3b8e27
--- /dev/null
+++ b/lib/perfdata/elasticsearchwriter.ti
@@ -0,0 +1,50 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+
+library perfdata;
+
+namespace icinga
+{
+
+class ElasticsearchWriter : ConfigObject
+{
+ activation_priority 100;
+
+ [config, required] String host {
+ default {{{ return "127.0.0.1"; }}}
+ };
+ [config, required] String port {
+ default {{{ return "9200"; }}}
+ };
+ [config, required] String index {
+ default {{{ return "icinga2"; }}}
+ };
+ [config] bool enable_send_perfdata {
+ default {{{ return false; }}}
+ };
+ [config] String username;
+ [config, no_user_view, no_user_modify] String password;
+
+ [config] bool enable_tls {
+ default {{{ return false; }}}
+ };
+ [config] bool insecure_noverify {
+ default {{{ return false; }}}
+ };
+ [config] String ca_path;
+ [config] String cert_path;
+ [config] String key_path;
+
+ [config] int flush_interval {
+ default {{{ return 10; }}}
+ };
+ [config] int flush_threshold {
+ default {{{ return 1024; }}}
+ };
+ [config] bool enable_ha {
+ default {{{ return false; }}}
+ };
+};
+
+}
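+
+/* Illustrative feature configuration (the values shown are the compile-time
+ * defaults above, not required settings):
+ *
+ *   object ElasticsearchWriter "elasticsearch" {
+ *     host = "127.0.0.1"
+ *     port = "9200"
+ *     index = "icinga2"
+ *     enable_send_perfdata = false
+ *     flush_interval = 10
+ *     flush_threshold = 1024
+ *   }
+ */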
diff --git a/lib/perfdata/gelfwriter.cpp b/lib/perfdata/gelfwriter.cpp
new file mode 100644
index 0000000..c5b2bbd
--- /dev/null
+++ b/lib/perfdata/gelfwriter.cpp
@@ -0,0 +1,535 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "perfdata/gelfwriter.hpp"
+#include "perfdata/gelfwriter-ti.cpp"
+#include "icinga/service.hpp"
+#include "icinga/notification.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/macroprocessor.hpp"
+#include "icinga/compatutility.hpp"
+#include "base/tcpsocket.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+#include "base/utility.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/application.hpp"
+#include "base/stream.hpp"
+#include "base/networkstream.hpp"
+#include "base/context.hpp"
+#include "base/exception.hpp"
+#include "base/json.hpp"
+#include "base/statsfunction.hpp"
+#include <boost/algorithm/string/replace.hpp>
+#include <utility>
+#include "base/io-engine.hpp"
+#include <boost/asio/write.hpp>
+#include <boost/asio/buffer.hpp>
+#include <boost/system/error_code.hpp>
+#include <boost/asio/error.hpp>
+
+using namespace icinga;
+
+REGISTER_TYPE(GelfWriter);
+
+REGISTER_STATSFUNCTION(GelfWriter, &GelfWriter::StatsFunc);
+
+void GelfWriter::OnConfigLoaded()
+{
+ ObjectImpl<GelfWriter>::OnConfigLoaded();
+
+ m_WorkQueue.SetName("GelfWriter, " + GetName());
+
+ if (!GetEnableHa()) {
+ Log(LogDebug, "GelfWriter")
+ << "HA functionality disabled. Won't pause connection: " << GetName();
+
+ SetHAMode(HARunEverywhere);
+ } else {
+ SetHAMode(HARunOnce);
+ }
+}
+
+void GelfWriter::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata)
+{
+ DictionaryData nodes;
+
+ for (const GelfWriter::Ptr& gelfwriter : ConfigType::GetObjectsByType<GelfWriter>()) {
+ size_t workQueueItems = gelfwriter->m_WorkQueue.GetLength();
+ double workQueueItemRate = gelfwriter->m_WorkQueue.GetTaskCount(60) / 60.0;
+
+ nodes.emplace_back(gelfwriter->GetName(), new Dictionary({
+ { "work_queue_items", workQueueItems },
+ { "work_queue_item_rate", workQueueItemRate },
+ { "connected", gelfwriter->GetConnected() },
+ { "source", gelfwriter->GetSource() }
+ }));
+
+ perfdata->Add(new PerfdataValue("gelfwriter_" + gelfwriter->GetName() + "_work_queue_items", workQueueItems));
+ perfdata->Add(new PerfdataValue("gelfwriter_" + gelfwriter->GetName() + "_work_queue_item_rate", workQueueItemRate));
+ }
+
+ status->Set("gelfwriter", new Dictionary(std::move(nodes)));
+}
+
+void GelfWriter::Resume()
+{
+ ObjectImpl<GelfWriter>::Resume();
+
+ Log(LogInformation, "GelfWriter")
+ << "'" << GetName() << "' resumed.";
+
+ /* Register exception handler for WQ tasks. */
+ m_WorkQueue.SetExceptionCallback([this](boost::exception_ptr exp) { ExceptionHandler(std::move(exp)); });
+
+ /* Timer for reconnecting */
+ m_ReconnectTimer = Timer::Create();
+ m_ReconnectTimer->SetInterval(10);
+ m_ReconnectTimer->OnTimerExpired.connect([this](const Timer * const&) { ReconnectTimerHandler(); });
+ m_ReconnectTimer->Start();
+ m_ReconnectTimer->Reschedule(0);
+
+ /* Register event handlers. */
+ m_HandleCheckResults = Checkable::OnNewCheckResult.connect([this](const Checkable::Ptr& checkable,
+ const CheckResult::Ptr& cr, const MessageOrigin::Ptr&) {
+ CheckResultHandler(checkable, cr);
+ });
+ m_HandleNotifications = Checkable::OnNotificationSentToUser.connect([this](const Notification::Ptr& notification,
+ const Checkable::Ptr& checkable, const User::Ptr& user, const NotificationType& type, const CheckResult::Ptr& cr,
+ const String& author, const String& commentText, const String& commandName, const MessageOrigin::Ptr&) {
+ NotificationToUserHandler(notification, checkable, user, type, cr, author, commentText, commandName);
+ });
+ m_HandleStateChanges = Checkable::OnStateChange.connect([this](const Checkable::Ptr& checkable,
+ const CheckResult::Ptr& cr, StateType type, const MessageOrigin::Ptr&) {
+ StateChangeHandler(checkable, cr, type);
+ });
+}
+
+/* Pause is equivalent to Stop, but with HA capabilities to resume at runtime. */
+void GelfWriter::Pause()
+{
+ m_HandleCheckResults.disconnect();
+ m_HandleNotifications.disconnect();
+ m_HandleStateChanges.disconnect();
+
+ m_ReconnectTimer->Stop(true);
+
+ m_WorkQueue.Enqueue([this]() {
+ try {
+ ReconnectInternal();
+ } catch (const std::exception&) {
+ Log(LogInformation, "GelfWriter")
+ << "Unable to connect, not flushing buffers. Data may be lost.";
+ }
+ }, PriorityImmediate);
+
+ m_WorkQueue.Enqueue([this]() { DisconnectInternal(); }, PriorityLow);
+ m_WorkQueue.Join();
+
+ Log(LogInformation, "GelfWriter")
+ << "'" << GetName() << "' paused.";
+
+ ObjectImpl<GelfWriter>::Pause();
+}
+
+void GelfWriter::AssertOnWorkQueue()
+{
+ ASSERT(m_WorkQueue.IsWorkerThread());
+}
+
+void GelfWriter::ExceptionHandler(boost::exception_ptr exp)
+{
+ Log(LogCritical, "GelfWriter") << "Exception during Graylog Gelf operation: " << DiagnosticInformation(exp, false);
+ Log(LogDebug, "GelfWriter") << "Exception during Graylog Gelf operation: " << DiagnosticInformation(exp, true);
+
+ DisconnectInternal();
+}
+
+void GelfWriter::Reconnect()
+{
+ AssertOnWorkQueue();
+
+ if (IsPaused()) {
+ SetConnected(false);
+ return;
+ }
+
+ ReconnectInternal();
+}
+
+void GelfWriter::ReconnectInternal()
+{
+ double startTime = Utility::GetTime();
+
+ CONTEXT("Reconnecting to Graylog Gelf '" << GetName() << "'");
+
+ SetShouldConnect(true);
+
+ if (GetConnected())
+ return;
+
+ Log(LogNotice, "GelfWriter")
+ << "Reconnecting to Graylog Gelf on host '" << GetHost() << "' port '" << GetPort() << "'.";
+
+ bool ssl = GetEnableTls();
+
+ if (ssl) {
+ Shared<boost::asio::ssl::context>::Ptr sslContext;
+
+ try {
+ sslContext = MakeAsioSslContext(GetCertPath(), GetKeyPath(), GetCaPath());
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "GelfWriter")
+ << "Unable to create SSL context.";
+ throw;
+ }
+
+ m_Stream.first = Shared<AsioTlsStream>::Make(IoEngine::Get().GetIoContext(), *sslContext, GetHost());
+
+ } else {
+ m_Stream.second = Shared<AsioTcpStream>::Make(IoEngine::Get().GetIoContext());
+ }
+
+ try {
+ icinga::Connect(ssl ? m_Stream.first->lowest_layer() : m_Stream.second->lowest_layer(), GetHost(), GetPort());
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "GelfWriter")
+			<< "Can't connect to Graylog Gelf on host '" << GetHost() << "' port '" << GetPort() << "'.";
+ throw;
+ }
+
+ if (ssl) {
+ auto& tlsStream (m_Stream.first->next_layer());
+
+ try {
+ tlsStream.handshake(tlsStream.client);
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "GelfWriter")
+				<< "TLS handshake with host '" << GetHost() << "' failed.";
+ throw;
+ }
+
+ if (!GetInsecureNoverify()) {
+ if (!tlsStream.GetPeerCertificate()) {
+ BOOST_THROW_EXCEPTION(std::runtime_error("Graylog Gelf didn't present any TLS certificate."));
+ }
+
+ if (!tlsStream.IsVerifyOK()) {
+ BOOST_THROW_EXCEPTION(std::runtime_error(
+ "TLS certificate validation failed: " + std::string(tlsStream.GetVerifyError())
+ ));
+ }
+ }
+ }
+
+ SetConnected(true);
+
+ Log(LogInformation, "GelfWriter")
+ << "Finished reconnecting to Graylog Gelf in " << std::setw(2) << Utility::GetTime() - startTime << " second(s).";
+}
+
+void GelfWriter::ReconnectTimerHandler()
+{
+ m_WorkQueue.Enqueue([this]() { Reconnect(); }, PriorityNormal);
+}
+
+void GelfWriter::Disconnect()
+{
+ AssertOnWorkQueue();
+
+ DisconnectInternal();
+}
+
+void GelfWriter::DisconnectInternal()
+{
+ if (!GetConnected())
+ return;
+
+ if (m_Stream.first) {
+ boost::system::error_code ec;
+ m_Stream.first->next_layer().shutdown(ec);
+
+ // https://stackoverflow.com/a/25703699
+ // If the error code's category is not an SSL category, the protocol was shut down securely.
+ if (ec.category() == boost::asio::error::get_ssl_category()) {
+ Log(LogCritical, "GelfWriter")
+ << "TLS shutdown with host '" << GetHost() << "' could not be done securely.";
+ }
+ } else if (m_Stream.second) {
+ m_Stream.second->close();
+ }
+
+ SetConnected(false);
+}
+
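+/**
+ * Check result event handler, checks whether the feature is not paused in HA setups.
+ *
+ * @param checkable Host/Service object
+ * @param cr Check result including performance data
+ */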
+void GelfWriter::CheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr)
+{
+ if (IsPaused())
+ return;
+
+ m_WorkQueue.Enqueue([this, checkable, cr]() { CheckResultHandlerInternal(checkable, cr); });
+}
+
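+/**
+ * Check result event handler, composes and sends a GELF message.
+ *
+ * Called inside the WQ.
+ *
+ * @param checkable Host/Service object
+ * @param cr Check result including performance data
+ */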
+void GelfWriter::CheckResultHandlerInternal(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr)
+{
+ AssertOnWorkQueue();
+
+ CONTEXT("GELF Processing check result for '" << checkable->GetName() << "'");
+
+ Log(LogDebug, "GelfWriter")
+ << "Processing check result for '" << checkable->GetName() << "'";
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr fields = new Dictionary();
+
+ if (service) {
+ fields->Set("_service_name", service->GetShortName());
+ fields->Set("_service_state", Service::StateToString(service->GetState()));
+ fields->Set("_last_state", service->GetLastState());
+ fields->Set("_last_hard_state", service->GetLastHardState());
+ } else {
+ fields->Set("_last_state", host->GetLastState());
+ fields->Set("_last_hard_state", host->GetLastHardState());
+ }
+
+ fields->Set("_hostname", host->GetName());
+ fields->Set("_type", "CHECK RESULT");
+ fields->Set("_state", service ? Service::StateToString(service->GetState()) : Host::StateToString(host->GetState()));
+
+ fields->Set("_current_check_attempt", checkable->GetCheckAttempt());
+ fields->Set("_max_check_attempts", checkable->GetMaxCheckAttempts());
+
+ fields->Set("_reachable", checkable->IsReachable());
+
+ CheckCommand::Ptr checkCommand = checkable->GetCheckCommand();
+
+ if (checkCommand)
+ fields->Set("_check_command", checkCommand->GetName());
+
+ double ts = Utility::GetTime();
+
+ if (cr) {
+ fields->Set("_latency", cr->CalculateLatency());
+ fields->Set("_execution_time", cr->CalculateExecutionTime());
+ fields->Set("short_message", CompatUtility::GetCheckResultOutput(cr));
+ fields->Set("full_message", cr->GetOutput());
+ fields->Set("_check_source", cr->GetCheckSource());
+ ts = cr->GetExecutionEnd();
+ }
+
+ if (cr && GetEnableSendPerfdata()) {
+ Array::Ptr perfdata = cr->GetPerformanceData();
+
+ if (perfdata) {
+ ObjectLock olock(perfdata);
+ for (const Value& val : perfdata) {
+ PerfdataValue::Ptr pdv;
+
+ if (val.IsObjectType<PerfdataValue>())
+ pdv = val;
+ else {
+ try {
+ pdv = PerfdataValue::Parse(val);
+ } catch (const std::exception&) {
+ Log(LogWarning, "GelfWriter")
+ << "Ignoring invalid perfdata for checkable '"
+ << checkable->GetName() << "' and command '"
+ << checkCommand->GetName() << "' with value: " << val;
+ continue;
+ }
+ }
+
+ String escaped_key = pdv->GetLabel();
+ boost::replace_all(escaped_key, " ", "_");
+ boost::replace_all(escaped_key, ".", "_");
+ boost::replace_all(escaped_key, "\\", "_");
+ boost::algorithm::replace_all(escaped_key, "::", ".");
+
+ fields->Set("_" + escaped_key, pdv->GetValue());
+
+ if (!pdv->GetMin().IsEmpty())
+ fields->Set("_" + escaped_key + "_min", pdv->GetMin());
+ if (!pdv->GetMax().IsEmpty())
+ fields->Set("_" + escaped_key + "_max", pdv->GetMax());
+ if (!pdv->GetWarn().IsEmpty())
+ fields->Set("_" + escaped_key + "_warn", pdv->GetWarn());
+ if (!pdv->GetCrit().IsEmpty())
+ fields->Set("_" + escaped_key + "_crit", pdv->GetCrit());
+
+ if (!pdv->GetUnit().IsEmpty())
+ fields->Set("_" + escaped_key + "_unit", pdv->GetUnit());
+ }
+ }
+ }
+
+ SendLogMessage(checkable, ComposeGelfMessage(fields, GetSource(), ts));
+}
+
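+/**
+ * Notification event handler, checks whether the feature is not paused in HA setups.
+ */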
+void GelfWriter::NotificationToUserHandler(const Notification::Ptr& notification, const Checkable::Ptr& checkable,
+ const User::Ptr& user, NotificationType notificationType, CheckResult::Ptr const& cr,
+ const String& author, const String& commentText, const String& commandName)
+{
+ if (IsPaused())
+ return;
+
+ m_WorkQueue.Enqueue([this, notification, checkable, user, notificationType, cr, author, commentText, commandName]() {
+ NotificationToUserHandlerInternal(notification, checkable, user, notificationType, cr, author, commentText, commandName);
+ });
+}
+
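+/**
+ * Notification event handler, composes and sends a GELF message.
+ *
+ * Called inside the WQ.
+ */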
+void GelfWriter::NotificationToUserHandlerInternal(const Notification::Ptr& notification, const Checkable::Ptr& checkable,
+ const User::Ptr& user, NotificationType notificationType, CheckResult::Ptr const& cr,
+ const String& author, const String& commentText, const String& commandName)
+{
+ AssertOnWorkQueue();
+
+ CONTEXT("GELF Processing notification to all users '" << checkable->GetName() << "'");
+
+ Log(LogDebug, "GelfWriter")
+ << "Processing notification for '" << checkable->GetName() << "'";
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ String notificationTypeString = Notification::NotificationTypeToStringCompat(notificationType); //TODO: Change that to our own types.
+
+ String authorComment = "";
+
+ if (notificationType == NotificationCustom || notificationType == NotificationAcknowledgement) {
+ authorComment = author + ";" + commentText;
+ }
+
+ String output;
+ double ts = Utility::GetTime();
+
+ if (cr) {
+ output = CompatUtility::GetCheckResultOutput(cr);
+ ts = cr->GetExecutionEnd();
+ }
+
+ Dictionary::Ptr fields = new Dictionary();
+
+ if (service) {
+ fields->Set("_type", "SERVICE NOTIFICATION");
+ //TODO: fix this to _service_name
+ fields->Set("_service", service->GetShortName());
+ fields->Set("short_message", output);
+ } else {
+ fields->Set("_type", "HOST NOTIFICATION");
+ fields->Set("short_message", output);
+ }
+
+ fields->Set("_state", service ? Service::StateToString(service->GetState()) : Host::StateToString(host->GetState()));
+
+ fields->Set("_hostname", host->GetName());
+ fields->Set("_command", commandName);
+ fields->Set("_notification_type", notificationTypeString);
+ fields->Set("_comment", authorComment);
+
+ CheckCommand::Ptr commandObj = checkable->GetCheckCommand();
+
+ if (commandObj)
+ fields->Set("_check_command", commandObj->GetName());
+
+ SendLogMessage(checkable, ComposeGelfMessage(fields, GetSource(), ts));
+}
+
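+/**
+ * State change event handler, checks whether the feature is not paused in HA setups.
+ */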
+void GelfWriter::StateChangeHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, StateType type)
+{
+ if (IsPaused())
+ return;
+
+ m_WorkQueue.Enqueue([this, checkable, cr, type]() { StateChangeHandlerInternal(checkable, cr, type); });
+}
+
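+/**
+ * State change event handler, composes and sends a GELF message.
+ *
+ * Called inside the WQ.
+ */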
+void GelfWriter::StateChangeHandlerInternal(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, StateType type)
+{
+ AssertOnWorkQueue();
+
+ CONTEXT("GELF Processing state change '" << checkable->GetName() << "'");
+
+ Log(LogDebug, "GelfWriter")
+ << "Processing state change for '" << checkable->GetName() << "'";
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr fields = new Dictionary();
+
+ fields->Set("_state", service ? Service::StateToString(service->GetState()) : Host::StateToString(host->GetState()));
+ fields->Set("_type", "STATE CHANGE");
+ fields->Set("_current_check_attempt", checkable->GetCheckAttempt());
+ fields->Set("_max_check_attempts", checkable->GetMaxCheckAttempts());
+ fields->Set("_hostname", host->GetName());
+
+ if (service) {
+ fields->Set("_service_name", service->GetShortName());
+ fields->Set("_service_state", Service::StateToString(service->GetState()));
+ fields->Set("_last_state", service->GetLastState());
+ fields->Set("_last_hard_state", service->GetLastHardState());
+ } else {
+ fields->Set("_last_state", host->GetLastState());
+ fields->Set("_last_hard_state", host->GetLastHardState());
+ }
+
+ CheckCommand::Ptr commandObj = checkable->GetCheckCommand();
+
+ if (commandObj)
+ fields->Set("_check_command", commandObj->GetName());
+
+ double ts = Utility::GetTime();
+
+ if (cr) {
+ fields->Set("short_message", CompatUtility::GetCheckResultOutput(cr));
+ fields->Set("full_message", cr->GetOutput());
+ fields->Set("_check_source", cr->GetCheckSource());
+ ts = cr->GetExecutionEnd();
+ }
+
+ SendLogMessage(checkable, ComposeGelfMessage(fields, GetSource(), ts));
+}
+
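+/**
+ * Compose a GELF 1.1 message as JSON from the given fields.
+ *
+ * Example output (illustrative values only): {"version":"1.1","host":"icinga2","timestamp":1700000000,"short_message":"..."}
+ *
+ * @param fields Message fields
+ * @param source GELF source/host name
+ * @param ts Timestamp when the check result was created
+ */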
+String GelfWriter::ComposeGelfMessage(const Dictionary::Ptr& fields, const String& source, double ts)
+{
+ fields->Set("version", "1.1");
+ fields->Set("host", source);
+ fields->Set("timestamp", ts);
+
+ return JsonEncode(fields);
+}
+
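+/**
+ * Send the null-terminated GELF message over the TCP/TLS stream.
+ *
+ * @param checkable Host/Service object, used for logging
+ * @param gelfMessage JSON-encoded GELF message
+ */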
+void GelfWriter::SendLogMessage(const Checkable::Ptr& checkable, const String& gelfMessage)
+{
+ std::ostringstream msgbuf;
+ msgbuf << gelfMessage;
+ msgbuf << '\0';
+
+ String log = msgbuf.str();
+
+ if (!GetConnected())
+ return;
+
+ try {
+ Log(LogDebug, "GelfWriter")
+ << "Checkable '" << checkable->GetName() << "' sending message '" << log << "'.";
+
+ if (m_Stream.first) {
+ boost::asio::write(*m_Stream.first, boost::asio::buffer(msgbuf.str()));
+ m_Stream.first->flush();
+ } else {
+ boost::asio::write(*m_Stream.second, boost::asio::buffer(msgbuf.str()));
+ m_Stream.second->flush();
+ }
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "GelfWriter")
+ << "Cannot write to TCP socket on host '" << GetHost() << "' port '" << GetPort() << "'.";
+
+ throw;
+ }
+}
diff --git a/lib/perfdata/gelfwriter.hpp b/lib/perfdata/gelfwriter.hpp
new file mode 100644
index 0000000..ce9ee35
--- /dev/null
+++ b/lib/perfdata/gelfwriter.hpp
@@ -0,0 +1,70 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef GELFWRITER_H
+#define GELFWRITER_H
+
+#include "perfdata/gelfwriter-ti.hpp"
+#include "icinga/service.hpp"
+#include "base/configobject.hpp"
+#include "base/tcpsocket.hpp"
+#include "base/timer.hpp"
+#include "base/workqueue.hpp"
+#include <fstream>
+
+namespace icinga
+{
+
+/**
+ * An Icinga Gelf writer for Graylog.
+ *
+ * @ingroup perfdata
+ */
+class GelfWriter final : public ObjectImpl<GelfWriter>
+{
+public:
+ DECLARE_OBJECT(GelfWriter);
+ DECLARE_OBJECTNAME(GelfWriter);
+
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+
+protected:
+ void OnConfigLoaded() override;
+ void Resume() override;
+ void Pause() override;
+
+private:
+ OptionalTlsStream m_Stream;
+ WorkQueue m_WorkQueue{10000000, 1};
+
+ boost::signals2::connection m_HandleCheckResults, m_HandleNotifications, m_HandleStateChanges;
+ Timer::Ptr m_ReconnectTimer;
+
+ void CheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr);
+ void CheckResultHandlerInternal(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr);
+ void NotificationToUserHandler(const Notification::Ptr& notification, const Checkable::Ptr& checkable,
+ const User::Ptr& user, NotificationType notificationType, const CheckResult::Ptr& cr,
+ const String& author, const String& commentText, const String& commandName);
+ void NotificationToUserHandlerInternal(const Notification::Ptr& notification, const Checkable::Ptr& checkable,
+ const User::Ptr& user, NotificationType notification_type, const CheckResult::Ptr& cr,
+ const String& author, const String& comment_text, const String& command_name);
+ void StateChangeHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, StateType type);
+ void StateChangeHandlerInternal(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, StateType type);
+
+ String ComposeGelfMessage(const Dictionary::Ptr& fields, const String& source, double ts);
+ void SendLogMessage(const Checkable::Ptr& checkable, const String& gelfMessage);
+
+ void ReconnectTimerHandler();
+
+ void Disconnect();
+ void DisconnectInternal();
+ void Reconnect();
+ void ReconnectInternal();
+
+ void AssertOnWorkQueue();
+
+ void ExceptionHandler(boost::exception_ptr exp);
+};
+
+}
+
+#endif /* GELFWRITER_H */
diff --git a/lib/perfdata/gelfwriter.ti b/lib/perfdata/gelfwriter.ti
new file mode 100644
index 0000000..387ee14
--- /dev/null
+++ b/lib/perfdata/gelfwriter.ti
@@ -0,0 +1,45 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+
+library perfdata;
+
+namespace icinga
+{
+
+class GelfWriter : ConfigObject
+{
+ activation_priority 100;
+
+ [config] String host {
+ default {{{ return "127.0.0.1"; }}}
+ };
+ [config] String port {
+ default {{{ return "12201"; }}}
+ };
+ [config] String source {
+ default {{{ return "icinga2"; }}}
+ };
+ [config] bool enable_send_perfdata {
+ default {{{ return false; }}}
+ };
+
+ [no_user_modify] bool connected;
+ [no_user_modify] bool should_connect {
+ default {{{ return true; }}}
+ };
+ [config] bool enable_ha {
+ default {{{ return false; }}}
+ };
+ [config] bool enable_tls {
+ default {{{ return false; }}}
+ };
+ [config] bool insecure_noverify {
+ default {{{ return false; }}}
+ };
+ [config] String ca_path;
+ [config] String cert_path;
+ [config] String key_path;
+};
+
+}
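+
+/* Illustrative example (editor's sketch, not generated from this file): a minimal
+ * GelfWriter object using the attributes defined above; host and port values are placeholders.
+ *
+ *   object GelfWriter "graylog" {
+ *     host = "192.0.2.10"
+ *     port = "12201"
+ *     enable_send_perfdata = true
+ *   }
+ */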
diff --git a/lib/perfdata/graphitewriter.cpp b/lib/perfdata/graphitewriter.cpp
new file mode 100644
index 0000000..6adae02
--- /dev/null
+++ b/lib/perfdata/graphitewriter.cpp
@@ -0,0 +1,514 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "perfdata/graphitewriter.hpp"
+#include "perfdata/graphitewriter-ti.cpp"
+#include "icinga/service.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/macroprocessor.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "base/tcpsocket.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+#include "base/convert.hpp"
+#include "base/utility.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/application.hpp"
+#include "base/stream.hpp"
+#include "base/networkstream.hpp"
+#include "base/exception.hpp"
+#include "base/statsfunction.hpp"
+#include <boost/algorithm/string.hpp>
+#include <boost/algorithm/string/replace.hpp>
+#include <utility>
+
+using namespace icinga;
+
+REGISTER_TYPE(GraphiteWriter);
+
+REGISTER_STATSFUNCTION(GraphiteWriter, &GraphiteWriter::StatsFunc);
+
+/*
+ * Enable HA capabilities once the config object is loaded.
+ */
+void GraphiteWriter::OnConfigLoaded()
+{
+ ObjectImpl<GraphiteWriter>::OnConfigLoaded();
+
+ m_WorkQueue.SetName("GraphiteWriter, " + GetName());
+
+ if (!GetEnableHa()) {
+ Log(LogDebug, "GraphiteWriter")
+ << "HA functionality disabled. Won't pause connection: " << GetName();
+
+ SetHAMode(HARunEverywhere);
+ } else {
+ SetHAMode(HARunOnce);
+ }
+}
+
+/**
+ * Feature stats interface
+ *
+ * @param status Key value pairs for feature stats
+ * @param perfdata Array of PerfdataValue objects
+ */
+void GraphiteWriter::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata)
+{
+ DictionaryData nodes;
+
+ for (const GraphiteWriter::Ptr& graphitewriter : ConfigType::GetObjectsByType<GraphiteWriter>()) {
+ size_t workQueueItems = graphitewriter->m_WorkQueue.GetLength();
+ double workQueueItemRate = graphitewriter->m_WorkQueue.GetTaskCount(60) / 60.0;
+
+ nodes.emplace_back(graphitewriter->GetName(), new Dictionary({
+ { "work_queue_items", workQueueItems },
+ { "work_queue_item_rate", workQueueItemRate },
+ { "connected", graphitewriter->GetConnected() }
+ }));
+
+ perfdata->Add(new PerfdataValue("graphitewriter_" + graphitewriter->GetName() + "_work_queue_items", workQueueItems));
+ perfdata->Add(new PerfdataValue("graphitewriter_" + graphitewriter->GetName() + "_work_queue_item_rate", workQueueItemRate));
+ }
+
+ status->Set("graphitewriter", new Dictionary(std::move(nodes)));
+}
+
+/**
+ * Resume is equivalent to Start, but with HA capabilities to resume at runtime.
+ */
+void GraphiteWriter::Resume()
+{
+ ObjectImpl<GraphiteWriter>::Resume();
+
+ Log(LogInformation, "GraphiteWriter")
+ << "'" << GetName() << "' resumed.";
+
+ /* Register exception handler for WQ tasks. */
+ m_WorkQueue.SetExceptionCallback([this](boost::exception_ptr exp) { ExceptionHandler(std::move(exp)); });
+
+ /* Timer for reconnecting */
+ m_ReconnectTimer = Timer::Create();
+ m_ReconnectTimer->SetInterval(10);
+ m_ReconnectTimer->OnTimerExpired.connect([this](const Timer * const&) { ReconnectTimerHandler(); });
+ m_ReconnectTimer->Start();
+ m_ReconnectTimer->Reschedule(0);
+
+ /* Register event handlers. */
+ m_HandleCheckResults = Checkable::OnNewCheckResult.connect([this](const Checkable::Ptr& checkable,
+ const CheckResult::Ptr& cr, const MessageOrigin::Ptr&) {
+ CheckResultHandler(checkable, cr);
+ });
+}
+
+/**
+ * Pause is equivalent to Stop, but with HA capabilities to resume at runtime.
+ */
+void GraphiteWriter::Pause()
+{
+ m_HandleCheckResults.disconnect();
+ m_ReconnectTimer->Stop(true);
+
+ try {
+ ReconnectInternal();
+ } catch (const std::exception&) {
+ Log(LogInformation, "GraphiteWriter")
+ << "'" << GetName() << "' paused. Unable to connect, not flushing buffers. Data may be lost on reload.";
+
+ ObjectImpl<GraphiteWriter>::Pause();
+ return;
+ }
+
+ m_WorkQueue.Join();
+ DisconnectInternal();
+
+ Log(LogInformation, "GraphiteWriter")
+ << "'" << GetName() << "' paused.";
+
+ ObjectImpl<GraphiteWriter>::Pause();
+}
+
+/**
+ * Check if method is called inside the WQ thread.
+ */
+void GraphiteWriter::AssertOnWorkQueue()
+{
+ ASSERT(m_WorkQueue.IsWorkerThread());
+}
+
+/**
+ * Exception handler for the WQ.
+ *
+ * Closes the connection if connected.
+ *
+ * @param exp Exception pointer
+ */
+void GraphiteWriter::ExceptionHandler(boost::exception_ptr exp)
+{
+ Log(LogCritical, "GraphiteWriter", "Exception during Graphite operation: Verify that your backend is operational!");
+
+ Log(LogDebug, "GraphiteWriter")
+ << "Exception during Graphite operation: " << DiagnosticInformation(std::move(exp));
+
+ if (GetConnected()) {
+ m_Stream->close();
+
+ SetConnected(false);
+ }
+}
+
+/**
+ * Reconnect method, stops when the feature is paused in HA zones.
+ *
+ * Called inside the WQ.
+ */
+void GraphiteWriter::Reconnect()
+{
+ AssertOnWorkQueue();
+
+ if (IsPaused()) {
+ SetConnected(false);
+ return;
+ }
+
+ ReconnectInternal();
+}
+
+/**
+ * Reconnect method, connects to a TCP Stream
+ */
+void GraphiteWriter::ReconnectInternal()
+{
+ double startTime = Utility::GetTime();
+
+ CONTEXT("Reconnecting to Graphite '" << GetName() << "'");
+
+ SetShouldConnect(true);
+
+ if (GetConnected())
+ return;
+
+ Log(LogNotice, "GraphiteWriter")
+ << "Reconnecting to Graphite on host '" << GetHost() << "' port '" << GetPort() << "'.";
+
+ m_Stream = Shared<AsioTcpStream>::Make(IoEngine::Get().GetIoContext());
+
+ try {
+ icinga::Connect(m_Stream->lowest_layer(), GetHost(), GetPort());
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "GraphiteWriter")
+ << "Can't connect to Graphite on host '" << GetHost() << "' port '" << GetPort() << ".'";
+
+ SetConnected(false);
+
+ throw;
+ }
+
+ SetConnected(true);
+
+ Log(LogInformation, "GraphiteWriter")
+ << "Finished reconnecting to Graphite in " << std::setw(2) << Utility::GetTime() - startTime << " second(s).";
+}
+
+/**
+ * Reconnect handler called by the timer.
+ *
+ * Enqueues a reconnect task into the WQ.
+ */
+void GraphiteWriter::ReconnectTimerHandler()
+{
+ if (IsPaused())
+ return;
+
+ m_WorkQueue.Enqueue([this]() { Reconnect(); }, PriorityHigh);
+}
+
+/**
+ * Disconnect the stream.
+ *
+ * Called inside the WQ.
+ */
+void GraphiteWriter::Disconnect()
+{
+ AssertOnWorkQueue();
+
+ DisconnectInternal();
+}
+
+/**
+ * Disconnect the stream.
+ *
+ * Called outside the WQ.
+ */
+void GraphiteWriter::DisconnectInternal()
+{
+ if (!GetConnected())
+ return;
+
+ m_Stream->close();
+
+ SetConnected(false);
+}
+
+/**
+ * Check result event handler, checks whether feature is not paused in HA setups.
+ *
+ * @param checkable Host/Service object
+ * @param cr Check result including performance data
+ */
+void GraphiteWriter::CheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr)
+{
+ if (IsPaused())
+ return;
+
+ m_WorkQueue.Enqueue([this, checkable, cr]() { CheckResultHandlerInternal(checkable, cr); });
+}
+
+/**
+ * Check result event handler, prepares metadata and perfdata values and calls Send*()
+ *
+ * Called inside the WQ.
+ *
+ * @param checkable Host/Service object
+ * @param cr Check result including performance data
+ */
+void GraphiteWriter::CheckResultHandlerInternal(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr)
+{
+ AssertOnWorkQueue();
+
+ CONTEXT("Processing check result for '" << checkable->GetName() << "'");
+
+ /* TODO: Deal with missing connection here. Needs refactoring
+ * into parsing the actual performance data and then putting it
+ * into a queue for re-inserting. */
+
+ if (!IcingaApplication::GetInstance()->GetEnablePerfdata() || !checkable->GetEnablePerfdata())
+ return;
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ MacroProcessor::ResolverList resolvers;
+ if (service)
+ resolvers.emplace_back("service", service);
+ resolvers.emplace_back("host", host);
+
+ String prefix;
+
+ if (service) {
+ prefix = MacroProcessor::ResolveMacros(GetServiceNameTemplate(), resolvers, cr, nullptr, [](const Value& value) -> Value {
+ return EscapeMacroMetric(value);
+ });
+ } else {
+ prefix = MacroProcessor::ResolveMacros(GetHostNameTemplate(), resolvers, cr, nullptr, [](const Value& value) -> Value {
+ return EscapeMacroMetric(value);
+ });
+ }
+
+ String prefixPerfdata = prefix + ".perfdata";
+ String prefixMetadata = prefix + ".metadata";
+
+ double ts = cr->GetExecutionEnd();
+
+ if (GetEnableSendMetadata()) {
+ if (service) {
+ SendMetric(checkable, prefixMetadata, "state", service->GetState(), ts);
+ } else {
+ SendMetric(checkable, prefixMetadata, "state", host->GetState(), ts);
+ }
+
+ SendMetric(checkable, prefixMetadata, "current_attempt", checkable->GetCheckAttempt(), ts);
+ SendMetric(checkable, prefixMetadata, "max_check_attempts", checkable->GetMaxCheckAttempts(), ts);
+ SendMetric(checkable, prefixMetadata, "state_type", checkable->GetStateType(), ts);
+ SendMetric(checkable, prefixMetadata, "reachable", checkable->IsReachable(), ts);
+ SendMetric(checkable, prefixMetadata, "downtime_depth", checkable->GetDowntimeDepth(), ts);
+ SendMetric(checkable, prefixMetadata, "acknowledgement", checkable->GetAcknowledgement(), ts);
+ SendMetric(checkable, prefixMetadata, "latency", cr->CalculateLatency(), ts);
+ SendMetric(checkable, prefixMetadata, "execution_time", cr->CalculateExecutionTime(), ts);
+ }
+
+ SendPerfdata(checkable, prefixPerfdata, cr, ts);
+}
+
+/**
+ * Parse performance data from check result and call SendMetric()
+ *
+ * @param checkable Host/service object
+ * @param prefix Metric prefix string
+ * @param cr Check result including performance data
+ * @param ts Timestamp when the check result was created
+ */
+void GraphiteWriter::SendPerfdata(const Checkable::Ptr& checkable, const String& prefix, const CheckResult::Ptr& cr, double ts)
+{
+ Array::Ptr perfdata = cr->GetPerformanceData();
+
+ if (!perfdata)
+ return;
+
+ CheckCommand::Ptr checkCommand = checkable->GetCheckCommand();
+
+ ObjectLock olock(perfdata);
+ for (const Value& val : perfdata) {
+ PerfdataValue::Ptr pdv;
+
+ if (val.IsObjectType<PerfdataValue>())
+ pdv = val;
+ else {
+ try {
+ pdv = PerfdataValue::Parse(val);
+ } catch (const std::exception&) {
+ Log(LogWarning, "GraphiteWriter")
+ << "Ignoring invalid perfdata for checkable '"
+ << checkable->GetName() << "' and command '"
+ << checkCommand->GetName() << "' with value: " << val;
+ continue;
+ }
+ }
+
+ String escapedKey = EscapeMetricLabel(pdv->GetLabel());
+
+ SendMetric(checkable, prefix, escapedKey + ".value", pdv->GetValue(), ts);
+
+ if (GetEnableSendThresholds()) {
+ if (!pdv->GetCrit().IsEmpty())
+ SendMetric(checkable, prefix, escapedKey + ".crit", pdv->GetCrit(), ts);
+ if (!pdv->GetWarn().IsEmpty())
+ SendMetric(checkable, prefix, escapedKey + ".warn", pdv->GetWarn(), ts);
+ if (!pdv->GetMin().IsEmpty())
+ SendMetric(checkable, prefix, escapedKey + ".min", pdv->GetMin(), ts);
+ if (!pdv->GetMax().IsEmpty())
+ SendMetric(checkable, prefix, escapedKey + ".max", pdv->GetMax(), ts);
+ }
+ }
+}
+
+/**
+ * Computes metric data and sends to Graphite
+ *
+ * @param checkable Host/service object
+ * @param prefix Computed metric prefix string
+ * @param name Metric name
+ * @param value Metric value
+ * @param ts Timestamp when the check result was created
+ */
+void GraphiteWriter::SendMetric(const Checkable::Ptr& checkable, const String& prefix, const String& name, double value, double ts)
+{
+ namespace asio = boost::asio;
+
+ std::ostringstream msgbuf;
+ msgbuf << prefix << "." << name << " " << Convert::ToString(value) << " " << static_cast<long>(ts);
+
+ Log(LogDebug, "GraphiteWriter")
+ << "Checkable '" << checkable->GetName() << "' adds to metric list: '" << msgbuf.str() << "'.";
+
+ // do not send \n to debug log
+ msgbuf << "\n";
+
+ std::unique_lock<std::mutex> lock(m_StreamMutex);
+
+ if (!GetConnected())
+ return;
+
+ try {
+ asio::write(*m_Stream, asio::buffer(msgbuf.str()));
+ m_Stream->flush();
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "GraphiteWriter")
+ << "Cannot write to TCP socket on host '" << GetHost() << "' port '" << GetPort() << "'.";
+
+ throw;
+ }
+}
+
+/**
+ * Escape metric tree elements
+ *
+ * Dots are not allowed, e.g. in host names
+ *
+ * @param str Metric part name
+ * @return Escaped string
+ */
+String GraphiteWriter::EscapeMetric(const String& str)
+{
+ String result = str;
+
+ //don't allow '.' in metric prefixes
+ boost::replace_all(result, " ", "_");
+ boost::replace_all(result, ".", "_");
+ boost::replace_all(result, "\\", "_");
+ boost::replace_all(result, "/", "_");
+
+ return result;
+}
+
+/**
+ * Escape metric label
+ *
+ * Dots are allowed - users can create trees from perfdata labels
+ *
+ * @param str Metric label name
+ * @return Escaped string
+ */
+String GraphiteWriter::EscapeMetricLabel(const String& str)
+{
+ String result = str;
+
+ // Allow '.' to pass through in perfdata labels
+ boost::replace_all(result, " ", "_");
+ boost::replace_all(result, "\\", "_");
+ boost::replace_all(result, "/", "_");
+ boost::replace_all(result, "::", ".");
+
+ return result;
+}
+
+/**
+ * Escape macro metrics found via host/service name templates
+ *
+ * @param value Array or string with macro metric names
+ * @return Escaped string. Arrays are joined with dots.
+ */
+Value GraphiteWriter::EscapeMacroMetric(const Value& value)
+{
+ if (value.IsObjectType<Array>()) {
+ Array::Ptr arr = value;
+ ArrayData result;
+
+ ObjectLock olock(arr);
+ for (const Value& arg : arr) {
+ result.push_back(EscapeMetric(arg));
+ }
+
+ return Utility::Join(new Array(std::move(result)), '.');
+ } else
+ return EscapeMetric(value);
+}
+
+/**
+ * Validate the configuration setting 'host_name_template'
+ *
+ * @param lvalue String containing runtime macros.
+ * @param utils Helper, unused
+ */
+void GraphiteWriter::ValidateHostNameTemplate(const Lazy<String>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<GraphiteWriter>::ValidateHostNameTemplate(lvalue, utils);
+
+ if (!MacroProcessor::ValidateMacroString(lvalue()))
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "host_name_template" }, "Closing $ not found in macro format string '" + lvalue() + "'."));
+}
+
+/**
+ * Validate the configuration setting 'service_name_template'
+ *
+ * @param lvalue String containing runtime macros.
+ * @param utils Helper, unused
+ */
+void GraphiteWriter::ValidateServiceNameTemplate(const Lazy<String>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<GraphiteWriter>::ValidateServiceNameTemplate(lvalue, utils);
+
+ if (!MacroProcessor::ValidateMacroString(lvalue()))
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "service_name_template" }, "Closing $ not found in macro format string '" + lvalue() + "'."));
+}
diff --git a/lib/perfdata/graphitewriter.hpp b/lib/perfdata/graphitewriter.hpp
new file mode 100644
index 0000000..e0c8b78
--- /dev/null
+++ b/lib/perfdata/graphitewriter.hpp
@@ -0,0 +1,69 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef GRAPHITEWRITER_H
+#define GRAPHITEWRITER_H
+
+#include "perfdata/graphitewriter-ti.hpp"
+#include "icinga/service.hpp"
+#include "base/configobject.hpp"
+#include "base/tcpsocket.hpp"
+#include "base/timer.hpp"
+#include "base/workqueue.hpp"
+#include <fstream>
+#include <mutex>
+
+namespace icinga
+{
+
+/**
+ * An Icinga graphite writer.
+ *
+ * @ingroup perfdata
+ */
+class GraphiteWriter final : public ObjectImpl<GraphiteWriter>
+{
+public:
+ DECLARE_OBJECT(GraphiteWriter);
+ DECLARE_OBJECTNAME(GraphiteWriter);
+
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+
+ void ValidateHostNameTemplate(const Lazy<String>& lvalue, const ValidationUtils& utils) override;
+ void ValidateServiceNameTemplate(const Lazy<String>& lvalue, const ValidationUtils& utils) override;
+
+protected:
+ void OnConfigLoaded() override;
+ void Resume() override;
+ void Pause() override;
+
+private:
+ Shared<AsioTcpStream>::Ptr m_Stream;
+ std::mutex m_StreamMutex;
+ WorkQueue m_WorkQueue{10000000, 1};
+
+ boost::signals2::connection m_HandleCheckResults;
+ Timer::Ptr m_ReconnectTimer;
+
+ void CheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr);
+ void CheckResultHandlerInternal(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr);
+ void SendMetric(const Checkable::Ptr& checkable, const String& prefix, const String& name, double value, double ts);
+ void SendPerfdata(const Checkable::Ptr& checkable, const String& prefix, const CheckResult::Ptr& cr, double ts);
+ static String EscapeMetric(const String& str);
+ static String EscapeMetricLabel(const String& str);
+ static Value EscapeMacroMetric(const Value& value);
+
+ void ReconnectTimerHandler();
+
+ void Disconnect();
+ void DisconnectInternal();
+ void Reconnect();
+ void ReconnectInternal();
+
+ void AssertOnWorkQueue();
+
+ void ExceptionHandler(boost::exception_ptr exp);
+};
+
+}
+
+#endif /* GRAPHITEWRITER_H */
diff --git a/lib/perfdata/graphitewriter.ti b/lib/perfdata/graphitewriter.ti
new file mode 100644
index 0000000..c8db067
--- /dev/null
+++ b/lib/perfdata/graphitewriter.ti
@@ -0,0 +1,38 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+
+library perfdata;
+
+namespace icinga
+{
+
+class GraphiteWriter : ConfigObject
+{
+ activation_priority 100;
+
+ [config] String host {
+ default {{{ return "127.0.0.1"; }}}
+ };
+ [config] String port {
+ default {{{ return "2003"; }}}
+ };
+ [config] String host_name_template {
+ default {{{ return "icinga2.$host.name$.host.$host.check_command$"; }}}
+ };
+ [config] String service_name_template {
+ default {{{ return "icinga2.$host.name$.services.$service.name$.$service.check_command$"; }}}
+ };
+ [config] bool enable_send_thresholds;
+ [config] bool enable_send_metadata;
+
+ [no_user_modify] bool connected;
+ [no_user_modify] bool should_connect {
+ default {{{ return true; }}}
+ };
+ [config] bool enable_ha {
+ default {{{ return false; }}}
+ };
+};
+
+}
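+
+/* Illustrative example (editor's sketch, not generated from this file): a minimal
+ * GraphiteWriter object using the attributes defined above; host and port values are placeholders.
+ *
+ *   object GraphiteWriter "graphite" {
+ *     host = "192.0.2.20"
+ *     port = "2003"
+ *     enable_send_thresholds = true
+ *   }
+ */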
diff --git a/lib/perfdata/influxdb2writer.cpp b/lib/perfdata/influxdb2writer.cpp
new file mode 100644
index 0000000..c92d7d4
--- /dev/null
+++ b/lib/perfdata/influxdb2writer.cpp
@@ -0,0 +1,44 @@
+/* Icinga 2 | (c) 2021 Icinga GmbH | GPLv2+ */
+
+#include "perfdata/influxdb2writer.hpp"
+#include "perfdata/influxdb2writer-ti.cpp"
+#include "remote/url.hpp"
+#include "base/configtype.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/statsfunction.hpp"
+#include <utility>
+#include <boost/beast/http/message.hpp>
+#include <boost/beast/http/string_body.hpp>
+
+using namespace icinga;
+
+REGISTER_TYPE(Influxdb2Writer);
+
+REGISTER_STATSFUNCTION(Influxdb2Writer, &Influxdb2Writer::StatsFunc);
+
+void Influxdb2Writer::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata)
+{
+ InfluxdbCommonWriter::StatsFunc<Influxdb2Writer>(status, perfdata);
+}
+
+boost::beast::http::request<boost::beast::http::string_body> Influxdb2Writer::AssembleRequest(String body)
+{
+ auto request (AssembleBaseRequest(std::move(body)));
+
+ request.set(boost::beast::http::field::authorization, "Token " + GetAuthToken());
+
+ return request;
+}
+
+Url::Ptr Influxdb2Writer::AssembleUrl()
+{
+ auto url (AssembleBaseUrl());
+
+ std::vector<String> path ({"api", "v2", "write"});
+ url->SetPath(path);
+
+ url->AddQueryElement("org", GetOrganization());
+ url->AddQueryElement("bucket", GetBucket());
+
+ return url;
+}
diff --git a/lib/perfdata/influxdb2writer.hpp b/lib/perfdata/influxdb2writer.hpp
new file mode 100644
index 0000000..3b20f8b
--- /dev/null
+++ b/lib/perfdata/influxdb2writer.hpp
@@ -0,0 +1,33 @@
+/* Icinga 2 | (c) 2021 Icinga GmbH | GPLv2+ */
+
+#ifndef INFLUXDB2WRITER_H
+#define INFLUXDB2WRITER_H
+
+#include "perfdata/influxdb2writer-ti.hpp"
+#include <boost/beast/http/message.hpp>
+#include <boost/beast/http/string_body.hpp>
+
+namespace icinga
+{
+
+/**
+ * An Icinga InfluxDB v2 writer.
+ *
+ * @ingroup perfdata
+ */
+class Influxdb2Writer final : public ObjectImpl<Influxdb2Writer>
+{
+public:
+ DECLARE_OBJECT(Influxdb2Writer);
+ DECLARE_OBJECTNAME(Influxdb2Writer);
+
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+
+protected:
+ boost::beast::http::request<boost::beast::http::string_body> AssembleRequest(String body) override;
+ Url::Ptr AssembleUrl() override;
+};
+
+}
+
+#endif /* INFLUXDB2WRITER_H */
diff --git a/lib/perfdata/influxdb2writer.ti b/lib/perfdata/influxdb2writer.ti
new file mode 100644
index 0000000..f806187
--- /dev/null
+++ b/lib/perfdata/influxdb2writer.ti
@@ -0,0 +1,19 @@
+/* Icinga 2 | (c) 2021 Icinga GmbH | GPLv2+ */
+
+#include "perfdata/influxdbcommonwriter.hpp"
+
+library perfdata;
+
+namespace icinga
+{
+
+class Influxdb2Writer : InfluxdbCommonWriter
+{
+ activation_priority 100;
+
+ [config, required] String organization;
+ [config, required] String bucket;
+ [config, required, no_user_view] String auth_token;
+};
+
+}
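+
+/* Illustrative example (editor's sketch, not generated from this file): a minimal
+ * Influxdb2Writer object; host, organization, bucket and token values are placeholders.
+ *
+ *   object Influxdb2Writer "influxdb2" {
+ *     host = "192.0.2.30"
+ *     port = "8086"
+ *     organization = "monitoring"
+ *     bucket = "icinga2"
+ *     auth_token = "<token>"
+ *   }
+ */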
diff --git a/lib/perfdata/influxdbcommonwriter.cpp b/lib/perfdata/influxdbcommonwriter.cpp
new file mode 100644
index 0000000..fb0bcc9
--- /dev/null
+++ b/lib/perfdata/influxdbcommonwriter.cpp
@@ -0,0 +1,596 @@
+/* Icinga 2 | (c) 2021 Icinga GmbH | GPLv2+ */
+
+#include "perfdata/influxdbcommonwriter.hpp"
+#include "perfdata/influxdbcommonwriter-ti.cpp"
+#include "remote/url.hpp"
+#include "icinga/service.hpp"
+#include "icinga/macroprocessor.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "icinga/checkcommand.hpp"
+#include "base/application.hpp"
+#include "base/defer.hpp"
+#include "base/io-engine.hpp"
+#include "base/tcpsocket.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+#include "base/convert.hpp"
+#include "base/utility.hpp"
+#include "base/stream.hpp"
+#include "base/json.hpp"
+#include "base/networkstream.hpp"
+#include "base/exception.hpp"
+#include "base/statsfunction.hpp"
+#include "base/tlsutility.hpp"
+#include <boost/algorithm/string.hpp>
+#include <boost/algorithm/string/replace.hpp>
+#include <boost/asio/ssl/context.hpp>
+#include <boost/beast/core/flat_buffer.hpp>
+#include <boost/beast/http/field.hpp>
+#include <boost/beast/http/message.hpp>
+#include <boost/beast/http/parser.hpp>
+#include <boost/beast/http/read.hpp>
+#include <boost/beast/http/status.hpp>
+#include <boost/beast/http/string_body.hpp>
+#include <boost/beast/http/verb.hpp>
+#include <boost/beast/http/write.hpp>
+#include <boost/math/special_functions/fpclassify.hpp>
+#include <boost/regex.hpp>
+#include <boost/scoped_array.hpp>
+#include <memory>
+#include <string>
+#include <utility>
+
+using namespace icinga;
+
+REGISTER_TYPE(InfluxdbCommonWriter);
+
+class InfluxdbInteger final : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(InfluxdbInteger);
+
+ InfluxdbInteger(int value)
+ : m_Value(value)
+ { }
+
+ int GetValue() const
+ {
+ return m_Value;
+ }
+
+private:
+ int m_Value;
+};
+
+void InfluxdbCommonWriter::OnConfigLoaded()
+{
+ ObjectImpl<InfluxdbCommonWriter>::OnConfigLoaded();
+
+ m_WorkQueue.SetName(GetReflectionType()->GetName() + ", " + GetName());
+
+ if (!GetEnableHa()) {
+ Log(LogDebug, GetReflectionType()->GetName())
+ << "HA functionality disabled. Won't pause connection: " << GetName();
+
+ SetHAMode(HARunEverywhere);
+ } else {
+ SetHAMode(HARunOnce);
+ }
+}
+
+void InfluxdbCommonWriter::Resume()
+{
+ ObjectImpl<InfluxdbCommonWriter>::Resume();
+
+ Log(LogInformation, GetReflectionType()->GetName())
+ << "'" << GetName() << "' resumed.";
+
+ /* Register exception handler for WQ tasks. */
+ m_WorkQueue.SetExceptionCallback([this](boost::exception_ptr exp) { ExceptionHandler(std::move(exp)); });
+
+ /* Setup timer for periodically flushing m_DataBuffer */
+ m_FlushTimer = Timer::Create();
+ m_FlushTimer->SetInterval(GetFlushInterval());
+ m_FlushTimer->OnTimerExpired.connect([this](const Timer * const&) { FlushTimeout(); });
+ m_FlushTimer->Start();
+ m_FlushTimer->Reschedule(0);
+
+ /* Register for new metrics. */
+ m_HandleCheckResults = Checkable::OnNewCheckResult.connect([this](const Checkable::Ptr& checkable,
+ const CheckResult::Ptr& cr, const MessageOrigin::Ptr&) {
+ CheckResultHandler(checkable, cr);
+ });
+}
+
+/* Pause is equivalent to Stop, but with HA capabilities to resume at runtime. */
+void InfluxdbCommonWriter::Pause()
+{
+ m_HandleCheckResults.disconnect();
+
+ /* Force a flush. */
+ Log(LogDebug, GetReflectionType()->GetName())
+ << "Processing pending tasks and flushing data buffers.";
+
+ m_FlushTimer->Stop(true);
+ m_WorkQueue.Enqueue([this]() { FlushWQ(); }, PriorityLow);
+
+ /* Wait for the flush to complete, implicitly waits for all WQ tasks enqueued prior to pausing. */
+ m_WorkQueue.Join();
+
+ Log(LogInformation, GetReflectionType()->GetName())
+ << "'" << GetName() << "' paused.";
+
+ ObjectImpl<InfluxdbCommonWriter>::Pause();
+}
+
+void InfluxdbCommonWriter::AssertOnWorkQueue()
+{
+ ASSERT(m_WorkQueue.IsWorkerThread());
+}
+
+void InfluxdbCommonWriter::ExceptionHandler(boost::exception_ptr exp)
+{
+ Log(LogCritical, GetReflectionType()->GetName(), "Exception during InfluxDB operation: Verify that your backend is operational!");
+
+ Log(LogDebug, GetReflectionType()->GetName())
+ << "Exception during InfluxDB operation: " << DiagnosticInformation(std::move(exp));
+
+ //TODO: Close the connection, if we keep it open.
+}
+
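+/**
+ * Open a new TCP or TLS connection to InfluxDB and verify the peer certificate if requested.
+ *
+ * @return Connected stream
+ */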
+OptionalTlsStream InfluxdbCommonWriter::Connect()
+{
+ Log(LogNotice, GetReflectionType()->GetName())
+ << "Reconnecting to InfluxDB on host '" << GetHost() << "' port '" << GetPort() << "'.";
+
+ OptionalTlsStream stream;
+ bool ssl = GetSslEnable();
+
+ if (ssl) {
+ Shared<boost::asio::ssl::context>::Ptr sslContext;
+
+ try {
+ sslContext = MakeAsioSslContext(GetSslCert(), GetSslKey(), GetSslCaCert());
+ } catch (const std::exception& ex) {
+ Log(LogWarning, GetReflectionType()->GetName())
+ << "Unable to create SSL context.";
+ throw;
+ }
+
+ stream.first = Shared<AsioTlsStream>::Make(IoEngine::Get().GetIoContext(), *sslContext, GetHost());
+
+ } else {
+ stream.second = Shared<AsioTcpStream>::Make(IoEngine::Get().GetIoContext());
+ }
+
+ try {
+ icinga::Connect(ssl ? stream.first->lowest_layer() : stream.second->lowest_layer(), GetHost(), GetPort());
+ } catch (const std::exception& ex) {
+ Log(LogWarning, GetReflectionType()->GetName())
+ << "Can't connect to InfluxDB on host '" << GetHost() << "' port '" << GetPort() << "'.";
+ throw;
+ }
+
+ if (ssl) {
+ auto& tlsStream (stream.first->next_layer());
+
+ try {
+ tlsStream.handshake(tlsStream.client);
+ } catch (const std::exception& ex) {
+ Log(LogWarning, GetReflectionType()->GetName())
+ << "TLS handshake with host '" << GetHost() << "' failed.";
+ throw;
+ }
+
+ if (!GetSslInsecureNoverify()) {
+ if (!tlsStream.GetPeerCertificate()) {
+ BOOST_THROW_EXCEPTION(std::runtime_error("InfluxDB didn't present any TLS certificate."));
+ }
+
+ if (!tlsStream.IsVerifyOK()) {
+ BOOST_THROW_EXCEPTION(std::runtime_error(
+ "TLS certificate validation failed: " + std::string(tlsStream.GetVerifyError())
+ ));
+ }
+ }
+ }
+
+ return stream;
+}
+
+void InfluxdbCommonWriter::CheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr)
+{
+ if (IsPaused())
+ return;
+
+ m_WorkQueue.Enqueue([this, checkable, cr]() { CheckResultHandlerWQ(checkable, cr); }, PriorityLow);
+}
+
+void InfluxdbCommonWriter::CheckResultHandlerWQ(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr)
+{
+ AssertOnWorkQueue();
+
+ CONTEXT("Processing check result for '" << checkable->GetName() << "'");
+
+ if (!IcingaApplication::GetInstance()->GetEnablePerfdata() || !checkable->GetEnablePerfdata())
+ return;
+
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ MacroProcessor::ResolverList resolvers;
+ if (service)
+ resolvers.emplace_back("service", service);
+ resolvers.emplace_back("host", host);
+
+ String prefix;
+
+ double ts = cr->GetExecutionEnd();
+
+ // Clone the template and perform an in-place macro expansion of measurement and tag values
+ Dictionary::Ptr tmpl_clean = service ? GetServiceTemplate() : GetHostTemplate();
+ Dictionary::Ptr tmpl = static_pointer_cast<Dictionary>(tmpl_clean->ShallowClone());
+ tmpl->Set("measurement", MacroProcessor::ResolveMacros(tmpl->Get("measurement"), resolvers, cr));
+
+ Dictionary::Ptr tagsClean = tmpl->Get("tags");
+ if (tagsClean) {
+ Dictionary::Ptr tags = new Dictionary();
+
+ {
+ ObjectLock olock(tagsClean);
+ for (const Dictionary::Pair& pair : tagsClean) {
+ String missing_macro;
+ Value value = MacroProcessor::ResolveMacros(pair.second, resolvers, cr, &missing_macro);
+
+ if (missing_macro.IsEmpty()) {
+ tags->Set(pair.first, value);
+ }
+ }
+ }
+
+ tmpl->Set("tags", tags);
+ }
+
+ CheckCommand::Ptr checkCommand = checkable->GetCheckCommand();
+
+ Array::Ptr perfdata = cr->GetPerformanceData();
+
+ if (perfdata) {
+ ObjectLock olock(perfdata);
+ for (const Value& val : perfdata) {
+ PerfdataValue::Ptr pdv;
+
+ if (val.IsObjectType<PerfdataValue>())
+ pdv = val;
+ else {
+ try {
+ pdv = PerfdataValue::Parse(val);
+ } catch (const std::exception&) {
+ Log(LogWarning, GetReflectionType()->GetName())
+ << "Ignoring invalid perfdata for checkable '"
+ << checkable->GetName() << "' and command '"
+ << checkCommand->GetName() << "' with value: " << val;
+ continue;
+ }
+ }
+
+ Dictionary::Ptr fields = new Dictionary();
+ fields->Set("value", pdv->GetValue());
+
+ if (GetEnableSendThresholds()) {
+ if (!pdv->GetCrit().IsEmpty())
+ fields->Set("crit", pdv->GetCrit());
+ if (!pdv->GetWarn().IsEmpty())
+ fields->Set("warn", pdv->GetWarn());
+ if (!pdv->GetMin().IsEmpty())
+ fields->Set("min", pdv->GetMin());
+ if (!pdv->GetMax().IsEmpty())
+ fields->Set("max", pdv->GetMax());
+ }
+ if (!pdv->GetUnit().IsEmpty()) {
+ fields->Set("unit", pdv->GetUnit());
+ }
+
+ SendMetric(checkable, tmpl, pdv->GetLabel(), fields, ts);
+ }
+ }
+
+ if (GetEnableSendMetadata()) {
+ Host::Ptr host;
+ Service::Ptr service;
+ tie(host, service) = GetHostService(checkable);
+
+ Dictionary::Ptr fields = new Dictionary();
+
+ if (service)
+ fields->Set("state", new InfluxdbInteger(service->GetState()));
+ else
+ fields->Set("state", new InfluxdbInteger(host->GetState()));
+
+ fields->Set("current_attempt", new InfluxdbInteger(checkable->GetCheckAttempt()));
+ fields->Set("max_check_attempts", new InfluxdbInteger(checkable->GetMaxCheckAttempts()));
+ fields->Set("state_type", new InfluxdbInteger(checkable->GetStateType()));
+ fields->Set("reachable", checkable->IsReachable());
+ fields->Set("downtime_depth", new InfluxdbInteger(checkable->GetDowntimeDepth()));
+ fields->Set("acknowledgement", new InfluxdbInteger(checkable->GetAcknowledgement()));
+ fields->Set("latency", cr->CalculateLatency());
+ fields->Set("execution_time", cr->CalculateExecutionTime());
+
+ SendMetric(checkable, tmpl, Empty, fields, ts);
+ }
+}
+
+String InfluxdbCommonWriter::EscapeKeyOrTagValue(const String& str)
+{
+ // Escape double quotes, equals signs, commas and spaces in the key or tag value with a backslash
+ String result = str;
+ boost::algorithm::replace_all(result, "\"", "\\\"");
+ boost::algorithm::replace_all(result, "=", "\\=");
+ boost::algorithm::replace_all(result, ",", "\\,");
+ boost::algorithm::replace_all(result, " ", "\\ ");
+
+ // InfluxDB 'feature': although backslashes are allowed in keys they also act
+ // as escape sequences when followed by ',' or ' '. When your tag is like
+ // 'metric=C:\' bad things happen. Backslashes themselves cannot be escaped
+ // and through experimentation they also escape '='. To be safe we replace
+ // trailing backslashes with an underscore.
+ // See https://github.com/influxdata/influxdb/issues/8587 for more info
+ size_t length = result.GetLength();
+ if (result[length - 1] == '\\')
+ result[length - 1] = '_';
+
+ return result;
+}
+
+String InfluxdbCommonWriter::EscapeValue(const Value& value)
+{
+ if (value.IsObjectType<InfluxdbInteger>()) {
+ std::ostringstream os;
+ os << static_cast<InfluxdbInteger::Ptr>(value)->GetValue() << "i";
+ return os.str();
+ }
+
+ if (value.IsBoolean())
+ return value ? "true" : "false";
+
+ if (value.IsString())
+ return "\"" + EscapeKeyOrTagValue(value) + "\"";
+
+ return value;
+}
+
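+/**
+ * Render one InfluxDB line protocol entry and buffer it, flushing if the threshold is exceeded.
+ *
+ * Example buffered line (illustrative values only): ping4,hostname=web01,metric=rta value=0.005 1700000000
+ *
+ * @param checkable Host/Service object
+ * @param tmpl Expanded host/service template with measurement and tags
+ * @param label Perfdata label, empty for metadata
+ * @param fields Field key/value pairs
+ * @param ts Timestamp when the check result was created
+ */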
+void InfluxdbCommonWriter::SendMetric(const Checkable::Ptr& checkable, const Dictionary::Ptr& tmpl,
+ const String& label, const Dictionary::Ptr& fields, double ts)
+{
+ std::ostringstream msgbuf;
+ msgbuf << EscapeKeyOrTagValue(tmpl->Get("measurement"));
+
+ Dictionary::Ptr tags = tmpl->Get("tags");
+ if (tags) {
+ ObjectLock olock(tags);
+ for (const Dictionary::Pair& pair : tags) {
+ // Empty macro expansion, no tag
+ if (!pair.second.IsEmpty()) {
+ msgbuf << "," << EscapeKeyOrTagValue(pair.first) << "=" << EscapeKeyOrTagValue(pair.second);
+ }
+ }
+ }
+
+ // Label may be empty in the case of metadata
+ if (!label.IsEmpty())
+ msgbuf << ",metric=" << EscapeKeyOrTagValue(label);
+
+ msgbuf << " ";
+
+ {
+ bool first = true;
+
+ ObjectLock fieldLock(fields);
+ for (const Dictionary::Pair& pair : fields) {
+ if (first)
+ first = false;
+ else
+ msgbuf << ",";
+
+ msgbuf << EscapeKeyOrTagValue(pair.first) << "=" << EscapeValue(pair.second);
+ }
+ }
+
+ msgbuf << " " << static_cast<unsigned long>(ts);
+
+ Log(LogDebug, GetReflectionType()->GetName())
+ << "Checkable '" << checkable->GetName() << "' adds to metric list:'" << msgbuf.str() << "'.";
+
+ // Buffer the data point
+ m_DataBuffer.emplace_back(msgbuf.str());
+ m_DataBufferSize = m_DataBuffer.size();
+
+ // Flush if we've buffered too much to prevent excessive memory use
+ if (static_cast<int>(m_DataBuffer.size()) >= GetFlushThreshold()) {
+ Log(LogDebug, GetReflectionType()->GetName())
+ << "Data buffer overflow writing " << m_DataBuffer.size() << " data points";
+
+ try {
+ FlushWQ();
+ } catch (...) {
+ /* Do nothing. */
+ }
+ }
+}
+
+void InfluxdbCommonWriter::FlushTimeout()
+{
+ m_WorkQueue.Enqueue([this]() { FlushTimeoutWQ(); }, PriorityHigh);
+}
+
+void InfluxdbCommonWriter::FlushTimeoutWQ()
+{
+ AssertOnWorkQueue();
+
+ Log(LogDebug, GetReflectionType()->GetName())
+ << "Timer expired writing " << m_DataBuffer.size() << " data points";
+
+ FlushWQ();
+}
+
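+/**
+ * Flush the data buffer to InfluxDB via a single HTTP POST and evaluate the response.
+ *
+ * Called inside the WQ.
+ */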
+void InfluxdbCommonWriter::FlushWQ()
+{
+ AssertOnWorkQueue();
+
+ namespace beast = boost::beast;
+ namespace http = beast::http;
+
+ /* Flush can be called from 1) Timeout 2) Threshold 3) on shutdown/reload. */
+ if (m_DataBuffer.empty())
+ return;
+
+ Log(LogDebug, GetReflectionType()->GetName())
+ << "Flushing data buffer to InfluxDB.";
+
+ String body = boost::algorithm::join(m_DataBuffer, "\n");
+ m_DataBuffer.clear();
+ m_DataBufferSize = 0;
+
+ OptionalTlsStream stream;
+
+ try {
+ stream = Connect();
+ } catch (const std::exception& ex) {
+ Log(LogWarning, GetReflectionType()->GetName())
+ << "Flush failed, cannot connect to InfluxDB: " << DiagnosticInformation(ex, false);
+ return;
+ }
+
+ Defer s ([&stream]() {
+ if (stream.first) {
+ stream.first->next_layer().shutdown();
+ }
+ });
+
+ auto request (AssembleRequest(std::move(body)));
+
+ try {
+ if (stream.first) {
+ http::write(*stream.first, request);
+ stream.first->flush();
+ } else {
+ http::write(*stream.second, request);
+ stream.second->flush();
+ }
+ } catch (const std::exception& ex) {
+ Log(LogWarning, GetReflectionType()->GetName())
+ << "Cannot write to TCP socket on host '" << GetHost() << "' port '" << GetPort() << "'.";
+ throw;
+ }
+
+ http::parser<false, http::string_body> parser;
+ beast::flat_buffer buf;
+
+ try {
+ if (stream.first) {
+ http::read(*stream.first, buf, parser);
+ } else {
+ http::read(*stream.second, buf, parser);
+ }
+ } catch (const std::exception& ex) {
+ Log(LogWarning, GetReflectionType()->GetName())
+ << "Failed to parse HTTP response from host '" << GetHost() << "' port '" << GetPort() << "': " << DiagnosticInformation(ex);
+ throw;
+ }
+
+ auto& response (parser.get());
+
+ if (response.result() != http::status::no_content) {
+ Log(LogWarning, GetReflectionType()->GetName())
+ << "Unexpected response code: " << response.result();
+
+ auto& contentType (response[http::field::content_type]);
+ if (contentType != "application/json") {
+ Log(LogWarning, GetReflectionType()->GetName())
+ << "Unexpected Content-Type: " << contentType;
+ return;
+ }
+
+ Dictionary::Ptr jsonResponse;
+ auto& body (response.body());
+
+ try {
+ jsonResponse = JsonDecode(body);
+ } catch (...) {
+ Log(LogWarning, GetReflectionType()->GetName())
+ << "Unable to parse JSON response:\n" << body;
+ return;
+ }
+
+ String error = jsonResponse->Get("error");
+
+ Log(LogCritical, GetReflectionType()->GetName())
+ << "InfluxDB error message:\n" << error;
+ }
+}
+
+boost::beast::http::request<boost::beast::http::string_body> InfluxdbCommonWriter::AssembleBaseRequest(String body)
+{
+ namespace http = boost::beast::http;
+
+ auto url (AssembleUrl());
+ http::request<http::string_body> request (http::verb::post, std::string(url->Format(true)), 10);
+
+ request.set(http::field::user_agent, "Icinga/" + Application::GetAppVersion());
+ request.set(http::field::host, url->GetHost() + ":" + url->GetPort());
+ request.body() = std::move(body);
+ request.content_length(request.body().size());
+
+ return request;
+}
+
+Url::Ptr InfluxdbCommonWriter::AssembleBaseUrl()
+{
+ Url::Ptr url = new Url();
+
+ url->SetScheme(GetSslEnable() ? "https" : "http");
+ url->SetHost(GetHost());
+ url->SetPort(GetPort());
+ url->AddQueryElement("precision", "s");
+
+ return url;
+}
+
+void InfluxdbCommonWriter::ValidateHostTemplate(const Lazy<Dictionary::Ptr>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<InfluxdbCommonWriter>::ValidateHostTemplate(lvalue, utils);
+
+ String measurement = lvalue()->Get("measurement");
+ if (!MacroProcessor::ValidateMacroString(measurement))
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "host_template", "measurement" }, "Closing $ not found in macro format string '" + measurement + "'."));
+
+ Dictionary::Ptr tags = lvalue()->Get("tags");
+ if (tags) {
+ ObjectLock olock(tags);
+ for (const Dictionary::Pair& pair : tags) {
+ if (!MacroProcessor::ValidateMacroString(pair.second))
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "host_template", "tags", pair.first }, "Closing $ not found in macro format string '" + pair.second + "'."));
+ }
+ }
+}
+
+void InfluxdbCommonWriter::ValidateServiceTemplate(const Lazy<Dictionary::Ptr>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<InfluxdbCommonWriter>::ValidateServiceTemplate(lvalue, utils);
+
+ String measurement = lvalue()->Get("measurement");
+ if (!MacroProcessor::ValidateMacroString(measurement))
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "service_template", "measurement" }, "Closing $ not found in macro format string '" + measurement + "'."));
+
+ Dictionary::Ptr tags = lvalue()->Get("tags");
+ if (tags) {
+ ObjectLock olock(tags);
+ for (const Dictionary::Pair& pair : tags) {
+ if (!MacroProcessor::ValidateMacroString(pair.second))
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "service_template", "tags", pair.first }, "Closing $ not found in macro format string '" + pair.second + "'."));
+ }
+ }
+}
+
diff --git a/lib/perfdata/influxdbcommonwriter.hpp b/lib/perfdata/influxdbcommonwriter.hpp
new file mode 100644
index 0000000..380b20c
--- /dev/null
+++ b/lib/perfdata/influxdbcommonwriter.hpp
@@ -0,0 +1,101 @@
+/* Icinga 2 | (c) 2021 Icinga GmbH | GPLv2+ */
+
+#ifndef INFLUXDBCOMMONWRITER_H
+#define INFLUXDBCOMMONWRITER_H
+
+#include "perfdata/influxdbcommonwriter-ti.hpp"
+#include "icinga/service.hpp"
+#include "base/configobject.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/tcpsocket.hpp"
+#include "base/timer.hpp"
+#include "base/tlsstream.hpp"
+#include "base/workqueue.hpp"
+#include "remote/url.hpp"
+#include <boost/beast/http/message.hpp>
+#include <boost/beast/http/string_body.hpp>
+#include <atomic>
+#include <fstream>
+
+namespace icinga
+{
+
+/**
+ * Common base class for InfluxDB v1/v2 writers.
+ *
+ * @ingroup perfdata
+ */
+class InfluxdbCommonWriter : public ObjectImpl<InfluxdbCommonWriter>
+{
+public:
+ DECLARE_OBJECT(InfluxdbCommonWriter);
+
+ template<class InfluxWriter>
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+
+ void ValidateHostTemplate(const Lazy<Dictionary::Ptr>& lvalue, const ValidationUtils& utils) override;
+ void ValidateServiceTemplate(const Lazy<Dictionary::Ptr>& lvalue, const ValidationUtils& utils) override;
+
+protected:
+ void OnConfigLoaded() override;
+ void Resume() override;
+ void Pause() override;
+
+ boost::beast::http::request<boost::beast::http::string_body> AssembleBaseRequest(String body);
+ Url::Ptr AssembleBaseUrl();
+ virtual boost::beast::http::request<boost::beast::http::string_body> AssembleRequest(String body) = 0;
+ virtual Url::Ptr AssembleUrl() = 0;
+
+private:
+ boost::signals2::connection m_HandleCheckResults;
+ Timer::Ptr m_FlushTimer;
+ WorkQueue m_WorkQueue{10000000, 1};
+ std::vector<String> m_DataBuffer;
+ std::atomic_size_t m_DataBufferSize{0};
+
+ void CheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr);
+ void CheckResultHandlerWQ(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr);
+ void SendMetric(const Checkable::Ptr& checkable, const Dictionary::Ptr& tmpl,
+ const String& label, const Dictionary::Ptr& fields, double ts);
+ void FlushTimeout();
+ void FlushTimeoutWQ();
+ void FlushWQ();
+
+ static String EscapeKeyOrTagValue(const String& str);
+ static String EscapeValue(const Value& value);
+
+ OptionalTlsStream Connect();
+
+ void AssertOnWorkQueue();
+
+ void ExceptionHandler(boost::exception_ptr exp);
+};
+
+template<class InfluxWriter>
+void InfluxdbCommonWriter::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata)
+{
+ DictionaryData nodes;
+ auto typeName (InfluxWriter::TypeInstance->GetName().ToLower());
+
+ for (const typename InfluxWriter::Ptr& influxwriter : ConfigType::GetObjectsByType<InfluxWriter>()) {
+ size_t workQueueItems = influxwriter->m_WorkQueue.GetLength();
+ double workQueueItemRate = influxwriter->m_WorkQueue.GetTaskCount(60) / 60.0;
+ size_t dataBufferItems = influxwriter->m_DataBufferSize;
+
+ nodes.emplace_back(influxwriter->GetName(), new Dictionary({
+ { "work_queue_items", workQueueItems },
+ { "work_queue_item_rate", workQueueItemRate },
+ { "data_buffer_items", dataBufferItems }
+ }));
+
+ perfdata->Add(new PerfdataValue(typeName + "_" + influxwriter->GetName() + "_work_queue_items", workQueueItems));
+ perfdata->Add(new PerfdataValue(typeName + "_" + influxwriter->GetName() + "_work_queue_item_rate", workQueueItemRate));
+ perfdata->Add(new PerfdataValue(typeName + "_" + influxwriter->GetName() + "_data_queue_items", dataBufferItems));
+ }
+
+ status->Set(typeName, new Dictionary(std::move(nodes)));
+}
+
+}
+
+#endif /* INFLUXDBCOMMONWRITER_H */
diff --git a/lib/perfdata/influxdbcommonwriter.ti b/lib/perfdata/influxdbcommonwriter.ti
new file mode 100644
index 0000000..5cfe83f
--- /dev/null
+++ b/lib/perfdata/influxdbcommonwriter.ti
@@ -0,0 +1,88 @@
+/* Icinga 2 | (c) 2021 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+
+library perfdata;
+
+namespace icinga
+{
+
+abstract class InfluxdbCommonWriter : ConfigObject
+{
+ [config, required] String host {
+ default {{{ return "127.0.0.1"; }}}
+ };
+ [config, required] String port {
+ default {{{ return "8086"; }}}
+ };
+ [config] bool ssl_enable {
+ default {{{ return false; }}}
+ };
+ [config] bool ssl_insecure_noverify {
+ default {{{ return false; }}}
+ };
+ [config] String ssl_ca_cert {
+ default {{{ return ""; }}}
+ };
+ [config] String ssl_cert {
+ default {{{ return ""; }}}
+ };
+ [config] String ssl_key{
+ default {{{ return ""; }}}
+ };
+ [config, required] Dictionary::Ptr host_template {
+ default {{{
+ return new Dictionary({
+ { "measurement", "$host.check_command$" },
+ { "tags", new Dictionary({
+ { "hostname", "$host.name$" }
+ }) }
+ });
+ }}}
+ };
+ [config, required] Dictionary::Ptr service_template {
+ default {{{
+ return new Dictionary({
+ { "measurement", "$service.check_command$" },
+ { "tags", new Dictionary({
+ { "hostname", "$host.name$" },
+ { "service", "$service.name$" }
+ }) }
+ });
+ }}}
+ };
+ [config] bool enable_send_thresholds {
+ default {{{ return false; }}}
+ };
+ [config] bool enable_send_metadata {
+ default {{{ return false; }}}
+ };
+ [config] int flush_interval {
+ default {{{ return 10; }}}
+ };
+ [config] int flush_threshold {
+ default {{{ return 1024; }}}
+ };
+ [config] bool enable_ha {
+ default {{{ return false; }}}
+ };
+};
+
+validator InfluxdbCommonWriter {
+ Dictionary host_template {
+ required measurement;
+ String measurement;
+ Dictionary "tags" {
+ String "*";
+ };
+ };
+ Dictionary service_template {
+ required measurement;
+ String measurement;
+ Dictionary "tags" {
+ String "*";
+ };
+ };
+};
+
+}
diff --git a/lib/perfdata/influxdbwriter.cpp b/lib/perfdata/influxdbwriter.cpp
new file mode 100644
index 0000000..4bc992d
--- /dev/null
+++ b/lib/perfdata/influxdbwriter.cpp
@@ -0,0 +1,56 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "perfdata/influxdbwriter.hpp"
+#include "perfdata/influxdbwriter-ti.cpp"
+#include "base/base64.hpp"
+#include "remote/url.hpp"
+#include "base/configtype.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/statsfunction.hpp"
+#include <boost/beast/http/message.hpp>
+#include <boost/beast/http/string_body.hpp>
+#include <utility>
+
+using namespace icinga;
+
+REGISTER_TYPE(InfluxdbWriter);
+
+REGISTER_STATSFUNCTION(InfluxdbWriter, &InfluxdbWriter::StatsFunc);
+
+void InfluxdbWriter::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata)
+{
+ InfluxdbCommonWriter::StatsFunc<InfluxdbWriter>(status, perfdata);
+}
+
+boost::beast::http::request<boost::beast::http::string_body> InfluxdbWriter::AssembleRequest(String body)
+{
+ auto request (AssembleBaseRequest(std::move(body)));
+ Dictionary::Ptr basicAuth = GetBasicAuth();
+
+ if (basicAuth) {
+ request.set(
+ boost::beast::http::field::authorization,
+ "Basic " + Base64::Encode(basicAuth->Get("username") + ":" + basicAuth->Get("password"))
+ );
+ }
+
+ return request;
+}
+
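+/**
+ * Builds the InfluxDB v1 write endpoint URL on top of the base URL from
+ * AssembleBaseUrl(). Illustrative result (scheme, host and credentials are
+ * assumed example values): http://127.0.0.1:8086/write?db=icinga2&u=monitoring&p=secret
+ */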
+Url::Ptr InfluxdbWriter::AssembleUrl()
+{
+ auto url (AssembleBaseUrl());
+
+ std::vector<String> path;
+ path.emplace_back("write");
+ url->SetPath(path);
+
+ url->AddQueryElement("db", GetDatabase());
+
+ if (!GetUsername().IsEmpty())
+ url->AddQueryElement("u", GetUsername());
+ if (!GetPassword().IsEmpty())
+ url->AddQueryElement("p", GetPassword());
+
+ return url;
+}
diff --git a/lib/perfdata/influxdbwriter.hpp b/lib/perfdata/influxdbwriter.hpp
new file mode 100644
index 0000000..48676cc
--- /dev/null
+++ b/lib/perfdata/influxdbwriter.hpp
@@ -0,0 +1,31 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef INFLUXDBWRITER_H
+#define INFLUXDBWRITER_H
+
+#include "perfdata/influxdbwriter-ti.hpp"
+
+namespace icinga
+{
+
+/**
+ * An Icinga InfluxDB v1 writer.
+ *
+ * @ingroup perfdata
+ */
+class InfluxdbWriter final : public ObjectImpl<InfluxdbWriter>
+{
+public:
+ DECLARE_OBJECT(InfluxdbWriter);
+ DECLARE_OBJECTNAME(InfluxdbWriter);
+
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+
+protected:
+ boost::beast::http::request<boost::beast::http::string_body> AssembleRequest(String body) override;
+ Url::Ptr AssembleUrl() override;
+};
+
+}
+
+#endif /* INFLUXDBWRITER_H */
diff --git a/lib/perfdata/influxdbwriter.ti b/lib/perfdata/influxdbwriter.ti
new file mode 100644
index 0000000..e6fc84e
--- /dev/null
+++ b/lib/perfdata/influxdbwriter.ti
@@ -0,0 +1,35 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "perfdata/influxdbcommonwriter.hpp"
+
+library perfdata;
+
+namespace icinga
+{
+
+class InfluxdbWriter : InfluxdbCommonWriter
+{
+ activation_priority 100;
+
+ [config, required] String database {
+ default {{{ return "icinga2"; }}}
+ };
+ [config] String username {
+ default {{{ return ""; }}}
+ };
+ [config, no_user_view] String password {
+ default {{{ return ""; }}}
+ };
+ [config, no_user_view] Dictionary::Ptr basic_auth;
+};
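+
+/*
+ * Illustrative sketch only (not part of this class definition): a user-facing
+ * object consuming these attributes, plus those inherited from
+ * InfluxdbCommonWriter, might look roughly like this in the Icinga DSL:
+ *
+ *   object InfluxdbWriter "influxdb" {
+ *     host = "127.0.0.1"
+ *     port = "8086"
+ *     database = "icinga2"
+ *     flush_threshold = 1024
+ *     flush_interval = 10s
+ *   }
+ */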
+
+validator InfluxdbWriter {
+ Dictionary basic_auth {
+ required username;
+ String username;
+ required password;
+ String password;
+ };
+};
+
+}
diff --git a/lib/perfdata/opentsdbwriter.cpp b/lib/perfdata/opentsdbwriter.cpp
new file mode 100644
index 0000000..2a9cfc0
--- /dev/null
+++ b/lib/perfdata/opentsdbwriter.cpp
@@ -0,0 +1,525 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "perfdata/opentsdbwriter.hpp"
+#include "perfdata/opentsdbwriter-ti.cpp"
+#include "icinga/service.hpp"
+#include "icinga/checkcommand.hpp"
+#include "icinga/macroprocessor.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "icinga/compatutility.hpp"
+#include "base/tcpsocket.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+#include "base/convert.hpp"
+#include "base/utility.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/application.hpp"
+#include "base/stream.hpp"
+#include "base/networkstream.hpp"
+#include "base/exception.hpp"
+#include "base/statsfunction.hpp"
+#include <boost/algorithm/string.hpp>
+#include <boost/algorithm/string/replace.hpp>
+
+using namespace icinga;
+
+REGISTER_TYPE(OpenTsdbWriter);
+
+REGISTER_STATSFUNCTION(OpenTsdbWriter, &OpenTsdbWriter::StatsFunc);
+
+/*
+ * Enable HA capabilities once the config object is loaded.
+ */
+void OpenTsdbWriter::OnConfigLoaded()
+{
+ ObjectImpl<OpenTsdbWriter>::OnConfigLoaded();
+
+ if (!GetEnableHa()) {
+ Log(LogDebug, "OpenTsdbWriter")
+ << "HA functionality disabled. Won't pause connection: " << GetName();
+
+ SetHAMode(HARunEverywhere);
+ } else {
+ SetHAMode(HARunOnce);
+ }
+}
+
+/**
+ * Feature stats interface
+ *
+ * @param status Key value pairs for feature stats
+ */
+void OpenTsdbWriter::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr&)
+{
+ DictionaryData nodes;
+
+ for (const OpenTsdbWriter::Ptr& opentsdbwriter : ConfigType::GetObjectsByType<OpenTsdbWriter>()) {
+ nodes.emplace_back(opentsdbwriter->GetName(), new Dictionary({
+ { "connected", opentsdbwriter->GetConnected() }
+ }));
+ }
+
+ status->Set("opentsdbwriter", new Dictionary(std::move(nodes)));
+}
+
+/**
+ * Resume is equivalent to Start, but with HA capabilities to resume at runtime.
+ */
+void OpenTsdbWriter::Resume()
+{
+ ObjectImpl<OpenTsdbWriter>::Resume();
+
+ Log(LogInformation, "OpenTsdbWriter")
+ << "'" << GetName() << "' resumed.";
+
+ ReadConfigTemplate(m_ServiceConfigTemplate, m_HostConfigTemplate);
+
+ m_ReconnectTimer = Timer::Create();
+ m_ReconnectTimer->SetInterval(10);
+ m_ReconnectTimer->OnTimerExpired.connect([this](const Timer * const&) { ReconnectTimerHandler(); });
+ m_ReconnectTimer->Start();
+ m_ReconnectTimer->Reschedule(0);
+
+ m_HandleCheckResults = Service::OnNewCheckResult.connect([this](const Checkable::Ptr& checkable, const CheckResult::Ptr& cr, const MessageOrigin::Ptr&) {
+ CheckResultHandler(checkable, cr);
+ });
+}
+
+/**
+ * Pause is equivalent to Stop, but with HA capabilities to resume at runtime.
+ */
+void OpenTsdbWriter::Pause()
+{
+ m_HandleCheckResults.disconnect();
+ m_ReconnectTimer->Stop(true);
+
+ Log(LogInformation, "OpenTsdbWriter")
+ << "'" << GetName() << "' paused.";
+
+ m_Stream->close();
+
+ SetConnected(false);
+
+ ObjectImpl<OpenTsdbWriter>::Pause();
+}
+
+/**
+ * Reconnect handler called by the timer.
+ * Establishes the plain TCP connection to the OpenTSDB TSD.
+ */
+void OpenTsdbWriter::ReconnectTimerHandler()
+{
+ if (IsPaused())
+ return;
+
+ SetShouldConnect(true);
+
+ if (GetConnected())
+ return;
+
+ double startTime = Utility::GetTime();
+
+ Log(LogNotice, "OpenTsdbWriter")
+ << "Reconnecting to OpenTSDB TSD on host '" << GetHost() << "' port '" << GetPort() << "'.";
+
+ /*
+ * We're using telnet as the input method. Future PRs may change this to use the HTTP API.
+ * http://opentsdb.net/docs/build/html/user_guide/writing/index.html#telnet
+ */
+ m_Stream = Shared<AsioTcpStream>::Make(IoEngine::Get().GetIoContext());
+
+ try {
+ icinga::Connect(m_Stream->lowest_layer(), GetHost(), GetPort());
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "OpenTsdbWriter")
+ << "Can't connect to OpenTSDB on host '" << GetHost() << "' port '" << GetPort() << "'.";
+
+ SetConnected(false);
+
+ return;
+ }
+
+ SetConnected(true);
+
+ Log(LogInformation, "OpenTsdbWriter")
+ << "Finished reconnecting to OpenTSDB in " << std::setw(2) << Utility::GetTime() - startTime << " second(s).";
+}
+
+/**
+ * Registered check result handler processing data.
+ * Calculates tags from the config.
+ *
+ * @param checkable Host/service object
+ * @param cr Check result
+ */
+void OpenTsdbWriter::CheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr)
+{
+ if (IsPaused())
+ return;
+
+ CONTEXT("Processing check result for '" << checkable->GetName() << "'");
+
+ if (!IcingaApplication::GetInstance()->GetEnablePerfdata() || !checkable->GetEnablePerfdata())
+ return;
+
+ Service::Ptr service = dynamic_pointer_cast<Service>(checkable);
+ Host::Ptr host;
+ Dictionary::Ptr config_tmpl;
+ Dictionary::Ptr config_tmpl_tags;
+ String config_tmpl_metric;
+
+ if (service) {
+ host = service->GetHost();
+ config_tmpl = m_ServiceConfigTemplate;
+ }
+ else {
+ host = static_pointer_cast<Host>(checkable);
+ config_tmpl = m_HostConfigTemplate;
+ }
+
+ // Get the tags nested dictionary in the service/host template in the config
+ if (config_tmpl) {
+ config_tmpl_tags = config_tmpl->Get("tags");
+ config_tmpl_metric = config_tmpl->Get("metric");
+ }
+
+ String metric;
+ std::map<String, String> tags;
+
+ // Resolve macros in configuration template and build custom tag list
+ if (config_tmpl_tags || !config_tmpl_metric.IsEmpty()) {
+
+ // Configure config template macro resolver
+ MacroProcessor::ResolverList resolvers;
+ if (service)
+ resolvers.emplace_back("service", service);
+ resolvers.emplace_back("host", host);
+
+ // Resolve macros for the service and host template config line
+ if (config_tmpl_tags) {
+ ObjectLock olock(config_tmpl_tags);
+
+ for (const Dictionary::Pair& pair : config_tmpl_tags) {
+
+ String missing_macro;
+ Value value = MacroProcessor::ResolveMacros(pair.second, resolvers, cr, &missing_macro);
+
+ if (!missing_macro.IsEmpty()) {
+ Log(LogDebug, "OpenTsdbWriter")
+ << "Unable to resolve macro: '" << missing_macro
+ << "' for this host or service.";
+
+ continue;
+ }
+
+ String tagname = Convert::ToString(pair.first);
+ tags[tagname] = EscapeTag(value);
+
+ }
+ }
+
+ // Resolve macros for the metric config line
+ if (!config_tmpl_metric.IsEmpty()) {
+
+ String missing_macro;
+ Value value = MacroProcessor::ResolveMacros(config_tmpl_metric, resolvers, cr, &missing_macro);
+
+ if (!missing_macro.IsEmpty()) {
+ Log(LogDebug, "OpenTsdbWriter")
+ << "Unable to resolve macro: '" << missing_macro
+ << "' for this host or service.";
+
+ }
+ else {
+
+ config_tmpl_metric = Convert::ToString(value);
+
+ }
+ }
+ }
+
+ String escaped_hostName = EscapeTag(host->GetName());
+ tags["host"] = escaped_hostName;
+
+ double ts = cr->GetExecutionEnd();
+
+ if (service) {
+
+ if (!config_tmpl_metric.IsEmpty()) {
+ metric = config_tmpl_metric;
+ } else {
+ String serviceName = service->GetShortName();
+ String escaped_serviceName = EscapeMetric(serviceName);
+ metric = "icinga.service." + escaped_serviceName;
+ }
+
+ SendMetric(checkable, metric + ".state", tags, service->GetState(), ts);
+
+ } else {
+ if (!config_tmpl_metric.IsEmpty()) {
+ metric = config_tmpl_metric;
+ } else {
+ metric = "icinga.host";
+ }
+ SendMetric(checkable, metric + ".state", tags, host->GetState(), ts);
+ }
+
+ SendMetric(checkable, metric + ".state_type", tags, checkable->GetStateType(), ts);
+ SendMetric(checkable, metric + ".reachable", tags, checkable->IsReachable(), ts);
+ SendMetric(checkable, metric + ".downtime_depth", tags, checkable->GetDowntimeDepth(), ts);
+ SendMetric(checkable, metric + ".acknowledgement", tags, checkable->GetAcknowledgement(), ts);
+
+ SendPerfdata(checkable, metric, tags, cr, ts);
+
+ metric = "icinga.check";
+
+ if (service) {
+ tags["type"] = "service";
+ String serviceName = service->GetShortName();
+ String escaped_serviceName = EscapeTag(serviceName);
+ tags["service"] = escaped_serviceName;
+ } else {
+ tags["type"] = "host";
+ }
+
+ SendMetric(checkable, metric + ".current_attempt", tags, checkable->GetCheckAttempt(), ts);
+ SendMetric(checkable, metric + ".max_check_attempts", tags, checkable->GetMaxCheckAttempts(), ts);
+ SendMetric(checkable, metric + ".latency", tags, cr->CalculateLatency(), ts);
+ SendMetric(checkable, metric + ".execution_time", tags, cr->CalculateExecutionTime(), ts);
+}
+
+/**
+ * Parse and send performance data metrics to OpenTSDB
+ *
+ * @param checkable Host/service object
+ * @param metric Full metric name
+ * @param tags Tag key pairs
+ * @param cr Check result containing performance data
+ * @param ts Timestamp when the check result was received
+ */
+void OpenTsdbWriter::SendPerfdata(const Checkable::Ptr& checkable, const String& metric,
+ const std::map<String, String>& tags, const CheckResult::Ptr& cr, double ts)
+{
+ Array::Ptr perfdata = cr->GetPerformanceData();
+
+ if (!perfdata)
+ return;
+
+ CheckCommand::Ptr checkCommand = checkable->GetCheckCommand();
+
+ ObjectLock olock(perfdata);
+ for (const Value& val : perfdata) {
+ PerfdataValue::Ptr pdv;
+
+ if (val.IsObjectType<PerfdataValue>())
+ pdv = val;
+ else {
+ try {
+ pdv = PerfdataValue::Parse(val);
+ } catch (const std::exception&) {
+ Log(LogWarning, "OpenTsdbWriter")
+ << "Ignoring invalid perfdata for checkable '"
+ << checkable->GetName() << "' and command '"
+ << checkCommand->GetName() << "' with value: " << val;
+ continue;
+ }
+ }
+
+ String metric_name;
+ std::map<String, String> tags_new = tags;
+
+ // Do not break original functionality where perfdata labels form
+ // part of the metric name
+ if (!GetEnableGenericMetrics()) {
+ String escaped_key = EscapeMetric(pdv->GetLabel());
+ boost::algorithm::replace_all(escaped_key, "::", ".");
+ metric_name = metric + "." + escaped_key;
+ } else {
+ String escaped_key = EscapeTag(pdv->GetLabel());
+ metric_name = metric;
+ tags_new["label"] = escaped_key;
+ }
+
+ SendMetric(checkable, metric_name, tags_new, pdv->GetValue(), ts);
+
+ if (!pdv->GetCrit().IsEmpty())
+ SendMetric(checkable, metric_name + "_crit", tags_new, pdv->GetCrit(), ts);
+ if (!pdv->GetWarn().IsEmpty())
+ SendMetric(checkable, metric_name + "_warn", tags_new, pdv->GetWarn(), ts);
+ if (!pdv->GetMin().IsEmpty())
+ SendMetric(checkable, metric_name + "_min", tags_new, pdv->GetMin(), ts);
+ if (!pdv->GetMax().IsEmpty())
+ SendMetric(checkable, metric_name + "_max", tags_new, pdv->GetMax(), ts);
+ }
+}
+
+/**
+ * Send given metric to OpenTSDB
+ *
+ * @param checkable Host/service object
+ * @param metric Full metric name
+ * @param tags Tag key pairs
+ * @param value Floating point metric value
+ * @param ts Timestamp taken from the check result
+ */
+void OpenTsdbWriter::SendMetric(const Checkable::Ptr& checkable, const String& metric,
+ const std::map<String, String>& tags, double value, double ts)
+{
+ String tags_string = "";
+
+ for (const auto& tag : tags) {
+ tags_string += " " + tag.first + "=" + Convert::ToString(tag.second);
+ }
+
+ std::ostringstream msgbuf;
+ /*
+ * must be (http://opentsdb.net/docs/build/html/user_guide/query/timeseries.html)
+ * put <metric> <timestamp> <value> <tagk1=tagv1[ tagk2=tagv2 ...tagkN=tagvN]>
+ * "tags" must include at least one tag, we use "host=HOSTNAME"
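+ * e.g. (illustrative values only): put icinga.host.state 1700000000 0 host=web01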
+ */
+ msgbuf << "put " << metric << " " << static_cast<long>(ts) << " " << Convert::ToString(value) << tags_string;
+
+ Log(LogDebug, "OpenTsdbWriter")
+ << "Checkable '" << checkable->GetName() << "' adds to metric list: '" << msgbuf.str() << "'.";
+
+ /* do not send \n to debug log */
+ msgbuf << "\n";
+ String put = msgbuf.str();
+
+ ObjectLock olock(this);
+
+ if (!GetConnected())
+ return;
+
+ try {
+ Log(LogDebug, "OpenTsdbWriter")
+ << "Checkable '" << checkable->GetName() << "' sending message '" << put << "'.";
+
+ boost::asio::write(*m_Stream, boost::asio::buffer(msgbuf.str()));
+ m_Stream->flush();
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "OpenTsdbWriter")
+ << "Cannot write to TCP socket on host '" << GetHost() << "' port '" << GetPort() << "'.";
+ }
+}
+
+/**
+ * Escape tags for OpenTSDB
+ * http://opentsdb.net/docs/build/html/user_guide/query/timeseries.html#precisions-on-metrics-and-tags
+ *
+ * @param str Tag name
+ * @return Escaped tag
+ */
+String OpenTsdbWriter::EscapeTag(const String& str)
+{
+ String result = str;
+
+ boost::replace_all(result, " ", "_");
+ boost::replace_all(result, "\\", "_");
+ boost::replace_all(result, ":", "_");
+
+ return result;
+}
+
+/**
+ * Escape metric name for OpenTSDB
+ * http://opentsdb.net/docs/build/html/user_guide/query/timeseries.html#precisions-on-metrics-and-tags
+ *
+ * @param str Metric name
+ * @return Escaped metric
+ */
+String OpenTsdbWriter::EscapeMetric(const String& str)
+{
+ String result = str;
+
+ boost::replace_all(result, " ", "_");
+ boost::replace_all(result, ".", "_");
+ boost::replace_all(result, "\\", "_");
+ boost::replace_all(result, ":", "_");
+
+ return result;
+}
+
+/**
+* Saves the template dictionaries defined in the config file into running memory
+*
+* @param stemplate The dictionary to save the service configuration to
+* @param htemplate The dictionary to save the host configuration to
+*/
+void OpenTsdbWriter::ReadConfigTemplate(const Dictionary::Ptr& stemplate,
+ const Dictionary::Ptr& htemplate)
+{
+
+ m_ServiceConfigTemplate = GetServiceTemplate();
+
+ if (!m_ServiceConfigTemplate) {
+ Log(LogDebug, "OpenTsdbWriter")
+ << "Unable to locate service template configuration.";
+ } else if (m_ServiceConfigTemplate->GetLength() == 0) {
+ Log(LogDebug, "OpenTsdbWriter")
+ << "The service template configuration is empty.";
+ }
+
+ m_HostConfigTemplate = GetHostTemplate();
+
+ if (!m_HostConfigTemplate) {
+ Log(LogDebug, "OpenTsdbWriter")
+ << "Unable to locate host template configuration.";
+ } else if (m_HostConfigTemplate->GetLength() == 0) {
+ Log(LogDebug, "OpenTsdbWriter")
+ << "The host template configuration is empty.";
+ }
+
+}
+
+
+/**
+* Validates the host_template configuration block in the configuration
+* file and checks for syntax errors.
+*
+* @param lvalue The host_template dictionary
+* @param utils Validation helper utilities
+*/
+void OpenTsdbWriter::ValidateHostTemplate(const Lazy<Dictionary::Ptr>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<OpenTsdbWriter>::ValidateHostTemplate(lvalue, utils);
+
+ String metric = lvalue()->Get("metric");
+ if (!MacroProcessor::ValidateMacroString(metric))
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "host_template", "metric" }, "Closing $ not found in macro format string '" + metric + "'."));
+
+ Dictionary::Ptr tags = lvalue()->Get("tags");
+ if (tags) {
+ ObjectLock olock(tags);
+ for (const Dictionary::Pair& pair : tags) {
+ if (!MacroProcessor::ValidateMacroString(pair.second))
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "host_template", "tags", pair.first }, "Closing $ not found in macro format string '" + pair.second + "'."));
+ }
+ }
+}
+
+/**
+* Validates the service_template configuration block in the
+* configuration file and checks for syntax errors.
+*
+* @param lvalue The service_template dictionary
+* @param utils Validation helper utilities
+*/
+void OpenTsdbWriter::ValidateServiceTemplate(const Lazy<Dictionary::Ptr>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<OpenTsdbWriter>::ValidateServiceTemplate(lvalue, utils);
+
+ String metric = lvalue()->Get("metric");
+ if (!MacroProcessor::ValidateMacroString(metric))
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "service_template", "metric" }, "Closing $ not found in macro format string '" + metric + "'."));
+
+ Dictionary::Ptr tags = lvalue()->Get("tags");
+ if (tags) {
+ ObjectLock olock(tags);
+ for (const Dictionary::Pair& pair : tags) {
+ if (!MacroProcessor::ValidateMacroString(pair.second))
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "service_template", "tags", pair.first }, "Closing $ not found in macro format string '" + pair.second + "'."));
+ }
+ }
+}
diff --git a/lib/perfdata/opentsdbwriter.hpp b/lib/perfdata/opentsdbwriter.hpp
new file mode 100644
index 0000000..e37ef42
--- /dev/null
+++ b/lib/perfdata/opentsdbwriter.hpp
@@ -0,0 +1,62 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef OPENTSDBWRITER_H
+#define OPENTSDBWRITER_H
+
+#include "perfdata/opentsdbwriter-ti.hpp"
+#include "icinga/service.hpp"
+#include "base/configobject.hpp"
+#include "base/tcpsocket.hpp"
+#include "base/timer.hpp"
+#include <fstream>
+
+namespace icinga
+{
+
+/**
+ * An Icinga OpenTSDB writer.
+ *
+ * @ingroup perfdata
+ */
+class OpenTsdbWriter final : public ObjectImpl<OpenTsdbWriter>
+{
+public:
+ DECLARE_OBJECT(OpenTsdbWriter);
+ DECLARE_OBJECTNAME(OpenTsdbWriter);
+
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+
+ void ValidateHostTemplate(const Lazy<Dictionary::Ptr>& lvalue, const ValidationUtils& utils) override;
+ void ValidateServiceTemplate(const Lazy<Dictionary::Ptr>& lvalue, const ValidationUtils& utils) override;
+
+protected:
+ void OnConfigLoaded() override;
+ void Resume() override;
+ void Pause() override;
+
+private:
+ Shared<AsioTcpStream>::Ptr m_Stream;
+
+ boost::signals2::connection m_HandleCheckResults;
+ Timer::Ptr m_ReconnectTimer;
+
+ Dictionary::Ptr m_ServiceConfigTemplate;
+ Dictionary::Ptr m_HostConfigTemplate;
+
+ void CheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr);
+ void SendMetric(const Checkable::Ptr& checkable, const String& metric,
+ const std::map<String, String>& tags, double value, double ts);
+ void SendPerfdata(const Checkable::Ptr& checkable, const String& metric,
+ const std::map<String, String>& tags, const CheckResult::Ptr& cr, double ts);
+ static String EscapeTag(const String& str);
+ static String EscapeMetric(const String& str);
+
+ void ReconnectTimerHandler();
+
+ void ReadConfigTemplate(const Dictionary::Ptr& stemplate,
+ const Dictionary::Ptr& htemplate);
+};
+
+}
+
+#endif /* OPENTSDBWRITER_H */
diff --git a/lib/perfdata/opentsdbwriter.ti b/lib/perfdata/opentsdbwriter.ti
new file mode 100644
index 0000000..626350a
--- /dev/null
+++ b/lib/perfdata/opentsdbwriter.ti
@@ -0,0 +1,55 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+
+library perfdata;
+
+namespace icinga
+{
+
+class OpenTsdbWriter : ConfigObject
+{
+ activation_priority 100;
+
+ [config] String host {
+ default {{{ return "127.0.0.1"; }}}
+ };
+ [config] String port {
+ default {{{ return "4242"; }}}
+ };
+ [config] bool enable_ha {
+ default {{{ return false; }}}
+ };
+ [config] Dictionary::Ptr host_template {
+ default {{{ return new Dictionary(); }}}
+
+ };
+ [config] Dictionary::Ptr service_template {
+ default {{{ return new Dictionary(); }}}
+ };
+ [config] bool enable_generic_metrics {
+ default {{{ return false; }}}
+ };
+
+ [no_user_modify] bool connected;
+ [no_user_modify] bool should_connect {
+ default {{{ return true; }}}
+ };
+};
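+
+/*
+ * Illustrative sketch only: a user-facing object consuming these attributes
+ * might look roughly like this in the Icinga DSL (the tag name and value are
+ * assumptions, not defaults):
+ *
+ *   object OpenTsdbWriter "opentsdb" {
+ *     host = "127.0.0.1"
+ *     port = "4242"
+ *     host_template = {
+ *       metric = "icinga.host"
+ *       tags = {
+ *         zone = "$host.zone$"
+ *       }
+ *     }
+ *   }
+ */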
+
+validator OpenTsdbWriter {
+ Dictionary host_template {
+ String metric;
+ Dictionary "tags" {
+ String "*";
+ };
+ };
+ Dictionary service_template {
+ String metric;
+ Dictionary "tags" {
+ String "*";
+ };
+ };
+};
+
+}
diff --git a/lib/perfdata/perfdatawriter.cpp b/lib/perfdata/perfdatawriter.cpp
new file mode 100644
index 0000000..849f19e
--- /dev/null
+++ b/lib/perfdata/perfdatawriter.cpp
@@ -0,0 +1,201 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "perfdata/perfdatawriter.hpp"
+#include "perfdata/perfdatawriter-ti.cpp"
+#include "icinga/service.hpp"
+#include "icinga/macroprocessor.hpp"
+#include "icinga/icingaapplication.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+#include "base/convert.hpp"
+#include "base/utility.hpp"
+#include "base/context.hpp"
+#include "base/exception.hpp"
+#include "base/application.hpp"
+#include "base/statsfunction.hpp"
+
+using namespace icinga;
+
+REGISTER_TYPE(PerfdataWriter);
+
+REGISTER_STATSFUNCTION(PerfdataWriter, &PerfdataWriter::StatsFunc);
+
+void PerfdataWriter::OnConfigLoaded()
+{
+ ObjectImpl<PerfdataWriter>::OnConfigLoaded();
+
+ if (!GetEnableHa()) {
+ Log(LogDebug, "PerfdataWriter")
+ << "HA functionality disabled. Won't pause connection: " << GetName();
+
+ SetHAMode(HARunEverywhere);
+ } else {
+ SetHAMode(HARunOnce);
+ }
+}
+
+void PerfdataWriter::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr&)
+{
+ DictionaryData nodes;
+
+ for (const PerfdataWriter::Ptr& perfdatawriter : ConfigType::GetObjectsByType<PerfdataWriter>()) {
+ nodes.emplace_back(perfdatawriter->GetName(), 1); //add more stats
+ }
+
+ status->Set("perfdatawriter", new Dictionary(std::move(nodes)));
+}
+
+void PerfdataWriter::Resume()
+{
+ ObjectImpl<PerfdataWriter>::Resume();
+
+ Log(LogInformation, "PerfdataWriter")
+ << "'" << GetName() << "' resumed.";
+
+ m_HandleCheckResults = Checkable::OnNewCheckResult.connect([this](const Checkable::Ptr& checkable,
+ const CheckResult::Ptr& cr, const MessageOrigin::Ptr&) {
+ CheckResultHandler(checkable, cr);
+ });
+
+ m_RotationTimer = Timer::Create();
+ m_RotationTimer->OnTimerExpired.connect([this](const Timer * const&) { RotationTimerHandler(); });
+ m_RotationTimer->SetInterval(GetRotationInterval());
+ m_RotationTimer->Start();
+
+ RotateFile(m_ServiceOutputFile, GetServiceTempPath(), GetServicePerfdataPath());
+ RotateFile(m_HostOutputFile, GetHostTempPath(), GetHostPerfdataPath());
+}
+
+void PerfdataWriter::Pause()
+{
+ m_HandleCheckResults.disconnect();
+ m_RotationTimer->Stop(true);
+
+#ifdef I2_DEBUG
+ //m_HostOutputFile << "\n# Pause the feature" << "\n\n";
+ //m_ServiceOutputFile << "\n# Pause the feature" << "\n\n";
+#endif /* I2_DEBUG */
+
+ /* Force a rotation closing the file stream. */
+ RotateAllFiles();
+
+ Log(LogInformation, "PerfdataWriter")
+ << "'" << GetName() << "' paused.";
+
+ ObjectImpl<PerfdataWriter>::Pause();
+}
+
+Value PerfdataWriter::EscapeMacroMetric(const Value& value)
+{
+ if (value.IsObjectType<Array>())
+ return Utility::Join(value, ';');
+ else
+ return value;
+}
+
+void PerfdataWriter::CheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr)
+{
+ if (IsPaused())
+ return;
+
+ CONTEXT("Writing performance data for object '" << checkable->GetName() << "'");
+
+ if (!IcingaApplication::GetInstance()->GetEnablePerfdata() || !checkable->GetEnablePerfdata())
+ return;
+
+ Service::Ptr service = dynamic_pointer_cast<Service>(checkable);
+ Host::Ptr host;
+
+ if (service)
+ host = service->GetHost();
+ else
+ host = static_pointer_cast<Host>(checkable);
+
+ MacroProcessor::ResolverList resolvers;
+ if (service)
+ resolvers.emplace_back("service", service);
+ resolvers.emplace_back("host", host);
+
+ if (service) {
+ String line = MacroProcessor::ResolveMacros(GetServiceFormatTemplate(), resolvers, cr, nullptr, &PerfdataWriter::EscapeMacroMetric);
+
+ {
+ std::unique_lock<std::mutex> lock(m_StreamMutex);
+
+ if (!m_ServiceOutputFile.good())
+ return;
+
+ m_ServiceOutputFile << line << "\n";
+ }
+ } else {
+ String line = MacroProcessor::ResolveMacros(GetHostFormatTemplate(), resolvers, cr, nullptr, &PerfdataWriter::EscapeMacroMetric);
+
+ {
+ std::unique_lock<std::mutex> lock(m_StreamMutex);
+
+ if (!m_HostOutputFile.good())
+ return;
+
+ m_HostOutputFile << line << "\n";
+ }
+ }
+}
+
+void PerfdataWriter::RotateFile(std::ofstream& output, const String& temp_path, const String& perfdata_path)
+{
+ Log(LogDebug, "PerfdataWriter")
+ << "Rotating perfdata files.";
+
+ std::unique_lock<std::mutex> lock(m_StreamMutex);
+
+ if (output.good()) {
+ output.close();
+
+ if (Utility::PathExists(temp_path)) {
+ String finalFile = perfdata_path + "." + Convert::ToString((long)Utility::GetTime());
+
+ Log(LogDebug, "PerfdataWriter")
+ << "Closed output file and renaming into '" << finalFile << "'.";
+
+ Utility::RenameFile(temp_path, finalFile);
+ }
+ }
+
+ output.open(temp_path.CStr());
+
+ if (!output.good()) {
+ Log(LogWarning, "PerfdataWriter")
+ << "Could not open perfdata file '" << temp_path << "' for writing. Perfdata will be lost.";
+ }
+}
+
+void PerfdataWriter::RotationTimerHandler()
+{
+ if (IsPaused())
+ return;
+
+ RotateAllFiles();
+}
+
+void PerfdataWriter::RotateAllFiles()
+{
+ RotateFile(m_ServiceOutputFile, GetServiceTempPath(), GetServicePerfdataPath());
+ RotateFile(m_HostOutputFile, GetHostTempPath(), GetHostPerfdataPath());
+}
+
+void PerfdataWriter::ValidateHostFormatTemplate(const Lazy<String>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<PerfdataWriter>::ValidateHostFormatTemplate(lvalue, utils);
+
+ if (!MacroProcessor::ValidateMacroString(lvalue()))
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "host_format_template" }, "Closing $ not found in macro format string '" + lvalue() + "'."));
+}
+
+void PerfdataWriter::ValidateServiceFormatTemplate(const Lazy<String>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<PerfdataWriter>::ValidateServiceFormatTemplate(lvalue, utils);
+
+ if (!MacroProcessor::ValidateMacroString(lvalue()))
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "service_format_template" }, "Closing $ not found in macro format string '" + lvalue() + "'."));
+}
diff --git a/lib/perfdata/perfdatawriter.hpp b/lib/perfdata/perfdatawriter.hpp
new file mode 100644
index 0000000..961d4e9
--- /dev/null
+++ b/lib/perfdata/perfdatawriter.hpp
@@ -0,0 +1,53 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef PERFDATAWRITER_H
+#define PERFDATAWRITER_H
+
+#include "perfdata/perfdatawriter-ti.hpp"
+#include "icinga/service.hpp"
+#include "base/configobject.hpp"
+#include "base/timer.hpp"
+#include <fstream>
+
+namespace icinga
+{
+
+/**
+ * An Icinga perfdata writer.
+ *
+ * @ingroup perfdata
+ */
+class PerfdataWriter final : public ObjectImpl<PerfdataWriter>
+{
+public:
+ DECLARE_OBJECT(PerfdataWriter);
+ DECLARE_OBJECTNAME(PerfdataWriter);
+
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+
+ void ValidateHostFormatTemplate(const Lazy<String>& lvalue, const ValidationUtils& utils) override;
+ void ValidateServiceFormatTemplate(const Lazy<String>& lvalue, const ValidationUtils& utils) override;
+
+protected:
+ void OnConfigLoaded() override;
+ void Resume() override;
+ void Pause() override;
+
+private:
+ boost::signals2::connection m_HandleCheckResults;
+ Timer::Ptr m_RotationTimer;
+ std::ofstream m_ServiceOutputFile;
+ std::ofstream m_HostOutputFile;
+ std::mutex m_StreamMutex;
+
+ void CheckResultHandler(const Checkable::Ptr& checkable, const CheckResult::Ptr& cr);
+ static Value EscapeMacroMetric(const Value& value);
+
+ void RotationTimerHandler();
+ void RotateAllFiles();
+ void RotateFile(std::ofstream& output, const String& temp_path, const String& perfdata_path);
+};
+
+}
+
+#endif /* PERFDATAWRITER_H */
diff --git a/lib/perfdata/perfdatawriter.ti b/lib/perfdata/perfdatawriter.ti
new file mode 100644
index 0000000..d6d99e8
--- /dev/null
+++ b/lib/perfdata/perfdatawriter.ti
@@ -0,0 +1,61 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+#include "base/application.hpp"
+
+library perfdata;
+
+namespace icinga
+{
+
+class PerfdataWriter : ConfigObject
+{
+ activation_priority 100;
+
+ [config] String host_perfdata_path {
+ default {{{ return Configuration::SpoolDir + "/perfdata/host-perfdata"; }}}
+ };
+ [config] String service_perfdata_path {
+ default {{{ return Configuration::SpoolDir + "/perfdata/service-perfdata"; }}}
+ };
+ [config] String host_temp_path {
+ default {{{ return Configuration::SpoolDir + "/tmp/host-perfdata"; }}}
+ };
+ [config] String service_temp_path {
+ default {{{ return Configuration::SpoolDir + "/tmp/service-perfdata"; }}}
+ };
+ [config] String host_format_template {
+ default {{{
+ return "DATATYPE::HOSTPERFDATA\t"
+ "TIMET::$host.last_check$\t"
+ "HOSTNAME::$host.name$\t"
+ "HOSTPERFDATA::$host.perfdata$\t"
+ "HOSTCHECKCOMMAND::$host.check_command$\t"
+ "HOSTSTATE::$host.state$\t"
+ "HOSTSTATETYPE::$host.state_type$";
+ }}}
+ };
+ [config] String service_format_template {
+ default {{{
+ return "DATATYPE::SERVICEPERFDATA\t"
+ "TIMET::$service.last_check$\t"
+ "HOSTNAME::$host.name$\t"
+ "SERVICEDESC::$service.name$\t"
+ "SERVICEPERFDATA::$service.perfdata$\t"
+ "SERVICECHECKCOMMAND::$service.check_command$\t"
+ "HOSTSTATE::$host.state$\t"
+ "HOSTSTATETYPE::$host.state_type$\t"
+ "SERVICESTATE::$service.state$\t"
+ "SERVICESTATETYPE::$service.state_type$";
+ }}}
+ };
+
+ [config] double rotation_interval {
+ default {{{ return 30; }}}
+ };
+ [config] bool enable_ha {
+ default {{{ return false; }}}
+ };
+};
+
+}
diff --git a/lib/pgsql_shim/CMakeLists.txt b/lib/pgsql_shim/CMakeLists.txt
new file mode 100644
index 0000000..327b64a
--- /dev/null
+++ b/lib/pgsql_shim/CMakeLists.txt
@@ -0,0 +1,32 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+link_directories(${PostgreSQL_LIBRARY_DIRS})
+include_directories(${PostgreSQL_INCLUDE_DIRS})
+
+set(pgsql_shim_SOURCES
+ pgsql_shim.def
+ pgsqlinterface.cpp pgsqlinterface.hpp
+)
+
+if(ICINGA2_UNITY_BUILD)
+ mkunity_target(pgsql_shim pgsql_shim pgsql_shim_SOURCES)
+endif()
+
+add_library(pgsql_shim SHARED ${pgsql_shim_SOURCES})
+
+include(GenerateExportHeader)
+generate_export_header(pgsql_shim)
+
+target_link_libraries(pgsql_shim ${PostgreSQL_LIBRARIES})
+
+set_target_properties (
+ pgsql_shim PROPERTIES
+ FOLDER Lib
+ VERSION ${SPEC_VERSION}
+)
+
+install(
+ TARGETS pgsql_shim
+ RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR}
+ LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}/icinga2
+)
diff --git a/lib/pgsql_shim/pgsql_shim.def b/lib/pgsql_shim/pgsql_shim.def
new file mode 100644
index 0000000..7580d67
--- /dev/null
+++ b/lib/pgsql_shim/pgsql_shim.def
@@ -0,0 +1,3 @@
+LIBRARY pgsql_shim
+EXPORTS
+ create_pgsql_shim
diff --git a/lib/pgsql_shim/pgsqlinterface.cpp b/lib/pgsql_shim/pgsqlinterface.cpp
new file mode 100644
index 0000000..95b6e7d
--- /dev/null
+++ b/lib/pgsql_shim/pgsqlinterface.cpp
@@ -0,0 +1,108 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "pgsql_shim/pgsqlinterface.hpp"
+
+using namespace icinga;
+
+struct PgsqlInterfaceImpl final : public PgsqlInterface
+{
+ void Destroy() override
+ {
+ delete this;
+ }
+
+ void clear(PGresult *res) const override
+ {
+ PQclear(res);
+ }
+
+ char *cmdTuples(PGresult *res) const override
+ {
+ return PQcmdTuples(res);
+ }
+
+ char *errorMessage(const PGconn *conn) const override
+ {
+ return PQerrorMessage(conn);
+ }
+
+ size_t escapeStringConn(PGconn *conn, char *to, const char *from, size_t length, int *error) const override
+ {
+ return PQescapeStringConn(conn, to, from, length, error);
+ }
+
+ PGresult *exec(PGconn *conn, const char *query) const override
+ {
+ return PQexec(conn, query);
+ }
+
+ void finish(PGconn *conn) const override
+ {
+ PQfinish(conn);
+ }
+
+ char *fname(const PGresult *res, int field_num) const override
+ {
+ return PQfname(res, field_num);
+ }
+
+ int getisnull(const PGresult *res, int tup_num, int field_num) const override
+ {
+ return PQgetisnull(res, tup_num, field_num);
+ }
+
+ char *getvalue(const PGresult *res, int tup_num, int field_num) const override
+ {
+ return PQgetvalue(res, tup_num, field_num);
+ }
+
+ int isthreadsafe() const override
+ {
+ return PQisthreadsafe();
+ }
+
+ int nfields(const PGresult *res) const override
+ {
+ return PQnfields(res);
+ }
+
+ int ntuples(const PGresult *res) const override
+ {
+ return PQntuples(res);
+ }
+
+ char *resultErrorMessage(const PGresult *res) const override
+ {
+ return PQresultErrorMessage(res);
+ }
+
+ ExecStatusType resultStatus(const PGresult *res) const override
+ {
+ return PQresultStatus(res);
+ }
+
+ int serverVersion(const PGconn *conn) const override
+ {
+ return PQserverVersion(conn);
+ }
+
+ PGconn *setdbLogin(const char *pghost, const char *pgport, const char *pgoptions, const char *pgtty, const char *dbName, const char *login, const char *pwd) const override
+ {
+ return PQsetdbLogin(pghost, pgport, pgoptions, pgtty, dbName, login, pwd);
+ }
+
+ PGconn *connectdb(const char *conninfo) const override
+ {
+ return PQconnectdb(conninfo);
+ }
+
+ ConnStatusType status(const PGconn *conn) const override
+ {
+ return PQstatus(conn);
+ }
+};
+
+PgsqlInterface *create_pgsql_shim()
+{
+ return new PgsqlInterfaceImpl();
+}
diff --git a/lib/pgsql_shim/pgsqlinterface.hpp b/lib/pgsql_shim/pgsqlinterface.hpp
new file mode 100644
index 0000000..2fe3303
--- /dev/null
+++ b/lib/pgsql_shim/pgsqlinterface.hpp
@@ -0,0 +1,61 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef PGSQLINTERFACE_H
+#define PGSQLINTERFACE_H
+
+#include "pgsql_shim/pgsql_shim_export.h"
+#include <memory>
+#include <libpq-fe.h>
+
+namespace icinga
+{
+
+struct PgsqlInterface
+{
+ PgsqlInterface(const PgsqlInterface&) = delete;
+ PgsqlInterface& operator=(PgsqlInterface&) = delete;
+
+ virtual void Destroy() = 0;
+
+ virtual void clear(PGresult *res) const = 0;
+ virtual char *cmdTuples(PGresult *res) const = 0;
+ virtual char *errorMessage(const PGconn *conn) const = 0;
+ virtual size_t escapeStringConn(PGconn *conn, char *to, const char *from, size_t length, int *error) const = 0;
+ virtual PGresult *exec(PGconn *conn, const char *query) const = 0;
+ virtual void finish(PGconn *conn) const = 0;
+ virtual char *fname(const PGresult *res, int field_num) const = 0;
+ virtual int getisnull(const PGresult *res, int tup_num, int field_num) const = 0;
+ virtual char *getvalue(const PGresult *res, int tup_num, int field_num) const = 0;
+ virtual int isthreadsafe() const = 0;
+ virtual int nfields(const PGresult *res) const = 0;
+ virtual int ntuples(const PGresult *res) const = 0;
+ virtual char *resultErrorMessage(const PGresult *res) const = 0;
+ virtual ExecStatusType resultStatus(const PGresult *res) const = 0;
+ virtual int serverVersion(const PGconn *conn) const = 0;
+ virtual PGconn *setdbLogin(const char *pghost, const char *pgport, const char *pgoptions, const char *pgtty, const char *dbName, const char *login, const char *pwd) const = 0;
+ virtual PGconn *connectdb(const char *conninfo) const = 0;
+ virtual ConnStatusType status(const PGconn *conn) const = 0;
+
+protected:
+ PgsqlInterface() = default;
+ ~PgsqlInterface() = default;
+};
+
+struct PgsqlInterfaceDeleter
+{
+ void operator()(PgsqlInterface *ifc) const
+ {
+ ifc->Destroy();
+ }
+};
+
+}
+
+extern "C"
+{
+ PGSQL_SHIM_EXPORT icinga::PgsqlInterface *create_pgsql_shim();
+}
+
+typedef icinga::PgsqlInterface *(*create_pgsql_shim_ptr)();
+
+#endif /* PGSQLINTERFACE_H */
diff --git a/lib/remote/CMakeLists.txt b/lib/remote/CMakeLists.txt
new file mode 100644
index 0000000..740b112
--- /dev/null
+++ b/lib/remote/CMakeLists.txt
@@ -0,0 +1,67 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+mkclass_target(apilistener.ti apilistener-ti.cpp apilistener-ti.hpp)
+mkclass_target(apiuser.ti apiuser-ti.cpp apiuser-ti.hpp)
+mkclass_target(endpoint.ti endpoint-ti.cpp endpoint-ti.hpp)
+mkclass_target(zone.ti zone-ti.cpp zone-ti.hpp)
+
+set(remote_SOURCES
+ i2-remote.hpp
+ actionshandler.cpp actionshandler.hpp
+ apiaction.cpp apiaction.hpp
+ apifunction.cpp apifunction.hpp
+ apilistener.cpp apilistener.hpp apilistener-ti.hpp apilistener-configsync.cpp apilistener-filesync.cpp
+ apilistener-authority.cpp
+ apiuser.cpp apiuser.hpp apiuser-ti.hpp
+ configfileshandler.cpp configfileshandler.hpp
+ configobjectslock.cpp configobjectslock.hpp
+ configobjectutility.cpp configobjectutility.hpp
+ configpackageshandler.cpp configpackageshandler.hpp
+ configpackageutility.cpp configpackageutility.hpp
+ configstageshandler.cpp configstageshandler.hpp
+ consolehandler.cpp consolehandler.hpp
+ createobjecthandler.cpp createobjecthandler.hpp
+ deleteobjecthandler.cpp deleteobjecthandler.hpp
+ endpoint.cpp endpoint.hpp endpoint-ti.hpp
+ eventqueue.cpp eventqueue.hpp
+ eventshandler.cpp eventshandler.hpp
+ filterutility.cpp filterutility.hpp
+ httphandler.cpp httphandler.hpp
+ httpserverconnection.cpp httpserverconnection.hpp
+ httputility.cpp httputility.hpp
+ infohandler.cpp infohandler.hpp
+ jsonrpc.cpp jsonrpc.hpp
+ jsonrpcconnection.cpp jsonrpcconnection.hpp jsonrpcconnection-heartbeat.cpp jsonrpcconnection-pki.cpp
+ messageorigin.cpp messageorigin.hpp
+ modifyobjecthandler.cpp modifyobjecthandler.hpp
+ objectqueryhandler.cpp objectqueryhandler.hpp
+ pkiutility.cpp pkiutility.hpp
+ statushandler.cpp statushandler.hpp
+ templatequeryhandler.cpp templatequeryhandler.hpp
+ typequeryhandler.cpp typequeryhandler.hpp
+ url.cpp url.hpp url-characters.hpp
+ variablequeryhandler.cpp variablequeryhandler.hpp
+ zone.cpp zone.hpp zone-ti.hpp
+)
+
+if(ICINGA2_UNITY_BUILD)
+ mkunity_target(remote remote remote_SOURCES)
+endif()
+
+add_library(remote OBJECT ${remote_SOURCES})
+
+add_dependencies(remote base config)
+
+set_target_properties (
+ remote PROPERTIES
+ FOLDER Lib
+)
+
+#install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_DATADIR}/api\")")
+install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_DATADIR}/api/log\")")
+install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_DATADIR}/api/zones\")")
+install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_DATADIR}/api/zones-stage\")")
+install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_DATADIR}/certs\")")
+install(CODE "file(MAKE_DIRECTORY \"\$ENV{DESTDIR}${ICINGA2_FULL_DATADIR}/certificate-requests\")")
+
+
diff --git a/lib/remote/actionshandler.cpp b/lib/remote/actionshandler.cpp
new file mode 100644
index 0000000..016c76d
--- /dev/null
+++ b/lib/remote/actionshandler.cpp
@@ -0,0 +1,145 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/actionshandler.hpp"
+#include "remote/httputility.hpp"
+#include "remote/filterutility.hpp"
+#include "remote/apiaction.hpp"
+#include "base/defer.hpp"
+#include "base/exception.hpp"
+#include "base/logger.hpp"
+#include <set>
+
+using namespace icinga;
+
+thread_local ApiUser::Ptr ActionsHandler::AuthenticatedApiUser;
+
+REGISTER_URLHANDLER("/v1/actions", ActionsHandler);
+
+bool ActionsHandler::HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+)
+{
+ namespace http = boost::beast::http;
+
+ if (url->GetPath().size() != 3)
+ return false;
+
+ if (request.method() != http::verb::post)
+ return false;
+
+ String actionName = url->GetPath()[2];
+
+ ApiAction::Ptr action = ApiAction::GetByName(actionName);
+
+ if (!action) {
+ HttpUtility::SendJsonError(response, params, 404, "Action '" + actionName + "' does not exist.");
+ return true;
+ }
+
+ QueryDescription qd;
+
+ const std::vector<String>& types = action->GetTypes();
+ std::vector<Value> objs;
+
+ String permission = "actions/" + actionName;
+
+ if (!types.empty()) {
+ qd.Types = std::set<String>(types.begin(), types.end());
+ qd.Permission = permission;
+
+ try {
+ objs = FilterUtility::GetFilterTargets(qd, params, user);
+ } catch (const std::exception& ex) {
+ HttpUtility::SendJsonError(response, params, 404,
+ "No objects found.",
+ DiagnosticInformation(ex));
+ return true;
+ }
+
+ if (objs.empty()) {
+ HttpUtility::SendJsonError(response, params, 404,
+ "No objects found.");
+ return true;
+ }
+ } else {
+ FilterUtility::CheckPermission(user, permission);
+ objs.emplace_back(nullptr);
+ }
+
+ ArrayData results;
+
+ Log(LogNotice, "ApiActionHandler")
+ << "Running action " << actionName;
+
+ bool verbose = false;
+
+ ActionsHandler::AuthenticatedApiUser = user;
+ Defer a ([]() {
+ ActionsHandler::AuthenticatedApiUser = nullptr;
+ });
+
+ if (params)
+ verbose = HttpUtility::GetLastParameter(params, "verbose");
+
+ for (const ConfigObject::Ptr& obj : objs) {
+ try {
+ results.emplace_back(action->Invoke(obj, params));
+ } catch (const std::exception& ex) {
+ Dictionary::Ptr fail = new Dictionary({
+ { "code", 500 },
+ { "status", "Action execution failed: '" + DiagnosticInformation(ex, false) + "'." }
+ });
+
+ /* Exception for actions. Normally we would handle this inside SendJsonError(). */
+ if (verbose)
+ fail->Set("diagnostic_information", DiagnosticInformation(ex));
+
+ results.emplace_back(std::move(fail));
+ }
+ }
+
+ int statusCode = 500;
+ std::set<int> okStatusCodes, nonOkStatusCodes;
+
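+ /*
+ * Collapse the per-object result codes into one HTTP status code:
+ * a single distinct 2xx code is used as-is, a single distinct non-2xx code
+ * wins over any successes, and several distinct 2xx codes (with no failures)
+ * collapse to 200. Anything else keeps the default 500.
+ */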
+ for (const Dictionary::Ptr& res : results) {
+ if (!res->Contains("code")) {
+ continue;
+ }
+
+ auto code = res->Get("code");
+
+ if (code >= 200 && code <= 299) {
+ okStatusCodes.insert(code);
+ } else {
+ nonOkStatusCodes.insert(code);
+ }
+ }
+
+ size_t okSize = okStatusCodes.size();
+ size_t nonOkSize = nonOkStatusCodes.size();
+
+ if (okSize == 1u && nonOkSize == 0u) {
+ statusCode = *okStatusCodes.begin();
+ } else if (nonOkSize == 1u) {
+ statusCode = *nonOkStatusCodes.begin();
+ } else if (okSize >= 2u && nonOkSize == 0u) {
+ statusCode = 200;
+ }
+
+ response.result(statusCode);
+
+ Dictionary::Ptr result = new Dictionary({
+ { "results", new Array(std::move(results)) }
+ });
+
+ HttpUtility::SendJsonBody(response, params, result);
+
+ return true;
+}
diff --git a/lib/remote/actionshandler.hpp b/lib/remote/actionshandler.hpp
new file mode 100644
index 0000000..ca662ca
--- /dev/null
+++ b/lib/remote/actionshandler.hpp
@@ -0,0 +1,32 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef ACTIONSHANDLER_H
+#define ACTIONSHANDLER_H
+
+#include "remote/httphandler.hpp"
+
+namespace icinga
+{
+
+class ActionsHandler final : public HttpHandler
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ActionsHandler);
+
+ static thread_local ApiUser::Ptr AuthenticatedApiUser;
+
+ bool HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+ ) override;
+};
+
+}
+
+#endif /* ACTIONSHANDLER_H */
diff --git a/lib/remote/apiaction.cpp b/lib/remote/apiaction.cpp
new file mode 100644
index 0000000..4da91f0
--- /dev/null
+++ b/lib/remote/apiaction.cpp
@@ -0,0 +1,40 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/apiaction.hpp"
+#include "base/singleton.hpp"
+
+using namespace icinga;
+
+ApiAction::ApiAction(std::vector<String> types, Callback action)
+ : m_Types(std::move(types)), m_Callback(std::move(action))
+{ }
+
+Value ApiAction::Invoke(const ConfigObject::Ptr& target, const Dictionary::Ptr& params)
+{
+ return m_Callback(target, params);
+}
+
+const std::vector<String>& ApiAction::GetTypes() const
+{
+ return m_Types;
+}
+
+ApiAction::Ptr ApiAction::GetByName(const String& name)
+{
+ return ApiActionRegistry::GetInstance()->GetItem(name);
+}
+
+void ApiAction::Register(const String& name, const ApiAction::Ptr& action)
+{
+ ApiActionRegistry::GetInstance()->Register(name, action);
+}
+
+void ApiAction::Unregister(const String& name)
+{
+ ApiActionRegistry::GetInstance()->Unregister(name);
+}
+
+ApiActionRegistry *ApiActionRegistry::GetInstance()
+{
+ return Singleton<ApiActionRegistry>::GetInstance();
+}
diff --git a/lib/remote/apiaction.hpp b/lib/remote/apiaction.hpp
new file mode 100644
index 0000000..f2719c1
--- /dev/null
+++ b/lib/remote/apiaction.hpp
@@ -0,0 +1,69 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef APIACTION_H
+#define APIACTION_H
+
+#include "remote/i2-remote.hpp"
+#include "base/registry.hpp"
+#include "base/value.hpp"
+#include "base/dictionary.hpp"
+#include "base/configobject.hpp"
+#include <vector>
+#include <boost/algorithm/string/replace.hpp>
+
+namespace icinga
+{
+
+/**
+ * An action available over the external HTTP API.
+ *
+ * @ingroup remote
+ */
+class ApiAction final : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ApiAction);
+
+ typedef std::function<Value(const ConfigObject::Ptr& target, const Dictionary::Ptr& params)> Callback;
+
+ ApiAction(std::vector<String> registerTypes, Callback function);
+
+ Value Invoke(const ConfigObject::Ptr& target, const Dictionary::Ptr& params);
+
+ const std::vector<String>& GetTypes() const;
+
+ static ApiAction::Ptr GetByName(const String& name);
+ static void Register(const String& name, const ApiAction::Ptr& action);
+ static void Unregister(const String& name);
+
+private:
+ std::vector<String> m_Types;
+ Callback m_Callback;
+};
+
+/**
+ * A registry for API actions.
+ *
+ * @ingroup remote
+ */
+class ApiActionRegistry : public Registry<ApiActionRegistry, ApiAction::Ptr>
+{
+public:
+ static ApiActionRegistry *GetInstance();
+};
+
+#define REGISTER_APIACTION(name, types, callback) \
+ INITIALIZE_ONCE([]() { \
+ String registerName = #name; \
+ boost::algorithm::replace_all(registerName, "_", "-"); \
+ std::vector<String> registerTypes; \
+ String typeNames = types; \
+ if (!typeNames.IsEmpty()) \
+ registerTypes = typeNames.Split(";"); \
+ ApiAction::Ptr action = new ApiAction(registerTypes, callback); \
+ ApiActionRegistry::GetInstance()->Register(registerName, action); \
+ })
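+
+/*
+ * Usage sketch (the names here are illustrative, not an existing registration):
+ *
+ *   REGISTER_APIACTION(my_action, "Host;Service", &MyActions::MyAction);
+ *
+ * Underscores in the name become dashes and the semicolon-separated type list
+ * restricts the filter targets, so the action is exposed as
+ * /v1/actions/my-action for Host and Service objects (see actionshandler.cpp).
+ */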
+
+}
+
+#endif /* APIACTION_H */
diff --git a/lib/remote/apifunction.cpp b/lib/remote/apifunction.cpp
new file mode 100644
index 0000000..5b855cc
--- /dev/null
+++ b/lib/remote/apifunction.cpp
@@ -0,0 +1,35 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/apifunction.hpp"
+#include "base/singleton.hpp"
+
+using namespace icinga;
+
+ApiFunction::ApiFunction(Callback function)
+ : m_Callback(std::move(function))
+{ }
+
+Value ApiFunction::Invoke(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& arguments)
+{
+ return m_Callback(origin, arguments);
+}
+
+ApiFunction::Ptr ApiFunction::GetByName(const String& name)
+{
+ return ApiFunctionRegistry::GetInstance()->GetItem(name);
+}
+
+void ApiFunction::Register(const String& name, const ApiFunction::Ptr& function)
+{
+ ApiFunctionRegistry::GetInstance()->Register(name, function);
+}
+
+void ApiFunction::Unregister(const String& name)
+{
+ ApiFunctionRegistry::GetInstance()->Unregister(name);
+}
+
+ApiFunctionRegistry *ApiFunctionRegistry::GetInstance()
+{
+ return Singleton<ApiFunctionRegistry>::GetInstance();
+}
diff --git a/lib/remote/apifunction.hpp b/lib/remote/apifunction.hpp
new file mode 100644
index 0000000..e611320
--- /dev/null
+++ b/lib/remote/apifunction.hpp
@@ -0,0 +1,59 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef APIFUNCTION_H
+#define APIFUNCTION_H
+
+#include "remote/i2-remote.hpp"
+#include "remote/messageorigin.hpp"
+#include "base/registry.hpp"
+#include "base/value.hpp"
+#include "base/dictionary.hpp"
+#include <vector>
+
+namespace icinga
+{
+
+/**
+ * A function available over the internal cluster API.
+ *
+ * @ingroup base
+ */
+class ApiFunction final : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ApiFunction);
+
+ typedef std::function<Value(const MessageOrigin::Ptr& origin, const Dictionary::Ptr&)> Callback;
+
+ ApiFunction(Callback function);
+
+ Value Invoke(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& arguments);
+
+ static ApiFunction::Ptr GetByName(const String& name);
+ static void Register(const String& name, const ApiFunction::Ptr& function);
+ static void Unregister(const String& name);
+
+private:
+ Callback m_Callback;
+};
+
+/**
+ * A registry for API functions.
+ *
+ * @ingroup base
+ */
+class ApiFunctionRegistry : public Registry<ApiFunctionRegistry, ApiFunction::Ptr>
+{
+public:
+ static ApiFunctionRegistry *GetInstance();
+};
+
+#define REGISTER_APIFUNCTION(name, ns, callback) \
+ INITIALIZE_ONCE([]() { \
+ ApiFunction::Ptr func = new ApiFunction(callback); \
+ ApiFunctionRegistry::GetInstance()->Register(#ns "::" #name, func); \
+ })
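+
+/*
+ * Example taken from apilistener-configsync.cpp:
+ *
+ *   REGISTER_APIFUNCTION(UpdateObject, config, &ApiListener::ConfigUpdateObjectAPIHandler);
+ *
+ * This registers the callback under the cluster message name "config::UpdateObject".
+ */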
+
+}
+
+#endif /* APIFUNCTION_H */
diff --git a/lib/remote/apilistener-authority.cpp b/lib/remote/apilistener-authority.cpp
new file mode 100644
index 0000000..f33a190
--- /dev/null
+++ b/lib/remote/apilistener-authority.cpp
@@ -0,0 +1,84 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/zone.hpp"
+#include "remote/apilistener.hpp"
+#include "base/configtype.hpp"
+#include "base/utility.hpp"
+#include "base/convert.hpp"
+
+using namespace icinga;
+
+std::atomic<bool> ApiListener::m_UpdatedObjectAuthority (false);
+
+void ApiListener::UpdateObjectAuthority()
+{
+ /* Always run this, even if there is no 'api' feature enabled. */
+ if (auto listener = ApiListener::GetInstance()) {
+ Log(LogNotice, "ApiListener")
+ << "Updating object authority for objects at endpoint '" << listener->GetIdentity() << "'.";
+ } else {
+ Log(LogNotice, "ApiListener")
+ << "Updating object authority for local objects.";
+ }
+
+ Zone::Ptr my_zone = Zone::GetLocalZone();
+
+ std::vector<Endpoint::Ptr> endpoints;
+ Endpoint::Ptr my_endpoint;
+
+ if (my_zone) {
+ my_endpoint = Endpoint::GetLocalEndpoint();
+
+ int num_total = 0;
+
+ for (const Endpoint::Ptr& endpoint : my_zone->GetEndpoints()) {
+ num_total++;
+
+ if (endpoint != my_endpoint && !endpoint->GetConnected())
+ continue;
+
+ endpoints.push_back(endpoint);
+ }
+
+ double startTime = Application::GetStartTime();
+
+ /* 30 seconds cold startup, don't update any authority to give the secondary endpoint time to reconnect. */
+ if (num_total > 1 && endpoints.size() <= 1 && (startTime == 0 || Utility::GetTime() - startTime < 30))
+ return;
+
+ std::sort(endpoints.begin(), endpoints.end(),
+ [](const ConfigObject::Ptr& a, const ConfigObject::Ptr& b) {
+ return a->GetName() < b->GetName();
+ }
+ );
+ }
+
+ for (const Type::Ptr& type : Type::GetAllTypes()) {
+ auto *dtype = dynamic_cast<ConfigType *>(type.get());
+
+ if (!dtype)
+ continue;
+
+ for (const ConfigObject::Ptr& object : dtype->GetObjects()) {
+ if (!object->IsActive() || object->GetHAMode() != HARunOnce)
+ continue;
+
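+ /* Without a local zone every object is authoritative here; otherwise
+ * HARunOnce objects are distributed across the connected zone members
+ * by hashing the object name. */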
+ bool authority;
+
+ if (!my_zone)
+ authority = true;
+ else
+ authority = endpoints[Utility::SDBM(object->GetName()) % endpoints.size()] == my_endpoint;
+
+#ifdef I2_DEBUG
+// //Enable on demand, causes heavy logging on each run.
+// Log(LogDebug, "ApiListener")
+// << "Setting authority '" << Convert::ToString(authority) << "' for object '" << object->GetName() << "' of type '" << object->GetReflectionType()->GetName() << "'.";
+#endif /* I2_DEBUG */
+
+ object->SetAuthority(authority);
+ }
+ }
+
+ m_UpdatedObjectAuthority.store(true);
+}
diff --git a/lib/remote/apilistener-configsync.cpp b/lib/remote/apilistener-configsync.cpp
new file mode 100644
index 0000000..a12db0b
--- /dev/null
+++ b/lib/remote/apilistener-configsync.cpp
@@ -0,0 +1,464 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/apilistener.hpp"
+#include "remote/apifunction.hpp"
+#include "remote/configobjectutility.hpp"
+#include "remote/jsonrpc.hpp"
+#include "base/configtype.hpp"
+#include "base/json.hpp"
+#include "base/convert.hpp"
+#include "config/vmops.hpp"
+#include <fstream>
+
+using namespace icinga;
+
+REGISTER_APIFUNCTION(UpdateObject, config, &ApiListener::ConfigUpdateObjectAPIHandler);
+REGISTER_APIFUNCTION(DeleteObject, config, &ApiListener::ConfigDeleteObjectAPIHandler);
+
+INITIALIZE_ONCE([]() {
+ ConfigObject::OnActiveChanged.connect(&ApiListener::ConfigUpdateObjectHandler);
+ ConfigObject::OnVersionChanged.connect(&ApiListener::ConfigUpdateObjectHandler);
+});
+
+void ApiListener::ConfigUpdateObjectHandler(const ConfigObject::Ptr& object, const Value& cookie)
+{
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return;
+
+ if (object->IsActive()) {
+ /* Sync object config */
+ listener->UpdateConfigObject(object, cookie);
+ } else if (!object->IsActive() && object->GetExtension("ConfigObjectDeleted")) {
+ /* Delete object */
+ listener->DeleteConfigObject(object, cookie);
+ }
+}
+
+Value ApiListener::ConfigUpdateObjectAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ Log(LogNotice, "ApiListener")
+ << "Received config update for object: " << JsonEncode(params);
+
+ /* check permissions */
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return Empty;
+
+ String objType = params->Get("type");
+ String objName = params->Get("name");
+
+ Endpoint::Ptr endpoint = origin->FromClient->GetEndpoint();
+
+ String identity = origin->FromClient->GetIdentity();
+
+ /* discard messages if the client is not configured on this node */
+ if (!endpoint) {
+ Log(LogNotice, "ApiListener")
+ << "Discarding 'config update object' message from '" << identity << "': Invalid endpoint origin (client not allowed).";
+ return Empty;
+ }
+
+ Zone::Ptr endpointZone = endpoint->GetZone();
+
+ /* discard messages if the sender is in a child zone */
+ if (!Zone::GetLocalZone()->IsChildOf(endpointZone)) {
+ Log(LogNotice, "ApiListener")
+ << "Discarding 'config update object' message"
+ << " from '" << identity << "' (endpoint: '" << endpoint->GetName() << "', zone: '" << endpointZone->GetName() << "')"
+ << " for object '" << objName << "' of type '" << objType << "'. Sender is in a child zone.";
+ return Empty;
+ }
+
+ String objZone = params->Get("zone");
+
+ if (!objZone.IsEmpty() && !Zone::GetByName(objZone)) {
+ Log(LogNotice, "ApiListener")
+ << "Discarding 'config update object' message"
+ << " from '" << identity << "' (endpoint: '" << endpoint->GetName() << "', zone: '" << endpointZone->GetName() << "')"
+			<< " for object '" << objName << "' of type '" << objType << "'. Object's zone '" << objZone << "' isn't known locally.";
+ return Empty;
+ }
+
+ /* ignore messages if the endpoint does not accept config */
+ if (!listener->GetAcceptConfig()) {
+ Log(LogWarning, "ApiListener")
+ << "Ignoring config update"
+ << " from '" << identity << "' (endpoint: '" << endpoint->GetName() << "', zone: '" << endpointZone->GetName() << "')"
+ << " for object '" << objName << "' of type '" << objType << "'. '" << listener->GetName() << "' does not accept config.";
+ return Empty;
+ }
+
+ /* update the object */
+ double objVersion = params->Get("version");
+
+ Type::Ptr ptype = Type::GetByName(objType);
+ auto *ctype = dynamic_cast<ConfigType *>(ptype.get());
+
+ if (!ctype) {
+ // This never happens with icinga cluster endpoints, only with development errors.
+ Log(LogCritical, "ApiListener")
+ << "Config type '" << objType << "' does not exist.";
+ return Empty;
+ }
+
+ ConfigObject::Ptr object = ctype->GetObject(objName);
+
+ String config = params->Get("config");
+
+ bool newObject = false;
+
+ if (!object && !config.IsEmpty()) {
+ newObject = true;
+
+ /* object does not exist, create it through the API */
+ Array::Ptr errors = new Array();
+
+ /*
+ * Create the config object through our internal API.
+ * IMPORTANT: Pass the origin to prevent cluster sync loops.
+ */
+ if (!ConfigObjectUtility::CreateObject(ptype, objName, config, errors, nullptr, origin)) {
+ Log(LogCritical, "ApiListener")
+ << "Could not create object '" << objName << "':";
+
+ ObjectLock olock(errors);
+ for (const String& error : errors) {
+ Log(LogCritical, "ApiListener", error);
+ }
+
+ return Empty;
+ }
+
+ object = ctype->GetObject(objName);
+
+ if (!object)
+ return Empty;
+
+ /* object was created, update its version */
+ object->SetVersion(objVersion, false, origin);
+ }
+
+ if (!object)
+ return Empty;
+
+	/* Discard the update unless the received version is newer than the local one; for an object just created above there is nothing further to update. */
+ if (newObject || objVersion <= object->GetVersion()) {
+ Log(LogNotice, "ApiListener")
+ << "Discarding config update"
+ << " from '" << identity << "' (endpoint: '" << endpoint->GetName() << "', zone: '" << endpointZone->GetName() << "')"
+ << " for object '" << object->GetName()
+ << "': Object version " << std::fixed << object->GetVersion()
+ << " is more recent than the received version " << std::fixed << objVersion << ".";
+
+ return Empty;
+ }
+
+ Log(LogNotice, "ApiListener")
+ << "Processing config update"
+ << " from '" << identity << "' (endpoint: '" << endpoint->GetName() << "', zone: '" << endpointZone->GetName() << "')"
+ << " for object '" << object->GetName()
+ << "': Object version " << object->GetVersion()
+ << " is older than the received version " << objVersion << ".";
+
+ Dictionary::Ptr modified_attributes = params->Get("modified_attributes");
+
+ if (modified_attributes) {
+ ObjectLock olock(modified_attributes);
+ for (const Dictionary::Pair& kv : modified_attributes) {
+ /* update all modified attributes
+ * but do not update the object version yet.
+ * This triggers cluster events otherwise.
+ */
+ object->ModifyAttribute(kv.first, kv.second, false);
+ }
+ }
+
+ /* check whether original attributes changed and restore them locally */
+ Array::Ptr newOriginalAttributes = params->Get("original_attributes");
+ Dictionary::Ptr objOriginalAttributes = object->GetOriginalAttributes();
+
+ if (newOriginalAttributes && objOriginalAttributes) {
+ std::vector<String> restoreAttrs;
+
+ {
+ ObjectLock xlock(objOriginalAttributes);
+ for (const Dictionary::Pair& kv : objOriginalAttributes) {
+ /* original attribute was removed, restore it */
+ if (!newOriginalAttributes->Contains(kv.first))
+ restoreAttrs.push_back(kv.first);
+ }
+ }
+
+ for (const String& key : restoreAttrs) {
+ /* do not update the object version yet. */
+ object->RestoreAttribute(key, false);
+ }
+ }
+
+ /* keep the object version in sync with the sender */
+ object->SetVersion(objVersion, false, origin);
+
+ return Empty;
+}
+
+Value ApiListener::ConfigDeleteObjectAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ Log(LogNotice, "ApiListener")
+ << "Received config delete for object: " << JsonEncode(params);
+
+ /* check permissions */
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return Empty;
+
+ String objType = params->Get("type");
+ String objName = params->Get("name");
+
+ Endpoint::Ptr endpoint = origin->FromClient->GetEndpoint();
+
+ String identity = origin->FromClient->GetIdentity();
+
+ if (!endpoint) {
+ Log(LogNotice, "ApiListener")
+ << "Discarding 'config delete object' message from '" << identity << "': Invalid endpoint origin (client not allowed).";
+ return Empty;
+ }
+
+ Zone::Ptr endpointZone = endpoint->GetZone();
+
+ /* discard messages if the sender is in a child zone */
+ if (!Zone::GetLocalZone()->IsChildOf(endpointZone)) {
+ Log(LogNotice, "ApiListener")
+ << "Discarding 'config delete object' message"
+ << " from '" << identity << "' (endpoint: '" << endpoint->GetName() << "', zone: '" << endpointZone->GetName() << "')"
+ << " for object '" << objName << "' of type '" << objType << "'. Sender is in a child zone.";
+ return Empty;
+ }
+
+ if (!listener->GetAcceptConfig()) {
+ Log(LogWarning, "ApiListener")
+ << "Ignoring config delete"
+ << " from '" << identity << "' (endpoint: '" << endpoint->GetName() << "', zone: '" << endpointZone->GetName() << "')"
+ << " for object '" << objName << "' of type '" << objType << "'. '" << listener->GetName() << "' does not accept config.";
+ return Empty;
+ }
+
+ /* delete the object */
+ Type::Ptr ptype = Type::GetByName(objType);
+ auto *ctype = dynamic_cast<ConfigType *>(ptype.get());
+
+ if (!ctype) {
+ // This never happens with icinga cluster endpoints, only with development errors.
+ Log(LogCritical, "ApiListener")
+ << "Config type '" << objType << "' does not exist.";
+ return Empty;
+ }
+
+ ConfigObject::Ptr object = ctype->GetObject(objName);
+
+ if (!object) {
+ Log(LogNotice, "ApiListener")
+ << "Could not delete non-existent object '" << objName << "' with type '" << params->Get("type") << "'.";
+ return Empty;
+ }
+
+ if (object->GetPackage() != "_api") {
+ Log(LogCritical, "ApiListener")
+ << "Could not delete object '" << objName << "': Not created by the API.";
+ return Empty;
+ }
+
+ Log(LogNotice, "ApiListener")
+ << "Processing config delete"
+ << " from '" << identity << "' (endpoint: '" << endpoint->GetName() << "', zone: '" << endpointZone->GetName() << "')"
+ << " for object '" << object->GetName() << "'.";
+
+ Array::Ptr errors = new Array();
+
+ /*
+ * Delete the config object through our internal API.
+ * IMPORTANT: Pass the origin to prevent cluster sync loops.
+ */
+ if (!ConfigObjectUtility::DeleteObject(object, true, errors, nullptr, origin)) {
+ Log(LogCritical, "ApiListener", "Could not delete object:");
+
+ ObjectLock olock(errors);
+ for (const String& error : errors) {
+ Log(LogCritical, "ApiListener", error);
+ }
+ }
+
+ return Empty;
+}
+
+void ApiListener::UpdateConfigObject(const ConfigObject::Ptr& object, const MessageOrigin::Ptr& origin,
+ const JsonRpcConnection::Ptr& client)
+{
+ /* only send objects to zones which have access to the object */
+ if (client) {
+ Zone::Ptr target_zone = client->GetEndpoint()->GetZone();
+
+ if (target_zone && !target_zone->CanAccessObject(object)) {
+ Log(LogDebug, "ApiListener")
+ << "Not sending 'update config' message to unauthorized zone '" << target_zone->GetName() << "'"
+ << " for object: '" << object->GetName() << "'.";
+
+ return;
+ }
+ }
+
+ if (object->GetPackage() != "_api" && object->GetVersion() == 0)
+ return;
+
+ Dictionary::Ptr params = new Dictionary();
+
+ Dictionary::Ptr message = new Dictionary({
+ { "jsonrpc", "2.0" },
+ { "method", "config::UpdateObject" },
+ { "params", params }
+ });
+
+ params->Set("name", object->GetName());
+ params->Set("type", object->GetReflectionType()->GetName());
+ params->Set("version", object->GetVersion());
+
+ String zoneName = object->GetZoneName();
+
+ if (!zoneName.IsEmpty())
+ params->Set("zone", zoneName);
+
+ if (object->GetPackage() == "_api") {
+ std::ifstream fp(ConfigObjectUtility::GetExistingObjectConfigPath(object).CStr(), std::ifstream::binary);
+ if (!fp)
+ return;
+
+ String content((std::istreambuf_iterator<char>(fp)), std::istreambuf_iterator<char>());
+ params->Set("config", content);
+ }
+
+ Dictionary::Ptr original_attributes = object->GetOriginalAttributes();
+ Dictionary::Ptr modified_attributes = new Dictionary();
+ ArrayData newOriginalAttributes;
+
+ if (original_attributes) {
+ ObjectLock olock(original_attributes);
+ for (const Dictionary::Pair& kv : original_attributes) {
+ std::vector<String> tokens = kv.first.Split(".");
+
+ Value value = object;
+ for (const String& token : tokens) {
+ value = VMOps::GetField(value, token);
+ }
+
+ modified_attributes->Set(kv.first, value);
+
+ newOriginalAttributes.push_back(kv.first);
+ }
+ }
+
+ params->Set("modified_attributes", modified_attributes);
+
+ /* only send the original attribute keys */
+ params->Set("original_attributes", new Array(std::move(newOriginalAttributes)));
+
+#ifdef I2_DEBUG
+ Log(LogDebug, "ApiListener")
+ << "Sent update for object '" << object->GetName() << "': " << JsonEncode(params);
+#endif /* I2_DEBUG */
+
+ if (client)
+ client->SendMessage(message);
+ else {
+ Zone::Ptr target = static_pointer_cast<Zone>(object->GetZone());
+
+ if (!target)
+ target = Zone::GetLocalZone();
+
+ RelayMessage(origin, target, message, false);
+ }
+}
+
+
+void ApiListener::DeleteConfigObject(const ConfigObject::Ptr& object, const MessageOrigin::Ptr& origin,
+ const JsonRpcConnection::Ptr& client)
+{
+ if (object->GetPackage() != "_api")
+ return;
+
+ /* only send objects to zones which have access to the object */
+ if (client) {
+ Zone::Ptr target_zone = client->GetEndpoint()->GetZone();
+
+ if (target_zone && !target_zone->CanAccessObject(object)) {
+ Log(LogDebug, "ApiListener")
+ << "Not sending 'delete config' message to unauthorized zone '" << target_zone->GetName() << "'"
+ << " for object: '" << object->GetName() << "'.";
+
+ return;
+ }
+ }
+
+ Dictionary::Ptr params = new Dictionary();
+
+ Dictionary::Ptr message = new Dictionary({
+ { "jsonrpc", "2.0" },
+ { "method", "config::DeleteObject" },
+ { "params", params }
+ });
+
+ params->Set("name", object->GetName());
+ params->Set("type", object->GetReflectionType()->GetName());
+ params->Set("version", object->GetVersion());
+
+
+#ifdef I2_DEBUG
+ Log(LogDebug, "ApiListener")
+ << "Sent delete for object '" << object->GetName() << "': " << JsonEncode(params);
+#endif /* I2_DEBUG */
+
+ if (client)
+ client->SendMessage(message);
+ else {
+ Zone::Ptr target = static_pointer_cast<Zone>(object->GetZone());
+
+ if (!target)
+ target = Zone::GetLocalZone();
+
+ RelayMessage(origin, target, message, true);
+ }
+}
+
+/* Initial sync on connect for new endpoints */
+void ApiListener::SendRuntimeConfigObjects(const JsonRpcConnection::Ptr& aclient)
+{
+ Endpoint::Ptr endpoint = aclient->GetEndpoint();
+ ASSERT(endpoint);
+
+ Zone::Ptr azone = endpoint->GetZone();
+
+ Log(LogInformation, "ApiListener")
+ << "Syncing runtime objects to endpoint '" << endpoint->GetName() << "'.";
+
+ for (const Type::Ptr& type : Type::GetAllTypes()) {
+ auto *dtype = dynamic_cast<ConfigType *>(type.get());
+
+ if (!dtype)
+ continue;
+
+ for (const ConfigObject::Ptr& object : dtype->GetObjects()) {
+ /* don't sync objects for non-matching parent-child zones */
+ if (!azone->CanAccessObject(object))
+ continue;
+
+ /* send the config object to the connected client */
+ UpdateConfigObject(object, nullptr, aclient);
+ }
+ }
+
+ Log(LogInformation, "ApiListener")
+ << "Finished syncing runtime objects to endpoint '" << endpoint->GetName() << "'.";
+}
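For reference, a 'config::UpdateObject' message as assembled by UpdateConfigObject() above has roughly the following shape. All values here are hypothetical; the 'zone' key is only present when the object has a zone, and the 'config' text is only included for objects in the '_api' package:

{
  "jsonrpc": "2.0",
  "method": "config::UpdateObject",
  "params": {
    "name": "example-host!example-service",
    "type": "Service",
    "version": 1700000000.123456,
    "zone": "satellite",
    "config": "object Service \"example-service\" { /* runtime-created config */ }",
    "modified_attributes": { "enable_active_checks": false },
    "original_attributes": [ "enable_active_checks" ]
  }
}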
diff --git a/lib/remote/apilistener-filesync.cpp b/lib/remote/apilistener-filesync.cpp
new file mode 100644
index 0000000..acf8deb
--- /dev/null
+++ b/lib/remote/apilistener-filesync.cpp
@@ -0,0 +1,887 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/apilistener.hpp"
+#include "remote/apifunction.hpp"
+#include "config/configcompiler.hpp"
+#include "base/tlsutility.hpp"
+#include "base/json.hpp"
+#include "base/configtype.hpp"
+#include "base/logger.hpp"
+#include "base/convert.hpp"
+#include "base/application.hpp"
+#include "base/exception.hpp"
+#include "base/shared.hpp"
+#include "base/utility.hpp"
+#include <fstream>
+#include <iomanip>
+#include <thread>
+
+using namespace icinga;
+
+REGISTER_APIFUNCTION(Update, config, &ApiListener::ConfigUpdateHandler);
+
+std::mutex ApiListener::m_ConfigSyncStageLock;
+
+/**
+ * Entrypoint for updating all authoritative configs from /etc/zones.d, packages, etc.
+ * into var/lib/icinga2/api/zones
+ */
+void ApiListener::SyncLocalZoneDirs() const
+{
+ for (const Zone::Ptr& zone : ConfigType::GetObjectsByType<Zone>()) {
+ try {
+ SyncLocalZoneDir(zone);
+ } catch (const std::exception&) {
+ continue;
+ }
+ }
+}
+
+/**
+ * Sync a zone directory where we have an authoritative copy (zones.d, packages, etc.)
+ *
+ * This function collects the registered zone config dirs from
+ * the config compiler and reads the file content into the config
+ * information structure.
+ *
+ * Returns early when there are no updates.
+ *
+ * @param zone Pointer to the zone object being synced.
+ */
+void ApiListener::SyncLocalZoneDir(const Zone::Ptr& zone) const
+{
+ if (!zone)
+ return;
+
+ ConfigDirInformation newConfigInfo;
+ newConfigInfo.UpdateV1 = new Dictionary();
+ newConfigInfo.UpdateV2 = new Dictionary();
+ newConfigInfo.Checksums = new Dictionary();
+
+ String zoneName = zone->GetName();
+
+ // Load registered zone paths, e.g. '_etc', '_api' and user packages.
+ for (const ZoneFragment& zf : ConfigCompiler::GetZoneDirs(zoneName)) {
+ ConfigDirInformation newConfigPart = LoadConfigDir(zf.Path);
+
+ // Config files '*.conf'.
+ {
+ ObjectLock olock(newConfigPart.UpdateV1);
+ for (const Dictionary::Pair& kv : newConfigPart.UpdateV1) {
+ String path = "/" + zf.Tag + kv.first;
+
+ newConfigInfo.UpdateV1->Set(path, kv.second);
+ newConfigInfo.Checksums->Set(path, GetChecksum(kv.second));
+ }
+ }
+
+ // Meta files.
+ {
+ ObjectLock olock(newConfigPart.UpdateV2);
+ for (const Dictionary::Pair& kv : newConfigPart.UpdateV2) {
+ String path = "/" + zf.Tag + kv.first;
+
+ newConfigInfo.UpdateV2->Set(path, kv.second);
+ newConfigInfo.Checksums->Set(path, GetChecksum(kv.second));
+ }
+ }
+ }
+
+ size_t sumUpdates = newConfigInfo.UpdateV1->GetLength() + newConfigInfo.UpdateV2->GetLength();
+
+ // Return early if there are no updates.
+ if (sumUpdates == 0)
+ return;
+
+ String productionZonesDir = GetApiZonesDir() + zoneName;
+
+ Log(LogInformation, "ApiListener")
+ << "Copying " << sumUpdates << " zone configuration files for zone '" << zoneName << "' to '" << productionZonesDir << "'.";
+
+ // Purge files to allow deletion via zones.d.
+ if (Utility::PathExists(productionZonesDir))
+ Utility::RemoveDirRecursive(productionZonesDir);
+
+ Utility::MkDirP(productionZonesDir, 0700);
+
+ // Copy content and add additional meta data.
+ size_t numBytes = 0;
+
+ /* Note: We cannot simply copy directories here.
+ *
+	 * Zone directories are registered from everywhere and we have already
+	 * read their content into memory with LoadConfigDir().
+ */
+ Dictionary::Ptr newConfig = MergeConfigUpdate(newConfigInfo);
+
+ {
+ ObjectLock olock(newConfig);
+
+ for (const Dictionary::Pair& kv : newConfig) {
+ String dst = productionZonesDir + "/" + kv.first;
+
+ Utility::MkDirP(Utility::DirName(dst), 0755);
+
+ Log(LogInformation, "ApiListener")
+ << "Updating configuration file: " << dst;
+
+ String content = kv.second;
+
+ std::ofstream fp(dst.CStr(), std::ofstream::out | std::ostream::binary | std::ostream::trunc);
+
+ fp << content;
+ fp.close();
+
+ numBytes += content.GetLength();
+ }
+ }
+
+ // Additional metadata.
+ String tsPath = productionZonesDir + "/.timestamp";
+
+ if (!Utility::PathExists(tsPath)) {
+ std::ofstream fp(tsPath.CStr(), std::ofstream::out | std::ostream::trunc);
+
+ fp << std::fixed << Utility::GetTime();
+ fp.close();
+ }
+
+ String authPath = productionZonesDir + "/.authoritative";
+
+ if (!Utility::PathExists(authPath)) {
+ std::ofstream fp(authPath.CStr(), std::ofstream::out | std::ostream::trunc);
+ fp.close();
+ }
+
+ // Checksums.
+ String checksumsPath = productionZonesDir + "/.checksums";
+
+ if (Utility::PathExists(checksumsPath))
+ Utility::Remove(checksumsPath);
+
+ std::ofstream fp(checksumsPath.CStr(), std::ofstream::out | std::ostream::trunc);
+
+ fp << std::fixed << JsonEncode(newConfigInfo.Checksums);
+ fp.close();
+
+ Log(LogNotice, "ApiListener")
+ << "Updated meta data for cluster config sync. Checksum: '" << checksumsPath
+ << "', timestamp: '" << tsPath << "', auth: '" << authPath << "'.";
+}
+
+/**
+ * Entrypoint for sending a file based config update to a cluster client.
+ * This includes security checks for zone relations.
+ * Loads the zone config files this client is entitled to
+ * and sends the 'config::Update' JSON-RPC message.
+ *
+ * @param aclient Connected JSON-RPC client.
+ */
+void ApiListener::SendConfigUpdate(const JsonRpcConnection::Ptr& aclient)
+{
+ Endpoint::Ptr endpoint = aclient->GetEndpoint();
+ ASSERT(endpoint);
+
+ Zone::Ptr clientZone = endpoint->GetZone();
+ Zone::Ptr localZone = Zone::GetLocalZone();
+
+ // Don't send config updates to parent zones
+ if (!clientZone->IsChildOf(localZone))
+ return;
+
+ Dictionary::Ptr configUpdateV1 = new Dictionary();
+ Dictionary::Ptr configUpdateV2 = new Dictionary();
+ Dictionary::Ptr configUpdateChecksums = new Dictionary(); // new since 2.11
+
+ String zonesDir = GetApiZonesDir();
+
+ for (const Zone::Ptr& zone : ConfigType::GetObjectsByType<Zone>()) {
+ String zoneName = zone->GetName();
+ String zoneDir = zonesDir + zoneName;
+
+ // Only sync child and global zones.
+ if (!zone->IsChildOf(clientZone) && !zone->IsGlobal())
+ continue;
+
+ // Zone was configured, but there's no configuration directory.
+ if (!Utility::PathExists(zoneDir))
+ continue;
+
+ Log(LogInformation, "ApiListener")
+ << "Syncing configuration files for " << (zone->IsGlobal() ? "global " : "")
+ << "zone '" << zoneName << "' to endpoint '" << endpoint->GetName() << "'.";
+
+ ConfigDirInformation config = LoadConfigDir(zoneDir);
+
+ configUpdateV1->Set(zoneName, config.UpdateV1);
+ configUpdateV2->Set(zoneName, config.UpdateV2);
+ configUpdateChecksums->Set(zoneName, config.Checksums); // new since 2.11
+ }
+
+ Dictionary::Ptr message = new Dictionary({
+ { "jsonrpc", "2.0" },
+ { "method", "config::Update" },
+ { "params", new Dictionary({
+ { "update", configUpdateV1 },
+ { "update_v2", configUpdateV2 }, // Since 2.4.2.
+ { "checksums", configUpdateChecksums } // Since 2.11.0.
+ }) }
+ });
+
+ aclient->SendMessage(message);
+}
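Put together, the file-based 'config::Update' message built above is keyed by zone name in each of its three sections. A hypothetical example for a single child zone (file paths, contents and checksum placeholders are illustrative only):

{
  "jsonrpc": "2.0",
  "method": "config::Update",
  "params": {
    "update": {
      "satellite": { "/_etc/hosts.conf": "object Host \"example\" { check_command = \"hostalive\" }" }
    },
    "update_v2": {
      "satellite": { "/.timestamp": "1700000000.000000" }
    },
    "checksums": {
      "satellite": {
        "/_etc/hosts.conf": "<sha256 of the file content>",
        "/.timestamp": "<sha256 of the file content>"
      }
    }
  }
}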
+
+static bool CompareTimestampsConfigChange(const Dictionary::Ptr& productionConfig, const Dictionary::Ptr& receivedConfig,
+ const String& stageConfigZoneDir)
+{
+ double productionTimestamp;
+ double receivedTimestamp;
+
+ // Missing production timestamp means that something really broke. Always trigger a config change then.
+ if (!productionConfig->Contains("/.timestamp"))
+ productionTimestamp = 0;
+ else
+ productionTimestamp = productionConfig->Get("/.timestamp");
+
+ // Missing received config timestamp means that something really broke. Always trigger a config change then.
+ if (!receivedConfig->Contains("/.timestamp"))
+ receivedTimestamp = Utility::GetTime() + 10;
+ else
+ receivedTimestamp = receivedConfig->Get("/.timestamp");
+
+ bool configChange;
+
+ // Skip update if our configuration files are more recent.
+ if (productionTimestamp >= receivedTimestamp) {
+
+ Log(LogInformation, "ApiListener")
+ << "Our production configuration is more recent than the received configuration update."
+ << " Ignoring configuration file update for path '" << stageConfigZoneDir << "'. Current timestamp '"
+ << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", productionTimestamp) << "' ("
+ << std::fixed << std::setprecision(6) << productionTimestamp
+ << ") >= received timestamp '"
+ << Utility::FormatDateTime("%Y-%m-%d %H:%M:%S %z", receivedTimestamp) << "' ("
+ << receivedTimestamp << ").";
+
+ configChange = false;
+
+ } else {
+ configChange = true;
+ }
+
+ // Update the .timestamp file inside the staging directory.
+ String tsPath = stageConfigZoneDir + "/.timestamp";
+
+ if (!Utility::PathExists(tsPath)) {
+ std::ofstream fp(tsPath.CStr(), std::ofstream::out | std::ostream::trunc);
+ fp << std::fixed << receivedTimestamp;
+ fp.close();
+ }
+
+ return configChange;
+}
+
+/**
+ * Registered handler when a new config::Update message is received.
+ *
+ * Checks destination and permissions first, locks the transaction and analyses the update.
+ * The newly received configuration is not copied to production immediately,
+ * but into the staging directory first.
+ * Finally, the asynchronous validation and restart are triggered.
+ *
+ * @param origin Where this message came from.
+ * @param params Message parameters including the config updates.
+ * @returns Empty, required by the interface.
+ */
+Value ApiListener::ConfigUpdateHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ // Verify permissions and trust relationship.
+ if (!origin->FromClient->GetEndpoint() || (origin->FromZone && !Zone::GetLocalZone()->IsChildOf(origin->FromZone)))
+ return Empty;
+
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener) {
+ Log(LogCritical, "ApiListener", "No instance available.");
+ return Empty;
+ }
+
+ if (!listener->GetAcceptConfig()) {
+ Log(LogWarning, "ApiListener")
+ << "Ignoring config update. '" << listener->GetName() << "' does not accept config.";
+ return Empty;
+ }
+
+ std::thread([origin, params, listener]() {
+ try {
+ listener->HandleConfigUpdate(origin, params);
+ } catch (const std::exception& ex) {
+ auto msg ("Exception during config sync: " + DiagnosticInformation(ex));
+
+ Log(LogCritical, "ApiListener") << msg;
+ listener->UpdateLastFailedZonesStageValidation(msg);
+ }
+ }).detach();
+ return Empty;
+}
+
+void ApiListener::HandleConfigUpdate(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ /* Only one transaction is allowed, concurrent message handlers need to wait.
+ * This affects two parent endpoints sending the config in the same moment.
+ */
+ std::lock_guard<std::mutex> lock(m_ConfigSyncStageLock);
+
+ String apiZonesStageDir = GetApiZonesStageDir();
+ String fromEndpointName = origin->FromClient->GetEndpoint()->GetName();
+ String fromZoneName = GetFromZoneName(origin->FromZone);
+
+ Log(LogInformation, "ApiListener")
+ << "Applying config update from endpoint '" << fromEndpointName
+ << "' of zone '" << fromZoneName << "'.";
+
+ // Config files.
+ Dictionary::Ptr updateV1 = params->Get("update");
+ // Meta data files: .timestamp, etc.
+ Dictionary::Ptr updateV2 = params->Get("update_v2");
+
+ // New since 2.11.0.
+ Dictionary::Ptr checksums;
+
+ if (params->Contains("checksums"))
+ checksums = params->Get("checksums");
+
+ bool configChange = false;
+
+ // Keep track of the relative config paths for later validation and copying. TODO: Find a better algorithm.
+ std::vector<String> relativePaths;
+
+ /*
+ * We can and must safely purge the staging directory, as the difference is taken between
+ * runtime production config and newly received configuration.
+ * This is needed to not mix deleted/changed content between received and stage
+ * config.
+ */
+ if (Utility::PathExists(apiZonesStageDir))
+ Utility::RemoveDirRecursive(apiZonesStageDir);
+
+ Utility::MkDirP(apiZonesStageDir, 0700);
+
+ // Analyse and process the update.
+ size_t count = 0;
+
+ ObjectLock olock(updateV1);
+
+ for (const Dictionary::Pair& kv : updateV1) {
+
+ // Check for the configured zones.
+ String zoneName = kv.first;
+ Zone::Ptr zone = Zone::GetByName(zoneName);
+
+ if (!zone) {
+ Log(LogWarning, "ApiListener")
+ << "Ignoring config update from endpoint '" << fromEndpointName
+ << "' for unknown zone '" << zoneName << "'.";
+
+ continue;
+ }
+
+		// Ignore updates where we have an authoritative copy in etc/zones.d, packages, etc.
+ if (ConfigCompiler::HasZoneConfigAuthority(zoneName)) {
+ Log(LogInformation, "ApiListener")
+ << "Ignoring config update from endpoint '" << fromEndpointName
+ << "' for zone '" << zoneName << "' because we have an authoritative version of the zone's config.";
+
+ continue;
+ }
+
+ // Put the received configuration into our stage directory.
+ String productionConfigZoneDir = GetApiZonesDir() + zoneName;
+ String stageConfigZoneDir = GetApiZonesStageDir() + zoneName;
+
+ Utility::MkDirP(productionConfigZoneDir, 0700);
+ Utility::MkDirP(stageConfigZoneDir, 0700);
+
+ // Merge the config information.
+ ConfigDirInformation newConfigInfo;
+ newConfigInfo.UpdateV1 = kv.second;
+
+ // Load metadata.
+ if (updateV2)
+ newConfigInfo.UpdateV2 = updateV2->Get(kv.first);
+
+ // Load checksums. New since 2.11.
+ if (checksums)
+ newConfigInfo.Checksums = checksums->Get(kv.first);
+
+ // Load the current production config details.
+ ConfigDirInformation productionConfigInfo = LoadConfigDir(productionConfigZoneDir);
+
+ // Merge updateV1 and updateV2
+ Dictionary::Ptr productionConfig = MergeConfigUpdate(productionConfigInfo);
+ Dictionary::Ptr newConfig = MergeConfigUpdate(newConfigInfo);
+
+ bool timestampChanged = false;
+
+ if (CompareTimestampsConfigChange(productionConfig, newConfig, stageConfigZoneDir)) {
+ timestampChanged = true;
+ }
+
+ /* If we have received 'checksums' via cluster message, go for it.
+ * Otherwise do the old timestamp dance for versions < 2.11.
+ */
+ if (checksums) {
+ Log(LogInformation, "ApiListener")
+ << "Received configuration for zone '" << zoneName << "' from endpoint '"
+ << fromEndpointName << "'. Comparing the timestamp and checksums.";
+
+ if (timestampChanged) {
+
+ if (CheckConfigChange(productionConfigInfo, newConfigInfo))
+ configChange = true;
+ }
+
+ } else {
+			/* Fall back to timestamp handling when the parent endpoint didn't send checksums.
+ * This can happen when the satellite is 2.11 and the master is 2.10.
+ *
+ * TODO: Deprecate and remove this behaviour in 2.13+.
+ */
+
+ Log(LogWarning, "ApiListener")
+ << "Received configuration update without checksums from parent endpoint "
+ << fromEndpointName << ". This behaviour is deprecated. Please upgrade the parent endpoint to 2.11+";
+
+ if (timestampChanged) {
+ configChange = true;
+ }
+
+ // Keep another hack when there's a timestamp file missing.
+ {
+ ObjectLock olock(newConfig);
+
+ for (const Dictionary::Pair &kv : newConfig) {
+
+ // This is super expensive with a string content comparison.
+ if (productionConfig->Get(kv.first) != kv.second) {
+ if (!Utility::Match("*/.timestamp", kv.first))
+ configChange = true;
+ }
+ }
+ }
+ }
+
+ // Dump the received configuration for this zone into the stage directory.
+ size_t numBytes = 0;
+
+ {
+ ObjectLock olock(newConfig);
+
+ for (const Dictionary::Pair& kv : newConfig) {
+
+ /* Store the relative config file path for later validation and activation.
+ * IMPORTANT: Store this prior to any filters.
+				 */
+ relativePaths.push_back(zoneName + "/" + kv.first);
+
+ String path = stageConfigZoneDir + "/" + kv.first;
+
+ if (Utility::Match("*.conf", path)) {
+ Log(LogInformation, "ApiListener")
+ << "Stage: Updating received configuration file '" << path << "' for zone '" << zoneName << "'.";
+ }
+
+				// Parent nodes < 2.11 always send this; work around that bug by rejecting the file before it is written to disk.
+ if (Utility::BaseName(path) == ".authoritative")
+ continue;
+
+ // Sync string content only.
+ String content = kv.second;
+
+ // Generate a directory tree (zones/1/2/3 might not exist yet).
+ Utility::MkDirP(Utility::DirName(path), 0755);
+
+ // Write the content to file.
+ std::ofstream fp(path.CStr(), std::ofstream::out | std::ostream::binary | std::ostream::trunc);
+ fp << content;
+ fp.close();
+
+ numBytes += content.GetLength();
+ }
+ }
+
+ Log(LogInformation, "ApiListener")
+ << "Applying configuration file update for path '" << stageConfigZoneDir << "' ("
+ << numBytes << " Bytes).";
+
+ if (timestampChanged) {
+ // If the update removes a path, delete it on disk and signal a config change.
+ ObjectLock xlock(productionConfig);
+
+ for (const Dictionary::Pair& kv : productionConfig) {
+ if (!newConfig->Contains(kv.first)) {
+ configChange = true;
+
+ String path = stageConfigZoneDir + "/" + kv.first;
+ Utility::Remove(path);
+ }
+ }
+ }
+
+ count++;
+ }
+
+ /*
+ * We have processed all configuration files and stored them in the staging directory.
+ *
+ * We need to store them locally for later analysis. A config change means
+ * that we will validate the configuration in a separate process sandbox,
+ * and only copy the configuration to production when everything is ok.
+ *
+ * A successful validation also triggers the final restart.
+ */
+ if (configChange) {
+ Log(LogInformation, "ApiListener")
+ << "Received configuration updates (" << count << ") from endpoint '" << fromEndpointName
+ << "' are different to production, triggering validation and reload.";
+ TryActivateZonesStage(relativePaths);
+ } else {
+ Log(LogInformation, "ApiListener")
+ << "Received configuration updates (" << count << ") from endpoint '" << fromEndpointName
+ << "' are equal to production, skipping validation and reload.";
+ ClearLastFailedZonesStageValidation();
+ }
+}
+
+/**
+ * Spawns a new validation process with 'Internal.ZonesStageVarDir' set to override the config validation zone dirs with
+ * our current stage. Then waits for the validation result and if it was successful, the configuration is copied from
+ * stage to production and a restart is triggered. On validation failure, there is no restart and this is logged.
+ *
+ * The caller of this function must hold m_ConfigSyncStageLock.
+ *
+ * @param relativePaths Collected paths including the zone name, which are copied from stage to current directories.
+ */
+void ApiListener::TryActivateZonesStage(const std::vector<String>& relativePaths)
+{
+ VERIFY(Application::GetArgC() >= 1);
+
+ /* Inherit parent process args. */
+ Array::Ptr args = new Array({
+ Application::GetExePath(Application::GetArgV()[0]),
+ });
+
+ for (int i = 1; i < Application::GetArgC(); i++) {
+ String argV = Application::GetArgV()[i];
+
+ if (argV == "-d" || argV == "--daemonize")
+ continue;
+
+ args->Add(argV);
+ }
+
+ args->Add("--validate");
+
+ // Set the ZonesStageDir. This creates our own local chroot without any additional automated zone includes.
+ args->Add("--define");
+ args->Add("Internal.ZonesStageVarDir=" + GetApiZonesStageDir());
+
+ Process::Ptr process = new Process(Process::PrepareCommand(args));
+ process->SetTimeout(Application::GetReloadTimeout());
+
+ process->Run();
+ const ProcessResult& pr = process->WaitForResult();
+
+ String apiDir = GetApiDir();
+ String apiZonesDir = GetApiZonesDir();
+ String apiZonesStageDir = GetApiZonesStageDir();
+
+ String logFile = apiDir + "/zones-stage-startup.log";
+ std::ofstream fpLog(logFile.CStr(), std::ofstream::out | std::ostream::binary | std::ostream::trunc);
+ fpLog << pr.Output;
+ fpLog.close();
+
+ String statusFile = apiDir + "/zones-stage-status";
+ std::ofstream fpStatus(statusFile.CStr(), std::ofstream::out | std::ostream::binary | std::ostream::trunc);
+ fpStatus << pr.ExitStatus;
+ fpStatus.close();
+
+ // Validation went fine, copy stage and reload.
+ if (pr.ExitStatus == 0) {
+ Log(LogInformation, "ApiListener")
+ << "Config validation for stage '" << apiZonesStageDir << "' was OK, replacing into '" << apiZonesDir << "' and triggering reload.";
+
+ // Purge production before copying stage.
+ if (Utility::PathExists(apiZonesDir))
+ Utility::RemoveDirRecursive(apiZonesDir);
+
+ Utility::MkDirP(apiZonesDir, 0700);
+
+ // Copy all synced configuration files from stage to production.
+ for (const String& path : relativePaths) {
+ if (!Utility::PathExists(apiZonesStageDir + path))
+ continue;
+
+ Log(LogInformation, "ApiListener")
+ << "Copying file '" << path << "' from config sync staging to production zones directory.";
+
+ String stagePath = apiZonesStageDir + path;
+ String currentPath = apiZonesDir + path;
+
+ Utility::MkDirP(Utility::DirName(currentPath), 0700);
+
+ Utility::CopyFile(stagePath, currentPath);
+ }
+
+		// Clear any previously failed deployment state.
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (listener)
+ listener->ClearLastFailedZonesStageValidation();
+
+ Application::RequestRestart();
+
+ // All good, return early.
+ return;
+ }
+
+ String failedLogFile = apiDir + "/zones-stage-startup-last-failed.log";
+ std::ofstream fpFailedLog(failedLogFile.CStr(), std::ofstream::out | std::ostream::binary | std::ostream::trunc);
+ fpFailedLog << pr.Output;
+ fpFailedLog.close();
+
+ // Error case.
+ Log(LogCritical, "ApiListener")
+ << "Config validation failed for staged cluster config sync in '" << apiZonesStageDir
+ << "'. Aborting. Logs: '" << failedLogFile << "'";
+
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (listener)
+ listener->UpdateLastFailedZonesStageValidation(pr.Output);
+}
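As a concrete illustration, the validation child process spawned above ends up with a command line roughly like the following; the binary path, the inherited 'daemon' arguments and the data directory are hypothetical and depend on the local installation:

/usr/sbin/icinga2 daemon --validate --define Internal.ZonesStageVarDir=/var/lib/icinga2/api/zones-stage/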
+
+/**
+ * Update the structure from the last failed validation output.
+ * Uses the current timestamp.
+ *
+ * @param log The process output from the config validation.
+ */
+void ApiListener::UpdateLastFailedZonesStageValidation(const String& log)
+{
+ Dictionary::Ptr lastFailedZonesStageValidation = new Dictionary({
+ { "log", log },
+ { "ts", Utility::GetTime() }
+ });
+
+ SetLastFailedZonesStageValidation(lastFailedZonesStageValidation);
+}
+
+/**
+ * Clear the structure for the last failed reload.
+ *
+ */
+void ApiListener::ClearLastFailedZonesStageValidation()
+{
+ SetLastFailedZonesStageValidation(Dictionary::Ptr());
+}
+
+/**
+ * Generate a config checksum.
+ *
+ * @param content String content used for generating the checksum.
+ * @returns The checksum as string.
+ */
+String ApiListener::GetChecksum(const String& content)
+{
+ return SHA256(content);
+}
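A self-contained sketch of deriving such a per-file checksum with OpenSSL's EVP interface; this only approximates what icinga::SHA256() returns and is not upstream code (build with -lcrypto):

// Hex-encoded SHA-256 digest of a file's content, as used for the ".checksums" comparison.
#include <openssl/evp.h>
#include <cstdio>
#include <string>

static std::string Sha256Hex(const std::string& content)
{
	unsigned char md[EVP_MAX_MD_SIZE];
	unsigned int len = 0;

	EVP_Digest(content.data(), content.size(), md, &len, EVP_sha256(), nullptr);

	std::string hex;
	char buf[3];
	for (unsigned int i = 0; i < len; i++) {
		std::snprintf(buf, sizeof(buf), "%02x", md[i]);
		hex += buf;
	}
	return hex;
}

int main()
{
	std::printf("%s\n", Sha256Hex("object Host \"example\" {}\n").c_str());
}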
+
+bool ApiListener::CheckConfigChange(const ConfigDirInformation& oldConfig, const ConfigDirInformation& newConfig)
+{
+ Dictionary::Ptr oldChecksums = oldConfig.Checksums;
+ Dictionary::Ptr newChecksums = newConfig.Checksums;
+
+ // TODO: Figure out whether normal users need this for debugging.
+ Log(LogDebug, "ApiListener")
+ << "Checking for config change between stage and production. Old (" << oldChecksums->GetLength() << "): '"
+ << JsonEncode(oldChecksums)
+ << "' vs. new (" << newChecksums->GetLength() << "): '"
+ << JsonEncode(newChecksums) << "'.";
+
+	/* Since internal files are synced here too, we cannot depend on the length alone.
+	 * So we need to go through both checksum sets to cover the cases "everything is new" and "everything was deleted".
+ */
+ {
+ ObjectLock olock(oldChecksums);
+ for (const Dictionary::Pair& kv : oldChecksums) {
+ String path = kv.first;
+ String oldChecksum = kv.second;
+
+ /* Ignore internal files, especially .timestamp and .checksums.
+ *
+ * If we don't, this results in "always change" restart loops.
+ */
+ if (Utility::Match("/.*", path)) {
+ Log(LogDebug, "ApiListener")
+ << "Ignoring old internal file '" << path << "'.";
+
+ continue;
+ }
+
+ Log(LogDebug, "ApiListener")
+ << "Checking " << path << " for old checksum: " << oldChecksum << ".";
+
+ // Check if key exists first for more verbose logging.
+ // Note: Don't do this later on.
+ if (!newChecksums->Contains(path)) {
+ Log(LogDebug, "ApiListener")
+ << "File '" << path << "' was deleted by remote.";
+
+ return true;
+ }
+
+ String newChecksum = newChecksums->Get(path);
+
+ if (newChecksum != kv.second) {
+ Log(LogDebug, "ApiListener")
+ << "Path '" << path << "' doesn't match old checksum '"
+ << oldChecksum << "' with new checksum '" << newChecksum << "'.";
+
+ return true;
+ }
+ }
+ }
+
+ {
+ ObjectLock olock(newChecksums);
+ for (const Dictionary::Pair& kv : newChecksums) {
+ String path = kv.first;
+ String newChecksum = kv.second;
+
+ /* Ignore internal files, especially .timestamp and .checksums.
+ *
+ * If we don't, this results in "always change" restart loops.
+ */
+ if (Utility::Match("/.*", path)) {
+ Log(LogDebug, "ApiListener")
+ << "Ignoring new internal file '" << path << "'.";
+
+ continue;
+ }
+
+ Log(LogDebug, "ApiListener")
+ << "Checking " << path << " for new checksum: " << newChecksum << ".";
+
+			// Only check for paths added by the remote; checksums present in both sets were already compared above.
+ if (!oldChecksums->Contains(path)) {
+ Log(LogDebug, "ApiListener")
+ << "File '" << path << "' was added by remote.";
+
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+/**
+ * Load the given config dir and read the content of its files into the config structure.
+ *
+ * @param dir Path to the config directory.
+ * @returns ConfigDirInformation structure.
+ */
+ConfigDirInformation ApiListener::LoadConfigDir(const String& dir)
+{
+ ConfigDirInformation config;
+ config.UpdateV1 = new Dictionary();
+ config.UpdateV2 = new Dictionary();
+ config.Checksums = new Dictionary();
+
+ Utility::GlobRecursive(dir, "*", [&config, dir](const String& file) { ConfigGlobHandler(config, dir, file); }, GlobFile);
+ return config;
+}
+
+/**
+ * Read the given file and store it in the config information structure.
+ * Callback function for Glob().
+ *
+ * @param config Reference to the config information object.
+ * @param path File path.
+ * @param file Full file name.
+ */
+void ApiListener::ConfigGlobHandler(ConfigDirInformation& config, const String& path, const String& file)
+{
+ // Avoid loading the authoritative marker for syncs at all cost.
+ if (Utility::BaseName(file) == ".authoritative")
+ return;
+
+ CONTEXT("Creating config update for file '" << file << "'");
+
+ Log(LogNotice, "ApiListener")
+ << "Creating config update for file '" << file << "'.";
+
+ std::ifstream fp(file.CStr(), std::ifstream::binary);
+ if (!fp)
+ return;
+
+ String content((std::istreambuf_iterator<char>(fp)), std::istreambuf_iterator<char>());
+
+ Dictionary::Ptr update;
+ String relativePath = file.SubStr(path.GetLength());
+
+ /*
+ * 'update' messages contain conf files. 'update_v2' syncs everything else (.timestamp).
+ *
+ * **Keep this intact to stay compatible with older clients.**
+ */
+ String sanitizedContent = Utility::ValidateUTF8(content);
+
+ if (Utility::Match("*.conf", file)) {
+ update = config.UpdateV1;
+
+ // Configuration files should be automatically sanitized with UTF8.
+ update->Set(relativePath, sanitizedContent);
+ } else {
+ update = config.UpdateV2;
+
+ /*
+ * Ensure that only valid UTF8 content is being read for the cluster config sync.
+ * Binary files are not supported when wrapped into JSON encoded messages.
+ * Rationale: https://github.com/Icinga/icinga2/issues/7382
+ */
+ if (content != sanitizedContent) {
+ Log(LogCritical, "ApiListener")
+ << "Ignoring file '" << file << "' for cluster config sync: Does not contain valid UTF8. Binary files are not supported.";
+ return;
+ }
+
+ update->Set(relativePath, content);
+ }
+
+ /* Calculate a checksum for each file (and a global one later).
+ *
+ * IMPORTANT: Ignore the .authoritative file above, this must not be synced.
+	 */
+ config.Checksums->Set(relativePath, GetChecksum(content));
+}
+
+/**
+ * Compatibility helper for merging config update v1 and v2 into a global result.
+ *
+ * @param config Config information structure.
+ * @returns Dictionary which holds the merged information.
+ */
+Dictionary::Ptr ApiListener::MergeConfigUpdate(const ConfigDirInformation& config)
+{
+ Dictionary::Ptr result = new Dictionary();
+
+ if (config.UpdateV1)
+ config.UpdateV1->CopyTo(result);
+
+ if (config.UpdateV2)
+ config.UpdateV2->CopyTo(result);
+
+ return result;
+}
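To summarize the change-detection logic in CheckConfigChange() above, here is a simplified standalone sketch over plain std::map checksum sets; it approximates the "/.*" glob with a simple prefix check and is illustrative only:

// Sketch: a zone's config changed if a non-internal file was added, removed, or its checksum differs.
#include <iostream>
#include <map>
#include <string>

using ChecksumSet = std::map<std::string, std::string>; // relative path -> checksum

// Internal files such as "/.timestamp" and "/.checksums" never count as a change.
static bool IsInternal(const std::string& path)
{
	return path.rfind("/.", 0) == 0;
}

static bool ConfigChanged(const ChecksumSet& oldSet, const ChecksumSet& newSet)
{
	for (const auto& kv : oldSet) {
		if (IsInternal(kv.first))
			continue;

		auto it = newSet.find(kv.first);
		if (it == newSet.end() || it->second != kv.second)
			return true; // file deleted or content changed
	}

	for (const auto& kv : newSet) {
		if (!IsInternal(kv.first) && !oldSet.count(kv.first))
			return true; // file added
	}

	return false;
}

int main()
{
	ChecksumSet production{{"/_etc/hosts.conf", "aaaa"}, {"/.timestamp", "x"}};
	ChecksumSet received{{"/_etc/hosts.conf", "bbbb"}, {"/.timestamp", "y"}};

	std::cout << std::boolalpha << ConfigChanged(production, received) << "\n"; // true
}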
diff --git a/lib/remote/apilistener.cpp b/lib/remote/apilistener.cpp
new file mode 100644
index 0000000..85443e2
--- /dev/null
+++ b/lib/remote/apilistener.cpp
@@ -0,0 +1,1970 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/apilistener.hpp"
+#include "remote/apilistener-ti.cpp"
+#include "remote/jsonrpcconnection.hpp"
+#include "remote/endpoint.hpp"
+#include "remote/jsonrpc.hpp"
+#include "remote/apifunction.hpp"
+#include "remote/configpackageutility.hpp"
+#include "remote/configobjectutility.hpp"
+#include "base/atomic-file.hpp"
+#include "base/convert.hpp"
+#include "base/defer.hpp"
+#include "base/io-engine.hpp"
+#include "base/netstring.hpp"
+#include "base/json.hpp"
+#include "base/configtype.hpp"
+#include "base/logger.hpp"
+#include "base/objectlock.hpp"
+#include "base/stdiostream.hpp"
+#include "base/perfdatavalue.hpp"
+#include "base/application.hpp"
+#include "base/context.hpp"
+#include "base/statsfunction.hpp"
+#include "base/exception.hpp"
+#include "base/tcpsocket.hpp"
+#include <boost/asio/buffer.hpp>
+#include <boost/asio/io_context_strand.hpp>
+#include <boost/asio/ip/tcp.hpp>
+#include <boost/asio/spawn.hpp>
+#include <boost/asio/ssl/context.hpp>
+#include <boost/date_time/posix_time/posix_time_duration.hpp>
+#include <boost/lexical_cast.hpp>
+#include <boost/regex.hpp>
+#include <boost/system/error_code.hpp>
+#include <boost/thread/locks.hpp>
+#include <climits>
+#include <cstdint>
+#include <fstream>
+#include <memory>
+#include <openssl/ssl.h>
+#include <openssl/tls1.h>
+#include <openssl/x509.h>
+#include <sstream>
+#include <utility>
+
+using namespace icinga;
+
+REGISTER_TYPE(ApiListener);
+
+boost::signals2::signal<void(bool)> ApiListener::OnMasterChanged;
+ApiListener::Ptr ApiListener::m_Instance;
+
+REGISTER_STATSFUNCTION(ApiListener, &ApiListener::StatsFunc);
+
+REGISTER_APIFUNCTION(Hello, icinga, &ApiListener::HelloAPIHandler);
+
+ApiListener::ApiListener()
+{
+ m_RelayQueue.SetName("ApiListener, RelayQueue");
+ m_SyncQueue.SetName("ApiListener, SyncQueue");
+}
+
+String ApiListener::GetApiDir()
+{
+ return Configuration::DataDir + "/api/";
+}
+
+String ApiListener::GetApiZonesDir()
+{
+ return GetApiDir() + "zones/";
+}
+
+String ApiListener::GetApiZonesStageDir()
+{
+ return GetApiDir() + "zones-stage/";
+}
+
+String ApiListener::GetCertsDir()
+{
+ return Configuration::DataDir + "/certs/";
+}
+
+String ApiListener::GetCaDir()
+{
+ return Configuration::DataDir + "/ca/";
+}
+
+String ApiListener::GetCertificateRequestsDir()
+{
+ return Configuration::DataDir + "/certificate-requests/";
+}
+
+String ApiListener::GetDefaultCertPath()
+{
+ return GetCertsDir() + "/" + ScriptGlobal::Get("NodeName") + ".crt";
+}
+
+String ApiListener::GetDefaultKeyPath()
+{
+ return GetCertsDir() + "/" + ScriptGlobal::Get("NodeName") + ".key";
+}
+
+String ApiListener::GetDefaultCaPath()
+{
+ return GetCertsDir() + "/ca.crt";
+}
+
+double ApiListener::GetTlsHandshakeTimeout() const
+{
+ return Configuration::TlsHandshakeTimeout;
+}
+
+void ApiListener::SetTlsHandshakeTimeout(double value, bool suppress_events, const Value& cookie)
+{
+ Configuration::TlsHandshakeTimeout = value;
+}
+
+void ApiListener::CopyCertificateFile(const String& oldCertPath, const String& newCertPath)
+{
+ struct stat st1, st2;
+
+ if (!oldCertPath.IsEmpty() && stat(oldCertPath.CStr(), &st1) >= 0 && (stat(newCertPath.CStr(), &st2) < 0 || st1.st_mtime > st2.st_mtime)) {
+ Log(LogWarning, "ApiListener")
+ << "Copying '" << oldCertPath << "' certificate file to '" << newCertPath << "'";
+
+ Utility::MkDirP(Utility::DirName(newCertPath), 0700);
+ Utility::CopyFile(oldCertPath, newCertPath);
+ }
+}
+
+void ApiListener::OnConfigLoaded()
+{
+ if (m_Instance)
+ BOOST_THROW_EXCEPTION(ScriptError("Only one ApiListener object is allowed.", GetDebugInfo()));
+
+ m_Instance = this;
+
+ String defaultCertPath = GetDefaultCertPath();
+ String defaultKeyPath = GetDefaultKeyPath();
+ String defaultCaPath = GetDefaultCaPath();
+
+ /* Migrate certificate location < 2.8 to the new default path. */
+ String oldCertPath = GetCertPath();
+ String oldKeyPath = GetKeyPath();
+ String oldCaPath = GetCaPath();
+
+ CopyCertificateFile(oldCertPath, defaultCertPath);
+ CopyCertificateFile(oldKeyPath, defaultKeyPath);
+ CopyCertificateFile(oldCaPath, defaultCaPath);
+
+ if (!oldCertPath.IsEmpty() && !oldKeyPath.IsEmpty() && !oldCaPath.IsEmpty()) {
+ Log(LogWarning, "ApiListener", "Please read the upgrading documentation for v2.8: https://icinga.com/docs/icinga2/latest/doc/16-upgrading-icinga-2/");
+ }
+
+ /* Create the internal API object storage. */
+ ConfigObjectUtility::CreateStorage();
+
+ /* Cache API packages and their active stage name. */
+ UpdateActivePackageStagesCache();
+
+ /* set up SSL context */
+ std::shared_ptr<X509> cert;
+ try {
+ cert = GetX509Certificate(defaultCertPath);
+ } catch (const std::exception&) {
+ BOOST_THROW_EXCEPTION(ScriptError("Cannot get certificate from cert path: '"
+ + defaultCertPath + "'.", GetDebugInfo()));
+ }
+
+ try {
+ SetIdentity(GetCertificateCN(cert));
+ } catch (const std::exception&) {
+ BOOST_THROW_EXCEPTION(ScriptError("Cannot get certificate common name from cert path: '"
+ + defaultCertPath + "'.", GetDebugInfo()));
+ }
+
+ Log(LogInformation, "ApiListener")
+ << "My API identity: " << GetIdentity();
+
+ UpdateSSLContext();
+}
+
+std::shared_ptr<X509> ApiListener::RenewCert(const std::shared_ptr<X509>& cert, bool ca)
+{
+ std::shared_ptr<EVP_PKEY> pubkey (X509_get_pubkey(cert.get()), EVP_PKEY_free);
+ auto subject (X509_get_subject_name(cert.get()));
+ auto cacert (GetX509Certificate(GetDefaultCaPath()));
+ auto newcert (CreateCertIcingaCA(pubkey.get(), subject, ca));
+
+ /* verify that the new cert matches the CA we're using for the ApiListener;
+ * this ensures that the CA we have in /var/lib/icinga2/ca matches the one
+ * we're using for cluster connections (there's no point in sending a client
+ * a certificate it wouldn't be able to use to connect to us anyway) */
+ try {
+ if (!VerifyCertificate(cacert, newcert, GetCrlPath())) {
+ Log(LogWarning, "ApiListener")
+ << "The CA in '" << GetDefaultCaPath() << "' does not match the CA which Icinga uses "
+ << "for its own cluster connections. This is most likely a configuration problem.";
+
+ return nullptr;
+ }
+ } catch (const std::exception&) { } /* Swallow the exception on purpose, cacert will never be a non-CA certificate. */
+
+ return newcert;
+}
+
+void ApiListener::UpdateSSLContext()
+{
+ auto ctx (SetupSslContext(GetDefaultCertPath(), GetDefaultKeyPath(), GetDefaultCaPath(), GetCrlPath(), GetCipherList(), GetTlsProtocolmin(), GetDebugInfo()));
+
+ {
+ boost::unique_lock<decltype(m_SSLContextMutex)> lock (m_SSLContextMutex);
+
+ m_SSLContext = std::move(ctx);
+ }
+
+ for (const Endpoint::Ptr& endpoint : ConfigType::GetObjectsByType<Endpoint>()) {
+ for (const JsonRpcConnection::Ptr& client : endpoint->GetClients()) {
+ client->Disconnect();
+ }
+ }
+
+ for (const JsonRpcConnection::Ptr& client : m_AnonymousClients) {
+ client->Disconnect();
+ }
+}
+
+void ApiListener::OnAllConfigLoaded()
+{
+ m_LocalEndpoint = Endpoint::GetByName(GetIdentity());
+
+ if (!m_LocalEndpoint)
+ BOOST_THROW_EXCEPTION(ScriptError("Endpoint object for '" + GetIdentity() + "' is missing.", GetDebugInfo()));
+}
+
+/**
+ * Starts the component.
+ */
+void ApiListener::Start(bool runtimeCreated)
+{
+ Log(LogInformation, "ApiListener")
+ << "'" << GetName() << "' started.";
+
+ SyncLocalZoneDirs();
+
+ m_RenewOwnCertTimer = Timer::Create();
+
+ if (Utility::PathExists(GetIcingaCADir() + "/ca.key")) {
+ RenewOwnCert();
+ RenewCA();
+
+ m_RenewOwnCertTimer->OnTimerExpired.connect([this](const Timer * const&) {
+ RenewOwnCert();
+ RenewCA();
+ });
+ } else {
+ m_RenewOwnCertTimer->OnTimerExpired.connect([this](const Timer * const&) {
+ JsonRpcConnection::SendCertificateRequest(nullptr, nullptr, String());
+ });
+ }
+
+ m_RenewOwnCertTimer->SetInterval(RENEW_INTERVAL);
+ m_RenewOwnCertTimer->Start();
+
+ ObjectImpl<ApiListener>::Start(runtimeCreated);
+
+ {
+ std::unique_lock<std::mutex> lock(m_LogLock);
+ OpenLogFile();
+ }
+
+ /* create the primary JSON-RPC listener */
+ if (!AddListener(GetBindHost(), GetBindPort())) {
+ Log(LogCritical, "ApiListener")
+ << "Cannot add listener on host '" << GetBindHost() << "' for port '" << GetBindPort() << "'.";
+ Application::Exit(EXIT_FAILURE);
+ }
+
+ m_Timer = Timer::Create();
+ m_Timer->OnTimerExpired.connect([this](const Timer * const&) { ApiTimerHandler(); });
+ m_Timer->SetInterval(5);
+ m_Timer->Start();
+ m_Timer->Reschedule(0);
+
+ m_ReconnectTimer = Timer::Create();
+ m_ReconnectTimer->OnTimerExpired.connect([this](const Timer * const&) { ApiReconnectTimerHandler(); });
+ m_ReconnectTimer->SetInterval(10);
+ m_ReconnectTimer->Start();
+ m_ReconnectTimer->Reschedule(0);
+
+ /* Keep this in relative sync with the cold startup in UpdateObjectAuthority() and the reconnect interval above.
+ * Previous: 60s reconnect, 30s OA, 60s cold startup.
+ * Now: 10s reconnect, 10s OA, 30s cold startup.
+ */
+ m_AuthorityTimer = Timer::Create();
+ m_AuthorityTimer->OnTimerExpired.connect([](const Timer * const&) { UpdateObjectAuthority(); });
+ m_AuthorityTimer->SetInterval(10);
+ m_AuthorityTimer->Start();
+
+ m_CleanupCertificateRequestsTimer = Timer::Create();
+ m_CleanupCertificateRequestsTimer->OnTimerExpired.connect([this](const Timer * const&) { CleanupCertificateRequestsTimerHandler(); });
+ m_CleanupCertificateRequestsTimer->SetInterval(3600);
+ m_CleanupCertificateRequestsTimer->Start();
+ m_CleanupCertificateRequestsTimer->Reschedule(0);
+
+ m_ApiPackageIntegrityTimer = Timer::Create();
+ m_ApiPackageIntegrityTimer->OnTimerExpired.connect([this](const Timer * const&) { CheckApiPackageIntegrity(); });
+ m_ApiPackageIntegrityTimer->SetInterval(300);
+ m_ApiPackageIntegrityTimer->Start();
+
+ OnMasterChanged(true);
+}
+
+void ApiListener::RenewOwnCert()
+{
+ auto certPath (GetDefaultCertPath());
+ auto cert (GetX509Certificate(certPath));
+
+ if (IsCertUptodate(cert)) {
+ return;
+ }
+
+ Log(LogInformation, "ApiListener")
+ << "Our certificate will expire soon, but we own the CA. Renewing.";
+
+ cert = RenewCert(cert);
+
+ if (!cert) {
+ return;
+ }
+
+ AtomicFile::Write(certPath, 0644, CertificateToString(cert));
+ UpdateSSLContext();
+}
+
+void ApiListener::RenewCA()
+{
+ auto certPath (GetCaDir() + "/ca.crt");
+ auto cert (GetX509Certificate(certPath));
+
+ if (IsCaUptodate(cert.get())) {
+ return;
+ }
+
+ Log(LogInformation, "ApiListener")
+ << "Our CA will expire soon, but we own it. Renewing.";
+
+ cert = RenewCert(cert, true);
+
+ if (!cert) {
+ return;
+ }
+
+ auto certStr (CertificateToString(cert));
+
+ AtomicFile::Write(GetDefaultCaPath(), 0644, certStr);
+ AtomicFile::Write(certPath, 0644, certStr);
+ UpdateSSLContext();
+}
+
+void ApiListener::Stop(bool runtimeDeleted)
+{
+ m_ApiPackageIntegrityTimer->Stop(true);
+ m_CleanupCertificateRequestsTimer->Stop(true);
+ m_AuthorityTimer->Stop(true);
+ m_ReconnectTimer->Stop(true);
+ m_Timer->Stop(true);
+ m_RenewOwnCertTimer->Stop(true);
+
+ ObjectImpl<ApiListener>::Stop(runtimeDeleted);
+
+ Log(LogInformation, "ApiListener")
+ << "'" << GetName() << "' stopped.";
+
+ {
+ std::unique_lock<std::mutex> lock(m_LogLock);
+ CloseLogFile();
+ RotateLogFile();
+ }
+
+ RemoveStatusFile();
+}
+
+ApiListener::Ptr ApiListener::GetInstance()
+{
+ return m_Instance;
+}
+
+Endpoint::Ptr ApiListener::GetMaster() const
+{
+ Zone::Ptr zone = Zone::GetLocalZone();
+
+ if (!zone)
+ return nullptr;
+
+ std::vector<String> names;
+
+ for (const Endpoint::Ptr& endpoint : zone->GetEndpoints())
+ if (endpoint->GetConnected() || endpoint->GetName() == GetIdentity())
+ names.push_back(endpoint->GetName());
+
+ std::sort(names.begin(), names.end());
+
+ return Endpoint::GetByName(*names.begin());
+}
+
+bool ApiListener::IsMaster() const
+{
+ Endpoint::Ptr master = GetMaster();
+
+ if (!master)
+ return false;
+
+ return master == GetLocalEndpoint();
+}
+
+/**
+ * Creates a new JSON-RPC listener on the specified port.
+ *
+ * @param node The host the listener should be bound to.
+ * @param service The port to listen on.
+ */
+bool ApiListener::AddListener(const String& node, const String& service)
+{
+ namespace asio = boost::asio;
+ namespace ip = asio::ip;
+ using ip::tcp;
+
+ ObjectLock olock(this);
+
+ if (!m_SSLContext) {
+ Log(LogCritical, "ApiListener", "SSL context is required for AddListener()");
+ return false;
+ }
+
+ auto& io (IoEngine::Get().GetIoContext());
+ auto acceptor (Shared<tcp::acceptor>::Make(io));
+
+ try {
+ tcp::resolver resolver (io);
+ tcp::resolver::query query (node, service, tcp::resolver::query::passive);
+
+ auto result (resolver.resolve(query));
+ auto current (result.begin());
+
+ for (;;) {
+ try {
+ acceptor->open(current->endpoint().protocol());
+
+ {
+ auto fd (acceptor->native_handle());
+
+ const int optFalse = 0;
+ setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, reinterpret_cast<const char *>(&optFalse), sizeof(optFalse));
+
+ const int optTrue = 1;
+ setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, reinterpret_cast<const char *>(&optTrue), sizeof(optTrue));
+#ifdef SO_REUSEPORT
+ setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, reinterpret_cast<const char *>(&optTrue), sizeof(optTrue));
+#endif /* SO_REUSEPORT */
+ }
+
+ acceptor->bind(current->endpoint());
+
+ break;
+ } catch (const std::exception&) {
+ if (++current == result.end()) {
+ throw;
+ }
+
+ if (acceptor->is_open()) {
+ acceptor->close();
+ }
+ }
+ }
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "ApiListener")
+ << "Cannot bind TCP socket for host '" << node << "' on port '" << service << "': " << ex.what();
+ return false;
+ }
+
+ acceptor->listen(INT_MAX);
+
+ auto localEndpoint (acceptor->local_endpoint());
+
+ Log(LogInformation, "ApiListener")
+ << "Started new listener on '[" << localEndpoint.address() << "]:" << localEndpoint.port() << "'";
+
+ IoEngine::SpawnCoroutine(io, [this, acceptor](asio::yield_context yc) { ListenerCoroutineProc(yc, acceptor); });
+
+ UpdateStatusFile(localEndpoint);
+
+ return true;
+}
+
+void ApiListener::ListenerCoroutineProc(boost::asio::yield_context yc, const Shared<boost::asio::ip::tcp::acceptor>::Ptr& server)
+{
+ namespace asio = boost::asio;
+
+ auto& io (IoEngine::Get().GetIoContext());
+
+ time_t lastModified = -1;
+ const String crlPath = GetCrlPath();
+
+ if (!crlPath.IsEmpty()) {
+ lastModified = Utility::GetFileCreationTime(crlPath);
+ }
+
+ for (;;) {
+ try {
+ asio::ip::tcp::socket socket (io);
+
+ server->async_accept(socket.lowest_layer(), yc);
+
+ if (!crlPath.IsEmpty()) {
+ time_t currentCreationTime = Utility::GetFileCreationTime(crlPath);
+
+ if (lastModified != currentCreationTime) {
+ UpdateSSLContext();
+
+ lastModified = currentCreationTime;
+ }
+ }
+
+ boost::shared_lock<decltype(m_SSLContextMutex)> lock (m_SSLContextMutex);
+ auto sslConn (Shared<AsioTlsStream>::Make(io, *m_SSLContext));
+
+ lock.unlock();
+ sslConn->lowest_layer() = std::move(socket);
+
+ auto strand (Shared<asio::io_context::strand>::Make(io));
+
+ IoEngine::SpawnCoroutine(*strand, [this, strand, sslConn](asio::yield_context yc) {
+ Timeout::Ptr timeout(new Timeout(strand->context(), *strand, boost::posix_time::microseconds(int64_t(GetConnectTimeout() * 1e6)),
+ [sslConn](asio::yield_context yc) {
+ Log(LogWarning, "ApiListener")
+ << "Timeout while processing incoming connection from "
+ << sslConn->lowest_layer().remote_endpoint();
+
+ boost::system::error_code ec;
+ sslConn->lowest_layer().cancel(ec);
+ }
+ ));
+ Defer cancelTimeout([timeout]() { timeout->Cancel(); });
+
+ NewClientHandler(yc, strand, sslConn, String(), RoleServer);
+ });
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "ApiListener")
+ << "Cannot accept new connection: " << ex.what();
+ }
+ }
+}
+
+/**
+ * Creates a new JSON-RPC client and connects to the specified endpoint.
+ *
+ * @param endpoint The endpoint.
+ */
+void ApiListener::AddConnection(const Endpoint::Ptr& endpoint)
+{
+ namespace asio = boost::asio;
+ using asio::ip::tcp;
+
+ if (!m_SSLContext) {
+ Log(LogCritical, "ApiListener", "SSL context is required for AddConnection()");
+ return;
+ }
+
+ auto& io (IoEngine::Get().GetIoContext());
+ auto strand (Shared<asio::io_context::strand>::Make(io));
+
+ IoEngine::SpawnCoroutine(*strand, [this, strand, endpoint, &io](asio::yield_context yc) {
+ String host = endpoint->GetHost();
+ String port = endpoint->GetPort();
+
+ Log(LogInformation, "ApiListener")
+ << "Reconnecting to endpoint '" << endpoint->GetName() << "' via host '" << host << "' and port '" << port << "'";
+
+ try {
+ boost::shared_lock<decltype(m_SSLContextMutex)> lock (m_SSLContextMutex);
+ auto sslConn (Shared<AsioTlsStream>::Make(io, *m_SSLContext, endpoint->GetName()));
+
+ lock.unlock();
+
+ Timeout::Ptr timeout(new Timeout(strand->context(), *strand, boost::posix_time::microseconds(int64_t(GetConnectTimeout() * 1e6)),
+ [sslConn, endpoint, host, port](asio::yield_context yc) {
+ Log(LogCritical, "ApiListener")
+ << "Timeout while reconnecting to endpoint '" << endpoint->GetName() << "' via host '" << host
+ << "' and port '" << port << "', cancelling attempt";
+
+ boost::system::error_code ec;
+ sslConn->lowest_layer().cancel(ec);
+ }
+ ));
+ Defer cancelTimeout([&timeout]() { timeout->Cancel(); });
+
+ Connect(sslConn->lowest_layer(), host, port, yc);
+
+ NewClientHandler(yc, strand, sslConn, endpoint->GetName(), RoleClient);
+
+ endpoint->SetConnecting(false);
+ Log(LogInformation, "ApiListener")
+ << "Finished reconnecting to endpoint '" << endpoint->GetName() << "' via host '" << host << "' and port '" << port << "'";
+ } catch (const std::exception& ex) {
+ endpoint->SetConnecting(false);
+
+ Log(LogCritical, "ApiListener")
+ << "Cannot connect to host '" << host << "' on port '" << port << "': " << ex.what();
+ }
+ });
+}
+
+void ApiListener::NewClientHandler(
+ boost::asio::yield_context yc, const Shared<boost::asio::io_context::strand>::Ptr& strand,
+ const Shared<AsioTlsStream>::Ptr& client, const String& hostname, ConnectionRole role
+)
+{
+ try {
+ NewClientHandlerInternal(yc, strand, client, hostname, role);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "ApiListener")
+ << "Exception while handling new API client connection: " << DiagnosticInformation(ex, false);
+
+ Log(LogDebug, "ApiListener")
+ << "Exception while handling new API client connection: " << DiagnosticInformation(ex);
+ }
+}
+
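+// Application version parsed into a single integer: major*10000 + minor*100 + patch,
+// e.g. "v2.14.2" becomes 21402. An unparsable version string yields 0.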
+static const auto l_AppVersionInt (([]() -> unsigned long {
+ auto appVersion (Application::GetAppVersion());
+ boost::regex rgx (R"EOF(^[rv]?(\d+)\.(\d+)\.(\d+))EOF");
+ boost::smatch match;
+
+ if (!boost::regex_search(appVersion.GetData(), match, rgx)) {
+ return 0;
+ }
+
+ return 100u * 100u * boost::lexical_cast<unsigned long>(match[1].str())
+ + 100u * boost::lexical_cast<unsigned long>(match[2].str())
+ + boost::lexical_cast<unsigned long>(match[3].str());
+})());
+
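+// Capability bits this instance advertises in its icinga::Hello message;
+// with both flags set the advertised value is 1 | 2 = 3.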
+static const auto l_MyCapabilities (
+ (uint_fast64_t)ApiCapabilities::ExecuteArbitraryCommand | (uint_fast64_t)ApiCapabilities::IfwApiCheckCommand
+);
+
+/**
+ * Processes a new client connection.
+ *
+ * @param client The new client.
+ */
+void ApiListener::NewClientHandlerInternal(
+ boost::asio::yield_context yc, const Shared<boost::asio::io_context::strand>::Ptr& strand,
+ const Shared<AsioTlsStream>::Ptr& client, const String& hostname, ConnectionRole role
+)
+{
+ namespace asio = boost::asio;
+ namespace ssl = asio::ssl;
+
+ String conninfo;
+
+ {
+ std::ostringstream conninfo_;
+
+ if (role == RoleClient) {
+ conninfo_ << "to";
+ } else {
+ conninfo_ << "from";
+ }
+
+ auto endpoint (client->lowest_layer().remote_endpoint());
+
+ conninfo_ << " [" << endpoint.address() << "]:" << endpoint.port();
+
+ conninfo = conninfo_.str();
+ }
+
+ auto& sslConn (client->next_layer());
+
+ boost::system::error_code ec;
+
+ {
+ Timeout::Ptr handshakeTimeout (new Timeout(
+ strand->context(),
+ *strand,
+ boost::posix_time::microseconds(intmax_t(Configuration::TlsHandshakeTimeout * 1000000)),
+ [strand, client](asio::yield_context yc) {
+ boost::system::error_code ec;
+ client->lowest_layer().cancel(ec);
+ }
+ ));
+
+ sslConn.async_handshake(role == RoleClient ? sslConn.client : sslConn.server, yc[ec]);
+
+ handshakeTimeout->Cancel();
+ }
+
+ if (ec) {
+ // https://github.com/boostorg/beast/issues/915
+ // Google Chrome 73+ seems not to close the connection properly; see https://stackoverflow.com/questions/56272906/how-to-fix-certificate-unknown-error-from-chrome-v73
+ if (ec == asio::ssl::error::stream_truncated) {
+ Log(LogNotice, "ApiListener")
+ << "TLS stream was truncated, ignoring connection from " << conninfo;
+ return;
+ }
+
+ Log(LogCritical, "ApiListener")
+ << "Client TLS handshake failed (" << conninfo << "): " << ec.message();
+ return;
+ }
+
+ bool willBeShutDown = false;
+
+ Defer shutDownIfNeeded ([&sslConn, &willBeShutDown, &yc]() {
+ if (!willBeShutDown) {
+ // Ignore the error; at all costs avoid throwing an exception here, as it would only be swallowed.
+ // https://github.com/Icinga/icinga2/issues/7351
+ boost::system::error_code ec;
+ sslConn.async_shutdown(yc[ec]);
+ }
+ });
+
+ std::shared_ptr<X509> cert (sslConn.GetPeerCertificate());
+ bool verify_ok = false;
+ String identity;
+ Endpoint::Ptr endpoint;
+
+ if (cert) {
+ verify_ok = sslConn.IsVerifyOK();
+
+ String verifyError = sslConn.GetVerifyError();
+
+ try {
+ identity = GetCertificateCN(cert);
+ } catch (const std::exception&) {
+ Log(LogCritical, "ApiListener")
+ << "Cannot get certificate common name from peer (" << conninfo << ") cert.";
+ return;
+ }
+
+ if (!hostname.IsEmpty()) {
+ if (identity != hostname) {
+ Log(LogWarning, "ApiListener")
+ << "Unexpected certificate common name while connecting to endpoint '"
+ << hostname << "': got '" << identity << "'";
+ return;
+ } else if (!verify_ok) {
+ Log(LogWarning, "ApiListener")
+ << "Certificate validation failed for endpoint '" << hostname
+ << "': " << verifyError;
+ }
+ }
+
+ if (verify_ok) {
+ endpoint = Endpoint::GetByName(identity);
+ }
+
+ Log log(LogInformation, "ApiListener");
+
+ log << "New client connection for identity '" << identity << "' " << conninfo;
+
+ if (!verify_ok) {
+ log << " (certificate validation failed: " << verifyError << ")";
+ } else if (!endpoint) {
+ log << " (no Endpoint object found for identity)";
+ }
+ } else {
+ Log(LogInformation, "ApiListener")
+ << "New client connection " << conninfo << " (no client certificate)";
+ }
+
+ ClientType ctype;
+
+ try {
+ if (role == RoleClient) {
+ JsonRpc::SendMessage(client, new Dictionary({
+ { "jsonrpc", "2.0" },
+ { "method", "icinga::Hello" },
+ { "params", new Dictionary({
+ { "version", (double)l_AppVersionInt },
+ { "capabilities", (double)l_MyCapabilities }
+ }) }
+ }), yc);
+
+ client->async_flush(yc);
+
+ ctype = ClientJsonRpc;
+ } else {
+ {
+ boost::system::error_code ec;
+
+ if (client->async_fill(yc[ec]) == 0u) {
+ if (identity.IsEmpty()) {
+ Log(LogInformation, "ApiListener")
+ << "No data received on new API connection " << conninfo << ". "
+ << "Ensure that the remote endpoints are properly configured in a cluster setup.";
+ } else {
+ Log(LogWarning, "ApiListener")
+ << "No data received on new API connection " << conninfo << " for identity '" << identity << "'. "
+ << "Ensure that the remote endpoints are properly configured in a cluster setup.";
+ }
+
+ return;
+ }
+ }
+
+ char firstByte = 0;
+
+ {
+ asio::mutable_buffer firstByteBuf (&firstByte, 1);
+ client->peek(firstByteBuf);
+ }
+
+ if (firstByte >= '0' && firstByte <= '9') {
+ JsonRpc::SendMessage(client, new Dictionary({
+ { "jsonrpc", "2.0" },
+ { "method", "icinga::Hello" },
+ { "params", new Dictionary({
+ { "version", (double)l_AppVersionInt },
+ { "capabilities", (double)l_MyCapabilities }
+ }) }
+ }), yc);
+
+ client->async_flush(yc);
+
+ ctype = ClientJsonRpc;
+ } else {
+ ctype = ClientHttp;
+ }
+ }
+ } catch (const boost::system::system_error& systemError) {
+ if (systemError.code() == boost::asio::error::operation_aborted) {
+ shutDownIfNeeded.Cancel();
+ }
+
+ throw;
+ }
+
+ if (ctype == ClientJsonRpc) {
+ Log(LogNotice, "ApiListener", "New JSON-RPC client");
+
+ if (endpoint && endpoint->GetConnected()) {
+ Log(LogNotice, "ApiListener")
+ << "Ignoring JSON-RPC connection " << conninfo
+ << ". We're already connected to Endpoint '" << endpoint->GetName() << "'.";
+ return;
+ }
+
+ JsonRpcConnection::Ptr aclient = new JsonRpcConnection(identity, verify_ok, client, role);
+
+ if (endpoint) {
+ endpoint->AddClient(aclient);
+
+ Utility::QueueAsyncCallback([this, aclient, endpoint]() {
+ SyncClient(aclient, endpoint, true);
+ });
+ } else if (!AddAnonymousClient(aclient)) {
+ Log(LogNotice, "ApiListener")
+ << "Ignoring anonymous JSON-RPC connection " << conninfo
+ << ". Max connections (" << GetMaxAnonymousClients() << ") exceeded.";
+
+ aclient = nullptr;
+ }
+
+ if (aclient) {
+ aclient->Start();
+
+ willBeShutDown = true;
+ }
+ } else {
+ Log(LogNotice, "ApiListener", "New HTTP client");
+
+ HttpServerConnection::Ptr aclient = new HttpServerConnection(identity, verify_ok, client);
+ AddHttpClient(aclient);
+ aclient->Start();
+
+ willBeShutDown = true;
+ }
+}
+
+void ApiListener::SyncClient(const JsonRpcConnection::Ptr& aclient, const Endpoint::Ptr& endpoint, bool needSync)
+{
+ Zone::Ptr eZone = endpoint->GetZone();
+
+ try {
+ {
+ ObjectLock olock(endpoint);
+
+ endpoint->SetSyncing(true);
+ }
+
+ Zone::Ptr myZone = Zone::GetLocalZone();
+ auto parent (myZone->GetParent());
+
+ if (parent == eZone || (!parent && eZone == myZone)) {
+ JsonRpcConnection::SendCertificateRequest(aclient, nullptr, String());
+
+ if (Utility::PathExists(ApiListener::GetCertificateRequestsDir())) {
+ Utility::Glob(ApiListener::GetCertificateRequestsDir() + "/*.json", [aclient](const String& newPath) {
+ JsonRpcConnection::SendCertificateRequest(aclient, nullptr, newPath);
+ }, GlobFile);
+ }
+ }
+
+ /* Make sure that the config updates are synced
+ * before the logs are replayed.
+ */
+
+ Log(LogInformation, "ApiListener")
+ << "Sending config updates for endpoint '" << endpoint->GetName() << "' in zone '" << eZone->GetName() << "'.";
+
+ /* sync zone file config */
+ SendConfigUpdate(aclient);
+
+ Log(LogInformation, "ApiListener")
+ << "Finished sending config file updates for endpoint '" << endpoint->GetName() << "' in zone '" << eZone->GetName() << "'.";
+
+ /* sync runtime config */
+ SendRuntimeConfigObjects(aclient);
+
+ Log(LogInformation, "ApiListener")
+ << "Finished sending runtime config updates for endpoint '" << endpoint->GetName() << "' in zone '" << eZone->GetName() << "'.";
+
+ if (!needSync) {
+ ObjectLock olock2(endpoint);
+ endpoint->SetSyncing(false);
+ return;
+ }
+
+ Log(LogInformation, "ApiListener")
+ << "Sending replay log for endpoint '" << endpoint->GetName() << "' in zone '" << eZone->GetName() << "'.";
+
+ ReplayLog(aclient);
+
+ if (eZone == Zone::GetLocalZone())
+ UpdateObjectAuthority();
+
+ Log(LogInformation, "ApiListener")
+ << "Finished sending replay log for endpoint '" << endpoint->GetName() << "' in zone '" << eZone->GetName() << "'.";
+ } catch (const std::exception& ex) {
+ {
+ ObjectLock olock2(endpoint);
+ endpoint->SetSyncing(false);
+ }
+
+ Log(LogCritical, "ApiListener")
+ << "Error while syncing endpoint '" << endpoint->GetName() << "': " << DiagnosticInformation(ex, false);
+
+ Log(LogDebug, "ApiListener")
+ << "Error while syncing endpoint '" << endpoint->GetName() << "': " << DiagnosticInformation(ex);
+ }
+
+ Log(LogInformation, "ApiListener")
+ << "Finished syncing endpoint '" << endpoint->GetName() << "' in zone '" << eZone->GetName() << "'.";
+}
+
+void ApiListener::ApiTimerHandler()
+{
+ double now = Utility::GetTime();
+
+ std::vector<int> files;
+ Utility::Glob(GetApiDir() + "log/*", [&files](const String& file) { LogGlobHandler(files, file); }, GlobFile);
+ std::sort(files.begin(), files.end());
+
+ for (int ts : files) {
+ bool need = false;
+ auto localZone (GetLocalEndpoint()->GetZone());
+
+ for (const Endpoint::Ptr& endpoint : ConfigType::GetObjectsByType<Endpoint>()) {
+ if (endpoint == GetLocalEndpoint())
+ continue;
+
+ auto zone (endpoint->GetZone());
+
+ /* only care for endpoints in a) the same zone b) our parent zone c) immediate child zones */
+ if (!(zone == localZone || zone == localZone->GetParent() || zone->GetParent() == localZone)) {
+ continue;
+ }
+
+ if (endpoint->GetLogDuration() >= 0 && ts < now - endpoint->GetLogDuration())
+ continue;
+
+ if (ts > endpoint->GetLocalLogPosition()) {
+ need = true;
+ break;
+ }
+ }
+
+ if (!need) {
+ String path = GetApiDir() + "log/" + Convert::ToString(ts);
+ Log(LogNotice, "ApiListener")
+ << "Removing old log file: " << path;
+ (void)unlink(path.CStr());
+ }
+ }
+
+ for (const Endpoint::Ptr& endpoint : ConfigType::GetObjectsByType<Endpoint>()) {
+ if (!endpoint->GetConnected())
+ continue;
+
+ double ts = endpoint->GetRemoteLogPosition();
+
+ if (ts == 0)
+ continue;
+
+ Dictionary::Ptr lmessage = new Dictionary({
+ { "jsonrpc", "2.0" },
+ { "method", "log::SetLogPosition" },
+ { "params", new Dictionary({
+ { "log_position", ts }
+ }) }
+ });
+
+ double maxTs = 0;
+
+ for (const JsonRpcConnection::Ptr& client : endpoint->GetClients()) {
+ if (client->GetTimestamp() > maxTs)
+ maxTs = client->GetTimestamp();
+ }
+
+ for (const JsonRpcConnection::Ptr& client : endpoint->GetClients()) {
+ if (client->GetTimestamp() == maxTs) {
+ client->SendMessage(lmessage);
+ } else {
+ client->Disconnect();
+ }
+ }
+
+ Log(LogNotice, "ApiListener")
+ << "Setting log position for identity '" << endpoint->GetName() << "': "
+ << Utility::FormatDateTime("%Y/%m/%d %H:%M:%S", ts);
+ }
+}
+
+void ApiListener::ApiReconnectTimerHandler()
+{
+ Zone::Ptr my_zone = Zone::GetLocalZone();
+
+ for (const Zone::Ptr& zone : ConfigType::GetObjectsByType<Zone>()) {
+ /* don't connect to global zones */
+ if (zone->GetGlobal())
+ continue;
+
+ /* only connect to endpoints in a) the same zone b) our parent zone c) immediate child zones */
+ if (my_zone != zone && my_zone != zone->GetParent() && zone != my_zone->GetParent()) {
+ Log(LogDebug, "ApiListener")
+ << "Not connecting to Zone '" << zone->GetName()
+ << "' because it's not in the same zone, a parent or a child zone.";
+ continue;
+ }
+
+ for (const Endpoint::Ptr& endpoint : zone->GetEndpoints()) {
+ /* don't connect to ourselves */
+ if (endpoint == GetLocalEndpoint()) {
+ Log(LogDebug, "ApiListener")
+ << "Not connecting to Endpoint '" << endpoint->GetName() << "' because that's us.";
+ continue;
+ }
+
+ /* don't try to connect to endpoints which don't have a host and port */
+ if (endpoint->GetHost().IsEmpty() || endpoint->GetPort().IsEmpty()) {
+ Log(LogDebug, "ApiListener")
+ << "Not connecting to Endpoint '" << endpoint->GetName()
+ << "' because the host/port attributes are missing.";
+ continue;
+ }
+
+ /* don't try to connect if there's already a connection attempt */
+ if (endpoint->GetConnecting()) {
+ Log(LogDebug, "ApiListener")
+ << "Not connecting to Endpoint '" << endpoint->GetName()
+ << "' because we're already trying to connect to it.";
+ continue;
+ }
+
+ /* don't try to connect if we're already connected */
+ if (endpoint->GetConnected()) {
+ Log(LogDebug, "ApiListener")
+ << "Not connecting to Endpoint '" << endpoint->GetName()
+ << "' because we're already connected to it.";
+ continue;
+ }
+
+ /* Set connecting state to prevent duplicated queue inserts later. */
+ endpoint->SetConnecting(true);
+
+ AddConnection(endpoint);
+ }
+ }
+
+ Endpoint::Ptr master = GetMaster();
+
+ if (master)
+ Log(LogNotice, "ApiListener")
+ << "Current zone master: " << master->GetName();
+
+ std::vector<String> names;
+ for (const Endpoint::Ptr& endpoint : ConfigType::GetObjectsByType<Endpoint>())
+ if (endpoint->GetConnected())
+ names.emplace_back(endpoint->GetName() + " (" + Convert::ToString(endpoint->GetClients().size()) + ")");
+
+ Log(LogNotice, "ApiListener")
+ << "Connected endpoints: " << Utility::NaturalJoin(names);
+}
+
+static void CleanupCertificateRequest(const String& path, double expiryTime)
+{
+#ifndef _WIN32
+ struct stat statbuf;
+ if (lstat(path.CStr(), &statbuf) < 0)
+ return;
+#else /* _WIN32 */
+ struct _stat statbuf;
+ if (_stat(path.CStr(), &statbuf) < 0)
+ return;
+#endif /* _WIN32 */
+
+ if (statbuf.st_mtime < expiryTime)
+ (void) unlink(path.CStr());
+}
+
+void ApiListener::CleanupCertificateRequestsTimerHandler()
+{
+ String requestsDir = GetCertificateRequestsDir();
+
+ if (Utility::PathExists(requestsDir)) {
+ /* remove certificate requests that are older than a week */
+ double expiryTime = Utility::GetTime() - 7 * 24 * 60 * 60;
+ Utility::Glob(requestsDir + "/*.json", [expiryTime](const String& path) {
+ CleanupCertificateRequest(path, expiryTime);
+ }, GlobFile);
+ }
+}
+
+void ApiListener::RelayMessage(const MessageOrigin::Ptr& origin,
+ const ConfigObject::Ptr& secobj, const Dictionary::Ptr& message, bool log)
+{
+ if (!IsActive())
+ return;
+
+ m_RelayQueue.Enqueue([this, origin, secobj, message, log]() { SyncRelayMessage(origin, secobj, message, log); }, PriorityNormal, true);
+}
+
+void ApiListener::PersistMessage(const Dictionary::Ptr& message, const ConfigObject::Ptr& secobj)
+{
+ double ts = message->Get("ts");
+
+ ASSERT(ts != 0);
+
+ Dictionary::Ptr pmessage = new Dictionary();
+ pmessage->Set("timestamp", ts);
+
+ pmessage->Set("message", JsonEncode(message));
+
+ if (secobj) {
+ Dictionary::Ptr secname = new Dictionary();
+ secname->Set("type", secobj->GetReflectionType()->GetName());
+ secname->Set("name", secobj->GetName());
+ pmessage->Set("secobj", secname);
+ }
+
+ std::unique_lock<std::mutex> lock(m_LogLock);
+ if (m_LogFile) {
+ NetString::WriteStringToStream(m_LogFile, JsonEncode(pmessage));
+ m_LogMessageCount++;
+ SetLogMessageTimestamp(ts);
+
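+ // Rotate the replay log once it holds more than 50000 messages.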
+ if (m_LogMessageCount > 50000) {
+ CloseLogFile();
+ RotateLogFile();
+ OpenLogFile();
+ }
+ }
+}
+
+void ApiListener::SyncSendMessage(const Endpoint::Ptr& endpoint, const Dictionary::Ptr& message)
+{
+ ObjectLock olock(endpoint);
+
+ if (!endpoint->GetSyncing()) {
+ Log(LogNotice, "ApiListener")
+ << "Sending message '" << message->Get("method") << "' to '" << endpoint->GetName() << "'";
+
+ double maxTs = 0;
+
+ for (const JsonRpcConnection::Ptr& client : endpoint->GetClients()) {
+ if (client->GetTimestamp() > maxTs)
+ maxTs = client->GetTimestamp();
+ }
+
+ for (const JsonRpcConnection::Ptr& client : endpoint->GetClients()) {
+ if (client->GetTimestamp() != maxTs)
+ continue;
+
+ client->SendMessage(message);
+ }
+ }
+}
+
+/**
+ * Relay a message to a directly connected zone or to a global zone.
+ * If some other zone is passed as the target zone, it is not relayed.
+ *
+ * @param targetZone The zone to relay to
+ * @param origin Information about where this message is relayed from (if it was not generated locally)
+ * @param message The message to relay
+ * @param currentZoneMaster The current master node of the local zone
+ * @return true if the message has been relayed to all relevant endpoints,
+ * false if it hasn't and must be persisted in the replay log
+ */
+bool ApiListener::RelayMessageOne(const Zone::Ptr& targetZone, const MessageOrigin::Ptr& origin, const Dictionary::Ptr& message, const Endpoint::Ptr& currentZoneMaster)
+{
+ ASSERT(targetZone);
+
+ Zone::Ptr localZone = Zone::GetLocalZone();
+
+ /* only relay the message to a) the same local zone, b) the parent zone and c) direct child zones. Exception is a global zone. */
+ if (!targetZone->GetGlobal() &&
+ targetZone != localZone &&
+ targetZone != localZone->GetParent() &&
+ targetZone->GetParent() != localZone) {
+ return true;
+ }
+
+ Endpoint::Ptr localEndpoint = GetLocalEndpoint();
+
+ std::vector<Endpoint::Ptr> skippedEndpoints;
+
+ std::set<Zone::Ptr> allTargetZones;
+ if (targetZone->GetGlobal()) {
+ /* if the zone is global, the message has to be relayed to our local zone and direct children */
+ allTargetZones.insert(localZone);
+ for (const Zone::Ptr& zone : ConfigType::GetObjectsByType<Zone>()) {
+ if (zone->GetParent() == localZone) {
+ allTargetZones.insert(zone);
+ }
+ }
+ } else {
+ /* whereas if it's not global, the message is just relayed to the zone itself */
+ allTargetZones.insert(targetZone);
+ }
+
+ bool needsReplay = false;
+
+ for (const Zone::Ptr& currentTargetZone : allTargetZones) {
+ bool relayed = false, log_needed = false, log_done = false;
+
+ for (const Endpoint::Ptr& targetEndpoint : currentTargetZone->GetEndpoints()) {
+ /* Don't relay messages to ourselves. */
+ if (targetEndpoint == localEndpoint)
+ continue;
+
+ log_needed = true;
+
+ /* Don't relay messages to disconnected endpoints. */
+ if (!targetEndpoint->GetConnected()) {
+ if (currentTargetZone == localZone)
+ log_done = false;
+
+ continue;
+ }
+
+ log_done = true;
+
+ /* Don't relay the message to the zone through more than one endpoint unless this is our own zone.
+ * 'relayed' is set to true on success below, enabling the checks in the second iteration.
+ */
+ if (relayed && currentTargetZone != localZone) {
+ skippedEndpoints.push_back(targetEndpoint);
+ continue;
+ }
+
+ /* Don't relay messages back to the endpoint which we got the message from. */
+ if (origin && origin->FromClient && targetEndpoint == origin->FromClient->GetEndpoint()) {
+ skippedEndpoints.push_back(targetEndpoint);
+ continue;
+ }
+
+ /* Don't relay messages back to the zone which we got the message from. */
+ if (origin && origin->FromZone && currentTargetZone == origin->FromZone) {
+ skippedEndpoints.push_back(targetEndpoint);
+ continue;
+ }
+
+ /* Only relay message to the zone master if we're not currently the zone master.
+ * e1 is zone master, e2 and e3 are zone members.
+ *
+ * Message is sent from e2 or e3:
+ * !isMaster == true
+ * targetEndpoint e1 is zone master -> send the message
+ * targetEndpoint e3 is not zone master -> skip it, avoid routing loops
+ *
+ * Message is sent from e1:
+ * !isMaster == false -> we are the zone routing master, send the messages to e2 and e3.
+ */
+ bool isMaster = (currentZoneMaster == localEndpoint);
+
+ if (!isMaster && targetEndpoint != currentZoneMaster) {
+ skippedEndpoints.push_back(targetEndpoint);
+ continue;
+ }
+
+ relayed = true;
+
+ SyncSendMessage(targetEndpoint, message);
+ }
+
+ if (log_needed && !log_done) {
+ needsReplay = true;
+ }
+ }
+
+ if (!skippedEndpoints.empty()) {
+ double ts = message->Get("ts");
+
+ for (const Endpoint::Ptr& skippedEndpoint : skippedEndpoints)
+ skippedEndpoint->SetLocalLogPosition(ts);
+ }
+
+ return !needsReplay;
+}
+
+void ApiListener::SyncRelayMessage(const MessageOrigin::Ptr& origin,
+ const ConfigObject::Ptr& secobj, const Dictionary::Ptr& message, bool log)
+{
+ double ts = Utility::GetTime();
+ message->Set("ts", ts);
+
+ Log(LogNotice, "ApiListener")
+ << "Relaying '" << message->Get("method") << "' message";
+
+ if (origin && origin->FromZone)
+ message->Set("originZone", origin->FromZone->GetName());
+
+ Zone::Ptr target_zone;
+
+ if (secobj) {
+ if (secobj->GetReflectionType() == Zone::TypeInstance)
+ target_zone = static_pointer_cast<Zone>(secobj);
+ else
+ target_zone = static_pointer_cast<Zone>(secobj->GetZone());
+ }
+
+ if (!target_zone)
+ target_zone = Zone::GetLocalZone();
+
+ Endpoint::Ptr master = GetMaster();
+
+ bool need_log = !RelayMessageOne(target_zone, origin, message, master);
+
+ for (const Zone::Ptr& zone : target_zone->GetAllParentsRaw()) {
+ if (!RelayMessageOne(zone, origin, message, master))
+ need_log = true;
+ }
+
+ if (log && need_log)
+ PersistMessage(message, secobj);
+}
+
+/* must hold m_LogLock */
+void ApiListener::OpenLogFile()
+{
+ String path = GetApiDir() + "log/current";
+
+ Utility::MkDirP(Utility::DirName(path), 0750);
+
+ std::unique_ptr<std::fstream> fp = std::make_unique<std::fstream>(path.CStr(), std::fstream::out | std::ofstream::app);
+
+ if (!fp->good()) {
+ Log(LogWarning, "ApiListener")
+ << "Could not open spool file: " << path;
+ return;
+ }
+
+ m_LogFile = new StdioStream(fp.release(), true);
+ m_LogMessageCount = 0;
+ SetLogMessageTimestamp(Utility::GetTime());
+}
+
+/* must hold m_LogLock */
+void ApiListener::CloseLogFile()
+{
+ if (!m_LogFile)
+ return;
+
+ m_LogFile->Close();
+ m_LogFile.reset();
+}
+
+/* must hold m_LogLock */
+void ApiListener::RotateLogFile()
+{
+ double ts = GetLogMessageTimestamp();
+
+ if (ts == 0)
+ ts = Utility::GetTime();
+
+ String oldpath = GetApiDir() + "log/current";
+ String newpath = GetApiDir() + "log/" + Convert::ToString(static_cast<int>(ts)+1);
+
+ // If the log is being rotated more than once per second,
+ // don't overwrite the previous one, but silently deny rotation.
+ if (!Utility::PathExists(newpath)) {
+ try {
+ Utility::RenameFile(oldpath, newpath);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "ApiListener")
+ << "Cannot rotate replay log file from '" << oldpath << "' to '"
+ << newpath << "': " << ex.what();
+ }
+ }
+}
+
+void ApiListener::LogGlobHandler(std::vector<int>& files, const String& file)
+{
+ String name = Utility::BaseName(file);
+
+ if (name == "current")
+ return;
+
+ int ts;
+
+ try {
+ ts = Convert::ToLong(name);
+ } catch (const std::exception&) {
+ return;
+ }
+
+ files.push_back(ts);
+}
+
+void ApiListener::ReplayLog(const JsonRpcConnection::Ptr& client)
+{
+ Endpoint::Ptr endpoint = client->GetEndpoint();
+
+ if (endpoint->GetLogDuration() == 0) {
+ ObjectLock olock2(endpoint);
+ endpoint->SetSyncing(false);
+ return;
+ }
+
+ CONTEXT("Replaying log for Endpoint '" << endpoint->GetName() << "'");
+
+ int count = -1;
+ double peer_ts = endpoint->GetLocalLogPosition();
+ double logpos_ts = peer_ts;
+ bool last_sync = false;
+
+ Endpoint::Ptr target_endpoint = client->GetEndpoint();
+ ASSERT(target_endpoint);
+
+ Zone::Ptr target_zone = target_endpoint->GetZone();
+
+ if (!target_zone) {
+ ObjectLock olock2(endpoint);
+ endpoint->SetSyncing(false);
+ return;
+ }
+
+ for (;;) {
+ std::unique_lock<std::mutex> lock(m_LogLock);
+
+ CloseLogFile();
+
+ if (count == -1 || count > 50000) {
+ OpenLogFile();
+ lock.unlock();
+ } else {
+ last_sync = true;
+ }
+
+ count = 0;
+
+ std::vector<int> files;
+ Utility::Glob(GetApiDir() + "log/*", [&files](const String& file) { LogGlobHandler(files, file); }, GlobFile);
+ std::sort(files.begin(), files.end());
+
+ std::vector<std::pair<int, String>> allFiles;
+
+ for (int ts : files) {
+ if (ts >= peer_ts) {
+ allFiles.emplace_back(ts, GetApiDir() + "log/" + Convert::ToString(ts));
+ }
+ }
+
+ allFiles.emplace_back(Utility::GetTime() + 1, GetApiDir() + "log/current");
+
+ for (auto& file : allFiles) {
+ Log(LogNotice, "ApiListener")
+ << "Replaying log: " << file.second;
+
+ auto *fp = new std::fstream(file.second.CStr(), std::fstream::in | std::fstream::binary);
+ StdioStream::Ptr logStream = new StdioStream(fp, true);
+
+ String message;
+ StreamReadContext src;
+ while (true) {
+ Dictionary::Ptr pmessage;
+
+ try {
+ StreamReadStatus srs = NetString::ReadStringFromStream(logStream, &message, src);
+
+ if (srs == StatusEof)
+ break;
+
+ if (srs != StatusNewItem)
+ continue;
+
+ pmessage = JsonDecode(message);
+ } catch (const std::exception&) {
+ Log(LogWarning, "ApiListener")
+ << "Unexpected end-of-file for cluster log: " << file.second;
+
+ /* Log files may be incomplete or corrupted. This is perfectly OK. */
+ break;
+ }
+
+ if (pmessage->Get("timestamp") <= peer_ts)
+ continue;
+
+ Dictionary::Ptr secname = pmessage->Get("secobj");
+
+ if (secname) {
+ ConfigObject::Ptr secobj = ConfigObject::GetObject(secname->Get("type"), secname->Get("name"));
+
+ if (!secobj)
+ continue;
+
+ if (!target_zone->CanAccessObject(secobj))
+ continue;
+ }
+
+ try {
+ client->SendRawMessage(pmessage->Get("message"));
+ count++;
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "ApiListener")
+ << "Error while replaying log for endpoint '" << endpoint->GetName() << "': " << DiagnosticInformation(ex, false);
+
+ Log(LogDebug, "ApiListener")
+ << "Error while replaying log for endpoint '" << endpoint->GetName() << "': " << DiagnosticInformation(ex);
+
+ break;
+ }
+
+ peer_ts = pmessage->Get("timestamp");
+
+ if (file.first > logpos_ts + 10) {
+ logpos_ts = file.first;
+
+ Dictionary::Ptr lmessage = new Dictionary({
+ { "jsonrpc", "2.0" },
+ { "method", "log::SetLogPosition" },
+ { "params", new Dictionary({
+ { "log_position", logpos_ts }
+ }) }
+ });
+
+ client->SendMessage(lmessage);
+ }
+ }
+
+ logStream->Close();
+ }
+
+ if (count > 0) {
+ Log(LogInformation, "ApiListener")
+ << "Replayed " << count << " messages.";
+ }
+ else {
+ Log(LogNotice, "ApiListener")
+ << "Replayed " << count << " messages.";
+ }
+
+ if (last_sync) {
+ {
+ ObjectLock olock2(endpoint);
+ endpoint->SetSyncing(false);
+ }
+
+ OpenLogFile();
+
+ break;
+ }
+ }
+}
+
+void ApiListener::StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata)
+{
+ std::pair<Dictionary::Ptr, Dictionary::Ptr> stats;
+
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return;
+
+ stats = listener->GetStatus();
+
+ ObjectLock olock(stats.second);
+ for (const Dictionary::Pair& kv : stats.second)
+ perfdata->Add(new PerfdataValue("api_" + kv.first, kv.second));
+
+ status->Set("api", stats.first);
+}
+
+std::pair<Dictionary::Ptr, Dictionary::Ptr> ApiListener::GetStatus()
+{
+ Dictionary::Ptr perfdata = new Dictionary();
+
+ /* cluster stats */
+
+ double allEndpoints = 0;
+ Array::Ptr allNotConnectedEndpoints = new Array();
+ Array::Ptr allConnectedEndpoints = new Array();
+
+ Zone::Ptr my_zone = Zone::GetLocalZone();
+
+ Dictionary::Ptr connectedZones = new Dictionary();
+
+ for (const Zone::Ptr& zone : ConfigType::GetObjectsByType<Zone>()) {
+ /* only check endpoints in a) the same zone b) our parent zone c) immediate child zones */
+ if (my_zone != zone && my_zone != zone->GetParent() && zone != my_zone->GetParent()) {
+ Log(LogDebug, "ApiListener")
+ << "Not checking connection to Zone '" << zone->GetName() << "' because it's not in the same zone, a parent or a child zone.";
+ continue;
+ }
+
+ bool zoneConnected = false;
+ int countZoneEndpoints = 0;
+ double zoneLag = 0;
+
+ ArrayData zoneEndpoints;
+
+ for (const Endpoint::Ptr& endpoint : zone->GetEndpoints()) {
+ zoneEndpoints.emplace_back(endpoint->GetName());
+
+ if (endpoint->GetName() == GetIdentity())
+ continue;
+
+ double eplag = CalculateZoneLag(endpoint);
+
+ if (eplag > 0 && eplag > zoneLag)
+ zoneLag = eplag;
+
+ allEndpoints++;
+ countZoneEndpoints++;
+
+ if (!endpoint->GetConnected()) {
+ allNotConnectedEndpoints->Add(endpoint->GetName());
+ } else {
+ allConnectedEndpoints->Add(endpoint->GetName());
+ zoneConnected = true;
+ }
+ }
+
+ /* if the zone's only endpoint is ourselves, there's nothing to connect to, so report the zone as connected */
+ if (zone->GetEndpoints().size() == 1 && countZoneEndpoints == 0)
+ zoneConnected = true;
+
+ String parentZoneName;
+ Zone::Ptr parentZone = zone->GetParent();
+ if (parentZone)
+ parentZoneName = parentZone->GetName();
+
+ Dictionary::Ptr zoneStats = new Dictionary({
+ { "connected", zoneConnected },
+ { "client_log_lag", zoneLag },
+ { "endpoints", new Array(std::move(zoneEndpoints)) },
+ { "parent_zone", parentZoneName }
+ });
+
+ connectedZones->Set(zone->GetName(), zoneStats);
+ }
+
+ /* connection stats */
+ size_t jsonRpcAnonymousClients = GetAnonymousClients().size();
+ size_t httpClients = GetHttpClients().size();
+ size_t syncQueueItems = m_SyncQueue.GetLength();
+ size_t relayQueueItems = m_RelayQueue.GetLength();
+ double workQueueItemRate = JsonRpcConnection::GetWorkQueueRate();
+ double syncQueueItemRate = m_SyncQueue.GetTaskCount(60) / 60.0;
+ double relayQueueItemRate = m_RelayQueue.GetTaskCount(60) / 60.0;
+
+ Dictionary::Ptr status = new Dictionary({
+ { "identity", GetIdentity() },
+ { "num_endpoints", allEndpoints },
+ { "num_conn_endpoints", allConnectedEndpoints->GetLength() },
+ { "num_not_conn_endpoints", allNotConnectedEndpoints->GetLength() },
+ { "conn_endpoints", allConnectedEndpoints },
+ { "not_conn_endpoints", allNotConnectedEndpoints },
+
+ { "zones", connectedZones },
+
+ { "json_rpc", new Dictionary({
+ { "anonymous_clients", jsonRpcAnonymousClients },
+ { "sync_queue_items", syncQueueItems },
+ { "relay_queue_items", relayQueueItems },
+ { "work_queue_item_rate", workQueueItemRate },
+ { "sync_queue_item_rate", syncQueueItemRate },
+ { "relay_queue_item_rate", relayQueueItemRate }
+ }) },
+
+ { "http", new Dictionary({
+ { "clients", httpClients }
+ }) }
+ });
+
+ /* performance data */
+ perfdata->Set("num_endpoints", allEndpoints);
+ perfdata->Set("num_conn_endpoints", Convert::ToDouble(allConnectedEndpoints->GetLength()));
+ perfdata->Set("num_not_conn_endpoints", Convert::ToDouble(allNotConnectedEndpoints->GetLength()));
+
+ perfdata->Set("num_json_rpc_anonymous_clients", jsonRpcAnonymousClients);
+ perfdata->Set("num_http_clients", httpClients);
+ perfdata->Set("num_json_rpc_sync_queue_items", syncQueueItems);
+ perfdata->Set("num_json_rpc_relay_queue_items", relayQueueItems);
+
+ perfdata->Set("num_json_rpc_work_queue_item_rate", workQueueItemRate);
+ perfdata->Set("num_json_rpc_sync_queue_item_rate", syncQueueItemRate);
+ perfdata->Set("num_json_rpc_relay_queue_item_rate", relayQueueItemRate);
+
+ return std::make_pair(status, perfdata);
+}
+
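+// Returns the replay log lag in seconds for an endpoint that is syncing or
+// disconnected and has reported a log position before; otherwise 0 is returned.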
+double ApiListener::CalculateZoneLag(const Endpoint::Ptr& endpoint)
+{
+ double remoteLogPosition = endpoint->GetRemoteLogPosition();
+ double eplag = Utility::GetTime() - remoteLogPosition;
+
+ if ((endpoint->GetSyncing() || !endpoint->GetConnected()) && remoteLogPosition != 0)
+ return eplag;
+
+ return 0;
+}
+
+bool ApiListener::AddAnonymousClient(const JsonRpcConnection::Ptr& aclient)
+{
+ std::unique_lock<std::mutex> lock(m_AnonymousClientsLock);
+
+ if (GetMaxAnonymousClients() >= 0 && (long)m_AnonymousClients.size() + 1 > (long)GetMaxAnonymousClients())
+ return false;
+
+ m_AnonymousClients.insert(aclient);
+ return true;
+}
+
+void ApiListener::RemoveAnonymousClient(const JsonRpcConnection::Ptr& aclient)
+{
+ std::unique_lock<std::mutex> lock(m_AnonymousClientsLock);
+ m_AnonymousClients.erase(aclient);
+}
+
+std::set<JsonRpcConnection::Ptr> ApiListener::GetAnonymousClients() const
+{
+ std::unique_lock<std::mutex> lock(m_AnonymousClientsLock);
+ return m_AnonymousClients;
+}
+
+void ApiListener::AddHttpClient(const HttpServerConnection::Ptr& aclient)
+{
+ std::unique_lock<std::mutex> lock(m_HttpClientsLock);
+ m_HttpClients.insert(aclient);
+}
+
+void ApiListener::RemoveHttpClient(const HttpServerConnection::Ptr& aclient)
+{
+ std::unique_lock<std::mutex> lock(m_HttpClientsLock);
+ m_HttpClients.erase(aclient);
+}
+
+std::set<HttpServerConnection::Ptr> ApiListener::GetHttpClients() const
+{
+ std::unique_lock<std::mutex> lock(m_HttpClientsLock);
+ return m_HttpClients;
+}
+
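+// Logs a version value that has already been divided by 100 (major*100 + minor)
+// as "major.minor.x", e.g. 214 is printed as "2.14.x".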
+static void LogAppVersion(unsigned long version, Log& log)
+{
+ log << version / 100u << "." << version % 100u << ".x";
+}
+
+Value ApiListener::HelloAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ if (origin) {
+ auto client (origin->FromClient);
+
+ if (client) {
+ auto endpoint (client->GetEndpoint());
+
+ if (endpoint) {
+ unsigned long nodeVersion = params->Get("version");
+
+ endpoint->SetIcingaVersion(nodeVersion);
+ endpoint->SetCapabilities((double)params->Get("capabilities"));
+
+ if (nodeVersion == 0u) {
+ nodeVersion = 21200;
+ }
+
+ if (endpoint->GetZone()->GetParent() == Zone::GetLocalZone()) {
+ switch (l_AppVersionInt / 100 - nodeVersion / 100) {
+ case 0:
+ case 1:
+ break;
+ default:
+ Log log (LogWarning, "ApiListener");
+ log << "Unexpected Icinga version of endpoint '" << endpoint->GetName() << "': ";
+
+ LogAppVersion(nodeVersion / 100u, log);
+ log << " Expected one of: ";
+
+ LogAppVersion(l_AppVersionInt / 100u, log);
+ log << ", ";
+
+ LogAppVersion((l_AppVersionInt / 100u - 1u), log);
+ }
+ }
+ }
+ }
+ }
+
+ return Empty;
+}
+
+Endpoint::Ptr ApiListener::GetLocalEndpoint() const
+{
+ return m_LocalEndpoint;
+}
+
+void ApiListener::UpdateActivePackageStagesCache()
+{
+ std::unique_lock<std::mutex> lock(m_ActivePackageStagesLock);
+
+ for (auto package : ConfigPackageUtility::GetPackages()) {
+ String activeStage;
+
+ try {
+ activeStage = ConfigPackageUtility::GetActiveStageFromFile(package);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "ApiListener")
+ << ex.what();
+ continue;
+ }
+
+ Log(LogNotice, "ApiListener")
+ << "Updating cache: Config package '" << package << "' has active stage '" << activeStage << "'.";
+
+ m_ActivePackageStages[package] = activeStage;
+ }
+}
+
+void ApiListener::CheckApiPackageIntegrity()
+{
+ std::unique_lock<std::mutex> lock(m_ActivePackageStagesLock);
+
+ for (auto package : ConfigPackageUtility::GetPackages()) {
+ String activeStage;
+ try {
+ activeStage = ConfigPackageUtility::GetActiveStageFromFile(package);
+ } catch (const std::exception& ex) {
+ /* An error means that the stage is broken, try to repair it. */
+ auto it = m_ActivePackageStages.find(package);
+
+ if (it == m_ActivePackageStages.end())
+ continue;
+
+ String activeStageCached = it->second;
+
+ Log(LogInformation, "ApiListener")
+ << "Repairing broken API config package '" << package
+ << "', setting active stage '" << activeStageCached << "'.";
+
+ ConfigPackageUtility::SetActiveStageToFile(package, activeStageCached);
+ }
+ }
+}
+
+void ApiListener::SetActivePackageStage(const String& package, const String& stage)
+{
+ std::unique_lock<std::mutex> lock(m_ActivePackageStagesLock);
+ m_ActivePackageStages[package] = stage;
+}
+
+String ApiListener::GetActivePackageStage(const String& package)
+{
+ std::unique_lock<std::mutex> lock(m_ActivePackageStagesLock);
+
+ if (m_ActivePackageStages.find(package) == m_ActivePackageStages.end())
+ BOOST_THROW_EXCEPTION(ScriptError("Package " + package + " has no active stage."));
+
+ return m_ActivePackageStages[package];
+}
+
+void ApiListener::RemoveActivePackageStage(const String& package)
+{
+ /* This is the rare occasion when a package has been deleted. */
+ std::unique_lock<std::mutex> lock(m_ActivePackageStagesLock);
+
+ auto it = m_ActivePackageStages.find(package);
+
+ if (it == m_ActivePackageStages.end())
+ return;
+
+ m_ActivePackageStages.erase(it);
+}
+
+void ApiListener::ValidateTlsProtocolmin(const Lazy<String>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<ApiListener>::ValidateTlsProtocolmin(lvalue, utils);
+
+ try {
+ ResolveTlsProtocolVersion(lvalue());
+ } catch (const std::exception& ex) {
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "tls_protocolmin" }, ex.what()));
+ }
+}
+
+void ApiListener::ValidateTlsHandshakeTimeout(const Lazy<double>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<ApiListener>::ValidateTlsHandshakeTimeout(lvalue, utils);
+
+ if (lvalue() <= 0)
+ BOOST_THROW_EXCEPTION(ValidationError(this, { "tls_handshake_timeout" }, "Value must be greater than 0."));
+}
+
+bool ApiListener::IsHACluster()
+{
+ Zone::Ptr zone = Zone::GetLocalZone();
+
+ if (!zone)
+ return false;
+
+ return zone->IsSingleInstance();
+}
+
+/* Provide a helper function for zone origin name. */
+String ApiListener::GetFromZoneName(const Zone::Ptr& fromZone)
+{
+ String fromZoneName;
+
+ if (fromZone) {
+ fromZoneName = fromZone->GetName();
+ } else {
+ Zone::Ptr lzone = Zone::GetLocalZone();
+
+ if (lzone)
+ fromZoneName = lzone->GetName();
+ }
+
+ return fromZoneName;
+}
+
+void ApiListener::UpdateStatusFile(boost::asio::ip::tcp::endpoint localEndpoint)
+{
+ String path = Configuration::CacheDir + "/api-state.json";
+
+ Utility::SaveJsonFile(path, 0644, new Dictionary({
+ {"host", String(localEndpoint.address().to_string())},
+ {"port", localEndpoint.port()}
+ }));
+}
+
+void ApiListener::RemoveStatusFile()
+{
+ String path = Configuration::CacheDir + "/api-state.json";
+
+ Utility::Remove(path);
+}
diff --git a/lib/remote/apilistener.hpp b/lib/remote/apilistener.hpp
new file mode 100644
index 0000000..fced0a8
--- /dev/null
+++ b/lib/remote/apilistener.hpp
@@ -0,0 +1,265 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef APILISTENER_H
+#define APILISTENER_H
+
+#include "remote/apilistener-ti.hpp"
+#include "remote/jsonrpcconnection.hpp"
+#include "remote/httpserverconnection.hpp"
+#include "remote/endpoint.hpp"
+#include "remote/messageorigin.hpp"
+#include "base/configobject.hpp"
+#include "base/process.hpp"
+#include "base/shared.hpp"
+#include "base/timer.hpp"
+#include "base/workqueue.hpp"
+#include "base/tcpsocket.hpp"
+#include "base/tlsstream.hpp"
+#include "base/threadpool.hpp"
+#include <atomic>
+#include <boost/asio/io_context.hpp>
+#include <boost/asio/ip/tcp.hpp>
+#include <boost/asio/spawn.hpp>
+#include <boost/asio/ssl/context.hpp>
+#include <boost/thread/shared_mutex.hpp>
+#include <cstdint>
+#include <mutex>
+#include <set>
+
+namespace icinga
+{
+
+class JsonRpcConnection;
+
+/**
+ * @ingroup remote
+ */
+struct ConfigDirInformation
+{
+ Dictionary::Ptr UpdateV1;
+ Dictionary::Ptr UpdateV2;
+ Dictionary::Ptr Checksums;
+};
+
+/**
+ * If the version reported by icinga::Hello is not enough to tell whether
+ * the peer has a specific capability, add the latter to this bitmask.
+ *
+ * Note that due to the capability exchange via JSON-RPC and the state storage via JSON
+ * the bitmask numbers are stored in IEEE 754 64-bit floats.
+ * The latter have only 53 mantissa bits, which limits the bitmask.
+ * To avoid running out of bits:
+ *
+ * Once all Icinga versions which don't have a specific capability are completely EOL,
+ * remove the respective capability checks and assume the peer has the capability.
+ * Once all Icinga versions which still check for the capability are completely EOL,
+ * remove the respective bit from icinga::Hello.
+ * Once all Icinga versions which still have the respective bit in icinga::Hello
+ * are completely EOL, remove the bit here.
+ * Once all Icinga versions which still have the respective bit here
+ * are completely EOL, feel free to re-use the bit.
+ *
+ * completely EOL = not supported, even if one of our important customers used it, and
+ * not expected to appear in a multi-level cluster, e.g. in a 4-level cluster with
+ * v2.11 -> v2.10 -> v2.9 -> v2.8, v2.7 isn't part of it
+ *
+ * @ingroup remote
+ */
+enum class ApiCapabilities : uint_fast64_t
+{
+ ExecuteArbitraryCommand = 1u << 0u,
+ IfwApiCheckCommand = 1u << 1u,
+};
+
+/**
+* @ingroup remote
+*/
+class ApiListener final : public ObjectImpl<ApiListener>
+{
+public:
+ DECLARE_OBJECT(ApiListener);
+ DECLARE_OBJECTNAME(ApiListener);
+
+ static boost::signals2::signal<void(bool)> OnMasterChanged;
+
+ ApiListener();
+
+ static String GetApiDir();
+ static String GetApiZonesDir();
+ static String GetApiZonesStageDir();
+ static String GetCertsDir();
+ static String GetCaDir();
+ static String GetCertificateRequestsDir();
+
+ std::shared_ptr<X509> RenewCert(const std::shared_ptr<X509>& cert, bool ca = false);
+ void UpdateSSLContext();
+
+ static ApiListener::Ptr GetInstance();
+
+ Endpoint::Ptr GetMaster() const;
+ bool IsMaster() const;
+
+ Endpoint::Ptr GetLocalEndpoint() const;
+
+ void SyncSendMessage(const Endpoint::Ptr& endpoint, const Dictionary::Ptr& message);
+ void RelayMessage(const MessageOrigin::Ptr& origin, const ConfigObject::Ptr& secobj, const Dictionary::Ptr& message, bool log);
+
+ static void StatsFunc(const Dictionary::Ptr& status, const Array::Ptr& perfdata);
+ std::pair<Dictionary::Ptr, Dictionary::Ptr> GetStatus();
+
+ bool AddAnonymousClient(const JsonRpcConnection::Ptr& aclient);
+ void RemoveAnonymousClient(const JsonRpcConnection::Ptr& aclient);
+ std::set<JsonRpcConnection::Ptr> GetAnonymousClients() const;
+
+ void AddHttpClient(const HttpServerConnection::Ptr& aclient);
+ void RemoveHttpClient(const HttpServerConnection::Ptr& aclient);
+ std::set<HttpServerConnection::Ptr> GetHttpClients() const;
+
+ static double CalculateZoneLag(const Endpoint::Ptr& endpoint);
+
+ /* filesync */
+ static Value ConfigUpdateHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+ void HandleConfigUpdate(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+
+ /* configsync */
+ static void ConfigUpdateObjectHandler(const ConfigObject::Ptr& object, const Value& cookie);
+ static Value ConfigUpdateObjectAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+ static Value ConfigDeleteObjectAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+
+ /* API config packages */
+ void SetActivePackageStage(const String& package, const String& stage);
+ String GetActivePackageStage(const String& package);
+ void RemoveActivePackageStage(const String& package);
+
+ static Value HelloAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+
+ static void UpdateObjectAuthority();
+
+ static bool IsHACluster();
+ static String GetFromZoneName(const Zone::Ptr& fromZone);
+
+ static String GetDefaultCertPath();
+ static String GetDefaultKeyPath();
+ static String GetDefaultCaPath();
+
+ static inline
+ bool UpdatedObjectAuthority()
+ {
+ return m_UpdatedObjectAuthority.load();
+ }
+
+ double GetTlsHandshakeTimeout() const override;
+ void SetTlsHandshakeTimeout(double value, bool suppress_events, const Value& cookie) override;
+
+protected:
+ void OnConfigLoaded() override;
+ void OnAllConfigLoaded() override;
+ void Start(bool runtimeCreated) override;
+ void Stop(bool runtimeDeleted) override;
+
+ void ValidateTlsProtocolmin(const Lazy<String>& lvalue, const ValidationUtils& utils) override;
+ void ValidateTlsHandshakeTimeout(const Lazy<double>& lvalue, const ValidationUtils& utils) override;
+
+private:
+ Shared<boost::asio::ssl::context>::Ptr m_SSLContext;
+ boost::shared_mutex m_SSLContextMutex;
+
+ mutable std::mutex m_AnonymousClientsLock;
+ mutable std::mutex m_HttpClientsLock;
+ std::set<JsonRpcConnection::Ptr> m_AnonymousClients;
+ std::set<HttpServerConnection::Ptr> m_HttpClients;
+
+ Timer::Ptr m_Timer;
+ Timer::Ptr m_ReconnectTimer;
+ Timer::Ptr m_AuthorityTimer;
+ Timer::Ptr m_CleanupCertificateRequestsTimer;
+ Timer::Ptr m_ApiPackageIntegrityTimer;
+ Timer::Ptr m_RenewOwnCertTimer;
+
+ Endpoint::Ptr m_LocalEndpoint;
+
+ static ApiListener::Ptr m_Instance;
+ static std::atomic<bool> m_UpdatedObjectAuthority;
+
+ void ApiTimerHandler();
+ void ApiReconnectTimerHandler();
+ void CleanupCertificateRequestsTimerHandler();
+ void CheckApiPackageIntegrity();
+
+ bool AddListener(const String& node, const String& service);
+ void AddConnection(const Endpoint::Ptr& endpoint);
+
+ void NewClientHandler(
+ boost::asio::yield_context yc, const Shared<boost::asio::io_context::strand>::Ptr& strand,
+ const Shared<AsioTlsStream>::Ptr& client, const String& hostname, ConnectionRole role
+ );
+ void NewClientHandlerInternal(
+ boost::asio::yield_context yc, const Shared<boost::asio::io_context::strand>::Ptr& strand,
+ const Shared<AsioTlsStream>::Ptr& client, const String& hostname, ConnectionRole role
+ );
+ void ListenerCoroutineProc(boost::asio::yield_context yc, const Shared<boost::asio::ip::tcp::acceptor>::Ptr& server);
+
+ WorkQueue m_RelayQueue;
+ WorkQueue m_SyncQueue{0, 4};
+
+ std::mutex m_LogLock;
+ Stream::Ptr m_LogFile;
+ size_t m_LogMessageCount{0};
+
+ bool RelayMessageOne(const Zone::Ptr& zone, const MessageOrigin::Ptr& origin, const Dictionary::Ptr& message, const Endpoint::Ptr& currentZoneMaster);
+ void SyncRelayMessage(const MessageOrigin::Ptr& origin, const ConfigObject::Ptr& secobj, const Dictionary::Ptr& message, bool log);
+ void PersistMessage(const Dictionary::Ptr& message, const ConfigObject::Ptr& secobj);
+
+ void OpenLogFile();
+ void RotateLogFile();
+ void CloseLogFile();
+ static void LogGlobHandler(std::vector<int>& files, const String& file);
+ void ReplayLog(const JsonRpcConnection::Ptr& client);
+
+ static void CopyCertificateFile(const String& oldCertPath, const String& newCertPath);
+
+ void UpdateStatusFile(boost::asio::ip::tcp::endpoint localEndpoint);
+ void RemoveStatusFile();
+
+ /* filesync */
+ static std::mutex m_ConfigSyncStageLock;
+
+ void SyncLocalZoneDirs() const;
+ void SyncLocalZoneDir(const Zone::Ptr& zone) const;
+ void RenewOwnCert();
+ void RenewCA();
+
+ void SendConfigUpdate(const JsonRpcConnection::Ptr& aclient);
+
+ static Dictionary::Ptr MergeConfigUpdate(const ConfigDirInformation& config);
+
+ static ConfigDirInformation LoadConfigDir(const String& dir);
+ static void ConfigGlobHandler(ConfigDirInformation& config, const String& path, const String& file);
+
+ static void TryActivateZonesStage(const std::vector<String>& relativePaths);
+
+ static String GetChecksum(const String& content);
+ static bool CheckConfigChange(const ConfigDirInformation& oldConfig, const ConfigDirInformation& newConfig);
+
+ void UpdateLastFailedZonesStageValidation(const String& log);
+ void ClearLastFailedZonesStageValidation();
+
+ /* configsync */
+ void UpdateConfigObject(const ConfigObject::Ptr& object, const MessageOrigin::Ptr& origin,
+ const JsonRpcConnection::Ptr& client = nullptr);
+ void DeleteConfigObject(const ConfigObject::Ptr& object, const MessageOrigin::Ptr& origin,
+ const JsonRpcConnection::Ptr& client = nullptr);
+ void SendRuntimeConfigObjects(const JsonRpcConnection::Ptr& aclient);
+
+ void SyncClient(const JsonRpcConnection::Ptr& aclient, const Endpoint::Ptr& endpoint, bool needSync);
+
+ /* API Config Packages */
+ mutable std::mutex m_ActivePackageStagesLock;
+ std::map<String, String> m_ActivePackageStages;
+
+ void UpdateActivePackageStagesCache();
+};
+
+}
+
+#endif /* APILISTENER_H */
diff --git a/lib/remote/apilistener.ti b/lib/remote/apilistener.ti
new file mode 100644
index 0000000..8317abc
--- /dev/null
+++ b/lib/remote/apilistener.ti
@@ -0,0 +1,66 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/i2-remote.hpp"
+#include "base/configobject.hpp"
+#include "base/application.hpp"
+#include "base/tlsutility.hpp"
+
+library remote;
+
+namespace icinga
+{
+
+class ApiListener : ConfigObject
+{
+ activation_priority 50;
+
+ [config, deprecated] String cert_path;
+ [config, deprecated] String key_path;
+ [config, deprecated] String ca_path;
+ [config] String crl_path;
+ [config] String cipher_list {
+ default {{{ return DEFAULT_TLS_CIPHERS; }}}
+ };
+ [config] String tls_protocolmin {
+ default {{{ return DEFAULT_TLS_PROTOCOLMIN; }}}
+ };
+
+ [config] String bind_host {
+ default {{{ return Configuration::ApiBindHost; }}}
+ };
+ [config] String bind_port {
+ default {{{ return Configuration::ApiBindPort; }}}
+ };
+
+ [config] bool accept_config;
+ [config] bool accept_commands;
+ [config] int max_anonymous_clients {
+ default {{{ return -1; }}}
+ };
+
+ [config, deprecated] double tls_handshake_timeout {
+ get;
+ set;
+ default {{{ return Configuration::TlsHandshakeTimeout; }}}
+ };
+
+ [config] double connect_timeout {
+ default {{{ return DEFAULT_CONNECT_TIMEOUT; }}}
+ };
+
+ [config, no_user_view, no_user_modify] String ticket_salt;
+
+ [config] Array::Ptr access_control_allow_origin;
+ [config, deprecated] bool access_control_allow_credentials;
+ [config, deprecated] String access_control_allow_headers;
+ [config, deprecated] String access_control_allow_methods;
+
+
+ [state, no_user_modify] Timestamp log_message_timestamp;
+
+ [no_user_modify] String identity;
+
+ [state, no_user_modify] Dictionary::Ptr last_failed_zones_stage_validation;
+};
+
+}
diff --git a/lib/remote/apiuser.cpp b/lib/remote/apiuser.cpp
new file mode 100644
index 0000000..2959d89
--- /dev/null
+++ b/lib/remote/apiuser.cpp
@@ -0,0 +1,55 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/apiuser.hpp"
+#include "remote/apiuser-ti.cpp"
+#include "base/configtype.hpp"
+#include "base/base64.hpp"
+#include "base/tlsutility.hpp"
+#include "base/utility.hpp"
+
+using namespace icinga;
+
+REGISTER_TYPE(ApiUser);
+
+ApiUser::Ptr ApiUser::GetByClientCN(const String& cn)
+{
+ for (const ApiUser::Ptr& user : ConfigType::GetObjectsByType<ApiUser>()) {
+ if (user->GetClientCN() == cn)
+ return user;
+ }
+
+ return nullptr;
+}
+
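+// Parses an HTTP "Authorization: Basic <base64>" header, decodes the
+// "username:password" pair and returns the matching ApiUser if the password
+// verifies; otherwise nullptr is returned.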
+ApiUser::Ptr ApiUser::GetByAuthHeader(const String& auth_header)
+{
+ String::SizeType pos = auth_header.FindFirstOf(" ");
+ String username, password;
+
+ if (pos != String::NPos && auth_header.SubStr(0, pos) == "Basic") {
+ String credentials_base64 = auth_header.SubStr(pos + 1);
+ String credentials = Base64::Decode(credentials_base64);
+
+ String::SizeType cpos = credentials.FindFirstOf(":");
+
+ if (cpos != String::NPos) {
+ username = credentials.SubStr(0, cpos);
+ password = credentials.SubStr(cpos + 1);
+ }
+ }
+
+ const ApiUser::Ptr& user = ApiUser::GetByName(username);
+
+ /* Deny authentication if:
+ * 1) user does not exist
+ * 2) given password is empty
+ * 3) configured password does not match.
+ */
+ if (!user || password.IsEmpty())
+ return nullptr;
+ else if (user && !Utility::ComparePasswords(password, user->GetPassword()))
+ return nullptr;
+
+ return user;
+}
+
diff --git a/lib/remote/apiuser.hpp b/lib/remote/apiuser.hpp
new file mode 100644
index 0000000..fc132ee
--- /dev/null
+++ b/lib/remote/apiuser.hpp
@@ -0,0 +1,27 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef APIUSER_H
+#define APIUSER_H
+
+#include "remote/i2-remote.hpp"
+#include "remote/apiuser-ti.hpp"
+
+namespace icinga
+{
+
+/**
+ * @ingroup remote
+ */
+class ApiUser final : public ObjectImpl<ApiUser>
+{
+public:
+ DECLARE_OBJECT(ApiUser);
+ DECLARE_OBJECTNAME(ApiUser);
+
+ static ApiUser::Ptr GetByClientCN(const String& cn);
+ static ApiUser::Ptr GetByAuthHeader(const String& auth_header);
+};
+
+}
+
+#endif /* APIUSER_H */
diff --git a/lib/remote/apiuser.ti b/lib/remote/apiuser.ti
new file mode 100644
index 0000000..0b49a1d
--- /dev/null
+++ b/lib/remote/apiuser.ti
@@ -0,0 +1,31 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+#include "base/function.hpp"
+
+library remote;
+
+namespace icinga
+{
+
+class ApiUser : ConfigObject
+{
+ /* Not shown in config output */
+ [config, no_user_view] String password;
+ [deprecated, config, no_user_view] String password_hash;
+ [config] String client_cn (ClientCN);
+ [config] array(Value) permissions;
+};
+
+validator ApiUser {
+ Array permissions {
+ String "*";
+ Dictionary "*" {
+ required permission;
+ String permission;
+ Function filter;
+ };
+ };
+};
+
+}
diff --git a/lib/remote/configfileshandler.cpp b/lib/remote/configfileshandler.cpp
new file mode 100644
index 0000000..779ecd1
--- /dev/null
+++ b/lib/remote/configfileshandler.cpp
@@ -0,0 +1,94 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/configfileshandler.hpp"
+#include "remote/configpackageutility.hpp"
+#include "remote/httputility.hpp"
+#include "remote/filterutility.hpp"
+#include "base/exception.hpp"
+#include "base/utility.hpp"
+#include <boost/algorithm/string/join.hpp>
+#include <fstream>
+
+using namespace icinga;
+
+REGISTER_URLHANDLER("/v1/config/files", ConfigFilesHandler);
+
+bool ConfigFilesHandler::HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+)
+{
+ namespace http = boost::beast::http;
+
+ if (request.method() != http::verb::get)
+ return false;
+
+ const std::vector<String>& urlPath = url->GetPath();
+
+ if (urlPath.size() >= 4)
+ params->Set("package", urlPath[3]);
+
+ if (urlPath.size() >= 5)
+ params->Set("stage", urlPath[4]);
+
+ if (urlPath.size() >= 6) {
+ std::vector<String> tmpPath(urlPath.begin() + 5, urlPath.end());
+ params->Set("path", boost::algorithm::join(tmpPath, "/"));
+ }
+
+ if (request[http::field::accept] == "application/json") {
+ HttpUtility::SendJsonError(response, params, 400, "Invalid Accept header. Either remove the Accept header or set it to 'application/octet-stream'.");
+ return true;
+ }
+
+ FilterUtility::CheckPermission(user, "config/query");
+
+ String packageName = HttpUtility::GetLastParameter(params, "package");
+ String stageName = HttpUtility::GetLastParameter(params, "stage");
+
+ if (!ConfigPackageUtility::ValidatePackageName(packageName)) {
+ HttpUtility::SendJsonError(response, params, 400, "Invalid package name.");
+ return true;
+ }
+
+ if (!ConfigPackageUtility::ValidateStageName(stageName)) {
+ HttpUtility::SendJsonError(response, params, 400, "Invalid stage name.");
+ return true;
+ }
+
+ String relativePath = HttpUtility::GetLastParameter(params, "path");
+
+ if (ConfigPackageUtility::ContainsDotDot(relativePath)) {
+ HttpUtility::SendJsonError(response, params, 400, "Path contains '..' (not allowed).");
+ return true;
+ }
+
+ String path = ConfigPackageUtility::GetPackageDir() + "/" + packageName + "/" + stageName + "/" + relativePath;
+
+ if (!Utility::PathExists(path)) {
+ HttpUtility::SendJsonError(response, params, 404, "Path not found.");
+ return true;
+ }
+
+ try {
+ std::ifstream fp(path.CStr(), std::ifstream::in | std::ifstream::binary);
+ fp.exceptions(std::ifstream::badbit);
+
+ String content((std::istreambuf_iterator<char>(fp)), std::istreambuf_iterator<char>());
+ response.result(http::status::ok);
+ response.set(http::field::content_type, "application/octet-stream");
+ response.body() = content;
+ response.content_length(response.body().size());
+ } catch (const std::exception& ex) {
+ HttpUtility::SendJsonError(response, params, 500, "Could not read file.",
+ DiagnosticInformation(ex));
+ }
+
+ return true;
+}
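For illustration, a minimal Python sketch of how the /v1/config/files endpoint registered above could be queried. The host, credentials and the package/stage/path names below are assumptions, not part of this patch; note that the handler rejects an 'Accept: application/json' header and returns the raw file contents instead of JSON.

import requests

BASE = "https://localhost:5665"      # hypothetical Icinga 2 API endpoint
AUTH = ("root", "icinga")            # hypothetical ApiUser with the config/query permission

# Fetch one file from a config package stage; the handler streams it as application/octet-stream.
resp = requests.get(
    f"{BASE}/v1/config/files/example-cmdb/example-stage/conf.d/test.conf",
    auth=AUTH,
    headers={"Accept": "application/octet-stream"},
    verify=False,  # self-signed certificates are common in test setups
)
resp.raise_for_status()
print(resp.text)
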
diff --git a/lib/remote/configfileshandler.hpp b/lib/remote/configfileshandler.hpp
new file mode 100644
index 0000000..ea48b1e
--- /dev/null
+++ b/lib/remote/configfileshandler.hpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CONFIGFILESHANDLER_H
+#define CONFIGFILESHANDLER_H
+
+#include "remote/httphandler.hpp"
+
+namespace icinga
+{
+
+class ConfigFilesHandler final : public HttpHandler
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ConfigFilesHandler);
+
+ bool HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+ ) override;
+};
+
+}
+
+#endif /* CONFIGFILESHANDLER_H */
diff --git a/lib/remote/configobjectslock.cpp b/lib/remote/configobjectslock.cpp
new file mode 100644
index 0000000..e529c83
--- /dev/null
+++ b/lib/remote/configobjectslock.cpp
@@ -0,0 +1,24 @@
+/* Icinga 2 | (c) 2022 Icinga GmbH | GPLv2+ */
+
+#ifndef _WIN32
+
+#include "base/shared-memory.hpp"
+#include "remote/configobjectslock.hpp"
+#include <boost/interprocess/sync/lock_options.hpp>
+
+using namespace icinga;
+
+// On *nix one process may write config objects while another is loading the config, so this uses IPC.
+static SharedMemory<boost::interprocess::interprocess_sharable_mutex> l_ConfigObjectsMutex;
+
+ConfigObjectsExclusiveLock::ConfigObjectsExclusiveLock()
+ : m_Lock(l_ConfigObjectsMutex.Get())
+{
+}
+
+ConfigObjectsSharedLock::ConfigObjectsSharedLock(std::try_to_lock_t)
+ : m_Lock(l_ConfigObjectsMutex.Get(), boost::interprocess::try_to_lock)
+{
+}
+
+#endif /* _WIN32 */
diff --git a/lib/remote/configobjectslock.hpp b/lib/remote/configobjectslock.hpp
new file mode 100644
index 0000000..ee90981
--- /dev/null
+++ b/lib/remote/configobjectslock.hpp
@@ -0,0 +1,72 @@
+/* Icinga 2 | (c) 2023 Icinga GmbH | GPLv2+ */
+
+#pragma once
+
+#include <mutex>
+
+#ifndef _WIN32
+#include <boost/interprocess/sync/interprocess_sharable_mutex.hpp>
+#include <boost/interprocess/sync/scoped_lock.hpp>
+#include <boost/interprocess/sync/sharable_lock.hpp>
+#endif /* _WIN32 */
+
+namespace icinga
+{
+
+#ifdef _WIN32
+
+class ConfigObjectsSharedLock
+{
+public:
+ inline ConfigObjectsSharedLock(std::try_to_lock_t)
+ {
+ }
+
+ constexpr explicit operator bool() const
+ {
+ return true;
+ }
+};
+
+#else /* _WIN32 */
+
+/**
+ * Waits until all existing ConfigObjects*Lock instances have vanished and disallows new ones for its lifetime.
+ * Keep an instance alive during reload to forbid runtime config changes!
+ * This way Icinga reads a consistent config which doesn't suddenly get runtime-changed.
+ *
+ * @ingroup remote
+ */
+class ConfigObjectsExclusiveLock
+{
+public:
+ ConfigObjectsExclusiveLock();
+
+private:
+ boost::interprocess::scoped_lock<boost::interprocess::interprocess_sharable_mutex> m_Lock;
+};
+
+/**
+ * Waits until the ConfigObjectsExclusiveLock (if any) has vanished and disallows a new one for its lifetime.
+ * Keep an instance alive during runtime config changes to delay a reload (if any)!
+ * This way Icinga reads a consistent config which doesn't suddenly get runtime-changed.
+ *
+ * @ingroup remote
+ */
+class ConfigObjectsSharedLock
+{
+public:
+ ConfigObjectsSharedLock(std::try_to_lock_t);
+
+ inline explicit operator bool() const
+ {
+ return m_Lock.owns();
+ }
+
+private:
+ boost::interprocess::sharable_lock<boost::interprocess::interprocess_sharable_mutex> m_Lock;
+};
+
+#endif /* _WIN32 */
+
+}
diff --git a/lib/remote/configobjectutility.cpp b/lib/remote/configobjectutility.cpp
new file mode 100644
index 0000000..62c910b
--- /dev/null
+++ b/lib/remote/configobjectutility.cpp
@@ -0,0 +1,377 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/configobjectutility.hpp"
+#include "remote/configpackageutility.hpp"
+#include "remote/apilistener.hpp"
+#include "config/configcompiler.hpp"
+#include "config/configitem.hpp"
+#include "base/configwriter.hpp"
+#include "base/exception.hpp"
+#include "base/dependencygraph.hpp"
+#include "base/tlsutility.hpp"
+#include "base/utility.hpp"
+#include <boost/algorithm/string/case_conv.hpp>
+#include <boost/filesystem.hpp>
+#include <boost/system/error_code.hpp>
+#include <fstream>
+#include <utility>
+
+using namespace icinga;
+
+String ConfigObjectUtility::GetConfigDir()
+{
+ String prefix = ConfigPackageUtility::GetPackageDir() + "/_api/";
+ String activeStage = ConfigPackageUtility::GetActiveStage("_api");
+
+ if (activeStage.IsEmpty())
+ RepairPackage("_api");
+
+ return prefix + activeStage;
+}
+
+String ConfigObjectUtility::ComputeNewObjectConfigPath(const Type::Ptr& type, const String& fullName)
+{
+ String typeDir = type->GetPluralName();
+ boost::algorithm::to_lower(typeDir);
+
+ /* GetConfigDir() may throw an exception which the caller must handle. */
+ String prefix = GetConfigDir() + "/conf.d/" + typeDir + "/";
+
+ String escapedName = EscapeName(fullName);
+
+ String longPath = prefix + escapedName + ".conf";
+
+ /*
+ * The long path may cause trouble due to exceeding the allowed filename length of the filesystem. Therefore, the
+ * preferred solution would be to use the truncated and hashed version as returned at the end of this function.
+ * However, for compatibility reasons, we have to keep the old long version in some cases. Notably, this could lead
+ * to the creation of objects that can't be synced to child nodes if they are running an older version. Thus, for
+ * now, the fix is only enabled for comments and downtimes, as these are the object types for which the issue is
+ * most likely triggered but can't be worked around easily (you'd have to rename the host and/or service in order to
+ * be able to schedule a downtime or add an acknowledgement, which is not feasible) and the impact of not syncing
+ * these objects through the whole cluster is limited. For other object types, we currently prefer to fail the
+ * creation early so that configuration inconsistencies throughout the cluster are avoided.
+ *
+ * TODO: Remove this in v2.16 and truncate all.
+ */
+ if (type->GetName() != "Comment" && type->GetName() != "Downtime") {
+ return longPath;
+ }
+
+ /* Maximum length 80 bytes object name + 3 bytes "..." + 40 bytes SHA1 (hex-encoded) */
+ return prefix + Utility::TruncateUsingHash<80+3+40>(escapedName) + ".conf";
+}
+
+String ConfigObjectUtility::GetExistingObjectConfigPath(const ConfigObject::Ptr& object)
+{
+ return object->GetDebugInfo().Path;
+}
+
+void ConfigObjectUtility::RepairPackage(const String& package)
+{
+ /* Try to fix the active stage whenever we find a directory in there.
+ * This automatically heals packages created by versions < 2.11 which remained broken.
+ */
+ String dir = ConfigPackageUtility::GetPackageDir() + "/" + package + "/";
+
+ namespace fs = boost::filesystem;
+
+ /* Use iterators to work around conversion issues with VS builds on Windows. */
+ fs::path path(dir.Begin(), dir.End());
+
+ fs::recursive_directory_iterator end;
+
+ String foundActiveStage;
+
+ for (fs::recursive_directory_iterator it(path); it != end; ++it) {
+ boost::system::error_code ec;
+
+ const fs::path d = *it;
+ if (fs::is_directory(d, ec)) {
+ /* Extract the relative directory name. */
+ foundActiveStage = d.stem().string();
+
+ break; // Use the first found directory.
+ }
+ }
+
+ if (!foundActiveStage.IsEmpty()) {
+ Log(LogInformation, "ConfigObjectUtility")
+ << "Repairing config package '" << package << "' with stage '" << foundActiveStage << "'.";
+
+ ConfigPackageUtility::ActivateStage(package, foundActiveStage);
+ } else {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Cannot repair package '" + package + "', please check the troubleshooting docs."));
+ }
+}
+
+void ConfigObjectUtility::CreateStorage()
+{
+ std::unique_lock<std::mutex> lock(ConfigPackageUtility::GetStaticPackageMutex());
+
+ /* For now, we only use _api as our creation target. */
+ String package = "_api";
+
+ if (!ConfigPackageUtility::PackageExists(package)) {
+ Log(LogNotice, "ConfigObjectUtility")
+ << "Package " << package << " doesn't exist yet, creating it.";
+
+ ConfigPackageUtility::CreatePackage(package);
+
+ String stage = ConfigPackageUtility::CreateStage(package);
+ ConfigPackageUtility::ActivateStage(package, stage);
+ }
+}
+
+String ConfigObjectUtility::EscapeName(const String& name)
+{
+ return Utility::EscapeString(name, "<>:\"/\\|?*", true);
+}
+
+String ConfigObjectUtility::CreateObjectConfig(const Type::Ptr& type, const String& fullName,
+ bool ignoreOnError, const Array::Ptr& templates, const Dictionary::Ptr& attrs)
+{
+ auto *nc = dynamic_cast<NameComposer *>(type.get());
+ Dictionary::Ptr nameParts;
+ String name;
+
+ if (nc) {
+ nameParts = nc->ParseName(fullName);
+ name = nameParts->Get("name");
+ } else
+ name = fullName;
+
+ Dictionary::Ptr allAttrs = new Dictionary();
+
+ if (attrs) {
+ attrs->CopyTo(allAttrs);
+
+ ObjectLock olock(attrs);
+ for (const Dictionary::Pair& kv : attrs) {
+ int fid = type->GetFieldId(kv.first.SubStr(0, kv.first.FindFirstOf(".")));
+
+ if (fid < 0)
+ BOOST_THROW_EXCEPTION(ScriptError("Invalid attribute specified: " + kv.first));
+
+ Field field = type->GetFieldInfo(fid);
+
+ if (!(field.Attributes & FAConfig) || kv.first == "name")
+ BOOST_THROW_EXCEPTION(ScriptError("Attribute is marked for internal use only and may not be set: " + kv.first));
+ }
+ }
+
+ if (nameParts)
+ nameParts->CopyTo(allAttrs);
+
+ allAttrs->Remove("name");
+
+ /* update the version for config sync */
+ allAttrs->Set("version", Utility::GetTime());
+
+ std::ostringstream config;
+ ConfigWriter::EmitConfigItem(config, type->GetName(), name, false, ignoreOnError, templates, allAttrs);
+ ConfigWriter::EmitRaw(config, "\n");
+
+ return config.str();
+}
+
+bool ConfigObjectUtility::CreateObject(const Type::Ptr& type, const String& fullName,
+ const String& config, const Array::Ptr& errors, const Array::Ptr& diagnosticInformation, const Value& cookie)
+{
+ CreateStorage();
+
+ {
+ auto configType (dynamic_cast<ConfigType*>(type.get()));
+
+ if (configType && configType->GetObject(fullName)) {
+ errors->Add("Object '" + fullName + "' already exists.");
+ return false;
+ }
+ }
+
+ String path;
+
+ try {
+ path = ComputeNewObjectConfigPath(type, fullName);
+ } catch (const std::exception& ex) {
+ errors->Add("Config package broken: " + DiagnosticInformation(ex, false));
+ return false;
+ }
+
+ Utility::MkDirP(Utility::DirName(path), 0700);
+
+ std::ofstream fp(path.CStr(), std::ofstream::out | std::ostream::trunc);
+ fp << config;
+ fp.close();
+
+ std::unique_ptr<Expression> expr = ConfigCompiler::CompileFile(path, String(), "_api");
+
+ try {
+ ActivationScope ascope;
+
+ ScriptFrame frame(true);
+ expr->Evaluate(frame);
+ expr.reset();
+
+ WorkQueue upq;
+ upq.SetName("ConfigObjectUtility::CreateObject");
+
+ std::vector<ConfigItem::Ptr> newItems;
+
+ /*
+ * Disable logging during object creation; we log the outcome ourselves further down.
+ * Duplicate the error handling here for better logging and debugging.
+ */
+ if (!ConfigItem::CommitItems(ascope.GetContext(), upq, newItems, true)) {
+ if (errors) {
+ Log(LogNotice, "ConfigObjectUtility")
+ << "Failed to commit config item '" << fullName << "'. Aborting and removing config path '" << path << "'.";
+
+ Utility::Remove(path);
+
+ for (const boost::exception_ptr& ex : upq.GetExceptions()) {
+ errors->Add(DiagnosticInformation(ex, false));
+
+ if (diagnosticInformation)
+ diagnosticInformation->Add(DiagnosticInformation(ex));
+ }
+ }
+
+ return false;
+ }
+
+ /*
+ * Activate the config object.
+ * uq, items, runtimeCreated, silent, withModAttrs, cookie
+ * IMPORTANT: Forward the cookie aka origin in order to prevent sync loops in the same zone!
+ */
+ if (!ConfigItem::ActivateItems(newItems, true, false, false, cookie)) {
+ if (errors) {
+ Log(LogNotice, "ConfigObjectUtility")
+ << "Failed to activate config object '" << fullName << "'. Aborting and removing config path '" << path << "'.";
+
+ Utility::Remove(path);
+
+ for (const boost::exception_ptr& ex : upq.GetExceptions()) {
+ errors->Add(DiagnosticInformation(ex, false));
+
+ if (diagnosticInformation)
+ diagnosticInformation->Add(DiagnosticInformation(ex));
+ }
+ }
+
+ return false;
+ }
+
+ /* if (type != Comment::TypeInstance && type != Downtime::TypeInstance)
+ * Does not work since this would require libicinga, which has a dependency on libremote
+ * Would work if these libs were static.
+ */
+ if (type->GetName() != "Comment" && type->GetName() != "Downtime")
+ ApiListener::UpdateObjectAuthority();
+
+ // At this stage we should have a config object already. If not, it was ignored before.
+ auto *ctype = dynamic_cast<ConfigType *>(type.get());
+ ConfigObject::Ptr obj = ctype->GetObject(fullName);
+
+ if (obj) {
+ Log(LogInformation, "ConfigObjectUtility")
+ << "Created and activated object '" << fullName << "' of type '" << type->GetName() << "'.";
+ } else {
+ Log(LogNotice, "ConfigObjectUtility")
+ << "Object '" << fullName << "' was not created but ignored due to errors.";
+ }
+
+ } catch (const std::exception& ex) {
+ Utility::Remove(path);
+
+ if (errors)
+ errors->Add(DiagnosticInformation(ex, false));
+
+ if (diagnosticInformation)
+ diagnosticInformation->Add(DiagnosticInformation(ex));
+
+ return false;
+ }
+
+ return true;
+}
+
+bool ConfigObjectUtility::DeleteObjectHelper(const ConfigObject::Ptr& object, bool cascade,
+ const Array::Ptr& errors, const Array::Ptr& diagnosticInformation, const Value& cookie)
+{
+ std::vector<Object::Ptr> parents = DependencyGraph::GetParents(object);
+
+ Type::Ptr type = object->GetReflectionType();
+
+ String name = object->GetName();
+
+ if (!parents.empty() && !cascade) {
+ if (errors) {
+ errors->Add("Object '" + name + "' of type '" + type->GetName() +
+ "' cannot be deleted because other objects depend on it. "
+ "Use cascading delete to delete it anyway.");
+ }
+
+ return false;
+ }
+
+ for (const Object::Ptr& pobj : parents) {
+ ConfigObject::Ptr parentObj = dynamic_pointer_cast<ConfigObject>(pobj);
+
+ if (!parentObj)
+ continue;
+
+ DeleteObjectHelper(parentObj, cascade, errors, diagnosticInformation, cookie);
+ }
+
+ ConfigItem::Ptr item = ConfigItem::GetByTypeAndName(type, name);
+
+ try {
+ /* mark this object for cluster delete event */
+ object->SetExtension("ConfigObjectDeleted", true);
+
+ /*
+ * Trigger the deactivation signal for DB IDO and runtime object deletions.
+ * IMPORTANT: Specify the cookie aka origin in order to prevent sync loops
+ * in the same zone!
+ */
+ object->Deactivate(true, cookie);
+
+ if (item)
+ item->Unregister();
+ else
+ object->Unregister();
+
+ } catch (const std::exception& ex) {
+ if (errors)
+ errors->Add(DiagnosticInformation(ex, false));
+
+ if (diagnosticInformation)
+ diagnosticInformation->Add(DiagnosticInformation(ex));
+
+ return false;
+ }
+
+ if (object->GetPackage() == "_api") {
+ Utility::Remove(GetExistingObjectConfigPath(object));
+ }
+
+ Log(LogInformation, "ConfigObjectUtility")
+ << "Deleted object '" << name << "' of type '" << type->GetName() << "'.";
+
+ return true;
+}
+
+bool ConfigObjectUtility::DeleteObject(const ConfigObject::Ptr& object, bool cascade, const Array::Ptr& errors,
+ const Array::Ptr& diagnosticInformation, const Value& cookie)
+{
+ if (object->GetPackage() != "_api") {
+ if (errors)
+ errors->Add("Object cannot be deleted because it was not created using the API.");
+
+ return false;
+ }
+
+ return DeleteObjectHelper(object, cascade, errors, diagnosticInformation, cookie);
+}
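CreateObject() and DeleteObject() above back the runtime object creation and deletion exposed through the REST API (the corresponding /v1/objects handlers live elsewhere in lib/remote). A rough Python sketch of that round trip, assuming a local instance and an ApiUser with the relevant objects/* permissions; the host name, template and attributes are placeholders:

import requests

BASE = "https://localhost:5665"
AUTH = ("root", "icinga")  # hypothetical ApiUser credentials

# Create a host in the _api package; server-side this ends up in ConfigObjectUtility::CreateObject().
requests.put(
    f"{BASE}/v1/objects/hosts/example-host",
    auth=AUTH, verify=False,
    headers={"Accept": "application/json"},
    json={"templates": ["generic-host"], "attrs": {"address": "192.0.2.10"}},
).raise_for_status()

# Delete it again, cascading to dependent objects such as downtimes (DeleteObject with cascade=true).
requests.delete(
    f"{BASE}/v1/objects/hosts/example-host",
    auth=AUTH, verify=False,
    headers={"Accept": "application/json"},
    params={"cascade": 1},
).raise_for_status()
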
diff --git a/lib/remote/configobjectutility.hpp b/lib/remote/configobjectutility.hpp
new file mode 100644
index 0000000..5a113c8
--- /dev/null
+++ b/lib/remote/configobjectutility.hpp
@@ -0,0 +1,47 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CONFIGOBJECTUTILITY_H
+#define CONFIGOBJECTUTILITY_H
+
+#include "remote/i2-remote.hpp"
+#include "base/array.hpp"
+#include "base/configobject.hpp"
+#include "base/dictionary.hpp"
+#include "base/type.hpp"
+
+namespace icinga
+{
+
+/**
+ * Helper functions.
+ *
+ * @ingroup remote
+ */
+class ConfigObjectUtility
+{
+
+public:
+ static String GetConfigDir();
+ static String ComputeNewObjectConfigPath(const Type::Ptr& type, const String& fullName);
+ static String GetExistingObjectConfigPath(const ConfigObject::Ptr& object);
+ static void RepairPackage(const String& package);
+ static void CreateStorage();
+
+ static String CreateObjectConfig(const Type::Ptr& type, const String& fullName,
+ bool ignoreOnError, const Array::Ptr& templates, const Dictionary::Ptr& attrs);
+
+ static bool CreateObject(const Type::Ptr& type, const String& fullName,
+ const String& config, const Array::Ptr& errors, const Array::Ptr& diagnosticInformation, const Value& cookie = Empty);
+
+ static bool DeleteObject(const ConfigObject::Ptr& object, bool cascade, const Array::Ptr& errors,
+ const Array::Ptr& diagnosticInformation, const Value& cookie = Empty);
+
+private:
+ static String EscapeName(const String& name);
+ static bool DeleteObjectHelper(const ConfigObject::Ptr& object, bool cascade, const Array::Ptr& errors,
+ const Array::Ptr& diagnosticInformation, const Value& cookie = Empty);
+};
+
+}
+
+#endif /* CONFIGOBJECTUTILITY_H */
diff --git a/lib/remote/configpackageshandler.cpp b/lib/remote/configpackageshandler.cpp
new file mode 100644
index 0000000..98b3268
--- /dev/null
+++ b/lib/remote/configpackageshandler.cpp
@@ -0,0 +1,179 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/configpackageshandler.hpp"
+#include "remote/configpackageutility.hpp"
+#include "remote/httputility.hpp"
+#include "remote/filterutility.hpp"
+#include "base/exception.hpp"
+
+using namespace icinga;
+
+REGISTER_URLHANDLER("/v1/config/packages", ConfigPackagesHandler);
+
+bool ConfigPackagesHandler::HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+)
+{
+ namespace http = boost::beast::http;
+
+ if (url->GetPath().size() > 4)
+ return false;
+
+ if (request.method() == http::verb::get)
+ HandleGet(user, request, url, response, params);
+ else if (request.method() == http::verb::post)
+ HandlePost(user, request, url, response, params);
+ else if (request.method() == http::verb::delete_)
+ HandleDelete(user, request, url, response, params);
+ else
+ return false;
+
+ return true;
+}
+
+void ConfigPackagesHandler::HandleGet(
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params
+)
+{
+ namespace http = boost::beast::http;
+
+ FilterUtility::CheckPermission(user, "config/query");
+
+ std::vector<String> packages;
+
+ try {
+ packages = ConfigPackageUtility::GetPackages();
+ } catch (const std::exception& ex) {
+ HttpUtility::SendJsonError(response, params, 500, "Could not retrieve packages.",
+ DiagnosticInformation(ex));
+ return;
+ }
+
+ ArrayData results;
+
+ {
+ std::unique_lock<std::mutex> lock(ConfigPackageUtility::GetStaticPackageMutex());
+
+ for (const String& package : packages) {
+ String activeStage;
+
+ try {
+ activeStage = ConfigPackageUtility::GetActiveStage(package);
+ } catch (const std::exception&) { } /* Should never happen. */
+
+ results.emplace_back(new Dictionary({
+ { "name", package },
+ { "stages", Array::FromVector(ConfigPackageUtility::GetStages(package)) },
+ { "active-stage", activeStage }
+ }));
+ }
+ }
+
+ Dictionary::Ptr result = new Dictionary({
+ { "results", new Array(std::move(results)) }
+ });
+
+ response.result(http::status::ok);
+ HttpUtility::SendJsonBody(response, params, result);
+}
+
+void ConfigPackagesHandler::HandlePost(
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params
+)
+{
+ namespace http = boost::beast::http;
+
+ FilterUtility::CheckPermission(user, "config/modify");
+
+ if (url->GetPath().size() >= 4)
+ params->Set("package", url->GetPath()[3]);
+
+ String packageName = HttpUtility::GetLastParameter(params, "package");
+
+ if (!ConfigPackageUtility::ValidatePackageName(packageName)) {
+ HttpUtility::SendJsonError(response, params, 400, "Invalid package name '" + packageName + "'.");
+ return;
+ }
+
+ try {
+ std::unique_lock<std::mutex> lock(ConfigPackageUtility::GetStaticPackageMutex());
+
+ ConfigPackageUtility::CreatePackage(packageName);
+ } catch (const std::exception& ex) {
+ HttpUtility::SendJsonError(response, params, 500, "Could not create package '" + packageName + "'.",
+ DiagnosticInformation(ex));
+ return;
+ }
+
+ Dictionary::Ptr result1 = new Dictionary({
+ { "code", 200 },
+ { "package", packageName },
+ { "status", "Created package." }
+ });
+
+ Dictionary::Ptr result = new Dictionary({
+ { "results", new Array({ result1 }) }
+ });
+
+ response.result(http::status::ok);
+ HttpUtility::SendJsonBody(response, params, result);
+}
+
+void ConfigPackagesHandler::HandleDelete(
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params
+)
+{
+ namespace http = boost::beast::http;
+
+ FilterUtility::CheckPermission(user, "config/modify");
+
+ if (url->GetPath().size() >= 4)
+ params->Set("package", url->GetPath()[3]);
+
+ String packageName = HttpUtility::GetLastParameter(params, "package");
+
+ if (!ConfigPackageUtility::ValidatePackageName(packageName)) {
+ HttpUtility::SendJsonError(response, params, 400, "Invalid package name '" + packageName + "'.");
+ return;
+ }
+
+ try {
+ ConfigPackageUtility::DeletePackage(packageName);
+ } catch (const std::exception& ex) {
+ HttpUtility::SendJsonError(response, params, 500, "Failed to delete package '" + packageName + "'.",
+ DiagnosticInformation(ex));
+ return;
+ }
+
+ Dictionary::Ptr result1 = new Dictionary({
+ { "code", 200 },
+ { "package", packageName },
+ { "status", "Deleted package." }
+ });
+
+ Dictionary::Ptr result = new Dictionary({
+ { "results", new Array({ result1 }) }
+ });
+
+ response.result(http::status::ok);
+ HttpUtility::SendJsonBody(response, params, result);
+}
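A minimal Python sketch of driving the /v1/config/packages endpoint implemented above, assuming a local instance and an ApiUser with the config/query and config/modify permissions; the package name is a placeholder:

import requests

BASE = "https://localhost:5665"
AUTH = ("root", "icinga")  # hypothetical ApiUser credentials

# Create a package, list all packages with their stages, then delete it again.
requests.post(f"{BASE}/v1/config/packages/example-cmdb",
              auth=AUTH, verify=False, headers={"Accept": "application/json"}).raise_for_status()

packages = requests.get(f"{BASE}/v1/config/packages",
                        auth=AUTH, verify=False, headers={"Accept": "application/json"}).json()
for pkg in packages["results"]:
    print(pkg["name"], pkg["active-stage"], pkg["stages"])

requests.delete(f"{BASE}/v1/config/packages/example-cmdb",
                auth=AUTH, verify=False, headers={"Accept": "application/json"}).raise_for_status()
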
diff --git a/lib/remote/configpackageshandler.hpp b/lib/remote/configpackageshandler.hpp
new file mode 100644
index 0000000..0a05ea1
--- /dev/null
+++ b/lib/remote/configpackageshandler.hpp
@@ -0,0 +1,54 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CONFIGMODULESHANDLER_H
+#define CONFIGMODULESHANDLER_H
+
+#include "remote/httphandler.hpp"
+
+namespace icinga
+{
+
+class ConfigPackagesHandler final : public HttpHandler
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ConfigPackagesHandler);
+
+ bool HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+ ) override;
+
+private:
+ void HandleGet(
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params
+ );
+ void HandlePost(
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params
+ );
+ void HandleDelete(
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params
+ );
+
+};
+
+}
+
+#endif /* CONFIGMODULESHANDLER_H */
diff --git a/lib/remote/configpackageutility.cpp b/lib/remote/configpackageutility.cpp
new file mode 100644
index 0000000..e795401
--- /dev/null
+++ b/lib/remote/configpackageutility.cpp
@@ -0,0 +1,413 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/configpackageutility.hpp"
+#include "remote/apilistener.hpp"
+#include "base/application.hpp"
+#include "base/exception.hpp"
+#include "base/utility.hpp"
+#include <boost/algorithm/string.hpp>
+#include <boost/regex.hpp>
+#include <algorithm>
+#include <cctype>
+#include <fstream>
+
+using namespace icinga;
+
+String ConfigPackageUtility::GetPackageDir()
+{
+ return Configuration::DataDir + "/api/packages";
+}
+
+void ConfigPackageUtility::CreatePackage(const String& name)
+{
+ String path = GetPackageDir() + "/" + name;
+
+ if (Utility::PathExists(path))
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Package already exists."));
+
+ Utility::MkDirP(path, 0700);
+ WritePackageConfig(name);
+}
+
+void ConfigPackageUtility::DeletePackage(const String& name)
+{
+ String path = GetPackageDir() + "/" + name;
+
+ if (!Utility::PathExists(path))
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Package does not exist."));
+
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ /* config packages without API make no sense. */
+ if (!listener)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("No ApiListener instance configured."));
+
+ listener->RemoveActivePackageStage(name);
+
+ Utility::RemoveDirRecursive(path);
+ Application::RequestRestart();
+}
+
+std::vector<String> ConfigPackageUtility::GetPackages()
+{
+ String packageDir = GetPackageDir();
+
+ std::vector<String> packages;
+
+ /* The package directory does not exist; no packages have been created thus far. */
+ if (!Utility::PathExists(packageDir))
+ return packages;
+
+ Utility::Glob(packageDir + "/*", [&packages](const String& path) { packages.emplace_back(Utility::BaseName(path)); }, GlobDirectory);
+
+ return packages;
+}
+
+bool ConfigPackageUtility::PackageExists(const String& name)
+{
+ auto packages (GetPackages());
+ return std::find(packages.begin(), packages.end(), name) != packages.end();
+}
+
+String ConfigPackageUtility::CreateStage(const String& packageName, const Dictionary::Ptr& files)
+{
+ String stageName = Utility::NewUniqueID();
+
+ String path = GetPackageDir() + "/" + packageName;
+
+ if (!Utility::PathExists(path))
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Package does not exist."));
+
+ path += "/" + stageName;
+
+ Utility::MkDirP(path, 0700);
+ Utility::MkDirP(path + "/conf.d", 0700);
+ Utility::MkDirP(path + "/zones.d", 0700);
+ WriteStageConfig(packageName, stageName);
+
+ bool foundDotDot = false;
+
+ if (files) {
+ ObjectLock olock(files);
+ for (const Dictionary::Pair& kv : files) {
+ if (ContainsDotDot(kv.first)) {
+ foundDotDot = true;
+ break;
+ }
+
+ String filePath = path + "/" + kv.first;
+
+ Log(LogInformation, "ConfigPackageUtility")
+ << "Updating configuration file: " << filePath;
+
+ // Create the parent directory tree if it does not already exist
+ Utility::MkDirP(Utility::DirName(filePath), 0750);
+ std::ofstream fp(filePath.CStr(), std::ofstream::out | std::ostream::binary | std::ostream::trunc);
+ fp << kv.second;
+ fp.close();
+ }
+ }
+
+ if (foundDotDot) {
+ Utility::RemoveDirRecursive(path);
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Path must not contain '..'."));
+ }
+
+ return stageName;
+}
+
+void ConfigPackageUtility::WritePackageConfig(const String& packageName)
+{
+ String stageName = GetActiveStage(packageName);
+
+ String includePath = GetPackageDir() + "/" + packageName + "/include.conf";
+ std::ofstream fpInclude(includePath.CStr(), std::ofstream::out | std::ostream::binary | std::ostream::trunc);
+ fpInclude << "include \"*/include.conf\"\n";
+ fpInclude.close();
+
+ String activePath = GetPackageDir() + "/" + packageName + "/active.conf";
+ std::ofstream fpActive(activePath.CStr(), std::ofstream::out | std::ostream::binary | std::ostream::trunc);
+ fpActive << "if (!globals.contains(\"ActiveStages\")) {\n"
+ << " globals.ActiveStages = {}\n"
+ << "}\n"
+ << "\n"
+ << "if (globals.contains(\"ActiveStageOverride\")) {\n"
+ << " var arr = ActiveStageOverride.split(\":\")\n"
+ << " if (arr[0] == \"" << packageName << "\") {\n"
+ << " if (arr.len() < 2) {\n"
+ << " log(LogCritical, \"Config\", \"Invalid value for ActiveStageOverride\")\n"
+ << " } else {\n"
+ << " ActiveStages[\"" << packageName << "\"] = arr[1]\n"
+ << " }\n"
+ << " }\n"
+ << "}\n"
+ << "\n"
+ << "if (!ActiveStages.contains(\"" << packageName << "\")) {\n"
+ << " ActiveStages[\"" << packageName << "\"] = \"" << stageName << "\"\n"
+ << "}\n";
+ fpActive.close();
+}
+
+void ConfigPackageUtility::WriteStageConfig(const String& packageName, const String& stageName)
+{
+ String path = GetPackageDir() + "/" + packageName + "/" + stageName + "/include.conf";
+ std::ofstream fp(path.CStr(), std::ofstream::out | std::ostream::binary | std::ostream::trunc);
+ fp << "include \"../active.conf\"\n"
+ << "if (ActiveStages[\"" << packageName << "\"] == \"" << stageName << "\") {\n"
+ << " include_recursive \"conf.d\"\n"
+ << " include_zones \"" << packageName << "\", \"zones.d\"\n"
+ << "}\n";
+ fp.close();
+}
+
+void ConfigPackageUtility::ActivateStage(const String& packageName, const String& stageName)
+{
+ SetActiveStage(packageName, stageName);
+
+ WritePackageConfig(packageName);
+}
+
+void ConfigPackageUtility::TryActivateStageCallback(const ProcessResult& pr, const String& packageName, const String& stageName,
+ bool activate, bool reload, const Shared<Defer>::Ptr& resetPackageUpdates)
+{
+ String logFile = GetPackageDir() + "/" + packageName + "/" + stageName + "/startup.log";
+ std::ofstream fpLog(logFile.CStr(), std::ofstream::out | std::ostream::binary | std::ostream::trunc);
+ fpLog << pr.Output;
+ fpLog.close();
+
+ String statusFile = GetPackageDir() + "/" + packageName + "/" + stageName + "/status";
+ std::ofstream fpStatus(statusFile.CStr(), std::ofstream::out | std::ostream::binary | std::ostream::trunc);
+ fpStatus << pr.ExitStatus;
+ fpStatus.close();
+
+ /* validation went fine, activate stage and reload */
+ if (pr.ExitStatus == 0) {
+ if (activate) {
+ {
+ std::unique_lock<std::mutex> lock(GetStaticPackageMutex());
+
+ ActivateStage(packageName, stageName);
+ }
+
+ if (reload) {
+ /*
+ * Cancel the deferred callback before it goes out of scope so that the config stages handler
+ * flag isn't reset too early, which would allow other clients to submit further requests while
+ * Icinga 2 is reloading. Otherwise, the ongoing request would be cancelled halfway through
+ * once the new worker becomes ready, before the operation is completed.
+ */
+ resetPackageUpdates->Cancel();
+
+ Application::RequestRestart();
+ }
+ }
+ } else {
+ Log(LogCritical, "ConfigPackageUtility")
+ << "Config validation failed for package '"
+ << packageName << "' and stage '" << stageName << "'.";
+ }
+}
+
+void ConfigPackageUtility::AsyncTryActivateStage(const String& packageName, const String& stageName, bool activate, bool reload,
+ const Shared<Defer>::Ptr& resetPackageUpdates)
+{
+ VERIFY(Application::GetArgC() >= 1);
+
+ // prepare arguments
+ Array::Ptr args = new Array({
+ Application::GetExePath(Application::GetArgV()[0]),
+ });
+
+ // copy all arguments of parent process
+ for (int i = 1; i < Application::GetArgC(); i++) {
+ String argV = Application::GetArgV()[i];
+
+ if (argV == "-d" || argV == "--daemonize")
+ continue;
+
+ args->Add(argV);
+ }
+
+ // add arguments for validation
+ args->Add("--validate");
+ args->Add("--define");
+ args->Add("ActiveStageOverride=" + packageName + ":" + stageName);
+
+ Process::Ptr process = new Process(Process::PrepareCommand(args));
+ process->SetTimeout(Application::GetReloadTimeout());
+ process->Run([packageName, stageName, activate, reload, resetPackageUpdates](const ProcessResult& pr) {
+ TryActivateStageCallback(pr, packageName, stageName, activate, reload, resetPackageUpdates);
+ });
+}
+
+void ConfigPackageUtility::DeleteStage(const String& packageName, const String& stageName)
+{
+ String path = GetPackageDir() + "/" + packageName + "/" + stageName;
+
+ if (!Utility::PathExists(path))
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Stage does not exist."));
+
+ if (GetActiveStage(packageName) == stageName)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Active stage cannot be deleted."));
+
+ Utility::RemoveDirRecursive(path);
+}
+
+std::vector<String> ConfigPackageUtility::GetStages(const String& packageName)
+{
+ std::vector<String> stages;
+ Utility::Glob(GetPackageDir() + "/" + packageName + "/*", [&stages](const String& path) { stages.emplace_back(Utility::BaseName(path)); }, GlobDirectory);
+ return stages;
+}
+
+String ConfigPackageUtility::GetActiveStageFromFile(const String& packageName)
+{
+ /* Lock the transaction; reading this only happens on startup or when something is really broken. */
+ std::unique_lock<std::mutex> lock(GetStaticActiveStageMutex());
+
+ String path = GetPackageDir() + "/" + packageName + "/active-stage";
+
+ std::ifstream fp;
+ fp.open(path.CStr());
+
+ String stage;
+ std::getline(fp, stage.GetData());
+
+ fp.close();
+
+ if (fp.fail())
+ return ""; /* Don't use exceptions here. The caller must deal with empty stages at this point. Happens on initial package creation for example. */
+
+ return stage.Trim();
+}
+
+void ConfigPackageUtility::SetActiveStageToFile(const String& packageName, const String& stageName)
+{
+ std::unique_lock<std::mutex> lock(GetStaticActiveStageMutex());
+
+ String activeStagePath = GetPackageDir() + "/" + packageName + "/active-stage";
+
+ std::ofstream fpActiveStage(activeStagePath.CStr(), std::ofstream::out | std::ostream::binary | std::ostream::trunc); //TODO: fstream exceptions
+ fpActiveStage << stageName;
+ fpActiveStage.close();
+}
+
+String ConfigPackageUtility::GetActiveStage(const String& packageName)
+{
+ String activeStage;
+
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ /* If we don't have an API feature, just use the file storage without caching this.
+ * This happens when ScheduledDowntime objects generate Downtime objects.
+ * TODO: Make the API a first class citizen.
+ */
+ if (!listener)
+ return GetActiveStageFromFile(packageName);
+
+ /* First use runtime state. */
+ try {
+ activeStage = listener->GetActivePackageStage(packageName);
+ } catch (const std::exception& ex) {
+ /* Fallback to reading the file, happens on restarts. */
+ activeStage = GetActiveStageFromFile(packageName);
+
+ /* If we read something, update the in-memory cache. */
+ if (!activeStage.IsEmpty())
+ listener->SetActivePackageStage(packageName, activeStage);
+ }
+
+ return activeStage;
+}
+
+void ConfigPackageUtility::SetActiveStage(const String& packageName, const String& stageName)
+{
+ /* Update the marker on disk for restarts. */
+ SetActiveStageToFile(packageName, stageName);
+
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ /* No API, no caching. */
+ if (!listener)
+ return;
+
+ listener->SetActivePackageStage(packageName, stageName);
+}
+
+std::vector<std::pair<String, bool> > ConfigPackageUtility::GetFiles(const String& packageName, const String& stageName)
+{
+ std::vector<std::pair<String, bool> > paths;
+ Utility::GlobRecursive(GetPackageDir() + "/" + packageName + "/" + stageName, "*", [&paths](const String& path) {
+ CollectPaths(path, paths);
+ }, GlobDirectory | GlobFile);
+
+ return paths;
+}
+
+void ConfigPackageUtility::CollectPaths(const String& path, std::vector<std::pair<String, bool> >& paths)
+{
+#ifndef _WIN32
+ struct stat statbuf;
+ int rc = lstat(path.CStr(), &statbuf);
+ if (rc < 0)
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("lstat")
+ << boost::errinfo_errno(errno)
+ << boost::errinfo_file_name(path));
+
+ paths.emplace_back(path, S_ISDIR(statbuf.st_mode));
+#else /* _WIN32 */
+ struct _stat statbuf;
+ int rc = _stat(path.CStr(), &statbuf);
+ if (rc < 0)
+ BOOST_THROW_EXCEPTION(posix_error()
+ << boost::errinfo_api_function("_stat")
+ << boost::errinfo_errno(errno)
+ << boost::errinfo_file_name(path));
+
+ paths.emplace_back(path, ((statbuf.st_mode & S_IFMT) == S_IFDIR));
+#endif /* _WIN32 */
+}
+
+bool ConfigPackageUtility::ContainsDotDot(const String& path)
+{
+ std::vector<String> tokens = path.Split("/\\");
+
+ for (const String& part : tokens) {
+ if (part == "..")
+ return true;
+ }
+
+ return false;
+}
+
+bool ConfigPackageUtility::ValidatePackageName(const String& packageName)
+{
+ return ValidateFreshName(packageName) || PackageExists(packageName);
+}
+
+bool ConfigPackageUtility::ValidateFreshName(const String& name)
+{
+ if (name.IsEmpty())
+ return false;
+
+ /* check for path injection */
+ if (ContainsDotDot(name))
+ return false;
+
+ return std::all_of(name.Begin(), name.End(), [](char c) {
+ return std::isalnum(c, std::locale::classic()) || c == '_' || c == '-';
+ });
+}
+
+std::mutex& ConfigPackageUtility::GetStaticPackageMutex()
+{
+ static std::mutex mutex;
+ return mutex;
+}
+
+std::mutex& ConfigPackageUtility::GetStaticActiveStageMutex()
+{
+ static std::mutex mutex;
+ return mutex;
+}
diff --git a/lib/remote/configpackageutility.hpp b/lib/remote/configpackageutility.hpp
new file mode 100644
index 0000000..240f591
--- /dev/null
+++ b/lib/remote/configpackageutility.hpp
@@ -0,0 +1,73 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CONFIGMODULEUTILITY_H
+#define CONFIGMODULEUTILITY_H
+
+#include "remote/i2-remote.hpp"
+#include "base/application.hpp"
+#include "base/dictionary.hpp"
+#include "base/process.hpp"
+#include "base/string.hpp"
+#include "base/defer.hpp"
+#include "base/shared.hpp"
+#include <vector>
+
+namespace icinga
+{
+
+/**
+ * Helper functions.
+ *
+ * @ingroup remote
+ */
+class ConfigPackageUtility
+{
+
+public:
+ static String GetPackageDir();
+
+ static void CreatePackage(const String& name);
+ static void DeletePackage(const String& name);
+ static std::vector<String> GetPackages();
+ static bool PackageExists(const String& name);
+
+ static String CreateStage(const String& packageName, const Dictionary::Ptr& files = nullptr);
+ static void DeleteStage(const String& packageName, const String& stageName);
+ static std::vector<String> GetStages(const String& packageName);
+ static String GetActiveStageFromFile(const String& packageName);
+ static String GetActiveStage(const String& packageName);
+ static void SetActiveStage(const String& packageName, const String& stageName);
+ static void SetActiveStageToFile(const String& packageName, const String& stageName);
+ static void ActivateStage(const String& packageName, const String& stageName);
+ static void AsyncTryActivateStage(const String& packageName, const String& stageName, bool activate, bool reload,
+ const Shared<Defer>::Ptr& resetPackageUpdates);
+
+ static std::vector<std::pair<String, bool> > GetFiles(const String& packageName, const String& stageName);
+
+ static bool ContainsDotDot(const String& path);
+ static bool ValidatePackageName(const String& packageName);
+
+ static inline
+ bool ValidateStageName(const String& stageName)
+ {
+ return ValidateFreshName(stageName);
+ }
+
+ static std::mutex& GetStaticPackageMutex();
+ static std::mutex& GetStaticActiveStageMutex();
+
+private:
+ static void CollectPaths(const String& path, std::vector<std::pair<String, bool> >& paths);
+
+ static void WritePackageConfig(const String& packageName);
+ static void WriteStageConfig(const String& packageName, const String& stageName);
+
+ static void TryActivateStageCallback(const ProcessResult& pr, const String& packageName, const String& stageName, bool activate,
+ bool reload, const Shared<Defer>::Ptr& resetPackageUpdates);
+
+ static bool ValidateFreshName(const String& name);
+};
+
+}
+
+#endif /* CONFIGMODULEUTILITY_H */
diff --git a/lib/remote/configstageshandler.cpp b/lib/remote/configstageshandler.cpp
new file mode 100644
index 0000000..b5aaadd
--- /dev/null
+++ b/lib/remote/configstageshandler.cpp
@@ -0,0 +1,225 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/configstageshandler.hpp"
+#include "remote/configpackageutility.hpp"
+#include "remote/httputility.hpp"
+#include "remote/filterutility.hpp"
+#include "base/application.hpp"
+#include "base/defer.hpp"
+#include "base/exception.hpp"
+
+using namespace icinga;
+
+REGISTER_URLHANDLER("/v1/config/stages", ConfigStagesHandler);
+
+std::atomic<bool> ConfigStagesHandler::m_RunningPackageUpdates (false);
+
+bool ConfigStagesHandler::HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+)
+{
+ namespace http = boost::beast::http;
+
+ if (url->GetPath().size() > 5)
+ return false;
+
+ if (request.method() == http::verb::get)
+ HandleGet(user, request, url, response, params);
+ else if (request.method() == http::verb::post)
+ HandlePost(user, request, url, response, params);
+ else if (request.method() == http::verb::delete_)
+ HandleDelete(user, request, url, response, params);
+ else
+ return false;
+
+ return true;
+}
+
+void ConfigStagesHandler::HandleGet(
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params
+)
+{
+ namespace http = boost::beast::http;
+
+ FilterUtility::CheckPermission(user, "config/query");
+
+ if (url->GetPath().size() >= 4)
+ params->Set("package", url->GetPath()[3]);
+
+ if (url->GetPath().size() >= 5)
+ params->Set("stage", url->GetPath()[4]);
+
+ String packageName = HttpUtility::GetLastParameter(params, "package");
+ String stageName = HttpUtility::GetLastParameter(params, "stage");
+
+ if (!ConfigPackageUtility::ValidatePackageName(packageName))
+ return HttpUtility::SendJsonError(response, params, 400, "Invalid package name '" + packageName + "'.");
+
+ if (!ConfigPackageUtility::ValidateStageName(stageName))
+ return HttpUtility::SendJsonError(response, params, 400, "Invalid stage name '" + stageName + "'.");
+
+ ArrayData results;
+
+ std::vector<std::pair<String, bool> > paths = ConfigPackageUtility::GetFiles(packageName, stageName);
+
+ String prefixPath = ConfigPackageUtility::GetPackageDir() + "/" + packageName + "/" + stageName + "/";
+
+ for (const auto& kv : paths) {
+ results.push_back(new Dictionary({
+ { "type", kv.second ? "directory" : "file" },
+ { "name", kv.first.SubStr(prefixPath.GetLength()) }
+ }));
+ }
+
+ Dictionary::Ptr result = new Dictionary({
+ { "results", new Array(std::move(results)) }
+ });
+
+ response.result(http::status::ok);
+ HttpUtility::SendJsonBody(response, params, result);
+}
+
+void ConfigStagesHandler::HandlePost(
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params
+)
+{
+ namespace http = boost::beast::http;
+
+ FilterUtility::CheckPermission(user, "config/modify");
+
+ if (url->GetPath().size() >= 4)
+ params->Set("package", url->GetPath()[3]);
+
+ String packageName = HttpUtility::GetLastParameter(params, "package");
+
+ if (!ConfigPackageUtility::ValidatePackageName(packageName))
+ return HttpUtility::SendJsonError(response, params, 400, "Invalid package name '" + packageName + "'.");
+
+ bool reload = true;
+
+ if (params->Contains("reload"))
+ reload = HttpUtility::GetLastParameter(params, "reload");
+
+ bool activate = true;
+
+ if (params->Contains("activate"))
+ activate = HttpUtility::GetLastParameter(params, "activate");
+
+ Dictionary::Ptr files = params->Get("files");
+
+ String stageName;
+
+ try {
+ if (!files)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Parameter 'files' must be specified."));
+
+ if (reload && !activate)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Parameter 'reload' must be false when 'activate' is false."));
+
+ if (m_RunningPackageUpdates.exchange(true)) {
+ return HttpUtility::SendJsonError(response, params, 423,
+ "Conflicting request, there is already an ongoing package update in progress. Please try it again later.");
+ }
+
+ auto resetPackageUpdates (Shared<Defer>::Make([]() { ConfigStagesHandler::m_RunningPackageUpdates.store(false); }));
+
+ std::unique_lock<std::mutex> lock(ConfigPackageUtility::GetStaticPackageMutex());
+
+ stageName = ConfigPackageUtility::CreateStage(packageName, files);
+
+ /* validate the config. on success, activate stage and reload */
+ ConfigPackageUtility::AsyncTryActivateStage(packageName, stageName, activate, reload, resetPackageUpdates);
+ } catch (const std::exception& ex) {
+ return HttpUtility::SendJsonError(response, params, 500,
+ "Stage creation failed.",
+ DiagnosticInformation(ex));
+ }
+
+
+ String responseStatus = "Created stage. ";
+
+ if (reload)
+ responseStatus += "Reload triggered.";
+ else
+ responseStatus += "Reload skipped.";
+
+ Dictionary::Ptr result1 = new Dictionary({
+ { "package", packageName },
+ { "stage", stageName },
+ { "code", 200 },
+ { "status", responseStatus }
+ });
+
+ Dictionary::Ptr result = new Dictionary({
+ { "results", new Array({ result1 }) }
+ });
+
+ response.result(http::status::ok);
+ HttpUtility::SendJsonBody(response, params, result);
+}
+
+void ConfigStagesHandler::HandleDelete(
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params
+)
+{
+ namespace http = boost::beast::http;
+
+ FilterUtility::CheckPermission(user, "config/modify");
+
+ if (url->GetPath().size() >= 4)
+ params->Set("package", url->GetPath()[3]);
+
+ if (url->GetPath().size() >= 5)
+ params->Set("stage", url->GetPath()[4]);
+
+ String packageName = HttpUtility::GetLastParameter(params, "package");
+ String stageName = HttpUtility::GetLastParameter(params, "stage");
+
+ if (!ConfigPackageUtility::ValidatePackageName(packageName))
+ return HttpUtility::SendJsonError(response, params, 400, "Invalid package name '" + packageName + "'.");
+
+ if (!ConfigPackageUtility::ValidateStageName(stageName))
+ return HttpUtility::SendJsonError(response, params, 400, "Invalid stage name '" + stageName + "'.");
+
+ try {
+ ConfigPackageUtility::DeleteStage(packageName, stageName);
+ } catch (const std::exception& ex) {
+ return HttpUtility::SendJsonError(response, params, 500,
+ "Failed to delete stage '" + stageName + "' in package '" + packageName + "'.",
+ DiagnosticInformation(ex));
+ }
+
+ Dictionary::Ptr result1 = new Dictionary({
+ { "code", 200 },
+ { "package", packageName },
+ { "stage", stageName },
+ { "status", "Stage deleted." }
+ });
+
+ Dictionary::Ptr result = new Dictionary({
+ { "results", new Array({ result1 }) }
+ });
+
+ response.result(http::status::ok);
+ HttpUtility::SendJsonBody(response, params, result);
+}
+
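A minimal Python sketch of uploading a new stage through the /v1/config/stages handler above, assuming an existing package and an ApiUser with the config/modify permission; the package name and configuration content are placeholders. With the default reload=true the instance validates the stage asynchronously and, on success, activates it and restarts.

import requests

BASE = "https://localhost:5665"
AUTH = ("root", "icinga")  # hypothetical ApiUser credentials

body = {
    "files": {
        "conf.d/test.conf": 'object Host "cfg-api-test" { check_command = "dummy" }'
    }
}

resp = requests.post(f"{BASE}/v1/config/stages/example-cmdb",
                     auth=AUTH, verify=False,
                     headers={"Accept": "application/json"}, json=body)
resp.raise_for_status()
stage = resp.json()["results"][0]["stage"]

# The startup.log and status files written by TryActivateStageCallback() can afterwards be
# inspected via /v1/config/files to check the validation result.
print("created stage", stage)
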
diff --git a/lib/remote/configstageshandler.hpp b/lib/remote/configstageshandler.hpp
new file mode 100644
index 0000000..88f248c
--- /dev/null
+++ b/lib/remote/configstageshandler.hpp
@@ -0,0 +1,56 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CONFIGSTAGESHANDLER_H
+#define CONFIGSTAGESHANDLER_H
+
+#include "remote/httphandler.hpp"
+#include <atomic>
+
+namespace icinga
+{
+
+class ConfigStagesHandler final : public HttpHandler
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ConfigStagesHandler);
+
+ bool HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+ ) override;
+
+private:
+ void HandleGet(
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params
+ );
+ void HandlePost(
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params
+ );
+ void HandleDelete(
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params
+ );
+
+ static std::atomic<bool> m_RunningPackageUpdates;
+};
+
+}
+
+#endif /* CONFIGSTAGESHANDLER_H */
diff --git a/lib/remote/consolehandler.cpp b/lib/remote/consolehandler.cpp
new file mode 100644
index 0000000..f5a470a
--- /dev/null
+++ b/lib/remote/consolehandler.cpp
@@ -0,0 +1,327 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/configobjectslock.hpp"
+#include "remote/consolehandler.hpp"
+#include "remote/httputility.hpp"
+#include "remote/filterutility.hpp"
+#include "config/configcompiler.hpp"
+#include "base/configtype.hpp"
+#include "base/configwriter.hpp"
+#include "base/scriptglobal.hpp"
+#include "base/logger.hpp"
+#include "base/serializer.hpp"
+#include "base/timer.hpp"
+#include "base/namespace.hpp"
+#include "base/initialize.hpp"
+#include "base/utility.hpp"
+#include <boost/thread/once.hpp>
+#include <set>
+
+using namespace icinga;
+
+REGISTER_URLHANDLER("/v1/console", ConsoleHandler);
+
+static std::mutex l_QueryMutex;
+static std::map<String, ApiScriptFrame> l_ApiScriptFrames;
+static Timer::Ptr l_FrameCleanupTimer;
+static std::mutex l_ApiScriptMutex;
+
+static void ScriptFrameCleanupHandler()
+{
+ std::unique_lock<std::mutex> lock(l_ApiScriptMutex);
+
+ std::vector<String> cleanup_keys;
+
+ typedef std::pair<String, ApiScriptFrame> KVPair;
+
+ for (const KVPair& kv : l_ApiScriptFrames) {
+ if (kv.second.Seen < Utility::GetTime() - 1800)
+ cleanup_keys.push_back(kv.first);
+ }
+
+ for (const String& key : cleanup_keys)
+ l_ApiScriptFrames.erase(key);
+}
+
+static void EnsureFrameCleanupTimer()
+{
+ static boost::once_flag once = BOOST_ONCE_INIT;
+
+ boost::call_once(once, []() {
+ l_FrameCleanupTimer = Timer::Create();
+ l_FrameCleanupTimer->OnTimerExpired.connect([](const Timer * const&) { ScriptFrameCleanupHandler(); });
+ l_FrameCleanupTimer->SetInterval(30);
+ l_FrameCleanupTimer->Start();
+ });
+}
+
+bool ConsoleHandler::HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+)
+{
+ namespace http = boost::beast::http;
+
+ if (url->GetPath().size() != 3)
+ return false;
+
+ if (request.method() != http::verb::post)
+ return false;
+
+ QueryDescription qd;
+
+ String methodName = url->GetPath()[2];
+
+ FilterUtility::CheckPermission(user, "console");
+
+ String session = HttpUtility::GetLastParameter(params, "session");
+
+ if (session.IsEmpty())
+ session = Utility::NewUniqueID();
+
+ String command = HttpUtility::GetLastParameter(params, "command");
+
+ bool sandboxed = HttpUtility::GetLastParameter(params, "sandboxed");
+
+ ConfigObjectsSharedLock lock (std::try_to_lock);
+
+ if (!lock) {
+ HttpUtility::SendJsonError(response, params, 503, "Icinga is reloading.");
+ return true;
+ }
+
+ if (methodName == "execute-script")
+ return ExecuteScriptHelper(request, response, params, command, session, sandboxed);
+ else if (methodName == "auto-complete-script")
+ return AutocompleteScriptHelper(request, response, params, command, session, sandboxed);
+
+ HttpUtility::SendJsonError(response, params, 400, "Invalid method specified: " + methodName);
+ return true;
+}
+
+bool ConsoleHandler::ExecuteScriptHelper(boost::beast::http::request<boost::beast::http::string_body>& request,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params, const String& command, const String& session, bool sandboxed)
+{
+ namespace http = boost::beast::http;
+
+ Log(LogNotice, "Console")
+ << "Executing expression: " << command;
+
+ EnsureFrameCleanupTimer();
+
+ ApiScriptFrame& lsf = l_ApiScriptFrames[session];
+ lsf.Seen = Utility::GetTime();
+
+ if (!lsf.Locals)
+ lsf.Locals = new Dictionary();
+
+ String fileName = "<" + Convert::ToString(lsf.NextLine) + ">";
+ lsf.NextLine++;
+
+ lsf.Lines[fileName] = command;
+
+ Dictionary::Ptr resultInfo;
+ std::unique_ptr<Expression> expr;
+ Value exprResult;
+
+ try {
+ expr = ConfigCompiler::CompileText(fileName, command);
+
+ ScriptFrame frame(true);
+ frame.Locals = lsf.Locals;
+ frame.Self = lsf.Locals;
+ frame.Sandboxed = sandboxed;
+
+ exprResult = expr->Evaluate(frame);
+
+ resultInfo = new Dictionary({
+ { "code", 200 },
+ { "status", "Executed successfully." },
+ { "result", Serialize(exprResult, 0) }
+ });
+ } catch (const ScriptError& ex) {
+ DebugInfo di = ex.GetDebugInfo();
+
+ std::ostringstream msgbuf;
+
+ msgbuf << di.Path << ": " << lsf.Lines[di.Path] << "\n"
+ << String(di.Path.GetLength() + 2, ' ')
+ << String(di.FirstColumn, ' ') << String(di.LastColumn - di.FirstColumn + 1, '^') << "\n"
+ << ex.what() << "\n";
+
+ resultInfo = new Dictionary({
+ { "code", 500 },
+ { "status", String(msgbuf.str()) },
+ { "incomplete_expression", ex.IsIncompleteExpression() },
+ { "debug_info", new Dictionary({
+ { "path", di.Path },
+ { "first_line", di.FirstLine },
+ { "first_column", di.FirstColumn },
+ { "last_line", di.LastLine },
+ { "last_column", di.LastColumn }
+ }) }
+ });
+ }
+
+ Dictionary::Ptr result = new Dictionary({
+ { "results", new Array({ resultInfo }) }
+ });
+
+ response.result(http::status::ok);
+ HttpUtility::SendJsonBody(response, params, result);
+
+ return true;
+}
+
+bool ConsoleHandler::AutocompleteScriptHelper(boost::beast::http::request<boost::beast::http::string_body>& request,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params, const String& command, const String& session, bool sandboxed)
+{
+ namespace http = boost::beast::http;
+
+ Log(LogInformation, "Console")
+ << "Auto-completing expression: " << command;
+
+ EnsureFrameCleanupTimer();
+
+ ApiScriptFrame& lsf = l_ApiScriptFrames[session];
+ lsf.Seen = Utility::GetTime();
+
+ if (!lsf.Locals)
+ lsf.Locals = new Dictionary();
+
+
+ ScriptFrame frame(true);
+ frame.Locals = lsf.Locals;
+ frame.Self = lsf.Locals;
+ frame.Sandboxed = sandboxed;
+
+ Dictionary::Ptr result1 = new Dictionary({
+ { "code", 200 },
+ { "status", "Auto-completed successfully." },
+ { "suggestions", Array::FromVector(GetAutocompletionSuggestions(command, frame)) }
+ });
+
+ Dictionary::Ptr result = new Dictionary({
+ { "results", new Array({ result1 }) }
+ });
+
+ response.result(http::status::ok);
+ HttpUtility::SendJsonBody(response, params, result);
+
+ return true;
+}
+
+static void AddSuggestion(std::vector<String>& matches, const String& word, const String& suggestion)
+{
+ if (suggestion.Find(word) != 0)
+ return;
+
+ matches.push_back(suggestion);
+}
+
+static void AddSuggestions(std::vector<String>& matches, const String& word, const String& pword, bool withFields, const Value& value)
+{
+ String prefix;
+
+ if (!pword.IsEmpty())
+ prefix = pword + ".";
+
+ if (value.IsObjectType<Dictionary>()) {
+ Dictionary::Ptr dict = value;
+
+ ObjectLock olock(dict);
+ for (const Dictionary::Pair& kv : dict) {
+ AddSuggestion(matches, word, prefix + kv.first);
+ }
+ }
+
+ if (value.IsObjectType<Namespace>()) {
+ Namespace::Ptr ns = value;
+
+ ObjectLock olock(ns);
+ for (const Namespace::Pair& kv : ns) {
+ AddSuggestion(matches, word, prefix + kv.first);
+ }
+ }
+
+ if (withFields) {
+ Type::Ptr type = value.GetReflectionType();
+
+ for (int i = 0; i < type->GetFieldCount(); i++) {
+ Field field = type->GetFieldInfo(i);
+
+ AddSuggestion(matches, word, prefix + field.Name);
+ }
+
+ while (type) {
+ Object::Ptr prototype = type->GetPrototype();
+ Dictionary::Ptr dict = dynamic_pointer_cast<Dictionary>(prototype);
+
+ if (dict) {
+ ObjectLock olock(dict);
+ for (const Dictionary::Pair& kv : dict) {
+ AddSuggestion(matches, word, prefix + kv.first);
+ }
+ }
+
+ type = type->GetBaseType();
+ }
+ }
+}
+
+std::vector<String> ConsoleHandler::GetAutocompletionSuggestions(const String& word, ScriptFrame& frame)
+{
+ std::vector<String> matches;
+
+ for (const String& keyword : ConfigWriter::GetKeywords()) {
+ AddSuggestion(matches, word, keyword);
+ }
+
+ {
+ ObjectLock olock(frame.Locals);
+ for (const Dictionary::Pair& kv : frame.Locals) {
+ AddSuggestion(matches, word, kv.first);
+ }
+ }
+
+ {
+ ObjectLock olock(ScriptGlobal::GetGlobals());
+ for (const Namespace::Pair& kv : ScriptGlobal::GetGlobals()) {
+ AddSuggestion(matches, word, kv.first);
+ }
+ }
+
+ Namespace::Ptr systemNS = ScriptGlobal::Get("System");
+
+ AddSuggestions(matches, word, "", false, systemNS);
+ AddSuggestions(matches, word, "", true, systemNS->Get("Configuration"));
+ AddSuggestions(matches, word, "", false, ScriptGlobal::Get("Types"));
+ AddSuggestions(matches, word, "", false, ScriptGlobal::Get("Icinga"));
+
+ String::SizeType cperiod = word.RFind(".");
+
+ if (cperiod != String::NPos) {
+ String pword = word.SubStr(0, cperiod);
+
+ Value value;
+
+ try {
+ std::unique_ptr<Expression> expr = ConfigCompiler::CompileText("temp", pword);
+
+ if (expr)
+ value = expr->Evaluate(frame);
+
+ AddSuggestions(matches, word, pword, true, value);
+ } catch (...) { /* Ignore the exception */ }
+ }
+
+ return matches;
+}
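
The completion logic above is a plain, case-sensitive prefix match: AddSuggestion() only keeps candidates whose name begins with the typed word, and for dotted words the text before the last '.' is compiled and evaluated so that the members of the resulting value can be offered as well. A standalone sketch of the same prefix rule, using std::string instead of icinga::String purely for illustration:

#include <iostream>
#include <string>
#include <vector>

// Mirrors ConsoleHandler's AddSuggestion(): keep only candidates that
// start with the partially typed word.
static void AddSuggestion(std::vector<std::string>& matches,
    const std::string& word, const std::string& suggestion)
{
    if (suggestion.rfind(word, 0) != 0)
        return;

    matches.push_back(suggestion);
}

int main()
{
    const std::vector<std::string> candidates { "System", "SystemTime", "Types", "Icinga" };
    std::vector<std::string> matches;

    for (const std::string& candidate : candidates)
        AddSuggestion(matches, "Sys", candidate);

    for (const std::string& match : matches)
        std::cout << match << "\n"; // prints "System" and "SystemTime"
}
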
diff --git a/lib/remote/consolehandler.hpp b/lib/remote/consolehandler.hpp
new file mode 100644
index 0000000..df0d77d
--- /dev/null
+++ b/lib/remote/consolehandler.hpp
@@ -0,0 +1,50 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CONSOLEHANDLER_H
+#define CONSOLEHANDLER_H
+
+#include "remote/httphandler.hpp"
+#include "base/scriptframe.hpp"
+
+namespace icinga
+{
+
+struct ApiScriptFrame
+{
+ double Seen{0};
+ int NextLine{1};
+ std::map<String, String> Lines;
+ Dictionary::Ptr Locals;
+};
+
+class ConsoleHandler final : public HttpHandler
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ConsoleHandler);
+
+ bool HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+ ) override;
+
+ static std::vector<String> GetAutocompletionSuggestions(const String& word, ScriptFrame& frame);
+
+private:
+ static bool ExecuteScriptHelper(boost::beast::http::request<boost::beast::http::string_body>& request,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params, const String& command, const String& session, bool sandboxed);
+ static bool AutocompleteScriptHelper(boost::beast::http::request<boost::beast::http::string_body>& request,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params, const String& command, const String& session, bool sandboxed);
+
+};
+
+}
+
+#endif /* CONSOLEHANDLER_H */
diff --git a/lib/remote/createobjecthandler.cpp b/lib/remote/createobjecthandler.cpp
new file mode 100644
index 0000000..598eeec
--- /dev/null
+++ b/lib/remote/createobjecthandler.cpp
@@ -0,0 +1,155 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/createobjecthandler.hpp"
+#include "remote/configobjectslock.hpp"
+#include "remote/configobjectutility.hpp"
+#include "remote/httputility.hpp"
+#include "remote/jsonrpcconnection.hpp"
+#include "remote/filterutility.hpp"
+#include "remote/apiaction.hpp"
+#include "remote/zone.hpp"
+#include "base/configtype.hpp"
+#include <set>
+
+using namespace icinga;
+
+REGISTER_URLHANDLER("/v1/objects", CreateObjectHandler);
+
+bool CreateObjectHandler::HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+)
+{
+ namespace http = boost::beast::http;
+
+ if (url->GetPath().size() != 4)
+ return false;
+
+ if (request.method() != http::verb::put)
+ return false;
+
+ Type::Ptr type = FilterUtility::TypeFromPluralName(url->GetPath()[2]);
+
+ if (!type) {
+ HttpUtility::SendJsonError(response, params, 400, "Invalid type specified.");
+ return true;
+ }
+
+ FilterUtility::CheckPermission(user, "objects/create/" + type->GetName());
+
+ String name = url->GetPath()[3];
+ Array::Ptr templates = params->Get("templates");
+ Dictionary::Ptr attrs = params->Get("attrs");
+
+	/* Put created objects into the local zone if no zone is explicitly defined.
+	 * This allows additional zone members to sync the configuration
+	 * at some later point.
+	 */
+ Zone::Ptr localZone = Zone::GetLocalZone();
+ String localZoneName;
+
+ if (localZone) {
+ localZoneName = localZone->GetName();
+
+ if (!attrs) {
+ attrs = new Dictionary({
+ { "zone", localZoneName }
+ });
+ } else if (!attrs->Contains("zone")) {
+ attrs->Set("zone", localZoneName);
+ }
+ }
+
+ /* Sanity checks for unique groups array. */
+ if (attrs->Contains("groups")) {
+ Array::Ptr groups = attrs->Get("groups");
+
+ if (groups)
+ attrs->Set("groups", groups->Unique());
+ }
+
+ Dictionary::Ptr result1 = new Dictionary();
+ String status;
+ Array::Ptr errors = new Array();
+ Array::Ptr diagnosticInformation = new Array();
+
+ bool ignoreOnError = false;
+
+ if (params->Contains("ignore_on_error"))
+ ignoreOnError = HttpUtility::GetLastParameter(params, "ignore_on_error");
+
+ Dictionary::Ptr result = new Dictionary({
+ { "results", new Array({ result1 }) }
+ });
+
+ String config;
+
+ bool verbose = false;
+
+ if (params)
+ verbose = HttpUtility::GetLastParameter(params, "verbose");
+
+ ConfigObjectsSharedLock lock (std::try_to_lock);
+
+ if (!lock) {
+ HttpUtility::SendJsonError(response, params, 503, "Icinga is reloading");
+ return true;
+ }
+
+	/* Object creation can produce multiple errors and, optionally, diagnostic
+	 * information, so we can't use SendJsonError() here.
+	 */
+ try {
+ config = ConfigObjectUtility::CreateObjectConfig(type, name, ignoreOnError, templates, attrs);
+ } catch (const std::exception& ex) {
+ errors->Add(DiagnosticInformation(ex, false));
+ diagnosticInformation->Add(DiagnosticInformation(ex));
+
+ if (verbose)
+ result1->Set("diagnostic_information", diagnosticInformation);
+
+ result1->Set("errors", errors);
+ result1->Set("code", 500);
+ result1->Set("status", "Object could not be created.");
+
+ response.result(http::status::internal_server_error);
+ HttpUtility::SendJsonBody(response, params, result);
+
+ return true;
+ }
+
+ if (!ConfigObjectUtility::CreateObject(type, name, config, errors, diagnosticInformation)) {
+ result1->Set("errors", errors);
+ result1->Set("code", 500);
+ result1->Set("status", "Object could not be created.");
+
+ if (verbose)
+ result1->Set("diagnostic_information", diagnosticInformation);
+
+ response.result(http::status::internal_server_error);
+ HttpUtility::SendJsonBody(response, params, result);
+
+ return true;
+ }
+
+ auto *ctype = dynamic_cast<ConfigType *>(type.get());
+ ConfigObject::Ptr obj = ctype->GetObject(name);
+
+ result1->Set("code", 200);
+
+ if (obj)
+ result1->Set("status", "Object was created");
+ else if (!obj && ignoreOnError)
+ result1->Set("status", "Object was not created but 'ignore_on_error' was set to true");
+
+ response.result(http::status::ok);
+ HttpUtility::SendJsonBody(response, params, result);
+
+ return true;
+}
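
CreateObjectHandler serves PUT /v1/objects/<plural type>/<name>; by the time it runs, HttpUtility::FetchRequestParameters() (see httphandler.cpp below) has already turned the request into the params dictionary, so the keys it cares about are "templates", "attrs" and "ignore_on_error". A minimal sketch of such a params dictionary, built with the same Dictionary/Array initializers used throughout this patch (the host attributes are illustrative):

// Roughly what the handler sees for PUT /v1/objects/hosts/example-host.
Dictionary::Ptr params = new Dictionary({
    { "templates", new Array({ "generic-host" }) },
    { "attrs", new Dictionary({
        { "address", "192.0.2.10" },
        { "check_command", "hostalive" }
    }) },
    { "ignore_on_error", true }
});
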
diff --git a/lib/remote/createobjecthandler.hpp b/lib/remote/createobjecthandler.hpp
new file mode 100644
index 0000000..4bcf21b
--- /dev/null
+++ b/lib/remote/createobjecthandler.hpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CREATEOBJECTHANDLER_H
+#define CREATEOBJECTHANDLER_H
+
+#include "remote/httphandler.hpp"
+
+namespace icinga
+{
+
+class CreateObjectHandler final : public HttpHandler
+{
+public:
+ DECLARE_PTR_TYPEDEFS(CreateObjectHandler);
+
+ bool HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+ ) override;
+};
+
+}
+
+#endif /* CREATEOBJECTHANDLER_H */
diff --git a/lib/remote/deleteobjecthandler.cpp b/lib/remote/deleteobjecthandler.cpp
new file mode 100644
index 0000000..a4fd98d
--- /dev/null
+++ b/lib/remote/deleteobjecthandler.cpp
@@ -0,0 +1,123 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/deleteobjecthandler.hpp"
+#include "remote/configobjectslock.hpp"
+#include "remote/configobjectutility.hpp"
+#include "remote/httputility.hpp"
+#include "remote/filterutility.hpp"
+#include "remote/apiaction.hpp"
+#include "config/configitem.hpp"
+#include "base/exception.hpp"
+#include <boost/algorithm/string/case_conv.hpp>
+#include <set>
+
+using namespace icinga;
+
+REGISTER_URLHANDLER("/v1/objects", DeleteObjectHandler);
+
+bool DeleteObjectHandler::HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+)
+{
+ namespace http = boost::beast::http;
+
+ if (url->GetPath().size() < 3 || url->GetPath().size() > 4)
+ return false;
+
+ if (request.method() != http::verb::delete_)
+ return false;
+
+ Type::Ptr type = FilterUtility::TypeFromPluralName(url->GetPath()[2]);
+
+ if (!type) {
+ HttpUtility::SendJsonError(response, params, 400, "Invalid type specified.");
+ return true;
+ }
+
+ QueryDescription qd;
+ qd.Types.insert(type->GetName());
+ qd.Permission = "objects/delete/" + type->GetName();
+
+ params->Set("type", type->GetName());
+
+ if (url->GetPath().size() >= 4) {
+ String attr = type->GetName();
+ boost::algorithm::to_lower(attr);
+ params->Set(attr, url->GetPath()[3]);
+ }
+
+ std::vector<Value> objs;
+
+ try {
+ objs = FilterUtility::GetFilterTargets(qd, params, user);
+ } catch (const std::exception& ex) {
+ HttpUtility::SendJsonError(response, params, 404,
+ "No objects found.",
+ DiagnosticInformation(ex));
+ return true;
+ }
+
+ bool cascade = HttpUtility::GetLastParameter(params, "cascade");
+ bool verbose = HttpUtility::GetLastParameter(params, "verbose");
+
+ ConfigObjectsSharedLock lock (std::try_to_lock);
+
+ if (!lock) {
+ HttpUtility::SendJsonError(response, params, 503, "Icinga is reloading");
+ return true;
+ }
+
+ ArrayData results;
+
+ bool success = true;
+
+ for (const ConfigObject::Ptr& obj : objs) {
+ int code;
+ String status;
+ Array::Ptr errors = new Array();
+ Array::Ptr diagnosticInformation = new Array();
+
+ if (!ConfigObjectUtility::DeleteObject(obj, cascade, errors, diagnosticInformation)) {
+ code = 500;
+ status = "Object could not be deleted.";
+ success = false;
+ } else {
+ code = 200;
+ status = "Object was deleted.";
+ }
+
+ Dictionary::Ptr result = new Dictionary({
+ { "type", type->GetName() },
+ { "name", obj->GetName() },
+ { "code", code },
+ { "status", status },
+ { "errors", errors }
+ });
+
+ if (verbose)
+ result->Set("diagnostic_information", diagnosticInformation);
+
+ results.push_back(result);
+ }
+
+ Dictionary::Ptr result = new Dictionary({
+ { "results", new Array(std::move(results)) }
+ });
+
+ if (!success)
+ response.result(http::status::internal_server_error);
+ else
+ response.result(http::status::ok);
+
+ HttpUtility::SendJsonBody(response, params, result);
+
+ return true;
+}
+
diff --git a/lib/remote/deleteobjecthandler.hpp b/lib/remote/deleteobjecthandler.hpp
new file mode 100644
index 0000000..19a46e4
--- /dev/null
+++ b/lib/remote/deleteobjecthandler.hpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef DELETEOBJECTHANDLER_H
+#define DELETEOBJECTHANDLER_H
+
+#include "remote/httphandler.hpp"
+
+namespace icinga
+{
+
+class DeleteObjectHandler final : public HttpHandler
+{
+public:
+ DECLARE_PTR_TYPEDEFS(DeleteObjectHandler);
+
+ bool HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+ ) override;
+};
+
+}
+
+#endif /* DELETEOBJECTHANDLER_H */
diff --git a/lib/remote/endpoint.cpp b/lib/remote/endpoint.cpp
new file mode 100644
index 0000000..e534fc1
--- /dev/null
+++ b/lib/remote/endpoint.cpp
@@ -0,0 +1,138 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/endpoint.hpp"
+#include "remote/endpoint-ti.cpp"
+#include "remote/apilistener.hpp"
+#include "remote/jsonrpcconnection.hpp"
+#include "remote/zone.hpp"
+#include "base/configtype.hpp"
+#include "base/utility.hpp"
+#include "base/exception.hpp"
+#include "base/convert.hpp"
+
+using namespace icinga;
+
+REGISTER_TYPE(Endpoint);
+
+boost::signals2::signal<void(const Endpoint::Ptr&, const JsonRpcConnection::Ptr&)> Endpoint::OnConnected;
+boost::signals2::signal<void(const Endpoint::Ptr&, const JsonRpcConnection::Ptr&)> Endpoint::OnDisconnected;
+
+void Endpoint::OnAllConfigLoaded()
+{
+ ObjectImpl<Endpoint>::OnAllConfigLoaded();
+
+ if (!m_Zone)
+ BOOST_THROW_EXCEPTION(ScriptError("Endpoint '" + GetName() +
+ "' does not belong to a zone.", GetDebugInfo()));
+}
+
+void Endpoint::SetCachedZone(const Zone::Ptr& zone)
+{
+ if (m_Zone)
+ BOOST_THROW_EXCEPTION(ScriptError("Endpoint '" + GetName()
+ + "' is in more than one zone.", GetDebugInfo()));
+
+ m_Zone = zone;
+}
+
+void Endpoint::AddClient(const JsonRpcConnection::Ptr& client)
+{
+ bool was_master = ApiListener::GetInstance()->IsMaster();
+
+ {
+ std::unique_lock<std::mutex> lock(m_ClientsLock);
+ m_Clients.insert(client);
+ }
+
+ bool is_master = ApiListener::GetInstance()->IsMaster();
+
+ if (was_master != is_master)
+ ApiListener::OnMasterChanged(is_master);
+
+ OnConnected(this, client);
+}
+
+void Endpoint::RemoveClient(const JsonRpcConnection::Ptr& client)
+{
+ bool was_master = ApiListener::GetInstance()->IsMaster();
+
+ {
+ std::unique_lock<std::mutex> lock(m_ClientsLock);
+ m_Clients.erase(client);
+
+ Log(LogWarning, "ApiListener")
+ << "Removing API client for endpoint '" << GetName() << "'. " << m_Clients.size() << " API clients left.";
+
+ SetConnecting(false);
+ }
+
+ bool is_master = ApiListener::GetInstance()->IsMaster();
+
+ if (was_master != is_master)
+ ApiListener::OnMasterChanged(is_master);
+
+ OnDisconnected(this, client);
+}
+
+std::set<JsonRpcConnection::Ptr> Endpoint::GetClients() const
+{
+ std::unique_lock<std::mutex> lock(m_ClientsLock);
+ return m_Clients;
+}
+
+Zone::Ptr Endpoint::GetZone() const
+{
+ return m_Zone;
+}
+
+bool Endpoint::GetConnected() const
+{
+ std::unique_lock<std::mutex> lock(m_ClientsLock);
+ return !m_Clients.empty();
+}
+
+Endpoint::Ptr Endpoint::GetLocalEndpoint()
+{
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return nullptr;
+
+ return listener->GetLocalEndpoint();
+}
+
+void Endpoint::AddMessageSent(int bytes)
+{
+ double time = Utility::GetTime();
+ m_MessagesSent.InsertValue(time, 1);
+ m_BytesSent.InsertValue(time, bytes);
+ SetLastMessageSent(time);
+}
+
+void Endpoint::AddMessageReceived(int bytes)
+{
+ double time = Utility::GetTime();
+ m_MessagesReceived.InsertValue(time, 1);
+ m_BytesReceived.InsertValue(time, bytes);
+ SetLastMessageReceived(time);
+}
+
+double Endpoint::GetMessagesSentPerSecond() const
+{
+ return m_MessagesSent.CalculateRate(Utility::GetTime(), 60);
+}
+
+double Endpoint::GetMessagesReceivedPerSecond() const
+{
+ return m_MessagesReceived.CalculateRate(Utility::GetTime(), 60);
+}
+
+double Endpoint::GetBytesSentPerSecond() const
+{
+ return m_BytesSent.CalculateRate(Utility::GetTime(), 60);
+}
+
+double Endpoint::GetBytesReceivedPerSecond() const
+{
+ return m_BytesReceived.CalculateRate(Utility::GetTime(), 60);
+}
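
Every sent or received message records one sample in the 60-second RingBuffers above, and the *PerSecond getters ask the ring buffer for the rate over the last 60 seconds. A rough, illustrative stand-in for that bookkeeping (not the actual RingBuffer implementation) could look like this:

#include <cstdint>
#include <deque>
#include <utility>

// Sliding-window counter: Insert() on every message, PerSecond() averages
// the samples that fall within the last 60 seconds.
class RateWindow
{
public:
    void Insert(double now, std::uint64_t value)
    {
        m_Samples.emplace_back(now, value);
        Prune(now);
    }

    double PerSecond(double now)
    {
        Prune(now);

        std::uint64_t sum = 0;
        for (const auto& sample : m_Samples)
            sum += sample.second;

        return sum / 60.0;
    }

private:
    void Prune(double now)
    {
        while (!m_Samples.empty() && m_Samples.front().first < now - 60)
            m_Samples.pop_front();
    }

    std::deque<std::pair<double, std::uint64_t>> m_Samples;
};
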
diff --git a/lib/remote/endpoint.hpp b/lib/remote/endpoint.hpp
new file mode 100644
index 0000000..d641c2c
--- /dev/null
+++ b/lib/remote/endpoint.hpp
@@ -0,0 +1,68 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef ENDPOINT_H
+#define ENDPOINT_H
+
+#include "remote/i2-remote.hpp"
+#include "remote/endpoint-ti.hpp"
+#include "base/ringbuffer.hpp"
+#include <set>
+
+namespace icinga
+{
+
+class JsonRpcConnection;
+class Zone;
+
+/**
+ * An endpoint that can be used to send and receive messages.
+ *
+ * @ingroup remote
+ */
+class Endpoint final : public ObjectImpl<Endpoint>
+{
+public:
+ DECLARE_OBJECT(Endpoint);
+ DECLARE_OBJECTNAME(Endpoint);
+
+ static boost::signals2::signal<void(const Endpoint::Ptr&, const intrusive_ptr<JsonRpcConnection>&)> OnConnected;
+ static boost::signals2::signal<void(const Endpoint::Ptr&, const intrusive_ptr<JsonRpcConnection>&)> OnDisconnected;
+
+ void AddClient(const intrusive_ptr<JsonRpcConnection>& client);
+ void RemoveClient(const intrusive_ptr<JsonRpcConnection>& client);
+ std::set<intrusive_ptr<JsonRpcConnection> > GetClients() const;
+
+ intrusive_ptr<Zone> GetZone() const;
+
+ bool GetConnected() const override;
+
+ static Endpoint::Ptr GetLocalEndpoint();
+
+ void SetCachedZone(const intrusive_ptr<Zone>& zone);
+
+ void AddMessageSent(int bytes);
+ void AddMessageReceived(int bytes);
+
+ double GetMessagesSentPerSecond() const override;
+ double GetMessagesReceivedPerSecond() const override;
+
+ double GetBytesSentPerSecond() const override;
+ double GetBytesReceivedPerSecond() const override;
+
+protected:
+ void OnAllConfigLoaded() override;
+
+private:
+ mutable std::mutex m_ClientsLock;
+ std::set<intrusive_ptr<JsonRpcConnection> > m_Clients;
+ intrusive_ptr<Zone> m_Zone;
+
+ mutable RingBuffer m_MessagesSent{60};
+ mutable RingBuffer m_MessagesReceived{60};
+ mutable RingBuffer m_BytesSent{60};
+ mutable RingBuffer m_BytesReceived{60};
+};
+
+}
+
+#endif /* ENDPOINT_H */
diff --git a/lib/remote/endpoint.ti b/lib/remote/endpoint.ti
new file mode 100644
index 0000000..78551ec
--- /dev/null
+++ b/lib/remote/endpoint.ti
@@ -0,0 +1,59 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+#include <cstdint>
+
+library remote;
+
+namespace icinga
+{
+
+class Endpoint : ConfigObject
+{
+ load_after Zone;
+
+ [config] String host;
+ [config, required] String port {
+ default {{{ return "5665"; }}}
+ };
+ [config] double log_duration {
+ default {{{ return 86400; }}}
+ };
+
+ [state] Timestamp local_log_position;
+ [state] Timestamp remote_log_position;
+ [state] "unsigned long" icinga_version {
+ default {{{ return 0; }}}
+ };
+ [state] uint_fast64_t capabilities {
+ default {{{ return 0; }}}
+ };
+
+ [no_user_modify] bool connecting;
+ [no_user_modify] bool syncing;
+
+ [no_user_modify, no_storage] bool connected {
+ get;
+ };
+
+ Timestamp last_message_sent;
+ Timestamp last_message_received;
+
+ [no_user_modify, no_storage] double messages_sent_per_second {
+ get;
+ };
+
+ [no_user_modify, no_storage] double messages_received_per_second {
+ get;
+ };
+
+ [no_user_modify, no_storage] double bytes_sent_per_second {
+ get;
+ };
+
+ [no_user_modify, no_storage] double bytes_received_per_second {
+ get;
+ };
+};
+
+}
diff --git a/lib/remote/eventqueue.cpp b/lib/remote/eventqueue.cpp
new file mode 100644
index 0000000..d79b615
--- /dev/null
+++ b/lib/remote/eventqueue.cpp
@@ -0,0 +1,351 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "config/configcompiler.hpp"
+#include "remote/eventqueue.hpp"
+#include "remote/filterutility.hpp"
+#include "base/io-engine.hpp"
+#include "base/singleton.hpp"
+#include "base/logger.hpp"
+#include "base/utility.hpp"
+#include <boost/asio/spawn.hpp>
+#include <boost/date_time/posix_time/posix_time_duration.hpp>
+#include <boost/date_time/posix_time/ptime.hpp>
+#include <boost/system/error_code.hpp>
+#include <chrono>
+#include <utility>
+
+using namespace icinga;
+
+EventQueue::EventQueue(String name)
+ : m_Name(std::move(name))
+{ }
+
+bool EventQueue::CanProcessEvent(const String& type) const
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ return m_Types.find(type) != m_Types.end();
+}
+
+void EventQueue::ProcessEvent(const Dictionary::Ptr& event)
+{
+ Namespace::Ptr frameNS = new Namespace();
+ ScriptFrame frame(true, frameNS);
+ frame.Sandboxed = true;
+
+ try {
+ if (!FilterUtility::EvaluateFilter(frame, m_Filter.get(), event, "event"))
+ return;
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "EventQueue")
+ << "Error occurred while evaluating event filter for queue '" << m_Name << "': " << DiagnosticInformation(ex);
+ return;
+ }
+
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ typedef std::pair<void *const, std::deque<Dictionary::Ptr> > kv_pair;
+ for (kv_pair& kv : m_Events) {
+ kv.second.push_back(event);
+ }
+
+ m_CV.notify_all();
+}
+
+void EventQueue::AddClient(void *client)
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ auto result = m_Events.insert(std::make_pair(client, std::deque<Dictionary::Ptr>()));
+ ASSERT(result.second);
+
+#ifndef I2_DEBUG
+ (void)result;
+#endif /* I2_DEBUG */
+}
+
+void EventQueue::RemoveClient(void *client)
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ m_Events.erase(client);
+}
+
+void EventQueue::UnregisterIfUnused(const String& name, const EventQueue::Ptr& queue)
+{
+ std::unique_lock<std::mutex> lock(queue->m_Mutex);
+
+ if (queue->m_Events.empty())
+ Unregister(name);
+}
+
+void EventQueue::SetTypes(const std::set<String>& types)
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+ m_Types = types;
+}
+
+void EventQueue::SetFilter(std::unique_ptr<Expression> filter)
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+ m_Filter.swap(filter);
+}
+
+Dictionary::Ptr EventQueue::WaitForEvent(void *client, double timeout)
+{
+ std::unique_lock<std::mutex> lock(m_Mutex);
+
+ for (;;) {
+ auto it = m_Events.find(client);
+ ASSERT(it != m_Events.end());
+
+ if (!it->second.empty()) {
+ Dictionary::Ptr result = *it->second.begin();
+ it->second.pop_front();
+ return result;
+ }
+
+ if (m_CV.wait_for(lock, std::chrono::duration<double>(timeout)) == std::cv_status::timeout)
+ return nullptr;
+ }
+}
+
+std::vector<EventQueue::Ptr> EventQueue::GetQueuesForType(const String& type)
+{
+ EventQueueRegistry::ItemMap queues = EventQueueRegistry::GetInstance()->GetItems();
+
+ std::vector<EventQueue::Ptr> availQueues;
+
+ typedef std::pair<String, EventQueue::Ptr> kv_pair;
+ for (const kv_pair& kv : queues) {
+ if (kv.second->CanProcessEvent(type))
+ availQueues.push_back(kv.second);
+ }
+
+ return availQueues;
+}
+
+EventQueue::Ptr EventQueue::GetByName(const String& name)
+{
+ return EventQueueRegistry::GetInstance()->GetItem(name);
+}
+
+void EventQueue::Register(const String& name, const EventQueue::Ptr& function)
+{
+ EventQueueRegistry::GetInstance()->Register(name, function);
+}
+
+void EventQueue::Unregister(const String& name)
+{
+ EventQueueRegistry::GetInstance()->Unregister(name);
+}
+
+EventQueueRegistry *EventQueueRegistry::GetInstance()
+{
+ return Singleton<EventQueueRegistry>::GetInstance();
+}
+
+std::mutex EventsInbox::m_FiltersMutex;
+std::map<String, EventsInbox::Filter> EventsInbox::m_Filters ({{"", EventsInbox::Filter{1, Expression::Ptr()}}});
+
+EventsRouter EventsRouter::m_Instance;
+
+EventsInbox::EventsInbox(String filter, const String& filterSource)
+ : m_Timer(IoEngine::Get().GetIoContext())
+{
+ std::unique_lock<std::mutex> lock (m_FiltersMutex);
+ m_Filter = m_Filters.find(filter);
+
+ if (m_Filter == m_Filters.end()) {
+ lock.unlock();
+
+ auto expr (ConfigCompiler::CompileText(filterSource, filter));
+
+ lock.lock();
+
+ m_Filter = m_Filters.find(filter);
+
+ if (m_Filter == m_Filters.end()) {
+ m_Filter = m_Filters.emplace(std::move(filter), Filter{1, Expression::Ptr(expr.release())}).first;
+ } else {
+ ++m_Filter->second.Refs;
+ }
+ } else {
+ ++m_Filter->second.Refs;
+ }
+}
+
+EventsInbox::~EventsInbox()
+{
+ std::unique_lock<std::mutex> lock (m_FiltersMutex);
+
+ if (!--m_Filter->second.Refs) {
+ m_Filters.erase(m_Filter);
+ }
+}
+
+const Expression::Ptr& EventsInbox::GetFilter()
+{
+ return m_Filter->second.Expr;
+}
+
+void EventsInbox::Push(Dictionary::Ptr event)
+{
+ std::unique_lock<std::mutex> lock (m_Mutex);
+
+ m_Queue.emplace(std::move(event));
+ m_Timer.expires_at(boost::posix_time::neg_infin);
+}
+
+Dictionary::Ptr EventsInbox::Shift(boost::asio::yield_context yc, double timeout)
+{
+ std::unique_lock<std::mutex> lock (m_Mutex, std::defer_lock);
+
+ m_Timer.expires_at(boost::posix_time::neg_infin);
+
+ {
+ boost::system::error_code ec;
+
+ while (!lock.try_lock()) {
+ m_Timer.async_wait(yc[ec]);
+ }
+ }
+
+ if (m_Queue.empty()) {
+ m_Timer.expires_from_now(boost::posix_time::milliseconds((unsigned long)(timeout * 1000.0)));
+ lock.unlock();
+
+ {
+ boost::system::error_code ec;
+ m_Timer.async_wait(yc[ec]);
+
+ while (!lock.try_lock()) {
+ m_Timer.async_wait(yc[ec]);
+ }
+ }
+
+ if (m_Queue.empty()) {
+ return nullptr;
+ }
+ }
+
+ auto event (std::move(m_Queue.front()));
+ m_Queue.pop();
+ return event;
+}
+
+EventsSubscriber::EventsSubscriber(std::set<EventType> types, String filter, const String& filterSource)
+ : m_Types(std::move(types)), m_Inbox(new EventsInbox(std::move(filter), filterSource))
+{
+ EventsRouter::GetInstance().Subscribe(m_Types, m_Inbox);
+}
+
+EventsSubscriber::~EventsSubscriber()
+{
+ EventsRouter::GetInstance().Unsubscribe(m_Types, m_Inbox);
+}
+
+const EventsInbox::Ptr& EventsSubscriber::GetInbox()
+{
+ return m_Inbox;
+}
+
+EventsFilter::EventsFilter(std::map<Expression::Ptr, std::set<EventsInbox::Ptr>> inboxes)
+ : m_Inboxes(std::move(inboxes))
+{
+}
+
+EventsFilter::operator bool()
+{
+ return !m_Inboxes.empty();
+}
+
+void EventsFilter::Push(Dictionary::Ptr event)
+{
+ for (auto& perFilter : m_Inboxes) {
+ if (perFilter.first) {
+ ScriptFrame frame(true, new Namespace());
+ frame.Sandboxed = true;
+
+ try {
+ if (!FilterUtility::EvaluateFilter(frame, perFilter.first.get(), event, "event")) {
+ continue;
+ }
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "EventQueue")
+ << "Error occurred while evaluating event filter for queue: " << DiagnosticInformation(ex);
+ continue;
+ }
+ }
+
+ for (auto& inbox : perFilter.second) {
+ inbox->Push(event);
+ }
+ }
+}
+
+EventsRouter& EventsRouter::GetInstance()
+{
+ return m_Instance;
+}
+
+void EventsRouter::Subscribe(const std::set<EventType>& types, const EventsInbox::Ptr& inbox)
+{
+ const auto& filter (inbox->GetFilter());
+ std::unique_lock<std::mutex> lock (m_Mutex);
+
+ for (auto type : types) {
+ auto perType (m_Subscribers.find(type));
+
+ if (perType == m_Subscribers.end()) {
+ perType = m_Subscribers.emplace(type, decltype(perType->second)()).first;
+ }
+
+ auto perFilter (perType->second.find(filter));
+
+ if (perFilter == perType->second.end()) {
+ perFilter = perType->second.emplace(filter, decltype(perFilter->second)()).first;
+ }
+
+ perFilter->second.emplace(inbox);
+ }
+}
+
+void EventsRouter::Unsubscribe(const std::set<EventType>& types, const EventsInbox::Ptr& inbox)
+{
+ const auto& filter (inbox->GetFilter());
+ std::unique_lock<std::mutex> lock (m_Mutex);
+
+ for (auto type : types) {
+ auto perType (m_Subscribers.find(type));
+
+ if (perType != m_Subscribers.end()) {
+ auto perFilter (perType->second.find(filter));
+
+ if (perFilter != perType->second.end()) {
+ perFilter->second.erase(inbox);
+
+ if (perFilter->second.empty()) {
+ perType->second.erase(perFilter);
+ }
+ }
+
+ if (perType->second.empty()) {
+ m_Subscribers.erase(perType);
+ }
+ }
+ }
+}
+
+EventsFilter EventsRouter::GetInboxes(EventType type)
+{
+ std::unique_lock<std::mutex> lock (m_Mutex);
+
+ auto perType (m_Subscribers.find(type));
+
+ if (perType == m_Subscribers.end()) {
+ return EventsFilter({});
+ }
+
+ return EventsFilter(perType->second);
+}
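
EventsInbox, EventsSubscriber and EventsRouter together implement the fan-out: Push() delivers an event to every inbox whose filter matches, while Shift() lets an ASIO coroutine wait for the next event with a timeout. A minimal usage sketch, assuming a boost::asio::yield_context named yc as in the HTTP handlers (the filter text and the HandleEvent() consumer are made up; EventsHandler below follows the same pattern):

EventsSubscriber subscriber (
    { EventType::CheckResult },            // event types to receive
    "event.check_result.exit_status == 2", // filter source (illustrative)
    "<example>");                          // origin label used in error messages

for (;;) {
    Dictionary::Ptr event = subscriber.GetInbox()->Shift(yc, 5);

    if (!event)
        continue; // timed out after 5 seconds without an event

    HandleEvent(event); // hypothetical consumer; the real handler also
                        // checks server.Disconnected() to leave the loop
}
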
diff --git a/lib/remote/eventqueue.hpp b/lib/remote/eventqueue.hpp
new file mode 100644
index 0000000..32bd34a
--- /dev/null
+++ b/lib/remote/eventqueue.hpp
@@ -0,0 +1,177 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef EVENTQUEUE_H
+#define EVENTQUEUE_H
+
+#include "remote/httphandler.hpp"
+#include "base/object.hpp"
+#include "config/expression.hpp"
+#include <boost/asio/deadline_timer.hpp>
+#include <boost/asio/spawn.hpp>
+#include <condition_variable>
+#include <cstddef>
+#include <cstdint>
+#include <mutex>
+#include <set>
+#include <map>
+#include <deque>
+#include <queue>
+
+namespace icinga
+{
+
+class EventQueue final : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(EventQueue);
+
+ EventQueue(String name);
+
+ bool CanProcessEvent(const String& type) const;
+ void ProcessEvent(const Dictionary::Ptr& event);
+ void AddClient(void *client);
+ void RemoveClient(void *client);
+
+ void SetTypes(const std::set<String>& types);
+ void SetFilter(std::unique_ptr<Expression> filter);
+
+ Dictionary::Ptr WaitForEvent(void *client, double timeout = 5);
+
+ static std::vector<EventQueue::Ptr> GetQueuesForType(const String& type);
+ static void UnregisterIfUnused(const String& name, const EventQueue::Ptr& queue);
+
+ static EventQueue::Ptr GetByName(const String& name);
+ static void Register(const String& name, const EventQueue::Ptr& function);
+ static void Unregister(const String& name);
+
+private:
+ String m_Name;
+
+ mutable std::mutex m_Mutex;
+ std::condition_variable m_CV;
+
+ std::set<String> m_Types;
+ std::unique_ptr<Expression> m_Filter;
+
+ std::map<void *, std::deque<Dictionary::Ptr> > m_Events;
+};
+
+/**
+ * A registry for API event queues.
+ *
+ * @ingroup base
+ */
+class EventQueueRegistry : public Registry<EventQueueRegistry, EventQueue::Ptr>
+{
+public:
+ static EventQueueRegistry *GetInstance();
+};
+
+enum class EventType : uint_fast8_t
+{
+ AcknowledgementCleared,
+ AcknowledgementSet,
+ CheckResult,
+ CommentAdded,
+ CommentRemoved,
+ DowntimeAdded,
+ DowntimeRemoved,
+ DowntimeStarted,
+ DowntimeTriggered,
+ Flapping,
+ Notification,
+ StateChange,
+ ObjectCreated,
+ ObjectDeleted,
+ ObjectModified
+};
+
+class EventsInbox : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(EventsInbox);
+
+ EventsInbox(String filter, const String& filterSource);
+ EventsInbox(const EventsInbox&) = delete;
+ EventsInbox(EventsInbox&&) = delete;
+ EventsInbox& operator=(const EventsInbox&) = delete;
+ EventsInbox& operator=(EventsInbox&&) = delete;
+ ~EventsInbox();
+
+ const Expression::Ptr& GetFilter();
+
+ void Push(Dictionary::Ptr event);
+ Dictionary::Ptr Shift(boost::asio::yield_context yc, double timeout = 5);
+
+private:
+ struct Filter
+ {
+ std::size_t Refs;
+ Expression::Ptr Expr;
+ };
+
+ static std::mutex m_FiltersMutex;
+ static std::map<String, Filter> m_Filters;
+
+ std::mutex m_Mutex;
+ decltype(m_Filters.begin()) m_Filter;
+ std::queue<Dictionary::Ptr> m_Queue;
+ boost::asio::deadline_timer m_Timer;
+};
+
+class EventsSubscriber
+{
+public:
+ EventsSubscriber(std::set<EventType> types, String filter, const String& filterSource);
+ EventsSubscriber(const EventsSubscriber&) = delete;
+ EventsSubscriber(EventsSubscriber&&) = delete;
+ EventsSubscriber& operator=(const EventsSubscriber&) = delete;
+ EventsSubscriber& operator=(EventsSubscriber&&) = delete;
+ ~EventsSubscriber();
+
+ const EventsInbox::Ptr& GetInbox();
+
+private:
+ std::set<EventType> m_Types;
+ EventsInbox::Ptr m_Inbox;
+};
+
+class EventsFilter
+{
+public:
+ EventsFilter(std::map<Expression::Ptr, std::set<EventsInbox::Ptr>> inboxes);
+
+ operator bool();
+
+ void Push(Dictionary::Ptr event);
+
+private:
+ std::map<Expression::Ptr, std::set<EventsInbox::Ptr>> m_Inboxes;
+};
+
+class EventsRouter
+{
+public:
+ static EventsRouter& GetInstance();
+
+ void Subscribe(const std::set<EventType>& types, const EventsInbox::Ptr& inbox);
+ void Unsubscribe(const std::set<EventType>& types, const EventsInbox::Ptr& inbox);
+ EventsFilter GetInboxes(EventType type);
+
+private:
+ static EventsRouter m_Instance;
+
+ EventsRouter() = default;
+ EventsRouter(const EventsRouter&) = delete;
+ EventsRouter(EventsRouter&&) = delete;
+ EventsRouter& operator=(const EventsRouter&) = delete;
+ EventsRouter& operator=(EventsRouter&&) = delete;
+ ~EventsRouter() = default;
+
+ std::mutex m_Mutex;
+ std::map<EventType, std::map<Expression::Ptr, std::set<EventsInbox::Ptr>>> m_Subscribers;
+};
+
+}
+
+#endif /* EVENTQUEUE_H */
diff --git a/lib/remote/eventshandler.cpp b/lib/remote/eventshandler.cpp
new file mode 100644
index 0000000..e05ef22
--- /dev/null
+++ b/lib/remote/eventshandler.cpp
@@ -0,0 +1,137 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/eventshandler.hpp"
+#include "remote/httputility.hpp"
+#include "remote/filterutility.hpp"
+#include "config/configcompiler.hpp"
+#include "config/expression.hpp"
+#include "base/defer.hpp"
+#include "base/io-engine.hpp"
+#include "base/objectlock.hpp"
+#include "base/json.hpp"
+#include <boost/asio/buffer.hpp>
+#include <boost/asio/write.hpp>
+#include <boost/algorithm/string/replace.hpp>
+#include <map>
+#include <set>
+
+using namespace icinga;
+
+REGISTER_URLHANDLER("/v1/events", EventsHandler);
+
+const std::map<String, EventType> l_EventTypes ({
+ {"AcknowledgementCleared", EventType::AcknowledgementCleared},
+ {"AcknowledgementSet", EventType::AcknowledgementSet},
+ {"CheckResult", EventType::CheckResult},
+ {"CommentAdded", EventType::CommentAdded},
+ {"CommentRemoved", EventType::CommentRemoved},
+ {"DowntimeAdded", EventType::DowntimeAdded},
+ {"DowntimeRemoved", EventType::DowntimeRemoved},
+ {"DowntimeStarted", EventType::DowntimeStarted},
+ {"DowntimeTriggered", EventType::DowntimeTriggered},
+ {"Flapping", EventType::Flapping},
+ {"Notification", EventType::Notification},
+ {"StateChange", EventType::StateChange},
+ {"ObjectCreated", EventType::ObjectCreated},
+ {"ObjectDeleted", EventType::ObjectDeleted},
+ {"ObjectModified", EventType::ObjectModified}
+});
+
+const String l_ApiQuery ("<API query>");
+
+bool EventsHandler::HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+)
+{
+ namespace asio = boost::asio;
+ namespace http = boost::beast::http;
+
+ if (url->GetPath().size() != 2)
+ return false;
+
+ if (request.method() != http::verb::post)
+ return false;
+
+ if (request.version() == 10) {
+ HttpUtility::SendJsonError(response, params, 400, "HTTP/1.0 not supported for event streams.");
+ return true;
+ }
+
+ Array::Ptr types = params->Get("types");
+
+ if (!types) {
+ HttpUtility::SendJsonError(response, params, 400, "'types' query parameter is required.");
+ return true;
+ }
+
+ {
+ ObjectLock olock(types);
+ for (const String& type : types) {
+ FilterUtility::CheckPermission(user, "events/" + type);
+ }
+ }
+
+ String queueName = HttpUtility::GetLastParameter(params, "queue");
+
+ if (queueName.IsEmpty()) {
+ HttpUtility::SendJsonError(response, params, 400, "'queue' query parameter is required.");
+ return true;
+ }
+
+ std::set<EventType> eventTypes;
+
+ {
+ ObjectLock olock(types);
+ for (const String& type : types) {
+ auto typeId (l_EventTypes.find(type));
+
+ if (typeId != l_EventTypes.end()) {
+ eventTypes.emplace(typeId->second);
+ }
+ }
+ }
+
+ EventsSubscriber subscriber (std::move(eventTypes), HttpUtility::GetLastParameter(params, "filter"), l_ApiQuery);
+
+ server.StartStreaming();
+
+ response.result(http::status::ok);
+ response.set(http::field::content_type, "application/json");
+
+ IoBoundWorkSlot dontLockTheIoThread (yc);
+
+ http::async_write(stream, response, yc);
+ stream.async_flush(yc);
+
+ asio::const_buffer newLine ("\n", 1);
+
+ for (;;) {
+ auto event (subscriber.GetInbox()->Shift(yc));
+
+ if (event) {
+ CpuBoundWork buildingResponse (yc);
+
+ String body = JsonEncode(event);
+
+ boost::algorithm::replace_all(body, "\n", "");
+
+ asio::const_buffer payload (body.CStr(), body.GetLength());
+
+ buildingResponse.Done();
+
+ asio::async_write(stream, payload, yc);
+ asio::async_write(stream, newLine, yc);
+ stream.async_flush(yc);
+ } else if (server.Disconnected()) {
+ return true;
+ }
+ }
+}
+
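
The response produced above is a newline-delimited JSON stream: each event is serialized, embedded newlines are stripped, and the result is written followed by a single '\n', so a client can simply split the body on line breaks. A tiny illustrative consumer with made-up payloads:

#include <iostream>
#include <sstream>
#include <string>

int main()
{
    // Two events as they would arrive on the wire, one JSON object per line.
    std::istringstream stream (
        "{\"type\":\"CheckResult\",\"host\":\"example\"}\n"
        "{\"type\":\"StateChange\",\"host\":\"example\"}\n");

    std::string line;
    while (std::getline(stream, line))
        std::cout << "got event: " << line << "\n";
}
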
diff --git a/lib/remote/eventshandler.hpp b/lib/remote/eventshandler.hpp
new file mode 100644
index 0000000..c823415
--- /dev/null
+++ b/lib/remote/eventshandler.hpp
@@ -0,0 +1,31 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef EVENTSHANDLER_H
+#define EVENTSHANDLER_H
+
+#include "remote/httphandler.hpp"
+#include "remote/eventqueue.hpp"
+
+namespace icinga
+{
+
+class EventsHandler final : public HttpHandler
+{
+public:
+ DECLARE_PTR_TYPEDEFS(EventsHandler);
+
+ bool HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+ ) override;
+};
+
+}
+
+#endif /* EVENTSHANDLER_H */
diff --git a/lib/remote/filterutility.cpp b/lib/remote/filterutility.cpp
new file mode 100644
index 0000000..468b91e
--- /dev/null
+++ b/lib/remote/filterutility.cpp
@@ -0,0 +1,354 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/filterutility.hpp"
+#include "remote/httputility.hpp"
+#include "config/applyrule.hpp"
+#include "config/configcompiler.hpp"
+#include "config/expression.hpp"
+#include "base/namespace.hpp"
+#include "base/json.hpp"
+#include "base/configtype.hpp"
+#include "base/logger.hpp"
+#include "base/utility.hpp"
+#include <boost/algorithm/string/case_conv.hpp>
+#include <memory>
+
+using namespace icinga;
+
+Type::Ptr FilterUtility::TypeFromPluralName(const String& pluralName)
+{
+ String uname = pluralName;
+ boost::algorithm::to_lower(uname);
+
+ for (const Type::Ptr& type : Type::GetAllTypes()) {
+ String pname = type->GetPluralName();
+ boost::algorithm::to_lower(pname);
+
+ if (uname == pname)
+ return type;
+ }
+
+ return nullptr;
+}
+
+void ConfigObjectTargetProvider::FindTargets(const String& type, const std::function<void (const Value&)>& addTarget) const
+{
+ Type::Ptr ptype = Type::GetByName(type);
+ auto *ctype = dynamic_cast<ConfigType *>(ptype.get());
+
+ if (ctype) {
+ for (const ConfigObject::Ptr& object : ctype->GetObjects()) {
+ addTarget(object);
+ }
+ }
+}
+
+Value ConfigObjectTargetProvider::GetTargetByName(const String& type, const String& name) const
+{
+ ConfigObject::Ptr obj = ConfigObject::GetObject(type, name);
+
+ if (!obj)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Object does not exist."));
+
+ return obj;
+}
+
+bool ConfigObjectTargetProvider::IsValidType(const String& type) const
+{
+ Type::Ptr ptype = Type::GetByName(type);
+
+ if (!ptype)
+ return false;
+
+ return ConfigObject::TypeInstance->IsAssignableFrom(ptype);
+}
+
+String ConfigObjectTargetProvider::GetPluralName(const String& type) const
+{
+ return Type::GetByName(type)->GetPluralName();
+}
+
+bool FilterUtility::EvaluateFilter(ScriptFrame& frame, Expression *filter,
+ const Object::Ptr& target, const String& variableName)
+{
+ if (!filter)
+ return true;
+
+ Type::Ptr type = target->GetReflectionType();
+ String varName;
+
+ if (variableName.IsEmpty())
+ varName = type->GetName().ToLower();
+ else
+ varName = variableName;
+
+ Namespace::Ptr frameNS;
+
+ if (frame.Self.IsEmpty()) {
+ frameNS = new Namespace();
+ frame.Self = frameNS;
+ } else {
+ /* Enforce a namespace object for 'frame.self'. */
+ ASSERT(frame.Self.IsObjectType<Namespace>());
+
+ frameNS = frame.Self;
+
+ ASSERT(frameNS != ScriptGlobal::GetGlobals());
+ }
+
+ frameNS->Set("obj", target);
+ frameNS->Set(varName, target);
+
+ for (int fid = 0; fid < type->GetFieldCount(); fid++) {
+ Field field = type->GetFieldInfo(fid);
+
+ if ((field.Attributes & FANavigation) == 0)
+ continue;
+
+ Object::Ptr joinedObj = target->NavigateField(fid);
+
+ if (field.NavigationName)
+ frameNS->Set(field.NavigationName, joinedObj);
+ else
+ frameNS->Set(field.Name, joinedObj);
+ }
+
+ return Convert::ToBool(filter->Evaluate(frame));
+}
+
+static void FilteredAddTarget(ScriptFrame& permissionFrame, Expression *permissionFilter,
+ ScriptFrame& frame, Expression *ufilter, std::vector<Value>& result, const String& variableName, const Object::Ptr& target)
+{
+ if (FilterUtility::EvaluateFilter(permissionFrame, permissionFilter, target, variableName)) {
+ if (FilterUtility::EvaluateFilter(frame, ufilter, target, variableName)) {
+ result.emplace_back(std::move(target));
+ }
+ }
+}
+
+/**
+ * Checks whether the given API user is granted the given permission.
+ *
+ * If you want an exception to be raised when the user lacks the permission,
+ * use FilterUtility::CheckPermission() instead.
+ *
+ * @param user The ApiUser whose permissions are checked
+ * @param permission The permission to check the user against
+ * @param permissionFilter Optional output parameter receiving the combined filter expression of all
+ *                         matching permission entries of the given user; ownership passes to the
+ *                         caller's std::unique_ptr.
+ *
+ * @return Whether the user has the given permission
+ */
+bool FilterUtility::HasPermission(const ApiUser::Ptr& user, const String& permission, std::unique_ptr<Expression>* permissionFilter)
+{
+ if (permissionFilter)
+ *permissionFilter = nullptr;
+
+ if (permission.IsEmpty())
+ return true;
+
+ bool foundPermission = false;
+ String requiredPermission = permission.ToLower();
+
+ Array::Ptr permissions = user->GetPermissions();
+ if (permissions) {
+ ObjectLock olock(permissions);
+ for (const Value& item : permissions) {
+ String permission;
+ Function::Ptr filter;
+ if (item.IsObjectType<Dictionary>()) {
+ Dictionary::Ptr dict = item;
+ permission = dict->Get("permission");
+ filter = dict->Get("filter");
+ } else
+ permission = item;
+
+ permission = permission.ToLower();
+
+ if (!Utility::Match(permission, requiredPermission))
+ continue;
+
+ foundPermission = true;
+
+ if (filter && permissionFilter) {
+ std::vector<std::unique_ptr<Expression> > args;
+ args.emplace_back(new GetScopeExpression(ScopeThis));
+ std::unique_ptr<Expression> indexer{new IndexerExpression(std::unique_ptr<Expression>(MakeLiteral(filter)), std::unique_ptr<Expression>(MakeLiteral("call")))};
+ FunctionCallExpression *fexpr = new FunctionCallExpression(std::move(indexer), std::move(args));
+
+ if (!*permissionFilter)
+ permissionFilter->reset(fexpr);
+ else
+ *permissionFilter = std::make_unique<LogicalOrExpression>(std::move(*permissionFilter), std::unique_ptr<Expression>(fexpr));
+ }
+ }
+ }
+
+ if (!foundPermission) {
+ Log(LogWarning, "FilterUtility")
+ << "Missing permission: " << requiredPermission;
+ }
+
+ return foundPermission;
+}
+
+void FilterUtility::CheckPermission(const ApiUser::Ptr& user, const String& permission, std::unique_ptr<Expression>* permissionFilter)
+{
+ if (!HasPermission(user, permission, permissionFilter)) {
+ BOOST_THROW_EXCEPTION(ScriptError("Missing permission: " + permission.ToLower()));
+ }
+}
+
+std::vector<Value> FilterUtility::GetFilterTargets(const QueryDescription& qd, const Dictionary::Ptr& query, const ApiUser::Ptr& user, const String& variableName)
+{
+ std::vector<Value> result;
+
+ TargetProvider::Ptr provider;
+
+ if (qd.Provider)
+ provider = qd.Provider;
+ else
+ provider = new ConfigObjectTargetProvider();
+
+ std::unique_ptr<Expression> permissionFilter;
+ CheckPermission(user, qd.Permission, &permissionFilter);
+
+ Namespace::Ptr permissionFrameNS = new Namespace();
+ ScriptFrame permissionFrame(false, permissionFrameNS);
+
+ for (const String& type : qd.Types) {
+ String attr = type;
+ boost::algorithm::to_lower(attr);
+
+ if (attr == "type")
+ attr = "name";
+
+ if (query && query->Contains(attr)) {
+ String name = HttpUtility::GetLastParameter(query, attr);
+ Object::Ptr target = provider->GetTargetByName(type, name);
+
+ if (!FilterUtility::EvaluateFilter(permissionFrame, permissionFilter.get(), target, variableName))
+ BOOST_THROW_EXCEPTION(ScriptError("Access denied to object '" + name + "' of type '" + type + "'"));
+
+ result.emplace_back(std::move(target));
+ }
+
+ attr = provider->GetPluralName(type);
+ boost::algorithm::to_lower(attr);
+
+ if (query && query->Contains(attr)) {
+ Array::Ptr names = query->Get(attr);
+ if (names) {
+ ObjectLock olock(names);
+ for (const String& name : names) {
+ Object::Ptr target = provider->GetTargetByName(type, name);
+
+ if (!FilterUtility::EvaluateFilter(permissionFrame, permissionFilter.get(), target, variableName))
+ BOOST_THROW_EXCEPTION(ScriptError("Access denied to object '" + name + "' of type '" + type + "'"));
+
+ result.emplace_back(std::move(target));
+ }
+ }
+ }
+ }
+
+ if ((query && query->Contains("filter")) || result.empty()) {
+ if (!query->Contains("type"))
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Type must be specified when using a filter."));
+
+ String type = HttpUtility::GetLastParameter(query, "type");
+
+ if (!provider->IsValidType(type))
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid type specified."));
+
+ if (qd.Types.find(type) == qd.Types.end())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid type specified for this query."));
+
+ Namespace::Ptr frameNS = new Namespace();
+ ScriptFrame frame(false, frameNS);
+ frame.Sandboxed = true;
+
+ if (query->Contains("filter")) {
+ String filter = HttpUtility::GetLastParameter(query, "filter");
+ std::unique_ptr<Expression> ufilter = ConfigCompiler::CompileText("<API query>", filter);
+ Dictionary::Ptr filter_vars = query->Get("filter_vars");
+ bool targeted = false;
+ std::vector<ConfigObject::Ptr> targets;
+
+ if (dynamic_cast<ConfigObjectTargetProvider*>(provider.get())) {
+ auto dict (dynamic_cast<DictExpression*>(ufilter.get()));
+
+ if (dict) {
+ auto& subex (dict->GetExpressions());
+
+ if (subex.size() == 1u) {
+ if (type == "Host") {
+ std::vector<const String *> targetNames;
+
+ if (ApplyRule::GetTargetHosts(subex.at(0).get(), targetNames, filter_vars)) {
+ static const auto typeHost (Type::GetByName("Host"));
+ static const auto ctypeHost (dynamic_cast<ConfigType*>(typeHost.get()));
+ targeted = true;
+
+ for (auto name : targetNames) {
+ auto target (ctypeHost->GetObject(*name));
+
+ if (target) {
+ targets.emplace_back(target);
+ }
+ }
+ }
+ } else if (type == "Service") {
+ std::vector<std::pair<const String *, const String *>> targetNames;
+
+ if (ApplyRule::GetTargetServices(subex.at(0).get(), targetNames, filter_vars)) {
+ static const auto typeService (Type::GetByName("Service"));
+ static const auto ctypeService (dynamic_cast<ConfigType*>(typeService.get()));
+ targeted = true;
+
+ for (auto name : targetNames) {
+ auto target (ctypeService->GetObject(*name.first + "!" + *name.second));
+
+ if (target) {
+ targets.emplace_back(target);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if (targeted) {
+ for (auto& target : targets) {
+ if (FilterUtility::EvaluateFilter(permissionFrame, permissionFilter.get(), target, variableName)) {
+ result.emplace_back(std::move(target));
+ }
+ }
+ } else {
+ if (filter_vars) {
+ ObjectLock olock (filter_vars);
+
+ for (auto& kv : filter_vars) {
+ frameNS->Set(kv.first, kv.second);
+ }
+ }
+
+ provider->FindTargets(type, [&permissionFrame, &permissionFilter, &frame, &ufilter, &result, variableName](const Object::Ptr& target) {
+ FilteredAddTarget(permissionFrame, permissionFilter.get(), frame, &*ufilter, result, variableName, target);
+ });
+ }
+ } else {
+			/* Make sure to pass a nullptr filter expression here.
+			 * GCC 8.1.1 on Fedora 28 causes problems otherwise, see GH #6533.
+			 */
+ provider->FindTargets(type, [&permissionFrame, &permissionFilter, &frame, &result, variableName](const Object::Ptr& target) {
+ FilteredAddTarget(permissionFrame, permissionFilter.get(), frame, nullptr, result, variableName, target);
+ });
+ }
+ }
+
+ return result;
+}
+
diff --git a/lib/remote/filterutility.hpp b/lib/remote/filterutility.hpp
new file mode 100644
index 0000000..7271367
--- /dev/null
+++ b/lib/remote/filterutility.hpp
@@ -0,0 +1,64 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef FILTERUTILITY_H
+#define FILTERUTILITY_H
+
+#include "remote/i2-remote.hpp"
+#include "remote/apiuser.hpp"
+#include "config/expression.hpp"
+#include "base/dictionary.hpp"
+#include "base/configobject.hpp"
+#include <set>
+
+namespace icinga
+{
+
+class TargetProvider : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(TargetProvider);
+
+ virtual void FindTargets(const String& type, const std::function<void (const Value&)>& addTarget) const = 0;
+ virtual Value GetTargetByName(const String& type, const String& name) const = 0;
+ virtual bool IsValidType(const String& type) const = 0;
+ virtual String GetPluralName(const String& type) const = 0;
+};
+
+class ConfigObjectTargetProvider final : public TargetProvider
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ConfigObjectTargetProvider);
+
+ void FindTargets(const String& type, const std::function<void (const Value&)>& addTarget) const override;
+ Value GetTargetByName(const String& type, const String& name) const override;
+ bool IsValidType(const String& type) const override;
+ String GetPluralName(const String& type) const override;
+};
+
+struct QueryDescription
+{
+ std::set<String> Types;
+ TargetProvider::Ptr Provider;
+ String Permission;
+};
+
+/**
+ * Filter utilities.
+ *
+ * @ingroup remote
+ */
+class FilterUtility
+{
+public:
+ static Type::Ptr TypeFromPluralName(const String& pluralName);
+ static void CheckPermission(const ApiUser::Ptr& user, const String& permission, std::unique_ptr<Expression>* filter = nullptr);
+ static bool HasPermission(const ApiUser::Ptr& user, const String& permission, std::unique_ptr<Expression>* permissionFilter = nullptr);
+ static std::vector<Value> GetFilterTargets(const QueryDescription& qd, const Dictionary::Ptr& query,
+ const ApiUser::Ptr& user, const String& variableName = String());
+ static bool EvaluateFilter(ScriptFrame& frame, Expression *filter,
+ const Object::Ptr& target, const String& variableName = String());
+};
+
+}
+
+#endif /* FILTERUTILITY_H */
diff --git a/lib/remote/httphandler.cpp b/lib/remote/httphandler.cpp
new file mode 100644
index 0000000..afe510f
--- /dev/null
+++ b/lib/remote/httphandler.cpp
@@ -0,0 +1,129 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/logger.hpp"
+#include "remote/httphandler.hpp"
+#include "remote/httputility.hpp"
+#include "base/singleton.hpp"
+#include "base/exception.hpp"
+#include <boost/algorithm/string/join.hpp>
+#include <boost/beast/http.hpp>
+
+using namespace icinga;
+
+Dictionary::Ptr HttpHandler::m_UrlTree;
+
+void HttpHandler::Register(const Url::Ptr& url, const HttpHandler::Ptr& handler)
+{
+ if (!m_UrlTree)
+ m_UrlTree = new Dictionary();
+
+ Dictionary::Ptr node = m_UrlTree;
+
+ for (const String& elem : url->GetPath()) {
+ Dictionary::Ptr children = node->Get("children");
+
+ if (!children) {
+ children = new Dictionary();
+ node->Set("children", children);
+ }
+
+ Dictionary::Ptr sub_node = children->Get(elem);
+ if (!sub_node) {
+ sub_node = new Dictionary();
+ children->Set(elem, sub_node);
+ }
+
+ node = sub_node;
+ }
+
+ Array::Ptr handlers = node->Get("handlers");
+
+ if (!handlers) {
+ handlers = new Array();
+ node->Set("handlers", handlers);
+ }
+
+ handlers->Add(handler);
+}
+
+void HttpHandler::ProcessRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+)
+{
+ Dictionary::Ptr node = m_UrlTree;
+ std::vector<HttpHandler::Ptr> handlers;
+
+ Url::Ptr url = new Url(std::string(request.target()));
+ auto& path (url->GetPath());
+
+ for (std::vector<String>::size_type i = 0; i <= path.size(); i++) {
+ Array::Ptr current_handlers = node->Get("handlers");
+
+ if (current_handlers) {
+ ObjectLock olock(current_handlers);
+ for (const HttpHandler::Ptr& current_handler : current_handlers) {
+ handlers.push_back(current_handler);
+ }
+ }
+
+ Dictionary::Ptr children = node->Get("children");
+
+ if (!children) {
+ node.reset();
+ break;
+ }
+
+ if (i == path.size())
+ break;
+
+ node = children->Get(path[i]);
+
+ if (!node)
+ break;
+ }
+
+ std::reverse(handlers.begin(), handlers.end());
+
+ Dictionary::Ptr params;
+
+ try {
+ params = HttpUtility::FetchRequestParameters(url, request.body());
+ } catch (const std::exception& ex) {
+ HttpUtility::SendJsonError(response, params, 400, "Invalid request body: " + DiagnosticInformation(ex, false));
+ return;
+ }
+
+ bool processed = false;
+
+	/*
+	 * HandleRequest may throw a permission exception.
+	 * DO NOT return a specific permission error: doing so would
+	 * allow attackers to infer which objects exist.
+	 */
+ try {
+ for (const HttpHandler::Ptr& handler : handlers) {
+ if (handler->HandleRequest(stream, user, request, url, response, params, yc, server)) {
+ processed = true;
+ break;
+ }
+ }
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "HttpServerConnection")
+ << "Error while processing HTTP request: " << ex.what();
+
+ processed = false;
+ }
+
+ if (!processed) {
+ HttpUtility::SendJsonError(response, params, 404, "The requested path '" + boost::algorithm::join(path, "/") +
+ "' could not be found or the request method is not valid for this path.");
+ return;
+ }
+}
+
diff --git a/lib/remote/httphandler.hpp b/lib/remote/httphandler.hpp
new file mode 100644
index 0000000..a6a7302
--- /dev/null
+++ b/lib/remote/httphandler.hpp
@@ -0,0 +1,74 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef HTTPHANDLER_H
+#define HTTPHANDLER_H
+
+#include "remote/i2-remote.hpp"
+#include "remote/url.hpp"
+#include "remote/httpserverconnection.hpp"
+#include "remote/apiuser.hpp"
+#include "base/registry.hpp"
+#include "base/tlsstream.hpp"
+#include <vector>
+#include <boost/asio/spawn.hpp>
+#include <boost/beast/http.hpp>
+
+namespace icinga
+{
+
+/**
+ * HTTP handler.
+ *
+ * @ingroup remote
+ */
+class HttpHandler : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(HttpHandler);
+
+ virtual bool HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+ ) = 0;
+
+ static void Register(const Url::Ptr& url, const HttpHandler::Ptr& handler);
+ static void ProcessRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+ );
+
+private:
+ static Dictionary::Ptr m_UrlTree;
+};
+
+/**
+ * Helper class for registering HTTP handlers.
+ *
+ * @ingroup remote
+ */
+class RegisterHttpHandler
+{
+public:
+ RegisterHttpHandler(const String& url, const HttpHandler& function);
+};
+
+#define REGISTER_URLHANDLER(url, klass) \
+ INITIALIZE_ONCE([]() { \
+ Url::Ptr uurl = new Url(url); \
+ HttpHandler::Ptr handler = new klass(); \
+ HttpHandler::Register(uurl, handler); \
+ })
+
+}
+
+#endif /* HTTPHANDLER_H */
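
New endpoints hook into the URL tree through the REGISTER_URLHANDLER macro defined above, exactly as the handlers earlier in this patch do. A hypothetical registration (the class name and URL are made up) would look like this:

class ExampleHandler final : public HttpHandler
{
public:
    DECLARE_PTR_TYPEDEFS(ExampleHandler);

    bool HandleRequest(
        AsioTlsStream& stream,
        const ApiUser::Ptr& user,
        boost::beast::http::request<boost::beast::http::string_body>& request,
        const Url::Ptr& url,
        boost::beast::http::response<boost::beast::http::string_body>& response,
        const Dictionary::Ptr& params,
        boost::asio::yield_context& yc,
        HttpServerConnection& server
    ) override;
};

REGISTER_URLHANDLER("/v1/example", ExampleHandler);
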
diff --git a/lib/remote/httpserverconnection.cpp b/lib/remote/httpserverconnection.cpp
new file mode 100644
index 0000000..76cfd3c
--- /dev/null
+++ b/lib/remote/httpserverconnection.cpp
@@ -0,0 +1,613 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/httpserverconnection.hpp"
+#include "remote/httphandler.hpp"
+#include "remote/httputility.hpp"
+#include "remote/apilistener.hpp"
+#include "remote/apifunction.hpp"
+#include "remote/jsonrpc.hpp"
+#include "base/application.hpp"
+#include "base/base64.hpp"
+#include "base/convert.hpp"
+#include "base/configtype.hpp"
+#include "base/defer.hpp"
+#include "base/exception.hpp"
+#include "base/io-engine.hpp"
+#include "base/logger.hpp"
+#include "base/objectlock.hpp"
+#include "base/timer.hpp"
+#include "base/tlsstream.hpp"
+#include "base/utility.hpp"
+#include <chrono>
+#include <limits>
+#include <memory>
+#include <stdexcept>
+#include <boost/asio/error.hpp>
+#include <boost/asio/io_context.hpp>
+#include <boost/asio/spawn.hpp>
+#include <boost/beast/core.hpp>
+#include <boost/beast/http.hpp>
+#include <boost/system/error_code.hpp>
+#include <boost/system/system_error.hpp>
+#include <boost/thread/once.hpp>
+
+using namespace icinga;
+
+auto const l_ServerHeader ("Icinga/" + Application::GetAppVersion());
+
+HttpServerConnection::HttpServerConnection(const String& identity, bool authenticated, const Shared<AsioTlsStream>::Ptr& stream)
+ : HttpServerConnection(identity, authenticated, stream, IoEngine::Get().GetIoContext())
+{
+}
+
+HttpServerConnection::HttpServerConnection(const String& identity, bool authenticated, const Shared<AsioTlsStream>::Ptr& stream, boost::asio::io_context& io)
+ : m_Stream(stream), m_Seen(Utility::GetTime()), m_IoStrand(io), m_ShuttingDown(false), m_HasStartedStreaming(false),
+ m_CheckLivenessTimer(io)
+{
+ if (authenticated) {
+ m_ApiUser = ApiUser::GetByClientCN(identity);
+ }
+
+ {
+ std::ostringstream address;
+ auto endpoint (stream->lowest_layer().remote_endpoint());
+
+ address << '[' << endpoint.address() << "]:" << endpoint.port();
+
+ m_PeerAddress = address.str();
+ }
+}
+
+void HttpServerConnection::Start()
+{
+ namespace asio = boost::asio;
+
+ HttpServerConnection::Ptr keepAlive (this);
+
+ IoEngine::SpawnCoroutine(m_IoStrand, [this, keepAlive](asio::yield_context yc) { ProcessMessages(yc); });
+ IoEngine::SpawnCoroutine(m_IoStrand, [this, keepAlive](asio::yield_context yc) { CheckLiveness(yc); });
+}
+
+void HttpServerConnection::Disconnect()
+{
+ namespace asio = boost::asio;
+
+ HttpServerConnection::Ptr keepAlive (this);
+
+ IoEngine::SpawnCoroutine(m_IoStrand, [this, keepAlive](asio::yield_context yc) {
+ if (!m_ShuttingDown) {
+ m_ShuttingDown = true;
+
+ Log(LogInformation, "HttpServerConnection")
+ << "HTTP client disconnected (from " << m_PeerAddress << ")";
+
+ /*
+ * Do not swallow exceptions in a coroutine.
+ * https://github.com/Icinga/icinga2/issues/7351
+			 * We must not catch the `detail::forced_unwind` exception, as
+			 * it is used for unwinding the stack.
+ *
+ * Just use the error_code dummy here.
+ */
+ boost::system::error_code ec;
+
+ m_CheckLivenessTimer.cancel();
+
+ m_Stream->lowest_layer().cancel(ec);
+
+ m_Stream->next_layer().async_shutdown(yc[ec]);
+
+ m_Stream->lowest_layer().shutdown(m_Stream->lowest_layer().shutdown_both, ec);
+
+ auto listener (ApiListener::GetInstance());
+
+ if (listener) {
+ CpuBoundWork removeHttpClient (yc);
+
+ listener->RemoveHttpClient(this);
+ }
+ }
+ });
+}
+
+void HttpServerConnection::StartStreaming()
+{
+ namespace asio = boost::asio;
+
+ m_HasStartedStreaming = true;
+
+ HttpServerConnection::Ptr keepAlive (this);
+
+ IoEngine::SpawnCoroutine(m_IoStrand, [this, keepAlive](asio::yield_context yc) {
+ if (!m_ShuttingDown) {
+ char buf[128];
+ asio::mutable_buffer readBuf (buf, 128);
+ boost::system::error_code ec;
+
+ do {
+ m_Stream->async_read_some(readBuf, yc[ec]);
+ } while (!ec);
+
+ Disconnect();
+ }
+ });
+}
+
+bool HttpServerConnection::Disconnected()
+{
+ return m_ShuttingDown;
+}
+
+static inline
+bool EnsureValidHeaders(
+ AsioTlsStream& stream,
+ boost::beast::flat_buffer& buf,
+ boost::beast::http::parser<true, boost::beast::http::string_body>& parser,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ bool& shuttingDown,
+ boost::asio::yield_context& yc
+)
+{
+ namespace http = boost::beast::http;
+
+ if (shuttingDown)
+ return false;
+
+ bool httpError = false;
+ String errorMsg;
+
+ boost::system::error_code ec;
+
+ http::async_read_header(stream, buf, parser, yc[ec]);
+
+ if (ec) {
+ if (ec == boost::asio::error::operation_aborted)
+ return false;
+
+ errorMsg = ec.message();
+ httpError = true;
+ } else {
+ switch (parser.get().version()) {
+ case 10:
+ case 11:
+ break;
+ default:
+ errorMsg = "Unsupported HTTP version";
+ }
+ }
+
+ if (!errorMsg.IsEmpty() || httpError) {
+ response.result(http::status::bad_request);
+
+ if (!httpError && parser.get()[http::field::accept] == "application/json") {
+ HttpUtility::SendJsonBody(response, nullptr, new Dictionary({
+ { "error", 400 },
+ { "status", String("Bad Request: ") + errorMsg }
+ }));
+ } else {
+ response.set(http::field::content_type, "text/html");
+ response.body() = String("<h1>Bad Request</h1><p><pre>") + errorMsg + "</pre></p>";
+ response.content_length(response.body().size());
+ }
+
+ response.set(http::field::connection, "close");
+
+ boost::system::error_code ec;
+
+ http::async_write(stream, response, yc[ec]);
+ stream.async_flush(yc[ec]);
+
+ return false;
+ }
+
+ return true;
+}
+
+static inline
+void HandleExpect100(
+ AsioTlsStream& stream,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ boost::asio::yield_context& yc
+)
+{
+ namespace http = boost::beast::http;
+
+ if (request[http::field::expect] == "100-continue") {
+ http::response<http::string_body> response;
+
+ response.result(http::status::continue_);
+
+ boost::system::error_code ec;
+
+ http::async_write(stream, response, yc[ec]);
+ stream.async_flush(yc[ec]);
+ }
+}
+
+static inline
+bool HandleAccessControl(
+ AsioTlsStream& stream,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ boost::asio::yield_context& yc
+)
+{
+ namespace http = boost::beast::http;
+
+ auto listener (ApiListener::GetInstance());
+
+ if (listener) {
+ auto headerAllowOrigin (listener->GetAccessControlAllowOrigin());
+
+ if (headerAllowOrigin) {
+ CpuBoundWork allowOriginHeader (yc);
+
+ auto allowedOrigins (headerAllowOrigin->ToSet<String>());
+
+ if (!allowedOrigins.empty()) {
+ auto& origin (request[http::field::origin]);
+
+ if (allowedOrigins.find(std::string(origin)) != allowedOrigins.end()) {
+ response.set(http::field::access_control_allow_origin, origin);
+ }
+
+ allowOriginHeader.Done();
+
+ response.set(http::field::access_control_allow_credentials, "true");
+
+ if (request.method() == http::verb::options && !request[http::field::access_control_request_method].empty()) {
+ response.result(http::status::ok);
+ response.set(http::field::access_control_allow_methods, "GET, POST, PUT, DELETE");
+ response.set(http::field::access_control_allow_headers, "Authorization, Content-Type, X-HTTP-Method-Override");
+ response.body() = "Preflight OK";
+ response.content_length(response.body().size());
+ response.set(http::field::connection, "close");
+
+ boost::system::error_code ec;
+
+ http::async_write(stream, response, yc[ec]);
+ stream.async_flush(yc[ec]);
+
+ return false;
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+static inline
+bool EnsureAcceptHeader(
+ AsioTlsStream& stream,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ boost::asio::yield_context& yc
+)
+{
+ namespace http = boost::beast::http;
+
+ if (request.method() != http::verb::get && request[http::field::accept] != "application/json") {
+ response.result(http::status::bad_request);
+ response.set(http::field::content_type, "text/html");
+ response.body() = "<h1>Accept header is missing or not set to 'application/json'.</h1>";
+ response.content_length(response.body().size());
+ response.set(http::field::connection, "close");
+
+ boost::system::error_code ec;
+
+ http::async_write(stream, response, yc[ec]);
+ stream.async_flush(yc[ec]);
+
+ return false;
+ }
+
+ return true;
+}
+
+static inline
+bool EnsureAuthenticatedUser(
+ AsioTlsStream& stream,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ ApiUser::Ptr& authenticatedUser,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ boost::asio::yield_context& yc
+)
+{
+ namespace http = boost::beast::http;
+
+ if (!authenticatedUser) {
+ Log(LogWarning, "HttpServerConnection")
+ << "Unauthorized request: " << request.method_string() << ' ' << request.target();
+
+ response.result(http::status::unauthorized);
+ response.set(http::field::www_authenticate, "Basic realm=\"Icinga 2\"");
+ response.set(http::field::connection, "close");
+
+ if (request[http::field::accept] == "application/json") {
+ HttpUtility::SendJsonBody(response, nullptr, new Dictionary({
+ { "error", 401 },
+ { "status", "Unauthorized. Please check your user credentials." }
+ }));
+ } else {
+ response.set(http::field::content_type, "text/html");
+ response.body() = "<h1>Unauthorized. Please check your user credentials.</h1>";
+ response.content_length(response.body().size());
+ }
+
+ boost::system::error_code ec;
+
+ http::async_write(stream, response, yc[ec]);
+ stream.async_flush(yc[ec]);
+
+ return false;
+ }
+
+ return true;
+}
+
+static inline
+bool EnsureValidBody(
+ AsioTlsStream& stream,
+ boost::beast::flat_buffer& buf,
+ boost::beast::http::parser<true, boost::beast::http::string_body>& parser,
+ ApiUser::Ptr& authenticatedUser,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ bool& shuttingDown,
+ boost::asio::yield_context& yc
+)
+{
+ namespace http = boost::beast::http;
+
+ {
+ size_t maxSize = 1024 * 1024;
+ Array::Ptr permissions = authenticatedUser->GetPermissions();
+
+ if (permissions) {
+ CpuBoundWork evalPermissions (yc);
+
+ ObjectLock olock(permissions);
+
+ for (const Value& permissionInfo : permissions) {
+ String permission;
+
+ if (permissionInfo.IsObjectType<Dictionary>()) {
+ permission = static_cast<Dictionary::Ptr>(permissionInfo)->Get("permission");
+ } else {
+ permission = permissionInfo;
+ }
+
+ static std::vector<std::pair<String, size_t>> specialContentLengthLimits {
+ { "config/modify", 512 * 1024 * 1024 }
+ };
+
+ for (const auto& limitInfo : specialContentLengthLimits) {
+ if (limitInfo.second <= maxSize) {
+ continue;
+ }
+
+ if (Utility::Match(permission, limitInfo.first)) {
+ maxSize = limitInfo.second;
+ }
+ }
+ }
+ }
+
+ parser.body_limit(maxSize);
+ }
+
+ if (shuttingDown)
+ return false;
+
+ boost::system::error_code ec;
+
+ http::async_read(stream, buf, parser, yc[ec]);
+
+ if (ec) {
+ if (ec == boost::asio::error::operation_aborted)
+ return false;
+
+ /**
+ * Unfortunately there's no way to tell an HTTP protocol error
+ * from an error on a lower layer:
+ *
+ * <https://github.com/boostorg/beast/issues/643>
+ */
+
+ response.result(http::status::bad_request);
+
+ if (parser.get()[http::field::accept] == "application/json") {
+ HttpUtility::SendJsonBody(response, nullptr, new Dictionary({
+ { "error", 400 },
+ { "status", String("Bad Request: ") + ec.message() }
+ }));
+ } else {
+ response.set(http::field::content_type, "text/html");
+ response.body() = String("<h1>Bad Request</h1><p><pre>") + ec.message() + "</pre></p>";
+ response.content_length(response.body().size());
+ }
+
+ response.set(http::field::connection, "close");
+
+ http::async_write(stream, response, yc[ec]);
+ stream.async_flush(yc[ec]);
+
+ return false;
+ }
+
+ return true;
+}
+
+static inline
+bool ProcessRequest(
+ AsioTlsStream& stream,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ ApiUser::Ptr& authenticatedUser,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ HttpServerConnection& server,
+ bool& hasStartedStreaming,
+ boost::asio::yield_context& yc
+)
+{
+ namespace http = boost::beast::http;
+
+ try {
+ CpuBoundWork handlingRequest (yc);
+
+ HttpHandler::ProcessRequest(stream, authenticatedUser, request, response, yc, server);
+ } catch (const std::exception& ex) {
+ if (hasStartedStreaming) {
+ return false;
+ }
+
+ auto sysErr (dynamic_cast<const boost::system::system_error*>(&ex));
+
+ if (sysErr && sysErr->code() == boost::asio::error::operation_aborted) {
+ throw;
+ }
+
+ http::response<http::string_body> response;
+
+		HttpUtility::SendJsonError(response, nullptr, 500, "Unhandled exception", DiagnosticInformation(ex));
+
+ boost::system::error_code ec;
+
+ http::async_write(stream, response, yc[ec]);
+ stream.async_flush(yc[ec]);
+
+ return true;
+ }
+
+ if (hasStartedStreaming) {
+ return false;
+ }
+
+ boost::system::error_code ec;
+
+ http::async_write(stream, response, yc[ec]);
+ stream.async_flush(yc[ec]);
+
+ return true;
+}
+
+void HttpServerConnection::ProcessMessages(boost::asio::yield_context yc)
+{
+ namespace beast = boost::beast;
+ namespace http = beast::http;
+ namespace ch = std::chrono;
+
+ try {
+ /* Do not reset the buffer in the state machine.
+ * EnsureValidHeaders already reads from the stream into the buffer,
+ * EnsureValidBody continues. ProcessRequest() actually handles the request
+ * and needs the full buffer.
+ */
+ beast::flat_buffer buf;
+
+ for (;;) {
+ m_Seen = Utility::GetTime();
+
+ http::parser<true, http::string_body> parser;
+ http::response<http::string_body> response;
+
+ parser.header_limit(1024 * 1024);
+ parser.body_limit(-1);
+
+ response.set(http::field::server, l_ServerHeader);
+
+ if (!EnsureValidHeaders(*m_Stream, buf, parser, response, m_ShuttingDown, yc)) {
+ break;
+ }
+
+ m_Seen = Utility::GetTime();
+ auto start (ch::steady_clock::now());
+
+ auto& request (parser.get());
+
+ {
+ auto method (http::string_to_verb(request["X-Http-Method-Override"]));
+
+ if (method != http::verb::unknown) {
+ request.method(method);
+ }
+ }
+
+ HandleExpect100(*m_Stream, request, yc);
+
+ auto authenticatedUser (m_ApiUser);
+
+ if (!authenticatedUser) {
+ CpuBoundWork fetchingAuthenticatedUser (yc);
+
+ authenticatedUser = ApiUser::GetByAuthHeader(std::string(request[http::field::authorization]));
+ }
+
+ Log logMsg (LogInformation, "HttpServerConnection");
+
+ logMsg << "Request " << request.method_string() << ' ' << request.target()
+ << " (from " << m_PeerAddress
+ << "), user: " << (authenticatedUser ? authenticatedUser->GetName() : "<unauthenticated>")
+ << ", agent: " << request[http::field::user_agent]; //operator[] - Returns the value for a field, or "" if it does not exist.
+
+ Defer addRespCode ([&response, start, &logMsg]() {
+ logMsg << ", status: " << response.result() << ") took "
+ << ch::duration_cast<ch::milliseconds>(ch::steady_clock::now() - start).count() << "ms.";
+ });
+
+ if (!HandleAccessControl(*m_Stream, request, response, yc)) {
+ break;
+ }
+
+ if (!EnsureAcceptHeader(*m_Stream, request, response, yc)) {
+ break;
+ }
+
+ if (!EnsureAuthenticatedUser(*m_Stream, request, authenticatedUser, response, yc)) {
+ break;
+ }
+
+ if (!EnsureValidBody(*m_Stream, buf, parser, authenticatedUser, response, m_ShuttingDown, yc)) {
+ break;
+ }
+
+ m_Seen = std::numeric_limits<decltype(m_Seen)>::max();
+
+ if (!ProcessRequest(*m_Stream, request, authenticatedUser, response, *this, m_HasStartedStreaming, yc)) {
+ break;
+ }
+
+ if (request.version() != 11 || request[http::field::connection] == "close") {
+ break;
+ }
+ }
+ } catch (const std::exception& ex) {
+ if (!m_ShuttingDown) {
+ Log(LogCritical, "HttpServerConnection")
+ << "Unhandled exception while processing HTTP request: " << ex.what();
+ }
+ }
+
+ Disconnect();
+}
+
+void HttpServerConnection::CheckLiveness(boost::asio::yield_context yc)
+{
+ boost::system::error_code ec;
+
+ for (;;) {
+ m_CheckLivenessTimer.expires_from_now(boost::posix_time::seconds(5));
+ m_CheckLivenessTimer.async_wait(yc[ec]);
+
+ if (m_ShuttingDown) {
+ break;
+ }
+
+ if (m_Seen < Utility::GetTime() - 10) {
+ Log(LogInformation, "HttpServerConnection")
+ << "No messages for HTTP connection have been received in the last 10 seconds.";
+
+ Disconnect();
+ break;
+ }
+ }
+}
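
To make the per-request checks above tangible, a sketch of a boost::beast request that passes EnsureAcceptHeader() and triggers the X-HTTP-Method-Override rewrite; the target path, host and credentials are placeholders, not part of this patch:

    namespace http = boost::beast::http;

    http::request<http::string_body> req{http::verb::post, "/v1/example", 11};
    req.set(http::field::host, "icinga-master:5665");
    req.set(http::field::accept, "application/json");   // required for every non-GET request (EnsureAcceptHeader)
    req.set(http::field::authorization, "Basic ...");    // checked by EnsureAuthenticatedUser() via ApiUser::GetByAuthHeader()
    req.set("X-HTTP-Method-Override", "GET");             // ProcessMessages() rewrites the verb before dispatch
    req.body() = R"({"pretty": true})";
    req.prepare_payload();
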
diff --git a/lib/remote/httpserverconnection.hpp b/lib/remote/httpserverconnection.hpp
new file mode 100644
index 0000000..9c812e5
--- /dev/null
+++ b/lib/remote/httpserverconnection.hpp
@@ -0,0 +1,54 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef HTTPSERVERCONNECTION_H
+#define HTTPSERVERCONNECTION_H
+
+#include "remote/apiuser.hpp"
+#include "base/string.hpp"
+#include "base/tlsstream.hpp"
+#include <memory>
+#include <boost/asio/deadline_timer.hpp>
+#include <boost/asio/io_context.hpp>
+#include <boost/asio/io_context_strand.hpp>
+#include <boost/asio/spawn.hpp>
+
+namespace icinga
+{
+
+/**
+ * An API client connection.
+ *
+ * @ingroup remote
+ */
+class HttpServerConnection final : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(HttpServerConnection);
+
+ HttpServerConnection(const String& identity, bool authenticated, const Shared<AsioTlsStream>::Ptr& stream);
+
+ void Start();
+ void Disconnect();
+ void StartStreaming();
+
+ bool Disconnected();
+
+private:
+ ApiUser::Ptr m_ApiUser;
+ Shared<AsioTlsStream>::Ptr m_Stream;
+ double m_Seen;
+ String m_PeerAddress;
+ boost::asio::io_context::strand m_IoStrand;
+ bool m_ShuttingDown;
+ bool m_HasStartedStreaming;
+ boost::asio::deadline_timer m_CheckLivenessTimer;
+
+ HttpServerConnection(const String& identity, bool authenticated, const Shared<AsioTlsStream>::Ptr& stream, boost::asio::io_context& io);
+
+ void ProcessMessages(boost::asio::yield_context yc);
+ void CheckLiveness(boost::asio::yield_context yc);
+};
+
+}
+
+#endif /* HTTPSERVERCONNECTION_H */
diff --git a/lib/remote/httputility.cpp b/lib/remote/httputility.cpp
new file mode 100644
index 0000000..a2142e5
--- /dev/null
+++ b/lib/remote/httputility.cpp
@@ -0,0 +1,80 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/httputility.hpp"
+#include "remote/url.hpp"
+#include "base/json.hpp"
+#include "base/logger.hpp"
+#include <map>
+#include <string>
+#include <vector>
+#include <boost/beast/http.hpp>
+
+using namespace icinga;
+
+Dictionary::Ptr HttpUtility::FetchRequestParameters(const Url::Ptr& url, const std::string& body)
+{
+ Dictionary::Ptr result;
+
+ if (!body.empty()) {
+ Log(LogDebug, "HttpUtility")
+ << "Request body: '" << body << '\'';
+
+ result = JsonDecode(body);
+ }
+
+ if (!result)
+ result = new Dictionary();
+
+ std::map<String, std::vector<String>> query;
+ for (const auto& kv : url->GetQuery()) {
+ query[kv.first].emplace_back(kv.second);
+ }
+
+ for (auto& kv : query) {
+ result->Set(kv.first, Array::FromVector(kv.second));
+ }
+
+ return result;
+}
+
+Value HttpUtility::GetLastParameter(const Dictionary::Ptr& params, const String& key)
+{
+ Value varr = params->Get(key);
+
+ if (!varr.IsObjectType<Array>())
+ return varr;
+
+ Array::Ptr arr = varr;
+
+ if (arr->GetLength() == 0)
+ return Empty;
+ else
+ return arr->Get(arr->GetLength() - 1);
+}
+
+void HttpUtility::SendJsonBody(boost::beast::http::response<boost::beast::http::string_body>& response, const Dictionary::Ptr& params, const Value& val)
+{
+ namespace http = boost::beast::http;
+
+ response.set(http::field::content_type, "application/json");
+ response.body() = JsonEncode(val, params && GetLastParameter(params, "pretty"));
+ response.content_length(response.body().size());
+}
+
+void HttpUtility::SendJsonError(boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params, int code, const String& info, const String& diagnosticInformation)
+{
+ Dictionary::Ptr result = new Dictionary({ { "error", code } });
+
+ if (!info.IsEmpty()) {
+ result->Set("status", info);
+ }
+
+ if (params && HttpUtility::GetLastParameter(params, "verbose") && !diagnosticInformation.IsEmpty()) {
+ result->Set("diagnostic_information", diagnosticInformation);
+ }
+
+ response.result(code);
+
+ HttpUtility::SendJsonBody(response, params, result);
+}
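
A short illustration of the merge semantics above; the URL and keys are invented for the example. Query-string values are grouped per key into arrays and set on top of the decoded JSON body, and GetLastParameter() picks the last occurrence of a key:

    Url::Ptr url = new Url("/v1/example?type=Host&type=Service");
    Dictionary::Ptr params = HttpUtility::FetchRequestParameters(url, R"({"filter": "host.vars.os == \"Linux\""})");

    // "type" is now the Array [ "Host", "Service" ], "filter" the plain string from the body.
    Value type = HttpUtility::GetLastParameter(params, "type");     // -> "Service" (last array element)
    Value filter = HttpUtility::GetLastParameter(params, "filter"); // non-arrays are returned unchanged
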
diff --git a/lib/remote/httputility.hpp b/lib/remote/httputility.hpp
new file mode 100644
index 0000000..6465b4a
--- /dev/null
+++ b/lib/remote/httputility.hpp
@@ -0,0 +1,33 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef HTTPUTILITY_H
+#define HTTPUTILITY_H
+
+#include "remote/url.hpp"
+#include "base/dictionary.hpp"
+#include <boost/beast/http.hpp>
+#include <string>
+
+namespace icinga
+{
+
+/**
+ * Helper functions.
+ *
+ * @ingroup remote
+ */
+class HttpUtility
+{
+
+public:
+ static Dictionary::Ptr FetchRequestParameters(const Url::Ptr& url, const std::string& body);
+ static Value GetLastParameter(const Dictionary::Ptr& params, const String& key);
+
+ static void SendJsonBody(boost::beast::http::response<boost::beast::http::string_body>& response, const Dictionary::Ptr& params, const Value& val);
+ static void SendJsonError(boost::beast::http::response<boost::beast::http::string_body>& response, const Dictionary::Ptr& params, const int code,
+ const String& verbose = String(), const String& diagnosticInformation = String());
+};
+
+}
+
+#endif /* HTTPUTILITY_H */
diff --git a/lib/remote/i2-remote.hpp b/lib/remote/i2-remote.hpp
new file mode 100644
index 0000000..5755bef
--- /dev/null
+++ b/lib/remote/i2-remote.hpp
@@ -0,0 +1,14 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef I2REMOTE_H
+#define I2REMOTE_H
+
+/**
+ * @defgroup remote Remote library
+ *
+ * The Icinga library implements remote cluster functionality.
+ */
+
+#include "base/i2-base.hpp"
+
+#endif /* I2REMOTE_H */
diff --git a/lib/remote/infohandler.cpp b/lib/remote/infohandler.cpp
new file mode 100644
index 0000000..80ebba7
--- /dev/null
+++ b/lib/remote/infohandler.cpp
@@ -0,0 +1,100 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/infohandler.hpp"
+#include "remote/httputility.hpp"
+#include "base/application.hpp"
+
+using namespace icinga;
+
+REGISTER_URLHANDLER("/", InfoHandler);
+
+bool InfoHandler::HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+)
+{
+ namespace http = boost::beast::http;
+
+ if (url->GetPath().size() > 2)
+ return false;
+
+ if (request.method() != http::verb::get)
+ return false;
+
+ if (url->GetPath().empty()) {
+ response.result(http::status::found);
+ response.set(http::field::location, "/v1");
+ return true;
+ }
+
+ if (url->GetPath()[0] != "v1" || url->GetPath().size() != 1)
+ return false;
+
+ response.result(http::status::ok);
+
+ std::vector<String> permInfo;
+ Array::Ptr permissions = user->GetPermissions();
+
+ if (permissions) {
+ ObjectLock olock(permissions);
+ for (const Value& permission : permissions) {
+ String name;
+ bool hasFilter = false;
+ if (permission.IsObjectType<Dictionary>()) {
+ Dictionary::Ptr dpermission = permission;
+ name = dpermission->Get("permission");
+ hasFilter = dpermission->Contains("filter");
+ } else
+ name = permission;
+
+ if (hasFilter)
+ name += " (filtered)";
+
+ permInfo.emplace_back(std::move(name));
+ }
+ }
+
+ if (request[http::field::accept] == "application/json") {
+ Dictionary::Ptr result1 = new Dictionary({
+ { "user", user->GetName() },
+ { "permissions", Array::FromVector(permInfo) },
+ { "version", Application::GetAppVersion() },
+ { "info", "More information about API requests is available in the documentation at https://icinga.com/docs/icinga2/latest/" }
+ });
+
+ Dictionary::Ptr result = new Dictionary({
+ { "results", new Array({ result1 }) }
+ });
+
+ HttpUtility::SendJsonBody(response, params, result);
+ } else {
+ response.set(http::field::content_type, "text/html");
+
+ String body = "<html><head><title>Icinga 2</title></head><h1>Hello from Icinga 2 (Version: " + Application::GetAppVersion() + ")!</h1>";
+ body += "<p>You are authenticated as <b>" + user->GetName() + "</b>. ";
+
+ if (!permInfo.empty()) {
+ body += "Your user has the following permissions:</p> <ul>";
+
+ for (const String& perm : permInfo) {
+ body += "<li>" + perm + "</li>";
+ }
+
+ body += "</ul>";
+ } else
+ body += "Your user does not have any permissions.</p>";
+
+ body += R"(<p>More information about API requests is available in the <a href="https://icinga.com/docs/icinga2/latest/" target="_blank">documentation</a>.</p></html>)";
+ response.body() = body;
+ response.content_length(response.body().size());
+ }
+
+ return true;
+}
+
diff --git a/lib/remote/infohandler.hpp b/lib/remote/infohandler.hpp
new file mode 100644
index 0000000..e1fe983
--- /dev/null
+++ b/lib/remote/infohandler.hpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef INFOHANDLER_H
+#define INFOHANDLER_H
+
+#include "remote/httphandler.hpp"
+
+namespace icinga
+{
+
+class InfoHandler final : public HttpHandler
+{
+public:
+ DECLARE_PTR_TYPEDEFS(InfoHandler);
+
+ bool HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+ ) override;
+};
+
+}
+
+#endif /* INFOHANDLER_H */
diff --git a/lib/remote/jsonrpc.cpp b/lib/remote/jsonrpc.cpp
new file mode 100644
index 0000000..d4d3d3c
--- /dev/null
+++ b/lib/remote/jsonrpc.cpp
@@ -0,0 +1,157 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/jsonrpc.hpp"
+#include "base/netstring.hpp"
+#include "base/json.hpp"
+#include "base/console.hpp"
+#include "base/scriptglobal.hpp"
+#include "base/convert.hpp"
+#include "base/tlsstream.hpp"
+#include <iostream>
+#include <memory>
+#include <utility>
+#include <boost/asio/spawn.hpp>
+
+using namespace icinga;
+
+#ifdef I2_DEBUG
+/**
+ * Determine whether the developer wants to see raw JSON messages.
+ *
+ * @return Internal.DebugJsonRpc boolean
+ */
+static bool GetDebugJsonRpcCached()
+{
+ static int debugJsonRpc = -1;
+
+ if (debugJsonRpc != -1)
+ return debugJsonRpc;
+
+ debugJsonRpc = false;
+
+ Namespace::Ptr internal = ScriptGlobal::Get("Internal", &Empty);
+
+ if (!internal)
+ return false;
+
+ Value vdebug;
+
+ if (!internal->Get("DebugJsonRpc", &vdebug))
+ return false;
+
+ debugJsonRpc = Convert::ToLong(vdebug);
+
+ return debugJsonRpc;
+}
+#endif /* I2_DEBUG */
+
+/**
+ * Sends a message to the connected peer and returns the bytes sent.
+ *
+ * @param message The message.
+ *
+ * @return The number of bytes sent.
+ */
+size_t JsonRpc::SendMessage(const Shared<AsioTlsStream>::Ptr& stream, const Dictionary::Ptr& message)
+{
+ String json = JsonEncode(message);
+
+#ifdef I2_DEBUG
+ if (GetDebugJsonRpcCached())
+ std::cerr << ConsoleColorTag(Console_ForegroundBlue) << ">> " << json << ConsoleColorTag(Console_Normal) << "\n";
+#endif /* I2_DEBUG */
+
+ return NetString::WriteStringToStream(stream, json);
+}
+
+/**
+ * Sends a message to the connected peer and returns the bytes sent.
+ *
+ * @param message The message.
+ *
+ * @return The number of bytes sent.
+ */
+size_t JsonRpc::SendMessage(const Shared<AsioTlsStream>::Ptr& stream, const Dictionary::Ptr& message, boost::asio::yield_context yc)
+{
+ return JsonRpc::SendRawMessage(stream, JsonEncode(message), yc);
+}
+
+/**
+ * Sends a raw message to the connected peer.
+ *
+ * @param stream ASIO TLS Stream
+ * @param json message
+ * @param yc Yield context required for ASIO
+ *
+ * @return bytes sent
+ */
+size_t JsonRpc::SendRawMessage(const Shared<AsioTlsStream>::Ptr& stream, const String& json, boost::asio::yield_context yc)
+{
+#ifdef I2_DEBUG
+ if (GetDebugJsonRpcCached())
+ std::cerr << ConsoleColorTag(Console_ForegroundBlue) << ">> " << json << ConsoleColorTag(Console_Normal) << "\n";
+#endif /* I2_DEBUG */
+
+ return NetString::WriteStringToStream(stream, json, yc);
+}
+
+/**
+ * Reads a message from the connected peer.
+ *
+ * @param stream ASIO TLS Stream
+ * @param maxMessageLength The maximum number of bytes to read.
+ *
+ * @return A JSON string
+ */
+
+String JsonRpc::ReadMessage(const Shared<AsioTlsStream>::Ptr& stream, ssize_t maxMessageLength)
+{
+ String jsonString = NetString::ReadStringFromStream(stream, maxMessageLength);
+
+#ifdef I2_DEBUG
+ if (GetDebugJsonRpcCached())
+ std::cerr << ConsoleColorTag(Console_ForegroundBlue) << "<< " << jsonString << ConsoleColorTag(Console_Normal) << "\n";
+#endif /* I2_DEBUG */
+
+ return jsonString;
+}
+
+/**
+ * Reads a message from the connected peer.
+ *
+ * @param stream ASIO TLS Stream
+ * @param yc Yield Context for ASIO
+ * @param maxMessageLength The maximum number of bytes to read.
+ *
+ * @return A JSON string
+ */
+String JsonRpc::ReadMessage(const Shared<AsioTlsStream>::Ptr& stream, boost::asio::yield_context yc, ssize_t maxMessageLength)
+{
+ String jsonString = NetString::ReadStringFromStream(stream, yc, maxMessageLength);
+
+#ifdef I2_DEBUG
+ if (GetDebugJsonRpcCached())
+ std::cerr << ConsoleColorTag(Console_ForegroundBlue) << "<< " << jsonString << ConsoleColorTag(Console_Normal) << "\n";
+#endif /* I2_DEBUG */
+
+ return jsonString;
+}
+
+/**
+ * Decodes a JSON-RPC message and enforces that it is a Dictionary.
+ *
+ * @param message JSON string
+ *
+ * @return Dictionary ptr
+ */
+Dictionary::Ptr JsonRpc::DecodeMessage(const String& message)
+{
+ Value value = JsonDecode(message);
+
+ if (!value.IsObjectType<Dictionary>()) {
+ BOOST_THROW_EXCEPTION(std::invalid_argument("JSON-RPC"
+ " message must be a dictionary."));
+ }
+
+ return value;
+}
diff --git a/lib/remote/jsonrpc.hpp b/lib/remote/jsonrpc.hpp
new file mode 100644
index 0000000..3f3cdec
--- /dev/null
+++ b/lib/remote/jsonrpc.hpp
@@ -0,0 +1,39 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef JSONRPC_H
+#define JSONRPC_H
+
+#include "base/stream.hpp"
+#include "base/dictionary.hpp"
+#include "base/tlsstream.hpp"
+#include "remote/i2-remote.hpp"
+#include <memory>
+#include <boost/asio/spawn.hpp>
+
+namespace icinga
+{
+
+/**
+ * A JSON-RPC connection.
+ *
+ * @ingroup remote
+ */
+class JsonRpc
+{
+public:
+ static size_t SendMessage(const Shared<AsioTlsStream>::Ptr& stream, const Dictionary::Ptr& message);
+ static size_t SendMessage(const Shared<AsioTlsStream>::Ptr& stream, const Dictionary::Ptr& message, boost::asio::yield_context yc);
+ static size_t SendRawMessage(const Shared<AsioTlsStream>::Ptr& stream, const String& json, boost::asio::yield_context yc);
+
+ static String ReadMessage(const Shared<AsioTlsStream>::Ptr& stream, ssize_t maxMessageLength = -1);
+ static String ReadMessage(const Shared<AsioTlsStream>::Ptr& stream, boost::asio::yield_context yc, ssize_t maxMessageLength = -1);
+
+ static Dictionary::Ptr DecodeMessage(const String& message);
+
+private:
+ JsonRpc();
+};
+
+}
+
+#endif /* JSONRPC_H */
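
A minimal sending/receiving sketch using the helpers declared above; `stream` is assumed to be an established Shared<AsioTlsStream>::Ptr, and the message mirrors the heartbeat sent elsewhere in this patch:

    Dictionary::Ptr message = new Dictionary({
        { "jsonrpc", "2.0" },
        { "method", "event::Heartbeat" },
        { "params", new Dictionary() }
    });

    // Synchronous variant; inside coroutines the yield_context overloads are used instead.
    size_t bytesSent = JsonRpc::SendMessage(stream, message);

    // Every received frame has to decode to a Dictionary,
    // otherwise DecodeMessage() throws std::invalid_argument.
    Dictionary::Ptr incoming = JsonRpc::DecodeMessage(JsonRpc::ReadMessage(stream));
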
diff --git a/lib/remote/jsonrpcconnection-heartbeat.cpp b/lib/remote/jsonrpcconnection-heartbeat.cpp
new file mode 100644
index 0000000..2474688
--- /dev/null
+++ b/lib/remote/jsonrpcconnection-heartbeat.cpp
@@ -0,0 +1,48 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/jsonrpcconnection.hpp"
+#include "remote/messageorigin.hpp"
+#include "remote/apifunction.hpp"
+#include "base/initialize.hpp"
+#include "base/configtype.hpp"
+#include "base/logger.hpp"
+#include "base/utility.hpp"
+#include <boost/asio/spawn.hpp>
+#include <boost/date_time/posix_time/posix_time_duration.hpp>
+#include <boost/system/system_error.hpp>
+
+using namespace icinga;
+
+REGISTER_APIFUNCTION(Heartbeat, event, &JsonRpcConnection::HeartbeatAPIHandler);
+
+/**
+ * We still send a heartbeat here, even without a timeout,
+ * to keep the m_Seen variable up to date and thus keep the
+ * cluster connection alive when there isn't much going on.
+ */
+
+void JsonRpcConnection::HandleAndWriteHeartbeats(boost::asio::yield_context yc)
+{
+ boost::system::error_code ec;
+
+ for (;;) {
+ m_HeartbeatTimer.expires_from_now(boost::posix_time::seconds(20));
+ m_HeartbeatTimer.async_wait(yc[ec]);
+
+ if (m_ShuttingDown) {
+ break;
+ }
+
+ SendMessageInternal(new Dictionary({
+ { "jsonrpc", "2.0" },
+ { "method", "event::Heartbeat" },
+ { "params", new Dictionary() }
+ }));
+ }
+}
+
+Value JsonRpcConnection::HeartbeatAPIHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ return Empty;
+}
+
diff --git a/lib/remote/jsonrpcconnection-pki.cpp b/lib/remote/jsonrpcconnection-pki.cpp
new file mode 100644
index 0000000..340e12b
--- /dev/null
+++ b/lib/remote/jsonrpcconnection-pki.cpp
@@ -0,0 +1,439 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/jsonrpcconnection.hpp"
+#include "remote/apilistener.hpp"
+#include "remote/apifunction.hpp"
+#include "remote/jsonrpc.hpp"
+#include "base/atomic-file.hpp"
+#include "base/configtype.hpp"
+#include "base/objectlock.hpp"
+#include "base/utility.hpp"
+#include "base/logger.hpp"
+#include "base/exception.hpp"
+#include "base/convert.hpp"
+#include <boost/thread/once.hpp>
+#include <boost/regex.hpp>
+#include <fstream>
+#include <openssl/asn1.h>
+#include <openssl/ssl.h>
+#include <openssl/x509.h>
+
+using namespace icinga;
+
+static Value RequestCertificateHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+REGISTER_APIFUNCTION(RequestCertificate, pki, &RequestCertificateHandler);
+static Value UpdateCertificateHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+REGISTER_APIFUNCTION(UpdateCertificate, pki, &UpdateCertificateHandler);
+
+Value RequestCertificateHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ String certText = params->Get("cert_request");
+
+ std::shared_ptr<X509> cert;
+
+ Dictionary::Ptr result = new Dictionary();
+ auto& tlsConn (origin->FromClient->GetStream()->next_layer());
+
+	/* Fall back to the presented client certificate if no CSR was provided. */
+ if (certText.IsEmpty()) {
+ cert = tlsConn.GetPeerCertificate();
+ } else {
+ cert = StringToCertificate(certText);
+ }
+
+ if (!cert) {
+ Log(LogWarning, "JsonRpcConnection") << "No certificate or CSR received";
+
+ result->Set("status_code", 1);
+ result->Set("error", "No certificate or CSR received.");
+
+ return result;
+ }
+
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+ std::shared_ptr<X509> cacert = GetX509Certificate(listener->GetDefaultCaPath());
+
+ String cn = GetCertificateCN(cert);
+
+ bool signedByCA = false;
+
+ {
+ Log logmsg(LogInformation, "JsonRpcConnection");
+ logmsg << "Received certificate request for CN '" << cn << "'";
+
+ try {
+ signedByCA = VerifyCertificate(cacert, cert, listener->GetCrlPath());
+ if (!signedByCA) {
+ logmsg << " not";
+ }
+ logmsg << " signed by our CA.";
+ } catch (const std::exception &ex) {
+ logmsg << " which couldn't be verified";
+
+ if (const unsigned long *openssl_code = boost::get_error_info<errinfo_openssl_error>(ex)) {
+ logmsg << ": " << X509_verify_cert_error_string(long(*openssl_code)) << " (code " << *openssl_code << ")";
+ } else {
+ logmsg << ".";
+ }
+ }
+ }
+
+ std::shared_ptr<X509> parsedRequestorCA;
+ X509* requestorCA = nullptr;
+
+ if (signedByCA) {
+ bool uptodate = IsCertUptodate(cert);
+
+ if (uptodate) {
+ // Even if the leaf is up-to-date, the root may expire soon.
+ // In a regular setup where Icinga manages the PKI, there is only one CA.
+			// Icinga includes it in handshakes, so let's check whether the peer needs a fresh one...
+
+ if (cn == origin->FromClient->GetIdentity()) {
+ auto chain (SSL_get_peer_cert_chain(tlsConn.native_handle()));
+
+ if (chain) {
+ auto len (sk_X509_num(chain));
+
+ for (int i = 0; i < len; ++i) {
+ auto link (sk_X509_value(chain, i));
+
+ if (!X509_NAME_cmp(X509_get_subject_name(link), X509_get_issuer_name(link))) {
+ requestorCA = link;
+ }
+ }
+ }
+ } else {
+ Value requestorCaStr;
+
+ if (params->Get("requestor_ca", &requestorCaStr)) {
+ parsedRequestorCA = StringToCertificate(requestorCaStr);
+ requestorCA = parsedRequestorCA.get();
+ }
+ }
+
+ if (requestorCA && !IsCaUptodate(requestorCA)) {
+ int days;
+
+ if (ASN1_TIME_diff(&days, nullptr, X509_get_notAfter(requestorCA), X509_get_notAfter(cacert.get())) && days > 0) {
+ uptodate = false;
+ }
+ }
+ }
+
+ if (uptodate) {
+ Log(LogInformation, "JsonRpcConnection")
+ << "The certificates for CN '" << cn << "' and its root CA are valid and uptodate. Skipping automated renewal.";
+ result->Set("status_code", 1);
+ result->Set("error", "The certificates for CN '" + cn + "' and its root CA are valid and uptodate. Skipping automated renewal.");
+ return result;
+ }
+ }
+
+ unsigned int n;
+ unsigned char digest[EVP_MAX_MD_SIZE];
+
+ if (!X509_digest(cert.get(), EVP_sha256(), digest, &n)) {
+ result->Set("status_code", 1);
+ result->Set("error", "Could not calculate fingerprint for the X509 certificate for CN '" + cn + "'.");
+
+ Log(LogWarning, "JsonRpcConnection")
+ << "Could not calculate fingerprint for the X509 certificate requested for CN '"
+ << cn << "'.";
+
+ return result;
+ }
+
+ char certFingerprint[EVP_MAX_MD_SIZE*2+1];
+ for (unsigned int i = 0; i < n; i++)
+ sprintf(certFingerprint + 2 * i, "%02x", digest[i]);
+
+ result->Set("fingerprint_request", certFingerprint);
+
+ String requestDir = ApiListener::GetCertificateRequestsDir();
+ String requestPath = requestDir + "/" + certFingerprint + ".json";
+
+ result->Set("ca", CertificateToString(cacert));
+
+ JsonRpcConnection::Ptr client = origin->FromClient;
+
+ /* If we already have a signed certificate request, send it to the client. */
+ if (Utility::PathExists(requestPath)) {
+ Dictionary::Ptr request = Utility::LoadJsonFile(requestPath);
+
+ String certResponse = request->Get("cert_response");
+
+ if (!certResponse.IsEmpty()) {
+ Log(LogInformation, "JsonRpcConnection")
+ << "Sending certificate response for CN '" << cn
+ << "' to endpoint '" << client->GetIdentity() << "'.";
+
+ result->Set("cert", certResponse);
+ result->Set("status_code", 0);
+
+ Dictionary::Ptr message = new Dictionary({
+ { "jsonrpc", "2.0" },
+ { "method", "pki::UpdateCertificate" },
+ { "params", result }
+ });
+ client->SendMessage(message);
+
+ return result;
+ }
+ } else if (Utility::PathExists(requestDir + "/" + certFingerprint + ".removed")) {
+ Log(LogInformation, "JsonRpcConnection")
+ << "Certificate for CN " << cn << " has been removed. Ignoring signing request.";
+ result->Set("status_code", 1);
+ result->Set("error", "Ticket for CN " + cn + " declined by administrator.");
+ return result;
+ }
+
+ std::shared_ptr<X509> newcert;
+ Dictionary::Ptr message;
+ String ticket;
+
+ /* Check whether we are a signing instance or we
+ * must delay the signing request.
+ */
+ if (!Utility::PathExists(GetIcingaCADir() + "/ca.key"))
+ goto delayed_request;
+
+ if (!signedByCA) {
+ String salt = listener->GetTicketSalt();
+
+ ticket = params->Get("ticket");
+
+ // Auto-signing is disabled: Client did not include a ticket in its request.
+ if (ticket.IsEmpty()) {
+ Log(LogNotice, "JsonRpcConnection")
+ << "Certificate request for CN '" << cn
+ << "': No ticket included, skipping auto-signing and waiting for on-demand signing approval.";
+
+ goto delayed_request;
+ }
+
+ // Auto-signing is disabled: no TicketSalt
+ if (salt.IsEmpty()) {
+ Log(LogNotice, "JsonRpcConnection")
+ << "Certificate request for CN '" << cn
+ << "': This instance is the signing master for the Icinga CA."
+ << " The 'ticket_salt' attribute in the 'api' feature is not set."
+ << " Not signing the request. Please check the docs.";
+
+ goto delayed_request;
+ }
+
+ String realTicket = PBKDF2_SHA1(cn, salt, 50000);
+
+ Log(LogDebug, "JsonRpcConnection")
+ << "Certificate request for CN '" << cn << "': Comparing received ticket '"
+ << ticket << "' with calculated ticket '" << realTicket << "'.";
+
+ if (!Utility::ComparePasswords(ticket, realTicket)) {
+ Log(LogWarning, "JsonRpcConnection")
+ << "Ticket '" << ticket << "' for CN '" << cn << "' is invalid.";
+
+ result->Set("status_code", 1);
+ result->Set("error", "Invalid ticket for CN '" + cn + "'.");
+ return result;
+ }
+ }
+
+ newcert = listener->RenewCert(cert);
+
+ if (!newcert) {
+ goto delayed_request;
+ }
+
+ /* Send the signed certificate update. */
+ Log(LogInformation, "JsonRpcConnection")
+ << "Sending certificate response for CN '" << cn << "' to endpoint '"
+ << client->GetIdentity() << "'" << (!ticket.IsEmpty() ? " (auto-signing ticket)" : "" ) << ".";
+
+ result->Set("cert", CertificateToString(newcert));
+
+ result->Set("status_code", 0);
+
+ message = new Dictionary({
+ { "jsonrpc", "2.0" },
+ { "method", "pki::UpdateCertificate" },
+ { "params", result }
+ });
+ client->SendMessage(message);
+
+ return result;
+
+delayed_request:
+ /* Send a delayed certificate signing request. */
+ Utility::MkDirP(requestDir, 0700);
+
+ Dictionary::Ptr request = new Dictionary({
+ { "cert_request", CertificateToString(cert) },
+ { "ticket", params->Get("ticket") }
+ });
+
+ if (requestorCA) {
+ request->Set("requestor_ca", CertificateToString(requestorCA));
+ }
+
+ Utility::SaveJsonFile(requestPath, 0600, request);
+
+ JsonRpcConnection::SendCertificateRequest(nullptr, origin, requestPath);
+
+ result->Set("status_code", 2);
+ result->Set("error", "Certificate request for CN '" + cn + "' is pending. Waiting for approval from the parent Icinga instance.");
+
+ Log(LogInformation, "JsonRpcConnection")
+ << "Certificate request for CN '" << cn << "' is pending. Waiting for approval.";
+
+ if (origin) {
+ auto client (origin->FromClient);
+
+ if (client && !client->GetEndpoint()) {
+ client->Disconnect();
+ }
+ }
+
+ return result;
+}
+
+void JsonRpcConnection::SendCertificateRequest(const JsonRpcConnection::Ptr& aclient, const MessageOrigin::Ptr& origin, const String& path)
+{
+ Dictionary::Ptr message = new Dictionary();
+ message->Set("jsonrpc", "2.0");
+ message->Set("method", "pki::RequestCertificate");
+
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return;
+
+ Dictionary::Ptr params = new Dictionary();
+ message->Set("params", params);
+
+ /* Path is empty if this is our own request. */
+ if (path.IsEmpty()) {
+ {
+ Log msg (LogInformation, "JsonRpcConnection");
+ msg << "Requesting new certificate for this Icinga instance";
+
+ if (aclient) {
+ msg << " from endpoint '" << aclient->GetIdentity() << "'";
+ }
+
+ msg << ".";
+ }
+
+ String ticketPath = ApiListener::GetCertsDir() + "/ticket";
+
+ std::ifstream fp(ticketPath.CStr());
+ String ticket((std::istreambuf_iterator<char>(fp)), std::istreambuf_iterator<char>());
+ fp.close();
+
+ params->Set("ticket", ticket);
+ } else {
+ Dictionary::Ptr request = Utility::LoadJsonFile(path);
+
+ if (request->Contains("cert_response"))
+ return;
+
+ request->CopyTo(params);
+ }
+
+ /* Send the request to a) the connected client
+ * or b) the local zone and all parents.
+ */
+ if (aclient)
+ aclient->SendMessage(message);
+ else
+ listener->RelayMessage(origin, Zone::GetLocalZone(), message, false);
+}
+
+Value UpdateCertificateHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ if (origin->FromZone && !Zone::GetLocalZone()->IsChildOf(origin->FromZone)) {
+ Log(LogWarning, "ClusterEvents")
+ << "Discarding 'update certificate' message from '" << origin->FromClient->GetIdentity() << "': Invalid endpoint origin (client not allowed).";
+
+ return Empty;
+ }
+
+ String ca = params->Get("ca");
+ String cert = params->Get("cert");
+
+ ApiListener::Ptr listener = ApiListener::GetInstance();
+
+ if (!listener)
+ return Empty;
+
+ std::shared_ptr<X509> oldCert = GetX509Certificate(listener->GetDefaultCertPath());
+ std::shared_ptr<X509> newCert = StringToCertificate(cert);
+
+ String cn = GetCertificateCN(newCert);
+
+ Log(LogInformation, "JsonRpcConnection")
+ << "Received certificate update message for CN '" << cn << "'";
+
+ /* Check if this is a certificate update for a subordinate instance. */
+ std::shared_ptr<EVP_PKEY> oldKey = std::shared_ptr<EVP_PKEY>(X509_get_pubkey(oldCert.get()), EVP_PKEY_free);
+ std::shared_ptr<EVP_PKEY> newKey = std::shared_ptr<EVP_PKEY>(X509_get_pubkey(newCert.get()), EVP_PKEY_free);
+
+ if (X509_NAME_cmp(X509_get_subject_name(oldCert.get()), X509_get_subject_name(newCert.get())) != 0 ||
+ EVP_PKEY_cmp(oldKey.get(), newKey.get()) != 1) {
+ String certFingerprint = params->Get("fingerprint_request");
+
+ /* Validate the fingerprint format. */
+ boost::regex expr("^[0-9a-f]+$");
+
+ if (!boost::regex_match(certFingerprint.GetData(), expr)) {
+ Log(LogWarning, "JsonRpcConnection")
+ << "Endpoint '" << origin->FromClient->GetIdentity() << "' sent an invalid certificate fingerprint: '"
+ << certFingerprint << "' for CN '" << cn << "'.";
+ return Empty;
+ }
+
+ String requestDir = ApiListener::GetCertificateRequestsDir();
+ String requestPath = requestDir + "/" + certFingerprint + ".json";
+
+ /* Save the received signed certificate request to disk. */
+ if (Utility::PathExists(requestPath)) {
+ Log(LogInformation, "JsonRpcConnection")
+ << "Saved certificate update for CN '" << cn << "'";
+
+ Dictionary::Ptr request = Utility::LoadJsonFile(requestPath);
+ request->Set("cert_response", cert);
+ Utility::SaveJsonFile(requestPath, 0644, request);
+ }
+
+ return Empty;
+ }
+
+ /* Update CA certificate. */
+ String caPath = listener->GetDefaultCaPath();
+
+ Log(LogInformation, "JsonRpcConnection")
+ << "Updating CA certificate in '" << caPath << "'.";
+
+ AtomicFile::Write(caPath, 0644, ca);
+
+ /* Update signed certificate. */
+ String certPath = listener->GetDefaultCertPath();
+
+ Log(LogInformation, "JsonRpcConnection")
+ << "Updating client certificate for CN '" << cn << "' in '" << certPath << "'.";
+
+ AtomicFile::Write(certPath, 0644, cert);
+
+ /* Remove ticket for successful signing request. */
+ String ticketPath = ApiListener::GetCertsDir() + "/ticket";
+
+ Utility::Remove(ticketPath);
+
+ /* Update the certificates at runtime and reconnect all endpoints. */
+ Log(LogInformation, "JsonRpcConnection")
+ << "Updating the client certificate for CN '" << cn << "' at runtime and reconnecting the endpoints.";
+
+ listener->UpdateSSLContext();
+
+ return Empty;
+}
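
For reference, the ticket check in the auto-signing path above reduces to the comparison below; cn is the CN of the submitted certificate and the salt is the api feature's ticket_salt attribute. This only restates the logic from RequestCertificateHandler; assuming the documented CLI workflow, the client-side ticket is the value printed by `icinga2 pki ticket --cn <fqdn>` on the signing master:

    String ticket = params->Get("ticket");
    String expectedTicket = PBKDF2_SHA1(cn, listener->GetTicketSalt(), 50000);

    // Constant-time comparison; on mismatch the request is rejected with status_code 1
    // instead of being stored as a pending signing request.
    bool ticketValid = Utility::ComparePasswords(ticket, expectedTicket);
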
diff --git a/lib/remote/jsonrpcconnection.cpp b/lib/remote/jsonrpcconnection.cpp
new file mode 100644
index 0000000..3bae3ca
--- /dev/null
+++ b/lib/remote/jsonrpcconnection.cpp
@@ -0,0 +1,388 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/jsonrpcconnection.hpp"
+#include "remote/apilistener.hpp"
+#include "remote/apifunction.hpp"
+#include "remote/jsonrpc.hpp"
+#include "base/defer.hpp"
+#include "base/configtype.hpp"
+#include "base/io-engine.hpp"
+#include "base/json.hpp"
+#include "base/objectlock.hpp"
+#include "base/utility.hpp"
+#include "base/logger.hpp"
+#include "base/exception.hpp"
+#include "base/convert.hpp"
+#include "base/tlsstream.hpp"
+#include <memory>
+#include <utility>
+#include <boost/asio/io_context.hpp>
+#include <boost/asio/spawn.hpp>
+#include <boost/date_time/posix_time/posix_time_duration.hpp>
+#include <boost/system/system_error.hpp>
+#include <boost/thread/once.hpp>
+
+using namespace icinga;
+
+static Value SetLogPositionHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params);
+REGISTER_APIFUNCTION(SetLogPosition, log, &SetLogPositionHandler);
+
+static RingBuffer l_TaskStats (15 * 60);
+
+JsonRpcConnection::JsonRpcConnection(const String& identity, bool authenticated,
+ const Shared<AsioTlsStream>::Ptr& stream, ConnectionRole role)
+ : JsonRpcConnection(identity, authenticated, stream, role, IoEngine::Get().GetIoContext())
+{
+}
+
+JsonRpcConnection::JsonRpcConnection(const String& identity, bool authenticated,
+ const Shared<AsioTlsStream>::Ptr& stream, ConnectionRole role, boost::asio::io_context& io)
+ : m_Identity(identity), m_Authenticated(authenticated), m_Stream(stream), m_Role(role),
+ m_Timestamp(Utility::GetTime()), m_Seen(Utility::GetTime()), m_NextHeartbeat(0), m_IoStrand(io),
+ m_OutgoingMessagesQueued(io), m_WriterDone(io), m_ShuttingDown(false),
+ m_CheckLivenessTimer(io), m_HeartbeatTimer(io)
+{
+ if (authenticated)
+ m_Endpoint = Endpoint::GetByName(identity);
+}
+
+void JsonRpcConnection::Start()
+{
+ namespace asio = boost::asio;
+
+ JsonRpcConnection::Ptr keepAlive (this);
+
+ IoEngine::SpawnCoroutine(m_IoStrand, [this, keepAlive](asio::yield_context yc) { HandleIncomingMessages(yc); });
+ IoEngine::SpawnCoroutine(m_IoStrand, [this, keepAlive](asio::yield_context yc) { WriteOutgoingMessages(yc); });
+ IoEngine::SpawnCoroutine(m_IoStrand, [this, keepAlive](asio::yield_context yc) { HandleAndWriteHeartbeats(yc); });
+ IoEngine::SpawnCoroutine(m_IoStrand, [this, keepAlive](asio::yield_context yc) { CheckLiveness(yc); });
+}
+
+void JsonRpcConnection::HandleIncomingMessages(boost::asio::yield_context yc)
+{
+ m_Stream->next_layer().SetSeen(&m_Seen);
+
+ for (;;) {
+ String message;
+
+ try {
+ message = JsonRpc::ReadMessage(m_Stream, yc, m_Endpoint ? -1 : 1024 * 1024);
+ } catch (const std::exception& ex) {
+ Log(m_ShuttingDown ? LogDebug : LogNotice, "JsonRpcConnection")
+ << "Error while reading JSON-RPC message for identity '" << m_Identity
+ << "': " << DiagnosticInformation(ex);
+
+ break;
+ }
+
+ m_Seen = Utility::GetTime();
+
+ try {
+ CpuBoundWork handleMessage (yc);
+
+ MessageHandler(message);
+ } catch (const std::exception& ex) {
+ Log(m_ShuttingDown ? LogDebug : LogWarning, "JsonRpcConnection")
+ << "Error while processing JSON-RPC message for identity '" << m_Identity
+ << "': " << DiagnosticInformation(ex);
+
+ break;
+ }
+
+ CpuBoundWork taskStats (yc);
+
+ l_TaskStats.InsertValue(Utility::GetTime(), 1);
+ }
+
+ Disconnect();
+}
+
+void JsonRpcConnection::WriteOutgoingMessages(boost::asio::yield_context yc)
+{
+ Defer signalWriterDone ([this]() { m_WriterDone.Set(); });
+
+ do {
+ m_OutgoingMessagesQueued.Wait(yc);
+
+ auto queue (std::move(m_OutgoingMessagesQueue));
+
+ m_OutgoingMessagesQueue.clear();
+ m_OutgoingMessagesQueued.Clear();
+
+ if (!queue.empty()) {
+ try {
+ for (auto& message : queue) {
+ size_t bytesSent = JsonRpc::SendRawMessage(m_Stream, message, yc);
+
+ if (m_Endpoint) {
+ m_Endpoint->AddMessageSent(bytesSent);
+ }
+ }
+
+ m_Stream->async_flush(yc);
+ } catch (const std::exception& ex) {
+ Log(m_ShuttingDown ? LogDebug : LogWarning, "JsonRpcConnection")
+ << "Error while sending JSON-RPC message for identity '"
+ << m_Identity << "'\n" << DiagnosticInformation(ex);
+
+ break;
+ }
+ }
+ } while (!m_ShuttingDown);
+
+ Disconnect();
+}
+
+double JsonRpcConnection::GetTimestamp() const
+{
+ return m_Timestamp;
+}
+
+String JsonRpcConnection::GetIdentity() const
+{
+ return m_Identity;
+}
+
+bool JsonRpcConnection::IsAuthenticated() const
+{
+ return m_Authenticated;
+}
+
+Endpoint::Ptr JsonRpcConnection::GetEndpoint() const
+{
+ return m_Endpoint;
+}
+
+Shared<AsioTlsStream>::Ptr JsonRpcConnection::GetStream() const
+{
+ return m_Stream;
+}
+
+ConnectionRole JsonRpcConnection::GetRole() const
+{
+ return m_Role;
+}
+
+void JsonRpcConnection::SendMessage(const Dictionary::Ptr& message)
+{
+ Ptr keepAlive (this);
+
+ m_IoStrand.post([this, keepAlive, message]() { SendMessageInternal(message); });
+}
+
+void JsonRpcConnection::SendRawMessage(const String& message)
+{
+ Ptr keepAlive (this);
+
+ m_IoStrand.post([this, keepAlive, message]() {
+ m_OutgoingMessagesQueue.emplace_back(message);
+ m_OutgoingMessagesQueued.Set();
+ });
+}
+
+void JsonRpcConnection::SendMessageInternal(const Dictionary::Ptr& message)
+{
+ m_OutgoingMessagesQueue.emplace_back(JsonEncode(message));
+ m_OutgoingMessagesQueued.Set();
+}
+
+void JsonRpcConnection::Disconnect()
+{
+ namespace asio = boost::asio;
+
+ JsonRpcConnection::Ptr keepAlive (this);
+
+ IoEngine::SpawnCoroutine(m_IoStrand, [this, keepAlive](asio::yield_context yc) {
+ if (!m_ShuttingDown) {
+ m_ShuttingDown = true;
+
+ Log(LogWarning, "JsonRpcConnection")
+ << "API client disconnected for identity '" << m_Identity << "'";
+
+ {
+ CpuBoundWork removeClient (yc);
+
+ if (m_Endpoint) {
+ m_Endpoint->RemoveClient(this);
+ } else {
+ ApiListener::GetInstance()->RemoveAnonymousClient(this);
+ }
+ }
+
+ m_OutgoingMessagesQueued.Set();
+
+ m_WriterDone.Wait(yc);
+
+ /*
+ * Do not swallow exceptions in a coroutine.
+ * https://github.com/Icinga/icinga2/issues/7351
+			 * We must not catch the `detail::forced_unwind` exception, as
+			 * it is used for unwinding the stack.
+ *
+ * Just use the error_code dummy here.
+ */
+ boost::system::error_code ec;
+
+ m_CheckLivenessTimer.cancel();
+ m_HeartbeatTimer.cancel();
+
+ m_Stream->lowest_layer().cancel(ec);
+
+ Timeout::Ptr shutdownTimeout (new Timeout(
+ m_IoStrand.context(),
+ m_IoStrand,
+ boost::posix_time::seconds(10),
+ [this, keepAlive](asio::yield_context yc) {
+ boost::system::error_code ec;
+ m_Stream->lowest_layer().cancel(ec);
+ }
+ ));
+
+ m_Stream->next_layer().async_shutdown(yc[ec]);
+
+ shutdownTimeout->Cancel();
+
+ m_Stream->lowest_layer().shutdown(m_Stream->lowest_layer().shutdown_both, ec);
+ }
+ });
+}
+
+void JsonRpcConnection::MessageHandler(const String& jsonString)
+{
+ Dictionary::Ptr message = JsonRpc::DecodeMessage(jsonString);
+
+ if (m_Endpoint && message->Contains("ts")) {
+ double ts = message->Get("ts");
+
+ /* ignore old messages */
+ if (ts < m_Endpoint->GetRemoteLogPosition())
+ return;
+
+ m_Endpoint->SetRemoteLogPosition(ts);
+ }
+
+ MessageOrigin::Ptr origin = new MessageOrigin();
+ origin->FromClient = this;
+
+ if (m_Endpoint) {
+ if (m_Endpoint->GetZone() != Zone::GetLocalZone())
+ origin->FromZone = m_Endpoint->GetZone();
+ else
+ origin->FromZone = Zone::GetByName(message->Get("originZone"));
+
+ m_Endpoint->AddMessageReceived(jsonString.GetLength());
+ }
+
+ Value vmethod;
+
+ if (!message->Get("method", &vmethod)) {
+ Value vid;
+
+ if (!message->Get("id", &vid))
+ return;
+
+ Log(LogWarning, "JsonRpcConnection",
+ "We received a JSON-RPC response message. This should never happen because we're only ever sending notifications.");
+
+ return;
+ }
+
+ String method = vmethod;
+
+ Log(LogNotice, "JsonRpcConnection")
+ << "Received '" << method << "' message from identity '" << m_Identity << "'.";
+
+ Dictionary::Ptr resultMessage = new Dictionary();
+
+ try {
+ ApiFunction::Ptr afunc = ApiFunction::GetByName(method);
+
+ if (!afunc) {
+ Log(LogNotice, "JsonRpcConnection")
+ << "Call to non-existent function '" << method << "' from endpoint '" << m_Identity << "'.";
+ } else {
+ Dictionary::Ptr params = message->Get("params");
+ if (params)
+ resultMessage->Set("result", afunc->Invoke(origin, params));
+ else
+ resultMessage->Set("result", Empty);
+ }
+ } catch (const std::exception& ex) {
+		/* TODO: Add a user-readable error message for the remote caller */
+ String diagInfo = DiagnosticInformation(ex);
+ resultMessage->Set("error", diagInfo);
+ Log(LogWarning, "JsonRpcConnection")
+ << "Error while processing message for identity '" << m_Identity << "'\n" << diagInfo;
+ }
+
+ if (message->Contains("id")) {
+ resultMessage->Set("jsonrpc", "2.0");
+ resultMessage->Set("id", message->Get("id"));
+
+ SendMessageInternal(resultMessage);
+ }
+}
+
+Value SetLogPositionHandler(const MessageOrigin::Ptr& origin, const Dictionary::Ptr& params)
+{
+ double log_position = params->Get("log_position");
+ Endpoint::Ptr endpoint = origin->FromClient->GetEndpoint();
+
+ if (!endpoint)
+ return Empty;
+
+ if (log_position > endpoint->GetLocalLogPosition())
+ endpoint->SetLocalLogPosition(log_position);
+
+ return Empty;
+}
+
+void JsonRpcConnection::CheckLiveness(boost::asio::yield_context yc)
+{
+ boost::system::error_code ec;
+
+ if (!m_Authenticated) {
+		/* Anonymous connections are normally only used for requesting a certificate and are closed once that request
+		 * has been received. However, the request is only sent if the child has successfully verified its parent's
+		 * certificate, i.e. the connection is authenticated from the child's perspective. If this verification fails,
+		 * both ends treat the connection as anonymous, never actually use it, and attempt a reconnect after 10 seconds,
+		 * leaking the connection. Therefore, close it after a timeout.
+		 */
+
+ m_CheckLivenessTimer.expires_from_now(boost::posix_time::seconds(10));
+ m_CheckLivenessTimer.async_wait(yc[ec]);
+
+ if (m_ShuttingDown) {
+ return;
+ }
+
+ auto remote (m_Stream->lowest_layer().remote_endpoint());
+
+ Log(LogInformation, "JsonRpcConnection")
+ << "Closing anonymous connection [" << remote.address() << "]:" << remote.port() << " after 10 seconds.";
+
+ Disconnect();
+ } else {
+ for (;;) {
+ m_CheckLivenessTimer.expires_from_now(boost::posix_time::seconds(30));
+ m_CheckLivenessTimer.async_wait(yc[ec]);
+
+ if (m_ShuttingDown) {
+ break;
+ }
+
+ if (m_Seen < Utility::GetTime() - 60 && (!m_Endpoint || !m_Endpoint->GetSyncing())) {
+ Log(LogInformation, "JsonRpcConnection")
+ << "No messages for identity '" << m_Identity << "' have been received in the last 60 seconds.";
+
+ Disconnect();
+ break;
+ }
+ }
+ }
+}
+
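+/* Average number of messages processed per second over the last 60 seconds, derived from the
+ * l_TaskStats counter used below (presumably defined earlier in this file). */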
+double JsonRpcConnection::GetWorkQueueRate()
+{
+ return l_TaskStats.UpdateAndGetValues(Utility::GetTime(), 60) / 60.0;
+}
diff --git a/lib/remote/jsonrpcconnection.hpp b/lib/remote/jsonrpcconnection.hpp
new file mode 100644
index 0000000..591ddcb
--- /dev/null
+++ b/lib/remote/jsonrpcconnection.hpp
@@ -0,0 +1,100 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef JSONRPCCONNECTION_H
+#define JSONRPCCONNECTION_H
+
+#include "remote/i2-remote.hpp"
+#include "remote/endpoint.hpp"
+#include "base/io-engine.hpp"
+#include "base/tlsstream.hpp"
+#include "base/timer.hpp"
+#include "base/workqueue.hpp"
+#include <memory>
+#include <vector>
+#include <boost/asio/io_context.hpp>
+#include <boost/asio/io_context_strand.hpp>
+#include <boost/asio/spawn.hpp>
+
+namespace icinga
+{
+
+enum ClientRole
+{
+ ClientInbound,
+ ClientOutbound
+};
+
+enum ClientType
+{
+ ClientJsonRpc,
+ ClientHttp
+};
+
+class MessageOrigin;
+
+/**
+ * An API client connection.
+ *
+ * @ingroup remote
+ */
+class JsonRpcConnection final : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(JsonRpcConnection);
+
+ JsonRpcConnection(const String& identity, bool authenticated, const Shared<AsioTlsStream>::Ptr& stream, ConnectionRole role);
+
+ void Start();
+
+ double GetTimestamp() const;
+ String GetIdentity() const;
+ bool IsAuthenticated() const;
+ Endpoint::Ptr GetEndpoint() const;
+ Shared<AsioTlsStream>::Ptr GetStream() const;
+ ConnectionRole GetRole() const;
+
+ void Disconnect();
+
+ void SendMessage(const Dictionary::Ptr& request);
+ void SendRawMessage(const String& request);
+
+ static Value HeartbeatAPIHandler(const intrusive_ptr<MessageOrigin>& origin, const Dictionary::Ptr& params);
+
+ static double GetWorkQueueRate();
+
+ static void SendCertificateRequest(const JsonRpcConnection::Ptr& aclient, const intrusive_ptr<MessageOrigin>& origin, const String& path);
+
+private:
+ String m_Identity;
+ bool m_Authenticated;
+ Endpoint::Ptr m_Endpoint;
+ Shared<AsioTlsStream>::Ptr m_Stream;
+ ConnectionRole m_Role;
+ double m_Timestamp;
+ double m_Seen;
+ double m_NextHeartbeat;
+ boost::asio::io_context::strand m_IoStrand;
+ std::vector<String> m_OutgoingMessagesQueue;
+ AsioConditionVariable m_OutgoingMessagesQueued;
+ AsioConditionVariable m_WriterDone;
+ bool m_ShuttingDown;
+ boost::asio::deadline_timer m_CheckLivenessTimer, m_HeartbeatTimer;
+
+ JsonRpcConnection(const String& identity, bool authenticated, const Shared<AsioTlsStream>::Ptr& stream, ConnectionRole role, boost::asio::io_context& io);
+
+ void HandleIncomingMessages(boost::asio::yield_context yc);
+ void WriteOutgoingMessages(boost::asio::yield_context yc);
+ void HandleAndWriteHeartbeats(boost::asio::yield_context yc);
+ void CheckLiveness(boost::asio::yield_context yc);
+
+ bool ProcessMessage();
+ void MessageHandler(const String& jsonString);
+
+ void CertificateRequestResponseHandler(const Dictionary::Ptr& message);
+
+ void SendMessageInternal(const Dictionary::Ptr& request);
+};
+
+}
+
+#endif /* JSONRPCCONNECTION_H */
diff --git a/lib/remote/messageorigin.cpp b/lib/remote/messageorigin.cpp
new file mode 100644
index 0000000..7de0ca7
--- /dev/null
+++ b/lib/remote/messageorigin.cpp
@@ -0,0 +1,10 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/messageorigin.hpp"
+
+using namespace icinga;
+
+bool MessageOrigin::IsLocal() const
+{
+ return !FromClient;
+}
diff --git a/lib/remote/messageorigin.hpp b/lib/remote/messageorigin.hpp
new file mode 100644
index 0000000..8a91ecc
--- /dev/null
+++ b/lib/remote/messageorigin.hpp
@@ -0,0 +1,28 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef MESSAGEORIGIN_H
+#define MESSAGEORIGIN_H
+
+#include "remote/zone.hpp"
+#include "remote/jsonrpcconnection.hpp"
+
+namespace icinga
+{
+
+/**
+ * @ingroup remote
+ */
+class MessageOrigin final : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(MessageOrigin);
+
+ Zone::Ptr FromZone;
+ JsonRpcConnection::Ptr FromClient;
+
+ bool IsLocal() const;
+};
+
+}
+
+#endif /* MESSAGEORIGIN_H */
diff --git a/lib/remote/modifyobjecthandler.cpp b/lib/remote/modifyobjecthandler.cpp
new file mode 100644
index 0000000..d6fa98b
--- /dev/null
+++ b/lib/remote/modifyobjecthandler.cpp
@@ -0,0 +1,168 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/modifyobjecthandler.hpp"
+#include "remote/configobjectslock.hpp"
+#include "remote/httputility.hpp"
+#include "remote/filterutility.hpp"
+#include "remote/apiaction.hpp"
+#include "base/exception.hpp"
+#include <boost/algorithm/string/case_conv.hpp>
+#include <set>
+
+using namespace icinga;
+
+REGISTER_URLHANDLER("/v1/objects", ModifyObjectHandler);
+
+bool ModifyObjectHandler::HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+)
+{
+ namespace http = boost::beast::http;
+
+ if (url->GetPath().size() < 3 || url->GetPath().size() > 4)
+ return false;
+
+ if (request.method() != http::verb::post)
+ return false;
+
+ Type::Ptr type = FilterUtility::TypeFromPluralName(url->GetPath()[2]);
+
+ if (!type) {
+ HttpUtility::SendJsonError(response, params, 400, "Invalid type specified.");
+ return true;
+ }
+
+ QueryDescription qd;
+ qd.Types.insert(type->GetName());
+ qd.Permission = "objects/modify/" + type->GetName();
+
+ params->Set("type", type->GetName());
+
+ if (url->GetPath().size() >= 4) {
+ String attr = type->GetName();
+ boost::algorithm::to_lower(attr);
+ params->Set(attr, url->GetPath()[3]);
+ }
+
+ std::vector<Value> objs;
+
+ try {
+ objs = FilterUtility::GetFilterTargets(qd, params, user);
+ } catch (const std::exception& ex) {
+ HttpUtility::SendJsonError(response, params, 404,
+ "No objects found.",
+ DiagnosticInformation(ex));
+ return true;
+ }
+
+ Value attrsVal = params->Get("attrs");
+
+ if (attrsVal.GetReflectionType() != Dictionary::TypeInstance && attrsVal.GetType() != ValueEmpty) {
+ HttpUtility::SendJsonError(response, params, 400,
+			"Invalid type for 'attrs' attribute specified. Dictionary type is required. "
+			"Or is this a POST query and you missed adding an 'X-HTTP-Method-Override: GET' header?");
+ return true;
+ }
+
+ Dictionary::Ptr attrs = attrsVal;
+
+ Value restoreAttrsVal = params->Get("restore_attrs");
+
+ if (restoreAttrsVal.GetReflectionType() != Array::TypeInstance && restoreAttrsVal.GetType() != ValueEmpty) {
+ HttpUtility::SendJsonError(response, params, 400,
+ "Invalid type for 'restore_attrs' attribute specified. Array type is required.");
+ return true;
+ }
+
+ Array::Ptr restoreAttrs = restoreAttrsVal;
+
+ if (!(attrs || restoreAttrs)) {
+ HttpUtility::SendJsonError(response, params, 400,
+ "Missing both 'attrs' and 'restore_attrs'. "
+			"Or is this a POST query and you missed adding an 'X-HTTP-Method-Override: GET' header?");
+ return true;
+ }
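+
+	/* Illustrative request body handled below (shape only; the attribute names are hypothetical):
+	 *   { "attrs": { "enable_active_checks": false }, "restore_attrs": [ "check_interval" ], "verbose": true }
+	 */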
+
+ bool verbose = false;
+
+ if (params)
+ verbose = HttpUtility::GetLastParameter(params, "verbose");
+
+ ConfigObjectsSharedLock lock (std::try_to_lock);
+
+ if (!lock) {
+ HttpUtility::SendJsonError(response, params, 503, "Icinga is reloading");
+ return true;
+ }
+
+ ArrayData results;
+
+ for (const ConfigObject::Ptr& obj : objs) {
+ Dictionary::Ptr result1 = new Dictionary();
+
+ result1->Set("type", type->GetName());
+ result1->Set("name", obj->GetName());
+
+ String key;
+
+ try {
+ if (restoreAttrs) {
+ ObjectLock oLock (restoreAttrs);
+
+ for (auto& attr : restoreAttrs) {
+ key = attr;
+ obj->RestoreAttribute(key);
+ }
+ }
+ } catch (const std::exception& ex) {
+ result1->Set("code", 500);
+ result1->Set("status", "Attribute '" + key + "' could not be restored: " + DiagnosticInformation(ex, false));
+
+ if (verbose)
+ result1->Set("diagnostic_information", DiagnosticInformation(ex));
+
+ results.push_back(std::move(result1));
+ continue;
+ }
+
+ try {
+ if (attrs) {
+ ObjectLock olock(attrs);
+ for (const Dictionary::Pair& kv : attrs) {
+ key = kv.first;
+ obj->ModifyAttribute(kv.first, kv.second);
+ }
+ }
+ } catch (const std::exception& ex) {
+ result1->Set("code", 500);
+ result1->Set("status", "Attribute '" + key + "' could not be set: " + DiagnosticInformation(ex, false));
+
+ if (verbose)
+ result1->Set("diagnostic_information", DiagnosticInformation(ex));
+
+ results.push_back(std::move(result1));
+ continue;
+ }
+
+ result1->Set("code", 200);
+ result1->Set("status", "Attributes updated.");
+
+ results.push_back(std::move(result1));
+ }
+
+ Dictionary::Ptr result = new Dictionary({
+ { "results", new Array(std::move(results)) }
+ });
+
+ response.result(http::status::ok);
+ HttpUtility::SendJsonBody(response, params, result);
+
+ return true;
+}
diff --git a/lib/remote/modifyobjecthandler.hpp b/lib/remote/modifyobjecthandler.hpp
new file mode 100644
index 0000000..f469301
--- /dev/null
+++ b/lib/remote/modifyobjecthandler.hpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef MODIFYOBJECTHANDLER_H
+#define MODIFYOBJECTHANDLER_H
+
+#include "remote/httphandler.hpp"
+
+namespace icinga
+{
+
+class ModifyObjectHandler final : public HttpHandler
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ModifyObjectHandler);
+
+ bool HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+ ) override;
+};
+
+}
+
+#endif /* MODIFYOBJECTHANDLER_H */
diff --git a/lib/remote/objectqueryhandler.cpp b/lib/remote/objectqueryhandler.cpp
new file mode 100644
index 0000000..ad73030
--- /dev/null
+++ b/lib/remote/objectqueryhandler.cpp
@@ -0,0 +1,330 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/objectqueryhandler.hpp"
+#include "remote/httputility.hpp"
+#include "remote/filterutility.hpp"
+#include "base/serializer.hpp"
+#include "base/dependencygraph.hpp"
+#include "base/configtype.hpp"
+#include <boost/algorithm/string/case_conv.hpp>
+#include <set>
+#include <unordered_map>
+
+using namespace icinga;
+
+REGISTER_URLHANDLER("/v1/objects", ObjectQueryHandler);
+
+Dictionary::Ptr ObjectQueryHandler::SerializeObjectAttrs(const Object::Ptr& object,
+ const String& attrPrefix, const Array::Ptr& attrs, bool isJoin, bool allAttrs)
+{
+ Type::Ptr type = object->GetReflectionType();
+
+ std::vector<int> fids;
+
+ if (isJoin && attrs) {
+ ObjectLock olock(attrs);
+ for (const String& attr : attrs) {
+ if (attr == attrPrefix) {
+ allAttrs = true;
+ break;
+ }
+ }
+ }
+
+ if (!isJoin && (!attrs || attrs->GetLength() == 0))
+ allAttrs = true;
+
+ if (allAttrs) {
+ for (int fid = 0; fid < type->GetFieldCount(); fid++) {
+ fids.push_back(fid);
+ }
+ } else if (attrs) {
+ ObjectLock olock(attrs);
+ for (const String& attr : attrs) {
+ String userAttr;
+
+ if (isJoin) {
+ String::SizeType dpos = attr.FindFirstOf(".");
+ if (dpos == String::NPos)
+ continue;
+
+ String userJoinAttr = attr.SubStr(0, dpos);
+ if (userJoinAttr != attrPrefix)
+ continue;
+
+ userAttr = attr.SubStr(dpos + 1);
+ } else
+ userAttr = attr;
+
+ int fid = type->GetFieldId(userAttr);
+
+ if (fid < 0)
+ BOOST_THROW_EXCEPTION(ScriptError("Invalid field specified: " + userAttr));
+
+ fids.push_back(fid);
+ }
+ }
+
+ DictionaryData resultAttrs;
+ resultAttrs.reserve(fids.size());
+
+ for (int fid : fids) {
+ Field field = type->GetFieldInfo(fid);
+
+ Value val = object->GetField(fid);
+
+ /* hide attributes which shouldn't be user-visible */
+ if (field.Attributes & FANoUserView)
+ continue;
+
+ /* hide internal navigation fields */
+ if (field.Attributes & FANavigation && !(field.Attributes & (FAConfig | FAState)))
+ continue;
+
+ Value sval = Serialize(val, FAConfig | FAState);
+ resultAttrs.emplace_back(field.Name, sval);
+ }
+
+ return new Dictionary(std::move(resultAttrs));
+}
+
+bool ObjectQueryHandler::HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+)
+{
+ namespace http = boost::beast::http;
+
+ if (url->GetPath().size() < 3 || url->GetPath().size() > 4)
+ return false;
+
+ if (request.method() != http::verb::get)
+ return false;
+
+ Type::Ptr type = FilterUtility::TypeFromPluralName(url->GetPath()[2]);
+
+ if (!type) {
+ HttpUtility::SendJsonError(response, params, 400, "Invalid type specified.");
+ return true;
+ }
+
+ QueryDescription qd;
+ qd.Types.insert(type->GetName());
+ qd.Permission = "objects/query/" + type->GetName();
+
+ Array::Ptr uattrs, ujoins, umetas;
+
+ try {
+ uattrs = params->Get("attrs");
+ } catch (const std::exception&) {
+ HttpUtility::SendJsonError(response, params, 400,
+ "Invalid type for 'attrs' attribute specified. Array type is required.");
+ return true;
+ }
+
+ try {
+ ujoins = params->Get("joins");
+ } catch (const std::exception&) {
+ HttpUtility::SendJsonError(response, params, 400,
+ "Invalid type for 'joins' attribute specified. Array type is required.");
+ return true;
+ }
+
+ try {
+ umetas = params->Get("meta");
+ } catch (const std::exception&) {
+ HttpUtility::SendJsonError(response, params, 400,
+ "Invalid type for 'meta' attribute specified. Array type is required.");
+ return true;
+ }
+
+ bool allJoins = HttpUtility::GetLastParameter(params, "all_joins");
+
+ params->Set("type", type->GetName());
+
+ if (url->GetPath().size() >= 4) {
+ String attr = type->GetName();
+ boost::algorithm::to_lower(attr);
+ params->Set(attr, url->GetPath()[3]);
+ }
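+
+	// E.g. a (hypothetical) GET /v1/objects/hosts/example.localdomain request ends up as
+	// params["host"] = "example.localdomain"; FilterUtility::GetFilterTargets() below is then
+	// expected to restrict the query to that single object (the helper itself is not part of this hunk).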
+
+ std::vector<Value> objs;
+
+ try {
+ objs = FilterUtility::GetFilterTargets(qd, params, user);
+ } catch (const std::exception& ex) {
+ HttpUtility::SendJsonError(response, params, 404,
+ "No objects found.",
+ DiagnosticInformation(ex));
+ return true;
+ }
+
+ ArrayData results;
+ results.reserve(objs.size());
+
+ std::set<String> joinAttrs;
+ std::set<String> userJoinAttrs;
+
+ if (ujoins) {
+ ObjectLock olock(ujoins);
+ for (const String& ujoin : ujoins) {
+ userJoinAttrs.insert(ujoin.SubStr(0, ujoin.FindFirstOf(".")));
+ }
+ }
+
+ for (int fid = 0; fid < type->GetFieldCount(); fid++) {
+ Field field = type->GetFieldInfo(fid);
+
+ if (!(field.Attributes & FANavigation))
+ continue;
+
+ if (!allJoins && userJoinAttrs.find(field.NavigationName) == userJoinAttrs.end())
+ continue;
+
+ joinAttrs.insert(field.Name);
+ }
+
+ std::unordered_map<Type*, std::pair<bool, std::unique_ptr<Expression>>> typePermissions;
+ std::unordered_map<Object*, bool> objectAccessAllowed;
+
+ for (const ConfigObject::Ptr& obj : objs) {
+ DictionaryData result1{
+ { "name", obj->GetName() },
+ { "type", obj->GetReflectionType()->GetName() }
+ };
+
+ DictionaryData metaAttrs;
+
+ if (umetas) {
+ ObjectLock olock(umetas);
+ for (const String& meta : umetas) {
+ if (meta == "used_by") {
+ Array::Ptr used_by = new Array();
+ metaAttrs.emplace_back("used_by", used_by);
+
+ for (const Object::Ptr& pobj : DependencyGraph::GetParents((obj)))
+ {
+ ConfigObject::Ptr configObj = dynamic_pointer_cast<ConfigObject>(pobj);
+
+ if (!configObj)
+ continue;
+
+ used_by->Add(new Dictionary({
+ { "type", configObj->GetReflectionType()->GetName() },
+ { "name", configObj->GetName() }
+ }));
+ }
+ } else if (meta == "location") {
+ metaAttrs.emplace_back("location", obj->GetSourceLocation());
+ } else {
+ HttpUtility::SendJsonError(response, params, 400, "Invalid field specified for meta: " + meta);
+ return true;
+ }
+ }
+ }
+
+ result1.emplace_back("meta", new Dictionary(std::move(metaAttrs)));
+
+ try {
+ result1.emplace_back("attrs", SerializeObjectAttrs(obj, String(), uattrs, false, false));
+ } catch (const ScriptError& ex) {
+ HttpUtility::SendJsonError(response, params, 400, ex.what());
+ return true;
+ }
+
+ DictionaryData joins;
+
+ for (const String& joinAttr : joinAttrs) {
+ Object::Ptr joinedObj;
+ int fid = type->GetFieldId(joinAttr);
+
+ if (fid < 0) {
+ HttpUtility::SendJsonError(response, params, 400, "Invalid field specified for join: " + joinAttr);
+ return true;
+ }
+
+ Field field = type->GetFieldInfo(fid);
+
+ if (!(field.Attributes & FANavigation)) {
+ HttpUtility::SendJsonError(response, params, 400, "Not a joinable field: " + joinAttr);
+ return true;
+ }
+
+ joinedObj = obj->NavigateField(fid);
+
+ if (!joinedObj)
+ continue;
+
+ Type::Ptr reflectionType = joinedObj->GetReflectionType();
+ auto it = typePermissions.find(reflectionType.get());
+ bool granted;
+
+ if (it == typePermissions.end()) {
+ String permission = "objects/query/" + reflectionType->GetName();
+
+ std::unique_ptr<Expression> permissionFilter;
+ granted = FilterUtility::HasPermission(user, permission, &permissionFilter);
+
+ it = typePermissions.insert({reflectionType.get(), std::make_pair(granted, std::move(permissionFilter))}).first;
+ }
+
+ granted = it->second.first;
+ const std::unique_ptr<Expression>& permissionFilter = it->second.second;
+
+ if (!granted) {
+ // Not authorized
+ continue;
+ }
+
+ auto relation = objectAccessAllowed.find(joinedObj.get());
+ bool accessAllowed;
+
+ if (relation == objectAccessAllowed.end()) {
+ ScriptFrame permissionFrame(false, new Namespace());
+
+ try {
+ accessAllowed = FilterUtility::EvaluateFilter(permissionFrame, permissionFilter.get(), joinedObj);
+ } catch (const ScriptError& err) {
+ accessAllowed = false;
+ }
+
+ objectAccessAllowed.insert({joinedObj.get(), accessAllowed});
+ } else {
+ accessAllowed = relation->second;
+ }
+
+ if (!accessAllowed) {
+ // Access denied
+ continue;
+ }
+
+ String prefix = field.NavigationName;
+
+ try {
+ joins.emplace_back(prefix, SerializeObjectAttrs(joinedObj, prefix, ujoins, true, allJoins));
+ } catch (const ScriptError& ex) {
+ HttpUtility::SendJsonError(response, params, 400, ex.what());
+ return true;
+ }
+ }
+
+ result1.emplace_back("joins", new Dictionary(std::move(joins)));
+
+ results.push_back(new Dictionary(std::move(result1)));
+ }
+
+ Dictionary::Ptr result = new Dictionary({
+ { "results", new Array(std::move(results)) }
+ });
+
+ response.result(http::status::ok);
+ HttpUtility::SendJsonBody(response, params, result);
+
+ return true;
+}
diff --git a/lib/remote/objectqueryhandler.hpp b/lib/remote/objectqueryhandler.hpp
new file mode 100644
index 0000000..691b2cf
--- /dev/null
+++ b/lib/remote/objectqueryhandler.hpp
@@ -0,0 +1,34 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef OBJECTQUERYHANDLER_H
+#define OBJECTQUERYHANDLER_H
+
+#include "remote/httphandler.hpp"
+
+namespace icinga
+{
+
+class ObjectQueryHandler final : public HttpHandler
+{
+public:
+ DECLARE_PTR_TYPEDEFS(ObjectQueryHandler);
+
+ bool HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+ ) override;
+
+private:
+ static Dictionary::Ptr SerializeObjectAttrs(const Object::Ptr& object, const String& attrPrefix,
+ const Array::Ptr& attrs, bool isJoin, bool allAttrs);
+};
+
+}
+
+#endif /* OBJECTQUERYHANDLER_H */
diff --git a/lib/remote/pkiutility.cpp b/lib/remote/pkiutility.cpp
new file mode 100644
index 0000000..00d6525
--- /dev/null
+++ b/lib/remote/pkiutility.cpp
@@ -0,0 +1,452 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/pkiutility.hpp"
+#include "remote/apilistener.hpp"
+#include "base/defer.hpp"
+#include "base/io-engine.hpp"
+#include "base/logger.hpp"
+#include "base/application.hpp"
+#include "base/tcpsocket.hpp"
+#include "base/tlsutility.hpp"
+#include "base/console.hpp"
+#include "base/tlsstream.hpp"
+#include "base/tcpsocket.hpp"
+#include "base/json.hpp"
+#include "base/utility.hpp"
+#include "base/convert.hpp"
+#include "base/exception.hpp"
+#include "remote/jsonrpc.hpp"
+#include <fstream>
+#include <iostream>
+#include <boost/asio/ssl/context.hpp>
+#include <boost/filesystem/path.hpp>
+
+using namespace icinga;
+
+int PkiUtility::NewCa()
+{
+ String caDir = ApiListener::GetCaDir();
+ String caCertFile = caDir + "/ca.crt";
+ String caKeyFile = caDir + "/ca.key";
+
+ if (Utility::PathExists(caCertFile) && Utility::PathExists(caKeyFile)) {
+ Log(LogWarning, "cli")
+ << "CA files '" << caCertFile << "' and '" << caKeyFile << "' already exist.";
+ return 1;
+ }
+
+ Utility::MkDirP(caDir, 0700);
+
+ MakeX509CSR("Icinga CA", caKeyFile, String(), caCertFile, true);
+
+ return 0;
+}
+
+int PkiUtility::NewCert(const String& cn, const String& keyfile, const String& csrfile, const String& certfile)
+{
+ try {
+ MakeX509CSR(cn, keyfile, csrfile, certfile);
+	} catch (const std::exception&) {
+ return 1;
+ }
+
+ return 0;
+}
+
+int PkiUtility::SignCsr(const String& csrfile, const String& certfile)
+{
+ char errbuf[256];
+
+ InitializeOpenSSL();
+
+ BIO *csrbio = BIO_new_file(csrfile.CStr(), "r");
+ X509_REQ *req = PEM_read_bio_X509_REQ(csrbio, nullptr, nullptr, nullptr);
+
+ if (!req) {
+ ERR_error_string_n(ERR_peek_error(), errbuf, sizeof errbuf);
+ Log(LogCritical, "SSL")
+ << "Could not read X509 certificate request from '" << csrfile << "': " << ERR_peek_error() << ", \"" << errbuf << "\"";
+ return 1;
+ }
+
+ BIO_free(csrbio);
+
+ std::shared_ptr<EVP_PKEY> pubkey = std::shared_ptr<EVP_PKEY>(X509_REQ_get_pubkey(req), EVP_PKEY_free);
+ std::shared_ptr<X509> cert = CreateCertIcingaCA(pubkey.get(), X509_REQ_get_subject_name(req));
+
+ X509_REQ_free(req);
+
+ WriteCert(cert, certfile);
+
+ return 0;
+}
+
+std::shared_ptr<X509> PkiUtility::FetchCert(const String& host, const String& port)
+{
+ Shared<boost::asio::ssl::context>::Ptr sslContext;
+
+ try {
+ sslContext = MakeAsioSslContext();
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "pki")
+ << "Cannot make SSL context.";
+ Log(LogDebug, "pki")
+ << "Cannot make SSL context:\n" << DiagnosticInformation(ex);
+ return std::shared_ptr<X509>();
+ }
+
+ auto stream (Shared<AsioTlsStream>::Make(IoEngine::Get().GetIoContext(), *sslContext, host));
+
+ try {
+ Connect(stream->lowest_layer(), host, port);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "pki")
+ << "Cannot connect to host '" << host << "' on port '" << port << "'";
+ Log(LogDebug, "pki")
+ << "Cannot connect to host '" << host << "' on port '" << port << "':\n" << DiagnosticInformation(ex);
+ return std::shared_ptr<X509>();
+ }
+
+ auto& sslConn (stream->next_layer());
+
+ try {
+ sslConn.handshake(sslConn.client);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "pki")
+ << "Client TLS handshake failed. (" << ex.what() << ")";
+ return std::shared_ptr<X509>();
+ }
+
+ Defer shutdown ([&sslConn]() { sslConn.shutdown(); });
+
+ return sslConn.GetPeerCertificate();
+}
+
+int PkiUtility::WriteCert(const std::shared_ptr<X509>& cert, const String& trustedfile)
+{
+ std::ofstream fpcert;
+ fpcert.open(trustedfile.CStr());
+ fpcert << CertificateToString(cert);
+ fpcert.close();
+
+ if (fpcert.fail()) {
+ Log(LogCritical, "pki")
+ << "Could not write certificate to file '" << trustedfile << "'.";
+ return 1;
+ }
+
+ Log(LogInformation, "pki")
+ << "Writing certificate to file '" << trustedfile << "'.";
+
+ return 0;
+}
+
+int PkiUtility::GenTicket(const String& cn, const String& salt, std::ostream& ticketfp)
+{
+ ticketfp << PBKDF2_SHA1(cn, salt, 50000) << "\n";
+
+ return 0;
+}
+
+int PkiUtility::RequestCertificate(const String& host, const String& port, const String& keyfile,
+ const String& certfile, const String& cafile, const std::shared_ptr<X509>& trustedCert, const String& ticket)
+{
+ Shared<boost::asio::ssl::context>::Ptr sslContext;
+
+ try {
+ sslContext = MakeAsioSslContext(certfile, keyfile);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli")
+ << "Cannot make SSL context for cert path: '" << certfile << "' key path: '" << keyfile << "' ca path: '" << cafile << "'.";
+ Log(LogDebug, "cli")
+ << "Cannot make SSL context for cert path: '" << certfile << "' key path: '" << keyfile << "' ca path: '" << cafile << "':\n" << DiagnosticInformation(ex);
+ return 1;
+ }
+
+ auto stream (Shared<AsioTlsStream>::Make(IoEngine::Get().GetIoContext(), *sslContext, host));
+
+ try {
+ Connect(stream->lowest_layer(), host, port);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli")
+ << "Cannot connect to host '" << host << "' on port '" << port << "'";
+ Log(LogDebug, "cli")
+ << "Cannot connect to host '" << host << "' on port '" << port << "':\n" << DiagnosticInformation(ex);
+ return 1;
+ }
+
+ auto& sslConn (stream->next_layer());
+
+ try {
+ sslConn.handshake(sslConn.client);
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli")
+ << "Client TLS handshake failed: " << DiagnosticInformation(ex, false);
+ return 1;
+ }
+
+ Defer shutdown ([&sslConn]() { sslConn.shutdown(); });
+
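+	// The peer must present exactly the certificate the operator marked as trusted (e.g. one fetched
+	// beforehand via FetchCert()); any mismatch below aborts the signing request.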
+ auto peerCert (sslConn.GetPeerCertificate());
+
+ if (X509_cmp(peerCert.get(), trustedCert.get())) {
+ Log(LogCritical, "cli", "Peer certificate does not match trusted certificate.");
+ return 1;
+ }
+
+ Dictionary::Ptr params = new Dictionary({
+ { "ticket", String(ticket) }
+ });
+
+ String msgid = Utility::NewUniqueID();
+
+ Dictionary::Ptr request = new Dictionary({
+ { "jsonrpc", "2.0" },
+ { "id", msgid },
+ { "method", "pki::RequestCertificate" },
+ { "params", params }
+ });
+
+ Dictionary::Ptr response;
+
+ try {
+ JsonRpc::SendMessage(stream, request);
+ stream->flush();
+
+ for (;;) {
+ response = JsonRpc::DecodeMessage(JsonRpc::ReadMessage(stream));
+
+ if (response && response->Contains("error")) {
+ Log(LogCritical, "cli", "Could not fetch valid response. Please check the master log (notice or debug).");
+#ifdef I2_DEBUG
+ /* we shouldn't expose master errors to the user in production environments */
+ Log(LogCritical, "cli", response->Get("error"));
+#endif /* I2_DEBUG */
+ return 1;
+ }
+
+ if (response && (response->Get("id") != msgid))
+ continue;
+
+ break;
+ }
+ } catch (...) {
+ Log(LogCritical, "cli", "Could not fetch valid response. Please check the master log.");
+ return 1;
+ }
+
+ if (!response) {
+ Log(LogCritical, "cli", "Could not fetch valid response. Please check the master log.");
+ return 1;
+ }
+
+ Dictionary::Ptr result = response->Get("result");
+
+ if (result->Contains("ca")) {
+ try {
+ StringToCertificate(result->Get("ca"));
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli")
+ << "Could not write CA file: " << DiagnosticInformation(ex, false);
+ return 1;
+ }
+
+ Log(LogInformation, "cli")
+ << "Writing CA certificate to file '" << cafile << "'.";
+
+ std::ofstream fpca;
+ fpca.open(cafile.CStr());
+ fpca << result->Get("ca");
+ fpca.close();
+
+ if (fpca.fail()) {
+ Log(LogCritical, "cli")
+ << "Could not open CA certificate file '" << cafile << "' for writing.";
+ return 1;
+ }
+ }
+
+ if (result->Contains("error")) {
+ LogSeverity severity;
+
+ Value vstatus;
+
+ if (!result->Get("status_code", &vstatus))
+ vstatus = 1;
+
+ int status = vstatus;
+
+ if (status == 1)
+ severity = LogCritical;
+ else {
+ severity = LogInformation;
+ Log(severity, "cli", "!!!!!!");
+ }
+
+ Log(severity, "cli")
+ << "!!! " << result->Get("error");
+
+ if (status == 1)
+ return 1;
+ else {
+ Log(severity, "cli", "!!!!!!");
+ return 0;
+ }
+ }
+
+ try {
+ StringToCertificate(result->Get("cert"));
+ } catch (const std::exception& ex) {
+ Log(LogCritical, "cli")
+ << "Could not write certificate file: " << DiagnosticInformation(ex, false);
+ return 1;
+ }
+
+ Log(LogInformation, "cli")
+ << "Writing signed certificate to file '" << certfile << "'.";
+
+ std::ofstream fpcert;
+ fpcert.open(certfile.CStr());
+ fpcert << result->Get("cert");
+ fpcert.close();
+
+ if (fpcert.fail()) {
+ Log(LogCritical, "cli")
+ << "Could not write certificate to file '" << certfile << "'.";
+ return 1;
+ }
+
+ return 0;
+}
+
+String PkiUtility::GetCertificateInformation(const std::shared_ptr<X509>& cert) {
+ BIO *out = BIO_new(BIO_s_mem());
+ String pre;
+
+ pre = "\n Version: " + Convert::ToString(GetCertificateVersion(cert));
+ BIO_write(out, pre.CStr(), pre.GetLength());
+
+ pre = "\n Subject: ";
+ BIO_write(out, pre.CStr(), pre.GetLength());
+ X509_NAME_print_ex(out, X509_get_subject_name(cert.get()), 0, XN_FLAG_ONELINE & ~ASN1_STRFLGS_ESC_MSB);
+
+ pre = "\n Issuer: ";
+ BIO_write(out, pre.CStr(), pre.GetLength());
+ X509_NAME_print_ex(out, X509_get_issuer_name(cert.get()), 0, XN_FLAG_ONELINE & ~ASN1_STRFLGS_ESC_MSB);
+
+ pre = "\n Valid From: ";
+ BIO_write(out, pre.CStr(), pre.GetLength());
+ ASN1_TIME_print(out, X509_get_notBefore(cert.get()));
+
+ pre = "\n Valid Until: ";
+ BIO_write(out, pre.CStr(), pre.GetLength());
+ ASN1_TIME_print(out, X509_get_notAfter(cert.get()));
+
+ pre = "\n Serial: ";
+ BIO_write(out, pre.CStr(), pre.GetLength());
+ ASN1_INTEGER *asn1_serial = X509_get_serialNumber(cert.get());
+ for (int i = 0; i < asn1_serial->length; i++) {
+ BIO_printf(out, "%02x%c", asn1_serial->data[i], ((i + 1 == asn1_serial->length) ? '\n' : ':'));
+ }
+
+ pre = "\n Signature Algorithm: " + GetSignatureAlgorithm(cert);
+ BIO_write(out, pre.CStr(), pre.GetLength());
+
+ pre = "\n Subject Alt Names: " + GetSubjectAltNames(cert)->Join(" ");
+ BIO_write(out, pre.CStr(), pre.GetLength());
+
+ pre = "\n Fingerprint: ";
+ BIO_write(out, pre.CStr(), pre.GetLength());
+ unsigned char md[EVP_MAX_MD_SIZE];
+ unsigned int diglen;
+ X509_digest(cert.get(), EVP_sha256(), md, &diglen);
+
+ char *data;
+ long length = BIO_get_mem_data(out, &data);
+
+ std::stringstream info;
+ info << String(data, data + length);
+
+ BIO_free(out);
+
+ for (unsigned int i = 0; i < diglen; i++) {
+ info << std::setfill('0') << std::setw(2) << std::uppercase
+ << std::hex << static_cast<int>(md[i]) << ' ';
+ }
+ info << '\n';
+
+ return info.str();
+}
+
+static void CollectRequestHandler(const Dictionary::Ptr& requests, const String& requestFile)
+{
+ Dictionary::Ptr request = Utility::LoadJsonFile(requestFile);
+
+ if (!request)
+ return;
+
+ Dictionary::Ptr result = new Dictionary();
+
+ namespace fs = boost::filesystem;
+ fs::path file(requestFile.Begin(), requestFile.End());
+ String fingerprint = file.stem().string();
+
+ String certRequestText = request->Get("cert_request");
+ result->Set("cert_request", certRequestText);
+
+ Value vcertResponseText;
+
+ if (request->Get("cert_response", &vcertResponseText)) {
+ String certResponseText = vcertResponseText;
+ result->Set("cert_response", certResponseText);
+ }
+
+ std::shared_ptr<X509> certRequest = StringToCertificate(certRequestText);
+
+/* XXX (requires OpenSSL >= 1.0.0)
+ time_t now;
+ time(&now);
+ ASN1_TIME *tm = ASN1_TIME_adj(nullptr, now, 0, 0);
+
+ int day, sec;
+ ASN1_TIME_diff(&day, &sec, tm, X509_get_notBefore(certRequest.get()));
+
+ result->Set("timestamp", static_cast<double>(now) + day * 24 * 60 * 60 + sec); */
+
+ BIO *out = BIO_new(BIO_s_mem());
+ ASN1_TIME_print(out, X509_get_notBefore(certRequest.get()));
+
+ char *data;
+ long length;
+ length = BIO_get_mem_data(out, &data);
+
+ result->Set("timestamp", String(data, data + length));
+ BIO_free(out);
+
+ out = BIO_new(BIO_s_mem());
+ X509_NAME_print_ex(out, X509_get_subject_name(certRequest.get()), 0, XN_FLAG_ONELINE & ~ASN1_STRFLGS_ESC_MSB);
+
+ length = BIO_get_mem_data(out, &data);
+
+ result->Set("subject", String(data, data + length));
+ BIO_free(out);
+
+ requests->Set(fingerprint, result);
+}
+
+Dictionary::Ptr PkiUtility::GetCertificateRequests(bool removed)
+{
+ Dictionary::Ptr requests = new Dictionary();
+
+ String requestDir = ApiListener::GetCertificateRequestsDir();
+ String ext = "json";
+
+ if (removed)
+ ext = "removed";
+
+ if (Utility::PathExists(requestDir))
+ Utility::Glob(requestDir + "/*." + ext, [requests](const String& requestFile) { CollectRequestHandler(requests, requestFile); }, GlobFile);
+
+ return requests;
+}
+
diff --git a/lib/remote/pkiutility.hpp b/lib/remote/pkiutility.hpp
new file mode 100644
index 0000000..50d47e0
--- /dev/null
+++ b/lib/remote/pkiutility.hpp
@@ -0,0 +1,41 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef PKIUTILITY_H
+#define PKIUTILITY_H
+
+#include "remote/i2-remote.hpp"
+#include "base/exception.hpp"
+#include "base/dictionary.hpp"
+#include "base/string.hpp"
+#include <openssl/x509v3.h>
+#include <memory>
+
+namespace icinga
+{
+
+/**
+ * @ingroup remote
+ */
+class PkiUtility
+{
+public:
+ static int NewCa();
+ static int NewCert(const String& cn, const String& keyfile, const String& csrfile, const String& certfile);
+ static int SignCsr(const String& csrfile, const String& certfile);
+ static std::shared_ptr<X509> FetchCert(const String& host, const String& port);
+ static int WriteCert(const std::shared_ptr<X509>& cert, const String& trustedfile);
+ static int GenTicket(const String& cn, const String& salt, std::ostream& ticketfp);
+ static int RequestCertificate(const String& host, const String& port, const String& keyfile,
+ const String& certfile, const String& cafile, const std::shared_ptr<X509>& trustedcert,
+ const String& ticket = String());
+ static String GetCertificateInformation(const std::shared_ptr<X509>& certificate);
+ static Dictionary::Ptr GetCertificateRequests(bool removed = false);
+
+private:
+ PkiUtility();
+
+};
+
+}
+
+#endif /* PKIUTILITY_H */
diff --git a/lib/remote/statushandler.cpp b/lib/remote/statushandler.cpp
new file mode 100644
index 0000000..1f3f618
--- /dev/null
+++ b/lib/remote/statushandler.cpp
@@ -0,0 +1,120 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/statushandler.hpp"
+#include "remote/httputility.hpp"
+#include "remote/filterutility.hpp"
+#include "base/serializer.hpp"
+#include "base/statsfunction.hpp"
+#include "base/namespace.hpp"
+
+using namespace icinga;
+
+REGISTER_URLHANDLER("/v1/status", StatusHandler);
+
+class StatusTargetProvider final : public TargetProvider
+{
+public:
+ DECLARE_PTR_TYPEDEFS(StatusTargetProvider);
+
+ void FindTargets(const String& type,
+ const std::function<void (const Value&)>& addTarget) const override
+ {
+ Namespace::Ptr statsFunctions = ScriptGlobal::Get("StatsFunctions", &Empty);
+
+ if (statsFunctions) {
+ ObjectLock olock(statsFunctions);
+
+ for (const Namespace::Pair& kv : statsFunctions)
+ addTarget(GetTargetByName("Status", kv.first));
+ }
+ }
+
+ Value GetTargetByName(const String& type, const String& name) const override
+ {
+ Namespace::Ptr statsFunctions = ScriptGlobal::Get("StatsFunctions", &Empty);
+
+ if (!statsFunctions)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("No status functions are available."));
+
+ Value vfunc;
+
+ if (!statsFunctions->Get(name, &vfunc))
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid status function name."));
+
+ Function::Ptr func = vfunc;
+
+ if (!func)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid status function name."));
+
+ Dictionary::Ptr status = new Dictionary();
+ Array::Ptr perfdata = new Array();
+ func->Invoke({ status, perfdata });
+
+ return new Dictionary({
+ { "name", name },
+ { "status", status },
+ { "perfdata", Serialize(perfdata, FAState) }
+ });
+ }
+
+ bool IsValidType(const String& type) const override
+ {
+ return type == "Status";
+ }
+
+ String GetPluralName(const String& type) const override
+ {
+ return "statuses";
+ }
+};
+
+bool StatusHandler::HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+)
+{
+ namespace http = boost::beast::http;
+
+ if (url->GetPath().size() > 3)
+ return false;
+
+ if (request.method() != http::verb::get)
+ return false;
+
+ QueryDescription qd;
+ qd.Types.insert("Status");
+ qd.Provider = new StatusTargetProvider();
+ qd.Permission = "status/query";
+
+ params->Set("type", "Status");
+
+ if (url->GetPath().size() >= 3)
+ params->Set("status", url->GetPath()[2]);
+
+ std::vector<Value> objs;
+
+ try {
+ objs = FilterUtility::GetFilterTargets(qd, params, user);
+ } catch (const std::exception& ex) {
+ HttpUtility::SendJsonError(response, params, 404,
+ "No objects found.",
+ DiagnosticInformation(ex));
+ return true;
+ }
+
+ Dictionary::Ptr result = new Dictionary({
+ { "results", new Array(std::move(objs)) }
+ });
+
+ response.result(http::status::ok);
+ HttpUtility::SendJsonBody(response, params, result);
+
+ return true;
+}
+
diff --git a/lib/remote/statushandler.hpp b/lib/remote/statushandler.hpp
new file mode 100644
index 0000000..c722ab3
--- /dev/null
+++ b/lib/remote/statushandler.hpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef STATUSHANDLER_H
+#define STATUSHANDLER_H
+
+#include "remote/httphandler.hpp"
+
+namespace icinga
+{
+
+class StatusHandler final : public HttpHandler
+{
+public:
+ DECLARE_PTR_TYPEDEFS(StatusHandler);
+
+ bool HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+ ) override;
+};
+
+}
+
+#endif /* STATUSHANDLER_H */
diff --git a/lib/remote/templatequeryhandler.cpp b/lib/remote/templatequeryhandler.cpp
new file mode 100644
index 0000000..e70dafb
--- /dev/null
+++ b/lib/remote/templatequeryhandler.cpp
@@ -0,0 +1,136 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/templatequeryhandler.hpp"
+#include "remote/httputility.hpp"
+#include "remote/filterutility.hpp"
+#include "config/configitem.hpp"
+#include "base/configtype.hpp"
+#include "base/scriptglobal.hpp"
+#include "base/logger.hpp"
+#include <boost/algorithm/string/case_conv.hpp>
+#include <set>
+
+using namespace icinga;
+
+REGISTER_URLHANDLER("/v1/templates", TemplateQueryHandler);
+
+class TemplateTargetProvider final : public TargetProvider
+{
+public:
+ DECLARE_PTR_TYPEDEFS(TemplateTargetProvider);
+
+ static Dictionary::Ptr GetTargetForTemplate(const ConfigItem::Ptr& item)
+ {
+ DebugInfo di = item->GetDebugInfo();
+
+ return new Dictionary({
+ { "name", item->GetName() },
+ { "type", item->GetType()->GetName() },
+ { "location", new Dictionary({
+ { "path", di.Path },
+ { "first_line", di.FirstLine },
+ { "first_column", di.FirstColumn },
+ { "last_line", di.LastLine },
+ { "last_column", di.LastColumn }
+ }) }
+ });
+ }
+
+ void FindTargets(const String& type,
+ const std::function<void (const Value&)>& addTarget) const override
+ {
+ Type::Ptr ptype = Type::GetByName(type);
+
+ for (const ConfigItem::Ptr& item : ConfigItem::GetItems(ptype)) {
+ if (item->IsAbstract())
+ addTarget(GetTargetForTemplate(item));
+ }
+ }
+
+ Value GetTargetByName(const String& type, const String& name) const override
+ {
+ Type::Ptr ptype = Type::GetByName(type);
+
+ ConfigItem::Ptr item = ConfigItem::GetByTypeAndName(ptype, name);
+
+ if (!item || !item->IsAbstract())
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Template does not exist."));
+
+ return GetTargetForTemplate(item);
+ }
+
+ bool IsValidType(const String& type) const override
+ {
+ Type::Ptr ptype = Type::GetByName(type);
+
+ if (!ptype)
+ return false;
+
+ return ConfigObject::TypeInstance->IsAssignableFrom(ptype);
+ }
+
+ String GetPluralName(const String& type) const override
+ {
+ return Type::GetByName(type)->GetPluralName();
+ }
+};
+
+bool TemplateQueryHandler::HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+)
+{
+ namespace http = boost::beast::http;
+
+ if (url->GetPath().size() < 3 || url->GetPath().size() > 4)
+ return false;
+
+ if (request.method() != http::verb::get)
+ return false;
+
+ Type::Ptr type = FilterUtility::TypeFromPluralName(url->GetPath()[2]);
+
+ if (!type) {
+ HttpUtility::SendJsonError(response, params, 400, "Invalid type specified.");
+ return true;
+ }
+
+ QueryDescription qd;
+ qd.Types.insert(type->GetName());
+ qd.Permission = "templates/query/" + type->GetName();
+ qd.Provider = new TemplateTargetProvider();
+
+ params->Set("type", type->GetName());
+
+ if (url->GetPath().size() >= 4) {
+ String attr = type->GetName();
+ boost::algorithm::to_lower(attr);
+ params->Set(attr, url->GetPath()[3]);
+ }
+
+ std::vector<Value> objs;
+
+ try {
+ objs = FilterUtility::GetFilterTargets(qd, params, user, "tmpl");
+ } catch (const std::exception& ex) {
+ HttpUtility::SendJsonError(response, params, 404,
+ "No templates found.",
+ DiagnosticInformation(ex));
+ return true;
+ }
+
+ Dictionary::Ptr result = new Dictionary({
+ { "results", new Array(std::move(objs)) }
+ });
+
+ response.result(http::status::ok);
+ HttpUtility::SendJsonBody(response, params, result);
+
+ return true;
+}
diff --git a/lib/remote/templatequeryhandler.hpp b/lib/remote/templatequeryhandler.hpp
new file mode 100644
index 0000000..503bc85
--- /dev/null
+++ b/lib/remote/templatequeryhandler.hpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef TEMPLATEQUERYHANDLER_H
+#define TEMPLATEQUERYHANDLER_H
+
+#include "remote/httphandler.hpp"
+
+namespace icinga
+{
+
+class TemplateQueryHandler final : public HttpHandler
+{
+public:
+ DECLARE_PTR_TYPEDEFS(TemplateQueryHandler);
+
+ bool HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+ ) override;
+};
+
+}
+
+#endif /* TEMPLATEQUERYHANDLER_H */
diff --git a/lib/remote/typequeryhandler.cpp b/lib/remote/typequeryhandler.cpp
new file mode 100644
index 0000000..4e82653
--- /dev/null
+++ b/lib/remote/typequeryhandler.cpp
@@ -0,0 +1,156 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/typequeryhandler.hpp"
+#include "remote/httputility.hpp"
+#include "remote/filterutility.hpp"
+#include "base/configtype.hpp"
+#include "base/scriptglobal.hpp"
+#include "base/logger.hpp"
+#include <set>
+
+using namespace icinga;
+
+REGISTER_URLHANDLER("/v1/types", TypeQueryHandler);
+
+class TypeTargetProvider final : public TargetProvider
+{
+public:
+ DECLARE_PTR_TYPEDEFS(TypeTargetProvider);
+
+ void FindTargets(const String& type,
+ const std::function<void (const Value&)>& addTarget) const override
+ {
+ for (const Type::Ptr& target : Type::GetAllTypes()) {
+ addTarget(target);
+ }
+ }
+
+ Value GetTargetByName(const String& type, const String& name) const override
+ {
+ Type::Ptr ptype = Type::GetByName(name);
+
+ if (!ptype)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Type does not exist."));
+
+ return ptype;
+ }
+
+ bool IsValidType(const String& type) const override
+ {
+ return type == "Type";
+ }
+
+ String GetPluralName(const String& type) const override
+ {
+ return "types";
+ }
+};
+
+bool TypeQueryHandler::HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+)
+{
+ namespace http = boost::beast::http;
+
+ if (url->GetPath().size() > 3)
+ return false;
+
+ if (request.method() != http::verb::get)
+ return false;
+
+ QueryDescription qd;
+ qd.Types.insert("Type");
+ qd.Permission = "types";
+ qd.Provider = new TypeTargetProvider();
+
+ if (params->Contains("type"))
+ params->Set("name", params->Get("type"));
+
+ params->Set("type", "Type");
+
+ if (url->GetPath().size() >= 3)
+ params->Set("name", url->GetPath()[2]);
+
+ std::vector<Value> objs;
+
+ try {
+ objs = FilterUtility::GetFilterTargets(qd, params, user);
+ } catch (const std::exception& ex) {
+ HttpUtility::SendJsonError(response, params, 404,
+ "No objects found.",
+ DiagnosticInformation(ex));
+ return true;
+ }
+
+ ArrayData results;
+
+ for (const Type::Ptr& obj : objs) {
+ Dictionary::Ptr result1 = new Dictionary();
+ results.push_back(result1);
+
+ Dictionary::Ptr resultAttrs = new Dictionary();
+ result1->Set("name", obj->GetName());
+ result1->Set("plural_name", obj->GetPluralName());
+ if (obj->GetBaseType())
+ result1->Set("base", obj->GetBaseType()->GetName());
+ result1->Set("abstract", obj->IsAbstract());
+ result1->Set("fields", resultAttrs);
+
+ Dictionary::Ptr prototype = dynamic_pointer_cast<Dictionary>(obj->GetPrototype());
+ Array::Ptr prototypeKeys = new Array();
+ result1->Set("prototype_keys", prototypeKeys);
+
+ if (prototype) {
+ ObjectLock olock(prototype);
+ for (const Dictionary::Pair& kv : prototype) {
+ prototypeKeys->Add(kv.first);
+ }
+ }
+
+ int baseFieldCount = 0;
+
+ if (obj->GetBaseType())
+ baseFieldCount = obj->GetBaseType()->GetFieldCount();
+
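+		// Fields inherited from the base type occupy the ids below baseFieldCount, so this loop
+		// only lists the fields declared by the type itself.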
+ for (int fid = baseFieldCount; fid < obj->GetFieldCount(); fid++) {
+ Field field = obj->GetFieldInfo(fid);
+
+ Dictionary::Ptr fieldInfo = new Dictionary();
+ resultAttrs->Set(field.Name, fieldInfo);
+
+ fieldInfo->Set("id", fid);
+ fieldInfo->Set("type", field.TypeName);
+ if (field.RefTypeName)
+ fieldInfo->Set("ref_type", field.RefTypeName);
+ if (field.Attributes & FANavigation)
+ fieldInfo->Set("navigation_name", field.NavigationName);
+ fieldInfo->Set("array_rank", field.ArrayRank);
+
+ fieldInfo->Set("attributes", new Dictionary({
+ { "config", static_cast<bool>(field.Attributes & FAConfig) },
+ { "state", static_cast<bool>(field.Attributes & FAState) },
+ { "required", static_cast<bool>(field.Attributes & FARequired) },
+ { "navigation", static_cast<bool>(field.Attributes & FANavigation) },
+ { "no_user_modify", static_cast<bool>(field.Attributes & FANoUserModify) },
+ { "no_user_view", static_cast<bool>(field.Attributes & FANoUserView) },
+ { "deprecated", static_cast<bool>(field.Attributes & FADeprecated) }
+ }));
+ }
+ }
+
+ Dictionary::Ptr result = new Dictionary({
+ { "results", new Array(std::move(results)) }
+ });
+
+ response.result(http::status::ok);
+ HttpUtility::SendJsonBody(response, params, result);
+
+ return true;
+}
diff --git a/lib/remote/typequeryhandler.hpp b/lib/remote/typequeryhandler.hpp
new file mode 100644
index 0000000..5489cb2
--- /dev/null
+++ b/lib/remote/typequeryhandler.hpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef TYPEQUERYHANDLER_H
+#define TYPEQUERYHANDLER_H
+
+#include "remote/httphandler.hpp"
+
+namespace icinga
+{
+
+class TypeQueryHandler final : public HttpHandler
+{
+public:
+ DECLARE_PTR_TYPEDEFS(TypeQueryHandler);
+
+ bool HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+ ) override;
+};
+
+}
+
+#endif /* TYPEQUERYHANDLER_H */
diff --git a/lib/remote/url-characters.hpp b/lib/remote/url-characters.hpp
new file mode 100644
index 0000000..3cc4921
--- /dev/null
+++ b/lib/remote/url-characters.hpp
@@ -0,0 +1,29 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef URL_CHARACTERS_H
+#define URL_CHARACTERS_H
+
+#define ALPHA "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+#define NUMERIC "0123456789"
+
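+// The character classes below loosely follow the URI grammar of RFC 3986
+// (unreserved / gen-delims / sub-delims / pchar).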
+#define UNRESERVED ALPHA NUMERIC "-._~" "%"
+#define GEN_DELIMS ":/?#[]@"
+#define SUB_DELIMS "!$&'()*+,;="
+#define PCHAR UNRESERVED SUB_DELIMS ":@"
+#define PCHAR_ENCODE UNRESERVED ":@"
+
+#define ACSCHEME ALPHA NUMERIC ".-+"
+
+//authority = [ userinfo "@" ] host [ ":" port ]
+#define ACUSERINFO UNRESERVED SUB_DELIMS
+#define ACHOST UNRESERVED SUB_DELIMS
+#define ACPORT NUMERIC
+
+#define ACPATHSEGMENT PCHAR
+#define ACPATHSEGMENT_ENCODE PCHAR_ENCODE
+#define ACQUERY PCHAR "/?"
+#define ACQUERY_ENCODE PCHAR_ENCODE "/?"
+#define ACFRAGMENT PCHAR "/?"
+#define ACFRAGMENT_ENCODE PCHAR_ENCODE "/?"
+
+#endif /* URL_CHARACTERS_H */
diff --git a/lib/remote/url.cpp b/lib/remote/url.cpp
new file mode 100644
index 0000000..e87628e
--- /dev/null
+++ b/lib/remote/url.cpp
@@ -0,0 +1,363 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/array.hpp"
+#include "base/utility.hpp"
+#include "base/objectlock.hpp"
+#include "remote/url.hpp"
+#include "remote/url-characters.hpp"
+#include <boost/tokenizer.hpp>
+
+using namespace icinga;
+
+Url::Url(const String& base_url)
+{
+ String url = base_url;
+
+ if (url.GetLength() == 0)
+		BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid URL: Empty URL."));
+
+ size_t pHelper = String::NPos;
+ if (url[0] != '/')
+ pHelper = url.Find(":");
+
+ if (pHelper != String::NPos) {
+ if (!ParseScheme(url.SubStr(0, pHelper)))
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid URL Scheme."));
+ url = url.SubStr(pHelper + 1);
+ }
+
+ if (*url.Begin() != '/')
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid URL: '/' expected after scheme."));
+
+ if (url.GetLength() == 1) {
+ return;
+ }
+
+ if (*(url.Begin() + 1) == '/') {
+ pHelper = url.Find("/", 2);
+
+ if (pHelper == String::NPos)
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid URL: Missing '/' after authority."));
+
+ if (!ParseAuthority(url.SubStr(0, pHelper)))
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid URL Authority"));
+
+ url = url.SubStr(pHelper);
+ }
+
+ if (*url.Begin() == '/') {
+ pHelper = url.FindFirstOf("#?");
+ if (!ParsePath(url.SubStr(1, pHelper - 1)))
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid URL Path"));
+
+ if (pHelper != String::NPos)
+ url = url.SubStr(pHelper);
+ } else
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid URL: Missing path."));
+
+ if (*url.Begin() == '?') {
+ pHelper = url.Find("#");
+ if (!ParseQuery(url.SubStr(1, pHelper - 1)))
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid URL Query"));
+
+ if (pHelper != String::NPos)
+ url = url.SubStr(pHelper);
+ }
+
+ if (*url.Begin() == '#') {
+ if (!ParseFragment(url.SubStr(1)))
+ BOOST_THROW_EXCEPTION(std::invalid_argument("Invalid URL Fragment"));
+ }
+}
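+
+/* Illustrative decomposition of a hypothetical URL, matching the parsing order above:
+ *   "https://user:pass@localhost:5665/v1/objects/hosts?attrs=name#frag"
+ *   -> scheme "https", username "user", password "pass", host "localhost", port "5665",
+ *      path {"v1", "objects", "hosts"}, query {{"attrs", "name"}}, fragment "frag"
+ */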
+
+String Url::GetScheme() const
+{
+ return m_Scheme;
+}
+
+String Url::GetAuthority() const
+{
+ if (m_Host.IsEmpty())
+ return "";
+
+ String auth;
+ if (!m_Username.IsEmpty()) {
+ auth = m_Username;
+ if (!m_Password.IsEmpty())
+ auth += ":" + m_Password;
+ auth += "@";
+ }
+
+ auth += m_Host;
+
+ if (!m_Port.IsEmpty())
+ auth += ":" + m_Port;
+
+ return auth;
+}
+
+String Url::GetUsername() const
+{
+ return m_Username;
+}
+
+String Url::GetPassword() const
+{
+ return m_Password;
+}
+
+String Url::GetHost() const
+{
+ return m_Host;
+}
+
+String Url::GetPort() const
+{
+ return m_Port;
+}
+
+const std::vector<String>& Url::GetPath() const
+{
+ return m_Path;
+}
+
+const std::vector<std::pair<String, String>>& Url::GetQuery() const
+{
+ return m_Query;
+}
+
+String Url::GetFragment() const
+{
+ return m_Fragment;
+}
+
+void Url::SetScheme(const String& scheme)
+{
+ m_Scheme = scheme;
+}
+
+void Url::SetUsername(const String& username)
+{
+ m_Username = username;
+}
+
+void Url::SetPassword(const String& password)
+{
+ m_Password = password;
+}
+
+void Url::SetHost(const String& host)
+{
+ m_Host = host;
+}
+
+void Url::SetPort(const String& port)
+{
+ m_Port = port;
+}
+
+void Url::SetPath(const std::vector<String>& path)
+{
+ m_Path = path;
+}
+
+void Url::SetQuery(const std::vector<std::pair<String, String>>& query)
+{
+ m_Query = query;
+}
+
+void Url::SetArrayFormatUseBrackets(bool useBrackets)
+{
+ m_ArrayFormatUseBrackets = useBrackets;
+}
+
+void Url::AddQueryElement(const String& name, const String& value)
+{
+ m_Query.emplace_back(name, value);
+}
+
+void Url::SetFragment(const String& fragment) {
+ m_Fragment = fragment;
+}
+
+String Url::Format(bool onlyPathAndQuery, bool printCredentials) const
+{
+ String url;
+
+ if (!onlyPathAndQuery) {
+ if (!m_Scheme.IsEmpty())
+ url += m_Scheme + ":";
+
+ if (printCredentials && !GetAuthority().IsEmpty())
+ url += "//" + GetAuthority();
+ else if (!GetHost().IsEmpty())
+ url += "//" + GetHost() + (!GetPort().IsEmpty() ? ":" + GetPort() : "");
+ }
+
+ if (m_Path.empty())
+ url += "/";
+ else {
+ for (const String& segment : m_Path) {
+ url += "/";
+ url += Utility::EscapeString(segment, ACPATHSEGMENT_ENCODE, false);
+ }
+ }
+
+ String param;
+ if (!m_Query.empty()) {
+ for (const auto& kv : m_Query) {
+ String key = Utility::EscapeString(kv.first, ACQUERY_ENCODE, false);
+ if (param.IsEmpty())
+ param = "?";
+ else
+ param += "&";
+
+ param += key;
+ param += kv.second.IsEmpty() ?
+ String() : "=" + Utility::EscapeString(kv.second, ACQUERY_ENCODE, false);
+ }
+ }
+
+ url += param;
+
+ if (!m_Fragment.IsEmpty())
+ url += "#" + Utility::EscapeString(m_Fragment, ACFRAGMENT_ENCODE, false);
+
+ return url;
+}
+
+bool Url::ParseScheme(const String& scheme)
+{
+ m_Scheme = scheme;
+
+ if (scheme.FindFirstOf(ALPHA) != 0)
+ return false;
+
+ return (ValidateToken(scheme, ACSCHEME));
+}
+
+bool Url::ParseAuthority(const String& authority)
+{
+ String auth = authority.SubStr(2);
+ size_t pos = auth.Find("@");
+ if (pos != String::NPos && pos != 0) {
+ if (!Url::ParseUserinfo(auth.SubStr(0, pos)))
+ return false;
+ auth = auth.SubStr(pos+1);
+ }
+
+ pos = auth.Find(":");
+ if (pos != String::NPos) {
+ if (pos == 0 || pos == auth.GetLength() - 1 || !Url::ParsePort(auth.SubStr(pos+1)))
+ return false;
+ }
+
+ m_Host = auth.SubStr(0, pos);
+ return ValidateToken(m_Host, ACHOST);
+}
+
+bool Url::ParseUserinfo(const String& userinfo)
+{
+ size_t pos = userinfo.Find(":");
+ m_Username = userinfo.SubStr(0, pos);
+ if (!ValidateToken(m_Username, ACUSERINFO))
+ return false;
+ m_Username = Utility::UnescapeString(m_Username);
+ if (pos != String::NPos && pos != userinfo.GetLength() - 1) {
+ m_Password = userinfo.SubStr(pos+1);
+		if (!ValidateToken(m_Password, ACUSERINFO))
+ return false;
+ m_Password = Utility::UnescapeString(m_Password);
+ } else
+ m_Password = "";
+
+ return true;
+}
+
+bool Url::ParsePort(const String& port)
+{
+ m_Port = Utility::UnescapeString(port);
+ if (!ValidateToken(m_Port, ACPORT))
+ return false;
+ return true;
+}
+
+bool Url::ParsePath(const String& path)
+{
+ const std::string& pathStr = path;
+ boost::char_separator<char> sep("/");
+ boost::tokenizer<boost::char_separator<char> > tokens(pathStr, sep);
+
+ for (const String& token : tokens) {
+ if (token.IsEmpty())
+ continue;
+
+ if (!ValidateToken(token, ACPATHSEGMENT))
+ return false;
+
+ m_Path.emplace_back(Utility::UnescapeString(token));
+ }
+
+ return true;
+}
+
+bool Url::ParseQuery(const String& query)
+{
+	/* boost::tokenizer does not work with icinga::String directly, so use the underlying std::string. */
+ const std::string& queryStr = query;
+ boost::char_separator<char> sep("&");
+ boost::tokenizer<boost::char_separator<char> > tokens(queryStr, sep);
+
+ for (const String& token : tokens) {
+ size_t pHelper = token.Find("=");
+
+ if (pHelper == 0)
+ // /?foo=bar&=bar == invalid
+ return false;
+
+ String key = token.SubStr(0, pHelper);
+ String value = Empty;
+
+ if (pHelper != String::NPos && pHelper != token.GetLength() - 1)
+ value = token.SubStr(pHelper+1);
+
+ if (!ValidateToken(value, ACQUERY))
+ return false;
+
+ value = Utility::UnescapeString(value);
+
+ pHelper = key.Find("[]");
+
+ if (pHelper == 0 || (pHelper != String::NPos && pHelper != key.GetLength()-2))
+ return false;
+
+ key = key.SubStr(0, pHelper);
+
+ if (!ValidateToken(key, ACQUERY))
+ return false;
+
+ m_Query.emplace_back(Utility::UnescapeString(key), std::move(value));
+ }
+
+ return true;
+}
+
+bool Url::ParseFragment(const String& fragment)
+{
+ m_Fragment = Utility::UnescapeString(fragment);
+
+ return ValidateToken(fragment, ACFRAGMENT);
+}
+
+bool Url::ValidateToken(const String& token, const String& symbols)
+{
+ for (const char ch : token) {
+ if (symbols.FindFirstOf(ch) == String::NPos)
+ return false;
+ }
+
+ return true;
+}
+
diff --git a/lib/remote/url.hpp b/lib/remote/url.hpp
new file mode 100644
index 0000000..6012b2f
--- /dev/null
+++ b/lib/remote/url.hpp
@@ -0,0 +1,78 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef URL_H
+#define URL_H
+
+#include "remote/i2-remote.hpp"
+#include "base/object.hpp"
+#include "base/string.hpp"
+#include "base/array.hpp"
+#include "base/value.hpp"
+#include <map>
+#include <utility>
+#include <vector>
+
+namespace icinga
+{
+
+/**
+ * A URL class for use with the API
+ *
+ * @ingroup base
+ */
+class Url final : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(Url);
+
+ Url() = default;
+ Url(const String& url);
+
+ String Format(bool onlyPathAndQuery = false, bool printCredentials = false) const;
+
+ String GetScheme() const;
+ String GetAuthority() const;
+ String GetUsername() const;
+ String GetPassword() const;
+ String GetHost() const;
+ String GetPort() const;
+ const std::vector<String>& GetPath() const;
+ const std::vector<std::pair<String, String>>& GetQuery() const;
+ String GetFragment() const;
+
+ void SetScheme(const String& scheme);
+ void SetUsername(const String& username);
+ void SetPassword(const String& password);
+ void SetHost(const String& host);
+ void SetPort(const String& port);
+ void SetPath(const std::vector<String>& path);
+ void SetQuery(const std::vector<std::pair<String, String>>& query);
+ void SetArrayFormatUseBrackets(bool useBrackets = true);
+
+ void AddQueryElement(const String& name, const String& query);
+ void SetFragment(const String& fragment);
+
+private:
+ String m_Scheme;
+ String m_Username;
+ String m_Password;
+ String m_Host;
+ String m_Port;
+ std::vector<String> m_Path;
+ std::vector<std::pair<String, String>> m_Query;
+ bool m_ArrayFormatUseBrackets;
+ String m_Fragment;
+
+ bool ParseScheme(const String& scheme);
+ bool ParseAuthority(const String& authority);
+ bool ParseUserinfo(const String& userinfo);
+ bool ParsePort(const String& port);
+ bool ParsePath(const String& path);
+ bool ParseQuery(const String& query);
+ bool ParseFragment(const String& fragment);
+
+ static bool ValidateToken(const String& token, const String& symbols);
+};
+
+}
+#endif /* URL_H */
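
A minimal usage sketch for the Url class declared above, using only members visible in url.hpp; the host, credentials and path are illustrative values, not taken from the patch:

    #include "remote/url.hpp"
    #include <iostream>

    using namespace icinga;

    int main()
    {
        // Parse an absolute URL into its components.
        Url::Ptr url = new Url("https://user:secret@example.org:5665/v1/objects/hosts?attrs=name#frag");

        std::cout << url->GetScheme() << '\n';   // "https"
        std::cout << url->GetHost() << '\n';     // "example.org"
        std::cout << url->GetPort() << '\n';     // "5665"
        std::cout << url->GetPath()[1] << '\n';  // "objects" (segments are split on '/' and unescaped)

        // Format() reassembles the URL; credentials are only printed when requested.
        std::cout << url->Format(false, true) << '\n';
        return 0;
    }
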
diff --git a/lib/remote/variablequeryhandler.cpp b/lib/remote/variablequeryhandler.cpp
new file mode 100644
index 0000000..50c0e78
--- /dev/null
+++ b/lib/remote/variablequeryhandler.cpp
@@ -0,0 +1,121 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/variablequeryhandler.hpp"
+#include "remote/httputility.hpp"
+#include "remote/filterutility.hpp"
+#include "base/configtype.hpp"
+#include "base/scriptglobal.hpp"
+#include "base/logger.hpp"
+#include "base/serializer.hpp"
+#include "base/namespace.hpp"
+#include <set>
+
+using namespace icinga;
+
+REGISTER_URLHANDLER("/v1/variables", VariableQueryHandler);
+
+class VariableTargetProvider final : public TargetProvider
+{
+public:
+ DECLARE_PTR_TYPEDEFS(VariableTargetProvider);
+
+ static Dictionary::Ptr GetTargetForVar(const String& name, const Value& value)
+ {
+ return new Dictionary({
+ { "name", name },
+ { "type", value.GetReflectionType()->GetName() },
+ { "value", value }
+ });
+ }
+
+ void FindTargets(const String& type,
+ const std::function<void (const Value&)>& addTarget) const override
+ {
+ {
+ Namespace::Ptr globals = ScriptGlobal::GetGlobals();
+ ObjectLock olock(globals);
+ for (const Namespace::Pair& kv : globals) {
+ addTarget(GetTargetForVar(kv.first, kv.second.Val));
+ }
+ }
+ }
+
+ Value GetTargetByName(const String& type, const String& name) const override
+ {
+ return GetTargetForVar(name, ScriptGlobal::Get(name));
+ }
+
+ bool IsValidType(const String& type) const override
+ {
+ return type == "Variable";
+ }
+
+ String GetPluralName(const String& type) const override
+ {
+ return "variables";
+ }
+};
+
+bool VariableQueryHandler::HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+)
+{
+ namespace http = boost::beast::http;
+
+ if (url->GetPath().size() > 3)
+ return false;
+
+ if (request.method() != http::verb::get)
+ return false;
+
+ QueryDescription qd;
+ qd.Types.insert("Variable");
+ qd.Permission = "variables";
+ qd.Provider = new VariableTargetProvider();
+
+ params->Set("type", "Variable");
+
+ if (url->GetPath().size() >= 3)
+ params->Set("variable", url->GetPath()[2]);
+
+ std::vector<Value> objs;
+
+ try {
+ objs = FilterUtility::GetFilterTargets(qd, params, user, "variable");
+ } catch (const std::exception& ex) {
+ HttpUtility::SendJsonError(response, params, 404,
+ "No variables found.",
+ DiagnosticInformation(ex));
+ return true;
+ }
+
+ ArrayData results;
+
+ for (const Dictionary::Ptr& var : objs) {
+ if (var->Get("name") == "TicketSalt")
+ continue;
+
+ results.emplace_back(new Dictionary({
+ { "name", var->Get("name") },
+ { "type", var->Get("type") },
+ { "value", Serialize(var->Get("value"), 0) }
+ }));
+ }
+
+ Dictionary::Ptr result = new Dictionary({
+ { "results", new Array(std::move(results)) }
+ });
+
+ response.result(http::status::ok);
+ HttpUtility::SendJsonBody(response, params, result);
+
+ return true;
+}
+
diff --git a/lib/remote/variablequeryhandler.hpp b/lib/remote/variablequeryhandler.hpp
new file mode 100644
index 0000000..48e73be
--- /dev/null
+++ b/lib/remote/variablequeryhandler.hpp
@@ -0,0 +1,30 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef VARIABLEQUERYHANDLER_H
+#define VARIABLEQUERYHANDLER_H
+
+#include "remote/httphandler.hpp"
+
+namespace icinga
+{
+
+class VariableQueryHandler final : public HttpHandler
+{
+public:
+ DECLARE_PTR_TYPEDEFS(VariableQueryHandler);
+
+ bool HandleRequest(
+ AsioTlsStream& stream,
+ const ApiUser::Ptr& user,
+ boost::beast::http::request<boost::beast::http::string_body>& request,
+ const Url::Ptr& url,
+ boost::beast::http::response<boost::beast::http::string_body>& response,
+ const Dictionary::Ptr& params,
+ boost::asio::yield_context& yc,
+ HttpServerConnection& server
+ ) override;
+};
+
+}
+
+#endif /* VARIABLEQUERYHANDLER_H */
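
To illustrate how VariableQueryHandler consumes the request URL, a short sketch using the Url class added in this patch; the host name and variable are hypothetical:

    Url::Ptr url = new Url("https://icinga-master:5665/v1/variables/NodeName");

    // url->GetPath() yields { "v1", "variables", "NodeName" }.
    // HandleRequest() rejects non-GET requests and paths with more than three
    // segments; a third segment, if present, is copied into the "variable"
    // filter parameter so that only that global variable is returned.
    // The TicketSalt variable is always omitted from the results.
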
diff --git a/lib/remote/zone.cpp b/lib/remote/zone.cpp
new file mode 100644
index 0000000..5ae1468
--- /dev/null
+++ b/lib/remote/zone.cpp
@@ -0,0 +1,154 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "remote/zone.hpp"
+#include "remote/zone-ti.cpp"
+#include "remote/jsonrpcconnection.hpp"
+#include "base/array.hpp"
+#include "base/objectlock.hpp"
+#include "base/logger.hpp"
+
+using namespace icinga;
+
+REGISTER_TYPE(Zone);
+
+void Zone::OnAllConfigLoaded()
+{
+ ObjectImpl<Zone>::OnAllConfigLoaded();
+
+ m_Parent = Zone::GetByName(GetParentRaw());
+
+ if (m_Parent && m_Parent->IsGlobal())
+ BOOST_THROW_EXCEPTION(ScriptError("Zone '" + GetName() + "' cannot have a global zone as parent.", GetDebugInfo()));
+
+ Zone::Ptr zone = m_Parent;
+ int levels = 0;
+
+ Array::Ptr endpoints = GetEndpointsRaw();
+
+ if (endpoints) {
+ ObjectLock olock(endpoints);
+ for (const String& endpoint : endpoints) {
+ Endpoint::Ptr ep = Endpoint::GetByName(endpoint);
+
+ if (ep)
+ ep->SetCachedZone(this);
+ }
+ }
+
+ while (zone) {
+ m_AllParents.push_back(zone);
+
+ zone = Zone::GetByName(zone->GetParentRaw());
+ levels++;
+
+ if (levels > 32)
+ BOOST_THROW_EXCEPTION(ScriptError("Infinite recursion detected while resolving zone graph. Check your zone hierarchy.", GetDebugInfo()));
+ }
+}
+
+Zone::Ptr Zone::GetParent() const
+{
+ return m_Parent;
+}
+
+std::set<Endpoint::Ptr> Zone::GetEndpoints() const
+{
+ std::set<Endpoint::Ptr> result;
+
+ Array::Ptr endpoints = GetEndpointsRaw();
+
+ if (endpoints) {
+ ObjectLock olock(endpoints);
+
+ for (const String& name : endpoints) {
+ Endpoint::Ptr endpoint = Endpoint::GetByName(name);
+
+ if (!endpoint)
+ continue;
+
+ result.insert(endpoint);
+ }
+ }
+
+ return result;
+}
+
+std::vector<Zone::Ptr> Zone::GetAllParentsRaw() const
+{
+ return m_AllParents;
+}
+
+Array::Ptr Zone::GetAllParents() const
+{
+ auto result (new Array);
+
+ for (auto& parent : m_AllParents)
+ result->Add(parent->GetName());
+
+ return result;
+}
+
+bool Zone::CanAccessObject(const ConfigObject::Ptr& object)
+{
+ Zone::Ptr object_zone;
+
+ if (object->GetReflectionType() == Zone::TypeInstance)
+ object_zone = static_pointer_cast<Zone>(object);
+ else
+ object_zone = static_pointer_cast<Zone>(object->GetZone());
+
+ if (!object_zone)
+ object_zone = Zone::GetLocalZone();
+
+ if (object_zone->GetGlobal())
+ return true;
+
+ return object_zone->IsChildOf(this);
+}
+
+bool Zone::IsChildOf(const Zone::Ptr& zone)
+{
+ Zone::Ptr azone = this;
+
+ while (azone) {
+ if (azone == zone)
+ return true;
+
+ azone = azone->GetParent();
+ }
+
+ return false;
+}
+
+bool Zone::IsGlobal() const
+{
+ return GetGlobal();
+}
+
+bool Zone::IsSingleInstance() const
+{
+ Array::Ptr endpoints = GetEndpointsRaw();
+ return !endpoints || endpoints->GetLength() < 2;
+}
+
+Zone::Ptr Zone::GetLocalZone()
+{
+ Endpoint::Ptr local = Endpoint::GetLocalEndpoint();
+
+ if (!local)
+ return nullptr;
+
+ return local->GetZone();
+}
+
+void Zone::ValidateEndpointsRaw(const Lazy<Array::Ptr>& lvalue, const ValidationUtils& utils)
+{
+ ObjectImpl<Zone>::ValidateEndpointsRaw(lvalue, utils);
+
+ if (lvalue() && lvalue()->GetLength() > 2) {
+ Log(LogWarning, "Zone")
+ << "The Zone object '" << GetName() << "' has more than two endpoints."
+ << " Due to a known issue this type of configuration is strongly"
+ << " discouraged and may cause Icinga to use excessive amounts of CPU time.";
+ }
+}
diff --git a/lib/remote/zone.hpp b/lib/remote/zone.hpp
new file mode 100644
index 0000000..897b18e
--- /dev/null
+++ b/lib/remote/zone.hpp
@@ -0,0 +1,46 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef ZONE_H
+#define ZONE_H
+
+#include "remote/i2-remote.hpp"
+#include "remote/zone-ti.hpp"
+#include "remote/endpoint.hpp"
+
+namespace icinga
+{
+
+/**
+ * @ingroup remote
+ */
+class Zone final : public ObjectImpl<Zone>
+{
+public:
+ DECLARE_OBJECT(Zone);
+ DECLARE_OBJECTNAME(Zone);
+
+ void OnAllConfigLoaded() override;
+
+ Zone::Ptr GetParent() const;
+ std::set<Endpoint::Ptr> GetEndpoints() const;
+ std::vector<Zone::Ptr> GetAllParentsRaw() const;
+ Array::Ptr GetAllParents() const override;
+
+ bool CanAccessObject(const ConfigObject::Ptr& object);
+ bool IsChildOf(const Zone::Ptr& zone);
+ bool IsGlobal() const;
+ bool IsSingleInstance() const;
+
+ static Zone::Ptr GetLocalZone();
+
+protected:
+ void ValidateEndpointsRaw(const Lazy<Array::Ptr>& lvalue, const ValidationUtils& utils) override;
+
+private:
+ Zone::Ptr m_Parent;
+ std::vector<Zone::Ptr> m_AllParents;
+};
+
+}
+
+#endif /* ZONE_H */
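
A short sketch of how the Zone hierarchy above is typically queried from other remote code; the zone name "satellite" is hypothetical:

    Zone::Ptr local = Zone::GetLocalZone();         // zone of the local Endpoint, may be nullptr
    Zone::Ptr other = Zone::GetByName("satellite"); // hypothetical zone name

    if (local && other) {
        // True if 'local' occurs in 'other's parent chain (or both are the same zone).
        bool below = other->IsChildOf(local);

        // CanAccessObject() applies the same rule to arbitrary config objects:
        // global zones are always accessible, otherwise the object's zone must
        // be a child of (or equal to) this zone.
        bool accessible = local->CanAccessObject(other);
    }
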
diff --git a/lib/remote/zone.ti b/lib/remote/zone.ti
new file mode 100644
index 0000000..25f6a64
--- /dev/null
+++ b/lib/remote/zone.ti
@@ -0,0 +1,25 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/configobject.hpp"
+
+library remote;
+
+namespace icinga
+{
+
+class Zone : ConfigObject
+{
+ [config, no_user_modify, navigation] name(Zone) parent (ParentRaw) {
+ navigate {{{
+ return Zone::GetByName(GetParentRaw());
+ }}}
+ };
+
+ [config] array(name(Endpoint)) endpoints (EndpointsRaw);
+ [config] bool global;
+ [no_user_modify, no_storage] array(Value) all_parents {
+ get;
+ };
+};
+
+}
diff --git a/mkdocs.yml b/mkdocs.yml
new file mode 100644
index 0000000..aaee8d5
--- /dev/null
+++ b/mkdocs.yml
@@ -0,0 +1,33 @@
+site_name: Icinga 2
+docs_dir: doc
+dev_addr: 0.0.0.0:8000
+pages:
+ - 'About Icinga 2': '01-about.md'
+ - 'Installation': '02-installation.md'
+ - 'Monitoring Basics': '03-monitoring-basics.md'
+ - 'Configuration': '04-configuration.md'
+ - 'Service Monitoring': '05-service-monitoring.md'
+ - 'Distributed Monitoring': '06-distributed-monitoring.md'
+ - 'Agent Based Monitoring': '07-agent-based-monitoring.md'
+ - 'Advanced Topics': '08-advanced-topics.md'
+ - 'Object Types': '09-object-types.md'
+ - 'Icinga Template Library': '10-icinga-template-library.md'
+ - 'CLI Commands': '11-cli-commands.md'
+ - 'Icinga 2 API': '12-icinga2-api.md'
+ - 'Addons': '13-addons.md'
+ - 'Features': '14-features.md'
+ - 'Troubleshooting': '15-troubleshooting.md'
+ - 'Upgrading Icinga 2': '16-upgrading-icinga-2.md'
+ - 'Language Reference': '17-language-reference.md'
+ - 'Library Reference': '18-library-reference.md'
+ - 'Technical Concepts': '19-technical-concepts.md'
+ - 'Script Debugger': '20-script-debugger.md'
+ - 'Development': '21-development.md'
+ - 'SELinux': '22-selinux.md'
+ - 'Migrating from Icinga 1.x': '23-migrating-from-icinga-1x.md'
+ - 'Appendix': '24-appendix.md'
+theme: readthedocs
+markdown_extensions:
+ - smarty
+extra_javascript:
+ - scroll.js
diff --git a/plugins/CMakeLists.txt b/plugins/CMakeLists.txt
new file mode 100644
index 0000000..27fddec
--- /dev/null
+++ b/plugins/CMakeLists.txt
@@ -0,0 +1,69 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+add_executable(check_nscp_api
+ check_nscp_api.cpp
+ ${base_OBJS}
+ $<TARGET_OBJECTS:config>
+ $<TARGET_OBJECTS:remote>
+)
+target_link_libraries(check_nscp_api ${base_DEPS})
+set_target_properties (
+ check_nscp_api PROPERTIES
+ DEFINE_SYMBOL I2_PLUGINS_BUILD
+ FOLDER Plugins)
+
+# Prefer the PluginDir constant which is set to /sbin on Windows
+
+if(WIN32)
+ install(TARGETS check_nscp_api RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR})
+else()
+ install(TARGETS check_nscp_api RUNTIME DESTINATION ${ICINGA2_PLUGINDIR})
+endif()
+
+if (WIN32)
+ add_definitions(-DUNICODE -D_UNICODE)
+
+ set(thresholds_SOURCES
+ thresholds.cpp thresholds.hpp
+ )
+
+ add_library(thresholds ${thresholds_SOURCES})
+
+ set_target_properties(
+ thresholds PROPERTIES
+ FOLDER Plugins
+ )
+
+ set(check_SOURCES
+ check_disk.cpp check_load.cpp check_memory.cpp check_network.cpp check_perfmon.cpp
+ check_ping.cpp check_procs.cpp check_service.cpp check_swap.cpp check_update.cpp check_uptime.cpp
+ check_users.cpp
+ )
+
+ foreach(source ${check_SOURCES})
+ string(REGEX REPLACE ".cpp\$" "" check_OUT "${source}")
+
+ add_executable(${check_OUT} ${source})
+ target_link_libraries(${check_OUT} thresholds shlwapi.lib ${Boost_PROGRAM_OPTIONS_LIBRARY})
+
+ set_target_properties(
+ ${check_OUT} PROPERTIES
+ DEFINE_SYMBOL I2_PLUGINS_BUILD
+ FOLDER Plugins
+ )
+ endforeach()
+
+ target_link_libraries(check_load pdh.lib)
+ target_link_libraries(check_network pdh.lib iphlpapi.lib)
+ target_link_libraries(check_perfmon pdh.lib)
+ target_link_libraries(check_ping ntdll.lib iphlpapi.lib ws2_32.lib)
+ target_link_libraries(check_procs pdh.lib)
+ target_link_libraries(check_uptime ${Boost_SYSTEM_LIBRARY})
+ target_link_libraries(check_users wtsapi32.lib)
+
+ install(
+ TARGETS check_disk check_load check_memory check_network check_perfmon check_procs
+ check_ping check_service check_swap check_update check_uptime check_users
+ RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR}
+ )
+endif()
diff --git a/plugins/check_disk.cpp b/plugins/check_disk.cpp
new file mode 100644
index 0000000..48f82ec
--- /dev/null
+++ b/plugins/check_disk.cpp
@@ -0,0 +1,443 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "plugins/thresholds.hpp"
+#include <boost/program_options.hpp>
+#include <vector>
+#include <windows.h>
+#include <set>
+#include <iostream>
+#include <functional>
+#include <shlwapi.h>
+#include <math.h>
+
+#define VERSION 1.1
+
+namespace po = boost::program_options;
+
+struct drive
+{
+ std::wstring name;
+ double cap;
+ double free;
+ double used;
+
+ drive(std::wstring p)
+ : name(p)
+ { }
+};
+
+struct printInfoStruct
+{
+ threshold warn;
+ threshold crit;
+ std::vector<std::wstring> drives;
+ std::vector<std::wstring> exclude_drives;
+ Bunit unit;
+ bool showUsed{false};
+};
+
+static bool l_Debug;
+
+static int check_drives(std::vector<drive>& vDrives, std::vector<std::wstring>& vExclude_Drives)
+{
+ DWORD dwResult, dwSize = 0, dwVolumePathNamesLen = MAX_PATH + 1;
+ WCHAR szLogicalDrives[1024], szVolumeName[MAX_PATH], *szVolumePathNames = NULL;
+ HANDLE hVolume = NULL;
+ std::wstring wsLogicalDrives;
+ size_t volumeNameEnd = 0;
+
+ std::set<std::wstring> sDrives;
+
+ if (l_Debug)
+ std::wcout << "Getting logical drive string (includes network drives)\n";
+
+ dwResult = GetLogicalDriveStrings(MAX_PATH, szLogicalDrives);
+ if (dwResult > MAX_PATH)
+ goto die;
+ if (l_Debug)
+ std::wcout << "Splitting string into single drive names\n";
+
+ LPTSTR szSingleDrive = szLogicalDrives;
+ while (*szSingleDrive) {
+ std::wstring drname = szSingleDrive;
+ sDrives.insert(drname);
+ szSingleDrive += wcslen(szSingleDrive) + 1;
+ if (l_Debug)
+ std::wcout << "Got: " << drname << '\n';
+ }
+
+ if (l_Debug)
+ std::wcout << "Getting volume mountpoints (includes NTFS folders)\n"
+ << "Getting first volume\n";
+
+ hVolume = FindFirstVolume(szVolumeName, MAX_PATH);
+ if (hVolume == INVALID_HANDLE_VALUE)
+ goto die;
+
+ if (l_Debug)
+ std::wcout << "Traversing through list of drives\n";
+
+ while (GetLastError() != ERROR_NO_MORE_FILES) {
+ if (l_Debug)
+ std::wcout << "Path name for " << szVolumeName << "= \"";
+ volumeNameEnd = wcslen(szVolumeName) - 1;
+ szVolumePathNames = new WCHAR[dwVolumePathNamesLen];
+
+ while (!GetVolumePathNamesForVolumeName(szVolumeName, szVolumePathNames, dwVolumePathNamesLen,
+ &dwVolumePathNamesLen)) {
+ if (GetLastError() != ERROR_MORE_DATA)
+ break;
+ delete[] szVolumePathNames;
+ szVolumePathNames = new WCHAR[dwVolumePathNamesLen];
+
+ }
+ if (l_Debug)
+ std::wcout << szVolumePathNames << "\"\n";
+
+ sDrives.insert(std::wstring(szVolumePathNames));
+ FindNextVolume(hVolume, szVolumeName, MAX_PATH);
+ }
+ if (l_Debug)
+ std::wcout << "Creating vector from found volumes, ignoring CD drives etc.:\n";
+ for (const auto& driveName : sDrives) {
+ unsigned int type = GetDriveType(driveName.c_str());
+ if (type == DRIVE_FIXED || type == DRIVE_REMOTE) {
+ if (l_Debug)
+ std::wcout << "\t" << driveName << '\n';
+ vDrives.push_back(drive(driveName));
+ }
+ }
+
+ FindVolumeClose(hVolume);
+ if (szVolumePathNames)
+ delete[] szVolumePathNames;
+
+ if (l_Debug)
+ std::wcout << "Removing excluded drives\n";
+
+ for (const auto& driveName : vExclude_Drives) {
+ vDrives.erase(std::remove_if(vDrives.begin(), vDrives.end(),
+ [&driveName](const drive& d) { return d.name == driveName + L'\\'; }),
+ vDrives.end());
+ }
+
+ return -1;
+
+die:
+ if (hVolume)
+ FindVolumeClose(hVolume);
+ printErrorInfo();
+ return 3;
+}
+
+static int check_drives(std::vector<drive>& vDrives, printInfoStruct& printInfo)
+{
+ if (l_Debug)
+ std::wcout << "Removing excluded drives from user input drives\n";
+
+ for (const auto& driveName : printInfo.exclude_drives) {
+ printInfo.drives.erase(std::remove(printInfo.drives.begin(), printInfo.drives.end(), driveName),
+ printInfo.drives.end());
+ }
+
+ if (l_Debug)
+ std::wcout << "Parsing user input drive names\n";
+
+ for (auto& driveName : printInfo.drives) {
+ if (driveName.at(driveName.length() - 1) != *L"\\")
+ driveName.append(L"\\");
+
+ if (std::wstring::npos == driveName.find(L":\\")) {
+ std::wcout << "A \":\" is required after the drive name of " << driveName << '\n';
+ return 3;
+ }
+
+ if (l_Debug)
+ std::wcout << "Added " << driveName << '\n';
+
+ vDrives.emplace_back(driveName);
+ }
+
+ return -1;
+}
+
+static bool getDriveSpaceValues(drive& drive, const Bunit& unit)
+{
+ if (l_Debug)
+ std::wcout << "Getting free and used disk space for drive " << drive.name << '\n';
+
+ ULARGE_INTEGER tempFree, tempTotal;
+ if (!GetDiskFreeSpaceEx(drive.name.c_str(), NULL, &tempTotal, &tempFree))
+ return false;
+
+ ULARGE_INTEGER tempUsed;
+ tempUsed.QuadPart = tempTotal.QuadPart - tempFree.QuadPart;
+
+ if (l_Debug)
+ std::wcout << "\tcap: " << tempFree.QuadPart << '\n';
+
+ drive.cap = round((tempTotal.QuadPart / pow(1024.0, unit)));
+
+ if (l_Debug)
+ std::wcout << "\tAfter conversion: " << drive.cap << '\n'
+ << "\tfree: " << tempFree.QuadPart << '\n';
+
+ drive.free = round((tempFree.QuadPart / pow(1024.0, unit)));
+
+ if (l_Debug)
+ std::wcout << "\tAfter conversion: " << drive.free << '\n'
+ << "\tused: " << tempUsed.QuadPart << '\n';
+
+ drive.used = round((tempUsed.QuadPart / pow(1024.0, unit)));
+
+ if (l_Debug)
+ std::wcout << "\tAfter conversion: " << drive.used << '\n' << '\n';
+
+ return true;
+}
+
+static int parseArguments(int ac, WCHAR **av, po::variables_map& vm, printInfoStruct& printInfo)
+{
+ WCHAR namePath[MAX_PATH];
+ GetModuleFileName(NULL, namePath, MAX_PATH);
+ WCHAR *progName = PathFindFileName(namePath);
+
+ po::options_description desc("Options");
+
+ desc.add_options()
+ ("help,h", "Print usage message and exit")
+ ("version,V", "Print version and exit")
+ ("debug,d", "Verbose/Debug output")
+ ("warning,w", po::wvalue<std::wstring>(), "Warning threshold")
+ ("critical,c", po::wvalue<std::wstring>(), "Critical threshold")
+ ("path,p", po::wvalue<std::vector<std::wstring>>()->multitoken(), "Declare explicitly which drives to check (default checks all)")
+ ("exclude_device,x", po::wvalue<std::vector<std::wstring>>()->multitoken(), "Exclude these drives from check")
+ ("exclude-type,X", po::wvalue<std::vector<std::wstring>>()->multitoken(), "Exclude partition types (ignored)")
+ ("iwarning,W", po::wvalue<std::wstring>(), "Warning threshold for inodes (ignored)")
+ ("icritical,K", po::wvalue<std::wstring>(), "Critical threshold for inodes (ignored)")
+ ("unit,u", po::wvalue<std::wstring>(), "Assign unit possible are: B, kB, MB, GB, TB")
+ ("show-used,U", "Show used space instead of the free space")
+ ("megabytes,m", "Use megabytes, overridden by --unit")
+ ;
+
+ po::wcommand_line_parser parser(ac, av);
+
+ try {
+ po::store(
+ parser
+ .options(desc)
+ .style(
+ po::command_line_style::unix_style |
+ po::command_line_style::allow_long_disguise)
+ .run(),
+ vm);
+ vm.notify();
+ } catch (const std::exception& e) {
+ std::cout << e.what() << '\n' << desc << '\n';
+ return 3;
+ }
+
+ if (vm.count("help")) {
+ std::wcout << progName << " Help\n\tVersion: " << VERSION << '\n';
+ wprintf(
+ L"%s is a simple program to check a machine's disk space usage.\n"
+ L"You can use the following options to define its behaviour:\n\n", progName);
+ std::cout << desc;
+ wprintf(
+ L"\nIt will then output a string looking something like this:\n\n"
+ L"\tDISK WARNING 29GB | disk=29GB;50%%;5;0;120\n\n"
+ L"\"DISK\" being the type of the check, \"WARNING\" the returned status\n"
+ L"and \"29GB\" is the returned value.\n"
+ L"The performance data is found behind the \"|\", in order:\n"
+ L"returned value, warning threshold, critical threshold, minimal value and,\n"
+ L"if applicable, the maximal value.\n"
+ L"This program will also print out additional performance data disk by disk\n\n"
+ L"%s' exit codes denote the following:\n\n"
+ L" 0\tOK,\n\tNo thresholds were broken or the program's check part was not executed\n"
+ L" 1\tWARNING,\n\tThe warning, but not the critical threshold was broken\n"
+ L" 2\tCRITICAL,\n\tThe critical threshold was broken\n"
+ L" 3\tUNKNOWN, \n\tThe program experienced an internal or input error\n\n"
+ L"Threshold syntax:\n\n"
+ L"-w THRESHOLD\n"
+ L"warn if threshold is broken, which means VALUE < THRESHOLD\n\n"
+ L"-w !THRESHOLD\n"
+ L"inverts threshold check, VALUE > THRESHOLD (analogous to above)\n\n"
+ L"-w [THR1-THR2]\n"
+ L"warn if VALUE is inside the range spanned by THR1 and THR2\n\n"
+ L"-w ![THR1-THR2]\n"
+ L"warn if VALUE is outside the range spanned by THR1 and THR2\n\n"
+ L"-w THRESHOLD%%\n"
+ L"if the plugin accepts percentage based thresholds those will be used.\n"
+ L"Does nothing if the plugin does not accept percentages, or only uses\n"
+ L"percentage thresholds. Ranges can be used with \"%%\", but both range values need\n"
+ L"to end with a percentage sign.\n\n"
+ L"All of these options work with the critical threshold \"-c\" too."
+ , progName);
+ std::cout << '\n';
+ return 0;
+ }
+
+ if (vm.count("version"))
+ std::cout << "Version: " << VERSION << '\n';
+
+ if (vm.count("warning")) {
+ try {
+ printInfo.warn = threshold(vm["warning"].as<std::wstring>());
+ } catch (const std::invalid_argument& e) {
+ std::cout << e.what() << '\n';
+ return 3;
+ }
+ }
+ if (vm.count("critical")) {
+ try {
+ printInfo.crit = threshold(vm["critical"].as<std::wstring>());
+ } catch (const std::invalid_argument& e) {
+ std::cout << e.what() << '\n';
+ return 3;
+ }
+ }
+
+ if (vm.count("path"))
+ printInfo.drives = vm["path"].as<std::vector<std::wstring>>();
+
+ if (vm.count("exclude_device"))
+ printInfo.exclude_drives = vm["exclude_device"].as<std::vector<std::wstring>>();
+
+ if (vm.count("unit")) {
+ try {
+ printInfo.unit = parseBUnit(vm["unit"].as<std::wstring>());
+ } catch (const std::invalid_argument&) {
+ std::wcout << "Unknown unit type " << vm["unit"].as<std::wstring>() << '\n';
+ return 3;
+ }
+ } else {
+ if (vm.count("megabytes"))
+ printInfo.unit = BunitMB;
+ else
+ printInfo.unit = BunitB;
+ }
+
+ printInfo.showUsed = vm.count("show-used") > 0;
+
+ l_Debug = vm.count("debug") > 0;
+
+ return -1;
+}
+
+static int printOutput(printInfoStruct& printInfo, std::vector<drive>& vDrives)
+{
+ if (l_Debug)
+ std::wcout << "Constructing output string\n";
+
+ std::vector<std::wstring> wsDrives, wsPerf;
+ std::wstring unit = BunitStr(printInfo.unit);
+
+ state state = OK;
+
+ std::wstring output = L"DISK OK - free space:";
+
+ if (printInfo.showUsed)
+ output = L"DISK OK - used space:";
+
+ double tCap = 0, tFree = 0, tUsed = 0;
+
+ for (std::vector<drive>::iterator it = vDrives.begin(); it != vDrives.end(); it++) {
+ tCap += it->cap;
+ tFree += it->free;
+ tUsed += it->used;
+
+ if (printInfo.showUsed) {
+ wsDrives.push_back(it->name + L" " + removeZero(it->used) + L" " + unit + L" (" +
+ removeZero(std::round(it->used / it->cap * 100.0)) + L"%); ");
+
+ wsPerf.push_back(L" " + it->name + L"=" + removeZero(it->used) + unit + L";" +
+ printInfo.warn.pString(it->cap) + L";" + printInfo.crit.pString(it->cap) + L";0;"
+ + removeZero(it->cap));
+
+ if (printInfo.crit.set && !printInfo.crit.rend(it->used, it->cap))
+ state = CRITICAL;
+
+ if (state == OK && printInfo.warn.set && !printInfo.warn.rend(it->used, it->cap))
+ state = WARNING;
+ } else {
+ wsDrives.push_back(it->name + L" " + removeZero(it->free) + L" " + unit + L" (" +
+ removeZero(std::round(it->free / it->cap * 100.0)) + L"%); ");
+
+ wsPerf.push_back(L" '" + it->name + L"'=" + removeZero(it->free) + unit + L";" +
+ printInfo.warn.pString(it->cap) + L";" + printInfo.crit.pString(it->cap) + L";0;"
+ + removeZero(it->cap));
+
+ if (printInfo.crit.rend(it->free, it->cap))
+ state = CRITICAL;
+
+ if (state == OK && printInfo.warn.rend(it->free, it->cap))
+ state = WARNING;
+ }
+ }
+
+ if (state == WARNING) {
+ output = L"DISK WARNING - free space:";
+
+ if (printInfo.showUsed)
+ output = L"DISK WARNING - used space:";
+ }
+
+ if (state == CRITICAL) {
+ output = L"DISK CRITICAL - free space:";
+
+ if (printInfo.showUsed)
+ output = L"DISK CRITICAL - used space:";
+ }
+
+ std::wcout << output;
+
+ if (vDrives.size() > 1 && printInfo.showUsed) {
+ std::wcout << "Total " << (printInfo.showUsed ? tUsed : tFree) << unit
+ << " (" << removeZero(std::round(tUsed / tCap * 100.0)) << "%); ";
+ }
+
+ for (const auto& driveName : wsDrives)
+ std::wcout << driveName;
+
+ std::wcout << "|";
+
+ for (const auto& perf : wsPerf)
+ std::wcout << perf;
+
+ std::wcout << '\n';
+
+ return state;
+}
+
+int wmain(int argc, WCHAR **argv)
+{
+ std::vector<drive> vDrives;
+ printInfoStruct printInfo;
+ po::variables_map vm;
+
+ int ret;
+
+ ret = parseArguments(argc, argv, vm, printInfo);
+ if (ret != -1)
+ return ret;
+
+ printInfo.warn.legal = !printInfo.warn.legal;
+ printInfo.crit.legal = !printInfo.crit.legal;
+
+ if (printInfo.drives.empty())
+ ret = check_drives(vDrives, printInfo.exclude_drives);
+ else
+ ret = check_drives(vDrives, printInfo);
+
+ if (ret != -1)
+ return ret;
+
+ for (std::vector<drive>::iterator it = vDrives.begin(); it != vDrives.end(); ++it) {
+ if (!getDriveSpaceValues(*it, printInfo.unit)) {
+ std::wcout << "Failed to access drive at " << it->name << '\n';
+ return 3;
+ }
+ }
+
+ return printOutput(printInfo, vDrives);
+}
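
The threshold syntax documented in the help text above is implemented by the threshold class from plugins/thresholds.hpp, which the Windows plugins share via the thresholds library. A minimal sketch of how such a threshold is evaluated, mirroring the calls made in printOutput(); the values are illustrative:

    #include "plugins/thresholds.hpp"

    // Percentage thresholds are resolved against a maximum value, as check_disk
    // does with the drive capacity.
    threshold warn(L"20%");
    threshold crit(L"10%");

    double freeSpace = 7.0;   // illustrative: GB free
    double capacity = 100.0;  // illustrative: GB total

    // rend() evaluates the value against the threshold (relative to the maximum
    // for percentage thresholds); how the result maps to WARNING/CRITICAL is
    // decided by the calling plugin, as printOutput() above shows.
    bool critBroken = crit.rend(freeSpace, capacity);

    // pString() renders the threshold for the perfdata section of the output.
    std::wstring perfThreshold = warn.pString(capacity);
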
diff --git a/plugins/check_load.cpp b/plugins/check_load.cpp
new file mode 100644
index 0000000..563c347
--- /dev/null
+++ b/plugins/check_load.cpp
@@ -0,0 +1,244 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "plugins/thresholds.hpp"
+#include <boost/program_options.hpp>
+#include <boost/algorithm/string/split.hpp>
+#include <boost/algorithm/string/classification.hpp>
+#include <iostream>
+#include <pdh.h>
+#include <shlwapi.h>
+#include <pdhmsg.h>
+
+#define VERSION 1.0
+
+namespace po = boost::program_options;
+
+struct printInfoStruct
+{
+ threshold warn;
+ threshold crit;
+ double load;
+};
+
+static bool l_Debug;
+
+static int parseArguments(int ac, WCHAR **av, po::variables_map& vm, printInfoStruct& printInfo)
+{
+ wchar_t namePath[MAX_PATH];
+ GetModuleFileName(NULL, namePath, MAX_PATH);
+ wchar_t *progName = PathFindFileName(namePath);
+
+ po::options_description desc;
+
+ desc.add_options()
+ ("help,h", "Print usage message and exit")
+ ("version,V", "Print version and exit")
+ ("debug,d", "Verbose/Debug output")
+ ("warning,w", po::wvalue<std::wstring>(), "Warning value (in percent)")
+ ("critical,c", po::wvalue<std::wstring>(), "Critical value (in percent)")
+ ;
+
+ po::wcommand_line_parser parser(ac, av);
+
+ try {
+ po::store(
+ parser
+ .options(desc)
+ .style(
+ po::command_line_style::unix_style |
+ po::command_line_style::allow_long_disguise)
+ .run(),
+ vm);
+ vm.notify();
+ } catch (const std::exception& e) {
+ std::cout << e.what() << '\n' << desc << '\n';
+ return 3;
+ }
+
+ if (vm.count("help")) {
+ std::wcout << progName << " Help\n\tVersion: " << VERSION << '\n';
+ wprintf(
+ L"%s is a simple program to check a machine's CPU load.\n"
+ L"You can use the following options to define its behaviour:\n\n", progName);
+ std::cout << desc;
+ wprintf(
+ L"\nIt will then output a string looking something like this:\n\n"
+ L"\tLOAD WARNING 67%% | load=67%%;50%%;90%%;0;100\n\n"
+ L"\"LOAD\" being the type of the check, \"WARNING\" the returned status\n"
+ L"and \"67%%\" is the returned value.\n"
+ L"The performance data is found behind the \"|\", in order:\n"
+ L"returned value, warning threshold, critical threshold, minimal value and,\n"
+ L"if applicable, the maximal value.\n\n"
+ L"%s' exit codes denote the following:\n"
+ L" 0\tOK,\n\tNo thresholds were broken or the program's check part was not executed\n"
+ L" 1\tWARNING,\n\tThe warning, but not the critical threshold was broken\n"
+ L" 2\tCRITICAL,\n\tThe critical threshold was broken\n"
+ L" 3\tUNKNOWN, \n\tThe program experienced an internal or input error\n\n"
+ L"Threshold syntax:\n\n"
+ L"-w THRESHOLD\n"
+ L"warn if threshold is broken, which means VALUE > THRESHOLD\n"
+ L"(unless stated differently)\n\n"
+ L"-w !THRESHOLD\n"
+ L"inverts threshold check, VALUE < THRESHOLD (analogous to above)\n\n"
+ L"-w [THR1-THR2]\n"
+ L"warn if VALUE is inside the range spanned by THR1 and THR2\n\n"
+ L"-w ![THR1-THR2]\n"
+ L"warn if VALUE is outside the range spanned by THR1 and THR2\n\n"
+ L"-w THRESHOLD%%\n"
+ L"if the plugin accepts percentage based thresholds those will be used.\n"
+ L"Does nothing if the plugin does not accept percentages, or only uses\n"
+ L"percentage thresholds. Ranges can be used with \"%%\", but both range values need\n"
+ L"to end with a percentage sign.\n\n"
+ L"All of these options work with the critical threshold \"-c\" too."
+ , progName);
+ std::cout << '\n';
+ return 0;
+ }
+
+ if (vm.count("version"))
+ std::cout << "Version: " << VERSION << '\n';
+
+ if (vm.count("warning")) {
+ try {
+ std::wstring wthreshold = vm["warning"].as<std::wstring>();
+ std::vector<std::wstring> tokens;
+ boost::algorithm::split(tokens, wthreshold, boost::algorithm::is_any_of(","));
+ printInfo.warn = threshold(tokens[0]);
+ } catch (const std::invalid_argument& e) {
+ std::cout << e.what() << '\n';
+ return 3;
+ }
+ }
+
+ if (vm.count("critical")) {
+ try {
+ std::wstring cthreshold = vm["critical"].as<std::wstring>();
+ std::vector<std::wstring> tokens;
+ boost::algorithm::split(tokens, cthreshold, boost::algorithm::is_any_of(","));
+ printInfo.crit = threshold(tokens[0]);
+ } catch (const std::invalid_argument& e) {
+ std::cout << e.what() << '\n';
+ return 3;
+ }
+ }
+
+ l_Debug = vm.count("debug") > 0;
+
+ return -1;
+}
+
+static int printOutput(printInfoStruct& printInfo)
+{
+ if (l_Debug)
+ std::wcout << L"Constructing output string" << '\n';
+
+ state state = OK;
+
+ if (printInfo.warn.rend(printInfo.load))
+ state = WARNING;
+
+ if (printInfo.crit.rend(printInfo.load))
+ state = CRITICAL;
+
+ std::wcout << L"LOAD ";
+
+ switch (state) {
+ case OK:
+ std::wcout << L"OK";
+ break;
+ case WARNING:
+ std::wcout << L"WARNING";
+ break;
+ case CRITICAL:
+ std::wcout << L"CRITICAL";
+ break;
+ }
+
+ std::wcout << " " << printInfo.load << L"% | 'load'=" << printInfo.load << L"%;"
+ << printInfo.warn.pString() << L";"
+ << printInfo.crit.pString() << L";0;100" << '\n';
+
+ return state;
+}
+
+static int check_load(printInfoStruct& printInfo)
+{
+ if (l_Debug)
+ std::wcout << L"Creating query and adding counter" << '\n';
+
+ PDH_HQUERY phQuery;
+ PDH_STATUS err = PdhOpenQuery(NULL, NULL, &phQuery);
+ if (!SUCCEEDED(err))
+ goto die;
+
+ PDH_HCOUNTER phCounter;
+ err = PdhAddEnglishCounter(phQuery, L"\\Processor(_Total)\\% Idle Time", NULL, &phCounter);
+ if (!SUCCEEDED(err))
+ goto die;
+
+ if (l_Debug)
+ std::wcout << L"Collecting first batch of query data" << '\n';
+
+ err = PdhCollectQueryData(phQuery);
+ if (!SUCCEEDED(err))
+ goto die;
+
+ if (l_Debug)
+ std::wcout << L"Sleep for one second" << '\n';
+
+ Sleep(1000);
+
+ if (l_Debug)
+ std::wcout << L"Collecting second batch of query data" << '\n';
+
+ err = PdhCollectQueryData(phQuery);
+ if (!SUCCEEDED(err))
+ goto die;
+
+ if (l_Debug)
+ std::wcout << L"Creating formatted counter array" << '\n';
+
+ DWORD CounterType;
+ PDH_FMT_COUNTERVALUE DisplayValue;
+ err = PdhGetFormattedCounterValue(phCounter, PDH_FMT_DOUBLE, &CounterType, &DisplayValue);
+ if (SUCCEEDED(err)) {
+ if (DisplayValue.CStatus == PDH_CSTATUS_VALID_DATA) {
+ if (l_Debug)
+ std::wcout << L"Received value of " << DisplayValue.doubleValue << L" (idle)" << '\n';
+ printInfo.load = 100.0 - DisplayValue.doubleValue;
+ }
+ else {
+ if (l_Debug)
+ std::wcout << L"Received data was not valid\n";
+ goto die;
+ }
+
+ if (l_Debug)
+ std::wcout << L"Finished collection. Cleaning up and returning" << '\n';
+
+ PdhCloseQuery(phQuery);
+ return -1;
+ }
+
+die:
+ printErrorInfo();
+ if (phQuery)
+ PdhCloseQuery(phQuery);
+ return 3;
+}
+
+int wmain(int argc, WCHAR **argv)
+{
+ printInfoStruct printInfo;
+ po::variables_map vm;
+
+ int ret = parseArguments(argc, argv, vm, printInfo);
+ if (ret != -1)
+ return ret;
+
+ ret = check_load(printInfo);
+ if (ret != -1)
+ return ret;
+
+ return printOutput(printInfo);
+}
diff --git a/plugins/check_memory.cpp b/plugins/check_memory.cpp
new file mode 100644
index 0000000..1461445
--- /dev/null
+++ b/plugins/check_memory.cpp
@@ -0,0 +1,215 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "plugins/thresholds.hpp"
+#include <boost/program_options.hpp>
+#include <iostream>
+#include <shlwapi.h>
+#include <winbase.h>
+
+#define VERSION 1.0
+
+namespace po = boost::program_options;
+
+struct printInfoStruct
+{
+ threshold warn;
+ threshold crit;
+ double tRam;
+ double aRam;
+ double percentFree;
+ Bunit unit = BunitMB;
+ bool showUsed;
+};
+
+static bool l_Debug;
+
+static int parseArguments(int ac, WCHAR ** av, po::variables_map& vm, printInfoStruct& printInfo)
+{
+ WCHAR namePath[MAX_PATH];
+ GetModuleFileName(NULL, namePath, MAX_PATH);
+ WCHAR *progName = PathFindFileName(namePath);
+
+ po::options_description desc;
+
+ desc.add_options()
+ ("help,h", "Print help message and exit")
+ ("version,V", "Print version and exit")
+ ("debug,d", "Verbose/Debug output")
+ ("warning,w", po::wvalue<std::wstring>(), "Warning threshold")
+ ("critical,c", po::wvalue<std::wstring>(), "Critical threshold")
+ ("unit,u", po::wvalue<std::wstring>(), "The unit to use for display (default MB)")
+ ("show-used,U", "Show used memory instead of the free memory")
+ ;
+
+ po::wcommand_line_parser parser(ac, av);
+
+ try {
+ po::store(
+ parser
+ .options(desc)
+ .style(
+ po::command_line_style::unix_style |
+ po::command_line_style::allow_long_disguise)
+ .run(),
+ vm);
+ vm.notify();
+ } catch (const std::exception& e) {
+ std::cout << e.what() << '\n' << desc << '\n';
+ return 3;
+ }
+
+ if (vm.count("help")) {
+ std::wcout << progName << " Help\n\tVersion: " << VERSION << '\n';
+ wprintf(
+ L"%s is a simple program to check a machine's physical memory.\n"
+ L"You can use the following options to define its behaviour:\n\n", progName);
+ std::cout << desc;
+ wprintf(
+ L"\nIt will then output a string looking something like this:\n\n"
+ L"\tMEMORY WARNING - 50%% free | memory=2024MB;3000;500;0;4096\n\n"
+ L"\"MEMORY\" being the type of the check, \"WARNING\" the returned status\n"
+ L"and \"50%%\" is the returned value.\n"
+ L"The performance data is found behind the \"|\", in order:\n"
+ L"returned value, warning threshold, critical threshold, minimal value and,\n"
+ L"if applicable, the maximal value. Performance data will only be displayed when\n"
+ L"you set at least one threshold\n\n"
+ L"%s' exit codes denote the following:\n"
+ L" 0\tOK,\n\tNo thresholds were broken or the program's check part was not executed\n"
+ L" 1\tWARNING,\n\tThe warning, but not the critical threshold was broken\n"
+ L" 2\tCRITICAL,\n\tThe critical threshold was broken\n"
+ L" 3\tUNKNOWN, \n\tThe program experienced an internal or input error\n\n"
+ L"Threshold syntax:\n\n"
+ L"-w THRESHOLD\n"
+ L"warn if threshold is broken, which means VALUE > THRESHOLD\n"
+ L"(unless stated differently)\n\n"
+ L"-w !THRESHOLD\n"
+ L"inverts threshold check, VALUE < THRESHOLD (analogous to above)\n\n"
+ L"-w [THR1-THR2]\n"
+ L"warn if VALUE is inside the range spanned by THR1 and THR2\n\n"
+ L"-w ![THR1-THR2]\n"
+ L"warn if VALUE is outside the range spanned by THR1 and THR2\n\n"
+ L"-w THRESHOLD%%\n"
+ L"if the plugin accepts percentage based thresholds those will be used.\n"
+ L"Does nothing if the plugin does not accept percentages, or only uses\n"
+ L"percentage thresholds. Ranges can be used with \"%%\", but both range values need\n"
+ L"to end with a percentage sign.\n\n"
+ L"All of these options work with the critical threshold \"-c\" too.\n"
+ , progName);
+ std::cout << '\n';
+ return 0;
+ }
+
+ if (vm.count("version"))
+ std::wcout << L"Version: " << VERSION << '\n';
+
+ if (vm.count("warning")) {
+ try {
+ printInfo.warn = threshold(vm["warning"].as<std::wstring>());
+ } catch (const std::invalid_argument& e) {
+ std::cout << e.what() << '\n';
+ return 3;
+ }
+ printInfo.warn.legal = !printInfo.warn.legal;
+ }
+
+ if (vm.count("critical")) {
+ try {
+ printInfo.crit = threshold(vm["critical"].as<std::wstring>());
+ } catch (const std::invalid_argument& e) {
+ std::cout << e.what() << '\n';
+ return 3;
+ }
+ printInfo.crit.legal = !printInfo.crit.legal;
+ }
+
+ l_Debug = vm.count("debug") > 0;
+
+ if (vm.count("unit")) {
+ try {
+ printInfo.unit = parseBUnit(vm["unit"].as<std::wstring>());
+ } catch (const std::invalid_argument& e) {
+ std::cout << e.what() << '\n';
+ return 3;
+ }
+ }
+
+ if (vm.count("show-used")) {
+ printInfo.showUsed = true;
+ printInfo.warn.legal = true;
+ printInfo.crit.legal = true;
+ }
+
+ return -1;
+}
+
+static int printOutput(printInfoStruct& printInfo)
+{
+ if (l_Debug)
+ std::wcout << L"Constructing output string" << '\n';
+
+ state state = OK;
+
+ std::wcout << L"MEMORY ";
+
+ double currentValue;
+
+ if (!printInfo.showUsed)
+ currentValue = printInfo.aRam;
+ else
+ currentValue = printInfo.tRam - printInfo.aRam;
+
+ if (printInfo.warn.rend(currentValue, printInfo.tRam))
+ state = WARNING;
+
+ if (printInfo.crit.rend(currentValue, printInfo.tRam))
+ state = CRITICAL;
+
+ std::wcout << stateToString(state);
+
+ if (!printInfo.showUsed)
+ std::wcout << " - " << printInfo.percentFree << L"% free";
+ else
+ std::wcout << " - " << 100 - printInfo.percentFree << L"% used";
+
+ std::wcout << "| 'memory'=" << currentValue << BunitStr(printInfo.unit) << L";"
+ << printInfo.warn.pString(printInfo.tRam) << L";" << printInfo.crit.pString(printInfo.tRam)
+ << L";0;" << printInfo.tRam << '\n';
+
+ return state;
+}
+
+static int check_memory(printInfoStruct& printInfo)
+{
+ if (l_Debug)
+ std::wcout << L"Accessing memory statistics via MemoryStatus" << '\n';
+
+ MEMORYSTATUSEX memBuf;
+ memBuf.dwLength = sizeof(memBuf);
+ GlobalMemoryStatusEx(&memBuf);
+
+ printInfo.tRam = round((memBuf.ullTotalPhys / pow(1024.0, printInfo.unit) * pow(10.0, printInfo.unit))) / pow(10.0, printInfo.unit);
+ printInfo.aRam = round((memBuf.ullAvailPhys / pow(1024.0, printInfo.unit) * pow(10.0, printInfo.unit))) / pow(10.0, printInfo.unit);
+ printInfo.percentFree = 100.0 * memBuf.ullAvailPhys / memBuf.ullTotalPhys;
+
+ if (l_Debug)
+ std::wcout << L"Found memBuf.ullTotalPhys: " << memBuf.ullTotalPhys << '\n'
+ << L"Found memBuf.ullAvailPhys: " << memBuf.ullAvailPhys << '\n';
+
+ return -1;
+}
+
+int wmain(int argc, WCHAR **argv)
+{
+ printInfoStruct printInfo = {};
+ po::variables_map vm;
+
+ int ret = parseArguments(argc, argv, vm, printInfo);
+ if (ret != -1)
+ return ret;
+
+ ret = check_memory(printInfo);
+ if (ret != -1)
+ return ret;
+
+ return printOutput(printInfo);
+}
diff --git a/plugins/check_network.cpp b/plugins/check_network.cpp
new file mode 100644
index 0000000..e21607a
--- /dev/null
+++ b/plugins/check_network.cpp
@@ -0,0 +1,374 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#define WIN32_LEAN_AND_MEAN
+
+#include "plugins/thresholds.hpp"
+#include <boost/program_options.hpp>
+#include <boost/algorithm/string/replace.hpp>
+#include <vector>
+#include <map>
+#include <windows.h>
+#include <pdh.h>
+#include <shlwapi.h>
+#include <iostream>
+#include <pdhmsg.h>
+#include <winsock2.h>
+#include <iphlpapi.h>
+
+#define VERSION 1.2
+
+namespace po = boost::program_options;
+
+struct nInterface
+{
+ std::wstring name;
+ LONG BytesInSec, BytesOutSec;
+ nInterface(std::wstring p)
+ : name(p)
+ { }
+};
+
+struct printInfoStruct
+{
+ threshold warn;
+ threshold crit;
+};
+
+static bool l_Debug;
+static bool l_NoISATAP;
+
+static int parseArguments(int ac, WCHAR **av, po::variables_map& vm, printInfoStruct& printInfo)
+{
+ WCHAR namePath[MAX_PATH];
+ GetModuleFileName(NULL, namePath, MAX_PATH);
+ WCHAR *progName = PathFindFileName(namePath);
+
+ po::options_description desc("Options");
+
+ desc.add_options()
+ ("help,h", "print usage and exit")
+ ("version,V", "print version and exit")
+ ("debug,d", "Verbose/Debug output")
+ ("noisatap,n", "Don't show ISATAP interfaces in output")
+ ("warning,w", po::wvalue<std::wstring>(), "warning value")
+ ("critical,c", po::wvalue<std::wstring>(), "critical value")
+ ;
+
+ po::wcommand_line_parser parser(ac, av);
+
+ try {
+ po::store(
+ parser
+ .options(desc)
+ .style(
+ po::command_line_style::unix_style |
+ po::command_line_style::allow_long_disguise)
+ .run(),
+ vm);
+ vm.notify();
+ } catch (const std::exception& e) {
+ std::cout << e.what() << '\n' << desc << '\n';
+ return 3;
+ }
+
+ if (vm.count("help")) {
+ std::wcout << progName << " Help\n\tVersion: " << VERSION << '\n';
+ wprintf(
+ L"%s is a simple program to check a machine's network performance.\n"
+ L"You can use the following options to define its behaviour:\n\n", progName);
+ std::cout << desc;
+ wprintf(
+ L"\nIt will then output a string looking something like this:\n\n"
+ L"\tNETWORK WARNING 1131B/s | network=1131B;1000;7000;0\n\n"
+ L"\"NETWORK\" being the type of the check, \"WARNING\" the returned status\n"
+ L"and \"1131B/s\" is the returned value.\n"
+ L"The performance data is found behind the \"|\", in order:\n"
+ L"returned value, warning threshold, critical threshold, minimal value and,\n"
+ L"if applicable, the maximal value. Performance data will only be displayed when\n"
+ L"you set at least one threshold\n\n"
+ L"This program will also print out additional performance data interface\n"
+ L"by interface\n\n"
+ L"%s' exit codes denote the following:\n"
+ L" 0\tOK,\n\tNo thresholds were broken or the program's check part was not executed\n"
+ L" 1\tWARNING,\n\tThe warning, but not the critical threshold was broken\n"
+ L" 2\tCRITICAL,\n\tThe critical threshold was broken\n"
+ L" 3\tUNKNOWN, \n\tThe program experienced an internal or input error\n\n"
+ L"Threshold syntax:\n\n"
+ L"-w THRESHOLD\n"
+ L"warn if threshold is broken, which means VALUE > THRESHOLD\n"
+ L"(unless stated differently)\n\n"
+ L"-w !THRESHOLD\n"
+ L"inverts threshold check, VALUE < THRESHOLD (analogous to above)\n\n"
+ L"-w [THR1-THR2]\n"
+ L"warn if VALUE is inside the range spanned by THR1 and THR2\n\n"
+ L"-w ![THR1-THR2]\n"
+ L"warn if VALUE is outside the range spanned by THR1 and THR2\n\n"
+ L"All of these options work with the critical threshold \"-c\" too."
+ , progName);
+ std::cout << '\n';
+ return 0;
+ }
+
+ if (vm.count("version"))
+ std::cout << "Version: " << VERSION << '\n';
+
+ if (vm.count("warning")) {
+ try {
+ printInfo.warn = threshold(vm["warning"].as<std::wstring>());
+ } catch (const std::invalid_argument& e) {
+ std::cout << e.what() << '\n';
+ return 3;
+ }
+ }
+ if (vm.count("critical")) {
+ try {
+ printInfo.crit = threshold(vm["critical"].as<std::wstring>());
+ } catch (const std::invalid_argument& e) {
+ std::cout << e.what() << '\n';
+ return 3;
+ }
+ }
+
+ l_Debug = vm.count("debug") > 0;
+ l_NoISATAP = vm.count("noisatap") > 0;
+
+ return -1;
+}
+
+static int printOutput(printInfoStruct& printInfo, const std::vector<nInterface>& vInterfaces, const std::map<std::wstring, std::wstring>& mapNames)
+{
+ if (l_Debug)
+ std::wcout << L"Constructing output string" << '\n';
+
+ long tIn = 0, tOut = 0;
+ std::wstringstream tss;
+ state state = OK;
+
+ std::map<std::wstring, std::wstring>::const_iterator mapIt;
+ std::wstring wsFriendlyName;
+
+ for (std::vector<nInterface>::const_iterator it = vInterfaces.begin(); it != vInterfaces.end(); ++it) {
+ tIn += it->BytesInSec;
+ tOut += it->BytesOutSec;
+ if (l_Debug)
+ std::wcout << "Getting friendly name of " << it->name << '\n';
+ mapIt = mapNames.find(it->name);
+ if (mapIt != mapNames.end()) {
+ if (l_Debug)
+ std::wcout << "\tIs " << mapIt->second << '\n';
+ wsFriendlyName = mapIt->second;
+ } else {
+ if (l_Debug)
+ std::wcout << "\tNo friendly name found, using adapter name\n";
+ wsFriendlyName = it->name;
+ }
+ if (wsFriendlyName.find(L"isatap") != std::wstring::npos && l_NoISATAP) {
+ if (l_Debug)
+ std::wcout << "\tSkipping isatap interface " << wsFriendlyName << "\n";
+ continue;
+ } else {
+ boost::algorithm::replace_all(wsFriendlyName, "'", "''");
+ tss << L"'" << wsFriendlyName << L"_in'=" << it->BytesInSec << L"B '" << wsFriendlyName << L"_out'=" << it->BytesOutSec << L"B ";
+ }
+ }
+
+ if (printInfo.warn.rend(tIn + tOut))
+ state = WARNING;
+ if (printInfo.crit.rend(tIn + tOut))
+ state = CRITICAL;
+
+ std::wcout << "NETWORK ";
+
+ switch (state) {
+ case OK:
+ std::wcout << L"OK";
+ break;
+ case WARNING:
+ std::wcout << L"WARNING";
+ break;
+ case CRITICAL:
+ std::wcout << L"CRITICAL";
+ break;
+ }
+
+ std::wcout << " " << tIn + tOut << L"B/s | "
+ << L"'network'=" << tIn + tOut << L"B;" << printInfo.warn.pString() << L";" << printInfo.crit.pString() << L";" << L"0; "
+ << L"'network_in'=" << tIn << L"B 'network_out'=" << tOut << L"B "
+ << tss.str() << '\n';
+
+ return state;
+}
+
+static int check_network(std::vector<nInterface>& vInterfaces)
+{
+
+ if (l_Debug)
+ std::wcout << L"Creating Query and adding counters" << '\n';
+
+ PDH_FMT_COUNTERVALUE_ITEM *pDisplayValuesIn = NULL, *pDisplayValuesOut = NULL;
+
+ PDH_HQUERY phQuery;
+ PDH_STATUS err = PdhOpenQuery(NULL, NULL, &phQuery);
+ if (!SUCCEEDED(err))
+ goto die;
+
+ const WCHAR *perfIn = L"\\Network Interface(*)\\Bytes Received/sec";
+ PDH_HCOUNTER phCounterIn;
+ err = PdhAddEnglishCounter(phQuery, perfIn, NULL, &phCounterIn);
+ if (!SUCCEEDED(err))
+ goto die;
+
+ const WCHAR *perfOut = L"\\Network Interface(*)\\Bytes Sent/sec";
+ PDH_HCOUNTER phCounterOut;
+ err = PdhAddEnglishCounter(phQuery, perfOut, NULL, &phCounterOut);
+ if (!SUCCEEDED(err))
+ goto die;
+
+ if (l_Debug)
+ std::wcout << L"Collecting first batch of query data" << '\n';
+
+ err = PdhCollectQueryData(phQuery);
+ if (!SUCCEEDED(err))
+ goto die;
+
+ if (l_Debug)
+ std::wcout << L"Sleep for one second" << '\n';
+
+ Sleep(1000);
+
+ if (l_Debug)
+ std::wcout << L"Collecting second batch of query data" << '\n';
+
+ err = PdhCollectQueryData(phQuery);
+ if (!SUCCEEDED(err))
+ goto die;
+
+ if (l_Debug)
+ std::wcout << L"Creating formatted counter arrays" << '\n';
+
+ DWORD dwItemCount;
+ DWORD dwBufferSizeIn = 0;
+ err = PdhGetFormattedCounterArray(phCounterIn, PDH_FMT_LONG, &dwBufferSizeIn, &dwItemCount, pDisplayValuesIn);
+ if (err == PDH_MORE_DATA || SUCCEEDED(err))
+ pDisplayValuesIn = reinterpret_cast<PDH_FMT_COUNTERVALUE_ITEM*>(new BYTE[dwItemCount*dwBufferSizeIn]);
+ else
+ goto die;
+
+ DWORD dwBufferSizeOut = 0;
+ err = PdhGetFormattedCounterArray(phCounterOut, PDH_FMT_LONG, &dwBufferSizeOut, &dwItemCount, pDisplayValuesOut);
+ if (err == PDH_MORE_DATA || SUCCEEDED(err))
+ pDisplayValuesOut = reinterpret_cast<PDH_FMT_COUNTERVALUE_ITEM*>(new BYTE[dwItemCount*dwBufferSizeOut]);
+ else
+ goto die;
+
+ err = PdhGetFormattedCounterArray(phCounterIn, PDH_FMT_LONG, &dwBufferSizeIn, &dwItemCount, pDisplayValuesIn);
+ if (!SUCCEEDED(err))
+ goto die;
+
+ err = PdhGetFormattedCounterArray(phCounterOut, PDH_FMT_LONG, &dwBufferSizeOut, &dwItemCount, pDisplayValuesOut);
+ if (!SUCCEEDED(err))
+ goto die;
+
+ if (l_Debug)
+ std::wcout << L"Going over counter array" << '\n';
+
+ for (DWORD i = 0; i < dwItemCount; i++) {
+ nInterface iface{pDisplayValuesIn[i].szName};
+ iface.BytesInSec = pDisplayValuesIn[i].FmtValue.longValue;
+ iface.BytesOutSec = pDisplayValuesOut[i].FmtValue.longValue;
+ vInterfaces.push_back(iface);
+
+ if (l_Debug)
+ std::wcout << L"Collected interface " << pDisplayValuesIn[i].szName << '\n';
+ }
+
+ if (l_Debug)
+ std::wcout << L"Finished collection. Cleaning up and returning" << '\n';
+
+ if (phQuery)
+ PdhCloseQuery(phQuery);
+
+ delete[] reinterpret_cast<BYTE *>(pDisplayValuesIn);
+ delete[] reinterpret_cast<BYTE *>(pDisplayValuesOut);
+
+ return -1;
+die:
+ printErrorInfo(err);
+ if (phQuery)
+ PdhCloseQuery(phQuery);
+
+ delete[] reinterpret_cast<BYTE *>(pDisplayValuesIn);
+ delete[] reinterpret_cast<BYTE *>(pDisplayValuesOut);
+
+ return 3;
+}
+
+static bool mapSystemNamesToFamiliarNames(std::map<std::wstring, std::wstring>& mapNames)
+{
+ /*
+ PIP_ADAPTER_UNICAST_ADDRESS pUnicast = NULL;
+ PIP_ADAPTER_ANYCAST_ADDRESS pAnycast = NULL;
+ PIP_ADAPTER_MULTICAST_ADDRESS pMulticast = NULL;
+ PIP_ADAPTER_DNS_SERVER_ADDRESS pDnsServer = NULL;
+ PIP_ADAPTER_PREFIX pPrefix = NULL;
+ */
+ ULONG outBufLen = 15000; // 15KB initial buffer, as suggested by the MSDN documentation for GetAdaptersAddresses
+
+ if (l_Debug)
+ std::wcout << "Mapping adapter system names to friendly names\n";
+
+ PIP_ADAPTER_ADDRESSES pAddresses;
+
+ unsigned int Iterations = 0;
+ DWORD dwRetVal = 0;
+
+ do {
+ pAddresses = reinterpret_cast<PIP_ADAPTER_ADDRESSES>(new BYTE[outBufLen]);
+
+ dwRetVal = GetAdaptersAddresses(AF_UNSPEC, GAA_FLAG_INCLUDE_PREFIX, NULL, pAddresses, &outBufLen);
+
+ if (dwRetVal == ERROR_BUFFER_OVERFLOW) {
+ delete[]pAddresses;
+ pAddresses = NULL;
+ } else
+ break;
+ } while (++Iterations < 3);
+
+ if (dwRetVal != NO_ERROR) {
+ std::wcout << "Failed to collect friendly adapter names\n";
+ delete[]pAddresses;
+ return false;
+ }
+
+ for (PIP_ADAPTER_ADDRESSES pCurrAddresses = pAddresses; pCurrAddresses; pCurrAddresses = pCurrAddresses->Next) {
+ if (l_Debug)
+ std::wcout << "Got: " << pCurrAddresses->Description << " -- " << pCurrAddresses->FriendlyName << '\n';
+
+ mapNames[pCurrAddresses->Description] = pCurrAddresses->FriendlyName;
+ }
+
+ delete[]pAddresses;
+ return true;
+}
+
+int wmain(int argc, WCHAR **argv)
+{
+ std::vector<nInterface> vInterfaces;
+ std::map<std::wstring, std::wstring> mapNames;
+ printInfoStruct printInfo;
+ po::variables_map vm;
+
+ int ret = parseArguments(argc, argv, vm, printInfo);
+
+ if (ret != -1)
+ return ret;
+
+ if (!mapSystemNamesToFamiliarNames(mapNames))
+ return 3;
+
+ ret = check_network(vInterfaces);
+ if (ret != -1)
+ return ret;
+
+ return printOutput(printInfo, vInterfaces, mapNames);
+}
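
check_network relies on the common PDH pattern for wildcard counters: a first PdhGetFormattedCounterArray call with a null buffer reports the required size via PDH_MORE_DATA, after which the buffer is allocated and the call repeated. Note that the reported size already covers all items, so the dwItemCount*dwBufferSize allocation above over-allocates (harmlessly). A minimal sketch of the pattern, assuming phCounter was added via PdhAddEnglishCounter and PdhCollectQueryData has been called twice:

    DWORD bufferSize = 0, itemCount = 0;

    // First call: ask for the required buffer size (expected to return PDH_MORE_DATA).
    PDH_STATUS status = PdhGetFormattedCounterArray(phCounter, PDH_FMT_LONG,
        &bufferSize, &itemCount, NULL);

    if (status == PDH_MORE_DATA) {
        auto items = reinterpret_cast<PDH_FMT_COUNTERVALUE_ITEM*>(new BYTE[bufferSize]);

        // Second call: fill the buffer with one entry per matched counter instance.
        status = PdhGetFormattedCounterArray(phCounter, PDH_FMT_LONG,
            &bufferSize, &itemCount, items);

        if (status == ERROR_SUCCESS) {
            for (DWORD i = 0; i < itemCount; i++) {
                // items[i].szName is the instance name,
                // items[i].FmtValue.longValue the formatted value.
            }
        }

        delete[] reinterpret_cast<BYTE*>(items);
    }
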
diff --git a/plugins/check_nscp_api.cpp b/plugins/check_nscp_api.cpp
new file mode 100644
index 0000000..aef43fb
--- /dev/null
+++ b/plugins/check_nscp_api.cpp
@@ -0,0 +1,512 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga-version.h" /* include VERSION */
+
+// ensure to include base first
+#include "base/i2-base.hpp"
+#include "base/application.hpp"
+#include "base/json.hpp"
+#include "base/string.hpp"
+#include "base/logger.hpp"
+#include "base/exception.hpp"
+#include "base/utility.hpp"
+#include "base/defer.hpp"
+#include "base/io-engine.hpp"
+#include "base/stream.hpp"
+#include "base/tcpsocket.hpp" /* include global icinga::Connect */
+#include "base/tlsstream.hpp"
+#include "base/base64.hpp"
+#include "remote/url.hpp"
+#include <remote/url-characters.hpp>
+#include <boost/program_options.hpp>
+#include <boost/algorithm/string/split.hpp>
+#include <boost/range/algorithm/remove_if.hpp>
+#include <boost/asio/buffer.hpp>
+#include <boost/asio/ssl/context.hpp>
+#include <boost/beast.hpp>
+#include <cstddef>
+#include <cstring>
+#include <iostream>
+
+using namespace icinga;
+namespace po = boost::program_options;
+
+static bool l_Debug;
+
+/**
+ * Prints an Icinga plugin API compliant output, including error handling.
+ *
+ * @param result
+ *
+ * @return Status code for exit()
+ */
+static int FormatOutput(const Dictionary::Ptr& result)
+{
+ if (!result) {
+ std::cerr << "UNKNOWN: No data received.\n";
+ return 3;
+ }
+
+ if (l_Debug)
+ std::cout << "\tJSON Body:\n" << result->ToString() << '\n';
+
+ Array::Ptr payloads = result->Get("payload");
+ if (!payloads) {
+ std::cerr << "UNKNOWN: Answer format error: Answer is missing 'payload'.\n";
+ return 3;
+ }
+
+ if (payloads->GetLength() == 0) {
+ std::cerr << "UNKNOWN: Answer format error: 'payload' was empty.\n";
+ return 3;
+ }
+
+ if (payloads->GetLength() > 1) {
+ std::cerr << "UNKNOWN: Answer format error: Multiple payloads are not supported.\n";
+ return 3;
+ }
+
+ Dictionary::Ptr payload;
+
+ try {
+ payload = payloads->Get(0);
+ } catch (const std::exception&) {
+ std::cerr << "UNKNOWN: Answer format error: 'payload' was not a Dictionary.\n";
+ return 3;
+ }
+
+ Array::Ptr lines;
+
+ try {
+ lines = payload->Get("lines");
+ } catch (const std::exception&) {
+ std::cerr << "UNKNOWN: Answer format error: 'payload' is missing 'lines'.\n";
+ return 3;
+ }
+
+ if (!lines) {
+ std::cerr << "UNKNOWN: Answer format error: 'lines' is Null.\n";
+ return 3;
+ }
+
+ std::stringstream ssout;
+
+ ObjectLock olock(lines);
+
+ for (const Value& vline : lines) {
+ Dictionary::Ptr line;
+
+ try {
+ line = vline;
+ } catch (const std::exception&) {
+ std::cerr << "UNKNOWN: Answer format error: 'lines' entry was not a Dictionary.\n";
+ return 3;
+ }
+
+ if (!line) {
+ std::cerr << "UNKNOWN: Answer format error: 'lines' entry was Null.\n";
+ return 3;
+ }
+
+ ssout << payload->Get("command") << ' ' << line->Get("message") << " | ";
+
+ if (!line->Contains("perf")) {
+ ssout << '\n';
+ break;
+ }
+
+ Array::Ptr perfs = line->Get("perf");
+
+ ObjectLock olock(perfs);
+
+ for (const Dictionary::Ptr& perf : perfs) {
+ ssout << "'" << perf->Get("alias") << "'=";
+
+ Dictionary::Ptr values = perf->Get("float_value");
+
+ if (perf->Contains("int_value"))
+ values = perf->Get("int_value");
+
+ ssout << values->Get("value") << values->Get("unit") << ';' << values->Get("warning") << ';' << values->Get("critical");
+
+ if (values->Contains("minimum") || values->Contains("maximum")) {
+ ssout << ';';
+
+ if (values->Contains("minimum"))
+ ssout << values->Get("minimum");
+
+ if (values->Contains("maximum"))
+ ssout << ';' << values->Get("maximum");
+ }
+
+ ssout << ' ';
+ }
+
+ ssout << '\n';
+ }
+
+ std::map<String, unsigned int> stateMap = {
+ { "OK", 0 },
+ { "WARNING", 1},
+ { "CRITICAL", 2},
+ { "UNKNOWN", 3}
+ };
+
+ String state = static_cast<String>(payload->Get("result")).ToUpper();
+
+ auto it = stateMap.find(state);
+
+ if (it == stateMap.end()) {
+ std::cerr << "UNKNOWN: Answer format error: 'result' was not a known state.\n";
+ return 3;
+ }
+
+ std::cout << ssout.rdbuf();
+
+ return it->second;
+}
+
+/**
+ * Connects to host:port and performs a TLS handshake
+ *
+ * @param host To connect to.
+ * @param port To connect to.
+ *
+ * @returns AsioTlsStream pointer for future HTTP connections.
+ */
+static Shared<AsioTlsStream>::Ptr Connect(const String& host, const String& port)
+{
+ Shared<boost::asio::ssl::context>::Ptr sslContext;
+
+ try {
+ sslContext = MakeAsioSslContext(Empty, Empty, Empty); //TODO: Add support for cert, key, ca parameters
+ } catch(const std::exception& ex) {
+ Log(LogCritical, "DebugConsole")
+ << "Cannot make SSL context: " << ex.what();
+ throw;
+ }
+
+ Shared<AsioTlsStream>::Ptr stream = Shared<AsioTlsStream>::Make(IoEngine::Get().GetIoContext(), *sslContext, host);
+
+ try {
+ icinga::Connect(stream->lowest_layer(), host, port);
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "DebugConsole")
+ << "Cannot connect to REST API on host '" << host << "' port '" << port << "': " << ex.what();
+ throw;
+ }
+
+ auto& tlsStream (stream->next_layer());
+
+ try {
+ tlsStream.handshake(tlsStream.client);
+ } catch (const std::exception& ex) {
+ Log(LogWarning, "DebugConsole")
+ << "TLS handshake with host '" << host << "' failed: " << ex.what();
+ throw;
+ }
+
+ return stream;
+}
+
+static const char l_ReasonToInject[2] = {' ', 'X'};
+
+template<class MutableBufferSequence>
+static inline
+boost::asio::mutable_buffer GetFirstNonZeroBuffer(const MutableBufferSequence& mbs)
+{
+ namespace asio = boost::asio;
+
+ auto end (asio::buffer_sequence_end(mbs));
+
+ for (auto current (asio::buffer_sequence_begin(mbs)); current != end; ++current) {
+ asio::mutable_buffer buf (*current);
+
+ if (buf.size() > 0u) {
+ return buf;
+ }
+ }
+
+ return {};
+}
+
+/**
+ * Workaround for <https://github.com/mickem/nscp/issues/610>.
+ */
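+// NSClient++ (see the issue linked above) may send a status line without a reason phrase,
+// e.g. "HTTP/1.1 200\r\n", which Boost.Beast's response parser rejects. This wrapper rewrites
+// the first chunk read from the stream so the status line carries the dummy reason " X" and
+// stashes the bytes displaced by that insertion for the following read.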
+template<class SyncReadStream>
+class HttpResponseReasonInjector
+{
+public:
+ inline HttpResponseReasonInjector(SyncReadStream& stream)
+ : m_Stream(stream), m_ReasonHasBeenInjected(false), m_StashedData(nullptr)
+ {
+ }
+
+ HttpResponseReasonInjector(const HttpResponseReasonInjector&) = delete;
+ HttpResponseReasonInjector(HttpResponseReasonInjector&&) = delete;
+ HttpResponseReasonInjector& operator=(const HttpResponseReasonInjector&) = delete;
+ HttpResponseReasonInjector& operator=(HttpResponseReasonInjector&&) = delete;
+
+ template<class MutableBufferSequence>
+ size_t read_some(const MutableBufferSequence& mbs)
+ {
+ boost::system::error_code ec;
+ size_t amount = read_some(mbs, ec);
+
+ if (ec) {
+ throw boost::system::system_error(ec);
+ }
+
+ return amount;
+ }
+
+ template<class MutableBufferSequence>
+ size_t read_some(const MutableBufferSequence& mbs, boost::system::error_code& ec)
+ {
+ auto mb (GetFirstNonZeroBuffer(mbs));
+
+ if (m_StashedData) {
+ size_t amount = 0;
+ auto end ((char*)mb.data() + mb.size());
+
+ for (auto current ((char*)mb.data()); current < end; ++current) {
+ *current = *m_StashedData;
+
+ ++m_StashedData;
+ ++amount;
+
+ if (m_StashedData == (char*)m_StashedDataBuf + (sizeof(m_StashedDataBuf) / sizeof(m_StashedDataBuf[0]))) {
+ m_StashedData = nullptr;
+ break;
+ }
+ }
+
+ return amount;
+ }
+
+ size_t amount = m_Stream.read_some(mb, ec);
+
+ if (!ec && !m_ReasonHasBeenInjected) {
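+ // The first '\r' presumably ends the reason-less status line; insert the dummy reason
+ // characters there, shifting the rest of the buffer right and keeping the displaced
+ // tail bytes in m_StashedDataBuf for the next read_some() call.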
+ auto end ((char*)mb.data() + amount);
+
+ for (auto current ((char*)mb.data()); current < end; ++current) {
+ if (*current == '\r') {
+ auto last (end - 1);
+
+ for (size_t i = sizeof(l_ReasonToInject) / sizeof(l_ReasonToInject[0]); i;) {
+ m_StashedDataBuf[--i] = *last;
+
+ if (last > current) {
+ memmove(current + 1, current, last - current);
+ }
+
+ *current = l_ReasonToInject[i];
+ }
+
+ m_ReasonHasBeenInjected = true;
+ m_StashedData = m_StashedDataBuf;
+
+ break;
+ }
+ }
+ }
+
+ return amount;
+ }
+
+private:
+ SyncReadStream& m_Stream;
+ bool m_ReasonHasBeenInjected;
+ char m_StashedDataBuf[sizeof(l_ReasonToInject) / sizeof(l_ReasonToInject[0])];
+ char* m_StashedData;
+};
+
+/**
+ * Queries the given endpoint and host:port and retrieves data.
+ *
+ * @param host To connect to.
+ * @param port To connect to.
+ * @param password For auth header (required).
+ * @param endpoint Caller must construct the full endpoint including the command query.
+ *
+ * @return Dictionary de-serialized from JSON data.
+ */
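+// Hypothetical example: FetchData("nsclient.example.com", "8443", "secret", "/query/check_cpu?time=5m&")
+// issues a GET against https://nsclient.example.com:8443/query/check_cpu?time=5m and returns the
+// decoded JSON document.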
+
+static Dictionary::Ptr FetchData(const String& host, const String& port, const String& password,
+ const String& endpoint)
+{
+ namespace beast = boost::beast;
+ namespace http = beast::http;
+
+ Shared<AsioTlsStream>::Ptr tlsStream;
+
+ try {
+ tlsStream = Connect(host, port);
+ } catch (const std::exception& ex) {
+ std::cerr << "Connection error: " << ex.what();
+ throw;
+ }
+
+ Url::Ptr url;
+
+ try {
+ url = new Url(endpoint);
+ } catch (const std::exception& ex) {
+ std::cerr << "URL error: " << ex.what();
+ throw;
+ }
+
+ url->SetScheme("https");
+ url->SetHost(host);
+ url->SetPort(port);
+
+ // NSClient++ uses `time=1m&time=5m` instead of `time[]=1m&time[]=5m`
+ url->SetArrayFormatUseBrackets(false);
+
+ http::request<http::string_body> request (http::verb::get, std::string(url->Format(true)), 10);
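+ // Beast encodes the HTTP version as major * 10 + minor, so the trailing 10 above selects HTTP/1.0.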
+
+ request.set(http::field::user_agent, "Icinga/check_nscp_api/" + String(VERSION));
+ request.set(http::field::host, host + ":" + port);
+
+ request.set(http::field::accept, "application/json");
+ request.set("password", password);
+
+ if (l_Debug) {
+ std::cout << "Sending request to " << url->Format(false, false) << "'.\n";
+ }
+
+ try {
+ http::write(*tlsStream, request);
+ tlsStream->flush();
+ } catch (const std::exception& ex) {
+ std::cerr << "Cannot write HTTP request to REST API at URL '" << url->Format(false, false) << "': " << ex.what();
+ throw;
+ }
+
+ beast::flat_buffer buffer;
+ http::parser<false, http::string_body> p;
+
+ try {
+ HttpResponseReasonInjector<decltype(*tlsStream)> reasonInjector (*tlsStream);
+ http::read(reasonInjector, buffer, p);
+ } catch (const std::exception &ex) {
+ BOOST_THROW_EXCEPTION(ScriptError(String("Error reading HTTP response data: ") + ex.what()));
+ }
+
+ String body (std::move(p.get().body()));
+
+ if (l_Debug)
+ std::cout << "Received body from NSCP: '" << body << "'." << std::endl;
+
+ // Add some rudimentary error handling.
+ if (body.IsEmpty()) {
+ String message = "No body received. Ensure that connection parameters are good and check the NSCP logs.";
+ BOOST_THROW_EXCEPTION(ScriptError(message));
+ }
+
+ Dictionary::Ptr jsonResponse;
+
+ try {
+ jsonResponse = JsonDecode(body);
+ } catch (const std::exception& ex) {
+ String message = "Cannot parse JSON response body '" + body + "', error: " + ex.what();
+ BOOST_THROW_EXCEPTION(ScriptError(message));
+ }
+
+ return jsonResponse;
+}
+
+/**
+ * Main function
+ *
+ * @param argc
+ * @param argv
+ * @return exit code
+ */
+int main(int argc, char **argv)
+{
+ po::variables_map vm;
+ po::options_description desc("Options");
+
+ desc.add_options()
+ ("help,h", "Print usage message and exit")
+ ("version,V", "Print version and exit")
+ ("debug,d", "Verbose/Debug output")
+ ("host,H", po::value<std::string>()->required(), "REQUIRED: NSCP API Host")
+ ("port,P", po::value<std::string>()->default_value("8443"), "NSCP API Port (Default: 8443)")
+ ("password", po::value<std::string>()->required(), "REQUIRED: NSCP API Password")
+ ("query,q", po::value<std::string>()->required(), "REQUIRED: NSCP API Query endpoint")
+ ("arguments,a", po::value<std::vector<std::string>>()->multitoken(), "NSCP API Query arguments for the endpoint");
+
+ po::command_line_parser parser(argc, argv);
+
+ try {
+ po::store(
+ parser
+ .options(desc)
+ .style(
+ po::command_line_style::unix_style |
+ po::command_line_style::allow_long_disguise)
+ .run(),
+ vm);
+
+ if (vm.count("version")) {
+ std::cout << "Version: " << VERSION << '\n';
+ Application::Exit(0);
+ }
+
+ if (vm.count("help")) {
+ std::cout << argv[0] << " Help\n\tVersion: " << VERSION << '\n';
+ std::cout << "check_nscp_api is a program used to query the NSClient++ API.\n";
+ std::cout << desc;
+ std::cout << "For detailed information on possible queries and their arguments refer to the NSClient++ documentation.\n";
+ Application::Exit(0);
+ }
+
+ vm.notify();
+ } catch (const std::exception& e) {
+ std::cout << e.what() << '\n' << desc << '\n';
+ Application::Exit(3);
+ }
+
+ l_Debug = vm.count("debug") > 0;
+
+ // Initialize logger
+ if (l_Debug)
+ Logger::SetConsoleLogSeverity(LogDebug);
+ else
+ Logger::SetConsoleLogSeverity(LogWarning);
+
+ // Create the URL string and escape certain characters since Url() follows RFC 3986
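+ // Hypothetical example: "-q check_cpu -a time=5m" yields "/query/check_cpu?time=5m&"
+ // (note the trailing '&' the loop below leaves in place).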
+ String endpoint = "/query/" + vm["query"].as<std::string>();
+ if (!vm.count("arguments"))
+ endpoint += '/';
+ else {
+ endpoint += '?';
+ for (const String& argument : vm["arguments"].as<std::vector<std::string>>()) {
+ String::SizeType pos = argument.FindFirstOf("=");
+ if (pos == String::NPos)
+ endpoint += Utility::EscapeString(argument, ACQUERY_ENCODE, false);
+ else {
+ String key = argument.SubStr(0, pos);
+ String val = argument.SubStr(pos + 1);
+ endpoint += Utility::EscapeString(key, ACQUERY_ENCODE, false) + "=" + Utility::EscapeString(val, ACQUERY_ENCODE, false);
+ }
+ endpoint += '&';
+ }
+ }
+
+ Dictionary::Ptr result;
+
+ try {
+ result = FetchData(vm["host"].as<std::string>(), vm["port"].as<std::string>(),
+ vm["password"].as<std::string>(), endpoint);
+ } catch (const std::exception& ex) {
+ std::cerr << "UNKNOWN - " << ex.what();
+ exit(3);
+ }
+
+ // Application::Exit() is the clean way to exit after calling InitializeBase()
+ Application::Exit(FormatOutput(result));
+ return 255;
+}
diff --git a/plugins/check_perfmon.cpp b/plugins/check_perfmon.cpp
new file mode 100644
index 0000000..0f94b12
--- /dev/null
+++ b/plugins/check_perfmon.cpp
@@ -0,0 +1,387 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "plugins/thresholds.hpp"
+#include <boost/program_options.hpp>
+#include <iostream>
+#include <vector>
+#include <windows.h>
+#include <pdh.h>
+#include <pdhmsg.h>
+#include <shlwapi.h>
+
+#define VERSION 1.0
+
+namespace po = boost::program_options;
+
+struct printInfoStruct
+{
+ threshold tWarn;
+ threshold tCrit;
+ std::wstring wsFullPath;
+ double dValue;
+ DWORD dwPerformanceWait = 1000;
+ DWORD dwRequestedType = PDH_FMT_DOUBLE;
+};
+
+static bool parseArguments(const int ac, WCHAR **av, po::variables_map& vm, printInfoStruct& printInfo)
+{
+ WCHAR szNamePath[MAX_PATH + 1];
+ GetModuleFileName(NULL, szNamePath, MAX_PATH);
+ WCHAR *szProgName = PathFindFileName(szNamePath);
+
+ po::options_description desc("Options");
+ desc.add_options()
+ ("help,h", "Print help page and exit")
+ ("version,V", "Print version and exit")
+ ("warning,w", po::wvalue<std::wstring>(), "Warning thershold")
+ ("critical,c", po::wvalue<std::wstring>(), "Critical threshold")
+ ("performance-counter,P", po::wvalue<std::wstring>(), "The performance counter string to use")
+ ("performance-wait", po::value<DWORD>(), "Sleep in milliseconds between the two perfomance querries (Default: 1000ms)")
+ ("fmt-countertype", po::wvalue<std::wstring>(), "Value type of counter: 'double'(default), 'long', 'int64'")
+ ("print-objects", "Prints all available objects to console")
+ ("print-object-info", "Prints all available instances and counters of --performance-counter, do not use a full perfomance counter string here")
+ ("perf-syntax", po::wvalue<std::wstring>(), "Use this string as name for the performance counter (graphite compatibility)")
+ ;
+
+ po::wcommand_line_parser parser(ac, av);
+
+ try {
+ po::store(
+ parser
+ .options(desc)
+ .style(
+ po::command_line_style::unix_style |
+ po::command_line_style::allow_long_disguise)
+ .run(),
+ vm);
+ vm.notify();
+ } catch (const std::exception& e) {
+ std::cout << e.what() << '\n' << desc << '\n';
+ return false;
+ }
+
+ if (vm.count("version")) {
+ std::wcout << "Version: " << VERSION << '\n';
+ return false;
+ }
+
+ if (vm.count("help")) {
+ std::wcout << szProgName << " Help\n\tVersion: " << VERSION << '\n';
+ wprintf(
+ L"%s runs a check against a performance counter.\n"
+ L"You can use the following options to define its behaviour:\n\n", szProgName);
+ std::cout << desc;
+ wprintf(
+ L"\nIt will then output a string looking something like this:\n\n"
+ L"\tPERFMON CRITICAL \"\\Processor(_Total)\\%% Idle Time\" = 40.34 | "
+ L"perfmon=40.34;20;40;; \"\\Processor(_Total)\\%% Idle Time\"=40.34\n\n"
+ L"\"tPERFMON\" being the type of the check, \"CRITICAL\" the returned status\n"
+ L"and \"40.34\" is the performance counters value.\n"
+ L"%s' exit codes denote the following:\n"
+ L" 0\tOK,\n\tNo Thresholds were exceeded\n"
+ L" 1\tWARNING,\n\tThe warning was broken, but not the critical threshold\n"
+ L" 2\tCRITICAL,\n\tThe critical threshold was broken\n"
+ L" 3\tUNKNOWN, \n\tNo check could be performed\n\n"
+ , szProgName);
+ return false;
+ }
+
+ if (vm.count("warning")) {
+ try {
+ printInfo.tWarn = threshold(vm["warning"].as<std::wstring>());
+ } catch (const std::invalid_argument& e) {
+ std::wcout << e.what() << '\n';
+ return false;
+ }
+ }
+
+ if (vm.count("critical")) {
+ try {
+ printInfo.tCrit = threshold(vm["critical"].as<std::wstring>());
+ } catch (const std::invalid_argument& e) {
+ std::wcout << e.what() << '\n';
+ return false;
+ }
+ }
+
+ if (vm.count("fmt-countertype")) {
+ if (!vm["fmt-countertype"].as<std::wstring>().compare(L"int64"))
+ printInfo.dwRequestedType = PDH_FMT_LARGE;
+ else if (!vm["fmt-countertype"].as<std::wstring>().compare(L"long"))
+ printInfo.dwRequestedType = PDH_FMT_LONG;
+ else if (vm["fmt-countertype"].as<std::wstring>().compare(L"double")) {
+ std::wcout << "Unknown value type " << vm["fmt-countertype"].as<std::wstring>() << '\n';
+ return false;
+ }
+ }
+
+ if (vm.count("performance-counter"))
+ printInfo.wsFullPath = vm["performance-counter"].as<std::wstring>();
+
+ if (vm.count("performance-wait"))
+ printInfo.dwPerformanceWait = vm["performance-wait"].as<DWORD>();
+
+ return true;
+}
+
+static bool getInstancesAndCountersOfObject(const std::wstring& wsObject,
+ std::vector<std::wstring>& vecInstances, std::vector<std::wstring>& vecCounters)
+{
+ DWORD dwCounterListLength = 0, dwInstanceListLength = 0;
+
+ if (PdhEnumObjectItems(NULL, NULL, wsObject.c_str(),
+ NULL, &dwCounterListLength, NULL,
+ &dwInstanceListLength, PERF_DETAIL_WIZARD, 0) != PDH_MORE_DATA)
+ return false;
+
+ std::vector<WCHAR> mszCounterList(dwCounterListLength + 1);
+ std::vector<WCHAR> mszInstanceList(dwInstanceListLength + 1);
+
+ if (FAILED(PdhEnumObjectItems(NULL, NULL, wsObject.c_str(),
+ mszCounterList.data(), &dwCounterListLength, mszInstanceList.data(),
+ &dwInstanceListLength, PERF_DETAIL_WIZARD, 0))) {
+ return false;
+ }
+
+ if (dwInstanceListLength) {
+ std::wstringstream wssInstanceName;
+
+ // XXX: is the "- 1" correct?
+ for (DWORD c = 0; c < dwInstanceListLength - 1; ++c) {
+ if (mszInstanceList[c])
+ wssInstanceName << mszInstanceList[c];
+ else {
+ vecInstances.push_back(wssInstanceName.str());
+ wssInstanceName.str(L"");
+ }
+ }
+ }
+
+ if (dwCounterListLength) {
+ std::wstringstream wssCounterName;
+
+ // XXX: is the "- 1" correct?
+ for (DWORD c = 0; c < dwCounterListLength - 1; ++c) {
+ if (mszCounterList[c])
+ wssCounterName << mszCounterList[c];
+ else {
+ vecCounters.push_back(wssCounterName.str());
+ wssCounterName.str(L"");
+ }
+ }
+ }
+
+ return true;
+}
+
+static void printPDHError(PDH_STATUS status)
+{
+ HMODULE hPdhLibrary = NULL;
+ LPWSTR pMessage = NULL;
+
+ hPdhLibrary = LoadLibrary(L"pdh.dll");
+ if (!hPdhLibrary) {
+ std::wcout << "LoadLibrary failed with " << GetLastError() << '\n';
+ return;
+ }
+
+ if (!FormatMessage(FORMAT_MESSAGE_FROM_HMODULE | FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_ARGUMENT_ARRAY,
+ hPdhLibrary, status, 0, (LPWSTR)&pMessage, 0, NULL)) {
+ FreeLibrary(hPdhLibrary);
+ std::wcout << "Format message failed with " << std::hex << GetLastError() << '\n';
+ return;
+ }
+
+ FreeLibrary(hPdhLibrary);
+
+ std::wcout << pMessage << '\n';
+ LocalFree(pMessage);
+}
+
+static void printObjects()
+{
+ DWORD dwBufferLength = 0;
+ PDH_STATUS status = PdhEnumObjects(NULL, NULL, NULL,
+ &dwBufferLength, PERF_DETAIL_WIZARD, FALSE);
+ //HEX HEX! Only a Magician gets all the info he wants, and only Microsoft knows what that means
+
+ if (status != PDH_MORE_DATA) {
+ printPDHError(status);
+ return;
+ }
+
+ std::vector<WCHAR> mszObjectList(dwBufferLength + 2);
+ status = PdhEnumObjects(NULL, NULL, mszObjectList.data(),
+ &dwBufferLength, PERF_DETAIL_WIZARD, FALSE);
+
+ if (FAILED(status)) {
+ printPDHError(status);
+ return;
+ }
+
+ DWORD c = 0;
+
+ while (++c < dwBufferLength) {
+ if (mszObjectList[c] == '\0')
+ std::wcout << '\n';
+ else
+ std::wcout << mszObjectList[c];
+ }
+}
+
+static void printObjectInfo(const printInfoStruct& pI)
+{
+ if (pI.wsFullPath.empty()) {
+ std::wcout << "No object given!\n";
+ return;
+ }
+
+ std::vector<std::wstring> vecInstances, vecCounters;
+
+ if (!getInstancesAndCountersOfObject(pI.wsFullPath, vecInstances, vecCounters)) {
+ std::wcout << "Could not enumerate instances and counters of " << pI.wsFullPath << '\n'
+ << "Make sure it exists!\n";
+ return;
+ }
+
+ std::wcout << "Instances of " << pI.wsFullPath << ":\n";
+ if (vecInstances.empty())
+ std::wcout << "> Has no instances\n";
+ else {
+ for (const auto& instance : vecInstances)
+ std::wcout << "> " << instance << '\n';
+ }
+ std::wcout << std::endl;
+
+ std::wcout << "Performance Counters of " << pI.wsFullPath << ":\n";
+ if (vecCounters.empty())
+ std::wcout << "> Has no counters\n";
+ else {
+ for (const auto& counter : vecCounters)
+ std::wcout << "> " << counter << "\n";
+ }
+ std::wcout << std::endl;
+}
+
+bool QueryPerfData(printInfoStruct& pI)
+{
+ PDH_HQUERY hQuery = NULL;
+ PDH_HCOUNTER hCounter = NULL;
+ DWORD dwBufferSize = 0, dwItemCount = 0;
+
+ if (pI.wsFullPath.empty()) {
+ std::wcout << "No performance counter path given!\n";
+ return false;
+ }
+
+ PDH_FMT_COUNTERVALUE_ITEM *pDisplayValues = NULL;
+
+ PDH_STATUS status = PdhOpenQuery(NULL, NULL, &hQuery);
+ if (FAILED(status))
+ goto die;
+
+ status = PdhAddEnglishCounter(hQuery, pI.wsFullPath.c_str(), NULL, &hCounter);
+
+ if (FAILED(status))
+ status = PdhAddCounter(hQuery, pI.wsFullPath.c_str(), NULL, &hCounter);
+
+ if (FAILED(status))
+ goto die;
+
+ status = PdhCollectQueryData(hQuery);
+ if (FAILED(status))
+ goto die;
+
+ /*
+ * Most counters need two queries to provide a value.
+ * Those which need only one will return the second.
+ */
+ Sleep(pI.dwPerformanceWait);
+
+ status = PdhCollectQueryData(hQuery);
+ if (FAILED(status))
+ goto die;
+
+ status = PdhGetFormattedCounterArray(hCounter, pI.dwRequestedType, &dwBufferSize, &dwItemCount, NULL);
+ if (status != PDH_MORE_DATA)
+ goto die;
+
+ pDisplayValues = reinterpret_cast<PDH_FMT_COUNTERVALUE_ITEM*>(new BYTE[dwBufferSize]);
+ status = PdhGetFormattedCounterArray(hCounter, pI.dwRequestedType, &dwBufferSize, &dwItemCount, pDisplayValues);
+
+ if (FAILED(status))
+ goto die;
+
+ switch (pI.dwRequestedType) {
+ case (PDH_FMT_LONG):
+ pI.dValue = pDisplayValues[0].FmtValue.longValue;
+ break;
+ case (PDH_FMT_LARGE):
+ pI.dValue = (double) pDisplayValues[0].FmtValue.largeValue;
+ break;
+ default:
+ pI.dValue = pDisplayValues[0].FmtValue.doubleValue;
+ break;
+ }
+
+ delete[]pDisplayValues;
+
+ return true;
+
+die:
+ printPDHError(status);
+ delete[]pDisplayValues;
+ return false;
+}
+
+static int printOutput(const po::variables_map& vm, printInfoStruct& pi)
+{
+ std::wstringstream wssPerfData;
+
+ if (vm.count("perf-syntax"))
+ wssPerfData << "'" << vm["perf-syntax"].as<std::wstring>() << "'=";
+ else
+ wssPerfData << "'" << pi.wsFullPath << "'=";
+
+ wssPerfData << pi.dValue << ';' << pi.tWarn.pString() << ';' << pi.tCrit.pString() << ";;";
+
+ if (pi.tCrit.rend(pi.dValue)) {
+ std::wcout << "PERFMON CRITICAL for '" << (vm.count("perf-syntax") ? vm["perf-syntax"].as<std::wstring>() : pi.wsFullPath)
+ << "' = " << pi.dValue << " | " << wssPerfData.str() << "\n";
+ return 2;
+ }
+
+ if (pi.tWarn.rend(pi.dValue)) {
+ std::wcout << "PERFMON WARNING for '" << (vm.count("perf-syntax") ? vm["perf-syntax"].as<std::wstring>() : pi.wsFullPath)
+ << "' = " << pi.dValue << " | " << wssPerfData.str() << "\n";
+ return 1;
+ }
+
+ std::wcout << "PERFMON OK for '" << (vm.count("perf-syntax") ? vm["perf-syntax"].as<std::wstring>() : pi.wsFullPath)
+ << "' = " << pi.dValue << " | " << wssPerfData.str() << "\n";
+
+ return 0;
+}
+
+int wmain(int argc, WCHAR **argv)
+{
+ po::variables_map variables_map;
+ printInfoStruct stPrintInfo;
+ if (!parseArguments(argc, argv, variables_map, stPrintInfo))
+ return 3;
+
+ if (variables_map.count("print-objects")) {
+ printObjects();
+ return 0;
+ }
+
+ if (variables_map.count("print-object-info")) {
+ printObjectInfo(stPrintInfo);
+ return 0;
+ }
+
+ if (QueryPerfData(stPrintInfo))
+ return printOutput(variables_map, stPrintInfo);
+ else
+ return 3;
+}
diff --git a/plugins/check_ping.cpp b/plugins/check_ping.cpp
new file mode 100644
index 0000000..c918d92
--- /dev/null
+++ b/plugins/check_ping.cpp
@@ -0,0 +1,508 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN //else winsock will be included with windows.h and conflict with winsock2
+#endif
+
+#include "plugins/thresholds.hpp"
+#include <boost/program_options.hpp>
+#include <iostream>
+#include <winsock2.h>
+#include <iphlpapi.h>
+#include <icmpapi.h>
+#include <shlwapi.h>
+#include <ws2ipdef.h>
+#include <mstcpip.h>
+#include <ws2tcpip.h>
+
+#define VERSION 1.0
+
+namespace po = boost::program_options;
+
+struct response
+{
+ double avg;
+ unsigned int pMin = 0;
+ unsigned int pMax = 0;
+ unsigned int dropped = 0;
+};
+
+struct printInfoStruct
+{
+ threshold warn;
+ threshold crit;
+ threshold wpl;
+ threshold cpl;
+ std::wstring host;
+ std::wstring ip;
+ bool ipv6 = false;
+ int timeout = 1000;
+ int num = 5;
+};
+
+static bool l_Debug;
+
+static int parseArguments(int ac, WCHAR **av, po::variables_map& vm, printInfoStruct& printInfo)
+{
+ WCHAR namePath[MAX_PATH];
+ GetModuleFileName(NULL, namePath, MAX_PATH);
+ WCHAR *progName = PathFindFileName(namePath);
+
+ po::options_description desc;
+
+ desc.add_options()
+ ("help,h", "Print usage message and exit")
+ ("version,V", "Print version and exit")
+ ("debug,d", "Verbose/Debug output")
+ ("host,H", po::wvalue<std::wstring>()->required(), "Target hostname or IP. If an IPv6 address is given, the '-6' option must be set")
+ (",4", "--Host is an IPv4 address or if it's a hostname: Resolve it to an IPv4 address (default)")
+ (",6", "--Host is an IPv6 address or if it's a hostname: Resolve it to an IPv6 address")
+ ("timeout,t", po::value<int>(), "Specify timeout for requests in ms (default=1000)")
+ ("packets,p", po::value<int>(), "Declare ping count (default=5)")
+ ("warning,w", po::wvalue<std::wstring>(), "Warning values: rtt,package loss")
+ ("critical,c", po::wvalue<std::wstring>(), "Critical values: rtt,package loss")
+ ;
+
+ po::wcommand_line_parser parser(ac, av);
+
+ try {
+ po::store(
+ parser
+ .options(desc)
+ .style(
+ po::command_line_style::unix_style |
+ po::command_line_style::allow_long_disguise &
+ ~po::command_line_style::allow_guessing
+ )
+ .run(),
+ vm);
+ vm.notify();
+ } catch (const std::exception& e) {
+ std::cout << e.what() << '\n' << desc << '\n';
+ return 3;
+ }
+
+ if (vm.count("help")) {
+ std::wcout << progName << " Help\n\tVersion: " << VERSION << '\n';
+ wprintf(
+ L"%s is a simple program to ping an ip4 address.\n"
+ L"You can use the following options to define its behaviour:\n\n", progName);
+ std::cout << desc;
+ wprintf(
+ L"\nIt will take at least timeout times number of pings to run\n"
+ L"Then it will output a string looking something like this:\n\n"
+ L"\tPING WARNING RTA: 72ms Packet loss: 20%% | ping=72ms;40;80;71;77 pl=20%%;20;50;0;100\n\n"
+ L"\"PING\" being the type of the check, \"WARNING\" the returned status\n"
+ L"and \"RTA: 72ms Packet loss: 20%%\" the relevant information.\n"
+ L"The performance data is found behind the \"|\", in order:\n"
+ L"returned value, warning threshold, critical threshold, minimal value and,\n"
+ L"if applicable, the maximal value. \n\n"
+ L"%s' exit codes denote the following:\n"
+ L" 0\tOK,\n\tNo Thresholds were broken or the programs check part was not executed\n"
+ L" 1\tWARNING,\n\tThe warning, but not the critical threshold was broken\n"
+ L" 2\tCRITICAL,\n\tThe critical threshold was broken\n"
+ L" 3\tUNKNOWN, \n\tThe program experienced an internal or input error\n\n"
+ L"Threshold syntax:\n\n"
+ L"-w THRESHOLD\n"
+ L"warn if threshold is broken, which means VALUE > THRESHOLD\n"
+ L"(unless stated differently)\n\n"
+ L"-w !THRESHOLD\n"
+ L"inverts threshold check, VALUE < THRESHOLD (analogous to above)\n\n"
+ L"-w [THR1-THR2]\n"
+ L"warn is VALUE is inside the range spanned by THR1 and THR2\n\n"
+ L"-w ![THR1-THR2]\n"
+ L"warn if VALUE is outside the range spanned by THR1 and THR2\n\n"
+ L"-w THRESHOLD%%\n"
+ L"if the plugin accepts percentage based thresholds those will be used.\n"
+ L"Does nothing if the plugin does not accept percentages, or only uses\n"
+ L"percentage thresholds. Ranges can be used with \"%%\", but both range values need\n"
+ L"to end with a percentage sign.\n\n"
+ L"All of these options work with the critical threshold \"-c\" too.",
+ progName);
+ std::cout << '\n';
+ return 0;
+ }
+
+ if (vm.count("version")) {
+ std::cout << progName << " Version: " << VERSION << '\n';
+ return 0;
+ }
+
+ if (vm.count("-4") && vm.count("-6")) {
+ std::cout << "Conflicting options \"4\" and \"6\"" << '\n';
+ return 3;
+ }
+
+ printInfo.ipv6 = vm.count("-6") > 0;
+
+ if (vm.count("warning")) {
+ std::vector<std::wstring> sVec = splitMultiOptions(vm["warning"].as<std::wstring>());
+ if (sVec.size() != 2) {
+ std::cout << "Wrong format for warning thresholds" << '\n';
+ return 3;
+ }
+ try {
+ printInfo.warn = threshold(*sVec.begin());
+ printInfo.wpl = threshold(sVec.back());
+ if (!printInfo.wpl.perc) {
+ std::cout << "Packet loss must be percentage" << '\n';
+ return 3;
+ }
+ } catch (const std::invalid_argument& e) {
+ std::cout << e.what() << '\n';
+ return 3;
+ }
+ }
+
+ if (vm.count("critical")) {
+ std::vector<std::wstring> sVec = splitMultiOptions(vm["critical"].as<std::wstring>());
+ if (sVec.size() != 2) {
+ std::cout << "Wrong format for critical thresholds" << '\n';
+ return 3;
+ }
+ try {
+ printInfo.crit = threshold(*sVec.begin());
+ printInfo.cpl = threshold(sVec.back());
+ if (!printInfo.cpl.perc) {
+ std::cout << "Packet loss must be percentage" << '\n';
+ return 3;
+ }
+ } catch (const std::invalid_argument& e) {
+ std::cout << e.what() << '\n';
+ return 3;
+ }
+ }
+
+ if (vm.count("timeout"))
+ printInfo.timeout = vm["timeout"].as<int>();
+
+ if (vm.count("packets"))
+ printInfo.num = vm["packets"].as<int>();
+
+ printInfo.host = vm["host"].as<std::wstring>();
+
+ l_Debug = vm.count("debug") > 0;
+
+ return -1;
+}
+
+static int printOutput(printInfoStruct& printInfo, response& response)
+{
+ if (l_Debug)
+ std::wcout << L"Constructing output string" << '\n';
+
+ state state = OK;
+
+ double plp = ((double)response.dropped / printInfo.num) * 100.0;
+
+ if (printInfo.warn.rend(response.avg) || printInfo.wpl.rend(plp))
+ state = WARNING;
+
+ if (printInfo.crit.rend(response.avg) || printInfo.cpl.rend(plp))
+ state = CRITICAL;
+
+ std::wstringstream perf;
+ perf << L"rta=" << response.avg << L"ms;" << printInfo.warn.pString() << L";"
+ << printInfo.crit.pString() << L";0;" << " pl=" << removeZero(plp) << "%;"
+ << printInfo.wpl.pString() << ";" << printInfo.cpl.pString() << ";0;100";
+
+ if (response.dropped == printInfo.num) {
+ std::wcout << L"PING CRITICAL ALL CONNECTIONS DROPPED | " << perf.str() << '\n';
+ return 2;
+ }
+
+ std::wcout << L"PING ";
+
+ switch (state) {
+ case OK:
+ std::wcout << L"OK";
+ break;
+ case WARNING:
+ std::wcout << L"WARNING";
+ break;
+ case CRITICAL:
+ std::wcout << L"CRITICAL";
+ break;
+ }
+
+ std::wcout << L" RTA: " << response.avg << L"ms Packet loss: " << removeZero(plp) << "% | " << perf.str() << '\n';
+
+ return state;
+}
+
+static bool resolveHostname(const std::wstring& hostname, bool ipv6, std::wstring& ipaddr)
+{
+ ADDRINFOW hints;
+ ZeroMemory(&hints, sizeof(hints));
+
+ if (ipv6)
+ hints.ai_family = AF_INET6;
+ else
+ hints.ai_family = AF_INET;
+
+ if (l_Debug)
+ std::wcout << L"Resolving hostname \"" << hostname << L"\"\n";
+
+ ADDRINFOW *result = NULL;
+ DWORD ret = GetAddrInfoW(hostname.c_str(), NULL, &hints, &result);
+
+ if (ret) {
+ std::wcout << L"Failed to resolve hostname. Error " << ret << L": " << formatErrorInfo(ret) << L"\n";
+ return false;
+ }
+
+ wchar_t ipstringbuffer[46];
+
+ if (ipv6) {
+ struct sockaddr_in6 *address6 = (struct sockaddr_in6 *)result->ai_addr;
+ InetNtop(AF_INET6, &address6->sin6_addr, ipstringbuffer, 46);
+ }
+ else {
+ struct sockaddr_in *address4 = (struct sockaddr_in *)result->ai_addr;
+ InetNtop(AF_INET, &address4->sin_addr, ipstringbuffer, 46);
+ }
+
+ if (l_Debug)
+ std::wcout << L"Resolved to \"" << ipstringbuffer << L"\"\n";
+
+ ipaddr = ipstringbuffer;
+ return true;
+}
+
+static int check_ping4(const printInfoStruct& pi, response& response)
+{
+ if (l_Debug)
+ std::wcout << L"Parsing ip address" << '\n';
+
+ in_addr ipDest4;
+ LPCWSTR term;
+ if (RtlIpv4StringToAddress(pi.ip.c_str(), TRUE, &term, &ipDest4) == STATUS_INVALID_PARAMETER) {
+ std::wcout << pi.ip << " is not a valid ip address\n";
+ return 3;
+ }
+
+ if (*term != L'\0') {
+ std::wcout << pi.ip << " is not a valid ip address\n";
+ return 3;
+ }
+
+ if (l_Debug)
+ std::wcout << L"Creating Icmp File\n";
+
+ HANDLE hIcmp;
+ if ((hIcmp = IcmpCreateFile()) == INVALID_HANDLE_VALUE)
+ goto die;
+
+ DWORD dwRepSize = sizeof(ICMP_ECHO_REPLY) + 8;
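+ // The extra 8 bytes leave room for an ICMP error message in the reply buffer, as recommended
+ // for IcmpSendEcho2 (no request payload is sent here).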
+ void *repBuf = reinterpret_cast<VOID *>(new BYTE[dwRepSize]);
+
+ if (repBuf == NULL)
+ goto die;
+
+ unsigned int rtt = 0;
+ int num = pi.num;
+
+ LARGE_INTEGER frequency;
+ QueryPerformanceFrequency(&frequency);
+
+ do {
+ LARGE_INTEGER timer1;
+ QueryPerformanceCounter(&timer1);
+
+ if (l_Debug)
+ std::wcout << L"Sending Icmp echo\n";
+
+ if (!IcmpSendEcho2(hIcmp, NULL, NULL, NULL, ipDest4.S_un.S_addr,
+ NULL, 0, NULL, repBuf, dwRepSize, pi.timeout)) {
+ response.dropped++;
+ if (l_Debug)
+ std::wcout << L"Dropped: Response was 0" << '\n';
+ continue;
+ }
+
+ if (l_Debug)
+ std::wcout << "Ping recieved" << '\n';
+
+ PICMP_ECHO_REPLY pEchoReply = static_cast<PICMP_ECHO_REPLY>(repBuf);
+
+ if (pEchoReply->Status != IP_SUCCESS) {
+ response.dropped++;
+ if (l_Debug)
+ std::wcout << L"Dropped: echo reply status " << pEchoReply->Status << '\n';
+ continue;
+ }
+
+ if (l_Debug)
+ std::wcout << L"Recorded rtt of " << pEchoReply->RoundTripTime << '\n';
+
+ rtt += pEchoReply->RoundTripTime;
+ if (response.pMin == 0 || pEchoReply->RoundTripTime < response.pMin)
+ response.pMin = pEchoReply->RoundTripTime;
+ else if (pEchoReply->RoundTripTime > response.pMax)
+ response.pMax = pEchoReply->RoundTripTime;
+
+ LARGE_INTEGER timer2;
+ QueryPerformanceCounter(&timer2);
+
+ if (((timer2.QuadPart - timer1.QuadPart) * 1000 / frequency.QuadPart) < pi.timeout)
+ Sleep((DWORD) (pi.timeout - ((timer2.QuadPart - timer1.QuadPart) * 1000 / frequency.QuadPart)));
+ } while (--num);
+
+ if (l_Debug)
+ std::wcout << L"All pings sent. Cleaning up and returning" << '\n';
+
+ if (hIcmp)
+ IcmpCloseHandle(hIcmp);
+ if (repBuf)
+ delete[] reinterpret_cast<BYTE *>(repBuf);
+
+ response.avg = ((double)rtt / pi.num);
+
+ return -1;
+
+die:
+ printErrorInfo();
+ if (hIcmp)
+ IcmpCloseHandle(hIcmp);
+ if (repBuf)
+ delete[] reinterpret_cast<BYTE *>(repBuf);
+
+ return 3;
+}
+
+static int check_ping6(const printInfoStruct& pi, response& response)
+{
+ DWORD dwRepSize = sizeof(ICMPV6_ECHO_REPLY) + 8;
+ void *repBuf = reinterpret_cast<void *>(new BYTE[dwRepSize]);
+
+ int num = pi.num;
+ unsigned int rtt = 0;
+
+ if (l_Debug)
+ std::wcout << L"Parsing ip address" << '\n';
+
+ sockaddr_in6 ipDest6;
+ if (RtlIpv6StringToAddressEx(pi.ip.c_str(), &ipDest6.sin6_addr, &ipDest6.sin6_scope_id, &ipDest6.sin6_port)) {
+ std::wcout << pi.ip << " is not a valid ipv6 address" << '\n';
+ return 3;
+ }
+
+ ipDest6.sin6_family = AF_INET6;
+
+ sockaddr_in6 ipSource6;
+ ipSource6.sin6_addr = in6addr_any;
+ ipSource6.sin6_family = AF_INET6;
+ ipSource6.sin6_flowinfo = 0;
+ ipSource6.sin6_port = 0;
+
+ if (l_Debug)
+ std::wcout << L"Creating Icmp File" << '\n';
+
+ HANDLE hIcmp = Icmp6CreateFile();
+ if (hIcmp == INVALID_HANDLE_VALUE) {
+ printErrorInfo(GetLastError());
+
+ if (hIcmp)
+ IcmpCloseHandle(hIcmp);
+
+ if (repBuf)
+ delete[] reinterpret_cast<BYTE *>(repBuf);
+
+ return 3;
+ } else {
+ IP_OPTION_INFORMATION ipInfo = { 30, 0, 0, 0, NULL };
+
+ LARGE_INTEGER frequency;
+ QueryPerformanceFrequency(&frequency);
+
+ do {
+ LARGE_INTEGER timer1;
+ QueryPerformanceCounter(&timer1);
+
+ if (l_Debug)
+ std::wcout << L"Sending Icmp echo" << '\n';
+
+ if (!Icmp6SendEcho2(hIcmp, NULL, NULL, NULL, &ipSource6, &ipDest6,
+ NULL, 0, &ipInfo, repBuf, dwRepSize, pi.timeout)) {
+ response.dropped++;
+ if (l_Debug)
+ std::wcout << L"Dropped: Response was 0" << '\n';
+ continue;
+ }
+
+ if (l_Debug)
+ std::wcout << "Ping recieved" << '\n';
+
+ Icmp6ParseReplies(repBuf, dwRepSize);
+
+ ICMPV6_ECHO_REPLY *pEchoReply = static_cast<ICMPV6_ECHO_REPLY *>(repBuf);
+
+ if (pEchoReply->Status != IP_SUCCESS) {
+ response.dropped++;
+ if (l_Debug)
+ std::wcout << L"Dropped: echo reply status " << pEchoReply->Status << '\n';
+ continue;
+ }
+
+ rtt += pEchoReply->RoundTripTime;
+
+ if (l_Debug)
+ std::wcout << L"Recorded rtt of " << pEchoReply->RoundTripTime << '\n';
+
+ if (response.pMin == 0 || pEchoReply->RoundTripTime < response.pMin)
+ response.pMin = pEchoReply->RoundTripTime;
+ else if (pEchoReply->RoundTripTime > response.pMax)
+ response.pMax = pEchoReply->RoundTripTime;
+
+ LARGE_INTEGER timer2;
+ QueryPerformanceCounter(&timer2);
+
+ if (((timer2.QuadPart - timer1.QuadPart) * 1000 / frequency.QuadPart) < pi.timeout)
+ Sleep((DWORD) (pi.timeout - ((timer2.QuadPart - timer1.QuadPart) * 1000 / frequency.QuadPart)));
+ } while (--num);
+
+ if (l_Debug)
+ std::wcout << L"All pings sent. Cleaning up and returning" << '\n';
+
+ if (hIcmp)
+ IcmpCloseHandle(hIcmp);
+
+ if (repBuf)
+ delete[] reinterpret_cast<BYTE *>(repBuf);
+
+ response.avg = ((double)rtt / pi.num);
+
+ return -1;
+ }
+}
+
+int wmain(int argc, WCHAR **argv)
+{
+ WSADATA dat;
+ if (WSAStartup(MAKEWORD(2, 2), &dat)) {
+ std::cout << "WSAStartup failed\n";
+ return 3;
+ }
+
+ po::variables_map vm;
+ printInfoStruct printInfo;
+ if (parseArguments(argc, argv, vm, printInfo) != -1)
+ return 3;
+
+ if (!resolveHostname(printInfo.host, printInfo.ipv6, printInfo.ip))
+ return 3;
+
+ response response;
+
+ if (printInfo.ipv6) {
+ if (check_ping6(printInfo, response) != -1)
+ return 3;
+ } else {
+ if (check_ping4(printInfo, response) != -1)
+ return 3;
+ }
+
+ WSACleanup();
+
+ return printOutput(printInfo, response);
+}
diff --git a/plugins/check_procs.cpp b/plugins/check_procs.cpp
new file mode 100644
index 0000000..44e2483
--- /dev/null
+++ b/plugins/check_procs.cpp
@@ -0,0 +1,325 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "plugins/thresholds.hpp"
+#include <boost/program_options.hpp>
+#include <iostream>
+#include <windows.h>
+#include <shlwapi.h>
+#include <tlhelp32.h>
+
+#define VERSION 1.0
+
+namespace po = boost::program_options;
+
+struct printInfoStruct
+{
+ threshold warn;
+ threshold crit;
+ std::wstring user;
+};
+
+static bool l_Debug;
+
+static int parseArguments(int ac, WCHAR **av, po::variables_map& vm, printInfoStruct& printInfo)
+{
+ WCHAR namePath[MAX_PATH];
+ GetModuleFileName(NULL, namePath, MAX_PATH);
+ WCHAR *progName = PathFindFileName(namePath);
+
+ po::options_description desc;
+
+ desc.add_options()
+ ("help,h", "Print help message and exit")
+ ("version,V", "Print version and exit")
+ ("debug,d", "Verbose/Debug output")
+ ("user,u", po::wvalue<std::wstring>(), "Count only processes of user")
+ ("warning,w", po::wvalue<std::wstring>(), "Warning threshold")
+ ("critical,c", po::wvalue<std::wstring>(), "Critical threshold")
+ ;
+
+ po::wcommand_line_parser parser(ac, av);
+
+ try {
+ po::store(
+ parser
+ .options(desc)
+ .style(
+ po::command_line_style::unix_style |
+ po::command_line_style::allow_long_disguise)
+ .run(),
+ vm);
+ vm.notify();
+ } catch (const std::exception& e) {
+ std::cout << e.what() << '\n' << desc << '\n';
+ return 3;
+ }
+
+ if (vm.count("help")) {
+ std::wcout << progName << " Help\n\tVersion: " << VERSION << '\n';
+ wprintf(
+ L"%s is a simple program to check a machines processes.\n"
+ L"You can use the following options to define its behaviour:\n\n", progName);
+ std::cout << desc;
+ wprintf(
+ L"\nIt will then output a string looking something like this:\n\n"
+ L"\tPROCS WARNING 67 | load=67;50;90;0\n\n"
+ L"\"PROCS\" being the type of the check, \"WARNING\" the returned status\n"
+ L"and \"67\" is the returned value.\n"
+ L"The performance data is found behind the \"|\", in order:\n"
+ L"returned value, warning threshold, critical threshold, minimal value and,\n"
+ L"if applicable, the maximal value. Performance data will only be displayed when\n"
+ L"you set at least one threshold\n\n"
+ L"For \"-user\" option keep in mind you need root to see other users processes\n\n"
+ L"%s' exit codes denote the following:\n"
+ L" 0\tOK,\n\tNo Thresholds were broken or the programs check part was not executed\n"
+ L" 1\tWARNING,\n\tThe warning, but not the critical threshold was broken\n"
+ L" 2\tCRITICAL,\n\tThe critical threshold was broken\n"
+ L" 3\tUNKNOWN, \n\tThe program experienced an internal or input error\n\n"
+ L"Threshold syntax:\n\n"
+ L"-w THRESHOLD\n"
+ L"warn if threshold is broken, which means VALUE > THRESHOLD\n"
+ L"(unless stated differently)\n\n"
+ L"-w !THRESHOLD\n"
+ L"inverts threshold check, VALUE < THRESHOLD (analogous to above)\n\n"
+ L"-w [THR1-THR2]\n"
+ L"warn is VALUE is inside the range spanned by THR1 and THR2\n\n"
+ L"-w ![THR1-THR2]\n"
+ L"warn if VALUE is outside the range spanned by THR1 and THR2\n\n"
+ L"-w THRESHOLD%%\n"
+ L"if the plugin accepts percentage based thresholds those will be used.\n"
+ L"Does nothing if the plugin does not accept percentages, or only uses\n"
+ L"percentage thresholds. Ranges can be used with \"%%\", but both range values need\n"
+ L"to end with a percentage sign.\n\n"
+ L"All of these options work with the critical threshold \"-c\" too."
+ , progName);
+ std::cout << '\n';
+ return 0;
+ }
+
+ if (vm.count("version")) {
+ std::wcout << "Version: " << VERSION << '\n';
+ return 0;
+ }
+
+ if (vm.count("warning")) {
+ try {
+ printInfo.warn = threshold(vm["warning"].as<std::wstring>());
+ } catch (const std::invalid_argument& e) {
+ std::cout << e.what() << '\n';
+ return 3;
+ }
+ }
+ if (vm.count("critical")) {
+ try {
+ printInfo.crit = threshold(vm["critical"].as<std::wstring>());
+ } catch (const std::invalid_argument& e) {
+ std::cout << e.what() << '\n';
+ return 3;
+ }
+ }
+
+ if (vm.count("user"))
+ printInfo.user = vm["user"].as<std::wstring>();
+
+ l_Debug = vm.count("debug") > 0;
+
+ return -1;
+}
+
+static int printOutput(const int numProcs, printInfoStruct& printInfo)
+{
+ if (l_Debug)
+ std::wcout << L"Constructing output string" << '\n';
+
+ state state = OK;
+
+ if (printInfo.warn.rend(numProcs))
+ state = WARNING;
+
+ if (printInfo.crit.rend(numProcs))
+ state = CRITICAL;
+
+ std::wstring user;
+ if (!printInfo.user.empty())
+ user = L" processes of user " + printInfo.user;
+
+ std::wcout << L"PROCS ";
+
+ switch (state) {
+ case OK:
+ std::wcout << L"OK";
+ break;
+ case WARNING:
+ std::wcout << L"WARNING";
+ break;
+ case CRITICAL:
+ std::wcout << L"CRITICAL";
+ break;
+ }
+
+ std::wcout << L" " << numProcs << user << L" | procs=" << numProcs << L";"
+ << printInfo.warn.pString() << L";" << printInfo.crit.pString() << L";0;" << '\n';
+
+ return state;
+}
+
+static int countProcs()
+{
+ if (l_Debug)
+ std::wcout << L"Counting all processes" << '\n';
+
+ if (l_Debug)
+ std::wcout << L"Creating snapshot" << '\n';
+
+ HANDLE hProcessSnap = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);
+ if (hProcessSnap == INVALID_HANDLE_VALUE)
+ return -1;
+
+ PROCESSENTRY32 pe32;
+ pe32.dwSize = sizeof(PROCESSENTRY32);
+
+ if (l_Debug)
+ std::wcout << L"Grabbing first proccess" << '\n';
+
+ if (!Process32First(hProcessSnap, &pe32)) {
+ CloseHandle(hProcessSnap);
+ return -1;
+ }
+
+ if (l_Debug)
+ std::wcout << L"Counting processes..." << '\n';
+
+ int numProcs = 0;
+
+ do {
+ ++numProcs;
+ } while (Process32Next(hProcessSnap, &pe32));
+
+ if (l_Debug)
+ std::wcout << L"Found " << numProcs << L" processes. Cleaning up udn returning" << '\n';
+
+ CloseHandle(hProcessSnap);
+
+ return numProcs;
+}
+
+static int countProcs(const std::wstring& user)
+{
+ if (l_Debug)
+ std::wcout << L"Counting all processes of user" << user << '\n';
+
+ const WCHAR *wuser = user.c_str();
+ int numProcs = 0;
+
+ HANDLE hProcessSnap, hProcess = NULL, hToken = NULL;
+ PROCESSENTRY32 pe32;
+ DWORD dwReturnLength, dwAcctName, dwDomainName;
+ PTOKEN_USER pSIDTokenUser = NULL;
+ SID_NAME_USE sidNameUse;
+ LPWSTR AcctName, DomainName;
+
+ if (l_Debug)
+ std::wcout << L"Creating snapshot" << '\n';
+
+ hProcessSnap = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);
+ if (hProcessSnap == INVALID_HANDLE_VALUE)
+ goto die;
+
+ pe32.dwSize = sizeof(PROCESSENTRY32);
+
+ if (l_Debug)
+ std::wcout << L"Grabbing first proccess" << '\n';
+
+ if (!Process32First(hProcessSnap, &pe32))
+ goto die;
+
+ if (l_Debug)
+ std::wcout << L"Counting processes..." << '\n';
+
+ do {
+ if (l_Debug)
+ std::wcout << L"Getting process token" << '\n';
+
+ //get ProcessToken
+ hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, pe32.th32ProcessID);
+ if (!OpenProcessToken(hProcess, TOKEN_QUERY, &hToken))
+ //Won't count pid 0 (system idle) and 4/8 (System)
+ continue;
+
+ //Get dwReturnLength in first call
+ dwReturnLength = 1;
+ if (!GetTokenInformation(hToken, TokenUser, NULL, 0, &dwReturnLength)
+ && GetLastError() != ERROR_INSUFFICIENT_BUFFER)
+ continue;
+
+ pSIDTokenUser = reinterpret_cast<PTOKEN_USER>(new BYTE[dwReturnLength]);
+ memset(pSIDTokenUser, 0, dwReturnLength);
+
+ if (l_Debug)
+ std::wcout << L"Received token, saving information" << '\n';
+
+ //write Info in pSIDTokenUser
+ if (!GetTokenInformation(hToken, TokenUser, pSIDTokenUser, dwReturnLength, NULL))
+ continue;
+
+ AcctName = NULL;
+ DomainName = NULL;
+ dwAcctName = 1;
+ dwDomainName = 1;
+
+ if (l_Debug)
+ std::wcout << L"Looking up SID" << '\n';
+
+ //get dwAcctName and dwDomainName size
+ if (!LookupAccountSid(NULL, pSIDTokenUser->User.Sid, AcctName,
+ (LPDWORD)&dwAcctName, DomainName, (LPDWORD)&dwDomainName, &sidNameUse)
+ && GetLastError() != ERROR_INSUFFICIENT_BUFFER)
+ continue;
+
+ AcctName = reinterpret_cast<LPWSTR>(new WCHAR[dwAcctName]);
+ DomainName = reinterpret_cast<LPWSTR>(new WCHAR[dwDomainName]);
+
+ if (!LookupAccountSid(NULL, pSIDTokenUser->User.Sid, AcctName,
+ (LPDWORD)&dwAcctName, DomainName, (LPDWORD)&dwDomainName, &sidNameUse))
+ continue;
+
+ if (l_Debug)
+ std::wcout << L"Comparing " << AcctName << L" to " << wuser << '\n';
+ if (!wcscmp(AcctName, wuser)) {
+ ++numProcs;
+ if (l_Debug)
+ std::wcout << L"Is process of " << wuser << L" (" << numProcs << L")" << '\n';
+ }
+
+ delete[] reinterpret_cast<LPWSTR>(AcctName);
+ delete[] reinterpret_cast<LPWSTR>(DomainName);
+
+ } while (Process32Next(hProcessSnap, &pe32));
+
+die:
+ if (hProcessSnap)
+ CloseHandle(hProcessSnap);
+ if (hProcess)
+ CloseHandle(hProcess);
+ if (hToken)
+ CloseHandle(hToken);
+ if (pSIDTokenUser)
+ delete[] reinterpret_cast<PTOKEN_USER>(pSIDTokenUser);
+ return numProcs;
+}
+
+int wmain(int argc, WCHAR **argv)
+{
+ po::variables_map vm;
+ printInfoStruct printInfo = { };
+
+ int r = parseArguments(argc, argv, vm, printInfo);
+
+ if (r != -1)
+ return r;
+
+ if (!printInfo.user.empty())
+ return printOutput(countProcs(printInfo.user), printInfo);
+
+ return printOutput(countProcs(), printInfo);
+}
diff --git a/plugins/check_service.cpp b/plugins/check_service.cpp
new file mode 100644
index 0000000..cd0cf14
--- /dev/null
+++ b/plugins/check_service.cpp
@@ -0,0 +1,284 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "plugins/thresholds.hpp"
+#include <boost/program_options.hpp>
+#include <iostream>
+#include <windows.h>
+#include <shlwapi.h>
+
+#define VERSION 1.1
+
+namespace po = boost::program_options;
+
+struct printInfoStruct
+{
+ bool warn;
+ DWORD ServiceState;
+ std::wstring service;
+};
+
+static bool l_Debug;
+
+static int parseArguments(int ac, WCHAR **av, po::variables_map& vm, printInfoStruct& printInfo)
+{
+ WCHAR namePath[MAX_PATH];
+ GetModuleFileName(NULL, namePath, MAX_PATH);
+ WCHAR *progName = PathFindFileName(namePath);
+
+ po::options_description desc;
+
+ desc.add_options()
+ ("help,h", "Print help message and exit")
+ ("version,V", "Print version and exit")
+ ("debug,D", "Verbose/Debug output")
+ ("service,s", po::wvalue<std::wstring>(), "Service name to check")
+ ("description,d", "Use \"service\" to match on description")
+ ("warn,w", "Return warning (1) instead of critical (2),\n when service is not running")
+ ;
+
+ po::wcommand_line_parser parser(ac, av);
+
+ try {
+ po::store(
+ parser
+ .options(desc)
+ .style(
+ po::command_line_style::unix_style &
+ ~po::command_line_style::allow_guessing |
+ po::command_line_style::allow_long_disguise
+ )
+ .run(),
+ vm);
+ vm.notify();
+ } catch (const std::exception& e) {
+ std::cout << e.what() << '\n' << desc << '\n';
+ return 3;
+ }
+
+ if (vm.count("help")) {
+ std::wcout << progName << " Help\n\tVersion: " << VERSION << '\n';
+ wprintf(
+ L"%s is a simple program to check the status of a service.\n"
+ L"You can use the following options to define its behaviour:\n\n", progName);
+ std::cout << desc;
+ wprintf(
+ L"\nIt will then output a string looking something like this:\n\n"
+ L"\tSERVICE CRITICAL NOT_RUNNING | service=4;!4;!4;1;7\n\n"
+ L"\"SERVICE\" being the type of the check, \"CRITICAL\" the returned status\n"
+ L"and \"1\" is the returned value.\n"
+ L"A service is either running (Code 0x04) or not running (any other).\n"
+ L"For more information consult the msdn on service state transitions.\n\n"
+ L"%s' exit codes denote the following:\n"
+ L" 0\tOK,\n\tNo Thresholds were broken or the programs check part was not executed\n"
+ L" 1\tWARNING,\n\tThe warning, but not the critical threshold was broken\n"
+ L" 2\tCRITICAL,\n\tThe critical threshold was broken\n"
+ L" 3\tUNKNOWN, \n\tThe program experienced an internal or input error\n\n"
+ L"%s' thresholds work differently, since a service is either running or not\n"
+ L"all \"-w\" and \"-c\" do is say whether a not running service is a warning\n"
+ L"or critical state respectively.\n\n"
+ , progName, progName);
+ std::cout << '\n';
+ return 0;
+ }
+
+ if (vm.count("version")) {
+ std::cout << "Version: " << VERSION << '\n';
+ return 0;
+ }
+
+ if (!vm.count("service")) {
+ std::cout << "Argument \"service\" is required.\n" << desc << '\n';
+ return 3;
+ }
+
+ printInfo.service = vm["service"].as<std::wstring>();
+
+ printInfo.warn = vm.count("warn");
+
+ l_Debug = vm.count("debug") > 0;
+
+ return -1;
+}
+
+static int printOutput(const printInfoStruct& printInfo)
+{
+ if (l_Debug)
+ std::wcout << L"Constructing output string" << '\n';
+
+ std::wstring perf;
+ state state = OK;
+
+ if (!printInfo.ServiceState) {
+ std::wcout << L"SERVICE CRITICAL NOT FOUND | 'service'=" << printInfo.ServiceState << ";;;1;7" << '\n';
+ return 3;
+ }
+
+ if (printInfo.ServiceState != 0x04)
+ printInfo.warn ? state = WARNING : state = CRITICAL;
+
+ switch (state) {
+ case OK:
+ std::wcout << L"SERVICE \"" << printInfo.service << "\" OK RUNNING | 'service'=4;;;1;7" << '\n';
+ break;
+ case WARNING:
+ std::wcout << L"SERVICE \"" << printInfo.service << "\" WARNING NOT RUNNING | 'service'=" << printInfo.ServiceState << ";;;1;7" << '\n';
+ break;
+ case CRITICAL:
+ std::wcout << L"SERVICE \"" << printInfo.service << "\" CRITICAL NOT RUNNING | 'service'=" << printInfo.ServiceState << ";;;1;7" << '\n';
+ break;
+ }
+
+ return state;
+}
+
+static std::wstring getServiceByDescription(const std::wstring& description)
+{
+ SC_HANDLE hSCM = NULL;
+ LPENUM_SERVICE_STATUSW lpServices = NULL;
+ LPBYTE lpBuf = NULL;
+ DWORD cbBufSize = 0;
+ DWORD lpServicesReturned = 0;
+ DWORD pcbBytesNeeded = 0;
+ DWORD lpResumeHandle = 0;
+
+ if (l_Debug)
+ std::wcout << L"Opening SC Manager" << '\n';
+
+ hSCM = OpenSCManager(NULL, NULL, GENERIC_READ);
+ if (hSCM == NULL)
+ goto die;
+
+ if (l_Debug)
+ std::wcout << L"Determining initially required memory" << '\n';
+
+ EnumServicesStatus(hSCM, SERVICE_WIN32 | SERVICE_DRIVER, SERVICE_STATE_ALL, NULL, 0,
+ &pcbBytesNeeded, &lpServicesReturned, &lpResumeHandle);
+
+ /* This should always be ERROR_INSUFFICIENT_BUFFER... But for some reason it is sometimes ERROR_MORE_DATA
+ * See the MSDN on EnumServiceStatus for a glimpse of despair
+ */
+
+ if (GetLastError() != ERROR_INSUFFICIENT_BUFFER && GetLastError() != ERROR_MORE_DATA)
+ goto die;
+
+ lpServices = reinterpret_cast<LPENUM_SERVICE_STATUSW>(new BYTE[pcbBytesNeeded]);
+
+ if (l_Debug)
+ std::wcout << L"Requesting Service Information. Entry point: " << lpResumeHandle << '\n';
+
+ EnumServicesStatus(hSCM, SERVICE_WIN32 | SERVICE_DRIVER, SERVICE_STATE_ALL, lpServices, pcbBytesNeeded,
+ &pcbBytesNeeded, &lpServicesReturned, &lpResumeHandle);
+
+ for (decltype(lpServicesReturned) index = 0; index < lpServicesReturned; index++) {
+ LPWSTR lpCurrent = lpServices[index].lpServiceName;
+
+ if (l_Debug) {
+ std::wcout << L"Opening Service \"" << lpServices[index].lpServiceName << L"\"\n";
+ }
+
+ SC_HANDLE hService = OpenService(hSCM, lpCurrent, SERVICE_QUERY_CONFIG);
+ if (!hService)
+ goto die;
+
+ DWORD dwBytesNeeded = 0;
+ if (l_Debug)
+ std::wcout << "Accessing config\n";
+
+ if (!QueryServiceConfig2(hService, SERVICE_CONFIG_DESCRIPTION, NULL, 0, &dwBytesNeeded) && GetLastError() != ERROR_INSUFFICIENT_BUFFER)
+ continue;
+
+ LPSERVICE_DESCRIPTION lpsd = reinterpret_cast<LPSERVICE_DESCRIPTION>(new BYTE[dwBytesNeeded]);
+
+ if (!QueryServiceConfig2(hService, SERVICE_CONFIG_DESCRIPTION, (LPBYTE)lpsd, dwBytesNeeded, &dwBytesNeeded))
+ continue;
+
+ if (lpsd->lpDescription != NULL && lstrcmp(lpsd->lpDescription, L"") != 0) {
+ std::wstring desc(lpsd->lpDescription);
+ if (l_Debug)
+ std::wcout << "Got description:\n" << desc << '\n';
+ if (desc.find(description) != std::wstring::npos)
+ return lpCurrent;
+ }
+ else if (l_Debug)
+ std::wcout << "No description found\n";
+ }
+
+ CloseServiceHandle(hSCM);
+ delete[] lpServices;
+ return L"";
+
+die:
+ printErrorInfo();
+ if (hSCM)
+ CloseServiceHandle(hSCM);
+ if (lpServices)
+ delete[] lpServices;
+ return L"";
+}
+
+static DWORD getServiceStatus(const printInfoStruct& printInfo)
+{
+ SC_HANDLE hSCM;
+ SC_HANDLE hService;
+ DWORD cbBufSize;
+ DWORD lpResumeHandle = 0;
+ LPBYTE lpBuf = NULL;
+
+ if (l_Debug)
+ std::wcout << L"Opening SC Manager" << '\n';
+
+ hSCM = OpenSCManager(NULL, NULL, GENERIC_READ);
+ if (hSCM == NULL)
+ goto die;
+
+ hService = OpenService(hSCM, printInfo.service.c_str(), SERVICE_QUERY_STATUS);
+ if (hService == NULL)
+ goto die;
+
+ QueryServiceStatusEx(hService, SC_STATUS_PROCESS_INFO, NULL, 0, &cbBufSize);
+ if (GetLastError() != ERROR_INSUFFICIENT_BUFFER)
+ goto die;
+
+ lpBuf = new BYTE[cbBufSize];
+ if (QueryServiceStatusEx(hService, SC_STATUS_PROCESS_INFO, lpBuf, cbBufSize, &cbBufSize)) {
+ LPSERVICE_STATUS_PROCESS pInfo = (LPSERVICE_STATUS_PROCESS)lpBuf;
+ return pInfo->dwCurrentState;
+ }
+
+die:
+ printErrorInfo();
+ if (hSCM)
+ CloseServiceHandle(hSCM);
+ if (hService)
+ CloseServiceHandle(hService);
+ if (lpBuf)
+ delete [] lpBuf;
+
+ return -1;
+}
+
+int wmain(int argc, WCHAR **argv)
+{
+ po::variables_map vm;
+ printInfoStruct printInfo;
+
+ int ret = parseArguments(argc, argv, vm, printInfo);
+ if (ret != -1)
+ return ret;
+
+ if (vm.count("description"))
+ printInfo.service = getServiceByDescription(vm["service"].as<std::wstring>());
+
+ if (printInfo.service.empty()) {
+ std::wcout << "Could not find service matching description\n";
+ return 3;
+ }
+
+ printInfo.ServiceState = getServiceStatus(printInfo);
+ if (printInfo.ServiceState == -1)
+ return 3;
+
+ return printOutput(printInfo);
+}
+
diff --git a/plugins/check_swap.cpp b/plugins/check_swap.cpp
new file mode 100644
index 0000000..dc08f3b
--- /dev/null
+++ b/plugins/check_swap.cpp
@@ -0,0 +1,238 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "plugins/thresholds.hpp"
+#include <boost/program_options.hpp>
+#include <iostream>
+#include <shlwapi.h>
+#include <Psapi.h>
+#include <vector>
+
+#define VERSION 1.0
+
+namespace po = boost::program_options;
+
+struct printInfoStruct
+{
+ threshold warn;
+ threshold crit;
+ double tSwap;
+ double aSwap;
+ double percentFree;
+ Bunit unit = BunitMB;
+ bool showUsed = false;
+};
+
+struct pageFileInfo
+{
+ SIZE_T totalSwap;
+ SIZE_T availableSwap;
+};
+
+static bool l_Debug;
+
+BOOL EnumPageFilesProc(LPVOID pContext, PENUM_PAGE_FILE_INFORMATION pPageFileInfo, LPCWSTR lpFilename) {
+ std::vector<pageFileInfo>* pageFile = static_cast<std::vector<pageFileInfo>*>(pContext);
+ SYSTEM_INFO systemInfo;
+
+ GetSystemInfo(&systemInfo);
+
+ // pPageFileInfo output is in pages, we need to multiply it by the page size
+ pageFile->push_back({ pPageFileInfo->TotalSize * systemInfo.dwPageSize, (pPageFileInfo->TotalSize - pPageFileInfo->TotalInUse) * systemInfo.dwPageSize });
+
+ return TRUE;
+}
+
+static int parseArguments(int ac, WCHAR **av, po::variables_map& vm, printInfoStruct& printInfo)
+{
+ WCHAR namePath[MAX_PATH];
+ GetModuleFileName(NULL, namePath, MAX_PATH);
+ WCHAR *progName = PathFindFileName(namePath);
+
+ po::options_description desc;
+
+ desc.add_options()
+ ("help,h", "Print help message and exit")
+ ("version,V", "Print version and exit")
+ ("debug,d", "Verbose/Debug output")
+ ("warning,w", po::wvalue<std::wstring>(), "Warning threshold")
+ ("critical,c", po::wvalue<std::wstring>(), "Critical threshold")
+ ("unit,u", po::wvalue<std::wstring>(), "The unit to use for display (default MB)")
+ ("show-used,U", "Show used swap instead of the free swap")
+ ;
+
+ po::wcommand_line_parser parser(ac, av);
+
+ try {
+ po::store(
+ parser
+ .options(desc)
+ .style(
+ po::command_line_style::unix_style |
+ po::command_line_style::allow_long_disguise)
+ .run(),
+ vm);
+ vm.notify();
+ } catch (const std::exception& e) {
+ std::cout << e.what() << '\n' << desc << '\n';
+ return 3;
+ }
+
+ if (vm.count("help")) {
+ std::wcout << progName << " Help\n\tVersion: " << VERSION << '\n';
+ wprintf(
+ L"%s is a simple program to check a machines swap in percent.\n"
+ L"You can use the following options to define its behaviour:\n\n", progName);
+ std::cout << desc;
+ wprintf(
+ L"\nIt will then output a string looking something like this:\n\n"
+ L"\tSWAP WARNING - 20%% free | swap=2000B;3000;500;0;10000\n\n"
+ L"\"SWAP\" being the type of the check, \"WARNING\" the returned status\n"
+ L"and \"20%%\" is the returned value.\n"
+ L"The performance data is found behind the \"|\", in order:\n"
+ L"returned value, warning threshold, critical threshold, minimal value and,\n"
+ L"if applicable, the maximal value. Performance data will only be displayed when\n"
+ L"you set at least one threshold\n\n"
+ L"%s' exit codes denote the following:\n"
+ L" 0\tOK,\n\tNo Thresholds were broken or the programs check part was not executed\n"
+ L" 1\tWARNING,\n\tThe warning, but not the critical threshold was broken\n"
+ L" 2\tCRITICAL,\n\tThe critical threshold was broken\n"
+ L" 3\tUNKNOWN, \n\tThe program experienced an internal or input error\n\n"
+ L"Threshold syntax:\n\n"
+ L"-w THRESHOLD\n"
+ L"warn if threshold is broken, which means VALUE > THRESHOLD\n"
+ L"(unless stated differently)\n\n"
+ L"-w !THRESHOLD\n"
+ L"inverts threshold check, VALUE < THRESHOLD (analogous to above)\n\n"
+ L"-w [THR1-THR2]\n"
+ L"warn is VALUE is inside the range spanned by THR1 and THR2\n\n"
+ L"-w ![THR1-THR2]\n"
+ L"warn if VALUE is outside the range spanned by THR1 and THR2\n\n"
+ L"-w THRESHOLD%%\n"
+ L"if the plugin accepts percentage based thresholds those will be used.\n"
+ L"Does nothing if the plugin does not accept percentages, or only uses\n"
+ L"percentage thresholds. Ranges can be used with \"%%\", but both range values need\n"
+ L"to end with a percentage sign.\n\n"
+ L"All of these options work with the critical threshold \"-c\" too.\n"
+ , progName);
+ std::cout << '\n';
+ return 0;
+ }
+
+ if (vm.count("version"))
+ std::wcout << L"Version: " << VERSION << '\n';
+
+ if (vm.count("warning")) {
+ try {
+ printInfo.warn = threshold(vm["warning"].as<std::wstring>());
+ } catch (const std::invalid_argument& e) {
+ std::cout << e.what() << '\n';
+ return 3;
+ }
+ printInfo.warn.legal = !printInfo.warn.legal;
+ }
+
+ if (vm.count("critical")) {
+ try {
+ printInfo.crit = threshold(vm["critical"].as<std::wstring>());
+ } catch (const std::invalid_argument& e) {
+ std::cout << e.what() << '\n';
+ return 3;
+ }
+ printInfo.crit.legal = !printInfo.crit.legal;
+ }
+
+ l_Debug = vm.count("debug") > 0;
+
+ if (vm.count("unit")) {
+ try {
+ printInfo.unit = parseBUnit(vm["unit"].as<std::wstring>());
+ } catch (const std::invalid_argument& e) {
+ std::cout << e.what() << '\n';
+ return 3;
+ }
+ }
+
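+ // --show-used reports used rather than free swap; larger used values are worse, so the regular "value above threshold is broken" direction is kept here.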
+ if (vm.count("show-used")) {
+ printInfo.showUsed = true;
+ printInfo.warn.legal = true;
+ printInfo.crit.legal = true;
+ }
+
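+ // Returning -1 tells wmain() to continue with the actual check; any other value is used directly as the plugin's exit code.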
+ return -1;
+}
+
+static int printOutput(printInfoStruct& printInfo)
+{
+ if (l_Debug)
+ std::wcout << L"Constructing output string" << '\n';
+
+ state state = OK;
+
+ std::wcout << L"SWAP ";
+
+ double currentValue;
+
+ if (!printInfo.showUsed)
+ currentValue = printInfo.aSwap;
+ else
+ currentValue = printInfo.tSwap - printInfo.aSwap;
+
+ if (printInfo.warn.rend(currentValue, printInfo.tSwap))
+ state = WARNING;
+
+ if (printInfo.crit.rend(currentValue, printInfo.tSwap))
+ state = CRITICAL;
+
+ std::wcout << stateToString(state) << " - ";
+
+ if (!printInfo.showUsed)
+ std::wcout << printInfo.percentFree << L"% free ";
+ else
+ std::wcout << 100 - printInfo.percentFree << L"% used ";
+
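+ // Perfdata format: 'swap'=<value><unit>;<warn>;<crit>;0;<total swap>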
+ std::wcout << "| 'swap'=" << currentValue << BunitStr(printInfo.unit) << L";"
+ << printInfo.warn.pString(printInfo.tSwap) << L";" << printInfo.crit.pString(printInfo.tSwap)
+ << L";0;" << printInfo.tSwap << '\n';
+
+ return state;
+}
+
+static int check_swap(printInfoStruct& printInfo)
+{
+ // Needs explicit cast: http://msinilo.pl/blog2/post/p1348/
+ PENUM_PAGE_FILE_CALLBACKW pageFileCallback = (PENUM_PAGE_FILE_CALLBACKW)EnumPageFilesProc;
+ std::vector<pageFileInfo> pageFiles;
+
+ if(!EnumPageFilesW(pageFileCallback, &pageFiles)) {
+ printErrorInfo();
+ return 3;
+ }
+
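+ // Aggregate total and available swap across all enumerated page files, scaled to the requested display unit.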
+ for (int i = 0; i < pageFiles.size(); i++) {
+ printInfo.tSwap += round(pageFiles.at(i).totalSwap / pow(1024.0, printInfo.unit));
+ printInfo.aSwap += round(pageFiles.at(i).availableSpwap / pow(1024.0, printInfo.unit));
+ }
+
+ if (printInfo.aSwap > 0 && printInfo.tSwap > 0)
+ printInfo.percentFree = 100.0 * printInfo.aSwap / printInfo.tSwap;
+ else
+ printInfo.percentFree = 0;
+
+ return -1;
+}
+
+int wmain(int argc, WCHAR **argv)
+{
+ printInfoStruct printInfo = { };
+ po::variables_map vm;
+
+ int ret = parseArguments(argc, argv, vm, printInfo);
+ if (ret != -1)
+ return ret;
+
+ ret = check_swap(printInfo);
+ if (ret != -1)
+ return ret;
+
+ return printOutput(printInfo);
+}
diff --git a/plugins/check_update.cpp b/plugins/check_update.cpp
new file mode 100644
index 0000000..2711d93
--- /dev/null
+++ b/plugins/check_update.cpp
@@ -0,0 +1,248 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "plugins/thresholds.hpp"
+#include <boost/program_options.hpp>
+#include <iostream>
+#include <windows.h>
+#include <shlwapi.h>
+#include <wuapi.h>
+#include <wuerror.h>
+
+#define VERSION 1.0
+
+#define CRITERIA L"(IsInstalled = 0 and CategoryIDs contains '0fa1201d-4330-4fa8-8ae9-b877473b6441') or (IsInstalled = 0 and CategoryIDs contains 'E6CF1350-C01B-414D-A61F-263D14D133B4')"
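+// The two CategoryIDs above are the WSUS classification GUIDs for security and critical updates, matching the groups described in the help text below.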
+
+namespace po = boost::program_options;
+
+struct printInfoStruct
+{
+ int warn{0};
+ int crit{0};
+ LONG numUpdates{0};
+ bool ignoreReboot{false};
+ int reboot{0};
+ bool careForCanRequest{false};
+};
+
+static bool l_Debug;
+
+static int parseArguments(int ac, WCHAR **av, po::variables_map& vm, printInfoStruct& printInfo)
+{
+ WCHAR namePath[MAX_PATH];
+ GetModuleFileName(NULL, namePath, MAX_PATH);
+ WCHAR *progName = PathFindFileName(namePath);
+
+ po::options_description desc;
+
+ desc.add_options()
+ ("help,h", "Print help message and exit")
+ ("version,V", "Print version and exit")
+ ("debug,d", "Verbose/Debug output")
+ ("warning,w", po::value<int>(), "Number of updates to trigger a warning.")
+ ("critical,c", po::value<int>(), "Number of updates to trigger a critical.")
+ ("possible-reboot", "Treat \"update may need reboot\" as \"update needs reboot\"")
+ ("no-reboot-critical", "Do not automatically return critical if an update requiring reboot is present.")
+ ;
+
+ po::wcommand_line_parser parser(ac, av);
+
+ try {
+ po::store(
+ parser
+ .options(desc)
+ .style(
+ po::command_line_style::unix_style |
+ po::command_line_style::allow_long_disguise)
+ .run(),
+ vm);
+ vm.notify();
+ } catch (const std::exception& e) {
+ std::cout << e.what() << '\n' << desc << '\n';
+ return 3;
+ }
+
+ if (vm.count("help")) {
+ std::wcout << progName << " Help\n\tVersion: " << VERSION << '\n';
+ wprintf(
+ L"%s is a simple program to check a machine's required updates.\n"
+ L"You can use the following options to define its behaviour:\n\n", progName);
+ std::cout << desc;
+ wprintf(
+ L"\nAfter some time, it will then output a string like this one:\n\n"
+ L"\tUPDATE WARNING 8 | updates=8;1;1;0\n\n"
+ L"\"UPDATE\" being the type of the check, \"WARNING\" the returned status\n"
+ L"and \"8\" is the number of important updates.\n"
+ L"The performance data is found behind the \"|\", in order:\n"
+ L"returned value, warning threshold, critical threshold, minimal value and,\n"
+ L"if applicable, the maximal value.\n\n"
+ L"An update counts as important when it is part of the Security- or\n"
+ L"CriticalUpdates group.\n"
+ L"Consult the MSDN documentation on WSUS classification GUIDs for more information.\n"
+ L"%s' exit codes denote the following:\n"
+ L" 0\tOK,\n\tNo thresholds were broken or the program's check part was not executed\n"
+ L" 1\tWARNING,\n\tThe warning, but not the critical threshold was broken\n"
+ L" 2\tCRITICAL,\n\tThe critical threshold was broken or an update required reboot.\n"
+ L" 3\tUNKNOWN, \n\tThe program experienced an internal or input error\n\n"
+ L"If a warning threshold is set but not a critical threshold, the critical\n"
+ L"threshold will be set to one greater than the set warning threshold.\n\n"
+ L"The \"possible-reboot\" option is not recommended since this is true for nearly\n"
+ L"every update."
+ , progName);
+ std::cout << '\n';
+ return 0;
+ }
+
+ if (vm.count("version")) {
+ std::cout << "Version: " << VERSION << '\n';
+ return 0;
+ }
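+ // If only a warning threshold is given, the critical threshold defaults to warn + 1 (as documented in the help text).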
+ if(vm.count("warning"))
+ printInfo.warn = vm["warning"].as<int>();
+ if (vm.count("critical"))
+ printInfo.crit = vm["critical"].as<int>();
+ else if (vm.count("warning"))
+ printInfo.crit = printInfo.warn + 1;
+ printInfo.careForCanRequest = vm.count("possible-reboot") > 0;
+ printInfo.ignoreReboot = vm.count("no-reboot-critical") > 0;
+
+ l_Debug = vm.count("debug") > 0;
+
+ return -1;
+}
+
+static int printOutput(const printInfoStruct& printInfo)
+{
+ if (l_Debug)
+ std::wcout << L"Constructing output string" << '\n';
+
+ state state = OK;
+ std::wstring output = L"UPDATE ";
+
+ if (printInfo.numUpdates >= printInfo.warn && printInfo.warn)
+ state = WARNING;
+
+ if ((printInfo.reboot && !printInfo.ignoreReboot) || (printInfo.numUpdates >= printInfo.crit && printInfo.crit))
+ state = CRITICAL;
+
+ switch (state) {
+ case OK:
+ output.append(L"OK ");
+ break;
+ case WARNING:
+ output.append(L"WARNING ");
+ break;
+ case CRITICAL:
+ output.append(L"CRITICAL ");
+ break;
+ }
+ output.append(std::to_wstring(printInfo.numUpdates));
+ if (printInfo.reboot) {
+ output.append(L"; ");
+ output.append(std::to_wstring(printInfo.reboot));
+ output.append(L" NEED REBOOT ");
+ }
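+ // Perfdata format: 'update'=<count>;<warn>;<crit>;0; (no maximum value is reported)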
+ std::wcout << output << L" | 'update'=" << printInfo.numUpdates << L";"
+ << printInfo.warn << L";" << printInfo.crit << L";0;" << '\n';
+
+ return state;
+}
+
+static int check_update(printInfoStruct& printInfo)
+{
+ if (l_Debug)
+ std::wcout << "Initializing COM library" << '\n';
+ CoInitializeEx(NULL, COINIT_APARTMENTTHREADED);
+ ISearchResult *pResult;
+ IUpdateSession *pSession;
+ IUpdateSearcher *pSearcher;
+ BSTR criteria = NULL;
+
+ HRESULT err;
+ if (l_Debug)
+ std::wcout << "Creating UpdateSession and UpdateSearcher" << '\n';
+ CoCreateInstance(CLSID_UpdateSession, NULL, CLSCTX_INPROC_SERVER, IID_IUpdateSession, (void **)&pSession);
+ pSession->CreateUpdateSearcher(&pSearcher);
+
+ /*
+ * IsInstalled = 0: All updates, including languagepacks and features
+ * BrowseOnly = 0: No features or languagepacks, security and unnamed
+ * BrowseOnly = 1: Nothing, broken
+ * RebootRequired = 1: Reboot required
+ */
+
+ criteria = SysAllocString(CRITERIA);
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/aa386526%28v=vs.85%29.aspx
+ // https://msdn.microsoft.com/en-us/library/ff357803%28v=vs.85%29.aspx
+
+ if (l_Debug)
+ std::wcout << L"Querying updates from server" << '\n';
+
+ err = pSearcher->Search(criteria, &pResult);
+ if (!SUCCEEDED(err))
+ goto die;
+ SysFreeString(criteria);
+ criteria = NULL; // prevent the cleanup code below from freeing the BSTR a second time
+
+ IUpdateCollection *pCollection;
+ IUpdate *pUpdate;
+
+ LONG updateSize;
+ pResult->get_Updates(&pCollection);
+ pCollection->get_Count(&updateSize);
+
+ if (updateSize == 0)
+ return -1;
+
+ printInfo.numUpdates = updateSize;
+ // printInfo.important = printInfo.warn;
+
+ IInstallationBehavior *pIbehav;
+ InstallationRebootBehavior updateReboot;
+
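+ // Count updates that always require a reboot; with --possible-reboot, also count updates that may merely request one.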
+ for (LONG i = 0; i < updateSize; i++) {
+ pCollection->get_Item(i, &pUpdate);
+ if (l_Debug) {
+ std::wcout << L"Checking reboot behaviour of update number " << i << '\n';
+ }
+ pUpdate->get_InstallationBehavior(&pIbehav);
+ pIbehav->get_RebootBehavior(&updateReboot);
+ if (updateReboot == irbAlwaysRequiresReboot) {
+ printInfo.reboot++;
+ if (l_Debug)
+ std::wcout << L"It requires reboot" << '\n';
+ continue;
+ }
+ if (printInfo.careForCanRequest && updateReboot == irbCanRequestReboot) {
+ if (l_Debug)
+ std::wcout << L"It requires reboot" << '\n';
+ printInfo.reboot++;
+ }
+ }
+
+ if (l_Debug)
+ std::wcout << L"Cleaning up and returning" << '\n';
+
+ SysFreeString(criteria);
+ CoUninitialize();
+ return -1;
+
+die:
+ printErrorInfo(err);
+ CoUninitialize();
+ if (criteria)
+ SysFreeString(criteria);
+ return 3;
+}
+
+int wmain(int argc, WCHAR **argv)
+{
+ printInfoStruct printInfo;
+ po::variables_map vm;
+
+ int ret = parseArguments(argc, argv, vm, printInfo);
+ if (ret != -1)
+ return ret;
+
+ ret = check_update(printInfo);
+ if (ret != -1)
+ return ret;
+
+ return printOutput(printInfo);
+}
diff --git a/plugins/check_uptime.cpp b/plugins/check_uptime.cpp
new file mode 100644
index 0000000..93d540a
--- /dev/null
+++ b/plugins/check_uptime.cpp
@@ -0,0 +1,213 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "plugins/thresholds.hpp"
+#include <boost/program_options.hpp>
+#include <boost/chrono.hpp>
+#include <iostream>
+#include <windows.h>
+#include <shlwapi.h>
+
+#define VERSION 1.0
+
+namespace po = boost::program_options;
+
+struct printInfoStruct
+{
+ threshold warn;
+ threshold crit;
+ long long time;
+ long long timeInSeconds;
+ Tunit unit;
+};
+
+static bool l_Debug;
+
+static int parseArguments(int ac, WCHAR **av, po::variables_map& vm, printInfoStruct& printInfo)
+{
+ WCHAR namePath[MAX_PATH];
+ GetModuleFileName(NULL, namePath, MAX_PATH);
+ WCHAR *progName = PathFindFileName(namePath);
+
+ po::options_description desc;
+
+ desc.add_options()
+ ("help,h", "Print help message and exit")
+ ("version,V", "Print version and exit")
+ ("debug,d", "Verbose/Debug output")
+ ("warning,w", po::wvalue<std::wstring>(), "Warning threshold (Uses -unit)")
+ ("critical,c", po::wvalue<std::wstring>(), "Critical threshold (Uses -unit)")
+ ("unit,u", po::wvalue<std::wstring>(), "Unit to use:\nh\t- hours\nm\t- minutes\ns\t- seconds (default)\nms\t- milliseconds")
+ ;
+
+ po::wcommand_line_parser parser(ac, av);
+
+ try {
+ po::store(
+ parser
+ .options(desc)
+ .style(
+ po::command_line_style::unix_style |
+ po::command_line_style::allow_long_disguise)
+ .run(),
+ vm);
+ vm.notify();
+ } catch (const std::exception& e) {
+ std::cout << e.what() << '\n' << desc << '\n';
+ return 3;
+ }
+
+ if (vm.count("help")) {
+ std::wcout << progName << " Help\n\tVersion: " << VERSION << '\n';
+ wprintf(
+ L"%s is a simple program to check a machine's uptime.\n"
+ L"You can use the following options to define its behaviour:\n\n", progName);
+ std::cout << desc;
+ wprintf(
+ L"\nIt will then output a string looking something like this:\n\n"
+ L"\tUPTIME WARNING 712h | uptime=712h;700;1800;0\n\n"
+ L"\"UPTIME\" being the type of the check, \"WARNING\" the returned status\n"
+ L"and \"712h\" is the returned value.\n"
+ L"The performance data is found behind the \"|\", in order:\n"
+ L"returned value, warning threshold, critical threshold, minimal value and,\n"
+ L"if applicable, the maximal value. Performance data will only be displayed when\n"
+ L"you set at least one threshold\n\n"
+ L"Note that the returned time is always rounded down:\n"
+ L"4 hours and 44 minutes will show as 4h.\n\n"
+ L"%s' exit codes denote the following:\n"
+ L" 0\tOK,\n\tNo thresholds were broken or the program's check part was not executed\n"
+ L" 1\tWARNING,\n\tThe warning, but not the critical threshold was broken\n"
+ L" 2\tCRITICAL,\n\tThe critical threshold was broken\n"
+ L" 3\tUNKNOWN, \n\tThe program experienced an internal or input error\n\n"
+ L"Threshold syntax:\n\n"
+ L"-w THRESHOLD\n"
+ L"warn if threshold is broken, which means VALUE > THRESHOLD\n"
+ L"(unless stated differently)\n\n"
+ L"-w !THRESHOLD\n"
+ L"inverts threshold check, VALUE < THRESHOLD (analogous to above)\n\n"
+ L"-w [THR1-THR2]\n"
+ L"warn if VALUE is inside the range spanned by THR1 and THR2\n\n"
+ L"-w ![THR1-THR2]\n"
+ L"warn if VALUE is outside the range spanned by THR1 and THR2\n\n"
+ L"-w THRESHOLD%%\n"
+ L"if the plugin accepts percentage based thresholds those will be used.\n"
+ L"Does nothing if the plugin does not accept percentages, or only uses\n"
+ L"percentage thresholds. Ranges can be used with \"%%\", but both range values need\n"
+ L"to end with a percentage sign.\n\n"
+ L"All of these options work with the critical threshold \"-c\" too.\n"
+ , progName);
+ std::cout << '\n';
+ return 0;
+ }
+
+ if (vm.count("version")) {
+ std::cout << VERSION << '\n';
+ return 0;
+ }
+
+ if (vm.count("warning")) {
+ try {
+ printInfo.warn = threshold(vm["warning"].as<std::wstring>());
+ } catch (const std::invalid_argument& e) {
+ std::cout << e.what() << '\n';
+ return 3;
+ }
+ }
+ if (vm.count("critical")) {
+ try {
+ printInfo.crit = threshold(vm["critical"].as<std::wstring>());
+ } catch (const std::invalid_argument& e) {
+ std::cout << e.what() << '\n';
+ return 3;
+ }
+ }
+
+ if (vm.count("unit")) {
+ try {
+ printInfo.unit = parseTUnit(vm["unit"].as<std::wstring>());
+ } catch (const std::invalid_argument&) {
+ std::wcout << L"Unknown unit type " << vm["unit"].as<std::wstring>() << '\n';
+ return 3;
+ }
+ } else
+ printInfo.unit = TunitS;
+
+ l_Debug = vm.count("debug") > 0;
+
+ return -1;
+}
+
+static int printOutput(printInfoStruct& printInfo)
+{
+ if (l_Debug)
+ std::wcout << L"Constructing output string" << '\n';
+
+ state state = OK;
+
+ if (printInfo.warn.rend((double) printInfo.time))
+ state = WARNING;
+ if (printInfo.crit.rend((double) printInfo.time))
+ state = CRITICAL;
+
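+ // Perfdata is always emitted in seconds, so the thresholds are converted with toSeconds() regardless of the display unit.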
+ switch (state) {
+ case OK:
+ std::wcout << L"UPTIME OK " << printInfo.time << TunitStr(printInfo.unit) << L" | 'uptime'=" << printInfo.timeInSeconds
+ << "s" << L";" << printInfo.warn.toSeconds(printInfo.unit).pString() << L";"
+ << printInfo.crit.toSeconds(printInfo.unit).pString() << L";0;" << '\n';
+ break;
+ case WARNING:
+ std::wcout << L"UPTIME WARNING " << printInfo.time << TunitStr(printInfo.unit) << L" | 'uptime'=" << printInfo.timeInSeconds
+ << "s" << L";" << printInfo.warn.toSeconds(printInfo.unit).pString() << L";"
+ << printInfo.crit.toSeconds(printInfo.unit).pString() << L";0;" << '\n';
+ break;
+ case CRITICAL:
+ std::wcout << L"UPTIME CRITICAL " << printInfo.time << TunitStr(printInfo.unit) << L" | 'uptime'=" << printInfo.timeInSeconds
+ << "s" << L";" << printInfo.warn.toSeconds(printInfo.unit).pString() << L";"
+ << printInfo.crit.toSeconds(printInfo.unit).pString() << L";0;" << '\n';
+ break;
+ }
+
+ return state;
+}
+
+static void getUptime(printInfoStruct& printInfo)
+{
+ if (l_Debug)
+ std::wcout << L"Getting uptime in milliseconds" << '\n';
+
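+ // GetTickCount64() returns the number of milliseconds elapsed since system start and does not wrap.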
+ boost::chrono::milliseconds uptime = boost::chrono::milliseconds(GetTickCount64());
+
+ if (l_Debug)
+ std::wcout << L"Converting requested unit (default: seconds)" << '\n';
+
+ switch (printInfo.unit) {
+ case TunitH:
+ printInfo.time = boost::chrono::duration_cast<boost::chrono::hours>(uptime).count();
+ break;
+ case TunitM:
+ printInfo.time = boost::chrono::duration_cast<boost::chrono::minutes>(uptime).count();
+ break;
+ case TunitS:
+ printInfo.time = boost::chrono::duration_cast<boost::chrono::seconds>(uptime).count();
+ break;
+ case TunitMS:
+ printInfo.time = uptime.count();
+ break;
+ }
+
+ // For the Performance Data we need the time in seconds
+ printInfo.timeInSeconds = boost::chrono::duration_cast<boost::chrono::seconds>(uptime).count();
+}
+
+int wmain(int argc, WCHAR **argv)
+{
+ po::variables_map vm;
+ printInfoStruct printInfo;
+ int ret = parseArguments(argc, argv, vm, printInfo);
+
+ if (ret != -1)
+ return ret;
+
+ getUptime(printInfo);
+
+ return printOutput(printInfo);
+}
diff --git a/plugins/check_users.cpp b/plugins/check_users.cpp
new file mode 100644
index 0000000..9193551
--- /dev/null
+++ b/plugins/check_users.cpp
@@ -0,0 +1,225 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "plugins/thresholds.hpp"
+#include <boost/program_options.hpp>
+#include <iostream>
+#include <windows.h>
+#include <shlwapi.h>
+#include <wtsapi32.h>
+
+#define VERSION 1.0
+
+namespace po = boost::program_options;
+
+struct printInfoStruct
+{
+ threshold warn;
+ threshold crit;
+ DOUBLE users;
+};
+
+static bool l_Debug;
+
+static int parseArguments(int ac, WCHAR **av, po::variables_map& vm, printInfoStruct& printInfo)
+{
+ WCHAR namePath[MAX_PATH];
+ GetModuleFileName(NULL, namePath, MAX_PATH);
+ WCHAR *progName = PathFindFileName(namePath);
+
+ po::options_description desc;
+
+ desc.add_options()
+ ("help,h", "Print help message and exit")
+ ("version,V", "Print version and exit")
+ ("debug,d", "Verbose/Debug output")
+ ("warning,w", po::wvalue<std::wstring>(), "Warning threshold")
+ ("critical,c", po::wvalue<std::wstring>(), "Critical threshold")
+ ;
+
+ po::wcommand_line_parser parser(ac, av);
+
+ try {
+ po::store(
+ parser
+ .options(desc)
+ .style(
+ po::command_line_style::unix_style |
+ po::command_line_style::allow_long_disguise)
+ .run(),
+ vm);
+ vm.notify();
+ } catch (const std::exception& e) {
+ std::cout << e.what() << '\n' << desc << '\n';
+ return 3;
+ }
+
+ if (vm.count("help")) {
+ std::wcout << progName << " Help\n\tVersion: " << VERSION << '\n';
+ wprintf(
+ L"%s is a simple program to check a machine's logged-in users.\n"
+ L"You can use the following options to define its behaviour:\n\n", progName);
+ std::cout << desc;
+ wprintf(
+ L"\nIt will then output a string looking something like this:\n\n"
+ L"\tUSERS WARNING 48 | users=48;10;50;0\n\n"
+ L"\"USERS\" being the type of the check, \"WARNING\" the returned status\n"
+ L"and \"48\" is the returned value.\n"
+ L"The performance data is found behind the \"|\", in order:\n"
+ L"returned value, warning threshold, critical threshold, minimal value and,\n"
+ L"if applicable, the maximal value. Performance data will only be displayed when\n"
+ L"you set at least one threshold\n\n"
+ L"%s' exit codes denote the following:\n"
+ L" 0\tOK,\n\tNo thresholds were broken or the program's check part was not executed\n"
+ L" 1\tWARNING,\n\tThe warning, but not the critical threshold was broken\n"
+ L" 2\tCRITICAL,\n\tThe critical threshold was broken\n"
+ L" 3\tUNKNOWN, \n\tThe program experienced an internal or input error\n\n"
+ L"Threshold syntax:\n\n"
+ L"-w THRESHOLD\n"
+ L"warn if threshold is broken, which means VALUE > THRESHOLD\n"
+ L"(unless stated differently)\n\n"
+ L"-w !THRESHOLD\n"
+ L"inverts threshold check, VALUE < THRESHOLD (analogous to above)\n\n"
+ L"-w [THR1-THR2]\n"
+ L"warn if VALUE is inside the range spanned by THR1 and THR2\n\n"
+ L"-w ![THR1-THR2]\n"
+ L"warn if VALUE is outside the range spanned by THR1 and THR2\n\n"
+ L"-w THRESHOLD%%\n"
+ L"if the plugin accepts percentage based thresholds those will be used.\n"
+ L"Does nothing if the plugin does not accept percentages, or only uses\n"
+ L"percentage thresholds. Ranges can be used with \"%%\", but both range values need\n"
+ L"to end with a percentage sign.\n\n"
+ L"All of these options work with the critical threshold \"-c\" too."
+ , progName);
+ std::cout << '\n';
+ return 0;
+ }
+
+ if (vm.count("version"))
+ std::wcout << L"Version: " << VERSION << '\n';
+
+ if (vm.count("warning")) {
+ try {
+ printInfo.warn = threshold(vm["warning"].as<std::wstring>());
+ } catch (const std::invalid_argument& e) {
+ std::cout << e.what() << '\n';
+ return 3;
+ }
+ }
+ if (vm.count("critical")) {
+ try {
+ printInfo.crit = threshold(vm["critical"].as<std::wstring>());
+ } catch (const std::invalid_argument& e) {
+ std::cout << e.what() << '\n';
+ return 3;
+ }
+ }
+
+ l_Debug = vm.count("debug") > 0;
+
+ return -1;
+}
+
+static int printOutput(printInfoStruct& printInfo)
+{
+ if (l_Debug)
+ std::wcout << L"Constructing output string" << '\n';
+
+ state state = OK;
+
+ if (printInfo.warn.rend(printInfo.users))
+ state = WARNING;
+
+ if (printInfo.crit.rend(printInfo.users))
+ state = CRITICAL;
+
+ switch (state) {
+ case OK:
+ std::wcout << L"USERS OK " << printInfo.users << L" User(s) logged in | 'users'=" << printInfo.users << L";"
+ << printInfo.warn.pString() << L";" << printInfo.crit.pString() << L";0;" << '\n';
+ break;
+ case WARNING:
+ std::wcout << L"USERS WARNING " << printInfo.users << L" User(s) logged in | 'users'=" << printInfo.users << L";"
+ << printInfo.warn.pString() << L";" << printInfo.crit.pString() << L";0;" << '\n';
+ break;
+ case CRITICAL:
+ std::wcout << L"USERS CRITICAL " << printInfo.users << L" User(s) logged in | 'users'=" << printInfo.users << L";"
+ << printInfo.warn.pString() << L";" << printInfo.crit.pString() << L";0;" << '\n';
+ break;
+ }
+
+ return state;
+}
+
+static int check_users(printInfoStruct& printInfo)
+{
+ DOUBLE users = 0;
+ WTS_SESSION_INFOW *pSessionInfo = NULL;
+ DWORD count;
+ DWORD index;
+
+ if (l_Debug)
+ std::wcout << L"Trying to enumerate terminal sessions" << '\n';
+
+ if (!WTSEnumerateSessions(WTS_CURRENT_SERVER_HANDLE, 0, 1, &pSessionInfo, &count)) {
+ std::wcout << L"Failed to enumerate terminal sessions" << '\n';
+ printErrorInfo();
+ if (pSessionInfo)
+ WTSFreeMemory(pSessionInfo);
+ return 3;
+ }
+
+ if (l_Debug)
+ std::wcout << L"Got all sessions (" << count << L"), traversing and counting active ones" << '\n';
+
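+ // A session counts as a logged-in user if it has a non-empty user name and is either active or disconnected.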
+ for (index = 0; index < count; index++) {
+ LPWSTR name;
+ DWORD size;
+ int len;
+
+ if (l_Debug)
+ std::wcout << L"Querying session number " << index << '\n';
+
+ if (!WTSQuerySessionInformation(WTS_CURRENT_SERVER_HANDLE, pSessionInfo[index].SessionId,
+ WTSUserName, &name, &size))
+ continue;
+
+ if (l_Debug)
+ std::wcout << L"Found \"" << name << L"\". Checking whether it's a real session" << '\n';
+
+ len = lstrlenW(name);
+
+ if (!len) {
+ WTSFreeMemory(name);
+ continue;
+ }
+
+ if (pSessionInfo[index].State == WTSActive || pSessionInfo[index].State == WTSDisconnected) {
+ users++;
+ if (l_Debug)
+ std::wcout << L"\"" << name << L"\" is a real session, counting it. Now " << users << '\n';
+ }
+
+ // Free the user name only after its last use above; the debug output previously read already-freed memory.
+ WTSFreeMemory(name);
+ }
+
+ if (l_Debug)
+ std::wcout << "Finished counting user sessions (" << users << "). Freeing memory and returning" << '\n';
+
+ WTSFreeMemory(pSessionInfo);
+ printInfo.users = users;
+ return -1;
+}
+
+int wmain(int argc, WCHAR **argv)
+{
+ printInfoStruct printInfo = { };
+ po::variables_map vm;
+
+ int ret = parseArguments(argc, argv, vm, printInfo);
+ if (ret != -1)
+ return ret;
+
+ ret = check_users(printInfo);
+ if (ret != -1)
+ return ret;
+
+ return printOutput(printInfo);
+}
diff --git a/plugins/thresholds.cpp b/plugins/thresholds.cpp
new file mode 100644
index 0000000..bdd67ee
--- /dev/null
+++ b/plugins/thresholds.cpp
@@ -0,0 +1,276 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "plugins/thresholds.hpp"
+#include <boost/algorithm/string.hpp>
+#include <boost/lexical_cast.hpp>
+#include <iostream>
+
+using namespace boost::algorithm;
+
+threshold::threshold()
+ : set(false)
+{}
+
+threshold::threshold(const double v, const double c, bool l , bool p ) {
+ lower = v;
+ upper = c;
+ legal = l;
+ perc = p;
+}
+
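+// Parse a threshold string. Accepted forms: "10", "10%", "[10-20]" and "[10%-20%]", each optionally prefixed with '!' to invert the check.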
+threshold::threshold(const std::wstring& stri)
+{
+ if (stri.empty())
+ throw std::invalid_argument("Threshold must not be empty");
+
+ std::wstring str = stri;
+
+ //kill whitespace
+ boost::algorithm::trim(str);
+
+ bool low = (str.at(0) == L'!');
+ if (low)
+ str = std::wstring(str.begin() + 1, str.end());
+
+ bool pc = false;
+
+ if (str.at(0) == L'[' && str.at(str.length() - 1) == L']') {//is range
+ str = std::wstring(str.begin() + 1, str.end() - 1);
+ std::vector<std::wstring> svec;
+ boost::split(svec, str, boost::is_any_of(L"-"));
+ if (svec.size() != 2)
+ throw std::invalid_argument("Threshold range requires two arguments");
+ std::wstring str1 = svec.at(0), str2 = svec.at(1);
+
+ if (str1.at(str1.length() - 1) == L'%' && str2.at(str2.length() - 1) == L'%') {
+ pc = true;
+ str1 = std::wstring(str1.begin(), str1.end() - 1);
+ str2 = std::wstring(str2.begin(), str2.end() - 1);
+ }
+
+ try {
+ boost::algorithm::trim(str1);
+ lower = boost::lexical_cast<DOUBLE>(str1);
+ boost::algorithm::trim(str2);
+ upper = boost::lexical_cast<DOUBLE>(str2);
+ legal = !low; perc = pc; set = true;
+ } catch (const boost::bad_lexical_cast&) {
+ throw std::invalid_argument("Unknown Threshold type");
+ }
+ } else { //not range
+ if (str.at(str.length() - 1) == L'%') {
+ pc = true;
+ str = std::wstring(str.begin(), str.end() - 1);
+ }
+ try {
+ boost::algorithm::trim(str);
+ lower = upper = boost::lexical_cast<DOUBLE>(str);
+ legal = !low; perc = pc; set = true;
+ } catch (const boost::bad_lexical_cast&) {
+ throw std::invalid_argument("Unknown Threshold type");
+ }
+ }
+}
+
+//return TRUE if the threshold is broken
+bool threshold::rend(const double val, const double max)
+{
+ double upperAbs = upper;
+ double lowerAbs = lower;
+
+ if (perc) {
+ upperAbs = upper / 100.0 * max;
+ lowerAbs = lower / 100.0 * max;
+ }
+
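+ // With legal == true a single value breaks when val > upper and a range breaks when val lies inside [lower, upper]; legal == false inverts both checks.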
+ if (!set)
+ return set;
+ if (lowerAbs == upperAbs)
+ return val > upperAbs == legal;
+ else
+ return (val < lowerAbs || upperAbs < val) != legal;
+}
+
+//returns a printable string of the threshold
+std::wstring threshold::pString(const double max)
+{
+ if (!set)
+ return L"";
+ // transform percentages to absolute values
+ double lowerAbs = lower;
+ double upperAbs = upper;
+ if (perc) {
+ lowerAbs = lower / 100.0 * max;
+ upperAbs = upper / 100.0 * max;
+ }
+
+ std::wstring s, lowerStr = removeZero(lowerAbs),
+ upperStr = removeZero(upperAbs);
+
+ if (lower != upper) {
+ s.append(L"[").append(lowerStr).append(L"-")
+ .append(upperStr).append(L"]");
+ } else
+ s.append(lowerStr);
+
+ return s;
+}
+
+threshold threshold::toSeconds(const Tunit& fromUnit) {
+ if (!set)
+ return *this;
+
+ double lowerAbs = lower;
+ double upperAbs = upper;
+
+ switch (fromUnit) {
+ case TunitMS:
+ lowerAbs = lowerAbs / 1000;
+ upperAbs = upperAbs / 1000;
+ break;
+ case TunitS:
+ // already in seconds, nothing to convert
+ break;
+ case TunitM:
+ lowerAbs = lowerAbs * 60;
+ upperAbs = upperAbs * 60;
+ break;
+ case TunitH:
+ lowerAbs = lowerAbs * 60 * 60;
+ upperAbs = upperAbs * 60 * 60;
+ break;
+ }
+
+ return threshold(lowerAbs, upperAbs, legal, perc);
+}
+
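+// Format a double as text and strip trailing zeros (and a trailing decimal point).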
+std::wstring removeZero(double val)
+{
+ std::wstring ret = boost::lexical_cast<std::wstring>(val);
+ std::wstring::size_type pos = ret.length();
+ if (ret.find_first_of(L".") == std::string::npos)
+ return ret;
+ for (std::wstring::reverse_iterator rit = ret.rbegin(); rit != ret.rend(); ++rit) {
+ if (*rit == L'.') {
+ return ret.substr(0, pos - 1);
+ }
+ if (*rit != L'0') {
+ return ret.substr(0, pos);
+ }
+ pos--;
+ }
+ return L"0";
+}
+
+std::vector<std::wstring> splitMultiOptions(const std::wstring& str)
+{
+ std::vector<std::wstring> sVec;
+ boost::split(sVec, str, boost::is_any_of(L","));
+ return sVec;
+}
+
+Bunit parseBUnit(const std::wstring& str)
+{
+ std::wstring wstr = to_upper_copy(str);
+
+ if (wstr == L"B")
+ return BunitB;
+ if (wstr == L"KB")
+ return BunitkB;
+ if (wstr == L"MB")
+ return BunitMB;
+ if (wstr == L"GB")
+ return BunitGB;
+ if (wstr == L"TB")
+ return BunitTB;
+
+ throw std::invalid_argument("Unknown unit type");
+}
+
+std::wstring BunitStr(const Bunit& unit)
+{
+ switch (unit) {
+ case BunitB:
+ return L"B";
+ case BunitkB:
+ return L"kB";
+ case BunitMB:
+ return L"MB";
+ case BunitGB:
+ return L"GB";
+ case BunitTB:
+ return L"TB";
+ }
+ return L""; // not reached for valid Bunit values
+}
+
+Tunit parseTUnit(const std::wstring& str) {
+ std::wstring wstr = to_lower_copy(str);
+
+ if (wstr == L"ms")
+ return TunitMS;
+ if (wstr == L"s")
+ return TunitS;
+ if (wstr == L"m")
+ return TunitM;
+ if (wstr == L"h")
+ return TunitH;
+
+ throw std::invalid_argument("Unknown unit type");
+}
+
+std::wstring TunitStr(const Tunit& unit)
+{
+ switch (unit) {
+ case TunitMS:
+ return L"ms";
+ case TunitS:
+ return L"s";
+ case TunitM:
+ return L"m";
+ case TunitH:
+ return L"h";
+ }
+ return L""; // not reached for valid Tunit values
+}
+
+void printErrorInfo(unsigned long err)
+{
+ if (!err)
+ err = GetLastError();
+ LPWSTR mBuf = NULL;
+ if (!FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPWSTR)&mBuf, 0, NULL))
+ std::wcout << "Failed to format error message, last error was: " << err << '\n';
+ else {
+ std::wstring message(mBuf);
+ boost::trim_right(message);
+ std::wcout << message << std::endl;
+ }
+}
+
+std::wstring formatErrorInfo(unsigned long err) {
+ std::wostringstream out;
+ if (!err)
+ err = GetLastError();
+ LPWSTR mBuf = NULL;
+ if (!FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPWSTR)&mBuf, 0, NULL))
+ out << "Failed to format error message, last error was: " << err;
+ else {
+ std::wstring tempOut = std::wstring(mBuf);
+ boost::trim_right(tempOut);
+ out << tempOut;
+ }
+
+ return out.str();
+}
+
+std::wstring stateToString(const state& state) {
+ switch (state) {
+ case OK: return L"OK";
+ case WARNING: return L"WARNING";
+ case CRITICAL: return L"CRITICAL";
+ default: return L"UNKNOWN";
+ }
+}
\ No newline at end of file
diff --git a/plugins/thresholds.hpp b/plugins/thresholds.hpp
new file mode 100644
index 0000000..4c47ddb
--- /dev/null
+++ b/plugins/thresholds.hpp
@@ -0,0 +1,64 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef THRESHOLDS_H
+#define THRESHOLDS_H
+
+#include <string>
+#include <vector>
+#include <windows.h>
+
+enum Bunit
+{
+ BunitB = 0, BunitkB = 1, BunitMB = 2, BunitGB = 3, BunitTB = 4
+};
+
+enum Tunit
+{
+ TunitMS, TunitS, TunitM, TunitH
+};
+
+enum state
+{
+ OK = 0, WARNING = 1, CRITICAL = 2
+};
+
+class threshold
+{
+public:
+ // doubles are always enough for ANY 64 bit value
+ double lower;
+ double upper;
+ // true means everything BELOW upper/outside [lower-upper] is fine
+ bool legal;
+ bool perc;
+ bool set;
+
+ threshold();
+
+ threshold(const double v, const double c, bool l = true, bool p = false);
+
+ threshold(const std::wstring&);
+
+ // returns true if the threshold is broken
+ bool rend(const double val, const double max = 100.0);
+
+ // returns a printable string of the threshold
+ std::wstring pString(const double max = 100.0);
+
+ threshold toSeconds(const Tunit& fromUnit);
+};
+
+std::wstring removeZero(double);
+std::vector<std::wstring> splitMultiOptions(const std::wstring&);
+
+Bunit parseBUnit(const std::wstring&);
+std::wstring BunitStr(const Bunit&);
+Tunit parseTUnit(const std::wstring&);
+std::wstring TunitStr(const Tunit&);
+
+void printErrorInfo(unsigned long err = 0);
+std::wstring formatErrorInfo(unsigned long err);
+
+std::wstring stateToString(const state&);
+
+#endif /* THRESHOLDS_H */
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
new file mode 100644
index 0000000..1c972d6
--- /dev/null
+++ b/test/CMakeLists.txt
@@ -0,0 +1,259 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+include(BoostTestTargets)
+
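+# BoostTestTargets provides add_boost_test(), which is expected to build one test binary from SOURCES and register every case listed under TESTS with CTest.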
+set(base_test_SOURCES
+ icingaapplication-fixture.cpp
+ base-array.cpp
+ base-base64.cpp
+ base-convert.cpp
+ base-dictionary.cpp
+ base-fifo.cpp
+ base-json.cpp
+ base-match.cpp
+ base-netstring.cpp
+ base-object.cpp
+ base-object-packer.cpp
+ base-serialize.cpp
+ base-shellescape.cpp
+ base-stacktrace.cpp
+ base-stream.cpp
+ base-string.cpp
+ base-timer.cpp
+ base-tlsutility.cpp
+ base-type.cpp
+ base-utility.cpp
+ base-value.cpp
+ config-apply.cpp
+ config-ops.cpp
+ icinga-checkresult.cpp
+ icinga-dependencies.cpp
+ icinga-legacytimeperiod.cpp
+ icinga-macros.cpp
+ icinga-notification.cpp
+ icinga-perfdata.cpp
+ methods-pluginnotificationtask.cpp
+ remote-configpackageutility.cpp
+ remote-url.cpp
+ ${base_OBJS}
+ $<TARGET_OBJECTS:config>
+ $<TARGET_OBJECTS:remote>
+ $<TARGET_OBJECTS:icinga>
+ $<TARGET_OBJECTS:methods>
+)
+
+if(ICINGA2_UNITY_BUILD)
+ mkunity_target(base test base_test_SOURCES)
+endif()
+
+add_boost_test(base
+ SOURCES test-runner.cpp ${base_test_SOURCES}
+ LIBRARIES ${base_DEPS}
+ TESTS
+ base_array/construct
+ base_array/getset
+ base_array/resize
+ base_array/insert
+ base_array/remove
+ base_array/unique
+ base_array/foreach
+ base_array/clone
+ base_array/json
+ base_base64/base64
+ base_convert/tolong
+ base_convert/todouble
+ base_convert/tostring
+ base_convert/tobool
+ base_dictionary/construct
+ base_dictionary/initializer1
+ base_dictionary/initializer2
+ base_dictionary/get1
+ base_dictionary/get2
+ base_dictionary/foreach
+ base_dictionary/remove
+ base_dictionary/clone
+ base_dictionary/json
+ base_dictionary/keys_ordered
+ base_fifo/construct
+ base_fifo/io
+ base_json/encode
+ base_json/decode
+ base_json/invalid1
+ base_object_packer/pack_null
+ base_object_packer/pack_false
+ base_object_packer/pack_true
+ base_object_packer/pack_number
+ base_object_packer/pack_string
+ base_object_packer/pack_array
+ base_object_packer/pack_object
+ base_match/tolong
+ base_netstring/netstring
+ base_object/construct
+ base_object/getself
+ base_serialize/scalar
+ base_serialize/array
+ base_serialize/dictionary
+ base_serialize/object
+ base_shellescape/escape_basic
+ base_shellescape/escape_quoted
+ base_stacktrace/stacktrace
+ base_stream/readline_stdio
+ base_string/construct
+ base_string/equal
+ base_string/clear
+ base_string/append
+ base_string/trim
+ base_string/contains
+ base_string/replace
+ base_string/index
+ base_string/find
+ base_timer/construct
+ base_timer/interval
+ base_timer/invoke
+ base_timer/scope
+ base_tlsutility/sha1
+ base_tlsutility/iscauptodate_ok
+ base_tlsutility/iscauptodate_expiring
+ base_tlsutility/iscertuptodate_ok
+ base_tlsutility/iscertuptodate_expiring
+ base_tlsutility/iscertuptodate_old
+ base_type/gettype
+ base_type/assign
+ base_type/byname
+ base_type/instantiate
+ base_utility/parse_version
+ base_utility/compare_version
+ base_utility/comparepasswords_works
+ base_utility/comparepasswords_issafe
+ base_utility/validateutf8
+ base_utility/EscapeCreateProcessArg
+ base_utility/TruncateUsingHash
+ base_value/scalar
+ base_value/convert
+ base_value/format
+ config_apply/gettargethosts_literal
+ config_apply/gettargethosts_const
+ config_apply/gettargethosts_swapped
+ config_apply/gettargethosts_two
+ config_apply/gettargethosts_three
+ config_apply/gettargethosts_mixed
+ config_apply/gettargethosts_redundant
+ config_apply/gettargethosts_badconst
+ config_apply/gettargethosts_notliteral
+ config_apply/gettargethosts_wrongop
+ config_apply/gettargethosts_wrongattr
+ config_apply/gettargethosts_wrongvar
+ config_apply/gettargethosts_noindexer
+ config_apply/gettargetservices_literal
+ config_apply/gettargetservices_const
+ config_apply/gettargetservices_swapped_outer
+ config_apply/gettargetservices_swapped_inner
+ config_apply/gettargetservices_two
+ config_apply/gettargetservices_three
+ config_apply/gettargetservices_mixed
+ config_apply/gettargetservices_redundant
+ config_apply/gettargetservices_badconst
+ config_apply/gettargetservices_notliteral
+ config_apply/gettargetservices_wrongop_outer
+ config_apply/gettargetservices_wrongop_host
+ config_apply/gettargetservices_wrongop_service
+ config_apply/gettargetservices_wrongattr_host
+ config_apply/gettargetservices_wrongattr_service
+ config_apply/gettargetservices_wrongvar_host
+ config_apply/gettargetservices_wrongvar_service
+ config_apply/gettargetservices_noindexer_host
+ config_apply/gettargetservices_noindexer_service
+ config_ops/simple
+ config_ops/advanced
+ icinga_checkresult/host_1attempt
+ icinga_checkresult/host_2attempts
+ icinga_checkresult/host_3attempts
+ icinga_checkresult/service_1attempt
+ icinga_checkresult/service_2attempts
+ icinga_checkresult/service_3attempts
+ icinga_checkresult/host_flapping_notification
+ icinga_checkresult/service_flapping_notification
+ icinga_checkresult/suppressed_notification
+ icinga_dependencies/multi_parent
+ icinga_notification/strings
+ icinga_notification/state_filter
+ icinga_notification/type_filter
+ icinga_notification/no_filter_problem_no_duplicate
+ icinga_notification/filter_problem_no_duplicate
+ icinga_notification/volatile_filter_problem_duplicate
+ icinga_notification/no_recovery_filter_no_duplicate
+ icinga_notification/recovery_filter_duplicate
+ icinga_macros/simple
+ icinga_legacytimeperiod/simple
+ icinga_legacytimeperiod/advanced
+ icinga_legacytimeperiod/dst
+ icinga_legacytimeperiod/dst_isinside
+ icinga_perfdata/empty
+ icinga_perfdata/simple
+ icinga_perfdata/quotes
+ icinga_perfdata/multiple
+ icinga_perfdata/multiline
+ icinga_perfdata/normalize
+ icinga_perfdata/uom
+ icinga_perfdata/warncritminmax
+ icinga_perfdata/ignore_invalid_warn_crit_min_max
+ icinga_perfdata/invalid
+ icinga_perfdata/multi
+ icinga_perfdata/scientificnotation
+ icinga_perfdata/parse_edgecases
+ methods_pluginnotificationtask/truncate_long_output
+ remote_configpackageutility/ValidateName
+ remote_url/id_and_path
+ remote_url/parameters
+ remote_url/get_and_set
+ remote_url/format
+ remote_url/illegal_legal_strings
+)
+
+if(ICINGA2_WITH_LIVESTATUS)
+ set(livestatus_test_SOURCES
+ icingaapplication-fixture.cpp
+ livestatus-fixture.cpp
+ livestatus.cpp
+ ${base_OBJS}
+ $<TARGET_OBJECTS:config>
+ $<TARGET_OBJECTS:remote>
+ $<TARGET_OBJECTS:icinga>
+ $<TARGET_OBJECTS:livestatus>
+ $<TARGET_OBJECTS:methods>
+ )
+
+ if(ICINGA2_UNITY_BUILD)
+ mkunity_target(livestatus test livestatus_test_SOURCES)
+ endif()
+
+ add_boost_test(livestatus
+ SOURCES test-runner.cpp ${livestatus_test_SOURCES}
+ LIBRARIES ${base_DEPS}
+ TESTS livestatus/hosts livestatus/services
+ )
+endif()
+
+set(icinga_checkable_test_SOURCES
+ icingaapplication-fixture.cpp
+ icinga-checkable-fixture.cpp
+ icinga-checkable-flapping.cpp
+ ${base_OBJS}
+ $<TARGET_OBJECTS:config>
+ $<TARGET_OBJECTS:remote>
+ $<TARGET_OBJECTS:icinga>
+ $<TARGET_OBJECTS:cli>
+)
+
+if(ICINGA2_UNITY_BUILD)
+ mkunity_target(icinga_checkable test icinga_checkable_test_SOURCES)
+endif()
+
+add_boost_test(icinga_checkable
+ SOURCES test-runner.cpp ${icinga_checkable_test_SOURCES}
+ LIBRARIES ${base_DEPS}
+ TESTS icinga_checkable_flapping/host_not_flapping
+ icinga_checkable_flapping/host_flapping
+ icinga_checkable_flapping/host_flapping_recover
+ icinga_checkable_flapping/host_flapping_docs_example
+)
diff --git a/test/base-array.cpp b/test/base-array.cpp
new file mode 100644
index 0000000..33e54e8
--- /dev/null
+++ b/test/base-array.cpp
@@ -0,0 +1,162 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/array.hpp"
+#include "base/objectlock.hpp"
+#include "base/json.hpp"
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(base_array)
+
+BOOST_AUTO_TEST_CASE(construct)
+{
+ Array::Ptr array = new Array();
+ BOOST_CHECK(array);
+ BOOST_CHECK(array->GetLength() == 0);
+}
+
+BOOST_AUTO_TEST_CASE(getset)
+{
+ Array::Ptr array = new Array();
+ array->Add(7);
+ array->Add(2);
+ array->Add(5);
+ BOOST_CHECK(array->GetLength() == 3);
+ BOOST_CHECK(array->Get(0) == 7);
+ BOOST_CHECK(array->Get(1) == 2);
+ BOOST_CHECK(array->Get(2) == 5);
+
+ array->Set(1, 9);
+ BOOST_CHECK(array->Get(1) == 9);
+
+ array->Remove(1);
+ BOOST_CHECK(array->GetLength() == 2);
+ BOOST_CHECK(array->Get(1) == 5);
+}
+
+BOOST_AUTO_TEST_CASE(resize)
+{
+ Array::Ptr array = new Array();
+ array->Resize(2);
+ BOOST_CHECK(array->GetLength() == 2);
+ BOOST_CHECK(array->Get(0) == Empty);
+ BOOST_CHECK(array->Get(1) == Empty);
+}
+
+BOOST_AUTO_TEST_CASE(insert)
+{
+ Array::Ptr array = new Array();
+
+ array->Insert(0, 11);
+ array->Insert(1, 22);
+ BOOST_CHECK(array->GetLength() == 2);
+ BOOST_CHECK(array->Get(1) == 22);
+
+ array->Insert(0, 33);
+ BOOST_CHECK(array->GetLength() == 3);
+ BOOST_CHECK(array->Get(0) == 33);
+ BOOST_CHECK(array->Get(1) == 11);
+
+ array->Insert(1, 44);
+ BOOST_CHECK(array->GetLength() == 4);
+ BOOST_CHECK(array->Get(0) == 33);
+ BOOST_CHECK(array->Get(1) == 44);
+ BOOST_CHECK(array->Get(2) == 11);
+}
+
+BOOST_AUTO_TEST_CASE(remove)
+{
+ Array::Ptr array = new Array();
+ array->Add(7);
+ array->Add(2);
+ array->Add(5);
+
+ {
+ ObjectLock olock(array);
+ auto it = array->Begin();
+ array->Remove(it);
+ }
+
+ BOOST_CHECK(array->GetLength() == 2);
+ BOOST_CHECK(array->Get(0) == 2);
+
+ array->Clear();
+ BOOST_CHECK(array->GetLength() == 0);
+}
+
+BOOST_AUTO_TEST_CASE(unique)
+{
+ Array::Ptr array = new Array();
+ array->Add("group1");
+ array->Add("group2");
+ array->Add("group1");
+ array->Add("group2");
+
+ Array::Ptr result;
+
+ {
+ ObjectLock olock(array);
+ result = array->Unique();
+ }
+
+ BOOST_CHECK(result->GetLength() == 2);
+ result->Sort();
+
+ BOOST_CHECK(result->Get(0) == "group1");
+ BOOST_CHECK(result->Get(1) == "group2");
+}
+BOOST_AUTO_TEST_CASE(foreach)
+{
+ Array::Ptr array = new Array();
+ array->Add(7);
+ array->Add(2);
+ array->Add(5);
+
+ ObjectLock olock(array);
+
+ int n = 0;
+
+ for (const Value& item : array) {
+ BOOST_CHECK(n != 0 || item == 7);
+ BOOST_CHECK(n != 1 || item == 2);
+ BOOST_CHECK(n != 2 || item == 5);
+
+ n++;
+ }
+}
+
+BOOST_AUTO_TEST_CASE(clone)
+{
+ Array::Ptr array = new Array();
+ array->Add(7);
+ array->Add(2);
+ array->Add(5);
+
+ Array::Ptr clone = array->ShallowClone();
+
+ BOOST_CHECK(clone->GetLength() == 3);
+ BOOST_CHECK(clone->Get(0) == 7);
+ BOOST_CHECK(clone->Get(1) == 2);
+ BOOST_CHECK(clone->Get(2) == 5);
+}
+
+BOOST_AUTO_TEST_CASE(json)
+{
+ Array::Ptr array = new Array();
+ array->Add(7);
+ array->Add(2);
+ array->Add(5);
+
+ String json = JsonEncode(array);
+ BOOST_CHECK(json.GetLength() > 0);
+
+ Array::Ptr deserialized = JsonDecode(json);
+ BOOST_CHECK(deserialized);
+ BOOST_CHECK(deserialized->GetLength() == 3);
+ BOOST_CHECK(deserialized->Get(0) == 7);
+ BOOST_CHECK(deserialized->Get(1) == 2);
+ BOOST_CHECK(deserialized->Get(2) == 5);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/base-base64.cpp b/test/base-base64.cpp
new file mode 100644
index 0000000..f9e6aec
--- /dev/null
+++ b/test/base-base64.cpp
@@ -0,0 +1,45 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/base64.hpp"
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(base_base64)
+
+BOOST_AUTO_TEST_CASE(base64)
+{
+ std::vector<String> clearText;
+ clearText.emplace_back("");
+ clearText.emplace_back("1");
+ clearText.emplace_back("12");
+ clearText.emplace_back("123");
+ clearText.emplace_back("1234");
+ clearText.emplace_back("VsowLvPqEiAeITDmo-5L_NB-k7fsT3sT2d3K9O4iC2uBk41hvCPAxrgGSxrdeX5s"
+ "Zo0Z9b1kxDZlzf8GHQ9ARW6YLeGODMtiZo8cKkUzfSbxyZ_wlE9u6pCTTg9kODCM"
+ "Ve-X_a3jWkOy89RoDkT5ahKBY-8S25L6wlvWt8ZyQ2bLxfplzEzuHgEknTMKKp2K"
+ "jRlwI2p3gF4FYeQM7dx0E5O782Lh1P3IC6jPNqiZgTgWmsRYZbAN8oU2V626bQxD"
+ "n8prQ0Xr_3aPdP7VIVgxNZMnF0NJrQvB_rzq1Dip1UM_xH_9nansbX25E64OQU-r"
+ "q54EdO-vb_9FvyqdeVTJ3UTgXIP7OXtz4K8xlEHWdb1-hJChVvDc0KSnN5HVN2NJ"
+ "yJrAofVyHBxXGRnGMdN8cOwvxzBFsz2Hih_lIqm1NVULm9_J9GoesY-aN8JzGocU"
+ "U3hbhFQBiUlzliuodhwg4RXRcfmPHQRo7kWKaohpySkvqmWcXEAt2LPJ8nH70fW7"
+ "vudgzwwWTnNcMlf0Wa-nKL4xXNNPQD0obDCfogN8uKuGqi0DltOUmFK62Zkkb0_d"
+ "45grssnD5q89MjDGBkGMXuLY_JLOqc7Y9VV6H48vzoTNK1a2kOGV2TrAD8syuA5Z"
+ "o8RLKjTqAYjKTUqEJjg0MflpiBnbDQvRqiSXs1cJuFNXRLpEC5GoqGqMd0zAGn4u"
+ "5J3OurVd0SFp8_vkYUI6YwNUe00y8_Dn6DOBh_0KKADphZBgple82_8HrnQNreQn"
+ "GkB2TpIsjwWud0yuhI-jQZEMNNlhEYMLwx7B-xTGhn0LFC1pLEXn_kZ2NOgDgUHd"
+ "bdj906o3N2Jjo9Fb5GXkCrt-fNEYBjeXvIu73yeTGmsiAzfiICNHi_PmGkgq8fYQ"
+ "O9lQgyRHCMic8zU7ffWuSoUPRgHsqztLHaCDbYIrNmgrn2taxcXSb57Xm_l-1xBH"
+ "bZqdMvBziapJXaLJmhUg03lgdsIc_OuJmzt-sytDLVGIuNqpa4dETdhLsI7qis4B"
+ );
+
+ // 1024 chars
+
+ for (const String& str : clearText) {
+ String enc = Base64::Encode(str);
+ String dec = Base64::Decode(enc);
+ BOOST_CHECK(str == dec);
+ }
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/base-convert.cpp b/test/base-convert.cpp
new file mode 100644
index 0000000..bc7c61b
--- /dev/null
+++ b/test/base-convert.cpp
@@ -0,0 +1,60 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/convert.hpp"
+#include "base/object.hpp"
+#include <BoostTestTargetConfig.h>
+#include <iostream>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(base_convert)
+
+BOOST_AUTO_TEST_CASE(tolong)
+{
+ BOOST_CHECK_THROW(Convert::ToLong(" 7"), boost::exception);
+ BOOST_CHECK(Convert::ToLong("-7") == -7);
+ BOOST_CHECK_THROW(Convert::ToLong("7a"), boost::exception);
+
+ BOOST_CHECK(Convert::ToLong(Value(-7)) == -7);
+
+ BOOST_CHECK(Convert::ToLong(3.141386593) == 3);
+}
+
+BOOST_AUTO_TEST_CASE(todouble)
+{
+ BOOST_CHECK_THROW(Convert::ToDouble(" 7.3"), boost::exception);
+ BOOST_CHECK(Convert::ToDouble("-7.3") == -7.3);
+ BOOST_CHECK_THROW(Convert::ToDouble("7.3a"), boost::exception);
+ BOOST_CHECK(Convert::ToDouble(Value(-7.3)) == -7.3);
+}
+
+BOOST_AUTO_TEST_CASE(tostring)
+{
+ BOOST_CHECK(Convert::ToString(7) == "7");
+ BOOST_CHECK(Convert::ToString(7.5) == "7.500000");
+ BOOST_CHECK(Convert::ToString("hello") == "hello");
+ BOOST_CHECK(Convert::ToString(18446744073709551616.0) == "18446744073709551616"); // pow(2, 64)
+
+ String str = "hello";
+ BOOST_CHECK(Convert::ToString(str) == "hello");
+
+ BOOST_CHECK(Convert::ToString(Value(7)) == "7");
+ BOOST_CHECK(Convert::ToString(Value(7.5)) == "7.500000");
+ BOOST_CHECK(Convert::ToString(Value(18446744073709551616.0)) == "18446744073709551616"); // pow(2, 64)
+ BOOST_CHECK(Convert::ToString(Value("hello")) == "hello");
+ BOOST_CHECK(Convert::ToString(Value("hello hello")) == "hello hello");
+}
+
+BOOST_AUTO_TEST_CASE(tobool)
+{
+ BOOST_CHECK(Convert::ToBool("a") == true);
+ BOOST_CHECK(Convert::ToBool("0") == true);
+ BOOST_CHECK(Convert::ToBool("1") == true);
+ BOOST_CHECK(Convert::ToBool("2") == true);
+ BOOST_CHECK(Convert::ToBool(1) == true);
+ BOOST_CHECK(Convert::ToBool(0) == false);
+ BOOST_CHECK(Convert::ToBool(Value(true)) == true);
+ BOOST_CHECK(Convert::ToBool(Value(false)) == false);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/base-dictionary.cpp b/test/base-dictionary.cpp
new file mode 100644
index 0000000..3469be7
--- /dev/null
+++ b/test/base-dictionary.cpp
@@ -0,0 +1,200 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/dictionary.hpp"
+#include "base/objectlock.hpp"
+#include "base/json.hpp"
+#include "base/string.hpp"
+#include "base/utility.hpp"
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(base_dictionary)
+
+BOOST_AUTO_TEST_CASE(construct)
+{
+ Dictionary::Ptr dictionary = new Dictionary();
+ BOOST_CHECK(dictionary);
+}
+
+BOOST_AUTO_TEST_CASE(initializer1)
+{
+ DictionaryData dict;
+
+ dict.emplace_back("test1", "Gin-o-clock");
+
+ Dictionary::Ptr dictionary = new Dictionary(std::move(dict));
+
+ Value test1;
+ test1 = dictionary->Get("test1");
+ BOOST_CHECK(test1 == "Gin-o-clock");
+}
+
+BOOST_AUTO_TEST_CASE(initializer2)
+{
+ Dictionary::Ptr dictionary = new Dictionary({ {"test1", "Gin-for-the-win"} });
+
+ Value test1;
+ test1 = dictionary->Get("test1");
+ BOOST_CHECK(test1 == "Gin-for-the-win");
+}
+
+BOOST_AUTO_TEST_CASE(get1)
+{
+ Dictionary::Ptr dictionary = new Dictionary();
+ dictionary->Set("test1", 7);
+ dictionary->Set("test2", "hello world");
+
+ BOOST_CHECK(dictionary->GetLength() == 2);
+
+ Value test1;
+ test1 = dictionary->Get("test1");
+ BOOST_CHECK(test1 == 7);
+
+ Value test2;
+ test2 = dictionary->Get("test2");
+ BOOST_CHECK(test2 == "hello world");
+
+ String key3 = "test3";
+ Value test3;
+ test3 = dictionary->Get(key3);
+ BOOST_CHECK(test3.IsEmpty());
+}
+
+BOOST_AUTO_TEST_CASE(get2)
+{
+ Dictionary::Ptr dictionary = new Dictionary();
+ Dictionary::Ptr other = new Dictionary();
+
+ dictionary->Set("test1", other);
+
+ BOOST_CHECK(dictionary->GetLength() == 1);
+
+ Dictionary::Ptr test1 = dictionary->Get("test1");
+ BOOST_CHECK(other == test1);
+
+ Dictionary::Ptr test2 = dictionary->Get("test2");
+ BOOST_CHECK(!test2);
+}
+
+BOOST_AUTO_TEST_CASE(foreach)
+{
+ Dictionary::Ptr dictionary = new Dictionary();
+ dictionary->Set("test1", 7);
+ dictionary->Set("test2", "hello world");
+
+ ObjectLock olock(dictionary);
+
+ bool seen_test1 = false, seen_test2 = false;
+
+ for (const Dictionary::Pair& kv : dictionary) {
+ BOOST_CHECK(kv.first == "test1" || kv.first == "test2");
+
+ if (kv.first == "test1") {
+ BOOST_CHECK(!seen_test1);
+ seen_test1 = true;
+
+ BOOST_CHECK(kv.second == 7);
+
+ continue;
+ } else if (kv.first == "test2") {
+ BOOST_CHECK(!seen_test2);
+ seen_test2 = true;
+
+ BOOST_CHECK(kv.second == "hello world");
+ }
+ }
+
+ BOOST_CHECK(seen_test1);
+ BOOST_CHECK(seen_test2);
+}
+
+BOOST_AUTO_TEST_CASE(remove)
+{
+ Dictionary::Ptr dictionary = new Dictionary();
+
+ dictionary->Set("test1", 7);
+ dictionary->Set("test2", "hello world");
+
+ BOOST_CHECK(dictionary->Contains("test1"));
+ BOOST_CHECK(dictionary->GetLength() == 2);
+
+ dictionary->Set("test1", Empty);
+
+ BOOST_CHECK(dictionary->Contains("test1"));
+ BOOST_CHECK(dictionary->GetLength() == 2);
+
+ dictionary->Remove("test1");
+
+ BOOST_CHECK(!dictionary->Contains("test1"));
+ BOOST_CHECK(dictionary->GetLength() == 1);
+
+ dictionary->Remove("test2");
+
+ BOOST_CHECK(!dictionary->Contains("test2"));
+ BOOST_CHECK(dictionary->GetLength() == 0);
+
+ dictionary->Set("test1", 7);
+ dictionary->Set("test2", "hello world");
+
+ {
+ ObjectLock olock(dictionary);
+
+ auto it = dictionary->Begin();
+ dictionary->Remove(it);
+ }
+
+ BOOST_CHECK(dictionary->GetLength() == 1);
+}
+
+BOOST_AUTO_TEST_CASE(clone)
+{
+ Dictionary::Ptr dictionary = new Dictionary();
+
+ dictionary->Set("test1", 7);
+ dictionary->Set("test2", "hello world");
+
+ Dictionary::Ptr clone = dictionary->ShallowClone();
+
+ BOOST_CHECK(dictionary != clone);
+
+ BOOST_CHECK(clone->GetLength() == 2);
+ BOOST_CHECK(clone->Get("test1") == 7);
+ BOOST_CHECK(clone->Get("test2") == "hello world");
+
+ clone->Set("test3", 5);
+ BOOST_CHECK(!dictionary->Contains("test3"));
+ BOOST_CHECK(dictionary->GetLength() == 2);
+
+ clone->Set("test2", "test");
+ BOOST_CHECK(dictionary->Get("test2") == "hello world");
+}
+
+BOOST_AUTO_TEST_CASE(json)
+{
+ Dictionary::Ptr dictionary = new Dictionary();
+
+ dictionary->Set("test1", 7);
+ dictionary->Set("test2", "hello world");
+
+ String json = JsonEncode(dictionary);
+ BOOST_CHECK(json.GetLength() > 0);
+ Dictionary::Ptr deserialized = JsonDecode(json);
+ BOOST_CHECK(deserialized->GetLength() == 2);
+ BOOST_CHECK(deserialized->Get("test1") == 7);
+ BOOST_CHECK(deserialized->Get("test2") == "hello world");
+}
+
+BOOST_AUTO_TEST_CASE(keys_ordered)
+{
+ Dictionary::Ptr dictionary = new Dictionary();
+
+ for (int i = 0; i < 100; i++) {
+ dictionary->Set(std::to_string(Utility::Random()), Utility::Random());
+ }
+
+ std::vector<String> keys = dictionary->GetKeys();
+ BOOST_CHECK(std::is_sorted(keys.begin(), keys.end()));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/base-fifo.cpp b/test/base-fifo.cpp
new file mode 100644
index 0000000..5ecf1ac
--- /dev/null
+++ b/test/base-fifo.cpp
@@ -0,0 +1,43 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/fifo.hpp"
+#include "base/objectlock.hpp"
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(base_fifo)
+
+BOOST_AUTO_TEST_CASE(construct)
+{
+ FIFO::Ptr fifo = new FIFO();
+ BOOST_CHECK(fifo);
+ BOOST_CHECK(fifo->GetAvailableBytes() == 0);
+
+ fifo->Close();
+}
+
+BOOST_AUTO_TEST_CASE(io)
+{
+ FIFO::Ptr fifo = new FIFO();
+
+ fifo->Write("hello", 5);
+ BOOST_CHECK(fifo->GetAvailableBytes() == 5);
+
+ char buffer1[2];
+ fifo->Read(buffer1, 2, true);
+ BOOST_CHECK(memcmp(buffer1, "he", 2) == 0);
+ BOOST_CHECK(fifo->GetAvailableBytes() == 3);
+
+ char buffer2[5];
+ size_t rc = fifo->Read(buffer2, 5, true);
+ BOOST_CHECK(rc == 3);
+ BOOST_CHECK(memcmp(buffer2, "llo", 3) == 0);
+ BOOST_CHECK(fifo->GetAvailableBytes() == 0);
+
+ BOOST_CHECK(!fifo->IsEof());
+
+ fifo->Close();
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/base-json.cpp b/test/base-json.cpp
new file mode 100644
index 0000000..02bbebb
--- /dev/null
+++ b/test/base-json.cpp
@@ -0,0 +1,110 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/dictionary.hpp"
+#include "base/function.hpp"
+#include "base/namespace.hpp"
+#include "base/array.hpp"
+#include "base/objectlock.hpp"
+#include "base/json.hpp"
+#include <boost/algorithm/string/replace.hpp>
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(base_json)
+
+BOOST_AUTO_TEST_CASE(encode)
+{
+ Dictionary::Ptr input (new Dictionary({
+ { "array", new Array({ new Namespace() }) },
+ { "false", false },
+ { "float", -1.25 },
+ { "fx", new Function("<test>", []() {}) },
+ { "int", -42 },
+ { "null", Value() },
+ { "string", "LF\nTAB\tAUml\xC3\xA4Ill\xC3" },
+ { "true", true },
+ { "uint", 23u }
+ }));
+
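+ // The string value above ends in a lone 0xC3 byte (invalid UTF-8), which the encoder is expected to replace with U+FFFD ("\ufffd" below).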
+ String output (R"EOF({
+ "array": [
+ {}
+ ],
+ "false": false,
+ "float": -1.25,
+ "fx": "Object of type 'Function'",
+ "int": -42,
+ "null": null,
+ "string": "LF\nTAB\tAUml\u00e4Ill\ufffd",
+ "true": true,
+ "uint": 23
+}
+)EOF");
+
+ BOOST_CHECK(JsonEncode(input, true) == output);
+
+ boost::algorithm::replace_all(output, " ", "");
+ boost::algorithm::replace_all(output, "Objectoftype'Function'", "Object of type 'Function'");
+ boost::algorithm::replace_all(output, "\n", "");
+
+ BOOST_CHECK(JsonEncode(input, false) == output);
+}
+
+BOOST_AUTO_TEST_CASE(decode)
+{
+ String input (R"EOF({
+ "array": [
+ {}
+ ],
+ "false": false,
+ "float": -1.25,
+ "int": -42,
+ "null": null,
+ "string": "LF\nTAB\tAUmlIll",
+ "true": true,
+ "uint": 23
+}
+)EOF");
+
+ boost::algorithm::replace_all(input, "AUml", "AUml\xC3\xA4");
+ boost::algorithm::replace_all(input, "Ill", "Ill\xC3");
+
+ auto output ((Dictionary::Ptr)JsonDecode(input));
+ BOOST_CHECK(output->GetKeys() == std::vector<String>({"array", "false", "float", "int", "null", "string", "true", "uint"}));
+
+ auto array ((Array::Ptr)output->Get("array"));
+ BOOST_CHECK(array->GetLength() == 1u);
+
+ auto array0 ((Dictionary::Ptr)array->Get(0));
+ BOOST_CHECK(array0->GetKeys() == std::vector<String>());
+
+ auto fAlse (output->Get("false"));
+ BOOST_CHECK(fAlse.IsBoolean() && !fAlse.ToBool());
+
+ auto fLoat (output->Get("float"));
+ BOOST_CHECK(fLoat.IsNumber() && fLoat.Get<double>() == -1.25);
+
+ auto iNt (output->Get("int"));
+ BOOST_CHECK(iNt.IsNumber() && iNt.Get<double>() == -42.0);
+
+ BOOST_CHECK(output->Get("null").IsEmpty());
+
+ auto string (output->Get("string"));
+ BOOST_CHECK(string.IsString() && string.Get<String>() == "LF\nTAB\tAUml\xC3\xA4Ill\xEF\xBF\xBD");
+
+ auto tRue (output->Get("true"));
+ BOOST_CHECK(tRue.IsBoolean() && tRue.ToBool());
+
+ auto uint (output->Get("uint"));
+ BOOST_CHECK(uint.IsNumber() && uint.Get<double>() == 23.0);
+}
+
+BOOST_AUTO_TEST_CASE(invalid1)
+{
+ BOOST_CHECK_THROW(JsonDecode("\"1.7"), std::exception);
+ BOOST_CHECK_THROW(JsonDecode("{8: \"test\"}"), std::exception);
+ BOOST_CHECK_THROW(JsonDecode("{\"test\": \"test\""), std::exception);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/base-match.cpp b/test/base-match.cpp
new file mode 100644
index 0000000..7fad3cb
--- /dev/null
+++ b/test/base-match.cpp
@@ -0,0 +1,27 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/utility.hpp"
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(base_match)
+
+BOOST_AUTO_TEST_CASE(tolong)
+{
+ BOOST_CHECK(Utility::Match("*", "hello"));
+ BOOST_CHECK(!Utility::Match("\\**", "hello"));
+ BOOST_CHECK(Utility::Match("\\**", "*ello"));
+ BOOST_CHECK(Utility::Match("?e*l?", "hello"));
+ BOOST_CHECK(Utility::Match("?e*l?", "helo"));
+ BOOST_CHECK(!Utility::Match("world", "hello"));
+ BOOST_CHECK(!Utility::Match("hee*", "hello"));
+ BOOST_CHECK(Utility::Match("he??o", "hello"));
+ BOOST_CHECK(Utility::Match("he?", "hel"));
+ BOOST_CHECK(Utility::Match("he*", "hello"));
+ BOOST_CHECK(Utility::Match("he*o", "heo"));
+ BOOST_CHECK(Utility::Match("he**o", "heo"));
+ BOOST_CHECK(Utility::Match("he**o", "hello"));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/base-netstring.cpp b/test/base-netstring.cpp
new file mode 100644
index 0000000..faa7eb5
--- /dev/null
+++ b/test/base-netstring.cpp
@@ -0,0 +1,25 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/netstring.hpp"
+#include "base/fifo.hpp"
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(base_netstring)
+
+BOOST_AUTO_TEST_CASE(netstring)
+{
+ FIFO::Ptr fifo = new FIFO();
+
+ NetString::WriteStringToStream(fifo, "hello");
+
+ String s;
+ StreamReadContext src;
+ BOOST_CHECK(NetString::ReadStringFromStream(fifo, &s, src) == StatusNewItem);
+ BOOST_CHECK(s == "hello");
+
+ fifo->Close();
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/base-object-packer.cpp b/test/base-object-packer.cpp
new file mode 100644
index 0000000..b84705d
--- /dev/null
+++ b/test/base-object-packer.cpp
@@ -0,0 +1,264 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/object-packer.hpp"
+#include "base/value.hpp"
+#include "base/string.hpp"
+#include "base/array.hpp"
+#include "base/dictionary.hpp"
+#include <BoostTestTargetConfig.h>
+#include <climits>
+#include <initializer_list>
+#include <iomanip>
+#include <sstream>
+
+using namespace icinga;
+
+#if CHAR_MIN != 0
+union CharU2SConverter
+{
+ CharU2SConverter()
+ {
+ s = 0;
+ }
+
+ unsigned char u;
+ signed char s;
+};
+#endif
+
+/**
+ * Avoid implementation-defined overflows during unsigned to signed casts
+ */
+static inline char UIntToByte(unsigned i)
+{
+#if CHAR_MIN == 0
+ return i;
+#else
+ CharU2SConverter converter;
+
+ converter.u = i;
+ return converter.s;
+#endif
+}
+
+#if CHAR_MIN != 0
+union CharS2UConverter
+{
+ CharS2UConverter()
+ {
+ u = 0;
+ }
+
+ unsigned char u;
+ signed char s;
+};
+#endif
+
+/**
+ * Avoid implementation-defined underflows during signed to unsigned casts
+ */
+static inline unsigned ByteToUInt(char c)
+{
+#if CHAR_MIN == 0
+ return c;
+#else
+ CharS2UConverter converter;
+
+ converter.s = c;
+ return converter.u;
+#endif
+}
+
+/**
+ * Compare the expected output with the actual output
+ */
+static inline bool ComparePackObjectResult(const String& actualOutput, const std::initializer_list<int>& out)
+{
+ if (actualOutput.GetLength() != out.size())
+ return false;
+
+ auto actualOutputPos = actualOutput.Begin();
+ for (auto byte : out) {
+ if (*actualOutputPos != UIntToByte(byte))
+ return false;
+
+ ++actualOutputPos;
+ }
+
+ return true;
+}
+
+/**
+ * Pack the given input and compare with the expected output
+ */
+static inline bool AssertPackObjectResult(Value in, std::initializer_list<int> out)
+{
+ auto actualOutput = PackObject(in);
+ bool equal = ComparePackObjectResult(actualOutput, out);
+
+ if (!equal) {
+ std::ostringstream buf;
+ buf << std::setw(2) << std::setfill('0') << std::setbase(16);
+
+ buf << "--- ";
+ for (int c : out) {
+ buf << c;
+ }
+ buf << std::endl;
+
+ buf << "+++ ";
+ for (char c : actualOutput) {
+ buf << ByteToUInt(c);
+ }
+ buf << std::endl;
+
+ BOOST_TEST_MESSAGE(buf.str());
+ }
+
+ return equal;
+}
+
+BOOST_AUTO_TEST_SUITE(base_object_packer)
+
+BOOST_AUTO_TEST_CASE(pack_null)
+{
+ BOOST_CHECK(AssertPackObjectResult(Empty, {0}));
+}
+
+BOOST_AUTO_TEST_CASE(pack_false)
+{
+ BOOST_CHECK(AssertPackObjectResult(false, {1}));
+}
+
+BOOST_AUTO_TEST_CASE(pack_true)
+{
+ BOOST_CHECK(AssertPackObjectResult(true, {2}));
+}
+
+BOOST_AUTO_TEST_CASE(pack_number)
+{
+ BOOST_CHECK(AssertPackObjectResult(42.125, {
+ // type
+ 3,
+ // IEEE 754
+ 64, 69, 16, 0, 0, 0, 0, 0
+ }));
+}
+
+BOOST_AUTO_TEST_CASE(pack_string)
+{
+ BOOST_CHECK(AssertPackObjectResult(
+ String(
+ // ASCII (1 to 127)
+ "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ // some keyboard-independent non-ASCII unicode characters
+ "áéíóú"
+ ),
+ {
+ // type
+ 4,
+ // length
+ 0, 0, 0, 0, 0, 0, 0, 137,
+ // ASCII
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
+ // UTF-8
+ 195, 161, 195, 169, 195, 173, 195, 179, 195, 186
+ }
+ ));
+}
+
+BOOST_AUTO_TEST_CASE(pack_array)
+{
+ BOOST_CHECK(AssertPackObjectResult(
+ (Array::Ptr)new Array({Empty, false, true, 42.125, "foobar"}),
+ {
+ // type
+ 5,
+ // length
+ 0, 0, 0, 0, 0, 0, 0, 5,
+ // Empty
+ 0,
+ // false
+ 1,
+ // true
+ 2,
+ // 42.125
+ 3,
+ 64, 69, 16, 0, 0, 0, 0, 0,
+ // "foobar"
+ 4,
+ 0, 0, 0, 0, 0, 0, 0, 6,
+ 102, 111, 111, 98, 97, 114
+ }
+ ));
+}
+
+BOOST_AUTO_TEST_CASE(pack_object)
+{
+ BOOST_CHECK(AssertPackObjectResult(
+ (Dictionary::Ptr)new Dictionary({
+ {"null", Empty},
+ {"false", false},
+ {"true", true},
+ {"42.125", 42.125},
+ {"foobar", "foobar"},
+ {"[]", (Array::Ptr)new Array()}
+ }),
+ {
+ // type
+ 6,
+ // length
+ 0, 0, 0, 0, 0, 0, 0, 6,
+ // "42.125"
+ 0, 0, 0, 0, 0, 0, 0, 6,
+ 52, 50, 46, 49, 50, 53,
+ // 42.125
+ 3,
+ 64, 69, 16, 0, 0, 0, 0, 0,
+ // "[]"
+ 0, 0, 0, 0, 0, 0, 0, 2,
+ 91, 93,
+ // (Array::Ptr)new Array()
+ 5,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ // "false"
+ 0, 0, 0, 0, 0, 0, 0, 5,
+ 102, 97, 108, 115, 101,
+ // false
+ 1,
+ // "foobar"
+ 0, 0, 0, 0, 0, 0, 0, 6,
+ 102, 111, 111, 98, 97, 114,
+ // "foobar"
+ 4,
+ 0, 0, 0, 0, 0, 0, 0, 6,
+ 102, 111, 111, 98, 97, 114,
+ // "null"
+ 0, 0, 0, 0, 0, 0, 0, 4,
+ 110, 117, 108, 108,
+ // Empty
+ 0,
+ // "true"
+ 0, 0, 0, 0, 0, 0, 0, 4,
+ 116, 114, 117, 101,
+ // true
+ 2
+ }
+ ));
+}
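+
+/* Judging by the expected byte sequences above, the PackObject() wire format is: a
+ * one-byte type tag (0 = null, 1 = false, 2 = true, 3 = number, 4 = string, 5 = array,
+ * 6 = dictionary); numbers are followed by their 8-byte big-endian IEEE 754
+ * representation, strings by an 8-byte big-endian length plus the raw bytes, arrays by
+ * an 8-byte element count plus the packed elements, and dictionaries by an 8-byte entry
+ * count plus key (length and bytes, no type tag) / packed value pairs sorted by key.
+ * A small worked example under that reading:
+ *
+ *     PackObject((Array::Ptr)new Array({ true, "hi" }))
+ *     // 5,  0,0,0,0,0,0,0,2            array with two elements
+ *     // 2                              true
+ *     // 4,  0,0,0,0,0,0,0,2,  104,105  "hi"
+ */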
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/base-object.cpp b/test/base-object.cpp
new file mode 100644
index 0000000..fb3c2b3
--- /dev/null
+++ b/test/base-object.cpp
@@ -0,0 +1,39 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/object.hpp"
+#include "base/value.hpp"
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+class TestObject : public Object
+{
+public:
+ DECLARE_PTR_TYPEDEFS(TestObject);
+
+ TestObject::Ptr GetTestRef()
+ {
+ return this;
+ }
+};
+
+BOOST_AUTO_TEST_SUITE(base_object)
+
+BOOST_AUTO_TEST_CASE(construct)
+{
+ Object::Ptr tobject = new TestObject();
+ BOOST_CHECK(tobject);
+}
+
+BOOST_AUTO_TEST_CASE(getself)
+{
+ TestObject::Ptr tobject = new TestObject();
+ TestObject::Ptr tobject_self = tobject->GetTestRef();
+ BOOST_CHECK(tobject == tobject_self);
+
+ Value vobject = tobject;
+ BOOST_CHECK(!vobject.IsEmpty());
+ BOOST_CHECK(vobject.IsObjectType<TestObject>());
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/base-serialize.cpp b/test/base-serialize.cpp
new file mode 100644
index 0000000..3293f86
--- /dev/null
+++ b/test/base-serialize.cpp
@@ -0,0 +1,68 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/perfdatavalue.hpp"
+#include "base/dictionary.hpp"
+#include "base/objectlock.hpp"
+#include "base/serializer.hpp"
+#include "base/array.hpp"
+#include "base/dictionary.hpp"
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(base_serialize)
+
+BOOST_AUTO_TEST_CASE(scalar)
+{
+ BOOST_CHECK(Deserialize(Serialize(7)) == 7);
+ BOOST_CHECK(Deserialize(Serialize(7.3)) == 7.3);
+ BOOST_CHECK(Deserialize(Serialize(Empty)) == Empty);
+ BOOST_CHECK(Deserialize(Serialize("hello")) == "hello");
+}
+
+BOOST_AUTO_TEST_CASE(array)
+{
+ Array::Ptr array = new Array();
+ array->Add(7);
+ array->Add(7.3);
+ array->Add(Empty);
+ array->Add("hello");
+
+ Array::Ptr result = Deserialize(Serialize(array));
+
+ BOOST_CHECK(result->GetLength() == array->GetLength());
+
+ BOOST_CHECK(result->Get(0) == 7);
+ BOOST_CHECK(result->Get(1) == 7.3);
+ BOOST_CHECK(result->Get(2) == Empty);
+ BOOST_CHECK(result->Get(3) == "hello");
+}
+
+BOOST_AUTO_TEST_CASE(dictionary)
+{
+ Dictionary::Ptr dict = new Dictionary();
+ dict->Set("k1", 7);
+ dict->Set("k2", 7.3);
+ dict->Set("k3", Empty);
+ dict->Set("k4", "hello");
+
+ Dictionary::Ptr result = Deserialize(Serialize(dict));
+
+ BOOST_CHECK(result->GetLength() == dict->GetLength());
+
+ BOOST_CHECK(result->Get("k1") == 7);
+ BOOST_CHECK(result->Get("k2") == 7.3);
+ BOOST_CHECK(result->Get("k3") == Empty);
+ BOOST_CHECK(result->Get("k4") == "hello");
+}
+
+BOOST_AUTO_TEST_CASE(object)
+{
+ PerfdataValue::Ptr pdv = new PerfdataValue("size", 100, true, "bytes");
+
+ PerfdataValue::Ptr result = Deserialize(Serialize(pdv));
+
+ BOOST_CHECK(result->GetValue() == pdv->GetValue());
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/base-shellescape.cpp b/test/base-shellescape.cpp
new file mode 100644
index 0000000..1eb0eae
--- /dev/null
+++ b/test/base-shellescape.cpp
@@ -0,0 +1,32 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/utility.hpp"
+#include <BoostTestTargetConfig.h>
+#include <iostream>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(base_shellescape)
+
+BOOST_AUTO_TEST_CASE(escape_basic)
+{
+#ifdef _WIN32
+ BOOST_CHECK(Utility::EscapeShellCmd("%PATH%") == "^%PATH^%");
+#else /* _WIN32 */
+ BOOST_CHECK(Utility::EscapeShellCmd("$PATH") == "\\$PATH");
+ BOOST_CHECK(Utility::EscapeShellCmd("\\$PATH") == "\\\\\\$PATH");
+#endif /* _WIN32 */
+}
+
+BOOST_AUTO_TEST_CASE(escape_quoted)
+{
+#ifdef _WIN32
+ BOOST_CHECK(Utility::EscapeShellCmd("'hello'") == "^'hello^'");
+ BOOST_CHECK(Utility::EscapeShellCmd("\"hello\"") == "^\"hello^\"");
+#else /* _WIN32 */
+ BOOST_CHECK(Utility::EscapeShellCmd("'hello'") == "'hello'");
+ BOOST_CHECK(Utility::EscapeShellCmd("'hello") == "\\'hello");
+#endif /* _WIN32 */
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/base-stacktrace.cpp b/test/base-stacktrace.cpp
new file mode 100644
index 0000000..f0b87e2
--- /dev/null
+++ b/test/base-stacktrace.cpp
@@ -0,0 +1,72 @@
+/* Icinga 2 | (c) 2020 Icinga GmbH | GPLv2+ */
+
+#include "base/stacktrace.hpp"
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+
+/* If you are reading this, you are probably doing so because this test case just failed. This might happen as it
+ * heavily depends on platform and compiler behavior. There are two likely reasons why this could break:
+ *
+ * - Your compiler found new ways to optimize the functions that are called to create a stack, even though we tried
+ * to disable optimizations using #pragmas for some compilers. If you know a way to disable (more) optimizations for
+ * your compiler, you can try if this helps.
+ *
+ * - Boost fails to resolve symbol names as we've already seen on some platforms. In this case, you can try again
+ * passing the additional flag `-DICINGA2_STACKTRACE_USE_BACKTRACE_SYMBOLS=ON` to CMake and see if this helps.
+ *
+ * In any case, please report a bug. If you run `make CTEST_OUTPUT_ON_FAILURE=1 test`, the stack trace in question
+ * should be printed. If it looks somewhat meaningful, you can probably ignore a failure of this test case.
+ */
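+
+/* For reference, switching to backtrace_symbols() as suggested above means re-running
+ * CMake with that flag, e.g. (the source path is just a placeholder):
+ *
+ *     cmake -DICINGA2_STACKTRACE_USE_BACKTRACE_SYMBOLS=ON /path/to/icinga2
+ *     make CTEST_OUTPUT_ON_FAILURE=1 test
+ */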
+
+#pragma GCC push_options
+#pragma GCC optimize ("O0")
+#pragma clang optimize off
+#ifdef _MSC_VER
+#pragma optimize("", off)
+#endif /* _MSC_VER */
+
+BOOST_AUTO_TEST_SUITE(base_stacktrace)
+
+[[gnu::noinline]]
+void stack_test_func_b()
+{
+ boost::stacktrace::stacktrace stack;
+ std::ostringstream obuf;
+ obuf << StackTraceFormatter(stack);
+ std::string result = obuf.str();
+ BOOST_CHECK_MESSAGE(!result.empty(), "stack trace must not be empty");
+ size_t pos_a = result.find("stack_test_func_a");
+ size_t pos_b = result.find("stack_test_func_b");
+ BOOST_CHECK_MESSAGE(pos_a != std::string::npos, "'stack_test_func_a' not found\n\n" << result);
+ BOOST_CHECK_MESSAGE(pos_b != std::string::npos, "'stack_test_func_b' not found\n\n" << result);
+ BOOST_CHECK_MESSAGE(pos_a > pos_b, "'stack_test_func_a' must appear after 'stack_test_func_b'\n\n" << result);
+}
+
+[[gnu::noinline]]
+void stack_test_func_a()
+{
+ boost::stacktrace::stacktrace stack;
+ std::ostringstream obuf;
+ obuf << StackTraceFormatter(stack);
+ std::string result = obuf.str();
+ BOOST_CHECK_MESSAGE(!result.empty(), "stack trace must not be empty");
+ size_t pos_a = result.find("stack_test_func_a");
+ BOOST_CHECK_MESSAGE(pos_a != std::string::npos, "'stack_test_func_a' not found\n\n" << result);
+
+ stack_test_func_b();
+}
+
+BOOST_AUTO_TEST_CASE(stacktrace)
+{
+ stack_test_func_a();
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
+#pragma GCC pop_options
+#pragma clang optimize on
+#ifdef _MSC_VER
+#pragma optimize("", on)
+#endif /* _MSC_VER */
diff --git a/test/base-stream.cpp b/test/base-stream.cpp
new file mode 100644
index 0000000..34a93a2
--- /dev/null
+++ b/test/base-stream.cpp
@@ -0,0 +1,39 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/stdiostream.hpp"
+#include "base/string.hpp"
+#include <BoostTestTargetConfig.h>
+#include <sstream>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(base_stream)
+
+BOOST_AUTO_TEST_CASE(readline_stdio)
+{
+ std::stringstream msgbuf;
+ msgbuf << "Hello\nWorld\n\n";
+
+ StdioStream::Ptr stdstream = new StdioStream(&msgbuf, false);
+
+ StreamReadContext rlc;
+
+ String line;
+ BOOST_CHECK(stdstream->ReadLine(&line, rlc) == StatusNewItem);
+ BOOST_CHECK(line == "Hello");
+
+ BOOST_CHECK(stdstream->ReadLine(&line, rlc) == StatusNewItem);
+ BOOST_CHECK(line == "World");
+
+ BOOST_CHECK(stdstream->ReadLine(&line, rlc) == StatusNewItem);
+ BOOST_CHECK(line == "");
+
+ BOOST_CHECK(stdstream->ReadLine(&line, rlc) == StatusNewItem);
+ BOOST_CHECK(line == "");
+
+ BOOST_CHECK(stdstream->ReadLine(&line, rlc) == StatusEof);
+
+ stdstream->Close();
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/base-string.cpp b/test/base-string.cpp
new file mode 100644
index 0000000..835b1a6
--- /dev/null
+++ b/test/base-string.cpp
@@ -0,0 +1,104 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/string.hpp"
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(base_string)
+
+BOOST_AUTO_TEST_CASE(construct)
+{
+ BOOST_CHECK(String() == "");
+ BOOST_CHECK(String(5, 'n') == "nnnnn");
+}
+
+BOOST_AUTO_TEST_CASE(equal)
+{
+ BOOST_CHECK(String("hello") == String("hello"));
+ BOOST_CHECK("hello" == String("hello"));
+ BOOST_CHECK(String("hello") == String("hello"));
+
+ BOOST_CHECK(String("hello") != String("helloworld"));
+ BOOST_CHECK("hello" != String("helloworld"));
+ BOOST_CHECK(String("hello") != "helloworld");
+}
+
+BOOST_AUTO_TEST_CASE(clear)
+{
+ String s = "hello";
+ s.Clear();
+ BOOST_CHECK(s == "");
+ BOOST_CHECK(s.IsEmpty());
+}
+
+BOOST_AUTO_TEST_CASE(append)
+{
+ String s;
+ s += "he";
+ s += String("ll");
+ s += 'o';
+
+ BOOST_CHECK(s == "hello");
+}
+
+BOOST_AUTO_TEST_CASE(trim)
+{
+ String s1 = "hello";
+ BOOST_CHECK(s1.Trim() == "hello");
+
+ String s2 = " hello";
+ BOOST_CHECK(s2.Trim() == "hello");
+
+ String s3 = "hello ";
+ BOOST_CHECK(s3.Trim() == "hello");
+
+ String s4 = " hello ";
+ BOOST_CHECK(s4.Trim() == "hello");
+}
+
+BOOST_AUTO_TEST_CASE(contains)
+{
+ String s1 = "hello world";
+ String s2 = "hello";
+ BOOST_CHECK(s1.Contains(s2));
+
+ String s3 = " hello world ";
+ String s4 = " hello";
+ BOOST_CHECK(s3.Contains(s4));
+
+ String s5 = " hello world ";
+ String s6 = "world ";
+ BOOST_CHECK(s5.Contains(s6));
+}
+
+BOOST_AUTO_TEST_CASE(replace)
+{
+ String s = "hello";
+
+ s.Replace(0, 2, "x");
+ BOOST_CHECK(s == "xllo");
+}
+
+BOOST_AUTO_TEST_CASE(index)
+{
+ String s = "hello";
+ BOOST_CHECK(s[0] == 'h');
+
+ s[0] = 'x';
+ BOOST_CHECK(s == "xello");
+
+ for (char& ch : s) {
+ ch = 'y';
+ }
+ BOOST_CHECK(s == "yyyyy");
+}
+
+BOOST_AUTO_TEST_CASE(find)
+{
+ String s = "hello";
+ BOOST_CHECK(s.Find("ll") == 2);
+ BOOST_CHECK(s.FindFirstOf("xl") == 2);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/base-timer.cpp b/test/base-timer.cpp
new file mode 100644
index 0000000..696a474
--- /dev/null
+++ b/test/base-timer.cpp
@@ -0,0 +1,61 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/timer.hpp"
+#include "base/utility.hpp"
+#include "base/application.hpp"
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(base_timer)
+
+BOOST_AUTO_TEST_CASE(construct)
+{
+ Timer::Ptr timer = Timer::Create();
+ BOOST_CHECK(timer);
+}
+
+BOOST_AUTO_TEST_CASE(interval)
+{
+ Timer::Ptr timer = Timer::Create();
+ timer->SetInterval(1.5);
+ BOOST_CHECK(timer->GetInterval() == 1.5);
+}
+
+int counter = 0;
+
+static void Callback(const Timer * const&)
+{
+ counter++;
+}
+
+BOOST_AUTO_TEST_CASE(invoke)
+{
+ Timer::Ptr timer = Timer::Create();
+ timer->OnTimerExpired.connect(&Callback);
+ timer->SetInterval(1);
+
+ counter = 0;
+ timer->Start();
+ Utility::Sleep(5.5);
+ timer->Stop();
+
+ BOOST_CHECK(counter >= 4 && counter <= 6);
+}
+
+BOOST_AUTO_TEST_CASE(scope)
+{
+ Timer::Ptr timer = Timer::Create();
+ timer->OnTimerExpired.connect(&Callback);
+ timer->SetInterval(1);
+
+ counter = 0;
+ timer->Start();
+ Utility::Sleep(5.5);
+ timer.reset();
+ Utility::Sleep(5.5);
+
+ BOOST_CHECK(counter >= 4 && counter <= 6);
+}
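+
+/* The two timing-based cases above are intentionally tolerant: with a 1 second interval
+ * and roughly 5.5 seconds of runtime the callback should fire about five times, so any
+ * count between 4 and 6 passes. The "scope" case additionally verifies that dropping the
+ * last Timer::Ptr reference stops the callback, i.e. the counter must not keep growing
+ * during the second Sleep().
+ */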
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/base-tlsutility.cpp b/test/base-tlsutility.cpp
new file mode 100644
index 0000000..2e611e4
--- /dev/null
+++ b/test/base-tlsutility.cpp
@@ -0,0 +1,135 @@
+/* Icinga 2 | (c) 2021 Icinga GmbH | GPLv2+ */
+
+#include "base/tlsutility.hpp"
+#include <BoostTestTargetConfig.h>
+#include <functional>
+#include <memory>
+#include <openssl/asn1.h>
+#include <openssl/bn.h>
+#include <openssl/evp.h>
+#include <openssl/obj_mac.h>
+#include <openssl/rsa.h>
+#include <openssl/x509.h>
+#include <utility>
+#include <vector>
+
+using namespace icinga;
+
+static EVP_PKEY* GenKeypair()
+{
+ InitializeOpenSSL();
+
+ auto e (BN_new());
+ BOOST_REQUIRE(e);
+
+ auto rsa (RSA_new());
+ BOOST_REQUIRE(rsa);
+
+ auto key (EVP_PKEY_new());
+ BOOST_REQUIRE(key);
+
+ BOOST_REQUIRE(BN_set_word(e, RSA_F4));
+ BOOST_REQUIRE(RSA_generate_key_ex(rsa, 4096, e, nullptr));
+ BOOST_REQUIRE(EVP_PKEY_assign_RSA(key, rsa));
+
+ return key;
+}
+
+static std::shared_ptr<X509> MakeCert(const char* issuer, EVP_PKEY* signer, const char* subject, EVP_PKEY* pubkey, std::function<void(ASN1_TIME*, ASN1_TIME*)> setTimes)
+{
+ auto cert (X509_new());
+ BOOST_REQUIRE(cert);
+
+ auto serial (BN_new());
+ BOOST_REQUIRE(serial);
+
+ BOOST_REQUIRE(X509_set_version(cert, 0x2));
+ BOOST_REQUIRE(BN_to_ASN1_INTEGER(serial, X509_get_serialNumber(cert)));
+ BOOST_REQUIRE(X509_NAME_add_entry_by_NID(X509_get_issuer_name(cert), NID_commonName, MBSTRING_ASC, (unsigned char*)issuer, -1, -1, 0));
+ setTimes(X509_get_notBefore(cert), X509_get_notAfter(cert));
+ BOOST_REQUIRE(X509_NAME_add_entry_by_NID(X509_get_subject_name(cert), NID_commonName, MBSTRING_ASC, (unsigned char*)subject, -1, -1, 0));
+ BOOST_REQUIRE(X509_set_pubkey(cert, pubkey));
+ BOOST_REQUIRE(X509_sign(cert, signer, EVP_sha256()));
+
+ return std::shared_ptr<X509>(cert, X509_free);
+}
+
+static const long l_2016 = 1480000000; // Thu Nov 24 15:06:40 UTC 2016
+static const long l_2017 = 1490000000; // Mon Mar 20 08:53:20 UTC 2017
+
+BOOST_AUTO_TEST_SUITE(base_tlsutility)
+
+BOOST_AUTO_TEST_CASE(sha1)
+{
+ std::string allchars;
+ for (size_t i = 0; i < 256; i++) {
+ allchars.push_back(i);
+ }
+
+ std::vector<std::pair<std::string,std::string>> testdata = {
+ {"", "da39a3ee5e6b4b0d3255bfef95601890afd80709"},
+ {"icinga", "f172c5e9e4d840a55356882a2b644846b302b216"},
+ {"Icinga", "b3bdae77f60d9065f6152c7e3bbd351fa65e6fab"},
+ {"ICINGA", "335da1d814abeef09b4623e2ce5169140c267a39"},
+ {"#rX|wlcM:.8)uVmxz", "99dc4d34caf36c6d6b08404135f1a7286211be1e"},
+ {"AgbM;Z8Tz1!Im,kecZWs", "aa793bef1ca307012980ae5ae046b7e929f6ed99"},
+ {"yLUA4vKQ~24W}ahI;i?NLLS", "5e1a5ee3bd9fae5150681ef656ad43d9cb8e7005"},
+ {allchars, "4916d6bdb7f78e6803698cab32d1586ea457dfc8"},
+ };
+
+ for (const auto& p : testdata) {
+ const auto& input = p.first;
+ const auto& expected = p.second;
+ auto output = SHA1(input);
+ BOOST_CHECK_MESSAGE(output == expected, "SHA1('" << input << "') should be " << expected << ", got " << output);
+ }
+}
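+
+/* As the test data above shows, SHA1() returns the digest as a 40-character lowercase
+ * hex string; the first entry is the well-known SHA-1 of the empty string.
+ */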
+
+BOOST_AUTO_TEST_CASE(iscauptodate_ok)
+{
+ auto key (GenKeypair());
+
+ BOOST_CHECK(IsCaUptodate(MakeCert("Icinga CA", key, "Icinga CA", key, [](ASN1_TIME* notBefore, ASN1_TIME* notAfter) {
+ BOOST_REQUIRE(X509_gmtime_adj(notBefore, 0));
+ BOOST_REQUIRE(X509_gmtime_adj(notAfter, LEAF_VALID_FOR + 60 * 60));
+ }).get()));
+}
+
+BOOST_AUTO_TEST_CASE(iscauptodate_expiring)
+{
+ auto key (GenKeypair());
+
+ BOOST_CHECK(!IsCaUptodate(MakeCert("Icinga CA", key, "Icinga CA", key, [](ASN1_TIME* notBefore, ASN1_TIME* notAfter) {
+ BOOST_REQUIRE(X509_gmtime_adj(notBefore, 0));
+ BOOST_REQUIRE(X509_gmtime_adj(notAfter, LEAF_VALID_FOR - 60 * 60));
+ }).get()));
+}
+
+BOOST_AUTO_TEST_CASE(iscertuptodate_ok)
+{
+ BOOST_CHECK(IsCertUptodate(MakeCert("Icinga CA", GenKeypair(), "example.com", GenKeypair(), [](ASN1_TIME* notBefore, ASN1_TIME* notAfter) {
+ time_t epoch = 0;
+ BOOST_REQUIRE(X509_time_adj(notBefore, l_2017, &epoch));
+ BOOST_REQUIRE(X509_gmtime_adj(notAfter, RENEW_THRESHOLD + 60 * 60));
+ })));
+}
+
+BOOST_AUTO_TEST_CASE(iscertuptodate_expiring)
+{
+ BOOST_CHECK(!IsCertUptodate(MakeCert("Icinga CA", GenKeypair(), "example.com", GenKeypair(), [](ASN1_TIME* notBefore, ASN1_TIME* notAfter) {
+ time_t epoch = 0;
+ BOOST_REQUIRE(X509_time_adj(notBefore, l_2017, &epoch));
+ BOOST_REQUIRE(X509_gmtime_adj(notAfter, RENEW_THRESHOLD - 60 * 60));
+ })));
+}
+
+BOOST_AUTO_TEST_CASE(iscertuptodate_old)
+{
+ BOOST_CHECK(!IsCertUptodate(MakeCert("Icinga CA", GenKeypair(), "example.com", GenKeypair(), [](ASN1_TIME* notBefore, ASN1_TIME* notAfter) {
+ time_t epoch = 0;
+ BOOST_REQUIRE(X509_time_adj(notBefore, l_2016, &epoch));
+ BOOST_REQUIRE(X509_gmtime_adj(notAfter, RENEW_THRESHOLD + 60 * 60));
+ })));
+}
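+
+/* Read together, the certificate cases above say: IsCaUptodate() requires the CA
+ * certificate to stay valid for more than LEAF_VALID_FOR, IsCertUptodate() requires a
+ * leaf certificate to stay valid for more than RENEW_THRESHOLD, and, judging by the
+ * l_2016 vs. l_2017 cases, a certificate whose notBefore predates some cutoff in 2017
+ * is treated as outdated even if it has not expired yet.
+ */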
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/base-type.cpp b/test/base-type.cpp
new file mode 100644
index 0000000..21bcf43
--- /dev/null
+++ b/test/base-type.cpp
@@ -0,0 +1,47 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/perfdatavalue.hpp"
+#include "base/dictionary.hpp"
+#include "base/objectlock.hpp"
+#include "base/application.hpp"
+#include "base/type.hpp"
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(base_type)
+
+BOOST_AUTO_TEST_CASE(gettype)
+{
+ Type::Ptr t = Type::GetByName("Application");
+
+ BOOST_CHECK(t);
+}
+
+BOOST_AUTO_TEST_CASE(assign)
+{
+ Type::Ptr t1 = Type::GetByName("Application");
+ Type::Ptr t2 = Type::GetByName("ConfigObject");
+
+ BOOST_CHECK(t1->IsAssignableFrom(t1));
+ BOOST_CHECK(t2->IsAssignableFrom(t1));
+ BOOST_CHECK(!t1->IsAssignableFrom(t2));
+}
+
+BOOST_AUTO_TEST_CASE(byname)
+{
+ Type::Ptr t = Type::GetByName("Application");
+
+ BOOST_CHECK(t);
+}
+
+BOOST_AUTO_TEST_CASE(instantiate)
+{
+ Type::Ptr t = Type::GetByName("PerfdataValue");
+
+ Object::Ptr p = t->Instantiate(std::vector<Value>());
+
+ BOOST_CHECK(p);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/base-utility.cpp b/test/base-utility.cpp
new file mode 100644
index 0000000..65222e1
--- /dev/null
+++ b/test/base-utility.cpp
@@ -0,0 +1,138 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/utility.hpp"
+#include <chrono>
+#include <BoostTestTargetConfig.h>
+
+#ifdef _WIN32
+# include <windows.h>
+# include <shellapi.h>
+#endif /* _WIN32 */
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(base_utility)
+
+BOOST_AUTO_TEST_CASE(parse_version)
+{
+ BOOST_CHECK(Utility::ParseVersion("2.11.0-0.rc1.1") == "2.11.0");
+ BOOST_CHECK(Utility::ParseVersion("v2.10.5") == "2.10.5");
+ BOOST_CHECK(Utility::ParseVersion("r2.11.1") == "2.11.1");
+ BOOST_CHECK(Utility::ParseVersion("v2.11.0-rc1-58-g7c1f716da") == "2.11.0");
+
+ BOOST_CHECK(Utility::ParseVersion("v2.11butactually3.0") == "v2.11butactually3.0");
+}
+
+BOOST_AUTO_TEST_CASE(compare_version)
+{
+ BOOST_CHECK(Utility::CompareVersion("2.10.5", Utility::ParseVersion("v2.10.4")) < 0);
+ BOOST_CHECK(Utility::CompareVersion("2.11.0", Utility::ParseVersion("2.11.0-0")) == 0);
+ BOOST_CHECK(Utility::CompareVersion("2.10.5", Utility::ParseVersion("2.11.0-0.rc1.1")) > 0);
+}
+
+BOOST_AUTO_TEST_CASE(comparepasswords_works)
+{
+ BOOST_CHECK(Utility::ComparePasswords("", ""));
+
+ BOOST_CHECK(!Utility::ComparePasswords("x", ""));
+ BOOST_CHECK(!Utility::ComparePasswords("", "x"));
+
+ BOOST_CHECK(Utility::ComparePasswords("x", "x"));
+ BOOST_CHECK(!Utility::ComparePasswords("x", "y"));
+
+ BOOST_CHECK(Utility::ComparePasswords("abcd", "abcd"));
+ BOOST_CHECK(!Utility::ComparePasswords("abc", "abcd"));
+ BOOST_CHECK(!Utility::ComparePasswords("abcde", "abcd"));
+}
+
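+/* This case probes whether ComparePasswords() runs in roughly constant time: comparing
+ * two ~200 MB strings that are equal and two that differ should take about the same
+ * time, so the ratio of the two measurements is expected to stay within 10%. It is only
+ * a BOOST_WARN because wall-clock timing is inherently noisy.
+ */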
+BOOST_AUTO_TEST_CASE(comparepasswords_issafe)
+{
+ using std::chrono::duration_cast;
+ using std::chrono::microseconds;
+ using std::chrono::steady_clock;
+
+ String a, b;
+
+ a.Append(200000001, 'a');
+ b.Append(200000002, 'b');
+
+ auto start1 (steady_clock::now());
+
+ Utility::ComparePasswords(a, a);
+
+ auto duration1 (steady_clock::now() - start1);
+
+ auto start2 (steady_clock::now());
+
+ Utility::ComparePasswords(a, b);
+
+ auto duration2 (steady_clock::now() - start2);
+
+ double diff = (double)duration_cast<microseconds>(duration1).count() / (double)duration_cast<microseconds>(duration2).count();
+ BOOST_WARN(0.9 <= diff && diff <= 1.1);
+}
+
+BOOST_AUTO_TEST_CASE(validateutf8)
+{
+ BOOST_CHECK(Utility::ValidateUTF8("") == "");
+ BOOST_CHECK(Utility::ValidateUTF8("a") == "a");
+ BOOST_CHECK(Utility::ValidateUTF8("\xC3") == "\xEF\xBF\xBD");
+ BOOST_CHECK(Utility::ValidateUTF8("\xC3\xA4") == "\xC3\xA4");
+}
+
+BOOST_AUTO_TEST_CASE(EscapeCreateProcessArg)
+{
+#ifdef _WIN32
+ using convert = std::wstring_convert<std::codecvt<wchar_t, char, std::mbstate_t>, wchar_t>;
+
+ std::vector<std::string> testdata = {
+ R"(foobar)",
+ R"(foo bar)",
+ R"(foo"bar)",
+ R"("foo bar")",
+ R"(" \" \\" \\\" \\\\")",
+ R"( !"#$$%&'()*+,-./09:;<=>?@AZ[\]^_`az{|}~ " \" \\" \\\" \\\\")",
+ "'foo\nbar'",
+ };
+
+ for (const auto& t : testdata) {
+ // Prepend some fake exec name as the first argument is handled differently.
+ std::string escaped = "some.exe " + Utility::EscapeCreateProcessArg(t);
+ int argc;
+ std::shared_ptr<LPWSTR> argv(CommandLineToArgvW(convert{}.from_bytes(escaped.c_str()).data(), &argc), LocalFree);
+ BOOST_CHECK_MESSAGE(argv != nullptr, "CommandLineToArgvW() should not return nullptr for " << t);
+ BOOST_CHECK_MESSAGE(argc == 2, "CommandLineToArgvW() should find 2 arguments for " << t);
+ if (argc >= 2) {
+ std::string unescaped = convert{}.to_bytes(argv.get()[1]);
+ BOOST_CHECK_MESSAGE(unescaped == t,
+ "CommandLineToArgvW() should return original value for " << t << " (got: " << unescaped << ")");
+ }
+ }
+#endif /* _WIN32 */
+}
+
+BOOST_AUTO_TEST_CASE(TruncateUsingHash)
+{
+ /*
+ * Note: be careful when changing the output of TruncateUsingHash as it is used to derive file names that should not
+ * change between versions or would need special handling if they do (/var/lib/icinga2/api/packages/_api).
+ */
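+
+ /*
+ * Judging by the expectations below, the scheme is: inputs strictly shorter than
+ * maxLength are returned unchanged, anything of length maxLength or more is cut down
+ * to the first (maxLength - 43) characters followed by "..." and what appears to be
+ * the 40-character SHA-1 hex digest of the full input, so the result is always exactly
+ * maxLength characters long.
+ */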
+
+ /* minimum allowed value for maxLength template parameter */
+ BOOST_CHECK_EQUAL(Utility::TruncateUsingHash<44>(std::string(64, 'a')),
+ "a...0098ba824b5c16427bd7a1122a5a442a25ec644d");
+
+ BOOST_CHECK_EQUAL(Utility::TruncateUsingHash<80>(std::string(100, 'a')),
+ std::string(37, 'a') + "...7f9000257a4918d7072655ea468540cdcbd42e0c");
+
+ /* short enough values should not be truncated */
+ BOOST_CHECK_EQUAL(Utility::TruncateUsingHash<80>(""), "");
+ BOOST_CHECK_EQUAL(Utility::TruncateUsingHash<80>(std::string(60, 'a')), std::string(60, 'a'));
+ BOOST_CHECK_EQUAL(Utility::TruncateUsingHash<80>(std::string(79, 'a')), std::string(79, 'a'));
+
+ /* inputs of maxLength are hashed to avoid collisions */
+ BOOST_CHECK_EQUAL(Utility::TruncateUsingHash<80>(std::string(80, 'a')),
+ std::string(37, 'a') + "...86f33652fcffd7fa1443e246dd34fe5d00e25ffd");
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/base-value.cpp b/test/base-value.cpp
new file mode 100644
index 0000000..c53b8e9
--- /dev/null
+++ b/test/base-value.cpp
@@ -0,0 +1,53 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/value.hpp"
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(base_value)
+
+BOOST_AUTO_TEST_CASE(scalar)
+{
+ Value v;
+
+ v = 3;
+ BOOST_CHECK(v.IsScalar());
+
+ v = "hello";
+ BOOST_CHECK(v.IsScalar());
+
+ v = Empty;
+ BOOST_CHECK(!v.IsScalar());
+}
+
+BOOST_AUTO_TEST_CASE(convert)
+{
+ Value v;
+ BOOST_CHECK(v.IsEmpty());
+ BOOST_CHECK(v == "");
+ BOOST_CHECK(static_cast<double>(v) == 0);
+ BOOST_CHECK(!v.IsScalar());
+ BOOST_CHECK(!v.IsObjectType<Object>());
+
+ BOOST_CHECK(v + "hello" == "hello");
+ BOOST_CHECK("hello" + v == "hello");
+}
+
+BOOST_AUTO_TEST_CASE(format)
+{
+ Value v = 3;
+
+ std::ostringstream obuf;
+ obuf << v;
+
+ BOOST_CHECK(obuf.str() == "3");
+
+ std::istringstream ibuf("3");
+ ibuf >> v;
+
+ BOOST_CHECK_MESSAGE(v.IsString(), "type of v should be String (is " << v.GetTypeName() << ")");
+ BOOST_CHECK_MESSAGE(v == "3", "v should be '3' (is '" << v << "')");
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/config-apply.cpp b/test/config-apply.cpp
new file mode 100644
index 0000000..c207ddd
--- /dev/null
+++ b/test/config-apply.cpp
@@ -0,0 +1,251 @@
+/* Icinga 2 | (c) 2023 Icinga GmbH | GPLv2+ */
+
+#include "config/applyrule.hpp"
+#include "config/configcompiler.hpp"
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+static Expression* RequireActualExpression(const std::unique_ptr<Expression>& compiledExpression)
+{
+ BOOST_REQUIRE_NE(compiledExpression.get(), nullptr);
+
+ auto dict (dynamic_cast<DictExpression*>(compiledExpression.get()));
+ BOOST_REQUIRE_NE(dict, nullptr);
+
+ auto& subex (dict->GetExpressions());
+ BOOST_REQUIRE_EQUAL(subex.size(), 1u);
+
+ auto sub0 (subex.at(0).get());
+ BOOST_REQUIRE_NE(sub0, nullptr);
+
+ return sub0;
+}
+
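+/* Boost.Test only knows how to print values that provide a suitable operator<<; this
+ * specialization teaches it to log std::pair<String, String>, which the
+ * BOOST_CHECK_EQUAL_COLLECTIONS calls below rely on when reporting mismatches.
+ */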
+template<>
+struct boost::test_tools::tt_detail::print_log_value<std::pair<String, String>>
+{
+ inline void operator()(std::ostream& os, const std::pair<String, String>& hs)
+ {
+ os << hs.first << "!" << hs.second;
+ }
+};
+
+static void GetTargetHostsHelper(
+ const String& filter, const Dictionary::Ptr& constants, bool targeted, const std::vector<String>& hosts = {}
+)
+{
+ auto compiled (ConfigCompiler::CompileText("<test>", filter));
+ auto expr (RequireActualExpression(compiled));
+ std::vector<const String*> actualHosts;
+
+ BOOST_CHECK_EQUAL(ApplyRule::GetTargetHosts(expr, actualHosts, constants), targeted);
+
+ if (targeted) {
+ std::vector<String> actualHostNames;
+
+ actualHostNames.reserve(actualHosts.size());
+
+ for (auto h : actualHosts) {
+ actualHostNames.emplace_back(*h);
+ }
+
+ BOOST_CHECK_EQUAL_COLLECTIONS(actualHostNames.begin(), actualHostNames.end(), hosts.begin(), hosts.end());
+ }
+}
+
+static void GetTargetServicesHelper(
+ const String& filter, const Dictionary::Ptr& constants, bool targeted, const std::vector<std::pair<String, String>>& services = {}
+)
+{
+ auto compiled (ConfigCompiler::CompileText("<test>", filter));
+ auto expr (RequireActualExpression(compiled));
+ std::vector<std::pair<const String*, const String*>> actualServices;
+
+ BOOST_CHECK_EQUAL(ApplyRule::GetTargetServices(expr, actualServices, constants), targeted);
+
+ if (targeted) {
+ std::vector<std::pair<String, String>> actualServiceNames;
+
+ actualServiceNames.reserve(actualServices.size());
+
+ for (auto s : actualServices) {
+ actualServiceNames.emplace_back(*s.first, *s.second);
+ }
+
+ BOOST_CHECK_EQUAL_COLLECTIONS(actualServiceNames.begin(), actualServiceNames.end(), services.begin(), services.end());
+ }
+}
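+
+/* In the cases below, "targeted" == true means ApplyRule::GetTargetHosts() /
+ * GetTargetServices() could reduce the assign filter to an explicit list of host names
+ * (or host/service name pairs): every || branch consists solely of "host.name == <string
+ * literal or known constant>" and, for services, an additional "service.name == ..."
+ * joined by &&. Anything else (extra conditions, other operators, non-literal right-hand
+ * sides, unknown constants) is reported as not targeted, presumably so the rule falls
+ * back to being evaluated against every object.
+ */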
+
+BOOST_AUTO_TEST_SUITE(config_apply)
+
+BOOST_AUTO_TEST_CASE(gettargethosts_literal)
+{
+ GetTargetHostsHelper("host.name == \"foo\"", nullptr, true, {"foo"});
+}
+
+BOOST_AUTO_TEST_CASE(gettargethosts_const)
+{
+ GetTargetHostsHelper("host.name == x", new Dictionary({{"x", "foo"}}), true, {"foo"});
+}
+
+BOOST_AUTO_TEST_CASE(gettargethosts_swapped)
+{
+ GetTargetHostsHelper("\"foo\" == host.name", nullptr, true, {"foo"});
+}
+
+BOOST_AUTO_TEST_CASE(gettargethosts_two)
+{
+ GetTargetHostsHelper("host.name == \"foo\" || host.name == \"bar\"", nullptr, true, {"foo", "bar"});
+}
+
+BOOST_AUTO_TEST_CASE(gettargethosts_three)
+{
+ GetTargetHostsHelper(
+ "host.name == \"foo\" || host.name == \"bar\" || host.name == \"foobar\"",
+ nullptr, true, {"foo", "bar", "foobar"}
+ );
+}
+
+BOOST_AUTO_TEST_CASE(gettargethosts_mixed)
+{
+ GetTargetHostsHelper("host.name == x || \"bar\" == host.name", new Dictionary({{"x", "foo"}}), true, {"foo", "bar"});
+}
+
+BOOST_AUTO_TEST_CASE(gettargethosts_redundant)
+{
+ GetTargetHostsHelper("host.name == \"foo\" && 1", nullptr, false);
+}
+
+BOOST_AUTO_TEST_CASE(gettargethosts_badconst)
+{
+ GetTargetHostsHelper("host.name == NodeName", new Dictionary({{"x", "foo"}}), false);
+}
+
+BOOST_AUTO_TEST_CASE(gettargethosts_notliteral)
+{
+ GetTargetHostsHelper("host.name == \"foo\" + \"bar\"", nullptr, false);
+}
+
+BOOST_AUTO_TEST_CASE(gettargethosts_wrongop)
+{
+ GetTargetHostsHelper("host.name != \"foo\"", nullptr, false);
+}
+
+BOOST_AUTO_TEST_CASE(gettargethosts_wrongattr)
+{
+ GetTargetHostsHelper("host.__name == \"foo\"", nullptr, false);
+}
+
+BOOST_AUTO_TEST_CASE(gettargethosts_wrongvar)
+{
+ GetTargetHostsHelper("service.name == \"foo\"", nullptr, false);
+}
+
+BOOST_AUTO_TEST_CASE(gettargethosts_noindexer)
+{
+ GetTargetHostsHelper("name == \"foo\"", nullptr, false);
+}
+
+BOOST_AUTO_TEST_CASE(gettargetservices_literal)
+{
+ GetTargetServicesHelper("host.name == \"foo\" && service.name == \"bar\"", nullptr, true, {{"foo", "bar"}});
+}
+
+BOOST_AUTO_TEST_CASE(gettargetservices_const)
+{
+ GetTargetServicesHelper("host.name == x && service.name == y", new Dictionary({{"x", "foo"}, {"y", "bar"}}), true, {{"foo", "bar"}});
+}
+
+BOOST_AUTO_TEST_CASE(gettargetservices_swapped_outer)
+{
+ GetTargetServicesHelper("service.name == \"bar\" && host.name == \"foo\"", nullptr, true, {{"foo", "bar"}});
+}
+
+BOOST_AUTO_TEST_CASE(gettargetservices_swapped_inner)
+{
+ GetTargetServicesHelper("\"foo\" == host.name && \"bar\" == service.name", nullptr, true, {{"foo", "bar"}});
+}
+
+BOOST_AUTO_TEST_CASE(gettargetservices_two)
+{
+ GetTargetServicesHelper(
+ "host.name == \"foo\" && service.name == \"bar\" || host.name == \"oof\" && service.name == \"rab\"",
+ nullptr, true, {{"foo", "bar"}, {"oof", "rab"}}
+ );
+}
+
+BOOST_AUTO_TEST_CASE(gettargetservices_three)
+{
+ GetTargetServicesHelper(
+ "host.name == \"foo\" && service.name == \"bar\" || host.name == \"oof\" && service.name == \"rab\" || host.name == \"ofo\" && service.name == \"rba\"",
+ nullptr, true, {{"foo", "bar"}, {"oof", "rab"}, {"ofo", "rba"}}
+ );
+}
+
+BOOST_AUTO_TEST_CASE(gettargetservices_mixed)
+{
+ GetTargetServicesHelper("\"bar\" == service.name && x == host.name", new Dictionary({{"x", "foo"}}), true, {{"foo", "bar"}});
+}
+
+BOOST_AUTO_TEST_CASE(gettargetservices_redundant)
+{
+ GetTargetServicesHelper("host.name == \"foo\" && service.name == \"bar\" && 1", nullptr, false);
+}
+
+BOOST_AUTO_TEST_CASE(gettargetservices_badconst)
+{
+ GetTargetServicesHelper("host.name == NodeName && service.name == \"bar\"", new Dictionary({{"x", "foo"}}), false);
+}
+
+BOOST_AUTO_TEST_CASE(gettargetservices_notliteral)
+{
+ GetTargetServicesHelper("host.name == \"foo\" && service.name == \"b\" + \"ar\"", nullptr, false);
+}
+
+BOOST_AUTO_TEST_CASE(gettargetservices_wrongop_outer)
+{
+ GetTargetServicesHelper("host.name == \"foo\" & service.name == \"bar\"", nullptr, false);
+}
+
+BOOST_AUTO_TEST_CASE(gettargetservices_wrongop_host)
+{
+ GetTargetServicesHelper("host.name != \"foo\" && service.name == \"bar\"", nullptr, false);
+}
+
+BOOST_AUTO_TEST_CASE(gettargetservices_wrongop_service)
+{
+ GetTargetServicesHelper("host.name == \"foo\" && service.name != \"bar\"", nullptr, false);
+}
+
+BOOST_AUTO_TEST_CASE(gettargetservices_wrongattr_host)
+{
+ GetTargetServicesHelper("host.__name == \"foo\" && service.name == \"bar\"", nullptr, false);
+}
+
+BOOST_AUTO_TEST_CASE(gettargetservices_wrongattr_service)
+{
+ GetTargetServicesHelper("host.name == \"foo\" && service.__name == \"bar\"", nullptr, false);
+}
+
+BOOST_AUTO_TEST_CASE(gettargetservices_wrongvar_host)
+{
+ GetTargetServicesHelper("horst.name == \"foo\" && service.name == \"bar\"", nullptr, false);
+}
+
+BOOST_AUTO_TEST_CASE(gettargetservices_wrongvar_service)
+{
+ GetTargetServicesHelper("host.name == \"foo\" && sehrvice.name == \"bar\"", nullptr, false);
+}
+
+BOOST_AUTO_TEST_CASE(gettargetservices_noindexer_host)
+{
+ GetTargetServicesHelper("name == \"foo\" && service.name == \"bar\"", nullptr, false);
+}
+
+BOOST_AUTO_TEST_CASE(gettargetservices_noindexer_service)
+{
+ GetTargetServicesHelper("host.name == \"foo\" && name == \"bar\"", nullptr, false);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/config-ops.cpp b/test/config-ops.cpp
new file mode 100644
index 0000000..dfbef25
--- /dev/null
+++ b/test/config-ops.cpp
@@ -0,0 +1,246 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "config/configcompiler.hpp"
+#include "base/exception.hpp"
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(config_ops)
+
+BOOST_AUTO_TEST_CASE(simple)
+{
+ ScriptFrame frame(true);
+ std::unique_ptr<Expression> expr;
+ Dictionary::Ptr dict;
+
+ expr = ConfigCompiler::CompileText("<test>", "");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == Empty);
+
+ expr = ConfigCompiler::CompileText("<test>", "\n3");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 3);
+
+ expr = ConfigCompiler::CompileText("<test>", "{ 3\n\n5 }");
+ BOOST_CHECK_THROW(expr->Evaluate(frame).GetValue(), ScriptError);
+
+ expr = ConfigCompiler::CompileText("<test>", "1 + 3");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 4);
+
+ expr = ConfigCompiler::CompileText("<test>", "3 - 1");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 2);
+
+ expr = ConfigCompiler::CompileText("<test>", "5m * 10");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 3000);
+
+ expr = ConfigCompiler::CompileText("<test>", "5m / 5");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 60);
+
+ expr = ConfigCompiler::CompileText("<test>", "7 & 3");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 3);
+
+ expr = ConfigCompiler::CompileText("<test>", "2 | 3");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 3);
+
+ expr = ConfigCompiler::CompileText("<test>", "true && false");
+ BOOST_CHECK(!expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "true || false");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "3 < 5");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "3 > 5");
+ BOOST_CHECK(!expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "3 <= 3");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "3 >= 3");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "2 + 3 * 4");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 14);
+
+ expr = ConfigCompiler::CompileText("<test>", "(2 + 3) * 4");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 20);
+
+ expr = ConfigCompiler::CompileText("<test>", "2 * - 3");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == -6);
+
+ expr = ConfigCompiler::CompileText("<test>", "-(2 + 3)");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == -5);
+
+ expr = ConfigCompiler::CompileText("<test>", "- 2 * 2 - 2 * 3 - 4 * - 5");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 10);
+
+ expr = ConfigCompiler::CompileText("<test>", "!0 == true");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "~0");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == (double)~(long)0);
+
+ expr = ConfigCompiler::CompileText("<test>", "4 << 8");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 1024);
+
+ expr = ConfigCompiler::CompileText("<test>", "1024 >> 4");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 64);
+
+ expr = ConfigCompiler::CompileText("<test>", "2 << 3 << 4");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 256);
+
+ expr = ConfigCompiler::CompileText("<test>", "256 >> 4 >> 3");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 2);
+
+ expr = ConfigCompiler::CompileText("<test>", R"("hello" == "hello")");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", R"("hello" != "hello")");
+ BOOST_CHECK(!expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", R"("foo" in [ "foo", "bar" ])");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", R"("foo" in [ "bar", "baz" ])");
+ BOOST_CHECK(!expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "\"foo\" in null");
+ BOOST_CHECK(!expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", R"("foo" in "bar")");
+ BOOST_CHECK_THROW(expr->Evaluate(frame).GetValue(), ScriptError);
+
+ expr = ConfigCompiler::CompileText("<test>", R"("foo" !in [ "bar", "baz" ])");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", R"("foo" !in [ "foo", "bar" ])");
+ BOOST_CHECK(!expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "\"foo\" !in null");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", R"("foo" !in "bar")");
+ BOOST_CHECK_THROW(expr->Evaluate(frame).GetValue(), ScriptError);
+
+ expr = ConfigCompiler::CompileText("<test>", "{ a += 3 }");
+ dict = expr->Evaluate(frame).GetValue();
+ BOOST_CHECK(dict->GetLength() == 1);
+ BOOST_CHECK(dict->Get("a") == 3);
+
+ expr = ConfigCompiler::CompileText("<test>", "test");
+ BOOST_CHECK_THROW(expr->Evaluate(frame).GetValue(), ScriptError);
+
+ expr = ConfigCompiler::CompileText("<test>", "null + 3");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 3);
+
+ expr = ConfigCompiler::CompileText("<test>", "3 + null");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 3);
+
+ expr = ConfigCompiler::CompileText("<test>", "\"test\" + 3");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == "test3");
+
+ expr = ConfigCompiler::CompileText("<test>", R"("\"te\\st")");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == "\"te\\st");
+
+ expr = ConfigCompiler::CompileText("<test>", R"("\'test")");
+ BOOST_CHECK_THROW(expr->Evaluate(frame).GetValue(), ScriptError);
+
+ expr = ConfigCompiler::CompileText("<test>", "({ a = 3\nb = 3 })");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue().IsObjectType<Dictionary>());
+}
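+
+/* Two of the checks above rely on duration literals: "5m" evaluates to 300 (seconds),
+ * hence "5m * 10" == 3000 and "5m / 5" == 60. Note also that "in" / "!in" tolerate a
+ * null right-hand side but throw a ScriptError for a string one.
+ */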
+
+BOOST_AUTO_TEST_CASE(advanced)
+{
+ ScriptFrame frame(true);
+ std::unique_ptr<Expression> expr;
+ Function::Ptr func;
+
+ expr = ConfigCompiler::CompileText("<test>", R"(regex("^Hello", "Hello World"))");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "__boost_test()");
+ BOOST_CHECK_THROW(expr->Evaluate(frame).GetValue(), ScriptError);
+
+ Object::Ptr self = new Object();
+ ScriptFrame frame2(true, self);
+ expr = ConfigCompiler::CompileText("<test>", "this");
+ BOOST_CHECK(expr->Evaluate(frame2).GetValue() == Value(self));
+
+ expr = ConfigCompiler::CompileText("<test>", "var v = 7; v");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "{ a = 3 }.a");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 3);
+
+ expr = ConfigCompiler::CompileText("<test>", "[ 2, 3 ][1]");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 3);
+
+ expr = ConfigCompiler::CompileText("<test>", "var v = { a = 3}; v.a");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 3);
+
+ expr = ConfigCompiler::CompileText("<test>", "a = 3 b = 3");
+ BOOST_CHECK_THROW(expr->Evaluate(frame).GetValue(), ScriptError);
+
+ expr = ConfigCompiler::CompileText("<test>", "function() { 3 }()");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 3);
+
+ expr = ConfigCompiler::CompileText("<test>", "function() { return 3, 5 }()");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 3);
+
+ expr = ConfigCompiler::CompileText("<test>", "typeof([]) == Array");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "typeof({}) == Dictionary");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "typeof(3) == Number");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "typeof(\"test\") == String");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "(7 | 8) == 15");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "(7 ^ 8) == 15");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "(7 & 15) == 7");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "7 in [7] == true");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "7 !in [7] == false");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "(7 | 8) > 14");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "(7 ^ 8) > 14");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "(7 & 15) > 6");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue());
+
+ expr = ConfigCompiler::CompileText("<test>", "\"a\" = 3");
+ BOOST_CHECK_THROW(expr->Evaluate(frame).GetValue(), ScriptError);
+
+ expr = ConfigCompiler::CompileText("<test>", "3 = 3");
+ BOOST_CHECK_THROW(expr->Evaluate(frame).GetValue(), ScriptError);
+
+ expr = ConfigCompiler::CompileText("<test>", "var e; e");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue().IsEmpty());
+
+ expr = ConfigCompiler::CompileText("<test>", "var e = 3; e");
+ BOOST_CHECK(expr->Evaluate(frame).GetValue() == 3);
+
+ expr = ConfigCompiler::CompileText("<test>", "Array.x");
+ BOOST_CHECK_THROW(expr->Evaluate(frame).GetValue(), ScriptError);
+
+ expr = ConfigCompiler::CompileText("<test>", "{{ 3 }}");
+ func = expr->Evaluate(frame).GetValue();
+ BOOST_CHECK(func->Invoke() == 3);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/config/2742.conf b/test/config/2742.conf
new file mode 100644
index 0000000..555e714
--- /dev/null
+++ b/test/config/2742.conf
@@ -0,0 +1,21 @@
+
+object CheckCommand "2742-macro-command" {
+ command = "echo UPTIME: $icinga.uptime$ SERVICES warn: $icinga.num_services_warning$ crit: $icinga.num_services_critical$ unknown: $icinga.num_services_unknown$ ackd: $icinga.num_services_acknowledged$ HOST: down: $icinga.num_hosts_down$ unreachable: $icinga.num_hosts_unreachable$"
+}
+
+object HostGroup "2742-windows-servers"{
+ display_name = "2742-windows-servers"
+ assign where match("2742-*", host.name)
+}
+
+apply Service "2742-macro-test" {
+ import "test-generic-service"
+ check_command = "2742-macro-command"
+ assign where match("2742-*", host.name)
+}
+
+object Host "2742-server" {
+ import "test-generic-host"
+ address = "192.168.1.5",
+}
+
diff --git a/test/config/5872.conf b/test/config/5872.conf
new file mode 100644
index 0000000..0405516
--- /dev/null
+++ b/test/config/5872.conf
@@ -0,0 +1,72 @@
+
+object HostGroup "5872-windows-servers"{
+ display_name = "5872-windows-servers"
+ assign where match("5872-*", host.name)
+}
+
+apply Service "5872-ping4" {
+ import "test-generic-service"
+ check_command = "ping4"
+ assign where match("5872-*", host.name)
+}
+
+object Host "5872-server" {
+ import "test-generic-host"
+ address = "192.168.1.5",
+}
+
+object Host "5872-pc" {
+ import "test-generic-host"
+ address = "192.168.1.101",
+}
+
+object Host "5872-router" {
+ import "test-generic-host"
+ address = "192.168.1.1",
+}
+
+object Host "5872-switch" {
+ import "test-generic-host"
+ address = "192.168.1.2",
+}
+
+apply Dependency "5872-host-switch" to Host {
+ parent_host_name = "5872-router"
+ disable_checks = true
+ assign where host.name == "5872-switch"
+}
+
+apply Dependency "5872-host-pc" to Host {
+ parent_host_name = "5872-switch"
+ disable_checks = true
+ assign where host.name == "5872-pc"
+}
+
+apply Dependency "5872-host-server" to Host {
+ parent_host_name = "5872-switch"
+ disable_checks = true
+ assign where host.name == "5872-server"
+}
+
+apply Dependency "5872-service-switch" to Service {
+ parent_host_name = "5872-router"
+ parent_service_name = "5872-ping4"
+ disable_checks = true
+ assign where host.name == "5872-switch"
+}
+
+apply Dependency "5872-service-pc" to Service {
+ parent_host_name = "5872-switch"
+ parent_service_name = "5872-ping4"
+ disable_checks = true
+ assign where host.name == "5872-pc"
+}
+
+apply Dependency "5872-service-server" to Service {
+ parent_host_name = "5872-switch"
+ parent_service_name = "5872-ping4"
+ states = [ Warning, Critical ]
+ disable_checks = true
+ assign where host.name == "5872-server"
+}
+
diff --git a/test/config/5912.conf.dis b/test/config/5912.conf.dis
new file mode 100644
index 0000000..0366b06
--- /dev/null
+++ b/test/config/5912.conf.dis
@@ -0,0 +1,14 @@
+
+apply Service "5912-ping4" {
+ import "test-generic-service"
+ check_command = "ping4"
+ host_name = "foo"
+ service_name = "bar"
+ assign where match("5912-*", host.name)
+}
+
+object Host "5912-server" {
+ import "test-generic-host"
+ address = "192.168.1.5",
+}
+
diff --git a/test/config/5926.conf b/test/config/5926.conf
new file mode 100644
index 0000000..e4060a6
--- /dev/null
+++ b/test/config/5926.conf
@@ -0,0 +1,23 @@
+
+
+object CheckCommand "5926-macro-test" {
+ command = "echo \"address: $address$ address_service: $service.vars.address$ foo: $foo$ keks: $keks$ god: $god$\""
+ //command = "echo \"address: $address$ address_service: $service.vars.address$\""
+}
+
+object Host "5926-macro-test-host" {
+ import "test-generic-host"
+ check_command = "5926-macro-test"
+ address = "1.2.3.4"
+ vars.god = "father"
+}
+
+apply Service "5926-macro-test-service" {
+ import "test-generic-service"
+ check_command = "5926-macro-test"
+ vars.address = "5.6.7.8"
+ vars.foo = "bar"
+ vars.keks = "schaschlik"
+
+ assign where host.name == "5926-macro-test-host"
+}
diff --git a/test/config/5927.conf b/test/config/5927.conf
new file mode 100644
index 0000000..b7041b6
--- /dev/null
+++ b/test/config/5927.conf
@@ -0,0 +1,44 @@
+
+object EventCommand "5927-handle" {
+ command = "echo \"event handler triggered.\""
+}
+
+object NotificationCommand "5927-notification" {
+ command = "echo \"notification triggered.\""
+}
+
+object HostGroup "5927-bar" {
+ assign where match("5927-keks*", host.name)
+}
+
+object Host "5927-keks" {
+ import "test-generic-host"
+ event_command = "5927-handle"
+ address = "1.2.3.4"
+}
+
+apply Service "5927-foo" {
+ import "test-generic-service"
+ check_command = "ping4"
+ event_command = "5927-handle"
+ assign where "5927-bar" in host.groups
+}
+
+apply Notification "5927-host-notification" to Host {
+ import "test-mail-host-notification"
+ command = "5927-notification"
+ assign where "5927-bar" in host.groups
+}
+
+apply Notification "5927-service-notification" to Service {
+ import "test-mail-service-notification"
+ command = "5927-notification"
+ assign where "5927-bar" in host.groups
+}
+
+object ServiceGroup "5927-bar" {
+ assign where service.name == "5927-foo"
+}
+
+
+
diff --git a/test/config/5980.conf b/test/config/5980.conf
new file mode 100644
index 0000000..494b5bd
--- /dev/null
+++ b/test/config/5980.conf
@@ -0,0 +1,58 @@
+
+
+object Host "5980-host" {
+ import "test-generic-host"
+ address = "127.0.0.1"
+}
+
+object Service "5980-service1" {
+ import "test-generic-service"
+ host_name = "5980-host"
+ check_command = "dummy"
+}
+
+object Service "5980-service2" {
+ import "test-generic-service"
+ host_name = "5980-host"
+ check_command = "dummy"
+}
+
+
+template ScheduledDowntime "5980-test-downtime" {
+ author = "icingaadmin"
+ comment = "Scheduled downtime for tests"
+
+ ranges = {
+ monday = "02:00-03:00"
+ tuesday = "02:00-03:00"
+ wednesday = "02:00-03:00"
+ thursday = "02:00-03:00"
+ friday = "02:00-03:00"
+ saturday = "02:00-03:00"
+ sunday = "02:00-03:00"
+ }
+}
+
+
+apply ScheduledDowntime "5980-test-service-downtime" to Host {
+ import "5980-test-downtime"
+ comment = "Scheduled host downtime for tests"
+
+ ranges = {
+ tuesday = "09:37-09:40"
+ }
+
+ assign where host.name == "5980-host"
+}
+
+apply ScheduledDowntime "5980-test-service-downtime" to Service {
+ import "5980-test-downtime"
+ comment = "Scheduled service downtime for tests"
+
+ ranges = {
+ tuesday = "09:37-09:40"
+ }
+
+ assign where host.name == "5980-host"
+}
+
diff --git a/test/config/6105.conf b/test/config/6105.conf
new file mode 100644
index 0000000..6bfccff
--- /dev/null
+++ b/test/config/6105.conf
@@ -0,0 +1,25 @@
+
+
+object HostGroup "6105-bar" {
+ assign where match("6105-keks*", host.name)
+ vars.foo = "bar"
+}
+
+object Host "6105-keks" {
+ import "test-generic-host"
+ address = "12.3.4"
+}
+
+apply Service "6105-foo" {
+ import "test-generic-service"
+ check_command = "ping4"
+ assign where "6105-bar" in host.groups
+}
+
+object ServiceGroup "6105-bar" {
+ assign where service.name == "6105-foo"
+ vars.bar = "foo"
+}
+
+
+
diff --git a/test/config/6479.conf b/test/config/6479.conf
new file mode 100644
index 0000000..68b08a3
--- /dev/null
+++ b/test/config/6479.conf
@@ -0,0 +1,44 @@
+
+object EventCommand "6479-handle" {
+ command = "echo \"event handler triggered.\""
+}
+
+object NotificationCommand "6479-notification" {
+ command = "echo \"notification triggered.\""
+}
+
+object HostGroup "6479-bar" {
+ assign where match("6479-keks*", host.name)
+}
+
+object Host "6479-keks" {
+ import "test-generic-host"
+ event_command = "6479-handle"
+ address = "1.2.3.4"
+}
+
+apply Service "6479-foo" {
+ import "test-generic-service"
+ check_command = "ping4"
+ event_command = "6479-handle"
+ assign where "6479-bar" in host.groups
+}
+
+apply Notification "6479-host-notification" to Host {
+ import "test-mail-host-notification"
+ command = "6479-notification"
+ assign where "6479-bar" in host.groups
+}
+
+apply Notification "6479-service-notification" to Service {
+ import "test-mail-service-notification"
+ command = "6479-notification"
+ assign where "6479-bar" in host.groups
+}
+
+object ServiceGroup "6479-bar" {
+ assign where service.name == "6479-foo"
+}
+
+
+
diff --git a/test/config/6608.conf b/test/config/6608.conf
new file mode 100644
index 0000000..e24d4c8
--- /dev/null
+++ b/test/config/6608.conf
@@ -0,0 +1,16 @@
+
+
+object Host "6608-host" {
+ import "test-generic-host"
+ vars.BUMSTI = "keks"
+ vars.bumsti = "schaschlik"
+}
+
+object Service "6608-service" {
+ import "test-generic-service"
+ check_command = "dummy"
+ host_name = "6608-host"
+ vars.DINGDONG = "$BUMSTI$"
+ vars.dingdong = "$bumsti$"
+}
+
diff --git a/test/config/6968.conf b/test/config/6968.conf
new file mode 100644
index 0000000..9882727
--- /dev/null
+++ b/test/config/6968.conf
@@ -0,0 +1,27 @@
+object Host "6968-server" {
+ import "test-generic-host"
+ address = "127.0.0.1"
+}
+
+object Service "6968-test" {
+ import "test-generic-service"
+
+ host_name = "6968-server"
+ check_command = "6968-check_vmware"
+ vars.vmware_check = "vCenter_License_Status"
+}
+
+object CheckCommand "6968-check_vmware" {
+ command = [ PluginDir + "/check_vmware.pl" ]
+
+ arguments = {
+ "--server" = "$address$"
+ "--username" = "***"
+ "--password" = "***"
+ "--check" = {
+ set_if = "$vmware_check$"
+ }
+ }
+}
+
+
diff --git a/test/config/7560.conf b/test/config/7560.conf
new file mode 100644
index 0000000..422cc04
--- /dev/null
+++ b/test/config/7560.conf
@@ -0,0 +1,41 @@
+object Host "7560-server" {
+ import "test-generic-host"
+ address = "127.0.0.1"
+ check_command = "hostalive"
+
+ vars.interfaces += {
+ eth0 = {
+ port = 1
+ vlan = "internal"
+ address = "127.0.0.2"
+ qos = "enabled"
+ }
+ eth1 = {
+ port = 2
+ vlan = "mgmt"
+ address = "127.0.1.2"
+ }
+ eth2 = {
+ port = 3
+ vlan = "remote"
+ address = "127.0.2.2"
+ }
+ }
+}
+
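+/* This apply-for rule generates one "if-<interface>" Service per entry of
+ * host.vars.interfaces: "config" holds the per-interface dictionary and is merged into
+ * the service vars, so vars.qos keeps its "disabled" default unless the interface (like
+ * eth0 above) overrides it. */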
+apply Service "if-" for (if_name => config in host.vars.interfaces) {
+ import "test-generic-service"
+ check_command = "ping4"
+
+ vars.qos = "disabled"
+ vars += config
+
+ display_name = "if-" + if_name + "-" + vars.vlan
+
+ notes = "Interface check for Port " + string(vars.port) + " in VLAN " + vars.vlan + " on Address " + vars.address + " QoS " + vars.qos
+ notes_url = "http://foreman.company.com/hosts/" + host.name
+ action_url = "http://snmp.checker.company.com/" + host.name + "if-" + if_name
+
+ assign where match("7560-*", host.name) && typeof(host.vars.interfaces) == typeof({})
+}
+
diff --git a/test/config/7683.conf b/test/config/7683.conf
new file mode 100644
index 0000000..4e1a986
--- /dev/null
+++ b/test/config/7683.conf
@@ -0,0 +1,27 @@
+object Host "7683-parent" {
+ check_command = "dummy"
+ vars.dummy_state = 0
+}
+
+
+object Host "7683-child1" {
+ check_command = "dummy"
+ vars.dummy_state = 0
+}
+
+object Host "7683-child2" {
+ check_command = "dummy"
+ vars.dummy_state = 0
+}
+
+object Service "7683-service" {
+ check_command = "dummy"
+ host_name = "7683-parent"
+ vars.dummy_state = 0
+}
+
+apply Dependency "test-host" to Host {
+ parent_host_name = "7683-parent"
+ assign where match("7683-child*", host.name)
+}
+
diff --git a/test/config/8063.conf b/test/config/8063.conf
new file mode 100644
index 0000000..75676d1
--- /dev/null
+++ b/test/config/8063.conf
@@ -0,0 +1,73 @@
+object CheckCommand "8063-my-disk" {
+ command = [ PluginDir + "/check_disk" ]
+
+ arguments = {
+ "-w" = {
+ value = "$disk_wfree$"
+ description = "Exit with WARNING status if less than INTEGER units of disk are free or Exit with WARNING status if less than PERCENT of disk space is free"
+ required = true
+ }
+ "-c" = {
+ value = "$disk_cfree$"
+ description = "Exit with CRITICAL status if less than INTEGER units of disk are free or Exit with CRITCAL status if less than PERCENT of disk space is free"
+ required = true
+ }
+ "-W" = {
+ value = "$disk_inode_wfree$"
+ description = "Exit with WARNING status if less than PERCENT of inode space is free"
+ }
+ "-K" = {
+ value = "$disk_inode_cfree$"
+ description = "Exit with CRITICAL status if less than PERCENT of inode space is free"
+ }
+ "-p" = {
+ value = "$disk_partitions$"
+ description = "Path or partition (may be repeated)"
+ repeat_key = true
+ order = 1
+ }
+ "-x" = {
+ value = "$disk_partitions_excluded$"
+ description = "Ignore device (only works if -p unspecified)"
+ }
+ }
+
+ vars.disk_wfree = "20%"
+ vars.disk_cfree = "10%"
+}
+
+object Host "8063-my-server" {
+ import "generic-host"
+ address = "127.0.0.1"
+ address6 = "::1"
+
+ vars.local_disks["basic-partitions"] = {
+ disk_partitions = [ "/", "/tmp", "/var", "/home", "/run/user/1000/gvfs" ]
+ }
+}
+
+apply Service "8063-" for (disk => config in host.vars.local_disks) {
+ import "generic-service"
+ check_command = "8063-my-disk"
+ check_interval = 5s
+ retry_interval = 5s
+
+ volatile = true
+ vars.volatile_check = true
+
+ vars += config
+
+ vars.disk_wfree = "10%"
+ vars.disk_cfree = "5%"
+
+ assign where host.vars.local_disks
+}
+
+apply Notification "disk-notification" to Service {
+ import "test-mail-service-notification"
+
+ users = [ "test-icingaadmin" ]
+
+ assign where service.vars.volatile_check == true
+}
+
diff --git a/test/config/README b/test/config/README
new file mode 100644
index 0000000..5e8385f
--- /dev/null
+++ b/test/config/README
@@ -0,0 +1,2 @@
+Contains various test configurations for fixed issues.
+May also be used for regression tests.
diff --git a/test/config/templates.conf b/test/config/templates.conf
new file mode 100644
index 0000000..4a15bb8
--- /dev/null
+++ b/test/config/templates.conf
@@ -0,0 +1,80 @@
+/**
+ * test templates
+ */
+
+template Service "test-generic-service" {
+ max_check_attempts = 3
+ check_interval = 5m
+ retry_interval = 1m
+}
+
+template Host "test-generic-host" {
+ check_command = "hostalive"
+}
+
+template User "test-generic-user" {
+
+}
+
+template Notification "test-mail-host-notification" {
+ command = "mail-host-notification"
+
+ states = [ Up, Down ]
+ types = [ Problem, Acknowledgement, Recovery, Custom,
+ FlappingStart, FlappingEnd,
+ DowntimeStart, DowntimeEnd, DowntimeRemoved ]
+
+ period = "test-24x7"
+
+ user_groups = [ "test-icingaadmins" ]
+}
+
+/**
+ * Provides default settings for service notifications.
+ * By convention all service notifications should import
+ * this template.
+ */
+template Notification "test-mail-service-notification" {
+ command = "mail-service-notification"
+
+ states = [ OK, Warning, Critical, Unknown ]
+ types = [ Problem, Acknowledgement, Recovery, Custom,
+ FlappingStart, FlappingEnd,
+ DowntimeStart, DowntimeEnd, DowntimeRemoved ]
+
+ period = "test-24x7"
+
+ user_groups = [ "test-icingaadmins" ]
+}
+
+
+/* users */
+
+object User "test-icingaadmin" {
+ import "test-generic-user"
+
+ display_name = "Test Icinga 2 Admin"
+ groups = [ "test-icingaadmins" ]
+
+ email = "icinga@localhost"
+}
+
+object UserGroup "test-icingaadmins" {
+ display_name = "Test Icinga 2 Admin Group"
+}
+
+/* timeperiods */
+object TimePeriod "test-24x7" {
+ display_name = "Test Icinga 2 24x7 TimePeriod"
+
+ ranges = {
+ "monday" = "00:00-24:00"
+ "tuesday" = "00:00-24:00"
+ "wednesday" = "00:00-24:00"
+ "thursday" = "00:00-24:00"
+ "friday" = "00:00-24:00"
+ "saturday" = "00:00-24:00"
+ "sunday" = "00:00-24:00"
+ }
+}
+
diff --git a/test/icinga-checkable-fixture.cpp b/test/icinga-checkable-fixture.cpp
new file mode 100644
index 0000000..67fab1b
--- /dev/null
+++ b/test/icinga-checkable-fixture.cpp
@@ -0,0 +1,28 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "cli/daemonutility.hpp"
+#include "base/application.hpp"
+#include "base/loader.hpp"
+#include <BoostTestTargetConfig.h>
+#include <fstream>
+
+using namespace icinga;
+
+struct IcingaCheckableFixture
+{
+ IcingaCheckableFixture()
+ {
+ BOOST_TEST_MESSAGE("setup running Icinga 2 core");
+
+ Application::InitializeBase();
+ }
+
+ ~IcingaCheckableFixture()
+ {
+ BOOST_TEST_MESSAGE("cleanup Icinga 2 core");
+ Application::UninitializeBase();
+ }
+};
+
+BOOST_GLOBAL_FIXTURE(IcingaCheckableFixture);
+
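The fixture above wraps Application::InitializeBase() and UninitializeBase() in a constructor/destructor pair so Boost.Test runs them exactly once around the whole suite. The stand-alone sketch below illustrates the same BOOST_GLOBAL_FIXTURE pattern; the demo names are illustrative only and are not part of the Icinga test suite.

    #define BOOST_TEST_MODULE global_fixture_demo
    #include <boost/test/included/unit_test.hpp>

    // Stand-alone analogue of the fixture above: the constructor runs once before
    // the first test case, the destructor once after the last one.
    struct DemoGlobalFixture {
        DemoGlobalFixture()  { BOOST_TEST_MESSAGE("global setup"); }
        ~DemoGlobalFixture() { BOOST_TEST_MESSAGE("global teardown"); }
    };

    BOOST_GLOBAL_FIXTURE(DemoGlobalFixture);

    BOOST_AUTO_TEST_CASE(trivial)
    {
        BOOST_CHECK_EQUAL(1 + 1, 2);
    }

Running the resulting test binary with --log_level=message prints the setup and teardown messages around the test cases.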
diff --git a/test/icinga-checkable-flapping.cpp b/test/icinga-checkable-flapping.cpp
new file mode 100644
index 0000000..bc30564
--- /dev/null
+++ b/test/icinga-checkable-flapping.cpp
@@ -0,0 +1,248 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/host.hpp"
+#include <bitset>
+#include <iostream>
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+#ifdef I2_DEBUG
+static CheckResult::Ptr MakeCheckResult(ServiceState state)
+{
+ CheckResult::Ptr cr = new CheckResult();
+
+ cr->SetState(state);
+
+ double now = Utility::GetTime();
+ cr->SetScheduleStart(now);
+ cr->SetScheduleEnd(now);
+ cr->SetExecutionStart(now);
+ cr->SetExecutionEnd(now);
+
+ Utility::IncrementTime(60);
+
+ return cr;
+}
+
+static void LogFlapping(const Checkable::Ptr& obj)
+{
+ std::bitset<20> stateChangeBuf = obj->GetFlappingBuffer();
+ int oldestIndex = (obj->GetFlappingBuffer() & 0xFF00000) >> 20;
+
+ std::cout << "Flapping: " << obj->IsFlapping() << "\nHT: " << obj->GetFlappingThresholdHigh() << " LT: "
+ << obj->GetFlappingThresholdLow() << "\nOur value: " << obj->GetFlappingCurrent() << "\nPtr: " << oldestIndex
+ << " Buf: " << stateChangeBuf.to_ulong() << '\n';
+}
+
+
+static void LogHostStatus(const Host::Ptr &host)
+{
+ std::cout << "Current status: state: " << host->GetState() << " state_type: " << host->GetStateType()
+ << " check attempt: " << host->GetCheckAttempt() << "/" << host->GetMaxCheckAttempts() << " Active: " << host->IsActive() << std::endl;
+}
+#endif /* I2_DEBUG */
+
+BOOST_AUTO_TEST_SUITE(icinga_checkable_flapping)
+
+BOOST_AUTO_TEST_CASE(host_not_flapping)
+{
+#ifndef I2_DEBUG
+ BOOST_WARN_MESSAGE(false, "This test can only be run in a debug build!");
+#else /* I2_DEBUG */
+ std::cout << "Running test with a non-flapping host...\n";
+
+ Host::Ptr host = new Host();
+ host->SetName("test");
+ host->SetEnableFlapping(true);
+ host->SetMaxCheckAttempts(5);
+ host->SetActive(true);
+
+ // Host otherwise is soft down
+ host->SetState(HostUp);
+ host->SetStateType(StateTypeHard);
+
+ Utility::SetTime(0);
+
+ BOOST_CHECK(host->GetFlappingCurrent() == 0);
+
+ LogFlapping(host);
+ LogHostStatus(host);
+
+ // watch the state being stable
+ int i = 0;
+ while (i++ < 10) {
+ // For some reason, elusive to me, the first check is a state change
+ host->ProcessCheckResult(MakeCheckResult(ServiceOK));
+
+ LogFlapping(host);
+ LogHostStatus(host);
+
+ BOOST_CHECK(host->GetState() == 0);
+ BOOST_CHECK(host->GetCheckAttempt() == 1);
+ BOOST_CHECK(host->GetStateType() == StateTypeHard);
+
+ //Should not be flapping
+ BOOST_CHECK(!host->IsFlapping());
+ BOOST_CHECK(host->GetFlappingCurrent() < 30.0);
+ }
+#endif /* I2_DEBUG */
+}
+
+BOOST_AUTO_TEST_CASE(host_flapping)
+{
+#ifndef I2_DEBUG
+ BOOST_WARN_MESSAGE(false, "This test can only be run in a debug build!");
+#else /* I2_DEBUG */
+ std::cout << "Running test with host changing state with every check...\n";
+
+ Host::Ptr host = new Host();
+ host->SetName("test");
+ host->SetEnableFlapping(true);
+ host->SetMaxCheckAttempts(5);
+ host->SetActive(true);
+
+ Utility::SetTime(0);
+
+ int i = 0;
+ while (i++ < 25) {
+ if (i % 2)
+ host->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ else
+ host->ProcessCheckResult(MakeCheckResult(ServiceWarning));
+
+ LogFlapping(host);
+ LogHostStatus(host);
+
+ // 30 percent is our high threshold
+ if (i >= 6) {
+ BOOST_CHECK(host->IsFlapping());
+ } else {
+ BOOST_CHECK(!host->IsFlapping());
+ }
+ }
+#endif /* I2_DEBUG */
+}
+
+BOOST_AUTO_TEST_CASE(host_flapping_recover)
+{
+#ifndef I2_DEBUG
+ BOOST_WARN_MESSAGE(false, "This test can only be run in a debug build!");
+#else /* I2_DEBUG */
+ std::cout << "Running test with flapping recovery...\n";
+
+ Host::Ptr host = new Host();
+ host->SetName("test");
+ host->SetEnableFlapping(true);
+ host->SetMaxCheckAttempts(5);
+ host->SetActive(true);
+
+ // Host otherwise is soft down
+ host->SetState(HostUp);
+ host->SetStateType(StateTypeHard);
+
+ Utility::SetTime(0);
+
+ // A few Warning results first
+ host->ProcessCheckResult(MakeCheckResult(ServiceWarning));
+ host->ProcessCheckResult(MakeCheckResult(ServiceWarning));
+ host->ProcessCheckResult(MakeCheckResult(ServiceWarning));
+ host->ProcessCheckResult(MakeCheckResult(ServiceWarning));
+
+ LogFlapping(host);
+ LogHostStatus(host);
+ for (int i = 0; i <= 7; i++) {
+ if (i % 2)
+ host->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ else
+ host->ProcessCheckResult(MakeCheckResult(ServiceWarning));
+ }
+
+ LogFlapping(host);
+ LogHostStatus(host);
+
+ // We should be flapping now
+ BOOST_CHECK(host->GetFlappingCurrent() > 30.0);
+ BOOST_CHECK(host->IsFlapping());
+
+ // Now recover from flapping
+ int count = 0;
+ while (host->IsFlapping()) {
+ BOOST_CHECK(host->GetFlappingCurrent() > 25.0);
+ BOOST_CHECK(host->IsFlapping());
+
+ host->ProcessCheckResult(MakeCheckResult(ServiceWarning));
+ LogFlapping(host);
+ LogHostStatus(host);
+ count++;
+ }
+
+ std::cout << "Recovered from flapping after " << count << " Warning results.\n";
+
+ BOOST_CHECK(host->GetFlappingCurrent() < 25.0);
+ BOOST_CHECK(!host->IsFlapping());
+#endif /* I2_DEBUG */
+}
+
+BOOST_AUTO_TEST_CASE(host_flapping_docs_example)
+{
+#ifndef I2_DEBUG
+ BOOST_WARN_MESSAGE(false, "This test can only be run in a debug build!");
+#else /* I2_DEBUG */
+ std::cout << "Simulating the documentation example...\n";
+
+ Host::Ptr host = new Host();
+ host->SetName("test");
+ host->SetEnableFlapping(true);
+ host->SetMaxCheckAttempts(5);
+ host->SetActive(true);
+
+ // Host otherwise is soft down
+ host->SetState(HostUp);
+ host->SetStateType(StateTypeHard);
+
+ Utility::SetTime(0);
+
+ host->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ host->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ host->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ host->ProcessCheckResult(MakeCheckResult(ServiceWarning));
+ host->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ host->ProcessCheckResult(MakeCheckResult(ServiceWarning));
+ host->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ host->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ host->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ host->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ host->ProcessCheckResult(MakeCheckResult(ServiceWarning));
+ host->ProcessCheckResult(MakeCheckResult(ServiceWarning));
+ host->ProcessCheckResult(MakeCheckResult(ServiceWarning));
+ host->ProcessCheckResult(MakeCheckResult(ServiceWarning));
+ host->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ host->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ host->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ host->ProcessCheckResult(MakeCheckResult(ServiceWarning));
+ host->ProcessCheckResult(MakeCheckResult(ServiceWarning));
+ host->ProcessCheckResult(MakeCheckResult(ServiceWarning));
+ host->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+
+ LogFlapping(host);
+ LogHostStatus(host);
+ BOOST_CHECK(host->GetFlappingCurrent() == 39.1);
+ BOOST_CHECK(host->IsFlapping());
+
+ host->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ host->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ host->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ host->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ host->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ host->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ host->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+
+ LogFlapping(host);
+ LogHostStatus(host);
+ BOOST_CHECK(host->GetFlappingCurrent() < 25.0);
+ BOOST_CHECK(!host->IsFlapping());
+#endif
+}
+
+BOOST_AUTO_TEST_SUITE_END()
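These cases alternate check results and compare GetFlappingCurrent() against the 30%/25% high/low thresholds. As a rough, self-contained sketch of the idea being tested, flapping can be thought of as a weighted share of state changes over the last 20 check results; the linear weighting below (0.8 for the oldest slot up to 1.2 for the newest) is an assumption for illustration, not the exact implementation.

    // Hypothetical illustration: weighted percentage of state changes over the
    // last 20 results. The weights are an assumption, not Icinga's code.
    #include <array>
    #include <iostream>

    double FlappingPercent(const std::array<bool, 20>& stateChanged)
    {
        double weight = 0.8;                   // oldest change counts less ...
        const double step = (1.2 - 0.8) / 19;  // ... newest counts more
        double sum = 0, total = 0;

        for (bool changed : stateChanged) {
            if (changed)
                sum += weight;
            total += weight;
            weight += step;
        }

        return 100.0 * sum / total;
    }

    int main()
    {
        std::array<bool, 20> buf{};
        for (int i = 0; i < 20; i += 2)
            buf[i] = true; // every other check flips the state

        std::cout << FlappingPercent(buf) << "%\n"; // roughly 50%
    }

With every other check flipping the state the share lands near 50%, well above a 30% start threshold, which matches what host_flapping asserts after a handful of results.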
diff --git a/test/icinga-checkresult.cpp b/test/icinga-checkresult.cpp
new file mode 100644
index 0000000..fdc7891
--- /dev/null
+++ b/test/icinga-checkresult.cpp
@@ -0,0 +1,1032 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/downtime.hpp"
+#include "icinga/host.hpp"
+#include "icinga/service.hpp"
+#include <BoostTestTargetConfig.h>
+#include <iostream>
+#include <sstream>
+#include <utility>
+#include <vector>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(icinga_checkresult)
+
+static CheckResult::Ptr MakeCheckResult(ServiceState state)
+{
+ CheckResult::Ptr cr = new CheckResult();
+
+ cr->SetState(state);
+
+ double now = Utility::GetTime();
+ cr->SetScheduleStart(now);
+ cr->SetScheduleEnd(now);
+ cr->SetExecutionStart(now);
+ cr->SetExecutionEnd(now);
+
+ return cr;
+}
+
+static void NotificationHandler(const Checkable::Ptr& checkable, NotificationType type)
+{
+ std::cout << "Notification triggered: " << Notification::NotificationTypeToString(type) << std::endl;
+
+ checkable->SetExtension("requested_notifications", true);
+ checkable->SetExtension("notification_type", type);
+}
+
+static void CheckNotification(const Checkable::Ptr& checkable, bool expected, NotificationType type = NotificationRecovery)
+{
+ BOOST_CHECK((expected && checkable->GetExtension("requested_notifications").ToBool()) || (!expected && !checkable->GetExtension("requested_notifications").ToBool()));
+
+ if (expected && checkable->GetExtension("requested_notifications").ToBool())
+ BOOST_CHECK(checkable->GetExtension("notification_type") == type);
+
+ checkable->SetExtension("requested_notifications", false);
+}
+
+BOOST_AUTO_TEST_CASE(host_1attempt)
+{
+ boost::signals2::connection c = Checkable::OnNotificationsRequested.connect([](const Checkable::Ptr& checkable, NotificationType type,
+ const CheckResult::Ptr&, const String&, const String&, const MessageOrigin::Ptr&) {
+ NotificationHandler(checkable, type);
+ });
+
+ Host::Ptr host = new Host();
+ host->SetActive(true);
+ host->SetMaxCheckAttempts(1);
+ host->Activate();
+ host->SetAuthority(true);
+ host->SetStateRaw(ServiceOK);
+ host->SetStateType(StateTypeHard);
+
+ std::cout << "Before first check result (ok, hard)" << std::endl;
+ BOOST_CHECK(host->GetState() == HostUp);
+ BOOST_CHECK(host->GetStateType() == StateTypeHard);
+ BOOST_CHECK(host->GetCheckAttempt() == 1);
+ BOOST_CHECK(host->IsReachable() == true);
+ CheckNotification(host, false);
+
+ std::cout << "First check result (unknown)" << std::endl;
+ host->ProcessCheckResult(MakeCheckResult(ServiceUnknown));
+ BOOST_CHECK(host->GetState() == HostDown);
+ BOOST_CHECK(host->GetStateType() == StateTypeHard);
+ BOOST_CHECK(host->GetCheckAttempt() == 1);
+ BOOST_CHECK(host->IsReachable() == true);
+ CheckNotification(host, true, NotificationProblem);
+
+ std::cout << "Second check result (ok)" << std::endl;
+ host->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ BOOST_CHECK(host->GetState() == HostUp);
+ BOOST_CHECK(host->GetStateType() == StateTypeHard);
+ BOOST_CHECK(host->GetCheckAttempt() == 1);
+ BOOST_CHECK(host->IsReachable() == true);
+ CheckNotification(host, true, NotificationRecovery);
+
+ std::cout << "Third check result (critical)" << std::endl;
+ host->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ BOOST_CHECK(host->GetState() == HostDown);
+ BOOST_CHECK(host->GetStateType() == StateTypeHard);
+ BOOST_CHECK(host->GetCheckAttempt() == 1);
+ BOOST_CHECK(host->IsReachable() == true);
+ CheckNotification(host, true, NotificationProblem);
+
+ std::cout << "Fourth check result (ok)" << std::endl;
+ host->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ BOOST_CHECK(host->GetState() == HostUp);
+ BOOST_CHECK(host->GetStateType() == StateTypeHard);
+ BOOST_CHECK(host->GetCheckAttempt() == 1);
+ BOOST_CHECK(host->IsReachable() == true);
+ CheckNotification(host, true, NotificationRecovery);
+
+ c.disconnect();
+}
+
+BOOST_AUTO_TEST_CASE(host_2attempts)
+{
+ boost::signals2::connection c = Checkable::OnNotificationsRequested.connect([](const Checkable::Ptr& checkable, NotificationType type,
+ const CheckResult::Ptr&, const String&, const String&, const MessageOrigin::Ptr&) {
+ NotificationHandler(checkable, type);
+ });
+
+ Host::Ptr host = new Host();
+ host->SetActive(true);
+ host->SetMaxCheckAttempts(2);
+ host->Activate();
+ host->SetAuthority(true);
+ host->SetStateRaw(ServiceOK);
+ host->SetStateType(StateTypeHard);
+
+ std::cout << "Before first check result (ok, hard)" << std::endl;
+ BOOST_CHECK(host->GetState() == HostUp);
+ BOOST_CHECK(host->GetStateType() == StateTypeHard);
+ BOOST_CHECK(host->GetCheckAttempt() == 1);
+ BOOST_CHECK(host->IsReachable() == true);
+ CheckNotification(host, false);
+
+ std::cout << "First check result (unknown)" << std::endl;
+ host->ProcessCheckResult(MakeCheckResult(ServiceUnknown));
+ BOOST_CHECK(host->GetState() == HostDown);
+ BOOST_CHECK(host->GetStateType() == StateTypeSoft);
+ BOOST_CHECK(host->GetCheckAttempt() == 1);
+ BOOST_CHECK(host->IsReachable() == true);
+ CheckNotification(host, false);
+
+ std::cout << "Second check result (critical)" << std::endl;
+ host->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ BOOST_CHECK(host->GetState() == HostDown);
+ BOOST_CHECK(host->GetStateType() == StateTypeHard);
+ BOOST_CHECK(host->GetCheckAttempt() == 1);
+ BOOST_CHECK(host->IsReachable() == true);
+ CheckNotification(host, true, NotificationProblem);
+
+ std::cout << "Third check result (ok)" << std::endl;
+ host->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ BOOST_CHECK(host->GetState() == HostUp);
+ BOOST_CHECK(host->GetStateType() == StateTypeHard);
+ BOOST_CHECK(host->GetCheckAttempt() == 1);
+ BOOST_CHECK(host->IsReachable() == true);
+ CheckNotification(host, true, NotificationRecovery);
+
+ std::cout << "Fourth check result (critical)" << std::endl;
+ host->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ BOOST_CHECK(host->GetState() == HostDown);
+ BOOST_CHECK(host->GetStateType() == StateTypeSoft);
+ BOOST_CHECK(host->GetCheckAttempt() == 1);
+ BOOST_CHECK(host->IsReachable() == true);
+ CheckNotification(host, false);
+
+ std::cout << "Fifth check result (ok)" << std::endl;
+ host->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ BOOST_CHECK(host->GetState() == HostUp);
+ BOOST_CHECK(host->GetStateType() == StateTypeHard);
+ BOOST_CHECK(host->GetCheckAttempt() == 1);
+ BOOST_CHECK(host->IsReachable() == true);
+ CheckNotification(host, false);
+
+ c.disconnect();
+}
+
+BOOST_AUTO_TEST_CASE(host_3attempts)
+{
+ boost::signals2::connection c = Checkable::OnNotificationsRequested.connect([](const Checkable::Ptr& checkable, NotificationType type,
+ const CheckResult::Ptr&, const String&, const String&, const MessageOrigin::Ptr&) {
+ NotificationHandler(checkable, type);
+ });
+
+ Host::Ptr host = new Host();
+ host->SetActive(true);
+ host->SetMaxCheckAttempts(3);
+ host->Activate();
+ host->SetAuthority(true);
+ host->SetStateRaw(ServiceOK);
+ host->SetStateType(StateTypeHard);
+
+ std::cout << "Before first check result (ok, hard)" << std::endl;
+ BOOST_CHECK(host->GetState() == HostUp);
+ BOOST_CHECK(host->GetStateType() == StateTypeHard);
+ BOOST_CHECK(host->GetCheckAttempt() == 1);
+ BOOST_CHECK(host->IsReachable() == true);
+ CheckNotification(host, false);
+
+ std::cout << "First check result (unknown)" << std::endl;
+ host->ProcessCheckResult(MakeCheckResult(ServiceUnknown));
+ BOOST_CHECK(host->GetState() == HostDown);
+ BOOST_CHECK(host->GetStateType() == StateTypeSoft);
+ BOOST_CHECK(host->GetCheckAttempt() == 1);
+ BOOST_CHECK(host->IsReachable() == true);
+ CheckNotification(host, false);
+
+ std::cout << "Second check result (critical)" << std::endl;
+ host->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ BOOST_CHECK(host->GetState() == HostDown);
+ BOOST_CHECK(host->GetStateType() == StateTypeSoft);
+ BOOST_CHECK(host->GetCheckAttempt() == 2);
+ BOOST_CHECK(host->IsReachable() == true);
+ CheckNotification(host, false);
+
+ std::cout << "Third check result (critical)" << std::endl;
+ host->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ BOOST_CHECK(host->GetState() == HostDown);
+ BOOST_CHECK(host->GetStateType() == StateTypeHard);
+ BOOST_CHECK(host->GetCheckAttempt() == 1);
+ BOOST_CHECK(host->IsReachable() == true);
+ CheckNotification(host, true, NotificationProblem);
+
+ std::cout << "Fourth check result (ok)" << std::endl;
+ host->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ BOOST_CHECK(host->GetState() == HostUp);
+ BOOST_CHECK(host->GetStateType() == StateTypeHard);
+ BOOST_CHECK(host->GetCheckAttempt() == 1);
+ BOOST_CHECK(host->IsReachable() == true);
+ CheckNotification(host, true, NotificationRecovery);
+
+ std::cout << "Fifth check result (critical)" << std::endl;
+ host->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ BOOST_CHECK(host->GetState() == HostDown);
+ BOOST_CHECK(host->GetStateType() == StateTypeSoft);
+ BOOST_CHECK(host->GetCheckAttempt() == 1);
+ BOOST_CHECK(host->IsReachable() == true);
+ CheckNotification(host, false);
+
+ std::cout << "Sixth check result (ok)" << std::endl;
+ host->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ BOOST_CHECK(host->GetState() == HostUp);
+ BOOST_CHECK(host->GetStateType() == StateTypeHard);
+ BOOST_CHECK(host->GetCheckAttempt() == 1);
+ BOOST_CHECK(host->IsReachable() == true);
+ CheckNotification(host, false);
+
+ c.disconnect();
+}
+
+BOOST_AUTO_TEST_CASE(service_1attempt)
+{
+ boost::signals2::connection c = Checkable::OnNotificationsRequested.connect([](const Checkable::Ptr& checkable, NotificationType type,
+ const CheckResult::Ptr&, const String&, const String&, const MessageOrigin::Ptr&) {
+ NotificationHandler(checkable, type);
+ });
+
+ Service::Ptr service = new Service();
+ service->SetActive(true);
+ service->SetMaxCheckAttempts(1);
+ service->Activate();
+ service->SetAuthority(true);
+ service->SetStateRaw(ServiceOK);
+ service->SetStateType(StateTypeHard);
+
+ std::cout << "Before first check result (ok, hard)" << std::endl;
+ BOOST_CHECK(service->GetState() == ServiceOK);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetCheckAttempt() == 1);
+ BOOST_CHECK(service->IsReachable() == true);
+ CheckNotification(service, false);
+
+ std::cout << "First check result (unknown)" << std::endl;
+ service->ProcessCheckResult(MakeCheckResult(ServiceUnknown));
+ BOOST_CHECK(service->GetState() == ServiceUnknown);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetCheckAttempt() == 1);
+ BOOST_CHECK(service->IsReachable() == true);
+ CheckNotification(service, true, NotificationProblem);
+
+ std::cout << "Second check result (ok)" << std::endl;
+ service->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ BOOST_CHECK(service->GetState() == ServiceOK);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetCheckAttempt() == 1);
+ BOOST_CHECK(service->IsReachable() == true);
+ CheckNotification(service, true, NotificationRecovery);
+
+ std::cout << "Third check result (critical)" << std::endl;
+ service->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ BOOST_CHECK(service->GetState() == ServiceCritical);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetCheckAttempt() == 1);
+ BOOST_CHECK(service->IsReachable() == true);
+ CheckNotification(service, true, NotificationProblem);
+
+ std::cout << "Fourth check result (ok)" << std::endl;
+ service->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ BOOST_CHECK(service->GetState() == ServiceOK);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetCheckAttempt() == 1);
+ BOOST_CHECK(service->IsReachable() == true);
+ CheckNotification(service, true, NotificationRecovery);
+
+ c.disconnect();
+}
+
+BOOST_AUTO_TEST_CASE(service_2attempts)
+{
+ boost::signals2::connection c = Checkable::OnNotificationsRequested.connect([](const Checkable::Ptr& checkable, NotificationType type,
+ const CheckResult::Ptr&, const String&, const String&, const MessageOrigin::Ptr&) {
+ NotificationHandler(checkable, type);
+ });
+
+ Service::Ptr service = new Service();
+ service->SetActive(true);
+ service->SetMaxCheckAttempts(2);
+ service->Activate();
+ service->SetAuthority(true);
+ service->SetStateRaw(ServiceOK);
+ service->SetStateType(StateTypeHard);
+
+ std::cout << "Before first check result (ok, hard)" << std::endl;
+ BOOST_CHECK(service->GetState() == ServiceOK);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetCheckAttempt() == 1);
+ BOOST_CHECK(service->IsReachable() == true);
+ CheckNotification(service, false);
+
+ std::cout << "First check result (unknown)" << std::endl;
+ service->ProcessCheckResult(MakeCheckResult(ServiceUnknown));
+ BOOST_CHECK(service->GetState() == ServiceUnknown);
+ BOOST_CHECK(service->GetStateType() == StateTypeSoft);
+ BOOST_CHECK(service->GetCheckAttempt() == 1);
+ BOOST_CHECK(service->IsReachable() == true);
+ CheckNotification(service, false);
+
+ std::cout << "Second check result (critical)" << std::endl;
+ service->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ BOOST_CHECK(service->GetState() == ServiceCritical);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetCheckAttempt() == 1);
+ BOOST_CHECK(service->IsReachable() == true);
+ CheckNotification(service, true, NotificationProblem);
+
+ std::cout << "Third check result (ok)" << std::endl;
+ service->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ BOOST_CHECK(service->GetState() == ServiceOK);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetCheckAttempt() == 1);
+ BOOST_CHECK(service->IsReachable() == true);
+ CheckNotification(service, true, NotificationRecovery);
+
+ std::cout << "Fourth check result (critical)" << std::endl;
+ service->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ BOOST_CHECK(service->GetState() == ServiceCritical);
+ BOOST_CHECK(service->GetStateType() == StateTypeSoft);
+ BOOST_CHECK(service->GetCheckAttempt() == 1);
+ BOOST_CHECK(service->IsReachable() == true);
+ CheckNotification(service, false);
+
+ std::cout << "Fifth check result (ok)" << std::endl;
+ service->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ BOOST_CHECK(service->GetState() == ServiceOK);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetCheckAttempt() == 1);
+ BOOST_CHECK(service->IsReachable() == true);
+ CheckNotification(service, false);
+
+ c.disconnect();
+}
+
+BOOST_AUTO_TEST_CASE(service_3attempts)
+{
+ boost::signals2::connection c = Checkable::OnNotificationsRequested.connect([](const Checkable::Ptr& checkable, NotificationType type,
+ const CheckResult::Ptr&, const String&, const String&, const MessageOrigin::Ptr&) {
+ NotificationHandler(checkable, type);
+ });
+
+ Service::Ptr service = new Service();
+ service->SetActive(true);
+ service->SetMaxCheckAttempts(3);
+ service->Activate();
+ service->SetAuthority(true);
+ service->SetStateRaw(ServiceOK);
+ service->SetStateType(StateTypeHard);
+
+ std::cout << "Before first check result (ok, hard)" << std::endl;
+ BOOST_CHECK(service->GetState() == ServiceOK);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetCheckAttempt() == 1);
+ BOOST_CHECK(service->IsReachable() == true);
+ CheckNotification(service, false);
+
+ std::cout << "First check result (unknown)" << std::endl;
+ service->ProcessCheckResult(MakeCheckResult(ServiceUnknown));
+ BOOST_CHECK(service->GetState() == ServiceUnknown);
+ BOOST_CHECK(service->GetStateType() == StateTypeSoft);
+ BOOST_CHECK(service->GetCheckAttempt() == 1);
+ BOOST_CHECK(service->IsReachable() == true);
+ CheckNotification(service, false);
+
+ std::cout << "Second check result (critical)" << std::endl;
+ service->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ BOOST_CHECK(service->GetState() == ServiceCritical);
+ BOOST_CHECK(service->GetStateType() == StateTypeSoft);
+ BOOST_CHECK(service->GetCheckAttempt() == 2);
+ BOOST_CHECK(service->IsReachable() == true);
+ CheckNotification(service, false);
+
+ std::cout << "Third check result (critical)" << std::endl;
+ service->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ BOOST_CHECK(service->GetState() == ServiceCritical);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetCheckAttempt() == 1);
+ BOOST_CHECK(service->IsReachable() == true);
+ CheckNotification(service, true, NotificationProblem);
+
+ std::cout << "Fourth check result (ok)" << std::endl;
+ service->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ BOOST_CHECK(service->GetState() == ServiceOK);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetCheckAttempt() == 1);
+ BOOST_CHECK(service->IsReachable() == true);
+ CheckNotification(service, true, NotificationRecovery);
+
+ std::cout << "Fifth check result (critical)" << std::endl;
+ service->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ BOOST_CHECK(service->GetState() == ServiceCritical);
+ BOOST_CHECK(service->GetStateType() == StateTypeSoft);
+ BOOST_CHECK(service->GetCheckAttempt() == 1);
+ BOOST_CHECK(service->IsReachable() == true);
+ CheckNotification(service, false);
+
+ std::cout << "Sixth check result (ok)" << std::endl;
+ service->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ BOOST_CHECK(service->GetState() == ServiceOK);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetCheckAttempt() == 1);
+ BOOST_CHECK(service->IsReachable() == true);
+ CheckNotification(service, false);
+
+ c.disconnect();
+}
+
+BOOST_AUTO_TEST_CASE(host_flapping_notification)
+{
+#ifndef I2_DEBUG
+ BOOST_WARN_MESSAGE(false, "This test can only be run in a debug build!");
+#else /* I2_DEBUG */
+ boost::signals2::connection c = Checkable::OnNotificationsRequested.connect([](const Checkable::Ptr& checkable, NotificationType type,
+ const CheckResult::Ptr&, const String&, const String&, const MessageOrigin::Ptr&) {
+ NotificationHandler(checkable, type);
+ });
+
+ int timeStepInterval = 60;
+
+ Host::Ptr host = new Host();
+ host->SetActive(true);
+ host->Activate();
+ host->SetAuthority(true);
+ host->SetStateRaw(ServiceOK);
+ host->SetStateType(StateTypeHard);
+ host->SetEnableFlapping(true);
+
+ /* Initialize start time */
+ Utility::SetTime(0);
+
+ std::cout << "Before first check result (ok, hard)" << std::endl;
+ BOOST_CHECK(host->GetState() == HostUp);
+ BOOST_CHECK(host->GetStateType() == StateTypeHard);
+ BOOST_CHECK(host->GetCheckAttempt() == 1);
+
+ Utility::IncrementTime(timeStepInterval);
+
+ std::cout << "Inserting flapping check results" << std::endl;
+
+ for (int i = 0; i < 10; i++) {
+ ServiceState state = (i % 2 == 0 ? ServiceOK : ServiceCritical);
+ host->ProcessCheckResult(MakeCheckResult(state));
+ Utility::IncrementTime(timeStepInterval);
+ }
+
+ BOOST_CHECK(host->IsFlapping() == true);
+
+ CheckNotification(host, true, NotificationFlappingStart);
+
+ std::cout << "Now calm down..." << std::endl;
+
+ for (int i = 0; i < 20; i++) {
+ host->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ Utility::IncrementTime(timeStepInterval);
+ }
+
+ CheckNotification(host, true, NotificationFlappingEnd);
+
+
+ c.disconnect();
+
+#endif /* I2_DEBUG */
+}
+
+BOOST_AUTO_TEST_CASE(service_flapping_notification)
+{
+#ifndef I2_DEBUG
+ BOOST_WARN_MESSAGE(false, "This test can only be run in a debug build!");
+#else /* I2_DEBUG */
+ boost::signals2::connection c = Checkable::OnNotificationsRequested.connect([](const Checkable::Ptr& checkable, NotificationType type,
+ const CheckResult::Ptr&, const String&, const String&, const MessageOrigin::Ptr&) {
+ NotificationHandler(checkable, type);
+ });
+
+ int timeStepInterval = 60;
+
+ Service::Ptr service = new Service();
+ service->SetActive(true);
+ service->Activate();
+ service->SetAuthority(true);
+ service->SetStateRaw(ServiceOK);
+ service->SetStateType(StateTypeHard);
+ service->SetEnableFlapping(true);
+
+ /* Initialize start time */
+ Utility::SetTime(0);
+
+ std::cout << "Before first check result (ok, hard)" << std::endl;
+ BOOST_CHECK(service->GetState() == ServiceOK);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetCheckAttempt() == 1);
+
+ Utility::IncrementTime(timeStepInterval);
+
+ std::cout << "Inserting flapping check results" << std::endl;
+
+ for (int i = 0; i < 10; i++) {
+ ServiceState state = (i % 2 == 0 ? ServiceOK : ServiceCritical);
+ service->ProcessCheckResult(MakeCheckResult(state));
+ Utility::IncrementTime(timeStepInterval);
+ }
+
+ BOOST_CHECK(service->IsFlapping() == true);
+
+ CheckNotification(service, true, NotificationFlappingStart);
+
+
+
+ std::cout << "Now calm down..." << std::endl;
+
+ for (int i = 0; i < 20; i++) {
+ service->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ Utility::IncrementTime(timeStepInterval);
+ }
+
+ CheckNotification(service, true, NotificationFlappingEnd);
+
+ c.disconnect();
+
+#endif /* I2_DEBUG */
+}
+
+BOOST_AUTO_TEST_CASE(service_flapping_problem_notifications)
+{
+#ifndef I2_DEBUG
+ BOOST_WARN_MESSAGE(false, "This test can only be run in a debug build!");
+#else /* I2_DEBUG */
+ boost::signals2::connection c = Checkable::OnNotificationsRequested.connect([](const Checkable::Ptr& checkable, NotificationType type,
+ const CheckResult::Ptr&, const String&, const String&, const MessageOrigin::Ptr&) {
+ NotificationHandler(checkable, type);
+ });
+
+ int timeStepInterval = 60;
+
+ Service::Ptr service = new Service();
+ service->Activate();
+ service->SetAuthority(true);
+ service->SetStateRaw(ServiceOK);
+ service->SetStateType(StateTypeHard);
+ service->SetEnableFlapping(true);
+ service->SetMaxCheckAttempts(3);
+
+ /* Initialize start time */
+ Utility::SetTime(0);
+
+ std::cout << "Before first check result (ok, hard)" << std::endl;
+ BOOST_CHECK(service->GetState() == ServiceOK);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetCheckAttempt() == 1);
+
+ Utility::IncrementTime(timeStepInterval);
+
+ std::cout << "Inserting flapping check results" << std::endl;
+
+ for (int i = 0; i < 10; i++) {
+ ServiceState state = (i % 2 == 0 ? ServiceOK : ServiceCritical);
+ service->ProcessCheckResult(MakeCheckResult(state));
+ Utility::IncrementTime(timeStepInterval);
+ }
+
+ BOOST_CHECK(service->IsFlapping() == true);
+
+ CheckNotification(service, true, NotificationFlappingStart);
+
+ // Insert enough check results to reach a hard problem state while still flapping
+
+ service->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ Utility::IncrementTime(timeStepInterval);
+ service->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ Utility::IncrementTime(timeStepInterval);
+ service->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ Utility::IncrementTime(timeStepInterval);
+
+
+ BOOST_CHECK(service->IsFlapping() == true);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetState() == ServiceCritical);
+
+ CheckNotification(service, false, NotificationProblem);
+
+ // Calm down
+ while (service->IsFlapping()) {
+ service->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ Utility::IncrementTime(timeStepInterval);
+ }
+
+ CheckNotification(service, true, NotificationFlappingEnd);
+
+ /* Intended behaviour is a Problem notification being sent as well, but there is a problem:
+ * We don't know whether the Object was Critical before we started flapping and sent out a Notification.
+ * A notification will not be sent, no matter how many criticals follow.
+ *
+ * service->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ * CheckNotification(service, true, NotificationProblem);
+ * ^ This fails, no notification will be sent
+ *
+ * There is also a different issue: when we receive an OK check result, a Recovery Notification will be sent
+ * since the service went from hard critical into soft ok. Yet there is no fitting critical notification.
+ * This should not happen:
+ *
+ * service->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ * CheckNotification(service, false, NotificationRecovery);
+ * ^ This fails, recovery is sent
+ */
+
+ BOOST_CHECK(service->IsFlapping() == false);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetState() == ServiceCritical);
+
+ // Known failure, see #5713
+ // CheckNotification(service, true, NotificationProblem);
+
+ service->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ Utility::IncrementTime(timeStepInterval);
+
+ // Known failure, see #5713
+ // CheckNotification(service, true, NotificationRecovery);
+
+ c.disconnect();
+
+#endif /* I2_DEBUG */
+}
+
+BOOST_AUTO_TEST_CASE(service_flapping_ok_into_bad)
+{
+#ifndef I2_DEBUG
+ BOOST_WARN_MESSAGE(false, "This test can only be run in a debug build!");
+#else /* I2_DEBUG */
+ boost::signals2::connection c = Checkable::OnNotificationsRequested.connect([](const Checkable::Ptr& checkable, NotificationType type,
+ const CheckResult::Ptr&, const String&, const String&, const MessageOrigin::Ptr&) {
+ NotificationHandler(checkable, type);
+ });
+
+ int timeStepInterval = 60;
+
+ Service::Ptr service = new Service();
+ service->Activate();
+ service->SetAuthority(true);
+ service->SetStateRaw(ServiceOK);
+ service->SetStateType(StateTypeHard);
+ service->SetEnableFlapping(true);
+ service->SetMaxCheckAttempts(3);
+
+ /* Initialize start time */
+ Utility::SetTime(0);
+
+ std::cout << "Before first check result (ok, hard)" << std::endl;
+ BOOST_CHECK(service->GetState() == ServiceOK);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetCheckAttempt() == 1);
+
+ Utility::IncrementTime(timeStepInterval);
+
+ std::cout << "Inserting flapping check results" << std::endl;
+
+ for (int i = 0; i < 10; i++) {
+ ServiceState state = (i % 2 == 0 ? ServiceOK : ServiceCritical);
+ service->ProcessCheckResult(MakeCheckResult(state));
+ Utility::IncrementTime(timeStepInterval);
+ }
+
+ BOOST_CHECK(service->IsFlapping() == true);
+
+ CheckNotification(service, true, NotificationFlappingStart);
+
+ // Insert enough check results to reach a hard problem state while still flapping
+
+ service->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ Utility::IncrementTime(timeStepInterval);
+ service->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ Utility::IncrementTime(timeStepInterval);
+ service->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ Utility::IncrementTime(timeStepInterval);
+
+
+ BOOST_CHECK(service->IsFlapping() == true);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetState() == ServiceCritical);
+
+ CheckNotification(service, false, NotificationProblem);
+
+ // Calm down
+ while (service->IsFlapping()) {
+ service->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ Utility::IncrementTime(timeStepInterval);
+ }
+
+ CheckNotification(service, true, NotificationFlappingEnd);
+
+ service->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ Utility::IncrementTime(timeStepInterval);
+
+ BOOST_CHECK(service->IsFlapping() == false);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetState() == ServiceCritical);
+
+ // We expect a problem notification here
+ // Known failure, see #5713
+ // CheckNotification(service, true, NotificationProblem);
+
+ c.disconnect();
+
+#endif /* I2_DEBUG */
+}
+BOOST_AUTO_TEST_CASE(service_flapping_ok_over_bad_into_ok)
+{
+#ifndef I2_DEBUG
+ BOOST_WARN_MESSAGE(false, "This test can only be run in a debug build!");
+#else /* I2_DEBUG */
+ boost::signals2::connection c = Checkable::OnNotificationsRequested.connect([](const Checkable::Ptr& checkable, NotificationType type,
+ const CheckResult::Ptr&, const String&, const String&, const MessageOrigin::Ptr&) {
+ NotificationHandler(checkable, type);
+ });
+
+ int timeStepInterval = 60;
+
+ Service::Ptr service = new Service();
+ service->Activate();
+ service->SetAuthority(true);
+ service->SetStateRaw(ServiceOK);
+ service->SetStateType(StateTypeHard);
+ service->SetEnableFlapping(true);
+ service->SetMaxCheckAttempts(3);
+
+ /* Initialize start time */
+ Utility::SetTime(0);
+
+ std::cout << "Before first check result (ok, hard)" << std::endl;
+ BOOST_CHECK(service->GetState() == ServiceOK);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetCheckAttempt() == 1);
+
+ Utility::IncrementTime(timeStepInterval);
+
+ std::cout << "Inserting flapping check results" << std::endl;
+
+ for (int i = 0; i < 10; i++) {
+ ServiceState state = (i % 2 == 0 ? ServiceOK : ServiceCritical);
+ service->ProcessCheckResult(MakeCheckResult(state));
+ Utility::IncrementTime(timeStepInterval);
+ }
+
+ BOOST_CHECK(service->IsFlapping() == true);
+
+ CheckNotification(service, true, NotificationFlappingStart);
+
+ // Insert enough check results to reach a hard problem state while still flapping
+
+ service->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ Utility::IncrementTime(timeStepInterval);
+ service->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ Utility::IncrementTime(timeStepInterval);
+ service->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ Utility::IncrementTime(timeStepInterval);
+
+
+ BOOST_CHECK(service->IsFlapping() == true);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetState() == ServiceCritical);
+
+ CheckNotification(service, false, NotificationProblem);
+
+ // Calm down
+ while (service->IsFlapping()) {
+ service->ProcessCheckResult(MakeCheckResult(ServiceCritical));
+ Utility::IncrementTime(timeStepInterval);
+ }
+
+ CheckNotification(service, true, NotificationFlappingEnd);
+
+ service->ProcessCheckResult(MakeCheckResult(ServiceOK));
+ Utility::IncrementTime(timeStepInterval);
+
+ BOOST_CHECK(service->IsFlapping() == false);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ BOOST_CHECK(service->GetState() == ServiceOK);
+
+ // There should be no recovery
+ // Known failure, see #5713
+ // CheckNotification(service, false, NotificationRecovery);
+
+ c.disconnect();
+
+#endif /* I2_DEBUG */
+}
+
+BOOST_AUTO_TEST_CASE(suppressed_notification)
+{
+ /* Tests that suppressed notifications on a Checkable are sent after the suppression ends if and only if the first
+ * hard state after the suppression is different from the last hard state before the suppression. The test works
+ * by bringing a service in a defined hard state, creating a downtime, performing some state changes, removing the
+ * downtime, bringing the service into another defined hard state (if not already) and checking the requested
+ * notifications.
+ */
+
+ struct NotificationLog {
+ std::vector<std::pair<NotificationType, ServiceState>> GetAndClear() {
+ std::lock_guard<std::mutex> lock (mutex);
+
+ std::vector<std::pair<NotificationType, ServiceState>> ret;
+ std::swap(ret, log);
+ return ret;
+ }
+
+ void Add(std::pair<NotificationType, ServiceState> notification) {
+ std::lock_guard<std::mutex> lock (mutex);
+
+ log.emplace_back(notification);
+ }
+
+ private:
+ std::mutex mutex;
+ std::vector<std::pair<NotificationType, ServiceState>> log;
+ };
+
+ const std::vector<ServiceState> states {ServiceOK, ServiceWarning, ServiceCritical, ServiceUnknown};
+
+ for (bool isVolatile : {false, true}) {
+ for (int checkAttempts : {1, 2}) {
+ for (ServiceState initialState : states) {
+ for (ServiceState s1 : states)
+ for (ServiceState s2 : states)
+ for (ServiceState s3 : states) {
+ const std::vector<ServiceState> sequence {s1, s2, s3};
+
+ std::string testcase;
+
+ {
+ std::ostringstream buf;
+ buf << "volatile=" << isVolatile
+ << " checkAttempts=" << checkAttempts
+ << " sequence={" << Service::StateToString(initialState);
+
+ for (ServiceState s : sequence) {
+ buf << " " << Service::StateToString(s);
+ }
+
+ buf << "}";
+ testcase = buf.str();
+ }
+
+ std::cout << "Test case: " << testcase << std::endl;
+
+ // Create host and service for the test.
+ Host::Ptr host = new Host();
+ host->SetName("suppressed_notifications");
+ host->Register();
+
+ Service::Ptr service = new Service();
+ service->SetHostName(host->GetName());
+ service->SetName("service");
+ service->SetActive(true);
+ service->SetVolatile(isVolatile);
+ service->SetMaxCheckAttempts(checkAttempts);
+ service->Activate();
+ service->SetAuthority(true);
+ service->Register();
+
+ host->OnAllConfigLoaded();
+ service->OnAllConfigLoaded();
+
+ // Bring service into the initial hard state.
+ for (int i = 0; i < checkAttempts; i++) {
+ std::cout << " ProcessCheckResult("
+ << Service::StateToString(initialState) << ")" << std::endl;
+ service->ProcessCheckResult(MakeCheckResult(initialState));
+ }
+
+ BOOST_CHECK(service->GetState() == initialState);
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+
+ /* Keep track of all notifications requested from now on.
+ *
+ * Boost.Signals2 handlers may still be executing from another thread after they were disconnected.
+ * Make the structures accessed by the handlers shared pointers so that they remain valid as long
+ * as they may be accessed from one of these handlers.
+ */
+ auto notificationLog = std::make_shared<NotificationLog>();
+
+ boost::signals2::scoped_connection c (Checkable::OnNotificationsRequested.connect(
+ [notificationLog,service](
+ const Checkable::Ptr& checkable, NotificationType type, const CheckResult::Ptr& cr,
+ const String&, const String&, const MessageOrigin::Ptr&
+ ) {
+ BOOST_CHECK_EQUAL(checkable, service);
+ std::cout << " -> OnNotificationsRequested(" << Notification::NotificationTypeToString(type)
+ << ", " << Service::StateToString(cr->GetState()) << ")" << std::endl;
+
+ notificationLog->Add({type, cr->GetState()});
+ }
+ ));
+
+ // Helper to assert which notifications were requested. Implicitly clears the stored notifications.
+ auto assertNotifications = [notificationLog](
+ const std::vector<std::pair<NotificationType, ServiceState>>& expected,
+ const std::string& extraMessage
+ ) {
+ // Pretty-printer for the vectors of requested and expected notifications.
+ auto pretty = [](const std::vector<std::pair<NotificationType, ServiceState>>& vec) {
+ std::ostringstream s;
+
+ s << "{";
+ bool first = true;
+ for (const auto &v : vec) {
+ if (first) {
+ first = false;
+ } else {
+ s << ", ";
+ }
+ s << Notification::NotificationTypeToString(v.first)
+ << "/" << Service::StateToString(v.second);
+ }
+ s << "}";
+
+ return s.str();
+ };
+
+ auto got (notificationLog->GetAndClear());
+
+ BOOST_CHECK_MESSAGE(got == expected, "expected=" << pretty(expected)
+ << " got=" << pretty(got)
+ << (extraMessage.empty() ? "" : " ") << extraMessage);
+ };
+
+ // Start a downtime for the service.
+ std::cout << " Downtime Start" << std::endl;
+ Downtime::Ptr downtime = new Downtime();
+ downtime->SetHostName(host->GetName());
+ downtime->SetServiceName(service->GetName());
+ downtime->SetName("downtime");
+ downtime->SetFixed(true);
+ downtime->SetStartTime(Utility::GetTime() - 3600);
+ downtime->SetEndTime(Utility::GetTime() + 3600);
+ service->RegisterDowntime(downtime);
+ downtime->Register();
+ downtime->OnAllConfigLoaded();
+ downtime->TriggerDowntime(Utility::GetTime());
+
+ BOOST_CHECK(service->IsInDowntime());
+
+ // Process check results for the state sequence.
+ for (ServiceState s : sequence) {
+ std::cout << " ProcessCheckResult(" << Service::StateToString(s) << ")" << std::endl;
+ service->ProcessCheckResult(MakeCheckResult(s));
+ BOOST_CHECK(service->GetState() == s);
+ if (checkAttempts == 1) {
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+ }
+ }
+
+ assertNotifications({}, "(no notifications in downtime)");
+
+ if (service->GetSuppressedNotifications()) {
+ BOOST_CHECK_EQUAL(service->GetStateBeforeSuppression(), initialState);
+ }
+
+ // Remove the downtime.
+ std::cout << " Downtime End" << std::endl;
+ service->UnregisterDowntime(downtime);
+ downtime->Unregister();
+ BOOST_CHECK(!service->IsInDowntime());
+
+ if (service->GetStateType() == icinga::StateTypeSoft) {
+ // When the current state is a soft state, no notification should be sent just yet.
+ std::cout << " FireSuppressedNotifications()" << std::endl;
+ service->FireSuppressedNotifications();
+
+ assertNotifications({}, testcase + " (should not fire in soft state)");
+
+ // Repeat the last check result until reaching a hard state.
+ for (int i = 0; i < checkAttempts && service->GetStateType() == StateTypeSoft; i++) {
+ std::cout << " ProcessCheckResult(" << Service::StateToString(sequence.back()) << ")"
+ << std::endl;
+ service->ProcessCheckResult(MakeCheckResult(sequence.back()));
+ BOOST_CHECK(service->GetState() == sequence.back());
+ }
+ }
+
+ // The service should be in a hard state now and notifications should now be sent if applicable.
+ BOOST_CHECK(service->GetStateType() == StateTypeHard);
+
+ std::cout << " FireSuppressedNotifications()" << std::endl;
+ service->FireSuppressedNotifications();
+
+ if (initialState != sequence.back()) {
+ NotificationType t = sequence.back() == ServiceOK ? NotificationRecovery : NotificationProblem;
+ assertNotifications({{t, sequence.back()}}, testcase);
+ } else {
+ assertNotifications({}, testcase);
+ }
+
+ // Remove host and service.
+ service->Unregister();
+ host->Unregister();
+ }
+ }
+ }
+ }
+}
+
+BOOST_AUTO_TEST_SUITE_END()
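The host_*attempts and service_*attempts cases above exercise the soft/hard state machine that decides when a Problem notification fires: with max_check_attempts = N, a non-OK result only becomes a hard state once N consecutive non-OK results have been seen, while a recovery to OK is always hard and resets the attempt counter. The following is a minimal, self-contained sketch of that attempt logic; the names and structure are illustrative and only mirror what the test cases assert, not the Checkable implementation.

    #include <iostream>

    enum class StateType { Soft, Hard };

    // Hypothetical, simplified attempt counter: non-OK results escalate from
    // soft to hard after maxAttempts consecutive failures, OK resets.
    struct SimpleCheckable {
        int maxAttempts = 3;
        int attempt = 1;
        bool ok = true;
        StateType type = StateType::Hard;

        void ProcessResult(bool resultOk)
        {
            if (resultOk) {
                ok = true;                  // recoveries are always hard
                type = StateType::Hard;
                attempt = 1;
                return;
            }

            if (!ok && type == StateType::Hard)
                return;                     // already a hard problem: stays hard

            if (ok)
                attempt = 1;                // first failure after an OK result
            else
                attempt++;                  // was soft: keep escalating

            ok = false;

            if (attempt >= maxAttempts) {
                type = StateType::Hard;     // hard state: problem notification fires
                attempt = 1;
            } else {
                type = StateType::Soft;     // soft state: no notification yet
            }
        }
    };

    int main()
    {
        SimpleCheckable c;
        for (bool r : {false, false, false, true, false}) {
            c.ProcessResult(r);
            std::cout << (c.ok ? "OK" : "PROBLEM") << " "
                << (c.type == StateType::Hard ? "hard" : "soft")
                << " attempt=" << c.attempt << "\n";
        }
    }

For max_check_attempts = 3 this produces soft/1, soft/2, hard/1 on three consecutive failures, which is the sequence host_3attempts checks before expecting a Problem notification.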
diff --git a/test/icinga-dependencies.cpp b/test/icinga-dependencies.cpp
new file mode 100644
index 0000000..929b6ca
--- /dev/null
+++ b/test/icinga-dependencies.cpp
@@ -0,0 +1,101 @@
+/* Icinga 2 | (c) 2020 Icinga GmbH | GPLv2+ */
+
+#include "icinga/host.hpp"
+#include "icinga/dependency.hpp"
+#include <BoostTestTargetConfig.h>
+#include <iostream>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(icinga_dependencies)
+
+BOOST_AUTO_TEST_CASE(multi_parent)
+{
+ /* One child host, two parent hosts. Simulate multi-parent dependencies. */
+ std::cout << "Testing reachability for multi parent dependencies." << std::endl;
+
+ /*
+ * Our mock requires:
+ * - SetParent/SetChild functions for the dependency
+ * - Parent objects need a CheckResult object
+ * - Dependencies need a StateFilter
+ */
+ Host::Ptr parentHost1 = new Host();
+ parentHost1->SetActive(true);
+ parentHost1->SetMaxCheckAttempts(1);
+ parentHost1->Activate();
+ parentHost1->SetAuthority(true);
+ parentHost1->SetStateRaw(ServiceCritical);
+ parentHost1->SetStateType(StateTypeHard);
+ parentHost1->SetLastCheckResult(new CheckResult());
+
+ Host::Ptr parentHost2 = new Host();
+ parentHost2->SetActive(true);
+ parentHost2->SetMaxCheckAttempts(1);
+ parentHost2->Activate();
+ parentHost2->SetAuthority(true);
+ parentHost2->SetStateRaw(ServiceOK);
+ parentHost2->SetStateType(StateTypeHard);
+ parentHost2->SetLastCheckResult(new CheckResult());
+
+ Host::Ptr childHost = new Host();
+ childHost->SetActive(true);
+ childHost->SetMaxCheckAttempts(1);
+ childHost->Activate();
+ childHost->SetAuthority(true);
+ childHost->SetStateRaw(ServiceOK);
+ childHost->SetStateType(StateTypeHard);
+
+ /* Build the dependency tree. */
+ Dependency::Ptr dep1 = new Dependency();
+
+ dep1->SetParent(parentHost1);
+ dep1->SetChild(childHost);
+ dep1->SetStateFilter(StateFilterUp);
+
+ // Reverse dependencies
+ childHost->AddDependency(dep1);
+ parentHost1->AddReverseDependency(dep1);
+
+ Dependency::Ptr dep2 = new Dependency();
+
+ dep2->SetParent(parentHost2);
+ dep2->SetChild(childHost);
+ dep2->SetStateFilter(StateFilterUp);
+
+ // Reverse dependencies
+ childHost->AddDependency(dep2);
+ parentHost2->AddReverseDependency(dep2);
+
+
+ /* Test the reachability from this point.
+ * parentHost1 is DOWN, parentHost2 is UP.
+ * Expected result: childHost is unreachable.
+ */
+ parentHost1->SetStateRaw(ServiceCritical); // parent Host 1 DOWN
+ parentHost2->SetStateRaw(ServiceOK); // parent Host 2 UP
+
+ BOOST_CHECK(childHost->IsReachable() == false);
+
+ /* The only DNS server is DOWN.
+ * Expected result: childHost is unreachable.
+ */
+ dep1->SetRedundancyGroup("DNS");
+ BOOST_CHECK(childHost->IsReachable() == false);
+
+ /* One of the two DNS servers is DOWN.
+ * Expected result: childHost is reachable.
+ */
+ dep2->SetRedundancyGroup("DNS");
+ BOOST_CHECK(childHost->IsReachable() == true);
+
+ /* Both DNS servers are DOWN.
+ * Expected result: childHost is unreachable.
+ */
+ parentHost1->SetStateRaw(ServiceCritical); // parent Host 1 DOWN
+ parentHost2->SetStateRaw(ServiceCritical); // parent Host 2 DOWN
+
+ BOOST_CHECK(childHost->IsReachable() == false);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
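The multi_parent case checks the reachability rules that redundancy groups change: without a group, every failing parent makes the child unreachable; within a group, the child stays reachable as long as at least one member of that group is up. Below is a small self-contained sketch of that evaluation with hypothetical names, not the Dependency class API.

    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    // Hypothetical model of the rule exercised above: ungrouped dependencies must
    // all be satisfied, while dependencies sharing a redundancy group only need
    // one healthy parent among them.
    struct Dep {
        bool parentUp;
        std::string redundancyGroup; // empty string = no group
    };

    bool IsReachable(const std::vector<Dep>& deps)
    {
        std::map<std::string, bool> groupHasUpParent;

        for (const Dep& d : deps) {
            if (d.redundancyGroup.empty()) {
                if (!d.parentUp)
                    return false;            // single point of failure
            } else {
                groupHasUpParent[d.redundancyGroup] |= d.parentUp;
            }
        }

        for (const auto& g : groupHasUpParent)
            if (!g.second)
                return false;                // whole redundancy group is down
        return true;
    }

    int main()
    {
        // One DNS server down, one up, both in the "DNS" group: still reachable.
        std::cout << IsReachable({{false, "DNS"}, {true, "DNS"}}) << "\n";  // 1
        // Both group members down: unreachable.
        std::cout << IsReachable({{false, "DNS"}, {false, "DNS"}}) << "\n"; // 0
        // An ungrouped down parent makes the child unreachable regardless.
        std::cout << IsReachable({{false, ""}, {true, "DNS"}}) << "\n";     // 0
    }

The three outputs mirror the three reachability expectations asserted in the test after the redundancy group is assigned.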
diff --git a/test/icinga-legacytimeperiod.cpp b/test/icinga-legacytimeperiod.cpp
new file mode 100644
index 0000000..e1150be
--- /dev/null
+++ b/test/icinga-legacytimeperiod.cpp
@@ -0,0 +1,694 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/utility.hpp"
+#include "icinga/legacytimeperiod.hpp"
+#include <boost/date_time/posix_time/posix_time.hpp>
+#include <boost/date_time/posix_time/ptime.hpp>
+#include <boost/date_time/posix_time/posix_time_duration.hpp>
+#include <boost/date_time/gregorian/conversion.hpp>
+#include <boost/date_time/date.hpp>
+#include <boost/optional.hpp>
+#include <iomanip>
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(icinga_legacytimeperiod);
+
+struct GlobalTimezoneFixture
+{
+ char *tz;
+
+ GlobalTimezoneFixture(const char *fixed_tz = "")
+ {
+ tz = getenv("TZ");
+#ifdef _WIN32
+ _putenv_s("TZ", fixed_tz == "" ? "UTC" : fixed_tz);
+#else
+ setenv("TZ", fixed_tz, 1);
+#endif
+ tzset();
+ }
+
+ ~GlobalTimezoneFixture()
+ {
+#ifdef _WIN32
+ if (tz)
+ _putenv_s("TZ", tz);
+ else
+ _putenv_s("TZ", "");
+#else
+ if (tz)
+ setenv("TZ", tz, 1);
+ else
+ unsetenv("TZ");
+#endif
+ tzset();
+ }
+};
+
+BOOST_GLOBAL_FIXTURE(GlobalTimezoneFixture);
+
+// DST changes in America/Los_Angeles:
+// 2021-03-14: 01:59:59 PST (UTC-8) -> 03:00:00 PDT (UTC-7)
+// 2021-11-07: 01:59:59 PDT (UTC-7) -> 01:00:00 PST (UTC-8)
+#ifndef _WIN32
+static const char *dst_test_timezone = "America/Los_Angeles";
+#else /* _WIN32 */
+// Tests are using pacific time because Windows only really supports timezones following US DST rules with the TZ
+// environment variable. Format is "[Standard TZ][negative UTC offset][DST TZ]".
+// https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/tzset?view=msvc-160#remarks
+static const char *dst_test_timezone = "PST8PDT";
+#endif /* _WIN32 */
+
+BOOST_AUTO_TEST_CASE(simple)
+{
+ tm tm_beg, tm_end, tm_ref;
+ String timestamp;
+ boost::posix_time::ptime begin;
+ boost::posix_time::ptime end;
+ boost::posix_time::ptime expectedBegin;
+ boost::posix_time::ptime expectedEnd;
+
+ //-----------------------------------------------------
+ // check parsing of "YYYY-MM-DD" specs
+ timestamp = "2016-01-01";
+
+ expectedBegin = boost::posix_time::ptime(boost::gregorian::date(2016, 1, 1), boost::posix_time::time_duration(0, 0, 0));
+
+ expectedEnd = boost::posix_time::ptime(boost::gregorian::date(2016, 1, 2), boost::posix_time::time_duration(0, 0, 0));
+
+ // Run test
+ LegacyTimePeriod::ParseTimeSpec(timestamp, &tm_beg, &tm_end, &tm_ref);
+
+ // Compare times
+ begin = boost::posix_time::ptime_from_tm(tm_beg);
+ end = boost::posix_time::ptime_from_tm(tm_end);
+
+ BOOST_CHECK_EQUAL(begin, expectedBegin);
+ BOOST_CHECK_EQUAL(end, expectedEnd);
+
+ //-----------------------------------------------------
+ timestamp = "2015-12-31";
+
+ expectedBegin = boost::posix_time::ptime(boost::gregorian::date(2015, 12, 31), boost::posix_time::time_duration(0, 0, 0));
+
+ expectedEnd = boost::posix_time::ptime(boost::gregorian::date(2016, 1, 1), boost::posix_time::time_duration(0, 0, 0));
+
+ // Run test
+ LegacyTimePeriod::ParseTimeSpec(timestamp, &tm_beg, &tm_end, &tm_ref);
+
+ // Compare times
+ begin = boost::posix_time::ptime_from_tm(tm_beg);
+ end = boost::posix_time::ptime_from_tm(tm_end);
+
+ BOOST_CHECK_EQUAL(begin, expectedBegin);
+ BOOST_CHECK_EQUAL(end, expectedEnd);
+
+ //-----------------------------------------------------
+ // Break things forcefully
+ BOOST_CHECK_THROW(LegacyTimePeriod::ParseTimeSpec("2015-12-32", &tm_beg, &tm_end, &tm_ref),
+ std::invalid_argument);
+
+ BOOST_CHECK_THROW(LegacyTimePeriod::ParseTimeSpec("2015-28-01", &tm_beg, &tm_end, &tm_ref),
+ std::invalid_argument);
+
+ //-----------------------------------------------------
+ // check parsing of "day X" and "day -X" specs
+ timestamp = "day 2";
+ tm_ref.tm_year = 2016 - 1900;
+ tm_ref.tm_mon = 2 - 1;
+
+ expectedBegin = boost::posix_time::ptime(boost::gregorian::date(2016, 2, 2), boost::posix_time::time_duration(0, 0, 0));
+
+ expectedEnd = boost::posix_time::ptime(boost::gregorian::date(2016, 2, 3), boost::posix_time::time_duration(0, 0, 0));
+
+ // Run Tests
+ LegacyTimePeriod::ParseTimeSpec(timestamp, &tm_beg, &tm_end, &tm_ref);
+
+ // Compare times
+ begin = boost::posix_time::ptime_from_tm(tm_beg);
+ end = boost::posix_time::ptime_from_tm(tm_end);
+
+ BOOST_CHECK_EQUAL(begin, expectedBegin);
+ BOOST_CHECK_EQUAL(end, expectedEnd);
+
+ //-----------------------------------------------------
+ timestamp = "day 31";
+ tm_ref.tm_year = 2018 - 1900;
+ tm_ref.tm_mon = 12 - 1;
+
+ expectedBegin = boost::posix_time::ptime(boost::gregorian::date(2018, 12, 31), boost::posix_time::time_duration(0, 0, 0));
+
+ expectedEnd = boost::posix_time::ptime(boost::gregorian::date(2019, 1, 1), boost::posix_time::time_duration(0, 0, 0));
+
+ // Run Tests
+ LegacyTimePeriod::ParseTimeSpec(timestamp, &tm_beg, &tm_end, &tm_ref);
+
+ // Compare times
+ begin = boost::posix_time::ptime_from_tm(tm_beg);
+ end = boost::posix_time::ptime_from_tm(tm_end);
+
+ BOOST_CHECK_EQUAL(begin, expectedBegin);
+ BOOST_CHECK_EQUAL(end, expectedEnd);
+
+ //-----------------------------------------------------
+ // Last day of the month
+ timestamp = "day -1";
+ tm_ref.tm_year = 2012 - 1900;
+ tm_ref.tm_mon = 7 - 1;
+
+ expectedBegin = boost::posix_time::ptime(boost::gregorian::date(2012, 7, 31), boost::posix_time::time_duration(0, 0, 0));
+
+ expectedEnd = boost::posix_time::ptime(boost::gregorian::date(2012, 8, 1), boost::posix_time::time_duration(0, 0, 0));
+
+ // Run Tests
+ LegacyTimePeriod::ParseTimeSpec(timestamp, &tm_beg, &tm_end, &tm_ref);
+
+ // Compare times
+ begin = boost::posix_time::ptime_from_tm(tm_beg);
+ end = boost::posix_time::ptime_from_tm(tm_end);
+
+ BOOST_CHECK_EQUAL(begin, expectedBegin);
+ BOOST_CHECK_EQUAL(end, expectedEnd);
+
+ //-----------------------------------------------------
+ // Third last day of the month
+ timestamp = "day -3";
+ tm_ref.tm_year = 2019 - 1900;
+ tm_ref.tm_mon = 7 - 1;
+
+ expectedBegin = boost::posix_time::ptime(boost::gregorian::date(2019, 7, 29), boost::posix_time::time_duration(0, 0, 0));
+
+ expectedEnd = boost::posix_time::ptime(boost::gregorian::date(2019, 7, 30), boost::posix_time::time_duration(0, 0, 0));
+
+ // Run Tests
+ LegacyTimePeriod::ParseTimeSpec(timestamp, &tm_beg, &tm_end, &tm_ref);
+
+ // Compare times
+ begin = boost::posix_time::ptime_from_tm(tm_beg);
+ end = boost::posix_time::ptime_from_tm(tm_end);
+
+ BOOST_CHECK_EQUAL(begin, expectedBegin);
+ BOOST_CHECK_EQUAL(end, expectedEnd);
+
+ //-----------------------------------------------------
+ // Leap year with the last day of the month
+ timestamp = "day -1";
+ tm_ref.tm_year = 2016 - 1900; // leap year
+ tm_ref.tm_mon = 2 - 1;
+
+ expectedBegin = boost::posix_time::ptime(boost::gregorian::date(2016, 2, 29), boost::posix_time::time_duration(0, 0, 0));
+
+ expectedEnd = boost::posix_time::ptime(boost::gregorian::date(2016, 3, 1), boost::posix_time::time_duration(0, 0, 0));
+
+ // Run Tests
+ LegacyTimePeriod::ParseTimeSpec("day -1", &tm_beg, &tm_end, &tm_ref);
+
+ // Compare times
+ begin = boost::posix_time::ptime_from_tm(tm_beg);
+ end = boost::posix_time::ptime_from_tm(tm_end);
+
+ BOOST_CHECK_EQUAL(begin, expectedBegin);
+ BOOST_CHECK_EQUAL(end, expectedEnd);
+}
+
+struct DateTime
+{
+ struct {
+ int Year, Month, Day;
+ } Date;
+ struct {
+ int Hour, Minute, Second;
+ } Time;
+};
+
+static inline
+void AdvancedHelper(const char *timestamp, DateTime from, DateTime to)
+{
+ using boost::gregorian::date;
+ using boost::posix_time::ptime;
+ using boost::posix_time::ptime_from_tm;
+ using boost::posix_time::time_duration;
+
+ tm tm_beg, tm_end, tm_ref;
+
+ tm_ref.tm_year = from.Date.Year - 1900;
+ tm_ref.tm_mon = from.Date.Month - 1;
+ tm_ref.tm_mday = from.Date.Day;
+
+ // Run test
+ LegacyTimePeriod::ProcessTimeRangeRaw(timestamp, &tm_ref, &tm_beg, &tm_end);
+
+ // Compare times
+ BOOST_CHECK_EQUAL(ptime_from_tm(tm_beg), ptime(date(from.Date.Year, from.Date.Month, from.Date.Day), time_duration(from.Time.Hour, from.Time.Minute, from.Time.Second)));
+ BOOST_CHECK_EQUAL(ptime_from_tm(tm_end), ptime(date(to.Date.Year, to.Date.Month, to.Date.Day), time_duration(to.Time.Hour, to.Time.Minute, to.Time.Second)));
+}
+
+BOOST_AUTO_TEST_CASE(advanced)
+{
+ tm tm_beg, tm_end, tm_ref;
+ String timestamp;
+ boost::posix_time::ptime begin;
+ boost::posix_time::ptime end;
+ boost::posix_time::ptime expectedBegin;
+ boost::posix_time::ptime expectedEnd;
+
+ //-----------------------------------------------------
+ // 2019-05-06 where Icinga celebrates 10 years #monitoringlove
+ // 2019-05-06 22:00:00 - 2019-05-07 06:00:00
+ AdvancedHelper("22:00-06:00", {{2019, 5, 6}, {22, 0, 0}}, {{2019, 5, 7}, {6, 0, 0}});
+ AdvancedHelper("22:00:01-06:00", {{2019, 5, 6}, {22, 0, 1}}, {{2019, 5, 7}, {6, 0, 0}});
+ AdvancedHelper("22:00-06:00:02", {{2019, 5, 6}, {22, 0, 0}}, {{2019, 5, 7}, {6, 0, 2}});
+ AdvancedHelper("22:00:03-06:00:04", {{2019, 5, 6}, {22, 0, 3}}, {{2019, 5, 7}, {6, 0, 4}});
+
+ //-----------------------------------------------------
+	// 2009-05-06 Icinga is unleashed.
+ // 09:00:00 - 17:00:00
+ AdvancedHelper("09:00-17:00", {{2009, 5, 6}, {9, 0, 0}}, {{2009, 5, 6}, {17, 0, 0}});
+ AdvancedHelper("09:00:01-17:00", {{2009, 5, 6}, {9, 0, 1}}, {{2009, 5, 6}, {17, 0, 0}});
+ AdvancedHelper("09:00-17:00:02", {{2009, 5, 6}, {9, 0, 0}}, {{2009, 5, 6}, {17, 0, 2}});
+ AdvancedHelper("09:00:03-17:00:04", {{2009, 5, 6}, {9, 0, 3}}, {{2009, 5, 6}, {17, 0, 4}});
+
+ //-----------------------------------------------------
+ // At our first Icinga Camp in SFO 2014 at GitHub HQ, we partied all night long with an overflow.
+ // 2014-09-24 09:00:00 - 2014-09-25 06:00:00
+ AdvancedHelper("09:00-30:00", {{2014, 9, 24}, {9, 0, 0}}, {{2014, 9, 25}, {6, 0, 0}});
+ AdvancedHelper("09:00:01-30:00", {{2014, 9, 24}, {9, 0, 1}}, {{2014, 9, 25}, {6, 0, 0}});
+ AdvancedHelper("09:00-30:00:02", {{2014, 9, 24}, {9, 0, 0}}, {{2014, 9, 25}, {6, 0, 2}});
+ AdvancedHelper("09:00:03-30:00:04", {{2014, 9, 24}, {9, 0, 3}}, {{2014, 9, 25}, {6, 0, 4}});
+}
+
+tm make_tm(std::string s)
+{
+ int dst = -1;
+ size_t l = strlen("YYYY-MM-DD HH:MM:SS");
+ if (s.size() > l) {
+ std::string zone = s.substr(l);
+ if (zone == " PST") {
+ dst = 0;
+ } else if (zone == " PDT") {
+ dst = 1;
+ } else {
+ // tests should only use PST/PDT (for now)
+			BOOST_CHECK_MESSAGE(false, "invalid or unknown time zone: " << zone);
+ }
+ }
+
+ std::tm t = {};
+#if defined(__GNUC__) && __GNUC__ < 5
+ // GCC did not implement std::get_time() until version 5
+ strptime(s.c_str(), "%Y-%m-%d %H:%M:%S", &t);
+#else /* defined(__GNUC__) && __GNUC__ < 5 */
+ std::istringstream stream(s);
+ stream >> std::get_time(&t, "%Y-%m-%d %H:%M:%S");
+#endif /* defined(__GNUC__) && __GNUC__ < 5 */
+ t.tm_isdst = dst;
+
+ return t;
+}
+
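+// mktime() may modify the tm it is given (normalization, tm_isdst detection), so work on a copy and leave the caller's value untouched.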
+time_t make_time_t(const tm* t)
+{
+ tm copy = *t;
+ return mktime(&copy);
+}
+
+time_t make_time_t(std::string s)
+{
+ tm t = make_tm(s);
+ return mktime(&t);
+}
+
+struct Segment
+{
+ time_t begin, end;
+
+ Segment(time_t begin, time_t end) : begin(begin), end(end) {}
+ Segment(std::string begin, std::string end) : begin(make_time_t(begin)), end(make_time_t(end)) {}
+
+ bool operator==(const Segment& o) const
+ {
+ return o.begin == begin && o.end == end;
+ }
+};
+
+std::string pretty_time(const tm& t)
+{
+#if defined(__GNUC__) && __GNUC__ < 5
+ // GCC did not implement std::put_time() until version 5
+ char buf[128];
+ size_t n = strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S %Z", &t);
+ return std::string(buf, n);
+#else /* defined(__GNUC__) && __GNUC__ < 5 */
+ std::ostringstream stream;
+ stream << std::put_time(&t, "%Y-%m-%d %H:%M:%S %Z");
+ return stream.str();
+#endif /* defined(__GNUC__) && __GNUC__ < 5 */
+}
+
+std::string pretty_time(time_t t)
+{
+ return pretty_time(Utility::LocalTime(t));
+}
+
+std::ostream& operator<<(std::ostream& o, const Segment& s)
+{
+ return o << "(" << pretty_time(s.begin) << " (" << s.begin << ") .. " << pretty_time(s.end) << " (" << s.end << "))";
+}
+
+std::ostream& operator<<(std::ostream& o, const boost::optional<Segment>& s)
+{
+ if (s) {
+ return o << *s;
+ } else {
+ return o << "none";
+ }
+}
+
+BOOST_AUTO_TEST_CASE(dst)
+{
+ GlobalTimezoneFixture tz(dst_test_timezone);
+
+ // Self-tests for helper functions
+ BOOST_CHECK_EQUAL(make_tm("2021-11-07 02:30:00").tm_isdst, -1);
+ BOOST_CHECK_EQUAL(make_tm("2021-11-07 02:30:00 PST").tm_isdst, 0);
+ BOOST_CHECK_EQUAL(make_tm("2021-11-07 02:30:00 PDT").tm_isdst, 1);
+ BOOST_CHECK_EQUAL(make_time_t("2021-11-07 01:30:00 PST"), 1636277400); // date -d '2021-11-07 01:30:00 PST' +%s
+ BOOST_CHECK_EQUAL(make_time_t("2021-11-07 01:30:00 PDT"), 1636273800); // date -d '2021-11-07 01:30:00 PDT' +%s
+
+ struct TestData {
+ std::string day;
+ std::string ranges;
+ std::vector<tm> before;
+ std::vector<tm> during;
+ boost::optional<Segment> expected;
+ };
+
+	// Some of the following test cases have comments describing the current behavior. This is not necessarily the
+	// best possible behavior, especially since it differs on Windows, so it may well be valid to change it.
+	// These cases mainly exist so that such a behavior change is actually noticed.
+ std::vector<TestData> tests;
+
+ // 2021-03-14: 01:59:59 PST (UTC-8) -> 03:00:00 PDT (UTC-7)
+ for (const std::string& day : {"2021-03-14", "sunday", "sunday 2", "sunday -3"}) {
+ // range before DST change
+ tests.push_back(TestData{
+ day, "00:30-01:30",
+ {make_tm("2021-03-14 00:00:00 PST")},
+ {make_tm("2021-03-14 01:00:00 PST")},
+ Segment("2021-03-14 00:30:00 PST", "2021-03-14 01:30:00 PST"),
+ });
+
+ if (day.find("sunday") == std::string::npos) { // skip for non-absolute day specs (would find another sunday)
+ // range end actually does not exist on that day
+ tests.push_back(TestData{
+ day, "01:30-02:30",
+ {make_tm("2021-03-14 01:00:00 PST")},
+ {make_tm("2021-03-14 01:59:59 PST")},
+#ifndef _WIN32
+ // As 02:30 does not exist on this day, it is parsed as if it was 02:30 PST which is actually 03:30 PDT.
+ Segment("2021-03-14 01:30:00 PST", "2021-03-14 03:30:00 PDT"),
+#else
+				// Windows interprets 02:30 as 01:30 PST, so it is an empty segment.
+ boost::none,
+#endif
+ });
+ }
+
+ if (day.find("sunday") == std::string::npos) { // skip for non-absolute day specs (would find another sunday)
+ // range beginning does not actually exist on that day
+ tests.push_back(TestData{
+ day, "02:30-03:30",
+ {make_tm("2021-03-14 01:00:00 PST")},
+ {make_tm("2021-03-14 03:00:00 PDT")},
+#ifndef _WIN32
+ // As 02:30 does not exist on this day, it is parsed as if it was 02:30 PST which is actually 03:30 PDT.
+ // Therefore, the result is a segment from 03:30 PDT to 03:30 PDT with a duration of 0, i.e. no segment.
+ boost::none,
+#else
+				// Windows parses the non-existent 02:30 as 01:30 PST, resulting in a 1 hour segment.
+ Segment("2021-03-14 01:30:00 PST", "2021-03-14 03:30:00 PDT"),
+#endif
+ });
+ }
+
+ // another range where the beginning does not actually exist on that day
+ tests.push_back(TestData{
+ day, "02:15-03:45",
+ {make_tm("2021-03-14 01:00:00 PST")},
+ {make_tm("2021-03-14 03:30:00 PDT")},
+#ifndef _WIN32
+ // As 02:15 does not exist on this day, it is parsed as if it was 02:15 PST which is actually 03:15 PDT.
+ Segment("2021-03-14 03:15:00 PDT", "2021-03-14 03:45:00 PDT"),
+#else
+ // Windows interprets 02:15 as 01:15 PST though.
+ Segment("2021-03-14 01:15:00 PST", "2021-03-14 03:45:00 PDT"),
+#endif
+ });
+
+ // range after DST change
+ tests.push_back(TestData{
+ day, "03:30-04:30",
+ {make_tm("2021-03-14 01:00:00 PST"), make_tm("2021-03-14 03:00:00 PDT")},
+ {make_tm("2021-03-14 04:00:00 PDT")},
+ Segment("2021-03-14 03:30:00 PDT", "2021-03-14 04:30:00 PDT"),
+ });
+
+ // range containing DST change
+ tests.push_back(TestData{
+ day, "01:30-03:30",
+ {make_tm("2021-03-14 01:00:00 PST")},
+ {make_tm("2021-03-14 01:45:00 PST"), make_tm("2021-03-14 03:15:00 PDT")},
+ Segment("2021-03-14 01:30:00 PST", "2021-03-14 03:30:00 PDT"),
+ });
+ }
+
+ // 2021-11-07: 01:59:59 PDT (UTC-7) -> 01:00:00 PST (UTC-8)
+ for (const std::string& day : {"2021-11-07", "sunday", "sunday 1", "sunday -4"}) {
+ // range before DST change
+ tests.push_back(TestData{
+ day, "00:15-00:45",
+ {make_tm("2021-11-07 00:00:00 PDT")},
+ {make_tm("2021-11-07 00:30:00 PDT")},
+ Segment("2021-11-07 00:15:00 PDT", "2021-11-07 00:45:00 PDT"),
+ });
+
+ if (day.find("sunday") == std::string::npos) { // skip for non-absolute day specs (would find another sunday)
+ // range existing twice during DST change (first instance)
+#ifndef _WIN32
+ tests.push_back(TestData{
+ day, "01:15-01:45",
+ {make_tm("2021-11-07 01:00:00 PDT")},
+ {make_tm("2021-11-07 01:30:00 PDT")},
+ // Duplicate times are interpreted as the first occurrence.
+ Segment("2021-11-07 01:15:00 PDT", "2021-11-07 01:45:00 PDT"),
+ });
+#else
+ tests.push_back(TestData{
+ day, "01:15-01:45",
+ {make_tm("2021-11-07 01:00:00 PDT")},
+ {make_tm("2021-11-07 01:30:00 PST")},
+ // However, Windows always uses the second occurrence.
+ Segment("2021-11-07 01:15:00 PST", "2021-11-07 01:45:00 PST"),
+ });
+#endif
+ }
+
+ if (day.find("sunday") == std::string::npos) { // skip for non-absolute day specs (would find another sunday)
+ // range existing twice during DST change (second instance)
+ tests.push_back(TestData{
+ day, "01:15-01:45",
+ {make_tm("2021-11-07 01:00:00 PST")},
+ {make_tm("2021-11-07 01:30:00 PST")},
+#ifndef _WIN32
+ // Interpreted as the first occurrence, so it's in the past.
+ boost::none,
+#else
+ // On Windows, it's the second occurrence, so it's still in the present/future and is found.
+ Segment("2021-11-07 01:15:00 PST", "2021-11-07 01:45:00 PST"),
+#endif
+ });
+ }
+
+ // range after DST change
+ tests.push_back(TestData{
+ day, "03:30-04:30",
+ {make_tm("2021-11-07 01:00:00 PDT"), make_tm("2021-11-07 03:00:00 PST")},
+ {make_tm("2021-11-07 04:00:00 PST")},
+ Segment("2021-11-07 03:30:00 PST", "2021-11-07 04:30:00 PST"),
+ });
+
+ // range containing DST change
+ tests.push_back(TestData{
+ day, "00:30-02:30",
+ {make_tm("2021-11-07 00:00:00 PDT")},
+ {make_tm("2021-11-07 00:45:00 PDT"), make_tm("2021-11-07 01:30:00 PDT"),
+ make_tm("2021-11-07 01:30:00 PST"), make_tm("2021-11-07 02:15:00 PST")},
+ Segment("2021-11-07 00:30:00 PDT", "2021-11-07 02:30:00 PST"),
+ });
+
+ // range ending during duplicate DST hour (first instance)
+ tests.push_back(TestData{
+ day, "00:30-01:30",
+ {make_tm("2021-11-07 00:00:00 PDT")},
+ {make_tm("2021-11-07 01:00:00 PDT")},
+#ifndef _WIN32
+			// Both times are interpreted as the first instance on that day (i.e. both PDT).
+ Segment("2021-11-07 00:30:00 PDT", "2021-11-07 01:30:00 PDT")
+#else
+ // Windows interprets duplicate times as the second instance (i.e. both PST).
+ Segment("2021-11-07 00:30:00 PDT", "2021-11-07 01:30:00 PST")
+#endif
+ });
+
+ // range beginning during duplicate DST hour (first instance)
+ tests.push_back(TestData{
+ day, "01:30-02:30",
+ {make_tm("2021-11-07 01:00:00 PDT")},
+ {make_tm("2021-11-07 02:00:00 PST")},
+#ifndef _WIN32
+ // 01:30 is interpreted as the first occurrence (PDT) but since there's no 02:30 PDT, it's PST.
+ Segment("2021-11-07 01:30:00 PDT", "2021-11-07 02:30:00 PST")
+#else
+ // Windows interprets both as PST though.
+ Segment("2021-11-07 01:30:00 PST", "2021-11-07 02:30:00 PST")
+#endif
+ });
+
+ if (day.find("sunday") == std::string::npos) { // skip for non-absolute day specs (would find another sunday)
+ // range ending during duplicate DST hour (second instance)
+#ifndef _WIN32
+ tests.push_back(TestData{
+ day, "00:30-01:30",
+ {make_tm("2021-11-07 00:00:00 PST")},
+ {make_tm("2021-11-07 01:00:00 PST")},
+ // Both times are parsed as PDT. Thus, 00:00 PST (01:00 PDT) is during the segment and
+ // 01:00 PST (02:00 PDT) is after the segment.
+ boost::none,
+ });
+#else
+ tests.push_back(TestData{
+ day, "00:30-01:30",
+ {make_tm("2021-11-07 00:00:00 PDT")},
+ {make_tm("2021-11-07 01:00:00 PST")},
+ // As Windows interprets the end as PST, it's still in the future and the segment is found.
+ Segment("2021-11-07 00:30:00 PDT", "2021-11-07 01:30:00 PST"),
+ });
+#endif
+ }
+
+ // range beginning during duplicate DST hour (second instance)
+ tests.push_back(TestData{
+ day, "01:30-02:30",
+ {make_tm("2021-11-07 01:00:00 PDT")},
+ {make_tm("2021-11-07 02:00:00 PST")},
+#ifndef _WIN32
+ // As 01:30 always refers to the first occurrence (PDT), this is actually a 2 hour segment.
+ Segment("2021-11-07 01:30:00 PDT", "2021-11-07 02:30:00 PST"),
+#else
+			// On Windows, it refers to the second occurrence (PST), therefore it's a 1 hour segment.
+ Segment("2021-11-07 01:30:00 PST", "2021-11-07 02:30:00 PST"),
+#endif
+ });
+ }
+
+ auto seg = [](const Dictionary::Ptr& segment) -> boost::optional<Segment> {
+ if (segment == nullptr) {
+ return boost::none;
+ }
+
+ BOOST_CHECK(segment->Contains("begin"));
+ BOOST_CHECK(segment->Contains("end"));
+
+ return Segment{time_t(segment->Get("begin")), time_t(segment->Get("end"))};
+ };
+
+ for (const TestData& t : tests) {
+ for (const tm& ref : t.during) {
+ if (t.expected) {
+ // test data sanity check
+ time_t ref_ts = make_time_t(&ref);
+ BOOST_CHECK_MESSAGE(t.expected->begin < ref_ts, "[day='" << t.day << "' ranges='" << t.ranges
+ << "'] expected.begin='"<< pretty_time(t.expected->begin) << "' < ref='" << pretty_time(ref_ts)
+ << "' violated");
+ BOOST_CHECK_MESSAGE(ref_ts < t.expected->end, "[day='" << t.day << "' ranges='" << t.ranges
+ << "'] ref='" << pretty_time(ref_ts) << "' < expected.end='" << pretty_time(t.expected->end)
+ << "' violated");
+ }
+
+ tm mutRef = ref;
+ auto runningSeg = seg(LegacyTimePeriod::FindRunningSegment(t.day, t.ranges, &mutRef));
+ BOOST_CHECK_MESSAGE(runningSeg == t.expected, "FindRunningSegment(day='" << t.day
+ << "' ranges='" << t.ranges << "' ref='" << pretty_time(ref) << "'): got=" << runningSeg
+ << " expected=" << t.expected);
+ }
+
+ for (const tm& ref : t.before) {
+ if (t.expected) {
+ // test data sanity check
+ time_t ref_ts = make_time_t(&ref);
+ BOOST_CHECK_MESSAGE(ref_ts < t.expected->begin, "[day='" << t.day << "' ranges='" << t.ranges
+ << "'] ref='"<< pretty_time(ref_ts) << "' < expected.begin='" << pretty_time(t.expected->begin)
+ << "' violated");
+ BOOST_CHECK_MESSAGE(t.expected->begin < t.expected->end, "[day='" << t.day << "' ranges='" << t.ranges
+ << "'] expected.begin='" << pretty_time(t.expected->begin)
+ << "' < expected.end='" << pretty_time(t.expected->end) << "' violated");
+ }
+
+ tm mutRef = ref;
+ auto nextSeg = seg(LegacyTimePeriod::FindNextSegment(t.day, t.ranges, &mutRef));
+ BOOST_CHECK_MESSAGE(nextSeg == t.expected, "FindNextSegment(day='" << t.day << "' ranges='" << t.ranges
+ << "' ref='" << pretty_time(ref) << "'): got=" << nextSeg << " expected=" << t.expected);
+ }
+ }
+}
+
+// This test checks that TimePeriod::IsInside() always returns true for a 24x7 period, even around DST changes.
+BOOST_AUTO_TEST_CASE(dst_isinside)
+{
+ GlobalTimezoneFixture tz(dst_test_timezone);
+
+ Function::Ptr update = new Function("LegacyTimePeriod", LegacyTimePeriod::ScriptFunc, {"tp", "begin", "end"});
+ Dictionary::Ptr ranges = new Dictionary({
+ {"monday", "00:00-24:00"},
+ {"tuesday", "00:00-24:00"},
+ {"wednesday", "00:00-24:00"},
+ {"thursday", "00:00-24:00"},
+ {"friday", "00:00-24:00"},
+ {"saturday", "00:00-24:00"},
+ {"sunday", "00:00-24:00"},
+ });
+
+ // Vary begin from Sat 06 Nov 2021 00:00:00 PDT to Mon 08 Nov 2021 00:00:00 PST in 30 minute intervals.
+ for (time_t t_begin = 1636182000; t_begin <= 1636358400; t_begin += 30*60) {
+ // Test varying interval lengths: 60 minutes, 24 hours, 48 hours.
+ for (time_t len : {60*60, 24*60*60, 48*60*60}) {
+ time_t t_end = t_begin + len;
+
+ TimePeriod::Ptr p = new TimePeriod();
+ p->SetUpdate(update, true);
+ p->SetRanges(ranges, true);
+
+ p->UpdateRegion(double(t_begin), double(t_end), true);
+
+ {
+ // Print resulting segments for easier debugging.
+ Array::Ptr segments = p->GetSegments();
+ ObjectLock lock(segments);
+ for (Dictionary::Ptr segment: segments) {
+ BOOST_TEST_MESSAGE("t_begin=" << t_begin << " t_end=" << t_end
+ << " segment.begin=" << segment->Get("begin") << " segment.end=" << segment->Get("end"));
+ }
+ }
+
+ time_t step = 10*60;
+ for (time_t t = t_begin+step; t < t_end; t += step) {
+ BOOST_CHECK_MESSAGE(p->IsInside(double(t)),
+ t << " should be inside for t_begin=" << t_begin << " t_end=" << t_end);
+ }
+ }
+ }
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/icinga-macros.cpp b/test/icinga-macros.cpp
new file mode 100644
index 0000000..e7c789c
--- /dev/null
+++ b/test/icinga-macros.cpp
@@ -0,0 +1,50 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/macroprocessor.hpp"
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(icinga_macros)
+
+BOOST_AUTO_TEST_CASE(simple)
+{
+ Dictionary::Ptr macrosA = new Dictionary();
+ macrosA->Set("testA", 7);
+ macrosA->Set("testB", "hello");
+
+ Dictionary::Ptr macrosB = new Dictionary();
+ macrosB->Set("testA", 3);
+ macrosB->Set("testC", "world");
+
+ Array::Ptr testD = new Array();
+ testD->Add(3);
+ testD->Add("test");
+
+ macrosB->Set("testD", testD);
+
+ MacroProcessor::ResolverList resolvers;
+ resolvers.emplace_back("macrosA", macrosA);
+ resolvers.emplace_back("macrosB", macrosB);
+
+ BOOST_CHECK(MacroProcessor::ResolveMacros("$macrosA.testB$ $macrosB.testC$", resolvers) == "hello world");
+ BOOST_CHECK(MacroProcessor::ResolveMacros("$testA$", resolvers) == "7");
+ BOOST_CHECK(MacroProcessor::ResolveMacros("$testA$$testB$", resolvers) == "7hello");
+
+ Array::Ptr result = MacroProcessor::ResolveMacros("$testD$", resolvers);
+ BOOST_CHECK(result->GetLength() == 2);
+
+ /* verify the config validator macro checks */
+ BOOST_CHECK(MacroProcessor::ValidateMacroString("$host.address") == false);
+ BOOST_CHECK(MacroProcessor::ValidateMacroString("host.vars.test$") == false);
+
+ BOOST_CHECK(MacroProcessor::ValidateMacroString("host.vars.test$") == false);
+ BOOST_CHECK(MacroProcessor::ValidateMacroString("$template::test$abc$") == false);
+
+ BOOST_CHECK(MacroProcessor::ValidateMacroString("$$test $host.vars.test$") == true);
+
+ BOOST_CHECK(MacroProcessor::ValidateMacroString("test $host.vars.test$") == true);
+
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/icinga-notification.cpp b/test/icinga-notification.cpp
new file mode 100644
index 0000000..a0aeb7d
--- /dev/null
+++ b/test/icinga-notification.cpp
@@ -0,0 +1,215 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icinga/host.hpp"
+#include "icinga/notification.hpp"
+#include "icinga/notificationcommand.hpp"
+#include "icinga/service.hpp"
+#include "icinga/user.hpp"
+#include <BoostTestTargetConfig.h>
+#include <iostream>
+
+using namespace icinga;
+
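+// Builds a minimal host/service/user/notification setup whose notification command merely counts its executions,
+// so the test cases below can assert whether a (possibly duplicate) notification is actually sent for a given
+// type/state filter.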
+struct DuplicateDueToFilterHelper
+{
+ Host::Ptr h = new Host();
+ Service::Ptr s = new Service();
+ User::Ptr u = new User();
+ NotificationCommand::Ptr nc = new NotificationCommand();
+ Notification::Ptr n = new Notification();
+ unsigned int called = 0;
+
+ DuplicateDueToFilterHelper(int typeFilter, int stateFilter)
+ {
+ h->SetName("example.com", true);
+ h->Register();
+
+ s->SetShortName("disk", true);
+ h->AddService(s);
+
+ u->SetName("jdoe", true);
+ u->SetTypeFilter(~0);
+ u->SetStateFilter(~0);
+ u->Register();
+
+ nc->SetName("mail", true);
+ nc->SetExecute(new Function("", [this]() { ++called; }), true);
+ nc->Register();
+
+ n->SetFieldByName("host_name", "example.com", false, DebugInfo());
+ n->SetFieldByName("service_name", "disk", false, DebugInfo());
+ n->SetFieldByName("command", "mail", false, DebugInfo());
+ n->SetUsersRaw(new Array({"jdoe"}), true);
+ n->SetTypeFilter(typeFilter);
+ n->SetStateFilter(stateFilter);
+ n->OnAllConfigLoaded(); // link Service
+ }
+
+ ~DuplicateDueToFilterHelper()
+ {
+ h->Unregister();
+ u->Unregister();
+ nc->Unregister();
+ }
+
+ void SendStateNotification(ServiceState state, bool isSent)
+ {
+ auto calledBefore (called);
+
+ s->SetStateRaw(state, true);
+ Application::GetTP().Start();
+
+ n->BeginExecuteNotification(
+ state == ServiceOK ? NotificationRecovery : NotificationProblem,
+ nullptr, false, false, "", ""
+ );
+
+ Application::GetTP().Stop();
+ BOOST_CHECK_EQUAL(called > calledBefore, isSent);
+ }
+};
+
+BOOST_AUTO_TEST_SUITE(icinga_notification)
+
+BOOST_AUTO_TEST_CASE(strings)
+{
+ // States
+ BOOST_CHECK("OK" == Notification::NotificationServiceStateToString(ServiceOK));
+ BOOST_CHECK("Critical" == Notification::NotificationServiceStateToString(ServiceCritical));
+ BOOST_CHECK("Up" == Notification::NotificationHostStateToString(HostUp));
+
+ // Types
+ BOOST_CHECK("DowntimeStart" == Notification::NotificationTypeToString(NotificationDowntimeStart));
+ BOOST_CHECK("Problem" == Notification::NotificationTypeToString(NotificationProblem));
+
+ // Compat
+ BOOST_CHECK("DOWNTIMECANCELLED" == Notification::NotificationTypeToStringCompat(NotificationDowntimeRemoved));
+}
+
+BOOST_AUTO_TEST_CASE(state_filter)
+{
+ unsigned long fstate;
+
+ Array::Ptr states = new Array();
+ states->Add("OK");
+ states->Add("Warning");
+
+ Notification::Ptr notification = new Notification();
+
+ notification->SetStateFilter(FilterArrayToInt(states, notification->GetStateFilterMap(), ~0));
+ notification->Activate();
+ notification->SetAuthority(true);
+
+ /* Test passing notification state */
+ fstate = StateFilterWarning;
+ std::cout << "#1 Notification state: " << fstate << " against " << notification->GetStateFilter() << " must pass. " << std::endl;
+ BOOST_CHECK(notification->GetStateFilter() & fstate);
+
+ /* Test filtered notification state */
+ fstate = StateFilterUnknown;
+ std::cout << "#2 Notification state: " << fstate << " against " << notification->GetStateFilter() << " must fail." << std::endl;
+ BOOST_CHECK(!(notification->GetStateFilter() & fstate));
+
+ /* Test unset states filter configuration */
+ notification->SetStateFilter(FilterArrayToInt(Array::Ptr(), notification->GetStateFilterMap(), ~0));
+
+ fstate = StateFilterOK;
+ std::cout << "#3 Notification state: " << fstate << " against " << notification->GetStateFilter() << " must pass." << std::endl;
+ BOOST_CHECK(notification->GetStateFilter() & fstate);
+
+ /* Test empty states filter configuration */
+ states->Clear();
+ notification->SetStateFilter(FilterArrayToInt(states, notification->GetStateFilterMap(), ~0));
+
+ fstate = StateFilterCritical;
+ std::cout << "#4 Notification state: " << fstate << " against " << notification->GetStateFilter() << " must fail." << std::endl;
+ BOOST_CHECK(!(notification->GetStateFilter() & fstate));
+}
+BOOST_AUTO_TEST_CASE(type_filter)
+{
+ unsigned long ftype;
+
+ Array::Ptr types = new Array();
+ types->Add("Problem");
+ types->Add("DowntimeStart");
+ types->Add("DowntimeEnd");
+
+ Notification::Ptr notification = new Notification();
+
+ notification->SetTypeFilter(FilterArrayToInt(types, notification->GetTypeFilterMap(), ~0));
+ notification->Activate();
+ notification->SetAuthority(true);
+
+ /* Test passing notification type */
+ ftype = NotificationProblem;
+ std::cout << "#1 Notification type: " << ftype << " against " << notification->GetTypeFilter() << " must pass." << std::endl;
+ BOOST_CHECK(notification->GetTypeFilter() & ftype);
+
+ /* Test filtered notification type */
+ ftype = NotificationCustom;
+ std::cout << "#2 Notification type: " << ftype << " against " << notification->GetTypeFilter() << " must fail." << std::endl;
+ BOOST_CHECK(!(notification->GetTypeFilter() & ftype));
+
+ /* Test unset types filter configuration */
+ notification->SetTypeFilter(FilterArrayToInt(Array::Ptr(), notification->GetTypeFilterMap(), ~0));
+
+ ftype = NotificationRecovery;
+ std::cout << "#3 Notification type: " << ftype << " against " << notification->GetTypeFilter() << " must pass." << std::endl;
+ BOOST_CHECK(notification->GetTypeFilter() & ftype);
+
+ /* Test empty types filter configuration */
+ types->Clear();
+ notification->SetTypeFilter(FilterArrayToInt(types, notification->GetTypeFilterMap(), ~0));
+
+ ftype = NotificationProblem;
+ std::cout << "#4 Notification type: " << ftype << " against " << notification->GetTypeFilter() << " must fail." << std::endl;
+ BOOST_CHECK(!(notification->GetTypeFilter() & ftype));
+}
+
+BOOST_AUTO_TEST_CASE(no_filter_problem_no_duplicate)
+{
+ DuplicateDueToFilterHelper helper (~0, ~0);
+
+ helper.SendStateNotification(ServiceCritical, true);
+ helper.SendStateNotification(ServiceWarning, true);
+ helper.SendStateNotification(ServiceCritical, true);
+}
+
+BOOST_AUTO_TEST_CASE(filter_problem_no_duplicate)
+{
+ DuplicateDueToFilterHelper helper (~0, ~StateFilterWarning);
+
+ helper.SendStateNotification(ServiceCritical, true);
+ helper.SendStateNotification(ServiceWarning, false);
+ helper.SendStateNotification(ServiceCritical, false);
+}
+
+BOOST_AUTO_TEST_CASE(volatile_filter_problem_duplicate)
+{
+ DuplicateDueToFilterHelper helper (~0, ~StateFilterWarning);
+
+ helper.s->SetVolatile(true, true);
+ helper.SendStateNotification(ServiceCritical, true);
+ helper.SendStateNotification(ServiceWarning, false);
+ helper.SendStateNotification(ServiceCritical, true);
+}
+
+BOOST_AUTO_TEST_CASE(no_recovery_filter_no_duplicate)
+{
+ DuplicateDueToFilterHelper helper (~0, ~0);
+
+ helper.SendStateNotification(ServiceCritical, true);
+ helper.SendStateNotification(ServiceOK, true);
+ helper.SendStateNotification(ServiceCritical, true);
+}
+
+BOOST_AUTO_TEST_CASE(recovery_filter_duplicate)
+{
+ DuplicateDueToFilterHelper helper (~NotificationRecovery, ~0);
+
+ helper.SendStateNotification(ServiceCritical, true);
+ helper.SendStateNotification(ServiceOK, false);
+ helper.SendStateNotification(ServiceCritical, true);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/icinga-perfdata.cpp b/test/icinga-perfdata.cpp
new file mode 100644
index 0000000..12e1c28
--- /dev/null
+++ b/test/icinga-perfdata.cpp
@@ -0,0 +1,407 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/perfdatavalue.hpp"
+#include "icinga/pluginutility.hpp"
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(icinga_perfdata)
+
+BOOST_AUTO_TEST_CASE(empty)
+{
+ Array::Ptr pd = PluginUtility::SplitPerfdata("");
+ BOOST_CHECK(pd->GetLength() == 0);
+}
+
+BOOST_AUTO_TEST_CASE(simple)
+{
+ PerfdataValue::Ptr pdv = PerfdataValue::Parse("test=123456");
+ BOOST_CHECK(pdv->GetLabel() == "test");
+ BOOST_CHECK(pdv->GetValue() == 123456);
+
+ String str = pdv->Format();
+ BOOST_CHECK(str == "test=123456");
+}
+
+BOOST_AUTO_TEST_CASE(quotes)
+{
+ Array::Ptr pd = PluginUtility::SplitPerfdata("'hello world'=123456");
+ BOOST_CHECK(pd->GetLength() == 1);
+
+ PerfdataValue::Ptr pdv = PerfdataValue::Parse("'hello world'=123456");
+ BOOST_CHECK(pdv->GetLabel() == "hello world");
+ BOOST_CHECK(pdv->GetValue() == 123456);
+}
+
+BOOST_AUTO_TEST_CASE(multiple)
+{
+ Array::Ptr pd = PluginUtility::SplitPerfdata("testA=123456 testB=123456");
+ BOOST_CHECK(pd->GetLength() == 2);
+
+ String str = PluginUtility::FormatPerfdata(pd);
+ BOOST_CHECK(str == "testA=123456 testB=123456");
+}
+
+BOOST_AUTO_TEST_CASE(multiline)
+{
+ Array::Ptr pd = PluginUtility::SplitPerfdata(" 'testA'=123456 'testB'=123456");
+ BOOST_CHECK(pd->GetLength() == 2);
+
+ String str = PluginUtility::FormatPerfdata(pd);
+ BOOST_CHECK(str == "testA=123456 testB=123456");
+
+ pd = PluginUtility::SplitPerfdata(" 'testA'=123456 \n'testB'=123456");
+ BOOST_CHECK(pd->GetLength() == 2);
+
+ str = PluginUtility::FormatPerfdata(pd);
+ BOOST_CHECK(str == "testA=123456 testB=123456");
+}
+
+BOOST_AUTO_TEST_CASE(normalize)
+{
+ Array::Ptr pd = PluginUtility::SplitPerfdata("testA=2m;3;4;1;5 testB=2foobar");
+ BOOST_CHECK(pd->GetLength() == 2);
+
+ String str = PluginUtility::FormatPerfdata(pd, true);
+ BOOST_CHECK(str == "testA=120s;180;240;60;300 testB=2");
+}
+
+BOOST_AUTO_TEST_CASE(uom)
+{
+ PerfdataValue::Ptr pv = PerfdataValue::Parse("test=123456B");
+ BOOST_CHECK(pv);
+
+ BOOST_CHECK(pv->GetValue() == 123456);
+ BOOST_CHECK(!pv->GetCounter());
+ BOOST_CHECK(pv->GetUnit() == "bytes");
+ BOOST_CHECK(pv->GetCrit() == Empty);
+ BOOST_CHECK(pv->GetWarn() == Empty);
+ BOOST_CHECK(pv->GetMin() == Empty);
+ BOOST_CHECK(pv->GetMax() == Empty);
+
+ String str = pv->Format();
+ BOOST_CHECK(str == "test=123456B");
+
+ pv = PerfdataValue::Parse("test=1000ms;200;500");
+ BOOST_CHECK(pv);
+
+ BOOST_CHECK(pv->GetValue() == 1);
+ BOOST_CHECK(pv->GetUnit() == "seconds");
+ BOOST_CHECK(pv->GetWarn() == 0.2);
+ BOOST_CHECK(pv->GetCrit() == 0.5);
+
+ pv = PerfdataValue::Parse("test=1000ms");
+ BOOST_CHECK(pv);
+
+ BOOST_CHECK(pv->GetValue() == 1);
+ BOOST_CHECK(pv->GetUnit() == "seconds");
+ BOOST_CHECK(pv->GetCrit() == Empty);
+ BOOST_CHECK(pv->GetWarn() == Empty);
+ BOOST_CHECK(pv->GetMin() == Empty);
+ BOOST_CHECK(pv->GetMax() == Empty);
+
+ str = pv->Format();
+ BOOST_CHECK(str == "test=1s");
+
+ pv = PerfdataValue::Parse("test=1kAm");
+ BOOST_CHECK(pv);
+
+ BOOST_CHECK(pv->GetValue() == 60 * 1000);
+ BOOST_CHECK(pv->GetUnit() == "ampere-seconds");
+ BOOST_CHECK(pv->GetCrit() == Empty);
+ BOOST_CHECK(pv->GetWarn() == Empty);
+ BOOST_CHECK(pv->GetMin() == Empty);
+ BOOST_CHECK(pv->GetMax() == Empty);
+
+ str = pv->Format();
+ BOOST_CHECK(str == "test=60000As");
+
+ pv = PerfdataValue::Parse("test=1MA");
+ BOOST_CHECK(pv);
+
+ BOOST_CHECK(pv->GetValue() == 1000 * 1000);
+ BOOST_CHECK(pv->GetUnit() == "amperes");
+ BOOST_CHECK(pv->GetCrit() == Empty);
+ BOOST_CHECK(pv->GetWarn() == Empty);
+ BOOST_CHECK(pv->GetMin() == Empty);
+ BOOST_CHECK(pv->GetMax() == Empty);
+
+ str = pv->Format();
+ BOOST_CHECK(str == "test=1000000A");
+
+ pv = PerfdataValue::Parse("test=1gib");
+ BOOST_CHECK(pv);
+
+ BOOST_CHECK(pv->GetValue() == 1024 * 1024 * 1024);
+ BOOST_CHECK(pv->GetUnit() == "bits");
+ BOOST_CHECK(pv->GetCrit() == Empty);
+ BOOST_CHECK(pv->GetWarn() == Empty);
+ BOOST_CHECK(pv->GetMin() == Empty);
+ BOOST_CHECK(pv->GetMax() == Empty);
+
+ str = pv->Format();
+ BOOST_CHECK(str == "test=1073741824b");
+
+ pv = PerfdataValue::Parse("test=1dBm");
+ BOOST_CHECK(pv);
+
+ BOOST_CHECK(pv->GetValue() == 1);
+ BOOST_CHECK(pv->GetUnit() == "decibel-milliwatts");
+ BOOST_CHECK(pv->GetCrit() == Empty);
+ BOOST_CHECK(pv->GetWarn() == Empty);
+ BOOST_CHECK(pv->GetMin() == Empty);
+ BOOST_CHECK(pv->GetMax() == Empty);
+
+ str = pv->Format();
+ BOOST_CHECK(str == "test=1dBm");
+
+ pv = PerfdataValue::Parse("test=1C");
+ BOOST_CHECK(pv);
+
+ BOOST_CHECK(pv->GetValue() == 1);
+ BOOST_CHECK(pv->GetUnit() == "degrees-celsius");
+ BOOST_CHECK(pv->GetCrit() == Empty);
+ BOOST_CHECK(pv->GetWarn() == Empty);
+ BOOST_CHECK(pv->GetMin() == Empty);
+ BOOST_CHECK(pv->GetMax() == Empty);
+
+ str = pv->Format();
+ BOOST_CHECK(str == "test=1C");
+
+ pv = PerfdataValue::Parse("test=1F");
+ BOOST_CHECK(pv);
+
+ BOOST_CHECK(pv->GetValue() == 1);
+ BOOST_CHECK(pv->GetUnit() == "degrees-fahrenheit");
+ BOOST_CHECK(pv->GetCrit() == Empty);
+ BOOST_CHECK(pv->GetWarn() == Empty);
+ BOOST_CHECK(pv->GetMin() == Empty);
+ BOOST_CHECK(pv->GetMax() == Empty);
+
+ str = pv->Format();
+ BOOST_CHECK(str == "test=1F");
+
+ pv = PerfdataValue::Parse("test=1K");
+ BOOST_CHECK(pv);
+
+ BOOST_CHECK(pv->GetValue() == 1);
+ BOOST_CHECK(pv->GetUnit() == "degrees-kelvin");
+ BOOST_CHECK(pv->GetCrit() == Empty);
+ BOOST_CHECK(pv->GetWarn() == Empty);
+ BOOST_CHECK(pv->GetMin() == Empty);
+ BOOST_CHECK(pv->GetMax() == Empty);
+
+ str = pv->Format();
+ BOOST_CHECK(str == "test=1K");
+
+ pv = PerfdataValue::Parse("test=1t");
+ BOOST_CHECK(pv);
+
+ BOOST_CHECK(pv->GetValue() == 1000 * 1000);
+ BOOST_CHECK(pv->GetUnit() == "grams");
+ BOOST_CHECK(pv->GetCrit() == Empty);
+ BOOST_CHECK(pv->GetWarn() == Empty);
+ BOOST_CHECK(pv->GetMin() == Empty);
+ BOOST_CHECK(pv->GetMax() == Empty);
+
+ str = pv->Format();
+ BOOST_CHECK(str == "test=1000000g");
+
+ pv = PerfdataValue::Parse("test=1hl");
+ BOOST_CHECK(pv);
+
+ BOOST_CHECK(pv->GetValue() == 100);
+ BOOST_CHECK(pv->GetUnit() == "liters");
+ BOOST_CHECK(pv->GetCrit() == Empty);
+ BOOST_CHECK(pv->GetWarn() == Empty);
+ BOOST_CHECK(pv->GetMin() == Empty);
+ BOOST_CHECK(pv->GetMax() == Empty);
+
+ str = pv->Format();
+ BOOST_CHECK(str == "test=100l");
+
+ pv = PerfdataValue::Parse("test=1lm");
+ BOOST_CHECK(pv);
+
+ BOOST_CHECK(pv->GetValue() == 1);
+ BOOST_CHECK(pv->GetUnit() == "lumens");
+ BOOST_CHECK(pv->GetCrit() == Empty);
+ BOOST_CHECK(pv->GetWarn() == Empty);
+ BOOST_CHECK(pv->GetMin() == Empty);
+ BOOST_CHECK(pv->GetMax() == Empty);
+
+ str = pv->Format();
+ BOOST_CHECK(str == "test=1lm");
+
+ pv = PerfdataValue::Parse("test=1TO");
+ BOOST_CHECK(pv);
+
+ BOOST_CHECK(pv->GetValue() == 1000.0 * 1000 * 1000 * 1000);
+ BOOST_CHECK(pv->GetUnit() == "ohms");
+ BOOST_CHECK(pv->GetCrit() == Empty);
+ BOOST_CHECK(pv->GetWarn() == Empty);
+ BOOST_CHECK(pv->GetMin() == Empty);
+ BOOST_CHECK(pv->GetMax() == Empty);
+
+ str = pv->Format();
+ BOOST_CHECK(str == "test=1000000000000O");
+
+ pv = PerfdataValue::Parse("test=1PV");
+ BOOST_CHECK(pv);
+
+ BOOST_CHECK(pv->GetValue() == 1000.0 * 1000 * 1000 * 1000 * 1000);
+ BOOST_CHECK(pv->GetUnit() == "volts");
+ BOOST_CHECK(pv->GetCrit() == Empty);
+ BOOST_CHECK(pv->GetWarn() == Empty);
+ BOOST_CHECK(pv->GetMin() == Empty);
+ BOOST_CHECK(pv->GetMax() == Empty);
+
+ str = pv->Format();
+ BOOST_CHECK(str == "test=1000000000000000V");
+
+ pv = PerfdataValue::Parse("test=1EWh");
+ BOOST_CHECK(pv);
+
+ BOOST_CHECK(pv->GetValue() == 1000.0 * 1000 * 1000 * 1000 * 1000 * 1000);
+ BOOST_CHECK(pv->GetUnit() == "watt-hours");
+ BOOST_CHECK(pv->GetCrit() == Empty);
+ BOOST_CHECK(pv->GetWarn() == Empty);
+ BOOST_CHECK(pv->GetMin() == Empty);
+ BOOST_CHECK(pv->GetMax() == Empty);
+
+ str = pv->Format();
+ BOOST_CHECK(str == "test=1000000000000000000Wh");
+
+ pv = PerfdataValue::Parse("test=1000mW");
+ BOOST_CHECK(pv);
+
+ BOOST_CHECK(pv->GetValue() == 1);
+ BOOST_CHECK(pv->GetUnit() == "watts");
+ BOOST_CHECK(pv->GetCrit() == Empty);
+ BOOST_CHECK(pv->GetWarn() == Empty);
+ BOOST_CHECK(pv->GetMin() == Empty);
+ BOOST_CHECK(pv->GetMax() == Empty);
+
+ str = pv->Format();
+ BOOST_CHECK(str == "test=1W");
+}
+
+BOOST_AUTO_TEST_CASE(warncritminmax)
+{
+ PerfdataValue::Ptr pv = PerfdataValue::Parse("test=123456B;1000;2000;3000;4000");
+ BOOST_CHECK(pv);
+
+ BOOST_CHECK(pv->GetValue() == 123456);
+ BOOST_CHECK(!pv->GetCounter());
+ BOOST_CHECK(pv->GetUnit() == "bytes");
+ BOOST_CHECK(pv->GetWarn() == 1000);
+ BOOST_CHECK(pv->GetCrit() == 2000);
+ BOOST_CHECK(pv->GetMin() == 3000);
+ BOOST_CHECK(pv->GetMax() == 4000);
+
+ BOOST_CHECK(pv->Format() == "test=123456B;1000;2000;3000;4000");
+}
+
+BOOST_AUTO_TEST_CASE(ignore_invalid_warn_crit_min_max)
+{
+ PerfdataValue::Ptr pv = PerfdataValue::Parse("test=123456;1000:2000;0:3000;3000;4000");
+ BOOST_CHECK(pv);
+ BOOST_CHECK(pv->GetValue() == 123456);
+ BOOST_CHECK(pv->GetWarn() == Empty);
+ BOOST_CHECK(pv->GetCrit() == Empty);
+ BOOST_CHECK(pv->GetMin() == 3000);
+ BOOST_CHECK(pv->GetMax() == 4000);
+
+ BOOST_CHECK(pv->Format() == "test=123456");
+}
+
+BOOST_AUTO_TEST_CASE(invalid)
+{
+ BOOST_CHECK_THROW(PerfdataValue::Parse("123456"), boost::exception);
+ BOOST_CHECK_THROW(PerfdataValue::Parse("test=1,23456"), boost::exception);
+ BOOST_CHECK_THROW(PerfdataValue::Parse("test=123_456"), boost::exception);
+ BOOST_CHECK_THROW(PerfdataValue::Parse("test="), boost::exception);
+ BOOST_CHECK_THROW(PerfdataValue::Parse("test=123,456;1;1;1;1"), boost::exception);
+ BOOST_CHECK_THROW(PerfdataValue::Parse("test=1;123,456;1;1;1"), boost::exception);
+ BOOST_CHECK_THROW(PerfdataValue::Parse("test=1;1;123,456;1;1"), boost::exception);
+ BOOST_CHECK_THROW(PerfdataValue::Parse("test=1;1;1;123,456;1"), boost::exception);
+ BOOST_CHECK_THROW(PerfdataValue::Parse("test=1;1;1;1;123,456"), boost::exception);
+}
+
+BOOST_AUTO_TEST_CASE(multi)
+{
+ Array::Ptr pd = PluginUtility::SplitPerfdata("test::a=3 b=4");
+ BOOST_CHECK(pd->Get(0) == "test::a=3");
+ BOOST_CHECK(pd->Get(1) == "test::b=4");
+}
+
+BOOST_AUTO_TEST_CASE(scientificnotation)
+{
+ PerfdataValue::Ptr pdv = PerfdataValue::Parse("test=1.1e+1");
+ BOOST_CHECK(pdv->GetLabel() == "test");
+ BOOST_CHECK(pdv->GetValue() == 11);
+
+ String str = pdv->Format();
+ BOOST_CHECK(str == "test=11");
+
+ pdv = PerfdataValue::Parse("test=1.1e1");
+ BOOST_CHECK(pdv->GetLabel() == "test");
+ BOOST_CHECK(pdv->GetValue() == 11);
+
+ str = pdv->Format();
+ BOOST_CHECK(str == "test=11");
+
+ pdv = PerfdataValue::Parse("test=1.1e-1");
+ BOOST_CHECK(pdv->GetLabel() == "test");
+ BOOST_CHECK(pdv->GetValue() == 0.11);
+
+ str = pdv->Format();
+ BOOST_CHECK(str == "test=0.110000");
+
+ pdv = PerfdataValue::Parse("test=1.1E1");
+ BOOST_CHECK(pdv->GetLabel() == "test");
+ BOOST_CHECK(pdv->GetValue() == 11);
+
+ str = pdv->Format();
+ BOOST_CHECK(str == "test=11");
+
+ pdv = PerfdataValue::Parse("test=1.1E-1");
+ BOOST_CHECK(pdv->GetLabel() == "test");
+ BOOST_CHECK(pdv->GetValue() == 0.11);
+
+ str = pdv->Format();
+ BOOST_CHECK(str == "test=0.110000");
+
+ pdv = PerfdataValue::Parse("test=1.1E-1;1.2e+1;1.3E-1;1.4e-2;1.5E2");
+ BOOST_CHECK(pdv->GetLabel() == "test");
+ BOOST_CHECK(pdv->GetValue() == 0.11);
+ BOOST_CHECK(pdv->GetWarn() == 12);
+ BOOST_CHECK(pdv->GetCrit() == 0.13);
+ BOOST_CHECK(pdv->GetMin() == 0.014);
+ BOOST_CHECK(pdv->GetMax() == 150);
+
+ str = pdv->Format();
+ BOOST_CHECK(str == "test=0.110000;12;0.130000;0.014000;150");
+}
+
+BOOST_AUTO_TEST_CASE(parse_edgecases)
+{
+ // Trailing decimal point
+ PerfdataValue::Ptr pv = PerfdataValue::Parse("test=23.");
+ BOOST_CHECK(pv);
+ BOOST_CHECK(pv->GetValue() == 23.0);
+
+ // Leading decimal point
+ pv = PerfdataValue::Parse("test=.42");
+ BOOST_CHECK(pv);
+ BOOST_CHECK(pv->GetValue() == 0.42);
+
+ // E both as exponent and unit prefix
+ pv = PerfdataValue::Parse("test=+1.5E-15EB");
+ BOOST_CHECK(pv);
+ BOOST_CHECK(pv->GetValue() == 1.5e3);
+ BOOST_CHECK(pv->GetUnit() == "bytes");
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/icingaapplication-fixture.cpp b/test/icingaapplication-fixture.cpp
new file mode 100644
index 0000000..80fa4bf
--- /dev/null
+++ b/test/icingaapplication-fixture.cpp
@@ -0,0 +1,32 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "icingaapplication-fixture.hpp"
+
+using namespace icinga;
+
+static bool IcingaInitialized = false;
+
+IcingaApplicationFixture::IcingaApplicationFixture()
+{
+ if (!IcingaInitialized)
+ InitIcingaApplication();
+}
+
+void IcingaApplicationFixture::InitIcingaApplication()
+{
+ BOOST_TEST_MESSAGE("Initializing Application...");
+ Application::InitializeBase();
+
+ BOOST_TEST_MESSAGE("Initializing IcingaApplication...");
+ IcingaApplication::Ptr appInst = new IcingaApplication();
+ static_pointer_cast<ConfigObject>(appInst)->OnConfigLoaded();
+
+ IcingaInitialized = true;
+}
+
+IcingaApplicationFixture::~IcingaApplicationFixture()
+{
+ IcingaApplication::GetInstance().reset();
+}
+
+BOOST_GLOBAL_FIXTURE(IcingaApplicationFixture);
diff --git a/test/icingaapplication-fixture.hpp b/test/icingaapplication-fixture.hpp
new file mode 100644
index 0000000..23f4c9c
--- /dev/null
+++ b/test/icingaapplication-fixture.hpp
@@ -0,0 +1,21 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef ICINGAAPPLICATION_FIXTURE_H
+#define ICINGAAPPLICATION_FIXTURE_H
+
+#include "icinga/icingaapplication.hpp"
+#include "base/application.hpp"
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+struct IcingaApplicationFixture
+{
+ IcingaApplicationFixture();
+
+ void InitIcingaApplication();
+
+ ~IcingaApplicationFixture();
+};
+
+#endif // ICINGAAPPLICATION_FIXTURE_H
diff --git a/test/livestatus-fixture.cpp b/test/livestatus-fixture.cpp
new file mode 100644
index 0000000..aaa0e07
--- /dev/null
+++ b/test/livestatus-fixture.cpp
@@ -0,0 +1,53 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "config/configcompiler.hpp"
+#include "config/configitem.hpp"
+#include "base/application.hpp"
+#include "base/loader.hpp"
+#include "icingaapplication-fixture.hpp"
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+struct LivestatusFixture
+{
+ LivestatusFixture()
+ {
+ // ensure IcingaApplication is initialized before we try to add config
+ IcingaApplicationFixture icinga;
+
+ BOOST_TEST_MESSAGE("Preparing config objects...");
+
+ ConfigItem::RunWithActivationContext(new Function("CreateTestObjects", CreateTestObjects));
+ }
+
+ static void CreateTestObjects()
+ {
+ String config = R"CONFIG(
+object CheckCommand "dummy" {
+ command = "/bin/echo"
+}
+
+object Host "test-01" {
+ address = "127.0.0.1"
+ check_command = "dummy"
+}
+
+object Host "test-02" {
+ address = "127.0.0.2"
+ check_command = "dummy"
+}
+
+apply Service "livestatus" {
+ check_command = "dummy"
+ notes = "test livestatus"
+ assign where match("test-*", host.name)
+}
+)CONFIG";
+
+ std::unique_ptr<Expression> expr = ConfigCompiler::CompileText("<livestatus>", config);
+ expr->Evaluate(*ScriptFrame::GetCurrentFrame());
+ }
+};
+
+BOOST_GLOBAL_FIXTURE(LivestatusFixture);
diff --git a/test/livestatus.cpp b/test/livestatus.cpp
new file mode 100644
index 0000000..6aafa3b
--- /dev/null
+++ b/test/livestatus.cpp
@@ -0,0 +1,107 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "livestatus/livestatusquery.hpp"
+#include "base/application.hpp"
+#include "base/stdiostream.hpp"
+#include "base/json.hpp"
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
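+// Executes the given Livestatus query lines and returns the response text read back from an in-memory stdio stream.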
+String LivestatusQueryHelper(const std::vector<String>& lines)
+{
+ LivestatusQuery::Ptr query = new LivestatusQuery(lines, "");
+
+ std::stringstream stream;
+ StdioStream::Ptr sstream = new StdioStream(&stream, false);
+
+ query->Execute(sstream);
+
+ String output;
+ String result;
+
+ StreamReadContext src;
+ for (;;) {
+ StreamReadStatus srs = sstream->ReadLine(&result, src);
+
+ if (srs == StatusEof)
+ break;
+
+ if (srs != StatusNewItem)
+ continue;
+
+ if (result.GetLength() > 0)
+ output += result + "\n";
+ else
+ break;
+ }
+
+ BOOST_TEST_MESSAGE("Query Result: " + output);
+
+ return output;
+}
+
+//____________________________________________________________________________//
+
+BOOST_AUTO_TEST_SUITE(livestatus)
+
+BOOST_AUTO_TEST_CASE(hosts)
+{
+ BOOST_TEST_MESSAGE( "Querying Livestatus...");
+
+ std::vector<String> lines;
+ lines.emplace_back("GET hosts");
+ lines.emplace_back("Columns: host_name address check_command");
+ lines.emplace_back("OutputFormat: json");
+ lines.emplace_back("\n");
+
+ /* use our query helper */
+ String output = LivestatusQueryHelper(lines);
+
+ Array::Ptr query_result = JsonDecode(output);
+
+ /* the outer elements */
+ BOOST_CHECK(query_result->GetLength() > 1);
+
+ Array::Ptr res1 = query_result->Get(0);
+ Array::Ptr res2 = query_result->Get(1);
+
+ /* results are non-deterministic and not sorted by livestatus */
+ BOOST_CHECK(res1->Contains("test-01") || res2->Contains("test-01"));
+ BOOST_CHECK(res1->Contains("test-02") || res2->Contains("test-02"));
+ BOOST_CHECK(res1->Contains("127.0.0.1") || res2->Contains("127.0.0.1"));
+ BOOST_CHECK(res1->Contains("127.0.0.2") || res2->Contains("127.0.0.2"));
+
+ BOOST_TEST_MESSAGE("Done with testing livestatus hosts...");
+}
+
+BOOST_AUTO_TEST_CASE(services)
+{
+ BOOST_TEST_MESSAGE( "Querying Livestatus...");
+
+ std::vector<String> lines;
+ lines.emplace_back("GET services");
+ lines.emplace_back("Columns: host_name service_description check_command notes");
+ lines.emplace_back("OutputFormat: json");
+ lines.emplace_back("\n");
+
+ /* use our query helper */
+ String output = LivestatusQueryHelper(lines);
+
+ Array::Ptr query_result = JsonDecode(output);
+
+ /* the outer elements */
+ BOOST_CHECK(query_result->GetLength() > 1);
+
+ Array::Ptr res1 = query_result->Get(0);
+ Array::Ptr res2 = query_result->Get(1);
+
+ /* results are non-deterministic and not sorted by livestatus */
+ BOOST_CHECK(res1->Contains("livestatus") || res2->Contains("livestatus")); //service_description
+ BOOST_CHECK(res1->Contains("test livestatus") || res2->Contains("test livestatus")); //notes
+
+ BOOST_TEST_MESSAGE("Done with testing livestatus services...");
+}
+//____________________________________________________________________________//
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/livestatus/README b/test/livestatus/README
new file mode 100644
index 0000000..0c839e0
--- /dev/null
+++ b/test/livestatus/README
@@ -0,0 +1,12 @@
+Compat Livestatus Component Query Tests
+=======================================
+
+A collection of queries to be executed
+against the Livestatus socket.
+
+
+$ ./run_queries host/services
+
+or
+
+$ ./run_queries
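+
+Each query file contains a raw Livestatus request
+(see the files under queries/), for example:
+
+GET hosts
+Columns: name address
+ResponseHeader: fixed16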
diff --git a/test/livestatus/queries/commands/command b/test/livestatus/queries/commands/command
new file mode 100644
index 0000000..2e87152
--- /dev/null
+++ b/test/livestatus/queries/commands/command
@@ -0,0 +1,4 @@
+GET commands
+Columns: name line custom_variables
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/commands/modattr b/test/livestatus/queries/commands/modattr
new file mode 100644
index 0000000..90aaea7
--- /dev/null
+++ b/test/livestatus/queries/commands/modattr
@@ -0,0 +1,4 @@
+GET commands
+Columns: name modified_attributes modified_attributes_list
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/comments/comment b/test/livestatus/queries/comments/comment
new file mode 100644
index 0000000..18d7db9
--- /dev/null
+++ b/test/livestatus/queries/comments/comment
@@ -0,0 +1,3 @@
+GET comments
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/comments/comment_short b/test/livestatus/queries/comments/comment_short
new file mode 100644
index 0000000..e5c0072
--- /dev/null
+++ b/test/livestatus/queries/comments/comment_short
@@ -0,0 +1,4 @@
+GET comments
+Columns: id type is_service host_name service_description author comment
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/contacts/contacts b/test/livestatus/queries/contacts/contacts
new file mode 100644
index 0000000..a81c463
--- /dev/null
+++ b/test/livestatus/queries/contacts/contacts
@@ -0,0 +1,3 @@
+GET contacts
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/contacts/group b/test/livestatus/queries/contacts/group
new file mode 100644
index 0000000..a15b3a5
--- /dev/null
+++ b/test/livestatus/queries/contacts/group
@@ -0,0 +1,3 @@
+GET contactgroups
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/contacts/modattr b/test/livestatus/queries/contacts/modattr
new file mode 100644
index 0000000..8b520a0
--- /dev/null
+++ b/test/livestatus/queries/contacts/modattr
@@ -0,0 +1,4 @@
+GET contacts
+Columns: name modified_attributes modified_attributes_list
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/custom/scrambled b/test/livestatus/queries/custom/scrambled
new file mode 100644
index 0000000..01fe008
--- /dev/null
+++ b/test/livestatus/queries/custom/scrambled
@@ -0,0 +1,2 @@
+D
+
diff --git a/test/livestatus/queries/custom/thruk_alert_history b/test/livestatus/queries/custom/thruk_alert_history
new file mode 100644
index 0000000..de04c46
--- /dev/null
+++ b/test/livestatus/queries/custom/thruk_alert_history
@@ -0,0 +1,19 @@
+GET log
+Columns: class time type state host_name service_description plugin_output message options state_type contact_name
+Filter: time >= 1383692400
+Filter: time <= 1383778800
+Filter: type = SERVICE ALERT
+And: 1
+Filter: type = HOST ALERT
+And: 1
+Filter: type = SERVICE FLAPPING ALERT
+Filter: type = HOST FLAPPING ALERT
+Filter: type = SERVICE DOWNTIME ALERT
+Filter: type = HOST DOWNTIME ALERT
+Filter: message ~ starting\.\.\.
+Filter: message ~ shutting\ down\.\.\.
+Or: 8
+And: 3
+OutputFormat: json
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/custom/thruk_comments b/test/livestatus/queries/custom/thruk_comments
new file mode 100644
index 0000000..fc454ab
--- /dev/null
+++ b/test/livestatus/queries/custom/thruk_comments
@@ -0,0 +1,7 @@
+GET comments
+Columns: author comment entry_time entry_type expires expire_time host_name id persistent service_description source type
+Filter: host_name = localhost
+Filter: service_description = processes
+OutputFormat: json
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/downtimes/downtime b/test/livestatus/queries/downtimes/downtime
new file mode 100644
index 0000000..263dd51
--- /dev/null
+++ b/test/livestatus/queries/downtimes/downtime
@@ -0,0 +1,3 @@
+GET downtimes
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/downtimes/downtime_short b/test/livestatus/queries/downtimes/downtime_short
new file mode 100644
index 0000000..c2e1cf5
--- /dev/null
+++ b/test/livestatus/queries/downtimes/downtime_short
@@ -0,0 +1,4 @@
+GET downtimes
+Columns: id type is_service host_name service_description author comment start_time end_time
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/endpoints/endpoints b/test/livestatus/queries/endpoints/endpoints
new file mode 100644
index 0000000..3ff4a02
--- /dev/null
+++ b/test/livestatus/queries/endpoints/endpoints
@@ -0,0 +1,3 @@
+GET endpoints
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/hosts/bygroup b/test/livestatus/queries/hosts/bygroup
new file mode 100644
index 0000000..7485efc
--- /dev/null
+++ b/test/livestatus/queries/hosts/bygroup
@@ -0,0 +1,4 @@
+GET hostsbygroup
+Columns: hostgroup_name host_name
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/hosts/check b/test/livestatus/queries/hosts/check
new file mode 100644
index 0000000..bf4c216
--- /dev/null
+++ b/test/livestatus/queries/hosts/check
@@ -0,0 +1,4 @@
+GET hosts
+Columns: name plugin_output check_source
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/hosts/command b/test/livestatus/queries/hosts/command
new file mode 100644
index 0000000..61dfe83
--- /dev/null
+++ b/test/livestatus/queries/hosts/command
@@ -0,0 +1,4 @@
+GET hosts
+Columns: check_command check_command_expanded
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/hosts/comment b/test/livestatus/queries/hosts/comment
new file mode 100644
index 0000000..1ac1e84
--- /dev/null
+++ b/test/livestatus/queries/hosts/comment
@@ -0,0 +1,4 @@
+GET hosts
+Columns: comments comments_with_info comments_with_extra_info
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/hosts/contact b/test/livestatus/queries/hosts/contact
new file mode 100644
index 0000000..9322944
--- /dev/null
+++ b/test/livestatus/queries/hosts/contact
@@ -0,0 +1,4 @@
+GET hosts
+Columns: contacts contact_groups
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/hosts/customvar b/test/livestatus/queries/hosts/customvar
new file mode 100644
index 0000000..57f2e00
--- /dev/null
+++ b/test/livestatus/queries/hosts/customvar
@@ -0,0 +1,4 @@
+GET hosts
+Columns: name custom_variable_names custom_variable_values custom_variables cv_is_json
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/hosts/downtime b/test/livestatus/queries/hosts/downtime
new file mode 100644
index 0000000..db20ae7
--- /dev/null
+++ b/test/livestatus/queries/hosts/downtime
@@ -0,0 +1,4 @@
+GET hosts
+Columns: downtimes downtimes_with_info
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/hosts/extra b/test/livestatus/queries/hosts/extra
new file mode 100644
index 0000000..d6a583a
--- /dev/null
+++ b/test/livestatus/queries/hosts/extra
@@ -0,0 +1,4 @@
+GET hosts
+Columns: name address notes notes_expanded notes_url notes_url_expanded action_url action_url_expanded icon_image icon_image_expanded icon_image_alt x_2d y_2d
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/hosts/group b/test/livestatus/queries/hosts/group
new file mode 100644
index 0000000..cc60032
--- /dev/null
+++ b/test/livestatus/queries/hosts/group
@@ -0,0 +1,3 @@
+GET hostgroups
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/hosts/host b/test/livestatus/queries/hosts/host
new file mode 100644
index 0000000..4390fa9
--- /dev/null
+++ b/test/livestatus/queries/hosts/host
@@ -0,0 +1,4 @@
+GET hosts
+Columns: name parents childs
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/hosts/host_nagvis b/test/livestatus/queries/hosts/host_nagvis
new file mode 100644
index 0000000..a09d013
--- /dev/null
+++ b/test/livestatus/queries/hosts/host_nagvis
@@ -0,0 +1,6 @@
+GET hosts
+Columns: name alias host_name
+OutputFormat:json
+KeepAlive: on
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/hosts/legacy b/test/livestatus/queries/hosts/legacy
new file mode 100644
index 0000000..7a94932
--- /dev/null
+++ b/test/livestatus/queries/hosts/legacy
@@ -0,0 +1,4 @@
+GET hosts
+Columns: name notes notes_url action_url icon_image icon_image_alt
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/hosts/modattr b/test/livestatus/queries/hosts/modattr
new file mode 100644
index 0000000..b34e828
--- /dev/null
+++ b/test/livestatus/queries/hosts/modattr
@@ -0,0 +1,4 @@
+GET hosts
+Columns: name modified_attributes modified_attributes_list original_attributes
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/hosts/notification b/test/livestatus/queries/hosts/notification
new file mode 100644
index 0000000..27600ce
--- /dev/null
+++ b/test/livestatus/queries/hosts/notification
@@ -0,0 +1,4 @@
+GET hosts
+Columns: name current_notification_number notification_period notification_interval notifications_enabled no_more_notifications last_notification next_notification
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/hosts/services b/test/livestatus/queries/hosts/services
new file mode 100644
index 0000000..a6e10ba
--- /dev/null
+++ b/test/livestatus/queries/hosts/services
@@ -0,0 +1,4 @@
+GET hosts
+Columns: name num_services worst_service_state num_services_ok num_services_warn num_services_crit num_services_unknown num_services_pending worst_service_hard_state num_services_hard_ok num_services_hard_warn num_services_hard_crit num_services_hard_unknown services services_with_state services_with_info
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/hosts/state b/test/livestatus/queries/hosts/state
new file mode 100644
index 0000000..ba59c9e
--- /dev/null
+++ b/test/livestatus/queries/hosts/state
@@ -0,0 +1,4 @@
+GET hosts
+Columns: name last_state_change last_hard_state_change last_time_up last_time_down last_time_unreachable staleness is_reachable
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/hosts/stats_sum b/test/livestatus/queries/hosts/stats_sum
new file mode 100644
index 0000000..8a44ff4
--- /dev/null
+++ b/test/livestatus/queries/hosts/stats_sum
@@ -0,0 +1,5 @@
+GET hosts
+ResponseHeader: fixed16
+Stats: latency = 3
+Stats: sum latency
+
diff --git a/test/livestatus/queries/log/alerts b/test/livestatus/queries/log/alerts
new file mode 100644
index 0000000..8c4740a
--- /dev/null
+++ b/test/livestatus/queries/log/alerts
@@ -0,0 +1,6 @@
+GET log
+Columns: host_name service_description time lineno class type options plugin_output state state_type comment contact_name command_name
+Filter: time >= 1348657741
+Filter: message ~ ALERT
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/log/avail b/test/livestatus/queries/log/avail
new file mode 100644
index 0000000..b37162c
--- /dev/null
+++ b/test/livestatus/queries/log/avail
@@ -0,0 +1,12 @@
+GET log
+Columns: time type message class
+Filter: type = HOST ALERT
+Filter: state_type = HARD
+Filter: type = INITIAL HOST STATE
+Filter: state_type = HARD
+Filter: type = CURRENT HOST STATE
+Filter: state_type = HARD
+Filter: type = HOST DOWNTIME ALERT
+Or: 7
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/log/avail_svc b/test/livestatus/queries/log/avail_svc
new file mode 100644
index 0000000..9e3712b
--- /dev/null
+++ b/test/livestatus/queries/log/avail_svc
@@ -0,0 +1,13 @@
+GET log
+Columns: time type message class
+Filter: type = HOST DOWNTIME ALERT
+Filter: type = SERVICE ALERT
+Filter: state_type = HARD
+Filter: type = INITIAL SERVICE STATE
+Filter: state_type = HARD
+Filter: type = CURRENT SERVICE STATE
+Filter: state_type = HARD
+Filter: type = SERVICE DOWNTIME ALERT
+Or: 8
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/log/class b/test/livestatus/queries/log/class
new file mode 100644
index 0000000..c534980
--- /dev/null
+++ b/test/livestatus/queries/log/class
@@ -0,0 +1,5 @@
+GET log
+Filter: time >= 1348657741
+Filter: class = 1
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/log/localhost_disk b/test/livestatus/queries/log/localhost_disk
new file mode 100644
index 0000000..834047f
--- /dev/null
+++ b/test/livestatus/queries/log/localhost_disk
@@ -0,0 +1,7 @@
+GET log
+Columns: host_name service_description time lineno class type options plugin_output state state_type comment contact_name command_name
+Filter: time >= 1348657741
+Filter: host_name = localhost
+Filter: service_description = disk
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/log/log b/test/livestatus/queries/log/log
new file mode 100644
index 0000000..f69f2f4
--- /dev/null
+++ b/test/livestatus/queries/log/log
@@ -0,0 +1,4 @@
+GET log
+Filter: time >= 1348657741
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/log/minimal b/test/livestatus/queries/log/minimal
new file mode 100644
index 0000000..d224387
--- /dev/null
+++ b/test/livestatus/queries/log/minimal
@@ -0,0 +1,5 @@
+GET log
+Columns: host_name service_description time lineno class type options plugin_output state state_type comment contact_name command_name
+Filter: time >= 1348657741
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/log/trend b/test/livestatus/queries/log/trend
new file mode 100644
index 0000000..bd44d89
--- /dev/null
+++ b/test/livestatus/queries/log/trend
@@ -0,0 +1,26 @@
+GET log
+Columns: time type message
+Filter: host_name = localhost
+Filter: type = HOST ALERT
+Filter: state_type = HARD
+Filter: type = INITIAL HOST STATE
+Filter: state_type = HARD
+Filter: type = CURRENT HOST STATE
+Filter: state_type = HARD
+Filter: type = HOST DOWNTIME ALERT
+Or: 7
+And: 2
+Filter: host_name = localhost
+Filter: type = SERVICE ALERT
+Filter: state_type = HARD
+Filter: type = INITIAL SERVICE STATE
+Filter: state_type = HARD
+Filter: type = CURRENT SERVICE STATE
+Filter: state_type = HARD
+Filter: type = SERVICE DOWNTIME ALERT
+Or: 7
+And: 2
+Filter: class = 2
+Or: 3
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/services/bygroup b/test/livestatus/queries/services/bygroup
new file mode 100644
index 0000000..668e5f1
--- /dev/null
+++ b/test/livestatus/queries/services/bygroup
@@ -0,0 +1,4 @@
+GET servicesbygroup
+Columns: servicegroup_name host_name service_description
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/services/byhostgroup b/test/livestatus/queries/services/byhostgroup
new file mode 100644
index 0000000..ddcdbe8
--- /dev/null
+++ b/test/livestatus/queries/services/byhostgroup
@@ -0,0 +1,4 @@
+GET servicesbyhostgroup
+Columns: hostgroup_name host_name service_description
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/services/check b/test/livestatus/queries/services/check
new file mode 100644
index 0000000..1a8e857
--- /dev/null
+++ b/test/livestatus/queries/services/check
@@ -0,0 +1,4 @@
+GET services
+Columns: description host_name plugin_output check_source
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/services/command b/test/livestatus/queries/services/command
new file mode 100644
index 0000000..7bbf268
--- /dev/null
+++ b/test/livestatus/queries/services/command
@@ -0,0 +1,4 @@
+GET services
+Columns: check_command check_command_expanded
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/services/comment b/test/livestatus/queries/services/comment
new file mode 100644
index 0000000..d69ba67
--- /dev/null
+++ b/test/livestatus/queries/services/comment
@@ -0,0 +1,4 @@
+GET services
+Columns: comments comments_with_info comments_with_extra_info
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/services/contact b/test/livestatus/queries/services/contact
new file mode 100644
index 0000000..1729003
--- /dev/null
+++ b/test/livestatus/queries/services/contact
@@ -0,0 +1,4 @@
+GET services
+Columns: contacts contact_groups
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/services/customvar b/test/livestatus/queries/services/customvar
new file mode 100644
index 0000000..75c452c
--- /dev/null
+++ b/test/livestatus/queries/services/customvar
@@ -0,0 +1,4 @@
+GET services
+Columns: host_name service_description custom_variables cv_is_json
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/services/downtime b/test/livestatus/queries/services/downtime
new file mode 100644
index 0000000..243c000
--- /dev/null
+++ b/test/livestatus/queries/services/downtime
@@ -0,0 +1,4 @@
+GET services
+Columns: downtimes downtimes_with_info
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/services/extra b/test/livestatus/queries/services/extra
new file mode 100644
index 0000000..b58976f
--- /dev/null
+++ b/test/livestatus/queries/services/extra
@@ -0,0 +1,4 @@
+GET services
+Columns: description host_name host_address notes notes_expanded notes_url notes_url_expanded action_url action_url_expanded icon_image icon_image_expanded icon_image_alt
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/services/group b/test/livestatus/queries/services/group
new file mode 100644
index 0000000..83f4404
--- /dev/null
+++ b/test/livestatus/queries/services/group
@@ -0,0 +1,3 @@
+GET servicegroups
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/services/legacy b/test/livestatus/queries/services/legacy
new file mode 100644
index 0000000..ebc0b4b
--- /dev/null
+++ b/test/livestatus/queries/services/legacy
@@ -0,0 +1,4 @@
+GET services
+Columns: host_name description notes notes_url action_url icon_image icon_image_alt
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/services/modattr b/test/livestatus/queries/services/modattr
new file mode 100644
index 0000000..d1facd0
--- /dev/null
+++ b/test/livestatus/queries/services/modattr
@@ -0,0 +1,4 @@
+GET services
+Columns: description modified_attributes modified_attributes_list original_attributes
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/services/notification b/test/livestatus/queries/services/notification
new file mode 100644
index 0000000..eadcc90
--- /dev/null
+++ b/test/livestatus/queries/services/notification
@@ -0,0 +1,4 @@
+GET services
+Columns: description current_notification_number notification_period notification_interval notifications_enabled no_more_notifications last_notification next_notification
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/services/services b/test/livestatus/queries/services/services
new file mode 100644
index 0000000..09640fd
--- /dev/null
+++ b/test/livestatus/queries/services/services
@@ -0,0 +1,3 @@
+GET services
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/services/state b/test/livestatus/queries/services/state
new file mode 100644
index 0000000..65bf5f7
--- /dev/null
+++ b/test/livestatus/queries/services/state
@@ -0,0 +1,4 @@
+GET services
+Columns: description host_name last_state_change last_hard_state_change last_time_ok last_time_warning last_time_critical last_time_unknown staleness is_reachable
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/special/services b/test/livestatus/queries/special/services
new file mode 100644
index 0000000..94ddc91
--- /dev/null
+++ b/test/livestatus/queries/special/services
@@ -0,0 +1,5 @@
+GET services
+Separators: 10 32 35 95
+Columns: description custom_variables
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/statehist/duration b/test/livestatus/queries/statehist/duration
new file mode 100644
index 0000000..0de5bfa
--- /dev/null
+++ b/test/livestatus/queries/statehist/duration
@@ -0,0 +1,5 @@
+GET statehist
+Columns: host_name service_description state duration_ok duration_warning duration_critical duration_unknown duration_unmonitored
+Filter: time >= 1348657741
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/statehist/statehist b/test/livestatus/queries/statehist/statehist
new file mode 100644
index 0000000..1ef2382
--- /dev/null
+++ b/test/livestatus/queries/statehist/statehist
@@ -0,0 +1,5 @@
+GET statehist
+Columns: host_name service_description state duration duration_part in_downtime in_host_downtime in_notification_period is_flapping
+Filter: time >= 1348657741
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/statehist/statehist_disk b/test/livestatus/queries/statehist/statehist_disk
new file mode 100644
index 0000000..33f7340
--- /dev/null
+++ b/test/livestatus/queries/statehist/statehist_disk
@@ -0,0 +1,6 @@
+GET statehist
+Columns: host_name service_description state duration duration_part in_downtime in_host_downtime in_notification_period is_flapping
+Filter: service_description = disk
+Filter: time >= 1348657741
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/statehist/sum b/test/livestatus/queries/statehist/sum
new file mode 100644
index 0000000..b244adc
--- /dev/null
+++ b/test/livestatus/queries/statehist/sum
@@ -0,0 +1,9 @@
+GET statehist
+Columns: host_name service_description state duration duration_part
+Filter: host_name = localhost
+Filter: service_description = disk
+Filter: time >= 1348657741
+Stats: sum duration
+Stats: sum duration_part
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/status/checks b/test/livestatus/queries/status/checks
new file mode 100644
index 0000000..32e5957
--- /dev/null
+++ b/test/livestatus/queries/status/checks
@@ -0,0 +1,4 @@
+GET status
+Columns: accept_passive_host_checks accept_passive_service_checks execute_host_checks execute_service_checks
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/status/custom b/test/livestatus/queries/status/custom
new file mode 100644
index 0000000..1c8821d
--- /dev/null
+++ b/test/livestatus/queries/status/custom
@@ -0,0 +1,4 @@
+GET status
+Columns: custom_variables
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/status/livestatus b/test/livestatus/queries/status/livestatus
new file mode 100644
index 0000000..47f163c
--- /dev/null
+++ b/test/livestatus/queries/status/livestatus
@@ -0,0 +1,4 @@
+GET status
+Columns: connections connections_rate external_commands external_commands_rate livestatus_active_connections
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/status/program b/test/livestatus/queries/status/program
new file mode 100644
index 0000000..64d3c17
--- /dev/null
+++ b/test/livestatus/queries/status/program
@@ -0,0 +1,4 @@
+GET status
+Columns: nagios_pid program_start num_hosts num_services program_version
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/status/status b/test/livestatus/queries/status/status
new file mode 100644
index 0000000..bf92485
--- /dev/null
+++ b/test/livestatus/queries/status/status
@@ -0,0 +1,3 @@
+GET status
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/queries/timeperiods/timeperiod b/test/livestatus/queries/timeperiods/timeperiod
new file mode 100644
index 0000000..e5f02ca
--- /dev/null
+++ b/test/livestatus/queries/timeperiods/timeperiod
@@ -0,0 +1,3 @@
+GET timeperiods
+ResponseHeader: fixed16
+
diff --git a/test/livestatus/run_queries b/test/livestatus/run_queries
new file mode 100755
index 0000000..c80aa59
--- /dev/null
+++ b/test/livestatus/run_queries
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+NC=`which nc`
+LOCALSTATEDIR=`icinga2 variable get LocalStateDir`
+LIVESTATUSSOCKET="$LOCALSTATEDIR/run/icinga2/cmd/livestatus"
+LIVESTATUSHOST="127.0.0.1"
+LIVESTATUSPORT="6558"
+LIVESTATUSQUERIES="./queries"
+
+LIVESTATUSTABLE=$1
+
+echo -e "Querying Livestatus socket: $LIVESTATUSSOCKET"
+
+if [ -n "$LIVESTATUSTABLE" ]; then
+ cat "$LIVESTATUSTABLE"
+ (cat "$LIVESTATUSTABLE"; sleep 1) | $NC -U $LIVESTATUSSOCKET
+else
+
+ echo -e "Looking into $LIVESTATUSQUERIES\n"
+ for q in $(find $LIVESTATUSQUERIES -type f)
+ do
+ cat $q
+ (cat $q; sleep 1) | $NC -U $LIVESTATUSSOCKET
+ echo -e "================================\n\n"
+ done
+fi
diff --git a/test/methods-pluginnotificationtask.cpp b/test/methods-pluginnotificationtask.cpp
new file mode 100644
index 0000000..ec582dc
--- /dev/null
+++ b/test/methods-pluginnotificationtask.cpp
@@ -0,0 +1,88 @@
+/* Icinga 2 | (c) 2023 Icinga GmbH | GPLv2+ */
+
+#include "base/array.hpp"
+#include "icinga/checkresult.hpp"
+#include "icinga/host.hpp"
+#include "icinga/notification.hpp"
+#include "icinga/notificationcommand.hpp"
+#include "icinga/service.hpp"
+#include "icinga/user.hpp"
+#include "methods/pluginnotificationtask.hpp"
+#include <BoostTestTargetConfig.h>
+#include <future>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(methods_pluginnotificationtask)
+
+BOOST_AUTO_TEST_CASE(truncate_long_output)
+{
+#ifdef __linux__
+ Host::Ptr h = new Host();
+ CheckResult::Ptr hcr = new CheckResult();
+ CheckResult::Ptr scr = new CheckResult();
+ Service::Ptr s = new Service();
+ User::Ptr u = new User();
+ NotificationCommand::Ptr nc = new NotificationCommand();
+ Notification::Ptr n = new Notification();
+ String placeHolder (1024 * 1024, 'x');
+ std::promise<String> promise;
+ auto future (promise.get_future());
+
+ hcr->SetOutput("H" + placeHolder + "h", true);
+ scr->SetOutput("S" + placeHolder + "s", true);
+
+ h->SetName("example.com", true);
+ h->SetLastCheckResult(hcr, true);
+ h->Register();
+
+ s->SetHostName("example.com", true);
+ s->SetShortName("disk", true);
+ s->SetLastCheckResult(scr, true);
+ s->OnAllConfigLoaded(); // link Host
+
+ nc->SetCommandLine(
+ new Array({
+ "echo",
+ "host_output=$host.output$",
+ "service_output=$service.output$",
+ "notification_comment=$notification.comment$",
+ "output=$output$",
+ "comment=$comment$"
+ }),
+ true
+ );
+
+ nc->SetName("mail", true);
+ nc->Register();
+
+ n->SetFieldByName("host_name", "example.com", false, DebugInfo());
+ n->SetFieldByName("service_name", "disk", false, DebugInfo());
+ n->SetFieldByName("command", "mail", false, DebugInfo());
+ n->OnAllConfigLoaded(); // link Service
+
+ Checkable::ExecuteCommandProcessFinishedHandler = [&promise](const Value&, const ProcessResult& pr) {
+ promise.set_value(pr.Output);
+ };
+
+ PluginNotificationTask::ScriptFunc(n, u, nullptr, NotificationCustom, "jdoe", "C" + placeHolder + "c", nullptr, false);
+ future.wait();
+
+ Checkable::ExecuteCommandProcessFinishedHandler = nullptr;
+ h->Unregister();
+ nc->Unregister();
+
+ auto output (future.get());
+
+ BOOST_CHECK(output.Contains("host_output=Hx"));
+ BOOST_CHECK(!output.Contains("xh"));
+ BOOST_CHECK(output.Contains("x service_output=Sx"));
+ BOOST_CHECK(!output.Contains("xs"));
+ BOOST_CHECK(output.Contains("x notification_comment=Cx"));
+ BOOST_CHECK(!output.Contains("xc"));
+ BOOST_CHECK(output.Contains("x output=Sx"));
+ BOOST_CHECK(output.Contains("x comment=Cx"));
+#endif /* __linux__ */
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/remote-configpackageutility.cpp b/test/remote-configpackageutility.cpp
new file mode 100644
index 0000000..99c2a8b
--- /dev/null
+++ b/test/remote-configpackageutility.cpp
@@ -0,0 +1,25 @@
+/* Icinga 2 | (c) 2021 Icinga GmbH | GPLv2+ */
+
+#include "remote/configpackageutility.hpp"
+#include <vector>
+#include <string>
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(remote_configpackageutility)
+
+BOOST_AUTO_TEST_CASE(ValidateName)
+{
+ std::vector<std::string> validNames {"foo", "foo-bar", "FooBar", "Foo123", "_Foo-", "123bar"};
+ for (const std::string& n : validNames) {
+ BOOST_CHECK_MESSAGE(ConfigPackageUtility::ValidatePackageName(n), "'" << n << "' should be valid");
+ }
+
+ std::vector<std::string> invalidNames {"", ".", "..", "foo.bar", "foo/../bar", "foo/bar", "foo:bar"};
+ for (const std::string& n : invalidNames) {
+ BOOST_CHECK_MESSAGE(!ConfigPackageUtility::ValidatePackageName(n), "'" << n << "' should not be valid");
+ }
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/remote-url.cpp b/test/remote-url.cpp
new file mode 100644
index 0000000..36b7989
--- /dev/null
+++ b/test/remote-url.cpp
@@ -0,0 +1,128 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "base/array.hpp"
+#include "remote/url.hpp"
+#include <BoostTestTargetConfig.h>
+
+using namespace icinga;
+
+BOOST_AUTO_TEST_SUITE(remote_url)
+
+BOOST_AUTO_TEST_CASE(id_and_path)
+{
+ Url::Ptr url = new Url("http://icinga.com/foo/bar/baz?hurr=durr");
+
+ BOOST_CHECK(url->GetScheme() == "http");
+
+ BOOST_CHECK(url->GetAuthority() == "icinga.com");
+
+ std::vector<String> PathCorrect;
+ PathCorrect.emplace_back("foo");
+ PathCorrect.emplace_back("bar");
+ PathCorrect.emplace_back("baz");
+
+ BOOST_CHECK(url->GetPath() == PathCorrect);
+}
+
+BOOST_AUTO_TEST_CASE(get_and_set)
+{
+ Url::Ptr url = new Url();
+ url->SetScheme("ftp");
+ url->SetUsername("Horst");
+ url->SetPassword("Seehofer");
+ url->SetHost("koenigreich.bayern");
+ url->SetPort("1918");
+ url->SetPath({ "path", "to", "münchen" });
+
+ BOOST_CHECK(url->Format(false, true) == "ftp://Horst:Seehofer@koenigreich.bayern:1918/path/to/m%C3%BCnchen");
+
+ url->SetQuery({
+ {"shout", "hip"},
+ {"shout", "hip"},
+ {"shout", "hurra"},
+ {"sonderzeichen", "äü^ä+#ül-"}
+ });
+ url->AddQueryElement("count", "3");
+
+ auto mn (url->GetQuery());
+
+ BOOST_CHECK(mn.size() == 5);
+
+ BOOST_CHECK(mn[0].first == "shout");
+ BOOST_CHECK(mn[0].second == "hip");
+
+ BOOST_CHECK(mn[1].first == "shout");
+ BOOST_CHECK(mn[1].second == "hip");
+
+ BOOST_CHECK(mn[2].first == "shout");
+ BOOST_CHECK(mn[2].second == "hurra");
+
+ BOOST_CHECK(mn[3].first == "sonderzeichen");
+ BOOST_CHECK(mn[3].second == "äü^ä+#ül-");
+
+ BOOST_CHECK(mn[4].first == "count");
+ BOOST_CHECK(mn[4].second == "3");
+}
+
+BOOST_AUTO_TEST_CASE(parameters)
+{
+ Url::Ptr url = new Url("https://icinga.com/hya/?rair=robert&rain=karl&foo[]=bar");
+
+ auto query (url->GetQuery());
+
+ BOOST_CHECK(query.size() == 3);
+
+ BOOST_CHECK(query[0].first == "rair");
+ BOOST_CHECK(query[0].second == "robert");
+
+ BOOST_CHECK(query[1].first == "rain");
+ BOOST_CHECK(query[1].second == "karl");
+
+ BOOST_CHECK(query[2].first == "foo");
+ BOOST_CHECK(query[2].second == "bar");
+}
+
+BOOST_AUTO_TEST_CASE(format)
+{
+ Url::Ptr url = new Url("http://foo.bar/baz/?hop=top&flop=sop#iLIKEtrains");
+ Url::Ptr url2;
+ BOOST_CHECK(url2 = new Url(url->Format(false, false)));
+
+ url = new Url("//main.args/////////?k[]=one&k[]=two#three");
+ BOOST_CHECK(url2 = new Url(url->Format(false, false)));
+
+ url = new Url("/foo/bar/index.php?blaka");
+ BOOST_CHECK(url2 = new Url(url->Format(false, false)));
+ BOOST_CHECK(url->Format(false, false) == "/foo/bar/index.php?blaka");
+
+ url = new Url("/");
+ BOOST_CHECK(url->Format(false, false) == "/");
+
+ url = new Url("https://nsclient:8443/query/check_cpu?time%5B%5D=1m&time=5m&time%5B%5D=15m");
+ url->SetArrayFormatUseBrackets(false);
+ BOOST_CHECK(url2 = new Url(url->Format(false, false)));
+
+ url = new Url("https://icinga2/query?a[]=1&a[]=2&a[]=3");
+ url->SetArrayFormatUseBrackets(true);
+ BOOST_CHECK(url2 = new Url(url->Format(false, false)));
+}
+
+BOOST_AUTO_TEST_CASE(illegal_legal_strings)
+{
+ Url::Ptr url;
+ BOOST_CHECK(url = new Url("/?foo=barr&foo[]=bazz"));
+ BOOST_CHECK_THROW(url = new Url("/?]=gar"), std::invalid_argument);
+ BOOST_CHECK_THROW(url = new Url("/#?[]"), std::invalid_argument);
+ BOOST_CHECK(url = new Url("/?foo=bar&foo=ba"));
+ BOOST_CHECK_THROW(url = new Url("/?foo=bar&[]=d"), std::invalid_argument);
+ BOOST_CHECK(url = new Url("/?fo=&bar=garOA"));
+ BOOST_CHECK(url = new Url("https://127.0.0.1:5665/demo?type=Service&filter=service.state%3E0"));
+ BOOST_CHECK(url = new Url("/?foo=baz??&\?\?=/?"));
+ BOOST_CHECK(url = new Url("/"));
+ BOOST_CHECK(url = new Url("///////"));
+ BOOST_CHECK(url = new Url("/??[]=?#?=?"));
+ BOOST_CHECK(url = new Url("http://foo/#bar"));
+ BOOST_CHECK(url = new Url("//foo/"));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/test-runner.cpp b/test/test-runner.cpp
new file mode 100644
index 0000000..fac41ea
--- /dev/null
+++ b/test/test-runner.cpp
@@ -0,0 +1,21 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#define BOOST_TEST_MODULE icinga2
+#define BOOST_TEST_NO_MAIN
+#define BOOST_TEST_ALTERNATIVE_INIT_API
+
+#include <BoostTestTargetConfig.h>
+#include <boost/test/unit_test.hpp>
+#include <cstdlib>
+
+int BOOST_TEST_CALL_DECL
+main(int argc, char **argv)
+{
+ std::_Exit(boost::unit_test::unit_test_main(init_unit_test, argc, argv));
+ return EXIT_FAILURE;
+}
+
+#ifdef _WIN32
+#include <boost/test/impl/unit_test_main.ipp>
+#include <boost/test/impl/framework.ipp>
+#endif /* _WIN32 */
diff --git a/third-party/CMakeLists.txt b/third-party/CMakeLists.txt
new file mode 100644
index 0000000..fea750f
--- /dev/null
+++ b/third-party/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+add_subdirectory(mmatch)
+
+if(UNIX OR CYGWIN)
+ add_subdirectory(execvpe)
+endif()
+
+add_subdirectory(socketpair)
diff --git a/third-party/cmake/BoostTestTargets.cmake b/third-party/cmake/BoostTestTargets.cmake
new file mode 100644
index 0000000..4555dff
--- /dev/null
+++ b/third-party/cmake/BoostTestTargets.cmake
@@ -0,0 +1,262 @@
+# - Add tests using boost::test
+#
+# Add this line to your test files in place of including a basic boost test header:
+# #include <BoostTestTargetConfig.h>
+#
+# If you cannot do that and must use the included form for a given test,
+# include the line
+# // OVERRIDE_BOOST_TEST_INCLUDED_WARNING
+# in the same file with the boost test include.
+#
+# include(BoostTestTargets)
+# add_boost_test(<testdriver_name> SOURCES <source1> [<more sources...>]
+# [FAIL_REGULAR_EXPRESSION <additional fail regex>]
+# [LAUNCHER <generic launcher script>]
+# [LIBRARIES <library> [<library>...]]
+# [RESOURCES <resource> [<resource>...]]
+# [TESTS <testcasename> [<testcasename>...]]
+# [DEPENDENCIES <dependency> [<dependency>...]])
+#
+# If for some reason you need access to the executable target created,
+# it can be found in ${${testdriver_name}_TARGET_NAME} as specified when
+# you called add_boost_test
+#
+# Requires CMake 2.6 or newer (uses the 'function' command)
+#
+# Requires:
+# GetForceIncludeDefinitions
+# CopyResourcesToBuildTree
+#
+# Original Author:
+# 2009-2010 Ryan Pavlik <rpavlik@iastate.edu> <abiryan@ryand.net>
+# http://academic.cleardefinition.com
+# Iowa State University HCI Graduate Program/VRAC
+#
+# Copyright Iowa State University 2009-2010.
+# Distributed under the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt)
+
+if(__add_boost_test)
+ return()
+endif()
+set(__add_boost_test YES)
+
+set(BOOST_TEST_TARGET_PREFIX "boosttest")
+
+if(NOT Boost_FOUND)
+ find_package(Boost 1.34.0 QUIET)
+endif()
+
+include(GetForceIncludeDefinitions)
+include(CopyResourcesToBuildTree)
+
+if(Boost_FOUND)
+ set(_boosttesttargets_libs)
+ set(_boostConfig "BoostTestTargetsIncluded.h")
+ if(NOT Boost_UNIT_TEST_FRAMEWORK_LIBRARY)
+ find_package(Boost 1.34.0 QUIET COMPONENTS unit_test_framework)
+ endif()
+ if(Boost_UNIT_TEST_FRAMEWORK_LIBRARY)
+ set(_boosttesttargets_libs "${Boost_UNIT_TEST_FRAMEWORK_LIBRARY}")
+ if(Boost_USE_STATIC_LIBS)
+ set(_boostConfig "BoostTestTargetsStatic.h")
+ else()
+ set(_boostConfig "BoostTestTargetsDynamic.h")
+ endif()
+ endif()
+ get_filename_component(_moddir ${CMAKE_CURRENT_LIST_FILE} PATH)
+ configure_file("${_moddir}/${_boostConfig}"
+ "${CMAKE_CURRENT_BINARY_DIR}/BoostTestTargetConfig.h"
+ COPYONLY)
+ include_directories("${CMAKE_CURRENT_BINARY_DIR}")
+endif()
+
+function(add_boost_test _name)
+ if(NOT BUILD_TESTING)
+ return()
+ endif()
+ if("${CMAKE_VERSION}" VERSION_LESS "2.8.0")
+ if(NOT "${_boost_test_cmakever_pestered}x" EQUAL "${CMAKE_VERSION}x")
+ message(STATUS
+ "Not adding boost::test targets - CMake 2.8.0 or newer required, using ${CMAKE_VERSION}")
+ set(_boost_test_cmakever_pestered
+ "${CMAKE_VERSION}"
+ CACHE
+ INTERNAL
+ ""
+ FORCE)
+ endif()
+ return()
+ endif()
+
+ # parse arguments
+ set(_nowhere)
+ set(_curdest _nowhere)
+ set(_val_args
+ SOURCES
+ FAIL_REGULAR_EXPRESSION
+ LAUNCHER
+ LIBRARIES
+ RESOURCES
+ TESTS
+ DEPENDENCIES)
+ set(_bool_args
+ USE_COMPILED_LIBRARY)
+ foreach(_arg ${_val_args} ${_bool_args})
+ set(${_arg})
+ endforeach()
+ foreach(_element ${ARGN})
+ list(FIND _val_args "${_element}" _val_arg_find)
+ list(FIND _bool_args "${_element}" _bool_arg_find)
+ if("${_val_arg_find}" GREATER "-1")
+ set(_curdest "${_element}")
+ elseif("${_bool_arg_find}" GREATER "-1")
+ set("${_element}" ON)
+ set(_curdest _nowhere)
+ else()
+ list(APPEND ${_curdest} "${_element}")
+ endif()
+ endforeach()
+
+ if(_nowhere)
+ message(FATAL_ERROR "Syntax error in use of add_boost_test!")
+ endif()
+
+ if(NOT SOURCES)
+ message(FATAL_ERROR
+ "Syntax error in use of add_boost_test: at least one source file required!")
+ endif()
+
+ if(Boost_FOUND)
+
+ include_directories(${Boost_INCLUDE_DIRS})
+
+ set(includeType)
+ foreach(src ${SOURCES})
+ file(READ ${src} thefile)
+ if("${thefile}" MATCHES ".*BoostTestTargetConfig.h.*")
+ set(includeType CONFIGURED)
+ set(includeFileLoc ${src})
+ break()
+ elseif("${thefile}" MATCHES ".*boost/test/included/unit_test.hpp.*")
+ set(includeType INCLUDED)
+ set(includeFileLoc ${src})
+ set(_boosttesttargets_libs) # clear this out - linking would be a bad idea
+ if(NOT
+ "${thefile}"
+ MATCHES
+ ".*OVERRIDE_BOOST_TEST_INCLUDED_WARNING.*")
+ message("Please replace the include line in ${src} with this alternate include line instead:")
+ message(" \#include <BoostTestTargetConfig.h>")
+ message("Once you've saved your changes, re-run CMake. (See BoostTestTargets.cmake for more info)")
+ endif()
+ break()
+ endif()
+ endforeach()
+
+ if(NOT _boostTestTargetsNagged${_name} STREQUAL "${includeType}")
+ if("${includeType}" STREQUAL "CONFIGURED")
+ message(STATUS
+ "Test '${_name}' uses the CMake-configurable form of the boost test framework - congrats! (Including File: ${includeFileLoc})")
+ elseif("${includeType}" STREQUAL "INCLUDED")
+ message("In test '${_name}': ${includeFileLoc} uses the 'included' form of the boost unit test framework.")
+ else()
+ message("In test '${_name}': Didn't detect the CMake-configurable boost test include.")
+ message("Please replace your existing boost test include in that test with the following:")
+ message(" \#include <BoostTestTargetConfig.h>")
+ message("Once you've saved your changes, re-run CMake. (See BoostTestTargets.cmake for more info)")
+ endif()
+ endif()
+ set(_boostTestTargetsNagged${_name}
+ "${includeType}"
+ CACHE
+ INTERNAL
+ ""
+ FORCE)
+
+
+ if(RESOURCES)
+ list(APPEND SOURCES ${RESOURCES})
+ endif()
+
+ # Generate a unique target name, using the relative binary dir
+ # and provided name. (transform all / into _ and remove all other
+ # non-alphabet characters)
+ file(RELATIVE_PATH
+ targetpath
+ "${CMAKE_BINARY_DIR}"
+ "${CMAKE_CURRENT_BINARY_DIR}")
+ string(REGEX REPLACE "[^A-Za-z/_]" "" targetpath "${targetpath}")
+ string(REPLACE "/" "_" targetpath "${targetpath}")
+
+ set(_target_name ${BOOST_TEST_TARGET_PREFIX}-${targetpath}-${_name})
+ set(${_name}_TARGET_NAME "${_target_name}" PARENT_SCOPE)
+
+ # Build the test.
+ add_executable(${_target_name} ${SOURCES})
+
+ list(APPEND LIBRARIES ${_boosttesttargets_libs})
+
+ if(LIBRARIES)
+ target_link_libraries(${_target_name} ${LIBRARIES})
+ endif()
+
+ if(RESOURCES)
+ set_property(TARGET ${_target_name} PROPERTY RESOURCE ${RESOURCES})
+ copy_resources_to_build_tree(${_target_name})
+ endif()
+
+ if(NOT Boost_TEST_FLAGS)
+# set(Boost_TEST_FLAGS --catch_system_error=yes --output_format=XML)
+ set(Boost_TEST_FLAGS --catch_system_error=yes)
+ endif()
+
+ # TODO: Figure out why only recent boost handles individual test running properly
+
+ if(LAUNCHER)
+ set(_test_command ${LAUNCHER} "\$<TARGET_FILE:${_target_name}>")
+ else()
+ set(_test_command ${_target_name})
+ endif()
+
+ if(TESTS)
+ foreach(_test ${TESTS})
+ add_test(NAME
+ ${_name}-${_test}
+ COMMAND
+ ${_test_command}
+ --run_test=${_test}
+ ${Boost_TEST_FLAGS})
+ if(FAIL_REGULAR_EXPRESSION)
+ set_tests_properties(${_name}-${_test}
+ PROPERTIES
+ FAIL_REGULAR_EXPRESSION
+ "${FAIL_REGULAR_EXPRESSION}")
+ endif()
+ endforeach()
+ else()
+ add_test(NAME
+ ${_name}-boost_test
+ COMMAND
+ ${_test_command}
+ ${Boost_TEST_FLAGS})
+ if(FAIL_REGULAR_EXPRESSION)
+			set_tests_properties(${_name}-boost_test
+ PROPERTIES
+ FAIL_REGULAR_EXPRESSION
+ "${FAIL_REGULAR_EXPRESSION}")
+ endif()
+ endif()
+
+ if (DEPENDENCIES)
+ add_dependencies(${_target_name} ${DEPENDENCIES})
+ endif()
+
+ # CppCheck the test if we can.
+ if(COMMAND add_cppcheck)
+ add_cppcheck(${_target_name} STYLE UNUSED_FUNCTIONS)
+ endif()
+
+ endif()
+endfunction()
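The add_boost_test() function defined above is what a test directory's CMakeLists.txt would call for each Boost.Test driver. A minimal sketch of an invocation using the SOURCES/LIBRARIES/TESTS keywords documented in the module header (the driver, source and library names below are illustrative placeholders, not taken from this commit):

    include(BoostTestTargets)

    # Hypothetical driver built from two test sources and linked against a
    # made-up "mylib" target; each name listed under TESTS becomes its own
    # CTest entry, executed as "<driver> --run_test=<name>".
    add_boost_test(example_driver
      SOURCES example-driver.cpp example-case.cpp
      LIBRARIES mylib
      TESTS first_case second_case)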
diff --git a/third-party/cmake/BoostTestTargetsDynamic.h b/third-party/cmake/BoostTestTargetsDynamic.h
new file mode 100644
index 0000000..ae1f38e
--- /dev/null
+++ b/third-party/cmake/BoostTestTargetsDynamic.h
@@ -0,0 +1,9 @@
+// Small header computed by CMake to set up boost test.
+// include AFTER #define BOOST_TEST_MODULE whatever
+// but before any other boost test includes.
+
+// Using the Boost UTF dynamic library
+
+#define BOOST_TEST_DYN_LINK
+#include <boost/test/unit_test.hpp>
+
diff --git a/third-party/cmake/BoostTestTargetsIncluded.h b/third-party/cmake/BoostTestTargetsIncluded.h
new file mode 100644
index 0000000..253133c
--- /dev/null
+++ b/third-party/cmake/BoostTestTargetsIncluded.h
@@ -0,0 +1,7 @@
+// Small header computed by CMake to set up boost test.
+// include AFTER #define BOOST_TEST_MODULE whatever
+// but before any other boost test includes.
+
+// Using the Boost UTF included framework
+
+#include <boost/test/included/unit_test.hpp>
diff --git a/third-party/cmake/BoostTestTargetsStatic.h b/third-party/cmake/BoostTestTargetsStatic.h
new file mode 100644
index 0000000..dd3cdda
--- /dev/null
+++ b/third-party/cmake/BoostTestTargetsStatic.h
@@ -0,0 +1,7 @@
+// Small header computed by CMake to set up boost test.
+// include AFTER #define BOOST_TEST_MODULE whatever
+// but before any other boost test includes.
+
+// Using the Boost UTF static library
+
+#include <boost/test/unit_test.hpp>
diff --git a/third-party/cmake/CopyResourcesToBuildTree.cmake b/third-party/cmake/CopyResourcesToBuildTree.cmake
new file mode 100644
index 0000000..3512cc4
--- /dev/null
+++ b/third-party/cmake/CopyResourcesToBuildTree.cmake
@@ -0,0 +1,83 @@
+# - Copy the resources your app needs to the build tree.
+#
+# copy_resources_to_build_tree(<target_name>)
+#
+# Requires CMake 2.6 or newer (uses the 'function' command)
+#
+# Original Author:
+# 2009-2010 Ryan Pavlik <rpavlik@iastate.edu> <abiryan@ryand.net>
+# http://academic.cleardefinition.com
+# Iowa State University HCI Graduate Program/VRAC
+#
+# Copyright Iowa State University 2009-2010.
+# Distributed under the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt)
+
+if(__copy_resources_to_build_tree)
+ return()
+endif()
+set(__copy_resources_to_build_tree YES)
+
+function(copy_resources_to_build_tree _target)
+ get_target_property(_resources ${_target} RESOURCE)
+ if(NOT _resources)
+ # Bail if no resources
+ message(STATUS
+ "Told to copy resources for target ${_target}, but "
+ "no resources are set!")
+ return()
+ endif()
+
+ get_target_property(_path ${_target} LOCATION)
+ get_filename_component(_path "${_path}" PATH)
+
+ if(NOT MSVC AND NOT "${CMAKE_GENERATOR}" MATCHES "Makefiles")
+ foreach(_config ${CMAKE_CONFIGURATION_TYPES})
+ get_target_property(_path${_config} ${_target} LOCATION_${_config})
+ get_filename_component(_path${_config} "${_path${_config}}" PATH)
+ add_custom_command(TARGET ${_target}
+ POST_BUILD
+ COMMAND
+ ${CMAKE_COMMAND}
+ ARGS -E make_directory "${_path${_config}}/"
+ COMMENT "Creating directory ${_path${_config}}/")
+ endforeach()
+ endif()
+
+ foreach(_res ${_resources})
+ if(NOT IS_ABSOLUTE "${_res}")
+ get_filename_component(_res "${_res}" ABSOLUTE)
+ endif()
+ get_filename_component(_name "${_res}" NAME)
+
+ if(MSVC)
+ # Working dir is solution file dir, not exe file dir.
+ add_custom_command(TARGET ${_target}
+ POST_BUILD
+ COMMAND
+ ${CMAKE_COMMAND}
+ ARGS -E copy "${_res}" "${CMAKE_BINARY_DIR}/"
+ COMMENT "Copying ${_name} to ${CMAKE_BINARY_DIR}/ for MSVC")
+ else()
+ if("${CMAKE_GENERATOR}" MATCHES "Makefiles")
+ add_custom_command(TARGET ${_target}
+ POST_BUILD
+ COMMAND
+ ${CMAKE_COMMAND}
+ ARGS -E copy "${_res}" "${_path}/"
+ COMMENT "Copying ${_name} to ${_path}/")
+ else()
+ foreach(_config ${CMAKE_CONFIGURATION_TYPES})
+ add_custom_command(TARGET ${_target}
+ POST_BUILD
+ COMMAND
+ ${CMAKE_COMMAND}
+ ARGS -E copy "${_res}" "${_path${_config}}"
+ COMMENT "Copying ${_name} to ${_path${_config}}")
+ endforeach()
+
+ endif()
+ endif()
+ endforeach()
+endfunction()
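copy_resources_to_build_tree() reads the RESOURCE property of the given target and copies each listed file next to the built binary (or into every per-configuration output directory on multi-config generators). A minimal sketch with a made-up target and data file, not anything from this commit:

    # Hypothetical executable that needs a fixture file next to it at runtime.
    add_executable(demo_tool demo_tool.cpp)
    set_property(TARGET demo_tool PROPERTY
        RESOURCE "${CMAKE_CURRENT_SOURCE_DIR}/fixtures/demo.conf")
    copy_resources_to_build_tree(demo_tool)

The add_boost_test() function above calls this automatically for anything passed via its RESOURCES keyword.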
diff --git a/third-party/cmake/FindBISON.cmake b/third-party/cmake/FindBISON.cmake
new file mode 100644
index 0000000..6c6b420
--- /dev/null
+++ b/third-party/cmake/FindBISON.cmake
@@ -0,0 +1,221 @@
+# - Find bison executable and provides macros to generate custom build rules
+# The module defines the following variables:
+#
+# BISON_EXECUTABLE - path to the bison program
+# BISON_VERSION - version of bison
+# BISON_FOUND - true if the program was found
+#
+# If bison is found, the module defines the macros:
+# BISON_TARGET(<Name> <YaccInput> <CodeOutput> [VERBOSE <file>]
+# [COMPILE_FLAGS <string>] [HEADER <FILE>])
+# which will create a custom rule to generate a parser. <YaccInput> is
+# the path to a yacc file. <CodeOutput> is the name of the source file
+# generated by bison. A header file containing the token list is also
+# generated according to bison's -d option by default or if the HEADER
+# option is used, the argument is passed to bison's --defines option to
+# specify output file. If COMPILE_FLAGS option is specified, the next
+# parameter is added to the bison command line. If VERBOSE option is
+# specified, <file> is created and contains verbose descriptions of the
+# grammar and parser. The macro defines a set of variables:
+# BISON_${Name}_DEFINED - true if the macro ran successfully
+# BISON_${Name}_INPUT - The input source file, an alias for <YaccInput>
+# BISON_${Name}_OUTPUT_SOURCE - The source file generated by bison
+# BISON_${Name}_OUTPUT_HEADER - The header file generated by bison
+# BISON_${Name}_OUTPUTS - The sources files generated by bison
+# BISON_${Name}_COMPILE_FLAGS - Options used in the bison command line
+#
+# ====================================================================
+# Example:
+#
+# find_package(BISON)
+# BISON_TARGET(MyParser parser.y ${CMAKE_CURRENT_BINARY_DIR}/parser.cpp)
+# add_executable(Foo main.cpp ${BISON_MyParser_OUTPUTS})
+# ====================================================================
+
+#=============================================================================
+# Copyright 2009 Kitware, Inc.
+# Copyright 2006 Tristan Carel
+# Modified 2010 by Jon Siwek, adding HEADER option
+#
+# Distributed under the OSI-approved BSD License (the "License"):
+# CMake - Cross Platform Makefile Generator
+# Copyright 2000-2009 Kitware, Inc., Insight Software Consortium
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# * Neither the names of Kitware, Inc., the Insight Software Consortium,
+# nor the names of their contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# This software is distributed WITHOUT ANY WARRANTY; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the License for more information.
+#=============================================================================
+
+FIND_PROGRAM(BISON_EXECUTABLE bison DOC "path to the bison executable")
+MARK_AS_ADVANCED(BISON_EXECUTABLE)
+
+IF(BISON_EXECUTABLE)
+
+ EXECUTE_PROCESS(COMMAND ${BISON_EXECUTABLE} --version
+ OUTPUT_VARIABLE BISON_version_output
+ ERROR_VARIABLE BISON_version_error
+ RESULT_VARIABLE BISON_version_result
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ IF(NOT ${BISON_version_result} EQUAL 0)
+ MESSAGE(SEND_ERROR "Command \"${BISON_EXECUTABLE} --version\" failed with output:\n${BISON_version_error}")
+ ELSE()
+ STRING(REGEX REPLACE "^bison \\(GNU Bison\\) ([^\n]+)\n.*" "\\1"
+ BISON_VERSION "${BISON_version_output}")
+ ENDIF()
+
+ # internal macro
+ MACRO(BISON_TARGET_option_verbose Name BisonOutput filename)
+ LIST(APPEND BISON_TARGET_cmdopt "--verbose")
+ GET_FILENAME_COMPONENT(BISON_TARGET_output_path "${BisonOutput}" PATH)
+ GET_FILENAME_COMPONENT(BISON_TARGET_output_name "${BisonOutput}" NAME_WE)
+ ADD_CUSTOM_COMMAND(OUTPUT ${filename}
+ COMMAND ${CMAKE_COMMAND}
+ ARGS -E copy
+ "${BISON_TARGET_output_path}/${BISON_TARGET_output_name}.output"
+ "${filename}"
+ DEPENDS
+ "${BISON_TARGET_output_path}/${BISON_TARGET_output_name}.output"
+ COMMENT "[BISON][${Name}] Copying bison verbose table to ${filename}"
+ WORKING_DIRECTORY ${CMAKE_SOURCE_DIR})
+ SET(BISON_${Name}_VERBOSE_FILE ${filename})
+ LIST(APPEND BISON_TARGET_extraoutputs
+ "${BISON_TARGET_output_path}/${BISON_TARGET_output_name}.output")
+ ENDMACRO(BISON_TARGET_option_verbose)
+
+ # internal macro
+ MACRO(BISON_TARGET_option_extraopts Options)
+ SET(BISON_TARGET_extraopts "${Options}")
+ SEPARATE_ARGUMENTS(BISON_TARGET_extraopts)
+ LIST(APPEND BISON_TARGET_cmdopt ${BISON_TARGET_extraopts})
+ ENDMACRO(BISON_TARGET_option_extraopts)
+
+ #============================================================
+ # BISON_TARGET (public macro)
+ #============================================================
+ #
+ MACRO(BISON_TARGET Name BisonInput BisonOutput)
+ SET(BISON_TARGET_output_header "")
+ #SET(BISON_TARGET_command_opt "")
+ SET(BISON_TARGET_cmdopt "")
+ SET(BISON_TARGET_outputs "${BisonOutput}")
+ IF(NOT ${ARGC} EQUAL 3 AND
+ NOT ${ARGC} EQUAL 5 AND
+ NOT ${ARGC} EQUAL 7 AND
+ NOT ${ARGC} EQUAL 9)
+ MESSAGE(SEND_ERROR "Usage")
+ ELSE()
+ # Parsing parameters
+ IF(${ARGC} GREATER 5 OR ${ARGC} EQUAL 5)
+ IF("${ARGV3}" STREQUAL "VERBOSE")
+ BISON_TARGET_option_verbose(${Name} ${BisonOutput} "${ARGV4}")
+ ENDIF()
+ IF("${ARGV3}" STREQUAL "COMPILE_FLAGS")
+ BISON_TARGET_option_extraopts("${ARGV4}")
+ ENDIF()
+ IF("${ARGV3}" STREQUAL "HEADER")
+ set(BISON_TARGET_output_header "${ARGV4}")
+ ENDIF()
+ ENDIF()
+
+ IF(${ARGC} GREATER 7 OR ${ARGC} EQUAL 7)
+ IF("${ARGV5}" STREQUAL "VERBOSE")
+ BISON_TARGET_option_verbose(${Name} ${BisonOutput} "${ARGV6}")
+ ENDIF()
+
+ IF("${ARGV5}" STREQUAL "COMPILE_FLAGS")
+ BISON_TARGET_option_extraopts("${ARGV6}")
+ ENDIF()
+
+ IF("${ARGV5}" STREQUAL "HEADER")
+ set(BISON_TARGET_output_header "${ARGV6}")
+ ENDIF()
+ ENDIF()
+
+ IF(${ARGC} EQUAL 9)
+ IF("${ARGV7}" STREQUAL "VERBOSE")
+ BISON_TARGET_option_verbose(${Name} ${BisonOutput} "${ARGV8}")
+ ENDIF()
+
+ IF("${ARGV7}" STREQUAL "COMPILE_FLAGS")
+ BISON_TARGET_option_extraopts("${ARGV8}")
+ ENDIF()
+
+ IF("${ARGV7}" STREQUAL "HEADER")
+ set(BISON_TARGET_output_header "${ARGV8}")
+ ENDIF()
+ ENDIF()
+
+ IF(BISON_TARGET_output_header)
+ # Header's name passed in as argument to be used in --defines option
+ LIST(APPEND BISON_TARGET_cmdopt
+ "--defines=${BISON_TARGET_output_header}")
+ set(BISON_${Name}_OUTPUT_HEADER ${BISON_TARGET_output_header})
+ ELSE()
+ # Header's name generated by bison (see option -d)
+ LIST(APPEND BISON_TARGET_cmdopt "-d")
+ STRING(REGEX REPLACE "^(.*)(\\.[^.]*)$" "\\2" _fileext "${ARGV2}")
+ STRING(REPLACE "c" "h" _fileext ${_fileext})
+ STRING(REGEX REPLACE "^(.*)(\\.[^.]*)$" "\\1${_fileext}"
+ BISON_${Name}_OUTPUT_HEADER "${ARGV2}")
+ ENDIF()
+
+ LIST(APPEND BISON_TARGET_outputs "${BISON_${Name}_OUTPUT_HEADER}")
+
+ ADD_CUSTOM_COMMAND(OUTPUT ${BISON_TARGET_outputs}
+ ${BISON_TARGET_extraoutputs}
+ COMMAND ${BISON_EXECUTABLE}
+ ARGS ${BISON_TARGET_cmdopt} -o ${ARGV2} ${ARGV1}
+ DEPENDS ${ARGV1}
+ COMMENT "[BISON][${Name}] Building parser with bison ${BISON_VERSION}"
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
+
+ # define target variables
+ SET(BISON_${Name}_DEFINED TRUE)
+ SET(BISON_${Name}_INPUT ${ARGV1})
+ SET(BISON_${Name}_OUTPUTS ${BISON_TARGET_outputs})
+ SET(BISON_${Name}_COMPILE_FLAGS ${BISON_TARGET_cmdopt})
+ SET(BISON_${Name}_OUTPUT_SOURCE "${BisonOutput}")
+
+ ENDIF(NOT ${ARGC} EQUAL 3 AND
+ NOT ${ARGC} EQUAL 5 AND
+ NOT ${ARGC} EQUAL 7 AND
+ NOT ${ARGC} EQUAL 9)
+ ENDMACRO(BISON_TARGET)
+ #
+ #============================================================
+
+ENDIF(BISON_EXECUTABLE)
+
+INCLUDE(FindPackageHandleStandardArgs)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(BISON DEFAULT_MSG BISON_EXECUTABLE)
+
+# FindBISON.cmake ends here
\ No newline at end of file
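The header's example only shows the plain three-argument form of BISON_TARGET(); the HEADER keyword documented above, which routes the token header through bison's --defines option, can be sketched as follows (grammar and output names are placeholders, not this commit's actual build rules):

    find_package(BISON)

    # Hypothetical grammar; HEADER overrides the header name that the default
    # "-d" behaviour would otherwise derive from the output source name.
    BISON_TARGET(ConfigParser config_parser.yy
        ${CMAKE_CURRENT_BINARY_DIR}/config_parser.cc
        HEADER ${CMAKE_CURRENT_BINARY_DIR}/config_parser.hh)
    add_executable(config_tool main.cpp ${BISON_ConfigParser_OUTPUTS})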
diff --git a/third-party/cmake/FindEditline.cmake b/third-party/cmake/FindEditline.cmake
new file mode 100644
index 0000000..eb84e4b
--- /dev/null
+++ b/third-party/cmake/FindEditline.cmake
@@ -0,0 +1,86 @@
+# Copyright (c) 2014, Matthias Vallentin
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# Tries to find editline headers and libraries
+#
+# Usage of this module as follows:
+#
+# find_package(Editline)
+#
+# Variables used by this module; they can change the default behaviour and need
+# to be set before calling find_package:
+#
+# EDITLINE_ROOT_DIR Set this variable to the root installation of
+# editline if the module has problems finding
+# the proper installation path.
+#
+# Variables defined by this module:
+#
+# EDITLINE_FOUND System has Editline libs/headers
+# EDITLINE_LIBRARIES The Editline libraries
+# EDITLINE_INCLUDE_DIR The location of Editline headers
+# EDITLINE_VERSION The full version of Editline
+# EDITLINE_VERSION_MAJOR The version major of Editline
+# EDITLINE_VERSION_MINOR The version minor of Editline
+
+find_path(EDITLINE_INCLUDE_DIR
+ NAMES histedit.h
+ HINTS ${EDITLINE_ROOT_DIR}/include)
+
+if (EDITLINE_INCLUDE_DIR)
+ file(STRINGS ${EDITLINE_INCLUDE_DIR}/histedit.h editline_header REGEX "^#define.LIBEDIT_[A-Z]+.*$")
+
+ string(REGEX REPLACE ".*#define.LIBEDIT_MAJOR[ \t]+([0-9]+).*" "\\1" EDITLINE_VERSION_MAJOR "${editline_header}")
+ string(REGEX REPLACE ".*#define.LIBEDIT_MINOR[ \t]+([0-9]+).*" "\\1" EDITLINE_VERSION_MINOR "${editline_header}")
+
+ set(EDITLINE_VERSION_MAJOR ${EDITLINE_VERSION_MAJOR} CACHE STRING "" FORCE)
+ set(EDITLINE_VERSION_MINOR ${EDITLINE_VERSION_MINOR} CACHE STRING "" FORCE)
+ set(EDITLINE_VERSION ${EDITLINE_VERSION_MAJOR}.${EDITLINE_VERSION_MINOR}
+ CACHE STRING "" FORCE)
+endif ()
+
+find_library(EDITLINE_LIBRARIES
+ NAMES edit
+ HINTS ${EDITLINE_ROOT_DIR}/lib)
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(
+ Editline
+ DEFAULT_MSG
+ EDITLINE_LIBRARIES
+ EDITLINE_INCLUDE_DIR)
+
+mark_as_advanced(
+ EDITLINE_ROOT_DIR
+ EDITLINE_LIBRARIES
+ EDITLINE_INCLUDE_DIR
+ EDITLINE_VERSION
+ EDITLINE_VERSION_MAJOR
+ EDITLINE_VERSION_MINOR
+ )
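Consumers use this module through the usual find_package() pattern and the variables listed in the header above; EDITLINE_ROOT_DIR only needs to be set when libedit is installed outside the default search paths. A minimal sketch (the "console" target is a placeholder):

    # Assumes this directory is on CMAKE_MODULE_PATH.
    find_package(Editline REQUIRED)

    add_executable(console console.cpp)
    target_include_directories(console PRIVATE ${EDITLINE_INCLUDE_DIR})
    target_link_libraries(console ${EDITLINE_LIBRARIES})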
diff --git a/third-party/cmake/FindFLEX.cmake b/third-party/cmake/FindFLEX.cmake
new file mode 100644
index 0000000..7cd5c84
--- /dev/null
+++ b/third-party/cmake/FindFLEX.cmake
@@ -0,0 +1,185 @@
+# - Find flex executable and provides a macro to generate custom build rules
+#
+# The module defines the following variables:
+# FLEX_FOUND - true if the flex executable is found
+# FLEX_EXECUTABLE - the path to the flex executable
+# FLEX_VERSION - the version of flex
+# FLEX_LIBRARIES - The flex libraries
+#
+# The minimum required version of flex can be specified using the
+# standard syntax, e.g. FIND_PACKAGE(FLEX 2.5.13)
+#
+#
+# If flex is found on the system, the module provides the macro:
+# FLEX_TARGET(Name FlexInput FlexOutput [COMPILE_FLAGS <string>])
+# which creates a custom command to generate the <FlexOutput> file from
+# the <FlexInput> file. If COMPILE_FLAGS option is specified, the next
+# parameter is added to the flex command line. Name is an alias used to
+# get details of this custom command. Indeed the macro defines the
+# following variables:
+# FLEX_${Name}_DEFINED - true if the macro ran successfully
+# FLEX_${Name}_OUTPUTS - the source file generated by the custom rule, an
+# alias for FlexOutput
+# FLEX_${Name}_INPUT - the flex source file, an alias for ${FlexInput}
+#
+# Flex scanners often use tokens defined by Bison: the code generated
+# by Flex depends on the header generated by Bison. This module also
+# defines a macro:
+# ADD_FLEX_BISON_DEPENDENCY(FlexTarget BisonTarget)
+# which adds the required dependency between a scanner and a parser
+# where <FlexTarget> and <BisonTarget> are the first parameters of
+# respectively FLEX_TARGET and BISON_TARGET macros.
+#
+# ====================================================================
+# Example:
+#
+# find_package(BISON)
+# find_package(FLEX)
+#
+# BISON_TARGET(MyParser parser.y ${CMAKE_CURRENT_BINARY_DIR}/parser.cpp)
+# FLEX_TARGET(MyScanner lexer.l ${CMAKE_CURRENT_BINARY_DIR}/lexer.cpp)
+# ADD_FLEX_BISON_DEPENDENCY(MyScanner MyParser)
+#
+# include_directories(${CMAKE_CURRENT_BINARY_DIR})
+# add_executable(Foo
+# Foo.cc
+# ${BISON_MyParser_OUTPUTS}
+# ${FLEX_MyScanner_OUTPUTS}
+# )
+# ====================================================================
+
+#=============================================================================
+# Copyright 2009 Kitware, Inc.
+# Copyright 2006 Tristan Carel
+# Modified 2010 by Jon Siwek, backporting for CMake 2.6 compat
+#
+# Distributed under the OSI-approved BSD License (the "License"):
+# CMake - Cross Platform Makefile Generator
+# Copyright 2000-2009 Kitware, Inc., Insight Software Consortium
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# * Neither the names of Kitware, Inc., the Insight Software Consortium,
+# nor the names of their contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# This software is distributed WITHOUT ANY WARRANTY; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the License for more information.
+#=============================================================================
+
+FIND_PROGRAM(FLEX_EXECUTABLE flex DOC "path to the flex executable")
+MARK_AS_ADVANCED(FLEX_EXECUTABLE)
+
+FIND_LIBRARY(FL_LIBRARY NAMES fl
+ DOC "path to the fl library")
+MARK_AS_ADVANCED(FL_LIBRARY)
+SET(FLEX_LIBRARIES ${FL_LIBRARY})
+
+IF(FLEX_EXECUTABLE)
+ GET_FILENAME_COMPONENT(FLEX_EXECUTABLE_NAME ${FLEX_EXECUTABLE} NAME)
+ EXECUTE_PROCESS(COMMAND ${FLEX_EXECUTABLE} --version
+ OUTPUT_VARIABLE FLEX_version_output
+ ERROR_VARIABLE FLEX_version_error
+ RESULT_VARIABLE FLEX_version_result
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ IF(NOT ${FLEX_version_result} EQUAL 0)
+ IF(FLEX_FIND_REQUIRED)
+ MESSAGE(SEND_ERROR "Command \"${FLEX_EXECUTABLE} --version\" failed with output:\n${FLEX_version_output}\n${FLEX_version_error}")
+ ELSE()
+ MESSAGE("Command \"${FLEX_EXECUTABLE} --version\" failed with output:\n${FLEX_version_output}\n${FLEX_version_error}\nFLEX_VERSION will not be available")
+ ENDIF()
+ ELSE()
+ STRING(REGEX REPLACE "^${FLEX_EXECUTABLE_NAME}[^ ]* (.*)$" "\\1"
+ FLEX_VERSION "${FLEX_version_output}")
+ ENDIF()
+
+ IF(FLEX_FIND_VERSION)
+ IF("${FLEX_VERSION}" VERSION_LESS "${FLEX_FIND_VERSION}")
+ MESSAGE(SEND_ERROR "Your version of flex is too old. You can specify an alternative path using -DFLEX_EXECUTABLE=/path/to/flex")
+ ENDIF()
+ ENDIF()
+
+ #============================================================
+ # FLEX_TARGET (public macro)
+ #============================================================
+ #
+ MACRO(FLEX_TARGET Name Input Output)
+ SET(FLEX_TARGET_usage "FLEX_TARGET(<Name> <Input> <Output> [COMPILE_FLAGS <string>]")
+ IF(${ARGC} GREATER 3)
+ IF(${ARGC} EQUAL 5)
+ IF("${ARGV3}" STREQUAL "COMPILE_FLAGS")
+ SET(FLEX_EXECUTABLE_opts "${ARGV4}")
+ SEPARATE_ARGUMENTS(FLEX_EXECUTABLE_opts)
+ ELSE()
+ MESSAGE(SEND_ERROR ${FLEX_TARGET_usage})
+ ENDIF()
+ ELSE()
+ MESSAGE(SEND_ERROR ${FLEX_TARGET_usage})
+ ENDIF()
+ ENDIF()
+
+ ADD_CUSTOM_COMMAND(OUTPUT ${Output}
+ COMMAND ${FLEX_EXECUTABLE}
+ ARGS ${FLEX_EXECUTABLE_opts} -o${Output} ${Input}
+ DEPENDS ${Input}
+ COMMENT "[FLEX][${Name}] Building scanner with flex ${FLEX_VERSION}"
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
+
+ SET(FLEX_${Name}_DEFINED TRUE)
+ SET(FLEX_${Name}_OUTPUTS ${Output})
+ SET(FLEX_${Name}_INPUT ${Input})
+ SET(FLEX_${Name}_COMPILE_FLAGS ${FLEX_EXECUTABLE_opts})
+ ENDMACRO(FLEX_TARGET)
+ #============================================================
+
+
+ #============================================================
+ # ADD_FLEX_BISON_DEPENDENCY (public macro)
+ #============================================================
+ #
+ MACRO(ADD_FLEX_BISON_DEPENDENCY FlexTarget BisonTarget)
+
+ IF(NOT FLEX_${FlexTarget}_OUTPUTS)
+      MESSAGE(SEND_ERROR "Flex target `${FlexTarget}' does not exist.")
+ ENDIF()
+
+ IF(NOT BISON_${BisonTarget}_OUTPUT_HEADER)
+      MESSAGE(SEND_ERROR "Bison target `${BisonTarget}' does not exist.")
+ ENDIF()
+
+ SET_SOURCE_FILES_PROPERTIES(${FLEX_${FlexTarget}_OUTPUTS}
+ PROPERTIES OBJECT_DEPENDS ${BISON_${BisonTarget}_OUTPUT_HEADER})
+ ENDMACRO(ADD_FLEX_BISON_DEPENDENCY)
+ #============================================================
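+
+  # Example usage (an illustrative sketch; the target and file names are
+  # hypothetical, and MyParser is assumed to be a BISON_TARGET defined
+  # elsewhere via FindBISON):
+  #
+  #   FLEX_TARGET(MyScanner lexer.l ${CMAKE_CURRENT_BINARY_DIR}/lexer.cpp)
+  #   ADD_FLEX_BISON_DEPENDENCY(MyScanner MyParser)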
+
+ENDIF(FLEX_EXECUTABLE)
+
+INCLUDE(FindPackageHandleStandardArgs)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(FLEX FLEX_EXECUTABLE
+ FLEX_VERSION)
+
+# FindFLEX.cmake ends here
diff --git a/third-party/cmake/FindGit.cmake b/third-party/cmake/FindGit.cmake
new file mode 100644
index 0000000..d23e6f1
--- /dev/null
+++ b/third-party/cmake/FindGit.cmake
@@ -0,0 +1,73 @@
+#.rst:
+# FindGit
+# -------
+#
+#
+#
+# The module defines the following variables:
+#
+# ::
+#
+# GIT_EXECUTABLE - path to git command line client
+# GIT_FOUND - true if the command line client was found
+# GIT_VERSION_STRING - the version of git found (since CMake 2.8.8)
+#
+# Example usage:
+#
+# ::
+#
+# find_package(Git)
+# if(GIT_FOUND)
+# message("git found: ${GIT_EXECUTABLE}")
+# endif()
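+#
+# A version check is also possible (an illustrative sketch; the minimum
+# version shown is arbitrary):
+#
+#    find_package(Git)
+#    if(GIT_FOUND AND GIT_VERSION_STRING VERSION_LESS "1.8.0")
+#      message(WARNING "git is older than 1.8.0")
+#    endif()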
+
+#=============================================================================
+# Copyright 2010 Kitware, Inc.
+# Copyright 2012 Rolf Eike Beer <eike@sf-mail.de>
+#
+# Distributed under the OSI-approved BSD License (the "License");
+# see accompanying file Copyright.txt for details.
+#
+# This software is distributed WITHOUT ANY WARRANTY; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the License for more information.
+#=============================================================================
+# (To distribute this file outside of CMake, substitute the full
+# License text for the above reference.)
+
+# Look for 'git' or 'eg' (easy git)
+#
+set(git_names git eg)
+
+# Prefer .cmd variants on Windows unless running in a Makefile
+# in the MSYS shell.
+#
+if(WIN32)
+ if(NOT CMAKE_GENERATOR MATCHES "MSYS")
+ set(git_names git.cmd git eg.cmd eg)
+ endif()
+endif()
+
+find_program(GIT_EXECUTABLE
+ NAMES ${git_names}
+ PATH_SUFFIXES Git/cmd Git/bin
+ DOC "git command line client"
+ )
+mark_as_advanced(GIT_EXECUTABLE)
+
+if(GIT_EXECUTABLE)
+ execute_process(COMMAND ${GIT_EXECUTABLE} --version
+ OUTPUT_VARIABLE git_version
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if (git_version MATCHES "^git version [0-9]")
+ string(REPLACE "git version " "" GIT_VERSION_STRING "${git_version}")
+ endif()
+endif()
+
+# Handle the QUIETLY and REQUIRED arguments and set GIT_FOUND to TRUE if
+# all listed variables are TRUE
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(Git GIT_EXECUTABLE
+ GIT_VERSION_STRING)
diff --git a/third-party/cmake/FindMySQL.cmake b/third-party/cmake/FindMySQL.cmake
new file mode 100644
index 0000000..d3a6e7f
--- /dev/null
+++ b/third-party/cmake/FindMySQL.cmake
@@ -0,0 +1,142 @@
+#--------------------------------------------------------
+# Copyright (C) 1995-2007 MySQL AB
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# There are special exceptions to the terms and conditions of the GPL
+# as it is applied to this software. View the full text of the exception
+# in file LICENSE.exceptions in the top-level directory of this software
+# distribution.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# The MySQL Connector/ODBC is licensed under the terms of the
+# GPL, like most MySQL Connectors. There are special exceptions
+# to the terms and conditions of the GPL as it is applied to
+# this software, see the FLOSS License Exception available on
+# mysql.com.
+
+##########################################################################
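+
+# Example usage (an illustrative sketch; the target name is hypothetical, and
+# this module's directory is assumed to be on CMAKE_MODULE_PATH so that
+# find_package(MySQL) picks it up):
+#
+#   find_package(MySQL)
+#   if(MYSQL_FOUND)
+#     include_directories(${MYSQL_INCLUDE_DIR})
+#     target_link_libraries(myapp ${MYSQL_CLIENT_LIBS})
+#   endif()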
+
+
+FILE(GLOB _macports_include_dirs /opt/local/include/mysql*/mysql)
+
+#-------------- FIND MYSQL_INCLUDE_DIR ------------------
+FIND_PATH(MYSQL_INCLUDE_DIR mysql.h
+ $ENV{MYSQL_INCLUDE_DIR}
+ $ENV{MYSQL_DIR}/include
+ /usr/include/mysql
+ /usr/local/include/mysql
+ /usr/include/mariadb
+ /usr/local/include/mariadb
+ /opt/mysql/mysql/include
+ /opt/mysql/mysql/include/mysql
+ /opt/mysql/include
+ /opt/mariadb/include/mysql
+ /opt/local/include/mysql5
+ /usr/local/mysql/include
+ /usr/local/mysql/include/mysql
+ ${_macports_include_dirs}
+ $ENV{ProgramFiles}/MySQL/*/include
+ $ENV{SystemDrive}/MySQL/*/include
+ $ENV{ProgramFiles}/MariaDB*/include/mysql
+ $ENV{SystemDrive}/MariaDB*/include/mysql
+)
+
+UNSET(_macports_include_dirs)
+
+#----------------- FIND MYSQL_LIB_DIR -------------------
+IF (WIN32)
+ SET(MYSQL_CLIENT_LIBS libmysql)
+
+ # Set lib path suffixes
+ # dist = for mysql binary distributions
+ # build = for custom built tree
+ IF (CMAKE_BUILD_TYPE STREQUAL Debug)
+ SET(libsuffixDist debug)
+ SET(libsuffixBuild Debug)
+ ELSE (CMAKE_BUILD_TYPE STREQUAL Debug)
+ SET(libsuffixDist opt)
+ SET(libsuffixBuild Release)
+ ADD_DEFINITIONS(-DDBUG_OFF)
+ ENDIF (CMAKE_BUILD_TYPE STREQUAL Debug)
+
+ FIND_LIBRARY(MYSQL_LIB NAMES mysqlclient
+ PATHS
+ $ENV{MYSQL_DIR}
+ $ENV{MYSQL_DIR}/lib/${libsuffixDist}
+ $ENV{MYSQL_DIR}/libmysql
+ $ENV{MYSQL_DIR}/libmysql/${libsuffixBuild}
+ $ENV{MYSQL_DIR}/client/${libsuffixBuild}
+ $ENV{MYSQL_DIR}/libmysql/${libsuffixBuild}
+ $ENV{ProgramFiles}/MySQL/*/lib/${libsuffixDist}
+ $ENV{SystemDrive}/MySQL/*/lib/${libsuffixDist})
+ELSE (WIN32)
+ IF (NOT MYSQL_CLIENT_LIBS)
+ SET(MYSQL_CLIENT_LIBS mysqlclient mariadbclient)
+ ENDIF (NOT MYSQL_CLIENT_LIBS)
+
+ FILE(GLOB _macports_lib_dirs /opt/local/lib/mysql*/mysql)
+
+ FIND_LIBRARY(MYSQL_LIB NAMES ${MYSQL_CLIENT_LIBS}
+ PATHS
+ $ENV{MYSQL_DIR}
+ $ENV{MYSQL_DIR}/libmysql_r/.libs
+ $ENV{MYSQL_DIR}/lib
+ $ENV{MYSQL_DIR}/lib/mysql
+ /usr/lib/mysql
+ /usr/local/lib/mysql
+ /usr/local/mysql/lib
+ /usr/local/mysql/lib/mysql
+ /opt/local/mysql5/lib
+ /opt/local/lib/mysql5/mysql
+ /opt/mysql/mysql/lib/mysql
+ /opt/mysql/lib/mysql
+ ${_macports_lib_dirs})
+
+ UNSET(_macports_lib_dirs)
+ENDIF (WIN32)
+
+IF(MYSQL_LIB)
+ GET_FILENAME_COMPONENT(MYSQL_LIB_DIR ${MYSQL_LIB} PATH)
+ENDIF(MYSQL_LIB)
+
+IF (MYSQL_INCLUDE_DIR AND MYSQL_LIB_DIR)
+ SET(MYSQL_FOUND TRUE)
+
+ INCLUDE_DIRECTORIES(${MYSQL_INCLUDE_DIR})
+ LINK_DIRECTORIES(${MYSQL_LIB_DIR})
+
+ FIND_LIBRARY(MYSQL_ZLIB zlib PATHS ${MYSQL_LIB_DIR})
+ FIND_LIBRARY(MYSQL_YASSL yassl PATHS ${MYSQL_LIB_DIR})
+ FIND_LIBRARY(MYSQL_TAOCRYPT taocrypt PATHS ${MYSQL_LIB_DIR})
+
+ IF (MYSQL_ZLIB)
+ SET(MYSQL_CLIENT_LIBS ${MYSQL_CLIENT_LIBS} zlib)
+ ENDIF (MYSQL_ZLIB)
+ IF (MYSQL_YASSL)
+ SET(MYSQL_CLIENT_LIBS ${MYSQL_CLIENT_LIBS} yassl)
+ ENDIF (MYSQL_YASSL)
+ IF (MYSQL_TAOCRYPT)
+ SET(MYSQL_CLIENT_LIBS ${MYSQL_CLIENT_LIBS} taocrypt)
+ ENDIF (MYSQL_TAOCRYPT)
+ # Added needed mysqlclient dependencies on Windows
+ IF (WIN32)
+ SET(MYSQL_CLIENT_LIBS ${MYSQL_CLIENT_LIBS} ws2_32)
+ ENDIF (WIN32)
+
+ MESSAGE(STATUS "MySQL Include dir: ${MYSQL_INCLUDE_DIR} library dir: ${MYSQL_LIB_DIR}")
+ MESSAGE(STATUS "MySQL client libraries: ${MYSQL_CLIENT_LIBS}")
+ELSE (MYSQL_INCLUDE_DIR AND MYSQL_LIB_DIR)
+ MESSAGE(STATUS "Cannot find MySQL. Include dir: ${MYSQL_INCLUDE_DIR} library dir: ${MYSQL_LIB_DIR}")
+ SET(MYSQL_FOUND FALSE)
+ENDIF (MYSQL_INCLUDE_DIR AND MYSQL_LIB_DIR)
diff --git a/third-party/cmake/FindPostgreSQL.cmake b/third-party/cmake/FindPostgreSQL.cmake
new file mode 100644
index 0000000..4d213d2
--- /dev/null
+++ b/third-party/cmake/FindPostgreSQL.cmake
@@ -0,0 +1,185 @@
+#.rst:
+# FindPostgreSQL
+# --------------
+#
+# Find the PostgreSQL installation.
+#
+# On Windows, we make the assumption that, if the PostgreSQL files are
+# installed, the default directory will be C:\Program Files\PostgreSQL.
+#
+# This module defines
+#
+# ::
+#
+# PostgreSQL_LIBRARIES - the PostgreSQL libraries needed for linking
+# PostgreSQL_INCLUDE_DIRS - the directories of the PostgreSQL headers
+
+#=============================================================================
+# Copyright 2004-2009 Kitware, Inc.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# * Neither the names of Kitware, Inc., the Insight Software Consortium,
+# nor the names of their contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# This software is distributed WITHOUT ANY WARRANTY; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the License for more information.
+#=============================================================================
+
+# ----------------------------------------------------------------------------
+# History:
+# This module is derived from the module originally found in the VTK source tree.
+#
+# ----------------------------------------------------------------------------
+# Note:
+# PostgreSQL_ADDITIONAL_VERSIONS is a variable that can be used to set additional
+# version numbers of PostgreSQL to search for.
+# On Windows the default installation of PostgreSQL uses the version as part of the path,
+# e.g. C:\Program Files\PostgreSQL\8.4.
+# Currently, the following version numbers are known to this module:
+# "9.1" "9.0" "8.4" "8.3" "8.2" "8.1" "8.0"
+#
+# To use this variable just do something like this:
+# set(PostgreSQL_ADDITIONAL_VERSIONS "9.2" "8.4.4")
+# before calling find_package(PostgreSQL) in your CMakeLists.txt file.
+# The versions you list here will be searched first, in the order specified,
+# before the default versions.
+#
+# ----------------------------------------------------------------------------
+# If FindPostgreSQL.cmake cannot find the include files or the library files,
+# you may need to manually set:
+#  PostgreSQL_INCLUDE_DIR - the path to where the PostgreSQL include files are.
+#  PostgreSQL_LIBRARY_DIR - the path to where the PostgreSQL library files are.
+#
+# ----------------------------------------------------------------------------
+# The following variables are set if PostgreSQL is found:
+# PostgreSQL_FOUND - Set to true when PostgreSQL is found.
+# PostgreSQL_INCLUDE_DIRS - Include directories for PostgreSQL
+# PostgreSQL_LIBRARY_DIRS - Link directories for PostgreSQL libraries
+# PostgreSQL_LIBRARIES - The PostgreSQL libraries.
+#
+# ----------------------------------------------------------------------------
+# If you have installed PostgreSQL in a non-standard location, you have three options.
+# (Please note that in the following comments, it is assumed that <Your Path>
+# points to the root directory of the include directory of PostgreSQL.)
+# 1) After CMake runs, set PostgreSQL_INCLUDE_DIR to <Your Path>/include and
+#      PostgreSQL_LIBRARY_DIR to wherever the library pq (or libpq on Windows) is located.
+# 2) Use CMAKE_INCLUDE_PATH to set a path to <Your Path>/PostgreSQL<-version>. This allows find_path()
+#      to locate PostgreSQL_INCLUDE_DIR via the PATH_SUFFIXES option, e.g. in your CMakeLists.txt file:
+#      set(CMAKE_INCLUDE_PATH ${CMAKE_INCLUDE_PATH} "<Your Path>/include")
+# 3) Set an environment variable called PostgreSQL_ROOT that points to the root of where you have
+#      installed PostgreSQL, e.g. <Your Path>.
+#
+# ----------------------------------------------------------------------------
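+# Example usage (an illustrative sketch; the target name is hypothetical):
+#
+#   find_package(PostgreSQL)
+#   if(PostgreSQL_FOUND)
+#     include_directories(${PostgreSQL_INCLUDE_DIRS})
+#     link_directories(${PostgreSQL_LIBRARY_DIRS})
+#     target_link_libraries(myapp ${PostgreSQL_LIBRARIES})
+#   endif()
+# ----------------------------------------------------------------------------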
+
+set(PostgreSQL_INCLUDE_PATH_DESCRIPTION "top-level directory containing the PostgreSQL include directories, e.g. /usr/local/include/PostgreSQL/8.4 or C:/Program Files/PostgreSQL/8.4/include")
+set(PostgreSQL_INCLUDE_DIR_MESSAGE "Set the PostgreSQL_INCLUDE_DIR cmake cache entry to the ${PostgreSQL_INCLUDE_PATH_DESCRIPTION}")
+set(PostgreSQL_LIBRARY_PATH_DESCRIPTION "top-level directory containing the PostgreSQL libraries.")
+set(PostgreSQL_LIBRARY_DIR_MESSAGE "Set the PostgreSQL_LIBRARY_DIR cmake cache entry to the ${PostgreSQL_LIBRARY_PATH_DESCRIPTION}")
+set(PostgreSQL_ROOT_DIR_MESSAGE "Set the PostgreSQL_ROOT system variable to where PostgreSQL is found on the machine, e.g. C:/Program Files/PostgreSQL/8.4")
+
+
+set(PostgreSQL_KNOWN_VERSIONS ${PostgreSQL_ADDITIONAL_VERSIONS}
+ "9.1" "9.0" "8.4" "8.3" "8.2" "8.1" "8.0")
+
+# Define additional search paths for root directories.
+if ( WIN32 )
+ foreach (suffix ${PostgreSQL_KNOWN_VERSIONS} )
+ set(PostgreSQL_ADDITIONAL_SEARCH_PATHS ${PostgreSQL_ADDITIONAL_SEARCH_PATHS} "C:/Program Files/PostgreSQL/${suffix}" )
+ endforeach()
+else()
+ set(PostgreSQL_ADDITIONAL_SEARCH_PATHS ${PostgreSQL_ADDITIONAL_SEARCH_PATHS} "/Library/PostgreSQL/*")
+endif()
+set( PostgreSQL_ROOT_DIRECTORIES
+ ENV PostgreSQL_ROOT
+ ${PostgreSQL_ROOT}
+ ${PostgreSQL_ADDITIONAL_SEARCH_PATHS}
+)
+
+#
+# Look for an installation.
+#
+find_path(PostgreSQL_INCLUDE_DIR
+ NAMES libpq-fe.h
+ PATHS
+ # Look in other places.
+ ${PostgreSQL_ROOT_DIRECTORIES}
+ PATH_SUFFIXES
+ pgsql
+ postgresql
+ include
+ # Help the user find it if we cannot.
+ DOC "The ${PostgreSQL_INCLUDE_DIR_MESSAGE}"
+)
+
+# The PostgreSQL library.
+set (PostgreSQL_LIBRARY_TO_FIND pq)
+# Setting some more prefixes for the library
+set (PostgreSQL_LIB_PREFIX "")
+if ( WIN32 )
+ set (PostgreSQL_LIB_PREFIX ${PostgreSQL_LIB_PREFIX} "lib")
+ set ( PostgreSQL_LIBRARY_TO_FIND ${PostgreSQL_LIB_PREFIX}${PostgreSQL_LIBRARY_TO_FIND})
+endif()
+
+find_library( PostgreSQL_LIBRARY
+ NAMES ${PostgreSQL_LIBRARY_TO_FIND}
+ PATHS
+ ${PostgreSQL_ROOT_DIRECTORIES}
+ PATH_SUFFIXES
+ lib
+)
+get_filename_component(PostgreSQL_LIBRARY_DIR ${PostgreSQL_LIBRARY} PATH)
+
+if (PostgreSQL_INCLUDE_DIR AND EXISTS "${PostgreSQL_INCLUDE_DIR}/pg_config.h")
+ file(STRINGS "${PostgreSQL_INCLUDE_DIR}/pg_config.h" pgsql_version_str
+ REGEX "^#define[\t ]+PG_VERSION[\t ]+\".*\"")
+
+ string(REGEX REPLACE "^#define[\t ]+PG_VERSION[\t ]+\"([^\"]*)\".*" "\\1"
+ PostgreSQL_VERSION_STRING "${pgsql_version_str}")
+ set(pgsql_version_str "")
+endif()
+
+# Did we find anything?
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(PostgreSQL DEFAULT_MSG
+ PostgreSQL_LIBRARY PostgreSQL_INCLUDE_DIR)
+set( PostgreSQL_FOUND ${POSTGRESQL_FOUND})
+
+# Now try to get the include and library path.
+if(PostgreSQL_FOUND)
+
+ set(PostgreSQL_INCLUDE_DIRS ${PostgreSQL_INCLUDE_DIR} )
+ set(PostgreSQL_LIBRARY_DIRS ${PostgreSQL_LIBRARY_DIR} )
+ set(PostgreSQL_LIBRARIES ${PostgreSQL_LIBRARY_TO_FIND})
+
+ #message("Final PostgreSQL include dir: ${PostgreSQL_INCLUDE_DIRS}")
+ #message("Final PostgreSQL library dir: ${PostgreSQL_LIBRARY_DIRS}")
+ #message("Final PostgreSQL libraries: ${PostgreSQL_LIBRARIES}")
+endif()
+
+mark_as_advanced(PostgreSQL_INCLUDE_DIR PostgreSQL_LIBRARY )
diff --git a/third-party/cmake/FindTermcap.cmake b/third-party/cmake/FindTermcap.cmake
new file mode 100644
index 0000000..ba7c97b
--- /dev/null
+++ b/third-party/cmake/FindTermcap.cmake
@@ -0,0 +1,68 @@
+# Copyright (c) 2014, Matthias Vallentin
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# Tries to find termcap headers and libraries
+#
+# Usage of this module as follows:
+#
+# find_package(Termcap)
+#
+# Variables used by this module; they can change the default behaviour and need
+# to be set before calling find_package:
+#
+# TERMCAP_ROOT_DIR Set this variable to the root installation of
+# termcap if the module has problems finding
+# the proper installation path.
+#
+# Variables defined by this module:
+#
+# TERMCAP_FOUND System has Termcap libs/headers
+# TERMCAP_LIBRARIES The Termcap libraries
+# TERMCAP_INCLUDE_DIR The location of Termcap headers
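+#
+# Example usage (an illustrative sketch; the target name is hypothetical):
+#
+#  find_package(Termcap)
+#  if(TERMCAP_FOUND)
+#    include_directories(${TERMCAP_INCLUDE_DIR})
+#    target_link_libraries(myshell ${TERMCAP_LIBRARIES})
+#  endif()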
+
+find_path(TERMCAP_INCLUDE_DIR
+ NAMES termcap.h
+ HINTS ${TERMCAP_ROOT_DIR}/include)
+
+find_library(TERMCAP_LIBRARIES
+ NAMES termcap ncurses
+ HINTS ${TERMCAP_ROOT_DIR}/lib)
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(
+ Termcap
+ DEFAULT_MSG
+ TERMCAP_LIBRARIES
+ TERMCAP_INCLUDE_DIR)
+
+mark_as_advanced(
+ TERMCAP_ROOT_DIR
+ TERMCAP_LIBRARIES
+ TERMCAP_INCLUDE_DIR
+ )
diff --git a/third-party/cmake/GNUInstallDirs.cmake b/third-party/cmake/GNUInstallDirs.cmake
new file mode 100644
index 0000000..60e9099
--- /dev/null
+++ b/third-party/cmake/GNUInstallDirs.cmake
@@ -0,0 +1,245 @@
+#.rst:
+# GNUInstallDirs
+# --------------
+#
+# Define GNU standard installation directories
+#
+# Provides install directory variables as defined for GNU software:
+#
+# ::
+#
+# http://www.gnu.org/prep/standards/html_node/Directory-Variables.html
+#
+# Inclusion of this module defines the following variables:
+#
+# ::
+#
+# CMAKE_INSTALL_<dir> - destination for files of a given type
+# CMAKE_INSTALL_FULL_<dir> - corresponding absolute path
+#
+# where <dir> is one of:
+#
+# ::
+#
+# BINDIR - user executables (bin)
+# SBINDIR - system admin executables (sbin)
+# LIBEXECDIR - program executables (libexec)
+# SYSCONFDIR - read-only single-machine data (etc)
+# SHAREDSTATEDIR - modifiable architecture-independent data (com)
+# LOCALSTATEDIR - modifiable single-machine data (var)
+# LIBDIR - object code libraries (lib or lib64 or lib/<multiarch-tuple> on Debian)
+# INCLUDEDIR - C header files (include)
+# OLDINCLUDEDIR - C header files for non-gcc (/usr/include)
+# DATAROOTDIR - read-only architecture-independent data root (share)
+# DATADIR - read-only architecture-independent data (DATAROOTDIR)
+# INFODIR - info documentation (DATAROOTDIR/info)
+# LOCALEDIR - locale-dependent data (DATAROOTDIR/locale)
+# MANDIR - man documentation (DATAROOTDIR/man)
+# DOCDIR - documentation root (DATAROOTDIR/doc/PROJECT_NAME)
+#
+# Each CMAKE_INSTALL_<dir> value may be passed to the DESTINATION
+# options of install() commands for the corresponding file type. If the
+# includer does not define a value the above-shown default will be used
+# and the value will appear in the cache for editing by the user. Each
+# CMAKE_INSTALL_FULL_<dir> value contains an absolute path constructed
+# from the corresponding destination by prepending (if necessary) the
+# value of CMAKE_INSTALL_PREFIX.
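+#
+# Example (an illustrative sketch; target and file names are hypothetical):
+#
+# ::
+#
+#   include(GNUInstallDirs)
+#   install(TARGETS mylib LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
+#   install(FILES mylib.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})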
+
+#=============================================================================
+# Copyright 2011 Nikita Krupen'ko <krnekit@gmail.com>
+# Copyright 2011 Kitware, Inc.
+#
+# Distributed under the OSI-approved BSD License (the "License");
+# see accompanying file Copyright.txt for details.
+#
+# This software is distributed WITHOUT ANY WARRANTY; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the License for more information.
+#=============================================================================
+# (To distribute this file outside of CMake, substitute the full
+# License text for the above reference.)
+
+# Installation directories
+#
+if(NOT DEFINED CMAKE_INSTALL_BINDIR)
+ set(CMAKE_INSTALL_BINDIR "bin" CACHE PATH "user executables (bin)")
+endif()
+
+if(NOT DEFINED CMAKE_INSTALL_SBINDIR)
+ set(CMAKE_INSTALL_SBINDIR "sbin" CACHE PATH "system admin executables (sbin)")
+endif()
+
+if(NOT DEFINED CMAKE_INSTALL_LIBEXECDIR)
+ set(CMAKE_INSTALL_LIBEXECDIR "libexec" CACHE PATH "program executables (libexec)")
+endif()
+
+if(NOT DEFINED CMAKE_INSTALL_SYSCONFDIR)
+ set(CMAKE_INSTALL_SYSCONFDIR "etc" CACHE PATH "read-only single-machine data (etc)")
+endif()
+
+if(NOT DEFINED CMAKE_INSTALL_SHAREDSTATEDIR)
+ set(CMAKE_INSTALL_SHAREDSTATEDIR "com" CACHE PATH "modifiable architecture-independent data (com)")
+endif()
+
+if(NOT DEFINED CMAKE_INSTALL_LOCALSTATEDIR)
+ set(CMAKE_INSTALL_LOCALSTATEDIR "var" CACHE PATH "modifiable single-machine data (var)")
+endif()
+
+if(NOT DEFINED CMAKE_INSTALL_LIBDIR)
+ set(_LIBDIR_DEFAULT "lib")
+ # Override this default 'lib' with 'lib64' iff:
+ # - we are on Linux system but NOT cross-compiling
+ # - we are NOT on debian
+ # - we are on a 64 bits system
+ # reason is: amd64 ABI: http://www.x86-64.org/documentation/abi.pdf
+ # For Debian with multiarch, use 'lib/${CMAKE_LIBRARY_ARCHITECTURE}' if
+  # CMAKE_LIBRARY_ARCHITECTURE is set (which contains e.g. "i386-linux-gnu")
+  # and CMAKE_INSTALL_PREFIX is "/usr".
+ # See http://wiki.debian.org/Multiarch
+ if(DEFINED _GNUInstallDirs_LAST_CMAKE_INSTALL_PREFIX)
+ set(__LAST_LIBDIR_DEFAULT "lib")
+ # __LAST_LIBDIR_DEFAULT is the default value that we compute from
+ # _GNUInstallDirs_LAST_CMAKE_INSTALL_PREFIX, not a cache entry for
+ # the value that was last used as the default.
+ # This value is used to figure out whether the user changed the
+ # CMAKE_INSTALL_LIBDIR value manually, or if the value was the
+ # default one. When CMAKE_INSTALL_PREFIX changes, the value is
+ # updated to the new default, unless the user explicitly changed it.
+ endif()
+ if(CMAKE_SYSTEM_NAME MATCHES "^(Linux|kFreeBSD|GNU)$"
+ AND NOT CMAKE_CROSSCOMPILING)
+ if (EXISTS "/etc/debian_version") # is this a debian system ?
+ if(CMAKE_LIBRARY_ARCHITECTURE)
+ if("${CMAKE_INSTALL_PREFIX}" MATCHES "^/usr/?$")
+ set(_LIBDIR_DEFAULT "lib/${CMAKE_LIBRARY_ARCHITECTURE}")
+ endif()
+ if(DEFINED _GNUInstallDirs_LAST_CMAKE_INSTALL_PREFIX
+ AND "${_GNUInstallDirs_LAST_CMAKE_INSTALL_PREFIX}" MATCHES "^/usr/?$")
+ set(__LAST_LIBDIR_DEFAULT "lib/${CMAKE_LIBRARY_ARCHITECTURE}")
+ endif()
+ endif()
+ else() # not debian, rely on CMAKE_SIZEOF_VOID_P:
+ if(NOT DEFINED CMAKE_SIZEOF_VOID_P)
+ message(AUTHOR_WARNING
+ "Unable to determine default CMAKE_INSTALL_LIBDIR directory because no target architecture is known. "
+ "Please enable at least one language before including GNUInstallDirs.")
+ else()
+ if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
+ set(_LIBDIR_DEFAULT "lib64")
+ if(DEFINED _GNUInstallDirs_LAST_CMAKE_INSTALL_PREFIX)
+ set(__LAST_LIBDIR_DEFAULT "lib64")
+ endif()
+ endif()
+ endif()
+ endif()
+ endif()
+ if(NOT DEFINED CMAKE_INSTALL_LIBDIR)
+ set(CMAKE_INSTALL_LIBDIR "${_LIBDIR_DEFAULT}" CACHE PATH "object code libraries (${_LIBDIR_DEFAULT})")
+ elseif(DEFINED __LAST_LIBDIR_DEFAULT
+ AND "${__LAST_LIBDIR_DEFAULT}" STREQUAL "${CMAKE_INSTALL_LIBDIR}")
+ set_property(CACHE CMAKE_INSTALL_LIBDIR PROPERTY VALUE "${_LIBDIR_DEFAULT}")
+ endif()
+endif()
+# Save for next run
+set(_GNUInstallDirs_LAST_CMAKE_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}" CACHE INTERNAL "CMAKE_INSTALL_PREFIX during last run")
+
+
+if(NOT DEFINED CMAKE_INSTALL_INCLUDEDIR)
+ set(CMAKE_INSTALL_INCLUDEDIR "include" CACHE PATH "C header files (include)")
+endif()
+
+if(NOT DEFINED CMAKE_INSTALL_OLDINCLUDEDIR)
+ set(CMAKE_INSTALL_OLDINCLUDEDIR "/usr/include" CACHE PATH "C header files for non-gcc (/usr/include)")
+endif()
+
+if(NOT DEFINED CMAKE_INSTALL_DATAROOTDIR)
+ set(CMAKE_INSTALL_DATAROOTDIR "share" CACHE PATH "read-only architecture-independent data root (share)")
+endif()
+
+#-----------------------------------------------------------------------------
+# Values whose defaults are relative to DATAROOTDIR. Store empty values in
+# the cache and store the defaults in local variables if the cache values are
+# not set explicitly. This auto-updates the defaults as DATAROOTDIR changes.
+
+if(NOT CMAKE_INSTALL_DATADIR)
+ set(CMAKE_INSTALL_DATADIR "" CACHE PATH "read-only architecture-independent data (DATAROOTDIR)")
+ set(CMAKE_INSTALL_DATADIR "${CMAKE_INSTALL_DATAROOTDIR}")
+endif()
+
+if(CMAKE_SYSTEM_NAME MATCHES "(DragonFly|FreeBSD|OpenBSD|NetBSD)")
+ if(NOT CMAKE_INSTALL_INFODIR)
+ set(CMAKE_INSTALL_INFODIR "" CACHE PATH "info documentation (info)")
+ set(CMAKE_INSTALL_INFODIR "info")
+ endif()
+
+  if(NOT CMAKE_INSTALL_MANDIR)
+ set(CMAKE_INSTALL_MANDIR "" CACHE PATH "man documentation (man)")
+ set(CMAKE_INSTALL_MANDIR "man")
+ endif()
+else()
+ if(NOT CMAKE_INSTALL_INFODIR)
+ set(CMAKE_INSTALL_INFODIR "" CACHE PATH "info documentation (DATAROOTDIR/info)")
+ set(CMAKE_INSTALL_INFODIR "${CMAKE_INSTALL_DATAROOTDIR}/info")
+ endif()
+
+  if(NOT CMAKE_INSTALL_MANDIR)
+ set(CMAKE_INSTALL_MANDIR "" CACHE PATH "man documentation (DATAROOTDIR/man)")
+ set(CMAKE_INSTALL_MANDIR "${CMAKE_INSTALL_DATAROOTDIR}/man")
+ endif()
+endif()
+
+if(NOT CMAKE_INSTALL_LOCALEDIR)
+ set(CMAKE_INSTALL_LOCALEDIR "" CACHE PATH "locale-dependent data (DATAROOTDIR/locale)")
+ set(CMAKE_INSTALL_LOCALEDIR "${CMAKE_INSTALL_DATAROOTDIR}/locale")
+endif()
+
+if(NOT CMAKE_INSTALL_DOCDIR)
+ set(CMAKE_INSTALL_DOCDIR "" CACHE PATH "documentation root (DATAROOTDIR/doc/PROJECT_NAME)")
+ set(CMAKE_INSTALL_DOCDIR "${CMAKE_INSTALL_DATAROOTDIR}/doc/${PROJECT_NAME}")
+endif()
+
+#-----------------------------------------------------------------------------
+
+mark_as_advanced(
+ CMAKE_INSTALL_BINDIR
+ CMAKE_INSTALL_SBINDIR
+ CMAKE_INSTALL_LIBEXECDIR
+ CMAKE_INSTALL_SYSCONFDIR
+ CMAKE_INSTALL_SHAREDSTATEDIR
+ CMAKE_INSTALL_LOCALSTATEDIR
+ CMAKE_INSTALL_LIBDIR
+ CMAKE_INSTALL_INCLUDEDIR
+ CMAKE_INSTALL_OLDINCLUDEDIR
+ CMAKE_INSTALL_DATAROOTDIR
+ CMAKE_INSTALL_DATADIR
+ CMAKE_INSTALL_INFODIR
+ CMAKE_INSTALL_LOCALEDIR
+ CMAKE_INSTALL_MANDIR
+ CMAKE_INSTALL_DOCDIR
+ )
+
+# Result directories
+#
+foreach(dir
+ BINDIR
+ SBINDIR
+ LIBEXECDIR
+ SYSCONFDIR
+ SHAREDSTATEDIR
+ LOCALSTATEDIR
+ LIBDIR
+ INCLUDEDIR
+ OLDINCLUDEDIR
+ DATAROOTDIR
+ DATADIR
+ INFODIR
+ LOCALEDIR
+ MANDIR
+ DOCDIR
+ )
+ if(NOT IS_ABSOLUTE ${CMAKE_INSTALL_${dir}})
+ set(CMAKE_INSTALL_FULL_${dir} "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_${dir}}")
+ else()
+ set(CMAKE_INSTALL_FULL_${dir} "${CMAKE_INSTALL_${dir}}")
+ endif()
+endforeach()
diff --git a/third-party/cmake/GetForceIncludeDefinitions.cmake b/third-party/cmake/GetForceIncludeDefinitions.cmake
new file mode 100644
index 0000000..efcca04
--- /dev/null
+++ b/third-party/cmake/GetForceIncludeDefinitions.cmake
@@ -0,0 +1,44 @@
+# - Get the platform-appropriate flags to add to force inclusion of a file
+#
+# The most common use of this is to use a generated config.h-type file
+# placed out of the source tree in all files.
+#
+# get_force_include_definitions(var forcedincludefiles...) -
+# where var is the name of your desired output variable, and everything
+# else is a source file to forcibly include.
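+#
+# Example (an illustrative sketch; the variable and file names are hypothetical):
+#   get_force_include_definitions(forceinclude_flags "${CMAKE_BINARY_DIR}/config.h")
+#   # forceinclude_flags now holds e.g. -include "<path>/config.h" (or /FI on MSVC),
+#   # which can then be appended to a target's compile flags.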
+#
+# Original Author:
+# 2009-2010 Ryan Pavlik <rpavlik@iastate.edu> <abiryan@ryand.net>
+# http://academic.cleardefinition.com
+# Iowa State University HCI Graduate Program/VRAC
+#
+# Copyright Iowa State University 2009-2010.
+# Distributed under the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt)
+
+if(__get_force_include_definitions)
+ return()
+endif()
+set(__get_force_include_definitions YES)
+
+function(get_force_include_definitions var)
+ set(_flagprefix)
+ if(CMAKE_COMPILER_IS_GNUCXX)
+ set(_flag "-include")
+ elseif(MSVC)
+ set(_flag "/FI")
+ else()
+ message(SEND_ERROR "You don't seem to be using MSVC or GCC, but")
+ message(SEND_ERROR "the project called get_force_include_definitions.")
+ message(SEND_ERROR "Contact this project with the name of your")
+ message(FATAL_ERROR "compiler and preferably the flag to force includes")
+ endif()
+
+ set(_out)
+ foreach(_item ${ARGN})
+ list(APPEND _out "${_flag} \"${_item}\"")
+ endforeach()
+ set(${var} "${_out}" PARENT_SCOPE)
+endfunction()
diff --git a/third-party/cmake/GetGitRevisionDescription.cmake b/third-party/cmake/GetGitRevisionDescription.cmake
new file mode 100644
index 0000000..4fbd90d
--- /dev/null
+++ b/third-party/cmake/GetGitRevisionDescription.cmake
@@ -0,0 +1,284 @@
+# - Returns a version string from Git
+#
+# These functions force a re-configure on each git commit so that you can
+# trust the values of the variables in your build system.
+#
+# get_git_head_revision(<refspecvar> <hashvar> [ALLOW_LOOKING_ABOVE_CMAKE_SOURCE_DIR])
+#
+# Returns the refspec and sha hash of the current head revision
+#
+# git_describe(<var> [<additional arguments to git describe> ...])
+#
+# Returns the results of git describe on the source tree, adjusting
+# the output so that it tests false if an error occurs.
+#
+# git_describe_working_tree(<var> [<additional arguments to git describe> ...])
+#
+# Returns the results of git describe on the working tree (--dirty option),
+# adjusting the output so that it tests false if an error occurs.
+#
+# git_get_exact_tag(<var> [<additional arguments to git describe> ...])
+#
+# Returns the results of git describe --exact-match on the source tree,
+# adjusting the output so that it tests false if there was no exact
+# matching tag.
+#
+# git_local_changes(<var>)
+#
+# Returns either "CLEAN" or "DIRTY" with respect to uncommitted changes.
+# Uses the return code of "git diff-index --quiet HEAD --".
+# Does not regard untracked files.
+#
+# Requires CMake 2.6 or newer (uses the 'function' command)
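+#
+# Example (an illustrative sketch; the variable names are hypothetical, and
+# this module's directory is assumed to be on CMAKE_MODULE_PATH):
+#
+#   include(GetGitRevisionDescription)
+#   get_git_head_revision(GIT_REFSPEC GIT_SHA1)
+#   git_describe(GIT_VERSION)
+#   message(STATUS "Building ${GIT_VERSION} (${GIT_SHA1})")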
+#
+# Original Author:
+# 2009-2020 Ryan Pavlik <ryan.pavlik@gmail.com> <abiryan@ryand.net>
+# http://academic.cleardefinition.com
+#
+# Copyright 2009-2013, Iowa State University.
+# Copyright 2013-2020, Ryan Pavlik
+# Copyright 2013-2020, Contributors
+# SPDX-License-Identifier: BSL-1.0
+# Distributed under the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt)
+
+if(__get_git_revision_description)
+ return()
+endif()
+set(__get_git_revision_description YES)
+
+# We must run the following at "include" time, not at function call time,
+# to find the path to this module rather than the path to a calling list file
+get_filename_component(_gitdescmoddir ${CMAKE_CURRENT_LIST_FILE} PATH)
+
+# Function _git_find_closest_git_dir finds the next closest .git directory
+# that is part of any directory in the path defined by _start_dir.
+# The result is returned in the parent scope variable whose name is passed
+# as variable _git_dir_var. If no .git directory can be found, the
+# function returns an empty string via _git_dir_var.
+#
+# Example: given the path C:/bla/foo/bar, and assuming C:/bla/.git exists while
+# neither foo nor bar contains a file/directory named .git, this will return
+# C:/bla/.git
+#
+function(_git_find_closest_git_dir _start_dir _git_dir_var)
+ set(cur_dir "${_start_dir}")
+ set(git_dir "${_start_dir}/.git")
+ while(NOT EXISTS "${git_dir}")
+ # .git dir not found, search parent directories
+ set(git_previous_parent "${cur_dir}")
+ get_filename_component(cur_dir "${cur_dir}" DIRECTORY)
+ if(cur_dir STREQUAL git_previous_parent)
+ # We have reached the root directory, we are not in git
+ set(${_git_dir_var}
+ ""
+ PARENT_SCOPE)
+ return()
+ endif()
+ set(git_dir "${cur_dir}/.git")
+ endwhile()
+ set(${_git_dir_var}
+ "${git_dir}"
+ PARENT_SCOPE)
+endfunction()
+
+function(get_git_head_revision _refspecvar _hashvar)
+ _git_find_closest_git_dir("${CMAKE_CURRENT_SOURCE_DIR}" GIT_DIR)
+
+ if("${ARGN}" STREQUAL "ALLOW_LOOKING_ABOVE_CMAKE_SOURCE_DIR")
+ set(ALLOW_LOOKING_ABOVE_CMAKE_SOURCE_DIR TRUE)
+ else()
+ set(ALLOW_LOOKING_ABOVE_CMAKE_SOURCE_DIR FALSE)
+ endif()
+ if(NOT "${GIT_DIR}" STREQUAL "")
+ file(RELATIVE_PATH _relative_to_source_dir "${CMAKE_SOURCE_DIR}"
+ "${GIT_DIR}")
+ if("${_relative_to_source_dir}" MATCHES "[.][.]" AND NOT ALLOW_LOOKING_ABOVE_CMAKE_SOURCE_DIR)
+ # We've gone above the CMake root dir.
+ set(GIT_DIR "")
+ endif()
+ endif()
+ if("${GIT_DIR}" STREQUAL "")
+ set(${_refspecvar}
+ "GITDIR-NOTFOUND"
+ PARENT_SCOPE)
+ set(${_hashvar}
+ "GITDIR-NOTFOUND"
+ PARENT_SCOPE)
+ return()
+ endif()
+
+ # Check if the current source dir is a git submodule or a worktree.
+ # In both cases .git is a file instead of a directory.
+ #
+ if(NOT IS_DIRECTORY ${GIT_DIR})
+            # The following git command will return a non-empty string that
+ # points to the super project working tree if the current
+ # source dir is inside a git submodule.
+ # Otherwise the command will return an empty string.
+ #
+ execute_process(
+ COMMAND "${GIT_EXECUTABLE}" rev-parse
+ --show-superproject-working-tree
+ WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
+ OUTPUT_VARIABLE out
+ ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if(NOT "${out}" STREQUAL "")
+            # out is non-empty, so GIT_DIR/CMAKE_CURRENT_SOURCE_DIR is inside a git submodule
+ file(READ ${GIT_DIR} submodule)
+ string(REGEX REPLACE "gitdir: (.*)$" "\\1" GIT_DIR_RELATIVE
+ ${submodule})
+ string(STRIP ${GIT_DIR_RELATIVE} GIT_DIR_RELATIVE)
+ get_filename_component(SUBMODULE_DIR ${GIT_DIR} PATH)
+ get_filename_component(GIT_DIR ${SUBMODULE_DIR}/${GIT_DIR_RELATIVE}
+ ABSOLUTE)
+ set(HEAD_SOURCE_FILE "${GIT_DIR}/HEAD")
+ else()
+ # GIT_DIR/CMAKE_CURRENT_SOURCE_DIR is in a worktree
+ file(READ ${GIT_DIR} worktree_ref)
+ # The .git directory contains a path to the worktree information directory
+ # inside the parent git repo of the worktree.
+ #
+ string(REGEX REPLACE "gitdir: (.*)$" "\\1" git_worktree_dir
+ ${worktree_ref})
+ string(STRIP ${git_worktree_dir} git_worktree_dir)
+ _git_find_closest_git_dir("${git_worktree_dir}" GIT_DIR)
+ set(HEAD_SOURCE_FILE "${git_worktree_dir}/HEAD")
+ endif()
+ else()
+ set(HEAD_SOURCE_FILE "${GIT_DIR}/HEAD")
+ endif()
+ set(GIT_DATA "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/git-data")
+ if(NOT EXISTS "${GIT_DATA}")
+ file(MAKE_DIRECTORY "${GIT_DATA}")
+ endif()
+
+ if(NOT EXISTS "${HEAD_SOURCE_FILE}")
+ return()
+ endif()
+ set(HEAD_FILE "${GIT_DATA}/HEAD")
+ configure_file("${HEAD_SOURCE_FILE}" "${HEAD_FILE}" COPYONLY)
+
+ configure_file("${_gitdescmoddir}/GetGitRevisionDescription.cmake.in"
+ "${GIT_DATA}/grabRef.cmake" @ONLY)
+ include("${GIT_DATA}/grabRef.cmake")
+
+ set(${_refspecvar}
+ "${HEAD_REF}"
+ PARENT_SCOPE)
+ set(${_hashvar}
+ "${HEAD_HASH}"
+ PARENT_SCOPE)
+endfunction()
+
+function(git_describe _var)
+ if(NOT GIT_FOUND)
+ find_package(Git QUIET)
+ endif()
+ get_git_head_revision(refspec hash)
+ if(NOT GIT_FOUND)
+ set(${_var}
+ "GIT-NOTFOUND"
+ PARENT_SCOPE)
+ return()
+ endif()
+ if(NOT hash)
+ set(${_var}
+ "HEAD-HASH-NOTFOUND"
+ PARENT_SCOPE)
+ return()
+ endif()
+
+ # TODO sanitize
+ #if((${ARGN}" MATCHES "&&") OR
+    #if(("${ARGN}" MATCHES "&&") OR
+ # (ARGN MATCHES "\\;"))
+ # message("Please report the following error to the project!")
+ # message(FATAL_ERROR "Looks like someone's doing something nefarious with git_describe! Passed arguments ${ARGN}")
+ #endif()
+
+ #message(STATUS "Arguments to execute_process: ${ARGN}")
+
+ execute_process(
+ COMMAND "${GIT_EXECUTABLE}" describe --tags --always ${hash} ${ARGN}
+ WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
+ RESULT_VARIABLE res
+ OUTPUT_VARIABLE out
+ ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if(NOT res EQUAL 0)
+ set(out "${out}-${res}-NOTFOUND")
+ endif()
+
+ set(${_var}
+ "${out}"
+ PARENT_SCOPE)
+endfunction()
+
+function(git_describe_working_tree _var)
+ if(NOT GIT_FOUND)
+ find_package(Git QUIET)
+ endif()
+ if(NOT GIT_FOUND)
+ set(${_var}
+ "GIT-NOTFOUND"
+ PARENT_SCOPE)
+ return()
+ endif()
+
+ execute_process(
+ COMMAND "${GIT_EXECUTABLE}" describe --dirty ${ARGN}
+ WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
+ RESULT_VARIABLE res
+ OUTPUT_VARIABLE out
+ ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if(NOT res EQUAL 0)
+ set(out "${out}-${res}-NOTFOUND")
+ endif()
+
+ set(${_var}
+ "${out}"
+ PARENT_SCOPE)
+endfunction()
+
+function(git_get_exact_tag _var)
+ git_describe(out --exact-match ${ARGN})
+ set(${_var}
+ "${out}"
+ PARENT_SCOPE)
+endfunction()
+
+function(git_local_changes _var)
+ if(NOT GIT_FOUND)
+ find_package(Git QUIET)
+ endif()
+ get_git_head_revision(refspec hash)
+ if(NOT GIT_FOUND)
+ set(${_var}
+ "GIT-NOTFOUND"
+ PARENT_SCOPE)
+ return()
+ endif()
+ if(NOT hash)
+ set(${_var}
+ "HEAD-HASH-NOTFOUND"
+ PARENT_SCOPE)
+ return()
+ endif()
+
+ execute_process(
+ COMMAND "${GIT_EXECUTABLE}" diff-index --quiet HEAD --
+ WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
+ RESULT_VARIABLE res
+ OUTPUT_VARIABLE out
+ ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if(res EQUAL 0)
+ set(${_var}
+ "CLEAN"
+ PARENT_SCOPE)
+ else()
+ set(${_var}
+ "DIRTY"
+ PARENT_SCOPE)
+ endif()
+endfunction()
diff --git a/third-party/cmake/GetGitRevisionDescription.cmake.in b/third-party/cmake/GetGitRevisionDescription.cmake.in
new file mode 100644
index 0000000..116efc4
--- /dev/null
+++ b/third-party/cmake/GetGitRevisionDescription.cmake.in
@@ -0,0 +1,43 @@
+#
+# Internal file for GetGitRevisionDescription.cmake
+#
+# Requires CMake 2.6 or newer (uses the 'function' command)
+#
+# Original Author:
+# 2009-2010 Ryan Pavlik <rpavlik@iastate.edu> <abiryan@ryand.net>
+# http://academic.cleardefinition.com
+# Iowa State University HCI Graduate Program/VRAC
+#
+# Copyright 2009-2012, Iowa State University
+# Copyright 2011-2015, Contributors
+# Distributed under the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt)
+# SPDX-License-Identifier: BSL-1.0
+
+set(HEAD_HASH)
+
+file(READ "@HEAD_FILE@" HEAD_CONTENTS LIMIT 1024)
+
+string(STRIP "${HEAD_CONTENTS}" HEAD_CONTENTS)
+if(HEAD_CONTENTS MATCHES "ref")
+ # named branch
+ string(REPLACE "ref: " "" HEAD_REF "${HEAD_CONTENTS}")
+ if(EXISTS "@GIT_DIR@/${HEAD_REF}")
+ configure_file("@GIT_DIR@/${HEAD_REF}" "@GIT_DATA@/head-ref" COPYONLY)
+ else()
+ configure_file("@GIT_DIR@/packed-refs" "@GIT_DATA@/packed-refs" COPYONLY)
+ file(READ "@GIT_DATA@/packed-refs" PACKED_REFS)
+ if(${PACKED_REFS} MATCHES "([0-9a-z]*) ${HEAD_REF}")
+ set(HEAD_HASH "${CMAKE_MATCH_1}")
+ endif()
+ endif()
+else()
+ # detached HEAD
+ configure_file("@GIT_DIR@/HEAD" "@GIT_DATA@/head-ref" COPYONLY)
+endif()
+
+if(NOT HEAD_HASH)
+ file(READ "@GIT_DATA@/head-ref" HEAD_HASH LIMIT 1024)
+ string(STRIP "${HEAD_HASH}" HEAD_HASH)
+endif()
diff --git a/third-party/execvpe/CMakeLists.txt b/third-party/execvpe/CMakeLists.txt
new file mode 100644
index 0000000..4dda40a
--- /dev/null
+++ b/third-party/execvpe/CMakeLists.txt
@@ -0,0 +1,12 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+set(execvpe_SOURCES
+ execvpe.c execvpe.h
+)
+
+add_library(execvpe OBJECT ${execvpe_SOURCES})
+
+set_target_properties (
+ execvpe PROPERTIES
+ FOLDER Lib
+)
diff --git a/third-party/execvpe/execvpe.c b/third-party/execvpe/execvpe.c
new file mode 100644
index 0000000..d985df2
--- /dev/null
+++ b/third-party/execvpe/execvpe.c
@@ -0,0 +1,208 @@
+/* Copyright (C) 1991,92, 1995-99, 2002, 2004, 2005, 2007, 2009
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
+#include <alloca.h>
+#endif /* !__FreeBSD__ && !__OpenBSD__ && !__NetBSD__ */
+#include <unistd.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include "execvpe.h"
+
+#if !defined(_MSC_VER) && !defined(HAVE_EXECVPE)
+
+/* The file is accessible but it is not an executable file. Invoke
+ the shell to interpret it as a script. */
+static void
+scripts_argv (const char *file, char *const argv[], int argc, char **new_argv)
+{
+ /* Construct an argument list for the shell. */
+ new_argv[0] = (char *) "/bin/sh";
+ new_argv[1] = (char *) file;
+ while (argc > 1)
+ {
+ new_argv[argc] = argv[argc - 1];
+ --argc;
+ }
+}
+
+
+/* Execute FILE, searching in the `PATH' environment variable if it contains
+ no slashes, with arguments ARGV and environment from ENVP. */
+int
+icinga2_execvpe (const char *file, char *const argv[], char *const envp[])
+{
+ if (*file == '\0')
+ {
+ /* We check the simple case first. */
+ errno = ENOENT;
+ return -1;
+ }
+
+ if (strchr (file, '/') != NULL)
+ {
+ /* Don't search when it contains a slash. */
+ execve (file, argv, envp);
+
+ if (errno == ENOEXEC)
+ {
+ /* Count the arguments. */
+ int argc = 0;
+ while (argv[argc++])
+ ;
+ size_t len = (argc + 1) * sizeof (char *);
+ char **script_argv;
+ void *ptr = NULL;
+ script_argv = alloca (len);
+
+ if (script_argv != NULL)
+ {
+ scripts_argv (file, argv, argc, script_argv);
+ execve (script_argv[0], script_argv, envp);
+
+ free (ptr);
+ }
+ }
+ }
+ else
+ {
+ size_t pathlen;
+ size_t alloclen = 0;
+ char *path = getenv ("PATH");
+ if (path == NULL)
+ {
+ pathlen = confstr (_CS_PATH, (char *) NULL, 0);
+ alloclen = pathlen + 1;
+ }
+ else
+ pathlen = strlen (path);
+
+ size_t len = strlen (file) + 1;
+ alloclen += pathlen + len + 1;
+
+ char *name;
+ name = alloca (alloclen);
+
+ if (path == NULL)
+ {
+ /* There is no `PATH' in the environment.
+ The default search path is the current directory
+ followed by the path `confstr' returns for `_CS_PATH'. */
+ path = name + pathlen + len + 1;
+ path[0] = ':';
+ (void) confstr (_CS_PATH, path + 1, pathlen);
+ }
+
+ /* Copy the file name at the top. */
+ name = (char *) memcpy (name + pathlen + 1, file, len);
+ /* And add the slash. */
+ *--name = '/';
+
+ char **script_argv = NULL;
+ bool got_eacces = false;
+ char *p = path;
+ do
+ {
+ char *startp;
+
+ path = p;
+ p = strchr (path, ':');
+ if (!p)
+ p = path + strlen(path);
+
+ if (p == path)
+ /* Two adjacent colons, or a colon at the beginning or the end
+ of `PATH' means to search the current directory. */
+ startp = name + 1;
+ else
+ startp = (char *) memcpy (name - (p - path), path, p - path);
+
+ /* Try to execute this name. If it works, execve will not return. */
+ execve (startp, argv, envp);
+
+ if (errno == ENOEXEC)
+ {
+ if (script_argv == NULL)
+ {
+ /* Count the arguments. */
+ int argc = 0;
+ while (argv[argc++])
+ ;
+ size_t arglen = (argc + 1) * sizeof (char *);
+ script_argv = alloca (arglen);
+ if (script_argv == NULL)
+ {
+ /* A possible EACCES error is not as important as
+ the ENOMEM. */
+ got_eacces = false;
+ break;
+ }
+ scripts_argv (startp, argv, argc, script_argv);
+ }
+
+ execve (script_argv[0], script_argv, envp);
+ }
+
+ switch (errno)
+ {
+ case EACCES:
+          /* Record that we got a `Permission denied' error. If we end
+ up finding no executable we can use, we want to diagnose
+ that we did find one but were denied access. */
+ got_eacces = true;
+ case ENOENT:
+ case ESTALE:
+ case ENOTDIR:
+ /* Those errors indicate the file is missing or not executable
+ by us, in which case we want to just try the next path
+ directory. */
+ case ENODEV:
+ case ETIMEDOUT:
+ /* Some strange filesystems like AFS return even
+ stranger error numbers. They cannot reasonably mean
+ anything else so ignore those, too. */
+ break;
+
+ default:
+ /* Some other error means we found an executable file, but
+ something went wrong executing it; return the error to our
+ caller. */
+ return -1;
+ }
+ }
+ while (*p++ != '\0');
+
+ /* We tried every element and none of them worked. */
+ if (got_eacces)
+ /* At least one failure was due to permissions, so report that
+ error. */
+ errno = EACCES;
+ }
+
+ /* Return the error from the last attempt (probably ENOENT). */
+ return -1;
+}
+
+#endif /* !defined(_MSC_VER) && !defined(HAVE_EXECVPE) */
diff --git a/third-party/execvpe/execvpe.h b/third-party/execvpe/execvpe.h
new file mode 100644
index 0000000..5e32d66
--- /dev/null
+++ b/third-party/execvpe/execvpe.h
@@ -0,0 +1,18 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef EXECVPE_H
+#define EXECVPE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#ifndef _MSC_VER
+int icinga2_execvpe(const char *file, char *const argv[], char *const envp[]);
+#endif /* _MSC_VER */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* EXECVPE_H */
diff --git a/third-party/mmatch/CMakeLists.txt b/third-party/mmatch/CMakeLists.txt
new file mode 100644
index 0000000..f48e073
--- /dev/null
+++ b/third-party/mmatch/CMakeLists.txt
@@ -0,0 +1,12 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+set(mmatch_SOURCES
+ mmatch.c mmatch.h
+)
+
+add_library(mmatch OBJECT ${mmatch_SOURCES})
+
+set_target_properties(
+ mmatch PROPERTIES
+ FOLDER Lib
+)
diff --git a/third-party/mmatch/mmatch.c b/third-party/mmatch/mmatch.c
new file mode 100644
index 0000000..b68f1bf
--- /dev/null
+++ b/third-party/mmatch/mmatch.c
@@ -0,0 +1,309 @@
+/*
+ * IRC - Internet Relay Chat, common/match.c
+ * Copyright (C) 1990 Jarkko Oikarinen
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 1, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * $Id: Match.cpp,v 1.2 2005/08/15 10:08:50 shroud23 Exp $
+ */
+
+#include <ctype.h>
+#include "mmatch.h"
+
+#define ToLower tolower
+
+/*
+ * mmatch()
+ *
+ * Written by Run (carlo@runaway.xs4all.nl), 25-10-96
+ *
+ *
+ * From: Carlo Wood <carlo@runaway.xs4all.nl>
+ * Message-Id: <199609021026.MAA02393@runaway.xs4all.nl>
+ * Subject: [C-Com] Analysis for `mmatch' (was: gline4 problem)
+ * To: coder-com@mail.undernet.org (coder committee)
+ * Date: Mon, 2 Sep 1996 12:26:01 +0200 (MET DST)
+ *
+ * We need a new function `mmatch(const char *old_mask, const char *new_mask)'
+ * which returns `true' likewise the current `match' (start with copying it),
+ * but which treats '*' and '?' in `new_mask' differently (not "\*" and "\?" !)
+ * as follows: a '*' in `new_mask' does not match a '?' in `old_mask' and
+ * a '?' in `new_mask' does not match a '\?' in `old_mask'.
+ * And ofcourse... a '*' in `new_mask' does not match a '\*' in `old_mask'...
+ * And last but not least, '\?' and '\*' in `new_mask' now become one character.
+ */
+
+#if 0
+int mmatch(const char *old_mask, const char *new_mask)
+{
+ const char *m = old_mask;
+ const char *n = new_mask;
+ const char *ma = m;
+ const char *na = n;
+ int wild = 0;
+ int mq = 0, nq = 0;
+
+ while (1)
+ {
+ if (*m == '*')
+ {
+ while (*m == '*')
+ m++;
+ wild = 1;
+ ma = m;
+ na = n;
+ }
+
+ if (!*m)
+ {
+ if (!*n)
+ return 0;
+ for (m--; (m > old_mask) && (*m == '?'); m--)
+ ;
+ if ((*m == '*') && (m > old_mask) && (m[-1] != '\\'))
+ return 0;
+ if (!wild)
+ return 1;
+ m = ma;
+
+ /* Added to `mmatch' : Because '\?' and '\*' now is one character: */
+ if ((*na == '\\') && ((na[1] == '*') || (na[1] == '?')))
+ ++na;
+
+ n = ++na;
+ }
+ else if (!*n)
+ {
+ while (*m == '*')
+ m++;
+ return (*m != 0);
+ }
+ if ((*m == '\\') && ((m[1] == '*') || (m[1] == '?')))
+ {
+ m++;
+ mq = 1;
+ }
+ else
+ mq = 0;
+
+ /* Added to `mmatch' : Because '\?' and '\*' now is one character: */
+ if ((*n == '\\') && ((n[1] == '*') || (n[1] == '?')))
+ {
+ n++;
+ nq = 1;
+ }
+ else
+ nq = 0;
+
+/*
+ * This `if' has been changed compared to match() to do the following:
+ * Match when:
+ * old (m) new (n) boolean expression
+ * * any (*m == '*' && !mq) ||
+ * ? any except '*' (*m == '?' && !mq && (*n != '*' || nq)) ||
+ * any except * or ? same as m (!((*m == '*' || *m == '?') && !mq) &&
+ * ToLower(*m) == ToLower(*n) &&
+ * !((mq && !nq) || (!mq && nq)))
+ *
+ * Here `any' also includes \* and \? !
+ *
+ * After reworking the boolean expressions, we get:
+ * (Optimized to use boolean shortcircuits, with most frequently occurring
+ * cases upfront (which took 2 hours!)).
+ */
+ if ((*m == '*' && !mq) ||
+ ((!mq || nq) && ToLower(*m) == ToLower(*n)) ||
+ (*m == '?' && !mq && (*n != '*' || nq)))
+ {
+ if (*m)
+ m++;
+ if (*n)
+ n++;
+ }
+ else
+ {
+ if (!wild)
+ return 1;
+ m = ma;
+
+ /* Added to `mmatch' : Because '\?' and '\*' now is one character: */
+ if ((*na == '\\') && ((na[1] == '*') || (na[1] == '?')))
+ ++na;
+
+ n = ++na;
+ }
+ }
+}
+#endif
+
+/*
+ * Compare if a given string (name) matches the given
+ * mask (which can contain wild cards: '*' - match any
+ * number of chars, '?' - match any single character).
+ *
+ * return 0, if match
+ * 1, if no match
+ */
+
+/*
+ * match
+ *
+ * Rewritten by Andrea Cocito (Nemesi), November 1998.
+ *
+ */
+
+/****************** Nemesi's match() ***************/
+
+int match(const char *mask, const char *str)
+{
+ const char *m = mask, *s = str;
+ char ch;
+ const char *bm, *bs; /* Will be reg anyway on a decent CPU/compiler */
+
+ /* Process the "head" of the mask, if any */
+ while ((ch = *m++) && (ch != '*'))
+ switch (ch)
+ {
+ case '\\':
+ if (*m == '?' || *m == '*')
+ ch = *m++;
+ default:
+ if (ToLower(*s) != ToLower(ch))
+ return 1;
+ case '?':
+ if (!*s++)
+ return 1;
+ };
+ if (!ch)
+ return *s;
+
+ /* We got a star: quickly find if/where we match the next char */
+got_star:
+ bm = m; /* Next try rollback here */
+ while ((ch = *m++))
+ switch (ch)
+ {
+ case '?':
+ if (!*s++)
+ return 1;
+ case '*':
+ bm = m;
+ continue; /* while */
+ case '\\':
+ if (*m == '?' || *m == '*')
+ ch = *m++;
+ default:
+ goto break_while; /* C is structured ? */
+ };
+break_while:
+ if (!ch)
+ return 0; /* mask ends with '*', we got it */
+ ch = ToLower(ch);
+ if (!*s) /* String is already empty, don't continue */
+ return 1; /* This fixes the #quakenet access denied bug */
+ while (ToLower(*s++) != ch)
+ if (!*s)
+ return 1;
+ bs = s; /* Next try start from here */
+
+ /* Check the rest of the "chunk" */
+ while ((ch = *m++))
+ {
+ switch (ch)
+ {
+ case '*':
+ goto got_star;
+ case '\\':
+ if (*m == '?' || *m == '*')
+ ch = *m++;
+ default:
+ if (ToLower(*s) != ToLower(ch))
+ {
+ /* If we've run out of string, give up */
+ if (!*bs)
+ return 1;
+ m = bm;
+ s = bs;
+ goto got_star;
+ };
+ case '?':
+ if (!*s++)
+ return 1;
+ };
+ };
+ if (*s)
+ {
+ m = bm;
+ s = bs;
+ goto got_star;
+ };
+ return 0;
+}
+
+/*
+ * collapse()
+ * Collapse a pattern string into minimal components.
+ * This particular version is "in place", so that it changes the pattern
+ * which is to be reduced to a "minimal" size.
+ *
+ * (C) Carlo Wood - 6 Oct 1998
+ * Speedup rewrite by Andrea Cocito, December 1998.
+ * Note that this new optimized algorithm can *only* work in place.
+ */
+
+#if 0
+char *collapse(char *pattern)
+{
+ int star = 0;
+ char *m = pattern;
+ char *b;
+
+ if (m)
+ {
+ do
+ {
+ if ((*m == '*') && ((m[1] == '*') || (m[1] == '?')))
+ {
+ b = m;
+ do
+ {
+ if (*m == '*')
+ star = 1;
+ else
+ {
+ if (star && (*m != '?'))
+ {
+ *b++ = '*';
+ star = 0;
+ };
+ *b++ = *m;
+ if ((*m == '\\') && ((m[1] == '*') || (m[1] == '?')))
+ *b++ = *++m;
+ };
+ }
+ while (*m++);
+ break;
+ }
+ else
+ {
+ if ((*m == '\\') && ((m[1] == '*') || (m[1] == '?')))
+ m++;
+ };
+ }
+ while (*m++);
+ };
+ return pattern;
+}
+#endif
diff --git a/third-party/mmatch/mmatch.h b/third-party/mmatch/mmatch.h
new file mode 100644
index 0000000..8a451b2
--- /dev/null
+++ b/third-party/mmatch/mmatch.h
@@ -0,0 +1,16 @@
+#ifndef MMATCH_H
+#define MMATCH_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+int mmatch(const char *old_mask, const char *new_mask);
+int match(const char *mask, const char *str);
+char *collapse(char *pattern);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* MMATCH_H */
diff --git a/third-party/nlohmann_json/LICENSE b/third-party/nlohmann_json/LICENSE
new file mode 100644
index 0000000..ffef714
--- /dev/null
+++ b/third-party/nlohmann_json/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2013-2020 Niels Lohmann
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/third-party/nlohmann_json/json.hpp b/third-party/nlohmann_json/json.hpp
new file mode 100644
index 0000000..a70aaf8
--- /dev/null
+++ b/third-party/nlohmann_json/json.hpp
@@ -0,0 +1,25447 @@
+/*
+ __ _____ _____ _____
+ __| | __| | | | JSON for Modern C++
+| | |__ | | | | | | version 3.9.1
+|_____|_____|_____|_|___| https://github.com/nlohmann/json
+
+Licensed under the MIT License <http://opensource.org/licenses/MIT>.
+SPDX-License-Identifier: MIT
+Copyright (c) 2013-2019 Niels Lohmann <http://nlohmann.me>.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+#ifndef INCLUDE_NLOHMANN_JSON_HPP_
+#define INCLUDE_NLOHMANN_JSON_HPP_
+
+#define NLOHMANN_JSON_VERSION_MAJOR 3
+#define NLOHMANN_JSON_VERSION_MINOR 9
+#define NLOHMANN_JSON_VERSION_PATCH 1
+
+#include <algorithm> // all_of, find, for_each
+#include <cstddef> // nullptr_t, ptrdiff_t, size_t
+#include <functional> // hash, less
+#include <initializer_list> // initializer_list
+#include <iosfwd> // istream, ostream
+#include <iterator> // random_access_iterator_tag
+#include <memory> // unique_ptr
+#include <numeric> // accumulate
+#include <string> // string, stoi, to_string
+#include <utility> // declval, forward, move, pair, swap
+#include <vector> // vector
+
+// #include <nlohmann/adl_serializer.hpp>
+
+
+#include <utility>
+
+// #include <nlohmann/detail/conversions/from_json.hpp>
+
+
+#include <algorithm> // transform
+#include <array> // array
+#include <forward_list> // forward_list
+#include <iterator> // inserter, front_inserter, end
+#include <map> // map
+#include <string> // string
+#include <tuple> // tuple, make_tuple
+#include <type_traits> // is_arithmetic, is_same, is_enum, underlying_type, is_convertible
+#include <unordered_map> // unordered_map
+#include <utility> // pair, declval
+#include <valarray> // valarray
+
+// #include <nlohmann/detail/exceptions.hpp>
+
+
+#include <exception> // exception
+#include <stdexcept> // runtime_error
+#include <string> // to_string
+
+// #include <nlohmann/detail/input/position_t.hpp>
+
+
+#include <cstddef> // size_t
+
+namespace nlohmann
+{
+namespace detail
+{
+/// struct to capture the start position of the current token
+struct position_t
+{
+ /// the total number of characters read
+ std::size_t chars_read_total = 0;
+ /// the number of characters read in the current line
+ std::size_t chars_read_current_line = 0;
+ /// the number of lines read
+ std::size_t lines_read = 0;
+
+ /// conversion to size_t to preserve SAX interface
+ constexpr operator size_t() const
+ {
+ return chars_read_total;
+ }
+};
+
+} // namespace detail
+} // namespace nlohmann
+
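The position_t struct just shown tracks how far the lexer has read and converts implicitly to size_t via chars_read_total. A small sketch of that behaviour, assuming the single-header json.hpp added in this patch is on the include path; it is an internal detail type, and all field values below are made up for illustration.

    // Minimal sketch of position_t aggregation and its size_t conversion.
    #include <cstddef>
    #include <cstdio>
    #include "json.hpp"

    int main() {
        nlohmann::detail::position_t pos;
        pos.chars_read_total = 42;        // total characters consumed (example value)
        pos.chars_read_current_line = 7;  // characters on the current line (example value)
        pos.lines_read = 3;               // newlines seen so far (example value)
        std::size_t offset = pos;         // conversion operator yields chars_read_total
        std::printf("offset = %zu\n", offset);
        return 0;
    }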
+// #include <nlohmann/detail/macro_scope.hpp>
+
+
+#include <utility> // pair
+// #include <nlohmann/thirdparty/hedley/hedley.hpp>
+/* Hedley - https://nemequ.github.io/hedley
+ * Created by Evan Nemerson <evan@nemerson.com>
+ *
+ * To the extent possible under law, the author(s) have dedicated all
+ * copyright and related and neighboring rights to this software to
+ * the public domain worldwide. This software is distributed without
+ * any warranty.
+ *
+ * For details, see <http://creativecommons.org/publicdomain/zero/1.0/>.
+ * SPDX-License-Identifier: CC0-1.0
+ */
+
+#if !defined(JSON_HEDLEY_VERSION) || (JSON_HEDLEY_VERSION < 13)
+#if defined(JSON_HEDLEY_VERSION)
+ #undef JSON_HEDLEY_VERSION
+#endif
+#define JSON_HEDLEY_VERSION 13
+
+#if defined(JSON_HEDLEY_STRINGIFY_EX)
+ #undef JSON_HEDLEY_STRINGIFY_EX
+#endif
+#define JSON_HEDLEY_STRINGIFY_EX(x) #x
+
+#if defined(JSON_HEDLEY_STRINGIFY)
+ #undef JSON_HEDLEY_STRINGIFY
+#endif
+#define JSON_HEDLEY_STRINGIFY(x) JSON_HEDLEY_STRINGIFY_EX(x)
+
+#if defined(JSON_HEDLEY_CONCAT_EX)
+ #undef JSON_HEDLEY_CONCAT_EX
+#endif
+#define JSON_HEDLEY_CONCAT_EX(a,b) a##b
+
+#if defined(JSON_HEDLEY_CONCAT)
+ #undef JSON_HEDLEY_CONCAT
+#endif
+#define JSON_HEDLEY_CONCAT(a,b) JSON_HEDLEY_CONCAT_EX(a,b)
+
+#if defined(JSON_HEDLEY_CONCAT3_EX)
+ #undef JSON_HEDLEY_CONCAT3_EX
+#endif
+#define JSON_HEDLEY_CONCAT3_EX(a,b,c) a##b##c
+
+#if defined(JSON_HEDLEY_CONCAT3)
+ #undef JSON_HEDLEY_CONCAT3
+#endif
+#define JSON_HEDLEY_CONCAT3(a,b,c) JSON_HEDLEY_CONCAT3_EX(a,b,c)
+
+#if defined(JSON_HEDLEY_VERSION_ENCODE)
+ #undef JSON_HEDLEY_VERSION_ENCODE
+#endif
+#define JSON_HEDLEY_VERSION_ENCODE(major,minor,revision) (((major) * 1000000) + ((minor) * 1000) + (revision))
+
+#if defined(JSON_HEDLEY_VERSION_DECODE_MAJOR)
+ #undef JSON_HEDLEY_VERSION_DECODE_MAJOR
+#endif
+#define JSON_HEDLEY_VERSION_DECODE_MAJOR(version) ((version) / 1000000)
+
+#if defined(JSON_HEDLEY_VERSION_DECODE_MINOR)
+ #undef JSON_HEDLEY_VERSION_DECODE_MINOR
+#endif
+#define JSON_HEDLEY_VERSION_DECODE_MINOR(version) (((version) % 1000000) / 1000)
+
+#if defined(JSON_HEDLEY_VERSION_DECODE_REVISION)
+ #undef JSON_HEDLEY_VERSION_DECODE_REVISION
+#endif
+#define JSON_HEDLEY_VERSION_DECODE_REVISION(version) ((version) % 1000)
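The encode/decode macros above pack a version triple as major*1000000 + minor*1000 + revision. A worked check, assuming a C++11 translation unit with these macros in scope: 3.9.1 packs to 3009001 and the decode macros recover each component.

    static_assert(JSON_HEDLEY_VERSION_ENCODE(3, 9, 1) == 3009001, "packed value");
    static_assert(JSON_HEDLEY_VERSION_DECODE_MAJOR(3009001) == 3, "major");
    static_assert(JSON_HEDLEY_VERSION_DECODE_MINOR(3009001) == 9, "minor");
    static_assert(JSON_HEDLEY_VERSION_DECODE_REVISION(3009001) == 1, "revision");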
+
+#if defined(JSON_HEDLEY_GNUC_VERSION)
+ #undef JSON_HEDLEY_GNUC_VERSION
+#endif
+#if defined(__GNUC__) && defined(__GNUC_PATCHLEVEL__)
+ #define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__)
+#elif defined(__GNUC__)
+ #define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, 0)
+#endif
+
+#if defined(JSON_HEDLEY_GNUC_VERSION_CHECK)
+ #undef JSON_HEDLEY_GNUC_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_GNUC_VERSION)
+ #define JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_GNUC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_MSVC_VERSION)
+ #undef JSON_HEDLEY_MSVC_VERSION
+#endif
+#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 140000000)
+ #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 10000000, (_MSC_FULL_VER % 10000000) / 100000, (_MSC_FULL_VER % 100000) / 100)
+#elif defined(_MSC_FULL_VER)
+ #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 1000000, (_MSC_FULL_VER % 1000000) / 10000, (_MSC_FULL_VER % 10000) / 10)
+#elif defined(_MSC_VER)
+ #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_VER / 100, _MSC_VER % 100, 0)
+#endif
+
+#if defined(JSON_HEDLEY_MSVC_VERSION_CHECK)
+ #undef JSON_HEDLEY_MSVC_VERSION_CHECK
+#endif
+#if !defined(_MSC_VER)
+ #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (0)
+#elif defined(_MSC_VER) && (_MSC_VER >= 1400)
+ #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 10000000) + (minor * 100000) + (patch)))
+#elif defined(_MSC_VER) && (_MSC_VER >= 1200)
+ #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 1000000) + (minor * 10000) + (patch)))
+#else
+ #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_VER >= ((major * 100) + (minor)))
+#endif
+
+#if defined(JSON_HEDLEY_INTEL_VERSION)
+ #undef JSON_HEDLEY_INTEL_VERSION
+#endif
+#if defined(__INTEL_COMPILER) && defined(__INTEL_COMPILER_UPDATE)
+ #define JSON_HEDLEY_INTEL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, __INTEL_COMPILER_UPDATE)
+#elif defined(__INTEL_COMPILER)
+ #define JSON_HEDLEY_INTEL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, 0)
+#endif
+
+#if defined(JSON_HEDLEY_INTEL_VERSION_CHECK)
+ #undef JSON_HEDLEY_INTEL_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_INTEL_VERSION)
+ #define JSON_HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_INTEL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_PGI_VERSION)
+ #undef JSON_HEDLEY_PGI_VERSION
+#endif
+#if defined(__PGI) && defined(__PGIC__) && defined(__PGIC_MINOR__) && defined(__PGIC_PATCHLEVEL__)
+ #define JSON_HEDLEY_PGI_VERSION JSON_HEDLEY_VERSION_ENCODE(__PGIC__, __PGIC_MINOR__, __PGIC_PATCHLEVEL__)
+#endif
+
+#if defined(JSON_HEDLEY_PGI_VERSION_CHECK)
+ #undef JSON_HEDLEY_PGI_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_PGI_VERSION)
+ #define JSON_HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_PGI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_SUNPRO_VERSION)
+ #undef JSON_HEDLEY_SUNPRO_VERSION
+#endif
+#if defined(__SUNPRO_C) && (__SUNPRO_C > 0x1000)
+ #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_C >> 16) & 0xf) * 10) + ((__SUNPRO_C >> 12) & 0xf), (((__SUNPRO_C >> 8) & 0xf) * 10) + ((__SUNPRO_C >> 4) & 0xf), (__SUNPRO_C & 0xf) * 10)
+#elif defined(__SUNPRO_C)
+ #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_C >> 8) & 0xf, (__SUNPRO_C >> 4) & 0xf, (__SUNPRO_C) & 0xf)
+#elif defined(__SUNPRO_CC) && (__SUNPRO_CC > 0x1000)
+ #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_CC >> 16) & 0xf) * 10) + ((__SUNPRO_CC >> 12) & 0xf), (((__SUNPRO_CC >> 8) & 0xf) * 10) + ((__SUNPRO_CC >> 4) & 0xf), (__SUNPRO_CC & 0xf) * 10)
+#elif defined(__SUNPRO_CC)
+ #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_CC >> 8) & 0xf, (__SUNPRO_CC >> 4) & 0xf, (__SUNPRO_CC) & 0xf)
+#endif
+
+#if defined(JSON_HEDLEY_SUNPRO_VERSION_CHECK)
+ #undef JSON_HEDLEY_SUNPRO_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_SUNPRO_VERSION)
+ #define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_SUNPRO_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION)
+ #undef JSON_HEDLEY_EMSCRIPTEN_VERSION
+#endif
+#if defined(__EMSCRIPTEN__)
+ #define JSON_HEDLEY_EMSCRIPTEN_VERSION JSON_HEDLEY_VERSION_ENCODE(__EMSCRIPTEN_major__, __EMSCRIPTEN_minor__, __EMSCRIPTEN_tiny__)
+#endif
+
+#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK)
+ #undef JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION)
+ #define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_EMSCRIPTEN_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_ARM_VERSION)
+ #undef JSON_HEDLEY_ARM_VERSION
+#endif
+#if defined(__CC_ARM) && defined(__ARMCOMPILER_VERSION)
+ #define JSON_HEDLEY_ARM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ARMCOMPILER_VERSION / 1000000, (__ARMCOMPILER_VERSION % 1000000) / 10000, (__ARMCOMPILER_VERSION % 10000) / 100)
+#elif defined(__CC_ARM) && defined(__ARMCC_VERSION)
+ #define JSON_HEDLEY_ARM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ARMCC_VERSION / 1000000, (__ARMCC_VERSION % 1000000) / 10000, (__ARMCC_VERSION % 10000) / 100)
+#endif
+
+#if defined(JSON_HEDLEY_ARM_VERSION_CHECK)
+ #undef JSON_HEDLEY_ARM_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_ARM_VERSION)
+ #define JSON_HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_ARM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_IBM_VERSION)
+ #undef JSON_HEDLEY_IBM_VERSION
+#endif
+#if defined(__ibmxl__)
+ #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ibmxl_version__, __ibmxl_release__, __ibmxl_modification__)
+#elif defined(__xlC__) && defined(__xlC_ver__)
+ #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, (__xlC_ver__ >> 8) & 0xff)
+#elif defined(__xlC__)
+ #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, 0)
+#endif
+
+#if defined(JSON_HEDLEY_IBM_VERSION_CHECK)
+ #undef JSON_HEDLEY_IBM_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_IBM_VERSION)
+ #define JSON_HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_IBM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_TI_VERSION)
+ #undef JSON_HEDLEY_TI_VERSION
+#endif
+#if \
+ defined(__TI_COMPILER_VERSION__) && \
+ ( \
+ defined(__TMS470__) || defined(__TI_ARM__) || \
+ defined(__MSP430__) || \
+ defined(__TMS320C2000__) \
+ )
+#if (__TI_COMPILER_VERSION__ >= 16000000)
+ #define JSON_HEDLEY_TI_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
+#endif
+#endif
+
+#if defined(JSON_HEDLEY_TI_VERSION_CHECK)
+ #undef JSON_HEDLEY_TI_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_TI_VERSION)
+ #define JSON_HEDLEY_TI_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_TI_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_TI_CL2000_VERSION)
+ #undef JSON_HEDLEY_TI_CL2000_VERSION
+#endif
+#if defined(__TI_COMPILER_VERSION__) && defined(__TMS320C2000__)
+ #define JSON_HEDLEY_TI_CL2000_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
+#endif
+
+#if defined(JSON_HEDLEY_TI_CL2000_VERSION_CHECK)
+ #undef JSON_HEDLEY_TI_CL2000_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_TI_CL2000_VERSION)
+ #define JSON_HEDLEY_TI_CL2000_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL2000_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_TI_CL2000_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_TI_CL430_VERSION)
+ #undef JSON_HEDLEY_TI_CL430_VERSION
+#endif
+#if defined(__TI_COMPILER_VERSION__) && defined(__MSP430__)
+ #define JSON_HEDLEY_TI_CL430_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
+#endif
+
+#if defined(JSON_HEDLEY_TI_CL430_VERSION_CHECK)
+ #undef JSON_HEDLEY_TI_CL430_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_TI_CL430_VERSION)
+ #define JSON_HEDLEY_TI_CL430_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL430_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_TI_CL430_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_TI_ARMCL_VERSION)
+ #undef JSON_HEDLEY_TI_ARMCL_VERSION
+#endif
+#if defined(__TI_COMPILER_VERSION__) && (defined(__TMS470__) || defined(__TI_ARM__))
+ #define JSON_HEDLEY_TI_ARMCL_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
+#endif
+
+#if defined(JSON_HEDLEY_TI_ARMCL_VERSION_CHECK)
+ #undef JSON_HEDLEY_TI_ARMCL_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_TI_ARMCL_VERSION)
+ #define JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_ARMCL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_TI_CL6X_VERSION)
+ #undef JSON_HEDLEY_TI_CL6X_VERSION
+#endif
+#if defined(__TI_COMPILER_VERSION__) && defined(__TMS320C6X__)
+ #define JSON_HEDLEY_TI_CL6X_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
+#endif
+
+#if defined(JSON_HEDLEY_TI_CL6X_VERSION_CHECK)
+ #undef JSON_HEDLEY_TI_CL6X_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_TI_CL6X_VERSION)
+ #define JSON_HEDLEY_TI_CL6X_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL6X_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_TI_CL6X_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_TI_CL7X_VERSION)
+ #undef JSON_HEDLEY_TI_CL7X_VERSION
+#endif
+#if defined(__TI_COMPILER_VERSION__) && defined(__C7000__)
+ #define JSON_HEDLEY_TI_CL7X_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
+#endif
+
+#if defined(JSON_HEDLEY_TI_CL7X_VERSION_CHECK)
+ #undef JSON_HEDLEY_TI_CL7X_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_TI_CL7X_VERSION)
+ #define JSON_HEDLEY_TI_CL7X_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL7X_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_TI_CL7X_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_TI_CLPRU_VERSION)
+ #undef JSON_HEDLEY_TI_CLPRU_VERSION
+#endif
+#if defined(__TI_COMPILER_VERSION__) && defined(__PRU__)
+ #define JSON_HEDLEY_TI_CLPRU_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
+#endif
+
+#if defined(JSON_HEDLEY_TI_CLPRU_VERSION_CHECK)
+ #undef JSON_HEDLEY_TI_CLPRU_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_TI_CLPRU_VERSION)
+ #define JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CLPRU_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_CRAY_VERSION)
+ #undef JSON_HEDLEY_CRAY_VERSION
+#endif
+#if defined(_CRAYC)
+ #if defined(_RELEASE_PATCHLEVEL)
+ #define JSON_HEDLEY_CRAY_VERSION JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, _RELEASE_PATCHLEVEL)
+ #else
+ #define JSON_HEDLEY_CRAY_VERSION JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, 0)
+ #endif
+#endif
+
+#if defined(JSON_HEDLEY_CRAY_VERSION_CHECK)
+ #undef JSON_HEDLEY_CRAY_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_CRAY_VERSION)
+ #define JSON_HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_CRAY_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_IAR_VERSION)
+ #undef JSON_HEDLEY_IAR_VERSION
+#endif
+#if defined(__IAR_SYSTEMS_ICC__)
+ #if __VER__ > 1000
+ #define JSON_HEDLEY_IAR_VERSION JSON_HEDLEY_VERSION_ENCODE((__VER__ / 1000000), ((__VER__ / 1000) % 1000), (__VER__ % 1000))
+ #else
+        #define JSON_HEDLEY_IAR_VERSION JSON_HEDLEY_VERSION_ENCODE(__VER__ / 100, __VER__ % 100, 0)
+ #endif
+#endif
+
+#if defined(JSON_HEDLEY_IAR_VERSION_CHECK)
+ #undef JSON_HEDLEY_IAR_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_IAR_VERSION)
+ #define JSON_HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_IAR_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_TINYC_VERSION)
+ #undef JSON_HEDLEY_TINYC_VERSION
+#endif
+#if defined(__TINYC__)
+ #define JSON_HEDLEY_TINYC_VERSION JSON_HEDLEY_VERSION_ENCODE(__TINYC__ / 1000, (__TINYC__ / 100) % 10, __TINYC__ % 100)
+#endif
+
+#if defined(JSON_HEDLEY_TINYC_VERSION_CHECK)
+ #undef JSON_HEDLEY_TINYC_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_TINYC_VERSION)
+ #define JSON_HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TINYC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_DMC_VERSION)
+ #undef JSON_HEDLEY_DMC_VERSION
+#endif
+#if defined(__DMC__)
+ #define JSON_HEDLEY_DMC_VERSION JSON_HEDLEY_VERSION_ENCODE(__DMC__ >> 8, (__DMC__ >> 4) & 0xf, __DMC__ & 0xf)
+#endif
+
+#if defined(JSON_HEDLEY_DMC_VERSION_CHECK)
+ #undef JSON_HEDLEY_DMC_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_DMC_VERSION)
+ #define JSON_HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_DMC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_COMPCERT_VERSION)
+ #undef JSON_HEDLEY_COMPCERT_VERSION
+#endif
+#if defined(__COMPCERT_VERSION__)
+ #define JSON_HEDLEY_COMPCERT_VERSION JSON_HEDLEY_VERSION_ENCODE(__COMPCERT_VERSION__ / 10000, (__COMPCERT_VERSION__ / 100) % 100, __COMPCERT_VERSION__ % 100)
+#endif
+
+#if defined(JSON_HEDLEY_COMPCERT_VERSION_CHECK)
+ #undef JSON_HEDLEY_COMPCERT_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_COMPCERT_VERSION)
+ #define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_COMPCERT_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_PELLES_VERSION)
+ #undef JSON_HEDLEY_PELLES_VERSION
+#endif
+#if defined(__POCC__)
+ #define JSON_HEDLEY_PELLES_VERSION JSON_HEDLEY_VERSION_ENCODE(__POCC__ / 100, __POCC__ % 100, 0)
+#endif
+
+#if defined(JSON_HEDLEY_PELLES_VERSION_CHECK)
+ #undef JSON_HEDLEY_PELLES_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_PELLES_VERSION)
+ #define JSON_HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_PELLES_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_GCC_VERSION)
+ #undef JSON_HEDLEY_GCC_VERSION
+#endif
+#if \
+ defined(JSON_HEDLEY_GNUC_VERSION) && \
+ !defined(__clang__) && \
+ !defined(JSON_HEDLEY_INTEL_VERSION) && \
+ !defined(JSON_HEDLEY_PGI_VERSION) && \
+ !defined(JSON_HEDLEY_ARM_VERSION) && \
+ !defined(JSON_HEDLEY_TI_VERSION) && \
+ !defined(JSON_HEDLEY_TI_ARMCL_VERSION) && \
+ !defined(JSON_HEDLEY_TI_CL430_VERSION) && \
+ !defined(JSON_HEDLEY_TI_CL2000_VERSION) && \
+ !defined(JSON_HEDLEY_TI_CL6X_VERSION) && \
+ !defined(JSON_HEDLEY_TI_CL7X_VERSION) && \
+ !defined(JSON_HEDLEY_TI_CLPRU_VERSION) && \
+ !defined(__COMPCERT__)
+ #define JSON_HEDLEY_GCC_VERSION JSON_HEDLEY_GNUC_VERSION
+#endif
+
+#if defined(JSON_HEDLEY_GCC_VERSION_CHECK)
+ #undef JSON_HEDLEY_GCC_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_GCC_VERSION)
+ #define JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_GCC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_HAS_ATTRIBUTE)
+ #undef JSON_HEDLEY_HAS_ATTRIBUTE
+#endif
+#if defined(__has_attribute)
+ #define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) __has_attribute(attribute)
+#else
+ #define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) (0)
+#endif
+
+#if defined(JSON_HEDLEY_GNUC_HAS_ATTRIBUTE)
+ #undef JSON_HEDLEY_GNUC_HAS_ATTRIBUTE
+#endif
+#if defined(__has_attribute)
+ #define JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) __has_attribute(attribute)
+#else
+ #define JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_GCC_HAS_ATTRIBUTE)
+ #undef JSON_HEDLEY_GCC_HAS_ATTRIBUTE
+#endif
+#if defined(__has_attribute)
+ #define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) __has_attribute(attribute)
+#else
+ #define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_HAS_CPP_ATTRIBUTE)
+ #undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE
+#endif
+#if \
+ defined(__has_cpp_attribute) && \
+ defined(__cplusplus) && \
+ (!defined(JSON_HEDLEY_SUNPRO_VERSION) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0))
+ #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) __has_cpp_attribute(attribute)
+#else
+ #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) (0)
+#endif
+
+#if defined(JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS)
+ #undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS
+#endif
+#if !defined(__cplusplus) || !defined(__has_cpp_attribute)
+ #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) (0)
+#elif \
+ !defined(JSON_HEDLEY_PGI_VERSION) && \
+ !defined(JSON_HEDLEY_IAR_VERSION) && \
+ (!defined(JSON_HEDLEY_SUNPRO_VERSION) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0)) && \
+ (!defined(JSON_HEDLEY_MSVC_VERSION) || JSON_HEDLEY_MSVC_VERSION_CHECK(19,20,0))
+ #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) JSON_HEDLEY_HAS_CPP_ATTRIBUTE(ns::attribute)
+#else
+ #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) (0)
+#endif
+
+#if defined(JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE)
+ #undef JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE
+#endif
+#if defined(__has_cpp_attribute) && defined(__cplusplus)
+ #define JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute)
+#else
+ #define JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE)
+ #undef JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE
+#endif
+#if defined(__has_cpp_attribute) && defined(__cplusplus)
+ #define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute)
+#else
+ #define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_HAS_BUILTIN)
+ #undef JSON_HEDLEY_HAS_BUILTIN
+#endif
+#if defined(__has_builtin)
+ #define JSON_HEDLEY_HAS_BUILTIN(builtin) __has_builtin(builtin)
+#else
+ #define JSON_HEDLEY_HAS_BUILTIN(builtin) (0)
+#endif
+
+#if defined(JSON_HEDLEY_GNUC_HAS_BUILTIN)
+ #undef JSON_HEDLEY_GNUC_HAS_BUILTIN
+#endif
+#if defined(__has_builtin)
+ #define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin)
+#else
+ #define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_GCC_HAS_BUILTIN)
+ #undef JSON_HEDLEY_GCC_HAS_BUILTIN
+#endif
+#if defined(__has_builtin)
+ #define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin)
+#else
+ #define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_HAS_FEATURE)
+ #undef JSON_HEDLEY_HAS_FEATURE
+#endif
+#if defined(__has_feature)
+ #define JSON_HEDLEY_HAS_FEATURE(feature) __has_feature(feature)
+#else
+ #define JSON_HEDLEY_HAS_FEATURE(feature) (0)
+#endif
+
+#if defined(JSON_HEDLEY_GNUC_HAS_FEATURE)
+ #undef JSON_HEDLEY_GNUC_HAS_FEATURE
+#endif
+#if defined(__has_feature)
+ #define JSON_HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature)
+#else
+ #define JSON_HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_GCC_HAS_FEATURE)
+ #undef JSON_HEDLEY_GCC_HAS_FEATURE
+#endif
+#if defined(__has_feature)
+ #define JSON_HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature)
+#else
+ #define JSON_HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_HAS_EXTENSION)
+ #undef JSON_HEDLEY_HAS_EXTENSION
+#endif
+#if defined(__has_extension)
+ #define JSON_HEDLEY_HAS_EXTENSION(extension) __has_extension(extension)
+#else
+ #define JSON_HEDLEY_HAS_EXTENSION(extension) (0)
+#endif
+
+#if defined(JSON_HEDLEY_GNUC_HAS_EXTENSION)
+ #undef JSON_HEDLEY_GNUC_HAS_EXTENSION
+#endif
+#if defined(__has_extension)
+ #define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension)
+#else
+ #define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_GCC_HAS_EXTENSION)
+ #undef JSON_HEDLEY_GCC_HAS_EXTENSION
+#endif
+#if defined(__has_extension)
+ #define JSON_HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension)
+#else
+ #define JSON_HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE)
+ #undef JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE
+#endif
+#if defined(__has_declspec_attribute)
+ #define JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) __has_declspec_attribute(attribute)
+#else
+ #define JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) (0)
+#endif
+
+#if defined(JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE)
+ #undef JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE
+#endif
+#if defined(__has_declspec_attribute)
+ #define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute)
+#else
+ #define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE)
+ #undef JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE
+#endif
+#if defined(__has_declspec_attribute)
+ #define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute)
+#else
+ #define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_HAS_WARNING)
+ #undef JSON_HEDLEY_HAS_WARNING
+#endif
+#if defined(__has_warning)
+ #define JSON_HEDLEY_HAS_WARNING(warning) __has_warning(warning)
+#else
+ #define JSON_HEDLEY_HAS_WARNING(warning) (0)
+#endif
+
+#if defined(JSON_HEDLEY_GNUC_HAS_WARNING)
+ #undef JSON_HEDLEY_GNUC_HAS_WARNING
+#endif
+#if defined(__has_warning)
+ #define JSON_HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning)
+#else
+ #define JSON_HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_GCC_HAS_WARNING)
+ #undef JSON_HEDLEY_GCC_HAS_WARNING
+#endif
+#if defined(__has_warning)
+ #define JSON_HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning)
+#else
+ #define JSON_HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
+#endif
+
+/* JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_ is for
+ HEDLEY INTERNAL USE ONLY. API subject to change without notice. */
+#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_)
+ #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_
+#endif
+#if defined(__cplusplus)
+# if JSON_HEDLEY_HAS_WARNING("-Wc++98-compat")
+# if JSON_HEDLEY_HAS_WARNING("-Wc++17-extensions")
+# define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \
+ JSON_HEDLEY_DIAGNOSTIC_PUSH \
+ _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \
+ _Pragma("clang diagnostic ignored \"-Wc++17-extensions\"") \
+ xpr \
+ JSON_HEDLEY_DIAGNOSTIC_POP
+# else
+# define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \
+ JSON_HEDLEY_DIAGNOSTIC_PUSH \
+ _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \
+ xpr \
+ JSON_HEDLEY_DIAGNOSTIC_POP
+# endif
+# endif
+#endif
+#if !defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(x) x
+#endif
+
+#if defined(JSON_HEDLEY_CONST_CAST)
+ #undef JSON_HEDLEY_CONST_CAST
+#endif
+#if defined(__cplusplus)
+# define JSON_HEDLEY_CONST_CAST(T, expr) (const_cast<T>(expr))
+#elif \
+ JSON_HEDLEY_HAS_WARNING("-Wcast-qual") || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+# define JSON_HEDLEY_CONST_CAST(T, expr) (__extension__ ({ \
+ JSON_HEDLEY_DIAGNOSTIC_PUSH \
+ JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \
+ ((T) (expr)); \
+ JSON_HEDLEY_DIAGNOSTIC_POP \
+ }))
+#else
+# define JSON_HEDLEY_CONST_CAST(T, expr) ((T) (expr))
+#endif
+
+#if defined(JSON_HEDLEY_REINTERPRET_CAST)
+ #undef JSON_HEDLEY_REINTERPRET_CAST
+#endif
+#if defined(__cplusplus)
+ #define JSON_HEDLEY_REINTERPRET_CAST(T, expr) (reinterpret_cast<T>(expr))
+#else
+ #define JSON_HEDLEY_REINTERPRET_CAST(T, expr) ((T) (expr))
+#endif
+
+#if defined(JSON_HEDLEY_STATIC_CAST)
+ #undef JSON_HEDLEY_STATIC_CAST
+#endif
+#if defined(__cplusplus)
+ #define JSON_HEDLEY_STATIC_CAST(T, expr) (static_cast<T>(expr))
+#else
+ #define JSON_HEDLEY_STATIC_CAST(T, expr) ((T) (expr))
+#endif
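The three cast wrappers above expand to the corresponding C++ cast keywords when __cplusplus is defined and to plain C casts otherwise. A short sketch of their use; the function and variables are illustrative only.

    #include <cstddef>

    void cast_examples(void) {
        int n = -5;
        std::size_t u = JSON_HEDLEY_STATIC_CAST(std::size_t, n);    // static_cast<std::size_t>(n) in C++
        const char* text = "hi";
        char* writable = JSON_HEDLEY_CONST_CAST(char*, text);       // const_cast<char*>(text) in C++
        void* raw = JSON_HEDLEY_REINTERPRET_CAST(void*, writable);  // reinterpret_cast<void*>(writable) in C++
        (void)u; (void)raw;                                         // silence unused-variable warnings
    }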
+
+#if defined(JSON_HEDLEY_CPP_CAST)
+ #undef JSON_HEDLEY_CPP_CAST
+#endif
+#if defined(__cplusplus)
+# if JSON_HEDLEY_HAS_WARNING("-Wold-style-cast")
+# define JSON_HEDLEY_CPP_CAST(T, expr) \
+ JSON_HEDLEY_DIAGNOSTIC_PUSH \
+ _Pragma("clang diagnostic ignored \"-Wold-style-cast\"") \
+ ((T) (expr)) \
+ JSON_HEDLEY_DIAGNOSTIC_POP
+# elif JSON_HEDLEY_IAR_VERSION_CHECK(8,3,0)
+# define JSON_HEDLEY_CPP_CAST(T, expr) \
+ JSON_HEDLEY_DIAGNOSTIC_PUSH \
+ _Pragma("diag_suppress=Pe137") \
+ JSON_HEDLEY_DIAGNOSTIC_POP \
+# else
+# define JSON_HEDLEY_CPP_CAST(T, expr) ((T) (expr))
+# endif
+#else
+# define JSON_HEDLEY_CPP_CAST(T, expr) (expr)
+#endif
+
+#if \
+ (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \
+ defined(__clang__) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) || \
+ JSON_HEDLEY_PGI_VERSION_CHECK(18,4,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,7,0) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(2,0,1) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,1,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,0,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
+ JSON_HEDLEY_CRAY_VERSION_CHECK(5,0,0) || \
+ JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,17) || \
+ JSON_HEDLEY_SUNPRO_VERSION_CHECK(8,0,0) || \
+ (JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) && defined(__C99_PRAGMA_OPERATOR))
+ #define JSON_HEDLEY_PRAGMA(value) _Pragma(#value)
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0)
+ #define JSON_HEDLEY_PRAGMA(value) __pragma(value)
+#else
+ #define JSON_HEDLEY_PRAGMA(value)
+#endif
+
+#if defined(JSON_HEDLEY_DIAGNOSTIC_PUSH)
+ #undef JSON_HEDLEY_DIAGNOSTIC_PUSH
+#endif
+#if defined(JSON_HEDLEY_DIAGNOSTIC_POP)
+ #undef JSON_HEDLEY_DIAGNOSTIC_POP
+#endif
+#if defined(__clang__)
+ #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("clang diagnostic push")
+ #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("clang diagnostic pop")
+#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)")
+ #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)")
+#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("GCC diagnostic push")
+ #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("GCC diagnostic pop")
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_PUSH __pragma(warning(push))
+ #define JSON_HEDLEY_DIAGNOSTIC_POP __pragma(warning(pop))
+#elif JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("push")
+ #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("pop")
+#elif \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,4,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,1,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("diag_push")
+ #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("diag_pop")
+#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,90,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)")
+ #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)")
+#else
+ #define JSON_HEDLEY_DIAGNOSTIC_PUSH
+ #define JSON_HEDLEY_DIAGNOSTIC_POP
+#endif
+
+#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED)
+ #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED
+#endif
+#if JSON_HEDLEY_HAS_WARNING("-Wdeprecated-declarations")
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"")
+#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warning(disable:1478 1786)")
+#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1444")
+#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED __pragma(warning(disable:4996))
+#elif \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1291,1718")
+#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && !defined(__cplusplus)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,E_DEPRECATED_ATT,E_DEPRECATED_ATT_MESS)")
+#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && defined(__cplusplus)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,symdeprecated,symdeprecated2)")
+#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress=Pe1444,Pe1215")
+#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,90,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warn(disable:2241)")
+#else
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED
+#endif
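DIAGNOSTIC_PUSH/POP bracket a region of overridden warning state, and DISABLE_DEPRECATED silences deprecation diagnostics inside it. A hedged sketch of the usual pattern; legacy_api() is a hypothetical function assumed to carry a deprecation attribute (for instance via the JSON_HEDLEY_DEPRECATED macro defined further below).

    void legacy_api(void);   // assumed to be marked deprecated elsewhere

    void call_legacy(void) {
        JSON_HEDLEY_DIAGNOSTIC_PUSH
        JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED
        legacy_api();        // no deprecation warning inside the push/pop bracket
        JSON_HEDLEY_DIAGNOSTIC_POP
    }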
+
+#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS)
+ #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS
+#endif
+#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas")
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("clang diagnostic ignored \"-Wunknown-pragmas\"")
+#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("warning(disable:161)")
+#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 1675")
+#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("GCC diagnostic ignored \"-Wunknown-pragmas\"")
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS __pragma(warning(disable:4068))
+#elif \
+ JSON_HEDLEY_TI_VERSION_CHECK(16,9,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 163")
+#elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 163")
+#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress=Pe161")
+#else
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS
+#endif
+
+#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES)
+ #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES
+#endif
+#if JSON_HEDLEY_HAS_WARNING("-Wunknown-attributes")
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("clang diagnostic ignored \"-Wunknown-attributes\"")
+#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
+#elif JSON_HEDLEY_INTEL_VERSION_CHECK(17,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("warning(disable:1292)")
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(19,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES __pragma(warning(disable:5030))
+#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097")
+#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("error_messages(off,attrskipunsup)")
+#elif \
+ JSON_HEDLEY_TI_VERSION_CHECK(18,1,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,3,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1173")
+#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress=Pe1097")
+#else
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES
+#endif
+
+#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL)
+ #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL
+#endif
+#if JSON_HEDLEY_HAS_WARNING("-Wcast-qual")
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("clang diagnostic ignored \"-Wcast-qual\"")
+#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("warning(disable:2203 2331)")
+#elif JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#else
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL
+#endif
+
+#if defined(JSON_HEDLEY_DEPRECATED)
+ #undef JSON_HEDLEY_DEPRECATED
+#endif
+#if defined(JSON_HEDLEY_DEPRECATED_FOR)
+ #undef JSON_HEDLEY_DEPRECATED_FOR
+#endif
+#if JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0)
+ #define JSON_HEDLEY_DEPRECATED(since) __declspec(deprecated("Since " # since))
+ #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated("Since " #since "; use " #replacement))
+#elif defined(__cplusplus) && (__cplusplus >= 201402L)
+ #define JSON_HEDLEY_DEPRECATED(since) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[deprecated("Since " #since)]])
+ #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[deprecated("Since " #since "; use " #replacement)]])
+#elif \
+ JSON_HEDLEY_HAS_EXTENSION(attribute_deprecated_with_message) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,5,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) || \
+ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) || \
+ JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(18,1,0) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(18,1,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,3,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,0)
+ #define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__("Since " #since)))
+ #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__("Since " #since "; use " #replacement)))
+#elif \
+ JSON_HEDLEY_HAS_ATTRIBUTE(deprecated) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
+ #define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__))
+ #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__))
+#elif \
+ JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \
+ JSON_HEDLEY_PELLES_VERSION_CHECK(6,50,0)
+ #define JSON_HEDLEY_DEPRECATED(since) __declspec(deprecated)
+ #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated)
+#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0)
+ #define JSON_HEDLEY_DEPRECATED(since) _Pragma("deprecated")
+ #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) _Pragma("deprecated")
+#else
+ #define JSON_HEDLEY_DEPRECATED(since)
+ #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement)
+#endif
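A sketch of the deprecation macros above applied to a hypothetical API; parse_v1, parse_v1_ex and parse_v2 are illustrative names only. The "since" argument is stringized into the diagnostic message on compilers that support one.

    JSON_HEDLEY_DEPRECATED(3.9.0)
    int parse_v1(const char* text);

    JSON_HEDLEY_DEPRECATED_FOR(3.9.0, parse_v2)
    int parse_v1_ex(const char* text, int flags);

    int parse_v2(const char* text, int flags);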
+
+#if defined(JSON_HEDLEY_UNAVAILABLE)
+ #undef JSON_HEDLEY_UNAVAILABLE
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(warning) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+ #define JSON_HEDLEY_UNAVAILABLE(available_since) __attribute__((__warning__("Not available until " #available_since)))
+#else
+ #define JSON_HEDLEY_UNAVAILABLE(available_since)
+#endif
+
+#if defined(JSON_HEDLEY_WARN_UNUSED_RESULT)
+ #undef JSON_HEDLEY_WARN_UNUSED_RESULT
+#endif
+#if defined(JSON_HEDLEY_WARN_UNUSED_RESULT_MSG)
+ #undef JSON_HEDLEY_WARN_UNUSED_RESULT_MSG
+#endif
+#if (JSON_HEDLEY_HAS_CPP_ATTRIBUTE(nodiscard) >= 201907L)
+ #define JSON_HEDLEY_WARN_UNUSED_RESULT JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]])
+ #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard(msg)]])
+#elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE(nodiscard)
+ #define JSON_HEDLEY_WARN_UNUSED_RESULT JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]])
+ #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]])
+#elif \
+ JSON_HEDLEY_HAS_ATTRIBUTE(warn_unused_result) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
+ (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0) && defined(__cplusplus)) || \
+ JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0)
+ #define JSON_HEDLEY_WARN_UNUSED_RESULT __attribute__((__warn_unused_result__))
+ #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) __attribute__((__warn_unused_result__))
+#elif defined(_Check_return_) /* SAL */
+ #define JSON_HEDLEY_WARN_UNUSED_RESULT _Check_return_
+ #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) _Check_return_
+#else
+ #define JSON_HEDLEY_WARN_UNUSED_RESULT
+ #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg)
+#endif
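WARN_UNUSED_RESULT maps to [[nodiscard]] or __attribute__((warn_unused_result)) where available, so dropping the return value draws a warning. A sketch on hypothetical status-returning functions:

    JSON_HEDLEY_WARN_UNUSED_RESULT
    int reserve_buffer(int bytes);

    JSON_HEDLEY_WARN_UNUSED_RESULT_MSG("check the error code")
    int flush_buffer(void);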
+
+#if defined(JSON_HEDLEY_SENTINEL)
+ #undef JSON_HEDLEY_SENTINEL
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(sentinel) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(5,4,0)
+ #define JSON_HEDLEY_SENTINEL(position) __attribute__((__sentinel__(position)))
+#else
+ #define JSON_HEDLEY_SENTINEL(position)
+#endif
+
+#if defined(JSON_HEDLEY_NO_RETURN)
+ #undef JSON_HEDLEY_NO_RETURN
+#endif
+#if JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0)
+ #define JSON_HEDLEY_NO_RETURN __noreturn
+#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+ #define JSON_HEDLEY_NO_RETURN __attribute__((__noreturn__))
+#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
+ #define JSON_HEDLEY_NO_RETURN _Noreturn
+#elif defined(__cplusplus) && (__cplusplus >= 201103L)
+ #define JSON_HEDLEY_NO_RETURN JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[noreturn]])
+#elif \
+ JSON_HEDLEY_HAS_ATTRIBUTE(noreturn) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,2,0) || \
+ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
+ #define JSON_HEDLEY_NO_RETURN __attribute__((__noreturn__))
+#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0)
+ #define JSON_HEDLEY_NO_RETURN _Pragma("does_not_return")
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0)
+ #define JSON_HEDLEY_NO_RETURN __declspec(noreturn)
+#elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,0,0) && defined(__cplusplus)
+ #define JSON_HEDLEY_NO_RETURN _Pragma("FUNC_NEVER_RETURNS;")
+#elif JSON_HEDLEY_COMPCERT_VERSION_CHECK(3,2,0)
+ #define JSON_HEDLEY_NO_RETURN __attribute((noreturn))
+#elif JSON_HEDLEY_PELLES_VERSION_CHECK(9,0,0)
+ #define JSON_HEDLEY_NO_RETURN __declspec(noreturn)
+#else
+ #define JSON_HEDLEY_NO_RETURN
+#endif
+
+#if defined(JSON_HEDLEY_NO_ESCAPE)
+ #undef JSON_HEDLEY_NO_ESCAPE
+#endif
+#if JSON_HEDLEY_HAS_ATTRIBUTE(noescape)
+ #define JSON_HEDLEY_NO_ESCAPE __attribute__((__noescape__))
+#else
+ #define JSON_HEDLEY_NO_ESCAPE
+#endif
+
+#if defined(JSON_HEDLEY_UNREACHABLE)
+ #undef JSON_HEDLEY_UNREACHABLE
+#endif
+#if defined(JSON_HEDLEY_UNREACHABLE_RETURN)
+ #undef JSON_HEDLEY_UNREACHABLE_RETURN
+#endif
+#if defined(JSON_HEDLEY_ASSUME)
+ #undef JSON_HEDLEY_ASSUME
+#endif
+#if \
+ JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+ #define JSON_HEDLEY_ASSUME(expr) __assume(expr)
+#elif JSON_HEDLEY_HAS_BUILTIN(__builtin_assume)
+ #define JSON_HEDLEY_ASSUME(expr) __builtin_assume(expr)
+#elif \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0)
+ #if defined(__cplusplus)
+ #define JSON_HEDLEY_ASSUME(expr) std::_nassert(expr)
+ #else
+ #define JSON_HEDLEY_ASSUME(expr) _nassert(expr)
+ #endif
+#endif
+#if \
+ (JSON_HEDLEY_HAS_BUILTIN(__builtin_unreachable) && (!defined(JSON_HEDLEY_ARM_VERSION))) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,5,0) || \
+ JSON_HEDLEY_PGI_VERSION_CHECK(18,10,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(13,1,5)
+ #define JSON_HEDLEY_UNREACHABLE() __builtin_unreachable()
+#elif defined(JSON_HEDLEY_ASSUME)
+ #define JSON_HEDLEY_UNREACHABLE() JSON_HEDLEY_ASSUME(0)
+#endif
+#if !defined(JSON_HEDLEY_ASSUME)
+ #if defined(JSON_HEDLEY_UNREACHABLE)
+ #define JSON_HEDLEY_ASSUME(expr) JSON_HEDLEY_STATIC_CAST(void, ((expr) ? 1 : (JSON_HEDLEY_UNREACHABLE(), 1)))
+ #else
+ #define JSON_HEDLEY_ASSUME(expr) JSON_HEDLEY_STATIC_CAST(void, expr)
+ #endif
+#endif
+#if defined(JSON_HEDLEY_UNREACHABLE)
+ #if \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0)
+ #define JSON_HEDLEY_UNREACHABLE_RETURN(value) return (JSON_HEDLEY_STATIC_CAST(void, JSON_HEDLEY_ASSUME(0)), (value))
+ #else
+ #define JSON_HEDLEY_UNREACHABLE_RETURN(value) JSON_HEDLEY_UNREACHABLE()
+ #endif
+#else
+ #define JSON_HEDLEY_UNREACHABLE_RETURN(value) return (value)
+#endif
+#if !defined(JSON_HEDLEY_UNREACHABLE)
+ #define JSON_HEDLEY_UNREACHABLE() JSON_HEDLEY_ASSUME(0)
+#endif
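ASSUME hands the optimizer a condition it may treat as always true, and UNREACHABLE marks code paths that cannot execute. A hedged sketch with an illustrative function whose caller is assumed to pass only 0, 1 or 2; UNREACHABLE_RETURN supplies a fallback return value on compilers without an unreachable builtin.

    int describe(int kind) {
        JSON_HEDLEY_ASSUME(kind >= 0 && kind <= 2);   // promise to the optimizer
        switch (kind) {
            case 0: return 10;
            case 1: return 20;
            case 2: return 30;
            default: JSON_HEDLEY_UNREACHABLE_RETURN(-1);  // never taken given the assumption
        }
    }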
+
+JSON_HEDLEY_DIAGNOSTIC_PUSH
+#if JSON_HEDLEY_HAS_WARNING("-Wpedantic")
+ #pragma clang diagnostic ignored "-Wpedantic"
+#endif
+#if JSON_HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic") && defined(__cplusplus)
+ #pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
+#endif
+#if JSON_HEDLEY_GCC_HAS_WARNING("-Wvariadic-macros",4,0,0)
+ #if defined(__clang__)
+ #pragma clang diagnostic ignored "-Wvariadic-macros"
+ #elif defined(JSON_HEDLEY_GCC_VERSION)
+ #pragma GCC diagnostic ignored "-Wvariadic-macros"
+ #endif
+#endif
+#if defined(JSON_HEDLEY_NON_NULL)
+ #undef JSON_HEDLEY_NON_NULL
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(nonnull) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0)
+ #define JSON_HEDLEY_NON_NULL(...) __attribute__((__nonnull__(__VA_ARGS__)))
+#else
+ #define JSON_HEDLEY_NON_NULL(...)
+#endif
+JSON_HEDLEY_DIAGNOSTIC_POP
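NON_NULL takes 1-based parameter indices and, where __attribute__((nonnull)) is available, lets the compiler flag NULL arguments. A sketch on a hypothetical helper:

    JSON_HEDLEY_NON_NULL(1, 2)
    void copy_name(char* dst, const char* src);   // both pointers must be non-NULL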
+
+#if defined(JSON_HEDLEY_PRINTF_FORMAT)
+ #undef JSON_HEDLEY_PRINTF_FORMAT
+#endif
+#if defined(__MINGW32__) && JSON_HEDLEY_GCC_HAS_ATTRIBUTE(format,4,4,0) && !defined(__USE_MINGW_ANSI_STDIO)
+ #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(ms_printf, string_idx, first_to_check)))
+#elif defined(__MINGW32__) && JSON_HEDLEY_GCC_HAS_ATTRIBUTE(format,4,4,0) && defined(__USE_MINGW_ANSI_STDIO)
+ #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(gnu_printf, string_idx, first_to_check)))
+#elif \
+ JSON_HEDLEY_HAS_ATTRIBUTE(format) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
+ #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(__printf__, string_idx, first_to_check)))
+#elif JSON_HEDLEY_PELLES_VERSION_CHECK(6,0,0)
+ #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __declspec(vaformat(printf,string_idx,first_to_check))
+#else
+ #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check)
+#endif
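PRINTF_FORMAT asks the compiler to type-check printf-style arguments against the format string. A sketch on a hypothetical logger, where the format string is parameter 2 and the variadic arguments start at parameter 3:

    JSON_HEDLEY_PRINTF_FORMAT(2, 3)
    void log_message(int level, const char* fmt, ...);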
+
+#if defined(JSON_HEDLEY_CONSTEXPR)
+ #undef JSON_HEDLEY_CONSTEXPR
+#endif
+#if defined(__cplusplus)
+ #if __cplusplus >= 201103L
+ #define JSON_HEDLEY_CONSTEXPR JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(constexpr)
+ #endif
+#endif
+#if !defined(JSON_HEDLEY_CONSTEXPR)
+ #define JSON_HEDLEY_CONSTEXPR
+#endif
+
+#if defined(JSON_HEDLEY_PREDICT)
+ #undef JSON_HEDLEY_PREDICT
+#endif
+#if defined(JSON_HEDLEY_LIKELY)
+ #undef JSON_HEDLEY_LIKELY
+#endif
+#if defined(JSON_HEDLEY_UNLIKELY)
+ #undef JSON_HEDLEY_UNLIKELY
+#endif
+#if defined(JSON_HEDLEY_UNPREDICTABLE)
+ #undef JSON_HEDLEY_UNPREDICTABLE
+#endif
+#if JSON_HEDLEY_HAS_BUILTIN(__builtin_unpredictable)
+ #define JSON_HEDLEY_UNPREDICTABLE(expr) __builtin_unpredictable((expr))
+#endif
+#if \
+ JSON_HEDLEY_HAS_BUILTIN(__builtin_expect_with_probability) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(9,0,0)
+# define JSON_HEDLEY_PREDICT(expr, value, probability) __builtin_expect_with_probability( (expr), (value), (probability))
+# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) __builtin_expect_with_probability(!!(expr), 1 , (probability))
+# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) __builtin_expect_with_probability(!!(expr), 0 , (probability))
+# define JSON_HEDLEY_LIKELY(expr) __builtin_expect (!!(expr), 1 )
+# define JSON_HEDLEY_UNLIKELY(expr) __builtin_expect (!!(expr), 0 )
+#elif \
+ JSON_HEDLEY_HAS_BUILTIN(__builtin_expect) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0) && defined(__cplusplus)) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,7,0) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(3,1,0) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,1,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
+ JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,27) || \
+ JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0)
+# define JSON_HEDLEY_PREDICT(expr, expected, probability) \
+ (((probability) >= 0.9) ? __builtin_expect((expr), (expected)) : (JSON_HEDLEY_STATIC_CAST(void, expected), (expr)))
+# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) \
+ (__extension__ ({ \
+ double hedley_probability_ = (probability); \
+ ((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 1) : ((hedley_probability_ <= 0.1) ? __builtin_expect(!!(expr), 0) : !!(expr))); \
+ }))
+# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) \
+ (__extension__ ({ \
+ double hedley_probability_ = (probability); \
+ ((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 0) : ((hedley_probability_ <= 0.1) ? __builtin_expect(!!(expr), 1) : !!(expr))); \
+ }))
+# define JSON_HEDLEY_LIKELY(expr) __builtin_expect(!!(expr), 1)
+# define JSON_HEDLEY_UNLIKELY(expr) __builtin_expect(!!(expr), 0)
+#else
+# define JSON_HEDLEY_PREDICT(expr, expected, probability) (JSON_HEDLEY_STATIC_CAST(void, expected), (expr))
+# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) (!!(expr))
+# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) (!!(expr))
+# define JSON_HEDLEY_LIKELY(expr) (!!(expr))
+# define JSON_HEDLEY_UNLIKELY(expr) (!!(expr))
+#endif
+#if !defined(JSON_HEDLEY_UNPREDICTABLE)
+ #define JSON_HEDLEY_UNPREDICTABLE(expr) JSON_HEDLEY_PREDICT(expr, 1, 0.5)
+#endif
+
+#if defined(JSON_HEDLEY_MALLOC)
+ #undef JSON_HEDLEY_MALLOC
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(malloc) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(12,1,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
+ #define JSON_HEDLEY_MALLOC __attribute__((__malloc__))
+#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0)
+ #define JSON_HEDLEY_MALLOC _Pragma("returns_new_memory")
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(14, 0, 0)
+ #define JSON_HEDLEY_MALLOC __declspec(restrict)
+#else
+ #define JSON_HEDLEY_MALLOC
+#endif
+
+#if defined(JSON_HEDLEY_PURE)
+ #undef JSON_HEDLEY_PURE
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(pure) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(2,96,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
+ JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0)
+# define JSON_HEDLEY_PURE __attribute__((__pure__))
+#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0)
+# define JSON_HEDLEY_PURE _Pragma("does_not_write_global_data")
+#elif defined(__cplusplus) && \
+ ( \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(2,0,1) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) \
+ )
+# define JSON_HEDLEY_PURE _Pragma("FUNC_IS_PURE;")
+#else
+# define JSON_HEDLEY_PURE
+#endif
+
+#if defined(JSON_HEDLEY_CONST)
+ #undef JSON_HEDLEY_CONST
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(const) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(2,5,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
+ JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0)
+ #define JSON_HEDLEY_CONST __attribute__((__const__))
+#elif \
+ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0)
+ #define JSON_HEDLEY_CONST _Pragma("no_side_effect")
+#else
+ #define JSON_HEDLEY_CONST JSON_HEDLEY_PURE
+#endif
+
+#if defined(JSON_HEDLEY_RESTRICT)
+ #undef JSON_HEDLEY_RESTRICT
+#endif
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && !defined(__cplusplus)
+ #define JSON_HEDLEY_RESTRICT restrict
+#elif \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \
+ JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
+ JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,4) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,1,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus)) || \
+ JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) || \
+ defined(__clang__)
+ #define JSON_HEDLEY_RESTRICT __restrict
+#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,3,0) && !defined(__cplusplus)
+ #define JSON_HEDLEY_RESTRICT _Restrict
+#else
+ #define JSON_HEDLEY_RESTRICT
+#endif
+
+#if defined(JSON_HEDLEY_INLINE)
+ #undef JSON_HEDLEY_INLINE
+#endif
+#if \
+ (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \
+ (defined(__cplusplus) && (__cplusplus >= 199711L))
+ #define JSON_HEDLEY_INLINE inline
+#elif \
+ defined(JSON_HEDLEY_GCC_VERSION) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(6,2,0)
+ #define JSON_HEDLEY_INLINE __inline__
+#elif \
+ JSON_HEDLEY_MSVC_VERSION_CHECK(12,0,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,1,0) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(3,1,0) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
+ #define JSON_HEDLEY_INLINE __inline
+#else
+ #define JSON_HEDLEY_INLINE
+#endif
+
+#if defined(JSON_HEDLEY_ALWAYS_INLINE)
+ #undef JSON_HEDLEY_ALWAYS_INLINE
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(always_inline) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
+# define JSON_HEDLEY_ALWAYS_INLINE __attribute__((__always_inline__)) JSON_HEDLEY_INLINE
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(12,0,0)
+# define JSON_HEDLEY_ALWAYS_INLINE __forceinline
+#elif defined(__cplusplus) && \
+ ( \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) \
+ )
+# define JSON_HEDLEY_ALWAYS_INLINE _Pragma("FUNC_ALWAYS_INLINE;")
+#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0)
+# define JSON_HEDLEY_ALWAYS_INLINE _Pragma("inline=forced")
+#else
+# define JSON_HEDLEY_ALWAYS_INLINE JSON_HEDLEY_INLINE
+#endif
+
+#if defined(JSON_HEDLEY_NEVER_INLINE)
+ #undef JSON_HEDLEY_NEVER_INLINE
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(noinline) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
+ #define JSON_HEDLEY_NEVER_INLINE __attribute__((__noinline__))
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0)
+ #define JSON_HEDLEY_NEVER_INLINE __declspec(noinline)
+#elif JSON_HEDLEY_PGI_VERSION_CHECK(10,2,0)
+ #define JSON_HEDLEY_NEVER_INLINE _Pragma("noinline")
+#elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,0,0) && defined(__cplusplus)
+ #define JSON_HEDLEY_NEVER_INLINE _Pragma("FUNC_CANNOT_INLINE;")
+#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0)
+ #define JSON_HEDLEY_NEVER_INLINE _Pragma("inline=never")
+#elif JSON_HEDLEY_COMPCERT_VERSION_CHECK(3,2,0)
+ #define JSON_HEDLEY_NEVER_INLINE __attribute((noinline))
+#elif JSON_HEDLEY_PELLES_VERSION_CHECK(9,0,0)
+ #define JSON_HEDLEY_NEVER_INLINE __declspec(noinline)
+#else
+ #define JSON_HEDLEY_NEVER_INLINE
+#endif
+
+#if defined(JSON_HEDLEY_PRIVATE)
+ #undef JSON_HEDLEY_PRIVATE
+#endif
+#if defined(JSON_HEDLEY_PUBLIC)
+ #undef JSON_HEDLEY_PUBLIC
+#endif
+#if defined(JSON_HEDLEY_IMPORT)
+ #undef JSON_HEDLEY_IMPORT
+#endif
+#if defined(_WIN32) || defined(__CYGWIN__)
+# define JSON_HEDLEY_PRIVATE
+# define JSON_HEDLEY_PUBLIC __declspec(dllexport)
+# define JSON_HEDLEY_IMPORT __declspec(dllimport)
+#else
+# if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(visibility) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \
+ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \
+ ( \
+ defined(__TI_EABI__) && \
+ ( \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) \
+ ) \
+ )
+# define JSON_HEDLEY_PRIVATE __attribute__((__visibility__("hidden")))
+# define JSON_HEDLEY_PUBLIC __attribute__((__visibility__("default")))
+# else
+# define JSON_HEDLEY_PRIVATE
+# define JSON_HEDLEY_PUBLIC
+# endif
+# define JSON_HEDLEY_IMPORT extern
+#endif
+
+#if defined(JSON_HEDLEY_NO_THROW)
+ #undef JSON_HEDLEY_NO_THROW
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(nothrow) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+ #define JSON_HEDLEY_NO_THROW __attribute__((__nothrow__))
+#elif \
+ JSON_HEDLEY_MSVC_VERSION_CHECK(13,1,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0)
+ #define JSON_HEDLEY_NO_THROW __declspec(nothrow)
+#else
+ #define JSON_HEDLEY_NO_THROW
+#endif
+
+#if defined(JSON_HEDLEY_FALL_THROUGH)
+ #undef JSON_HEDLEY_FALL_THROUGH
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(fallthrough) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(7,0,0)
+ #define JSON_HEDLEY_FALL_THROUGH __attribute__((__fallthrough__))
+#elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(clang,fallthrough)
+ #define JSON_HEDLEY_FALL_THROUGH JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[clang::fallthrough]])
+#elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE(fallthrough)
+ #define JSON_HEDLEY_FALL_THROUGH JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[fallthrough]])
+#elif defined(__fallthrough) /* SAL */
+ #define JSON_HEDLEY_FALL_THROUGH __fallthrough
+#else
+ #define JSON_HEDLEY_FALL_THROUGH
+#endif
+
+#if defined(JSON_HEDLEY_RETURNS_NON_NULL)
+ #undef JSON_HEDLEY_RETURNS_NON_NULL
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(returns_nonnull) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,9,0)
+ #define JSON_HEDLEY_RETURNS_NON_NULL __attribute__((__returns_nonnull__))
+#elif defined(_Ret_notnull_) /* SAL */
+ #define JSON_HEDLEY_RETURNS_NON_NULL _Ret_notnull_
+#else
+ #define JSON_HEDLEY_RETURNS_NON_NULL
+#endif
+
+#if defined(JSON_HEDLEY_ARRAY_PARAM)
+ #undef JSON_HEDLEY_ARRAY_PARAM
+#endif
+#if \
+ defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \
+ !defined(__STDC_NO_VLA__) && \
+ !defined(__cplusplus) && \
+ !defined(JSON_HEDLEY_PGI_VERSION) && \
+ !defined(JSON_HEDLEY_TINYC_VERSION)
+ #define JSON_HEDLEY_ARRAY_PARAM(name) (name)
+#else
+ #define JSON_HEDLEY_ARRAY_PARAM(name)
+#endif
+
+#if defined(JSON_HEDLEY_IS_CONSTANT)
+ #undef JSON_HEDLEY_IS_CONSTANT
+#endif
+#if defined(JSON_HEDLEY_REQUIRE_CONSTEXPR)
+ #undef JSON_HEDLEY_REQUIRE_CONSTEXPR
+#endif
+/* JSON_HEDLEY_IS_CONSTEXPR_ is for
+ HEDLEY INTERNAL USE ONLY. API subject to change without notice. */
+#if defined(JSON_HEDLEY_IS_CONSTEXPR_)
+ #undef JSON_HEDLEY_IS_CONSTEXPR_
+#endif
+#if \
+ JSON_HEDLEY_HAS_BUILTIN(__builtin_constant_p) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,19) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \
+ (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) && !defined(__cplusplus)) || \
+ JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0)
+ #define JSON_HEDLEY_IS_CONSTANT(expr) __builtin_constant_p(expr)
+#endif
+#if !defined(__cplusplus)
+# if \
+ JSON_HEDLEY_HAS_BUILTIN(__builtin_types_compatible_p) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \
+ JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(5,4,0) || \
+ JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,24)
+#if defined(__INTPTR_TYPE__)
+ #define JSON_HEDLEY_IS_CONSTEXPR_(expr) __builtin_types_compatible_p(__typeof__((1 ? (void*) ((__INTPTR_TYPE__) ((expr) * 0)) : (int*) 0)), int*)
+#else
+ #include <stdint.h>
+ #define JSON_HEDLEY_IS_CONSTEXPR_(expr) __builtin_types_compatible_p(__typeof__((1 ? (void*) ((intptr_t) ((expr) * 0)) : (int*) 0)), int*)
+#endif
+# elif \
+ ( \
+ defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && \
+ !defined(JSON_HEDLEY_SUNPRO_VERSION) && \
+ !defined(JSON_HEDLEY_PGI_VERSION) && \
+ !defined(JSON_HEDLEY_IAR_VERSION)) || \
+ JSON_HEDLEY_HAS_EXTENSION(c_generic_selections) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,9,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(17,0,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(12,1,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(5,3,0)
+#if defined(__INTPTR_TYPE__)
+ #define JSON_HEDLEY_IS_CONSTEXPR_(expr) _Generic((1 ? (void*) ((__INTPTR_TYPE__) ((expr) * 0)) : (int*) 0), int*: 1, void*: 0)
+#else
+ #include <stdint.h>
+    #define JSON_HEDLEY_IS_CONSTEXPR_(expr) _Generic((1 ? (void*) ((intptr_t) ((expr) * 0)) : (int*) 0), int*: 1, void*: 0)
+#endif
+# elif \
+ defined(JSON_HEDLEY_GCC_VERSION) || \
+ defined(JSON_HEDLEY_INTEL_VERSION) || \
+ defined(JSON_HEDLEY_TINYC_VERSION) || \
+ defined(JSON_HEDLEY_TI_ARMCL_VERSION) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(18,12,0) || \
+ defined(JSON_HEDLEY_TI_CL2000_VERSION) || \
+ defined(JSON_HEDLEY_TI_CL6X_VERSION) || \
+ defined(JSON_HEDLEY_TI_CL7X_VERSION) || \
+ defined(JSON_HEDLEY_TI_CLPRU_VERSION) || \
+ defined(__clang__)
+# define JSON_HEDLEY_IS_CONSTEXPR_(expr) ( \
+ sizeof(void) != \
+ sizeof(*( \
+ 1 ? \
+ ((void*) ((expr) * 0L) ) : \
+((struct { char v[sizeof(void) * 2]; } *) 1) \
+ ) \
+ ) \
+ )
+# endif
+#endif
+#if defined(JSON_HEDLEY_IS_CONSTEXPR_)
+ #if !defined(JSON_HEDLEY_IS_CONSTANT)
+ #define JSON_HEDLEY_IS_CONSTANT(expr) JSON_HEDLEY_IS_CONSTEXPR_(expr)
+ #endif
+ #define JSON_HEDLEY_REQUIRE_CONSTEXPR(expr) (JSON_HEDLEY_IS_CONSTEXPR_(expr) ? (expr) : (-1))
+#else
+ #if !defined(JSON_HEDLEY_IS_CONSTANT)
+ #define JSON_HEDLEY_IS_CONSTANT(expr) (0)
+ #endif
+ #define JSON_HEDLEY_REQUIRE_CONSTEXPR(expr) (expr)
+#endif
+
+#if defined(JSON_HEDLEY_BEGIN_C_DECLS)
+ #undef JSON_HEDLEY_BEGIN_C_DECLS
+#endif
+#if defined(JSON_HEDLEY_END_C_DECLS)
+ #undef JSON_HEDLEY_END_C_DECLS
+#endif
+#if defined(JSON_HEDLEY_C_DECL)
+ #undef JSON_HEDLEY_C_DECL
+#endif
+#if defined(__cplusplus)
+ #define JSON_HEDLEY_BEGIN_C_DECLS extern "C" {
+ #define JSON_HEDLEY_END_C_DECLS }
+ #define JSON_HEDLEY_C_DECL extern "C"
+#else
+ #define JSON_HEDLEY_BEGIN_C_DECLS
+ #define JSON_HEDLEY_END_C_DECLS
+ #define JSON_HEDLEY_C_DECL
+#endif
+
+#if defined(JSON_HEDLEY_STATIC_ASSERT)
+ #undef JSON_HEDLEY_STATIC_ASSERT
+#endif
+#if \
+ !defined(__cplusplus) && ( \
+ (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) || \
+ JSON_HEDLEY_HAS_FEATURE(c_static_assert) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(6,0,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ defined(_Static_assert) \
+ )
+# define JSON_HEDLEY_STATIC_ASSERT(expr, message) _Static_assert(expr, message)
+#elif \
+ (defined(__cplusplus) && (__cplusplus >= 201103L)) || \
+ JSON_HEDLEY_MSVC_VERSION_CHECK(16,0,0)
+# define JSON_HEDLEY_STATIC_ASSERT(expr, message) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(static_assert(expr, message))
+#else
+# define JSON_HEDLEY_STATIC_ASSERT(expr, message)
+#endif
+
+#if defined(JSON_HEDLEY_NULL)
+ #undef JSON_HEDLEY_NULL
+#endif
+#if defined(__cplusplus)
+ #if __cplusplus >= 201103L
+ #define JSON_HEDLEY_NULL JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(nullptr)
+ #elif defined(NULL)
+ #define JSON_HEDLEY_NULL NULL
+ #else
+ #define JSON_HEDLEY_NULL JSON_HEDLEY_STATIC_CAST(void*, 0)
+ #endif
+#elif defined(NULL)
+ #define JSON_HEDLEY_NULL NULL
+#else
+ #define JSON_HEDLEY_NULL ((void*) 0)
+#endif
+
+#if defined(JSON_HEDLEY_MESSAGE)
+ #undef JSON_HEDLEY_MESSAGE
+#endif
+#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas")
+# define JSON_HEDLEY_MESSAGE(msg) \
+ JSON_HEDLEY_DIAGNOSTIC_PUSH \
+ JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS \
+ JSON_HEDLEY_PRAGMA(message msg) \
+ JSON_HEDLEY_DIAGNOSTIC_POP
+#elif \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,4,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message msg)
+#elif JSON_HEDLEY_CRAY_VERSION_CHECK(5,0,0)
+# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(_CRI message msg)
+#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0)
+# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message(msg))
+#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,0,0)
+# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message(msg))
+#else
+# define JSON_HEDLEY_MESSAGE(msg)
+#endif
+
+#if defined(JSON_HEDLEY_WARNING)
+ #undef JSON_HEDLEY_WARNING
+#endif
+#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas")
+# define JSON_HEDLEY_WARNING(msg) \
+ JSON_HEDLEY_DIAGNOSTIC_PUSH \
+ JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS \
+ JSON_HEDLEY_PRAGMA(clang warning msg) \
+ JSON_HEDLEY_DIAGNOSTIC_POP
+#elif \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,8,0) || \
+ JSON_HEDLEY_PGI_VERSION_CHECK(18,4,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+# define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_PRAGMA(GCC warning msg)
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0)
+# define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_PRAGMA(message(msg))
+#else
+# define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_MESSAGE(msg)
+#endif
+
+#if defined(JSON_HEDLEY_REQUIRE)
+ #undef JSON_HEDLEY_REQUIRE
+#endif
+#if defined(JSON_HEDLEY_REQUIRE_MSG)
+ #undef JSON_HEDLEY_REQUIRE_MSG
+#endif
+#if JSON_HEDLEY_HAS_ATTRIBUTE(diagnose_if)
+# if JSON_HEDLEY_HAS_WARNING("-Wgcc-compat")
+# define JSON_HEDLEY_REQUIRE(expr) \
+ JSON_HEDLEY_DIAGNOSTIC_PUSH \
+ _Pragma("clang diagnostic ignored \"-Wgcc-compat\"") \
+ __attribute__((diagnose_if(!(expr), #expr, "error"))) \
+ JSON_HEDLEY_DIAGNOSTIC_POP
+# define JSON_HEDLEY_REQUIRE_MSG(expr,msg) \
+ JSON_HEDLEY_DIAGNOSTIC_PUSH \
+ _Pragma("clang diagnostic ignored \"-Wgcc-compat\"") \
+ __attribute__((diagnose_if(!(expr), msg, "error"))) \
+ JSON_HEDLEY_DIAGNOSTIC_POP
+# else
+# define JSON_HEDLEY_REQUIRE(expr) __attribute__((diagnose_if(!(expr), #expr, "error")))
+# define JSON_HEDLEY_REQUIRE_MSG(expr,msg) __attribute__((diagnose_if(!(expr), msg, "error")))
+# endif
+#else
+# define JSON_HEDLEY_REQUIRE(expr)
+# define JSON_HEDLEY_REQUIRE_MSG(expr,msg)
+#endif
+
+#if defined(JSON_HEDLEY_FLAGS)
+ #undef JSON_HEDLEY_FLAGS
+#endif
+#if JSON_HEDLEY_HAS_ATTRIBUTE(flag_enum)
+ #define JSON_HEDLEY_FLAGS __attribute__((__flag_enum__))
+#endif
+
+#if defined(JSON_HEDLEY_FLAGS_CAST)
+ #undef JSON_HEDLEY_FLAGS_CAST
+#endif
+#if JSON_HEDLEY_INTEL_VERSION_CHECK(19,0,0)
+# define JSON_HEDLEY_FLAGS_CAST(T, expr) (__extension__ ({ \
+ JSON_HEDLEY_DIAGNOSTIC_PUSH \
+ _Pragma("warning(disable:188)") \
+ ((T) (expr)); \
+ JSON_HEDLEY_DIAGNOSTIC_POP \
+ }))
+#else
+# define JSON_HEDLEY_FLAGS_CAST(T, expr) JSON_HEDLEY_STATIC_CAST(T, expr)
+#endif
+
+#if defined(JSON_HEDLEY_EMPTY_BASES)
+ #undef JSON_HEDLEY_EMPTY_BASES
+#endif
+#if JSON_HEDLEY_MSVC_VERSION_CHECK(19,0,23918) && !JSON_HEDLEY_MSVC_VERSION_CHECK(20,0,0)
+ #define JSON_HEDLEY_EMPTY_BASES __declspec(empty_bases)
+#else
+ #define JSON_HEDLEY_EMPTY_BASES
+#endif
+
+/* Remaining macros are deprecated. */
+
+#if defined(JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK)
+ #undef JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK
+#endif
+#if defined(__clang__)
+ #define JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major,minor,patch) (0)
+#else
+ #define JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_CLANG_HAS_ATTRIBUTE)
+ #undef JSON_HEDLEY_CLANG_HAS_ATTRIBUTE
+#endif
+#define JSON_HEDLEY_CLANG_HAS_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_ATTRIBUTE(attribute)
+
+#if defined(JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE)
+ #undef JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE
+#endif
+#define JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute)
+
+#if defined(JSON_HEDLEY_CLANG_HAS_BUILTIN)
+ #undef JSON_HEDLEY_CLANG_HAS_BUILTIN
+#endif
+#define JSON_HEDLEY_CLANG_HAS_BUILTIN(builtin) JSON_HEDLEY_HAS_BUILTIN(builtin)
+
+#if defined(JSON_HEDLEY_CLANG_HAS_FEATURE)
+ #undef JSON_HEDLEY_CLANG_HAS_FEATURE
+#endif
+#define JSON_HEDLEY_CLANG_HAS_FEATURE(feature) JSON_HEDLEY_HAS_FEATURE(feature)
+
+#if defined(JSON_HEDLEY_CLANG_HAS_EXTENSION)
+ #undef JSON_HEDLEY_CLANG_HAS_EXTENSION
+#endif
+#define JSON_HEDLEY_CLANG_HAS_EXTENSION(extension) JSON_HEDLEY_HAS_EXTENSION(extension)
+
+#if defined(JSON_HEDLEY_CLANG_HAS_DECLSPEC_ATTRIBUTE)
+    #undef JSON_HEDLEY_CLANG_HAS_DECLSPEC_ATTRIBUTE
+#endif
+#define JSON_HEDLEY_CLANG_HAS_DECLSPEC_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute)
+
+#if defined(JSON_HEDLEY_CLANG_HAS_WARNING)
+ #undef JSON_HEDLEY_CLANG_HAS_WARNING
+#endif
+#define JSON_HEDLEY_CLANG_HAS_WARNING(warning) JSON_HEDLEY_HAS_WARNING(warning)
+
+#endif /* !defined(JSON_HEDLEY_VERSION) || (JSON_HEDLEY_VERSION < X) */
+
+
+// This file contains all internal macro definitions
+// You MUST include macro_unscope.hpp at the end of json.hpp to undef all of them
+
+// exclude unsupported compilers
+#if !defined(JSON_SKIP_UNSUPPORTED_COMPILER_CHECK)
+ #if defined(__clang__)
+ #if (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) < 30400
+ #error "unsupported Clang version - see https://github.com/nlohmann/json#supported-compilers"
+ #endif
+ #elif defined(__GNUC__) && !(defined(__ICC) || defined(__INTEL_COMPILER))
+ #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40800
+ #error "unsupported GCC version - see https://github.com/nlohmann/json#supported-compilers"
+ #endif
+ #endif
+#endif
+
+// C++ language standard detection
+#if (defined(__cplusplus) && __cplusplus >= 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
+ #define JSON_HAS_CPP_20
+ #define JSON_HAS_CPP_17
+ #define JSON_HAS_CPP_14
+#elif (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464
+ #define JSON_HAS_CPP_17
+ #define JSON_HAS_CPP_14
+#elif (defined(__cplusplus) && __cplusplus >= 201402L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1)
+ #define JSON_HAS_CPP_14
+#endif
+
+// disable float-equal warnings on GCC/clang
+#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__)
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+
+// disable documentation warnings on clang
+#if defined(__clang__)
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wdocumentation"
+#endif
+
+// allow disabling exceptions
+#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)) && !defined(JSON_NOEXCEPTION)
+ #define JSON_THROW(exception) throw exception
+ #define JSON_TRY try
+ #define JSON_CATCH(exception) catch(exception)
+ #define JSON_INTERNAL_CATCH(exception) catch(exception)
+#else
+ #include <cstdlib>
+ #define JSON_THROW(exception) std::abort()
+ #define JSON_TRY if(true)
+ #define JSON_CATCH(exception) if(false)
+ #define JSON_INTERNAL_CATCH(exception) if(false)
+#endif
+
+// override exception macros
+#if defined(JSON_THROW_USER)
+ #undef JSON_THROW
+ #define JSON_THROW JSON_THROW_USER
+#endif
+#if defined(JSON_TRY_USER)
+ #undef JSON_TRY
+ #define JSON_TRY JSON_TRY_USER
+#endif
+#if defined(JSON_CATCH_USER)
+ #undef JSON_CATCH
+ #define JSON_CATCH JSON_CATCH_USER
+ #undef JSON_INTERNAL_CATCH
+ #define JSON_INTERNAL_CATCH JSON_CATCH_USER
+#endif
+#if defined(JSON_INTERNAL_CATCH_USER)
+ #undef JSON_INTERNAL_CATCH
+ #define JSON_INTERNAL_CATCH JSON_INTERNAL_CATCH_USER
+#endif
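+
+// Illustrative sketch only (hypothetical user code, not part of this header):
+// the *_USER hooks above are meant to be defined before json.hpp is included,
+// for example to turn every library error into a logged abort:
+//
+//   #define JSON_TRY_USER if (true)
+//   #define JSON_CATCH_USER(exception) if (false)
+//   #define JSON_THROW_USER(exception) { std::printf("json error\n"); std::abort(); }
+//   #include <nlohmann/json.hpp>   // include path assumed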
+
+// allow overriding assert
+#if !defined(JSON_ASSERT)
+ #include <cassert> // assert
+ #define JSON_ASSERT(x) assert(x)
+#endif
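+
+// Likewise a sketch (hypothetical): JSON_ASSERT can be redefined before the
+// include, for instance to throw instead of calling assert():
+//
+//   #define JSON_ASSERT(x) if (!(x)) { throw std::invalid_argument("json assertion failed: " #x); }
+//   #include <nlohmann/json.hpp>   // include path assumed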
+
+/*!
+@brief macro to briefly define a mapping between an enum and JSON
+@def NLOHMANN_JSON_SERIALIZE_ENUM
+@since version 3.4.0
+*/
+#define NLOHMANN_JSON_SERIALIZE_ENUM(ENUM_TYPE, ...) \
+ template<typename BasicJsonType> \
+ inline void to_json(BasicJsonType& j, const ENUM_TYPE& e) \
+ { \
+ static_assert(std::is_enum<ENUM_TYPE>::value, #ENUM_TYPE " must be an enum!"); \
+ static const std::pair<ENUM_TYPE, BasicJsonType> m[] = __VA_ARGS__; \
+ auto it = std::find_if(std::begin(m), std::end(m), \
+ [e](const std::pair<ENUM_TYPE, BasicJsonType>& ej_pair) -> bool \
+ { \
+ return ej_pair.first == e; \
+ }); \
+ j = ((it != std::end(m)) ? it : std::begin(m))->second; \
+ } \
+ template<typename BasicJsonType> \
+ inline void from_json(const BasicJsonType& j, ENUM_TYPE& e) \
+ { \
+ static_assert(std::is_enum<ENUM_TYPE>::value, #ENUM_TYPE " must be an enum!"); \
+ static const std::pair<ENUM_TYPE, BasicJsonType> m[] = __VA_ARGS__; \
+ auto it = std::find_if(std::begin(m), std::end(m), \
+ [&j](const std::pair<ENUM_TYPE, BasicJsonType>& ej_pair) -> bool \
+ { \
+ return ej_pair.second == j; \
+ }); \
+ e = ((it != std::end(m)) ? it : std::begin(m))->first; \
+ }
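+
+// Illustrative usage sketch (the enum below is hypothetical):
+//
+//   enum class Color { unknown, red, green, blue };
+//   NLOHMANN_JSON_SERIALIZE_ENUM(Color, {
+//       {Color::unknown, nullptr},
+//       {Color::red, "red"},
+//       {Color::green, "green"},
+//       {Color::blue, "blue"},
+//   })
+//
+// Serializing Color::red then yields "red"; deserializing an unmapped value
+// falls back to the first pair of the map (Color::unknown above).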
+
+// Ugly macros to avoid uglier copy-paste when specializing basic_json. They
+// may be removed in the future once the class is split.
+
+#define NLOHMANN_BASIC_JSON_TPL_DECLARATION \
+ template<template<typename, typename, typename...> class ObjectType, \
+ template<typename, typename...> class ArrayType, \
+ class StringType, class BooleanType, class NumberIntegerType, \
+ class NumberUnsignedType, class NumberFloatType, \
+ template<typename> class AllocatorType, \
+ template<typename, typename = void> class JSONSerializer, \
+ class BinaryType>
+
+#define NLOHMANN_BASIC_JSON_TPL \
+ basic_json<ObjectType, ArrayType, StringType, BooleanType, \
+ NumberIntegerType, NumberUnsignedType, NumberFloatType, \
+ AllocatorType, JSONSerializer, BinaryType>
+
+// Macros to simplify conversion from/to types
+
+#define NLOHMANN_JSON_EXPAND( x ) x
+#define NLOHMANN_JSON_GET_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, NAME,...) NAME
+#define NLOHMANN_JSON_PASTE(...) NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_GET_MACRO(__VA_ARGS__, \
+ NLOHMANN_JSON_PASTE64, \
+ NLOHMANN_JSON_PASTE63, \
+ NLOHMANN_JSON_PASTE62, \
+ NLOHMANN_JSON_PASTE61, \
+ NLOHMANN_JSON_PASTE60, \
+ NLOHMANN_JSON_PASTE59, \
+ NLOHMANN_JSON_PASTE58, \
+ NLOHMANN_JSON_PASTE57, \
+ NLOHMANN_JSON_PASTE56, \
+ NLOHMANN_JSON_PASTE55, \
+ NLOHMANN_JSON_PASTE54, \
+ NLOHMANN_JSON_PASTE53, \
+ NLOHMANN_JSON_PASTE52, \
+ NLOHMANN_JSON_PASTE51, \
+ NLOHMANN_JSON_PASTE50, \
+ NLOHMANN_JSON_PASTE49, \
+ NLOHMANN_JSON_PASTE48, \
+ NLOHMANN_JSON_PASTE47, \
+ NLOHMANN_JSON_PASTE46, \
+ NLOHMANN_JSON_PASTE45, \
+ NLOHMANN_JSON_PASTE44, \
+ NLOHMANN_JSON_PASTE43, \
+ NLOHMANN_JSON_PASTE42, \
+ NLOHMANN_JSON_PASTE41, \
+ NLOHMANN_JSON_PASTE40, \
+ NLOHMANN_JSON_PASTE39, \
+ NLOHMANN_JSON_PASTE38, \
+ NLOHMANN_JSON_PASTE37, \
+ NLOHMANN_JSON_PASTE36, \
+ NLOHMANN_JSON_PASTE35, \
+ NLOHMANN_JSON_PASTE34, \
+ NLOHMANN_JSON_PASTE33, \
+ NLOHMANN_JSON_PASTE32, \
+ NLOHMANN_JSON_PASTE31, \
+ NLOHMANN_JSON_PASTE30, \
+ NLOHMANN_JSON_PASTE29, \
+ NLOHMANN_JSON_PASTE28, \
+ NLOHMANN_JSON_PASTE27, \
+ NLOHMANN_JSON_PASTE26, \
+ NLOHMANN_JSON_PASTE25, \
+ NLOHMANN_JSON_PASTE24, \
+ NLOHMANN_JSON_PASTE23, \
+ NLOHMANN_JSON_PASTE22, \
+ NLOHMANN_JSON_PASTE21, \
+ NLOHMANN_JSON_PASTE20, \
+ NLOHMANN_JSON_PASTE19, \
+ NLOHMANN_JSON_PASTE18, \
+ NLOHMANN_JSON_PASTE17, \
+ NLOHMANN_JSON_PASTE16, \
+ NLOHMANN_JSON_PASTE15, \
+ NLOHMANN_JSON_PASTE14, \
+ NLOHMANN_JSON_PASTE13, \
+ NLOHMANN_JSON_PASTE12, \
+ NLOHMANN_JSON_PASTE11, \
+ NLOHMANN_JSON_PASTE10, \
+ NLOHMANN_JSON_PASTE9, \
+ NLOHMANN_JSON_PASTE8, \
+ NLOHMANN_JSON_PASTE7, \
+ NLOHMANN_JSON_PASTE6, \
+ NLOHMANN_JSON_PASTE5, \
+ NLOHMANN_JSON_PASTE4, \
+ NLOHMANN_JSON_PASTE3, \
+ NLOHMANN_JSON_PASTE2, \
+ NLOHMANN_JSON_PASTE1)(__VA_ARGS__))
+#define NLOHMANN_JSON_PASTE2(func, v1) func(v1)
+#define NLOHMANN_JSON_PASTE3(func, v1, v2) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE2(func, v2)
+#define NLOHMANN_JSON_PASTE4(func, v1, v2, v3) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE3(func, v2, v3)
+#define NLOHMANN_JSON_PASTE5(func, v1, v2, v3, v4) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE4(func, v2, v3, v4)
+#define NLOHMANN_JSON_PASTE6(func, v1, v2, v3, v4, v5) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE5(func, v2, v3, v4, v5)
+#define NLOHMANN_JSON_PASTE7(func, v1, v2, v3, v4, v5, v6) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE6(func, v2, v3, v4, v5, v6)
+#define NLOHMANN_JSON_PASTE8(func, v1, v2, v3, v4, v5, v6, v7) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE7(func, v2, v3, v4, v5, v6, v7)
+#define NLOHMANN_JSON_PASTE9(func, v1, v2, v3, v4, v5, v6, v7, v8) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE8(func, v2, v3, v4, v5, v6, v7, v8)
+#define NLOHMANN_JSON_PASTE10(func, v1, v2, v3, v4, v5, v6, v7, v8, v9) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE9(func, v2, v3, v4, v5, v6, v7, v8, v9)
+#define NLOHMANN_JSON_PASTE11(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE10(func, v2, v3, v4, v5, v6, v7, v8, v9, v10)
+#define NLOHMANN_JSON_PASTE12(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE11(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11)
+#define NLOHMANN_JSON_PASTE13(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE12(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12)
+#define NLOHMANN_JSON_PASTE14(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE13(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13)
+#define NLOHMANN_JSON_PASTE15(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE14(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14)
+#define NLOHMANN_JSON_PASTE16(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE15(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15)
+#define NLOHMANN_JSON_PASTE17(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE16(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16)
+#define NLOHMANN_JSON_PASTE18(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE17(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17)
+#define NLOHMANN_JSON_PASTE19(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE18(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18)
+#define NLOHMANN_JSON_PASTE20(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE19(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19)
+#define NLOHMANN_JSON_PASTE21(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE20(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20)
+#define NLOHMANN_JSON_PASTE22(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE21(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21)
+#define NLOHMANN_JSON_PASTE23(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE22(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22)
+#define NLOHMANN_JSON_PASTE24(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE23(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23)
+#define NLOHMANN_JSON_PASTE25(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE24(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24)
+#define NLOHMANN_JSON_PASTE26(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE25(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25)
+#define NLOHMANN_JSON_PASTE27(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE26(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26)
+#define NLOHMANN_JSON_PASTE28(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE27(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27)
+#define NLOHMANN_JSON_PASTE29(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE28(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28)
+#define NLOHMANN_JSON_PASTE30(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE29(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29)
+#define NLOHMANN_JSON_PASTE31(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE30(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30)
+#define NLOHMANN_JSON_PASTE32(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE31(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31)
+#define NLOHMANN_JSON_PASTE33(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE32(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32)
+#define NLOHMANN_JSON_PASTE34(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE33(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33)
+#define NLOHMANN_JSON_PASTE35(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE34(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34)
+#define NLOHMANN_JSON_PASTE36(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE35(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35)
+#define NLOHMANN_JSON_PASTE37(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE36(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36)
+#define NLOHMANN_JSON_PASTE38(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE37(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37)
+#define NLOHMANN_JSON_PASTE39(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE38(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38)
+#define NLOHMANN_JSON_PASTE40(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE39(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39)
+#define NLOHMANN_JSON_PASTE41(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE40(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40)
+#define NLOHMANN_JSON_PASTE42(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE41(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41)
+#define NLOHMANN_JSON_PASTE43(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE42(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42)
+#define NLOHMANN_JSON_PASTE44(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE43(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43)
+#define NLOHMANN_JSON_PASTE45(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE44(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44)
+#define NLOHMANN_JSON_PASTE46(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE45(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45)
+#define NLOHMANN_JSON_PASTE47(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE46(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46)
+#define NLOHMANN_JSON_PASTE48(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE47(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47)
+#define NLOHMANN_JSON_PASTE49(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE48(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48)
+#define NLOHMANN_JSON_PASTE50(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE49(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49)
+#define NLOHMANN_JSON_PASTE51(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE50(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50)
+#define NLOHMANN_JSON_PASTE52(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE51(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51)
+#define NLOHMANN_JSON_PASTE53(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE52(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52)
+#define NLOHMANN_JSON_PASTE54(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE53(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53)
+#define NLOHMANN_JSON_PASTE55(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE54(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54)
+#define NLOHMANN_JSON_PASTE56(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE55(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55)
+#define NLOHMANN_JSON_PASTE57(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE56(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56)
+#define NLOHMANN_JSON_PASTE58(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE57(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57)
+#define NLOHMANN_JSON_PASTE59(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE58(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58)
+#define NLOHMANN_JSON_PASTE60(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE59(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59)
+#define NLOHMANN_JSON_PASTE61(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE60(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60)
+#define NLOHMANN_JSON_PASTE62(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE61(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61)
+#define NLOHMANN_JSON_PASTE63(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE62(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62)
+#define NLOHMANN_JSON_PASTE64(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62, v63) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE63(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62, v63)
+
+#define NLOHMANN_JSON_TO(v1) nlohmann_json_j[#v1] = nlohmann_json_t.v1;
+#define NLOHMANN_JSON_FROM(v1) nlohmann_json_j.at(#v1).get_to(nlohmann_json_t.v1);
+
+/*!
+@brief macro to define to_json()/from_json() functions as friends inside a
+       class, so that private members can be serialized
+@def NLOHMANN_DEFINE_TYPE_INTRUSIVE
+@since version 3.9.0
+*/
+#define NLOHMANN_DEFINE_TYPE_INTRUSIVE(Type, ...) \
+ friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \
+ friend void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) }
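+
+// Illustrative usage sketch (hypothetical type): placed inside the class, the
+// generated friend functions may also read private members.
+//
+//   namespace ns {
+//   class person {
+//       std::string name;
+//       int age = 0;
+//     public:
+//       person() = default;
+//       NLOHMANN_DEFINE_TYPE_INTRUSIVE(person, name, age)
+//   };
+//   }
+//
+//   // nlohmann::json j = ns::person{};   and   auto p = j.get<ns::person>();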
+
+/*!
+@brief macro to define to_json()/from_json() functions outside a class; only
+       public members of the type are accessible
+@def NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE
+@since version 3.9.0
+*/
+#define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Type, ...) \
+ inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \
+ inline void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) }
+
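+// Usage sketch (illustrative only; the struct `person` below is a hypothetical
+// user-defined type, not part of this header):
+//
+//     struct person {
+//         std::string name;
+//         int age;
+//         NLOHMANN_DEFINE_TYPE_INTRUSIVE(person, name, age)
+//     };
+//
+//     nlohmann::json j = person{"Ann", 42};  // uses the generated to_json
+//     person p = j.get<person>();            // uses the generated from_json
+//
+// NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE takes the same arguments but is placed in
+// the type's namespace instead of inside the class body.
+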
+#ifndef JSON_USE_IMPLICIT_CONVERSIONS
+ #define JSON_USE_IMPLICIT_CONVERSIONS 1
+#endif
+
+#if JSON_USE_IMPLICIT_CONVERSIONS
+ #define JSON_EXPLICIT
+#else
+ #define JSON_EXPLICIT explicit
+#endif
+
+
+namespace nlohmann
+{
+namespace detail
+{
+////////////////
+// exceptions //
+////////////////
+
+/*!
+@brief general exception of the @ref basic_json class
+
+This class is an extension of `std::exception` objects with a member @a id for
+exception ids. It is used as the base class for all exceptions thrown by the
+@ref basic_json class. This class can hence be used as a "wildcard" to catch
+exceptions.
+
+Subclasses:
+- @ref parse_error for exceptions indicating a parse error
+- @ref invalid_iterator for exceptions indicating errors with iterators
+- @ref type_error for exceptions indicating executing a member function with
+ a wrong type
+- @ref out_of_range for exceptions indicating access out of the defined range
+- @ref other_error for exceptions indicating other library errors
+
+@internal
+@note To have nothrow-copy-constructible exceptions, we internally use
+ `std::runtime_error` which can cope with arbitrary-length error messages.
+ Intermediate strings are built with static functions and then passed to
+ the actual constructor.
+@endinternal
+
+@liveexample{The following code shows how arbitrary library exceptions can be
+caught.,exception}
+
+@since version 3.0.0
+*/
+class exception : public std::exception
+{
+ public:
+ /// returns the explanatory string
+ JSON_HEDLEY_RETURNS_NON_NULL
+ const char* what() const noexcept override
+ {
+ return m.what();
+ }
+
+ /// the id of the exception
+ const int id;
+
+ protected:
+ JSON_HEDLEY_NON_NULL(3)
+ exception(int id_, const char* what_arg) : id(id_), m(what_arg) {}
+
+ static std::string name(const std::string& ename, int id_)
+ {
+ return "[json.exception." + ename + "." + std::to_string(id_) + "] ";
+ }
+
+ private:
+ /// an exception object as storage for error messages
+ std::runtime_error m;
+};
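+
+// Catch-all usage sketch (illustrative only): since every exception thrown by
+// the library derives from this class, a single handler can report both the
+// message and the numeric id:
+//
+//     try {
+//         auto j = nlohmann::json::parse("[1, 2");             // malformed input
+//     } catch (const nlohmann::json::exception& e) {
+//         std::cerr << e.what() << " (id " << e.id << ")\n";   // needs <iostream>
+//     }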
+
+/*!
+@brief exception indicating a parse error
+
+This exception is thrown by the library when a parse error occurs. Parse errors
+can occur during the deserialization of JSON text, CBOR, MessagePack, as well
+as when using JSON Patch.
+
+Member @a byte holds the byte index of the last read character in the input
+file.
+
+Exceptions have ids 1xx.
+
+name / id | example message | description
+------------------------------ | --------------- | -------------------------
+json.exception.parse_error.101 | parse error at 2: unexpected end of input; expected string literal | This error indicates a syntax error while deserializing a JSON text. The error message describes that an unexpected token (character) was encountered, and the member @a byte indicates the error position.
+json.exception.parse_error.102 | parse error at 14: missing or wrong low surrogate | JSON uses the `\uxxxx` format to describe Unicode characters. Code points above 0xFFFF are split into two `\uxxxx` entries ("surrogate pairs"). This error indicates that the surrogate pair is incomplete or contains an invalid code point.
+json.exception.parse_error.103 | parse error: code points above 0x10FFFF are invalid | Unicode supports code points up to 0x10FFFF. Code points above 0x10FFFF are invalid.
+json.exception.parse_error.104 | parse error: JSON patch must be an array of objects | [RFC 6902](https://tools.ietf.org/html/rfc6902) requires a JSON Patch document to be a JSON document that represents an array of objects.
+json.exception.parse_error.105 | parse error: operation must have string member 'op' | An operation of a JSON Patch document must contain exactly one "op" member, whose value indicates the operation to perform. Its value must be one of "add", "remove", "replace", "move", "copy", or "test"; other values are errors.
+json.exception.parse_error.106 | parse error: array index '01' must not begin with '0' | An array index in a JSON Pointer ([RFC 6901](https://tools.ietf.org/html/rfc6901)) may be `0` or any number without a leading `0`.
+json.exception.parse_error.107 | parse error: JSON pointer must be empty or begin with '/' - was: 'foo' | A JSON Pointer must be a Unicode string containing a sequence of zero or more reference tokens, each prefixed by a `/` character.
+json.exception.parse_error.108 | parse error: escape character '~' must be followed with '0' or '1' | In a JSON Pointer, only `~0` and `~1` are valid escape sequences.
+json.exception.parse_error.109 | parse error: array index 'one' is not a number | A JSON Pointer array index must be a number.
+json.exception.parse_error.110 | parse error at 1: cannot read 2 bytes from vector | When parsing CBOR or MessagePack, the byte vector ends before the complete value has been read.
+json.exception.parse_error.112 | parse error at 1: error reading CBOR; last byte: 0xF8 | Not all types of CBOR or MessagePack are supported. This exception occurs if an unsupported byte was read.
+json.exception.parse_error.113 | parse error at 2: expected a CBOR string; last byte: 0x98 | While parsing a map key, a value that is not a string has been read.
+json.exception.parse_error.114 | parse error: Unsupported BSON record type 0x0F | The parsing of the corresponding BSON record type is not implemented (yet).
+json.exception.parse_error.115 | parse error at byte 5: syntax error while parsing UBJSON high-precision number: invalid number text: 1A | A UBJSON high-precision number could not be parsed.
+
+@note For an input with n bytes, 1 is the index of the first character and n+1
+ is the index of the terminating null byte or the end of file. This also
+ holds true when reading a byte vector (CBOR or MessagePack).
+
+@liveexample{The following code shows how a `parse_error` exception can be
+caught.,parse_error}
+
+@sa - @ref exception for the base class of the library exceptions
+@sa - @ref invalid_iterator for exceptions indicating errors with iterators
+@sa - @ref type_error for exceptions indicating executing a member function with
+ a wrong type
+@sa - @ref out_of_range for exceptions indicating access out of the defined range
+@sa - @ref other_error for exceptions indicating other library errors
+
+@since version 3.0.0
+*/
+class parse_error : public exception
+{
+ public:
+ /*!
+ @brief create a parse error exception
+ @param[in] id_ the id of the exception
+ @param[in] pos the position where the error occurred (or with
+ chars_read_total=0 if the position cannot be
+ determined)
+ @param[in] what_arg the explanatory string
+ @return parse_error object
+ */
+ static parse_error create(int id_, const position_t& pos, const std::string& what_arg)
+ {
+ std::string w = exception::name("parse_error", id_) + "parse error" +
+ position_string(pos) + ": " + what_arg;
+ return parse_error(id_, pos.chars_read_total, w.c_str());
+ }
+
+ static parse_error create(int id_, std::size_t byte_, const std::string& what_arg)
+ {
+ std::string w = exception::name("parse_error", id_) + "parse error" +
+ (byte_ != 0 ? (" at byte " + std::to_string(byte_)) : "") +
+ ": " + what_arg;
+ return parse_error(id_, byte_, w.c_str());
+ }
+
+ /*!
+ @brief byte index of the parse error
+
+ The byte index of the last read character in the input file.
+
+ @note For an input with n bytes, 1 is the index of the first character and
+ n+1 is the index of the terminating null byte or the end of file.
+ This also holds true when reading a byte vector (CBOR or MessagePack).
+ */
+ const std::size_t byte;
+
+ private:
+ parse_error(int id_, std::size_t byte_, const char* what_arg)
+ : exception(id_, what_arg), byte(byte_) {}
+
+ static std::string position_string(const position_t& pos)
+ {
+ return " at line " + std::to_string(pos.lines_read + 1) +
+ ", column " + std::to_string(pos.chars_read_current_line);
+ }
+};
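+
+// Usage sketch (illustrative only): the `byte` member reports where parsing
+// stopped, which is useful when building error messages:
+//
+//     try {
+//         nlohmann::json::parse(R"({"key": )");
+//     } catch (const nlohmann::json::parse_error& e) {
+//         // e.id == 101, e.byte == position of the unexpected end of input
+//     }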
+
+/*!
+@brief exception indicating errors with iterators
+
+This exception is thrown if iterators passed to a library function do not match
+the expected semantics.
+
+Exceptions have ids 2xx.
+
+name / id | example message | description
+----------------------------------- | --------------- | -------------------------
+json.exception.invalid_iterator.201 | iterators are not compatible | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) are not compatible, meaning they do not belong to the same container. Therefore, the range (@a first, @a last) is invalid.
+json.exception.invalid_iterator.202 | iterator does not fit current value | In an erase or insert function, the passed iterator @a pos does not belong to the JSON value for which the function was called. It hence does not define a valid position for the deletion/insertion.
+json.exception.invalid_iterator.203 | iterators do not fit current value | Either iterator passed to function @ref erase(IteratorType first, IteratorType last) does not belong to the JSON value from which values shall be erased. It hence does not define a valid range to delete values from.
+json.exception.invalid_iterator.204 | iterators out of range | When an iterator range for a primitive type (number, boolean, or string) is passed to a constructor or an erase function, this range has to be exactly (@ref begin(), @ref end()), because this is the only way the single stored value is expressed. All other ranges are invalid.
+json.exception.invalid_iterator.205 | iterator out of range | When an iterator for a primitive type (number, boolean, or string) is passed to an erase function, the iterator has to be the @ref begin() iterator, because it is the only way to address the stored value. All other iterators are invalid.
+json.exception.invalid_iterator.206 | cannot construct with iterators from null | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) belong to a JSON null value and hence do not define a valid range.
+json.exception.invalid_iterator.207 | cannot use key() for non-object iterators | The key() member function can only be used on iterators belonging to a JSON object, because other types do not have a concept of a key.
+json.exception.invalid_iterator.208 | cannot use operator[] for object iterators | The operator[] to specify a concrete offset cannot be used on iterators belonging to a JSON object, because JSON objects are unordered.
+json.exception.invalid_iterator.209 | cannot use offsets with object iterators | The offset operators (+, -, +=, -=) cannot be used on iterators belonging to a JSON object, because JSON objects are unordered.
+json.exception.invalid_iterator.210 | iterators do not fit | The iterator range passed to the insert function is not compatible, meaning the iterators do not belong to the same container. Therefore, the range (@a first, @a last) is invalid.
+json.exception.invalid_iterator.211 | passed iterators may not belong to container | The iterator range passed to the insert function must not be a subrange of the container to insert to.
+json.exception.invalid_iterator.212 | cannot compare iterators of different containers | When two iterators are compared, they must belong to the same container.
+json.exception.invalid_iterator.213 | cannot compare order of object iterators | The order of object iterators cannot be compared, because JSON objects are unordered.
+json.exception.invalid_iterator.214 | cannot get value | Cannot get value for iterator: Either the iterator belongs to a null value or it is an iterator to a primitive type (number, boolean, or string), but the iterator is different to @ref begin().
+
+@liveexample{The following code shows how an `invalid_iterator` exception can be
+caught.,invalid_iterator}
+
+@sa - @ref exception for the base class of the library exceptions
+@sa - @ref parse_error for exceptions indicating a parse error
+@sa - @ref type_error for exceptions indicating executing a member function with
+ a wrong type
+@sa - @ref out_of_range for exceptions indicating access out of the defined range
+@sa - @ref other_error for exceptions indicating other library errors
+
+@since version 3.0.0
+*/
+class invalid_iterator : public exception
+{
+ public:
+ static invalid_iterator create(int id_, const std::string& what_arg)
+ {
+ std::string w = exception::name("invalid_iterator", id_) + what_arg;
+ return invalid_iterator(id_, w.c_str());
+ }
+
+ private:
+ JSON_HEDLEY_NON_NULL(3)
+ invalid_iterator(int id_, const char* what_arg)
+ : exception(id_, what_arg) {}
+};
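+
+// Usage sketch (illustrative only): calling key() on an array iterator is one
+// way to trigger this exception:
+//
+//     nlohmann::json j = {1, 2, 3};
+//     try {
+//         j.begin().key();                           // key() is only valid for objects
+//     } catch (const nlohmann::json::invalid_iterator& e) {
+//         // e.id == 207
+//     }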
+
+/*!
+@brief exception indicating executing a member function with a wrong type
+
+This exception is thrown in case of a type error; that is, a library function is
+executed on a JSON value whose type does not match the expected semantics.
+
+Exceptions have ids 3xx.
+
+name / id | example message | description
+----------------------------- | --------------- | -------------------------
+json.exception.type_error.301 | cannot create object from initializer list | To create an object from an initializer list, the initializer list must consist only of a list of pairs whose first element is a string. When this constraint is violated, an array is created instead.
+json.exception.type_error.302 | type must be object, but is array | During implicit or explicit value conversion, the JSON type must be compatible with the target type. For instance, a JSON string can only be converted into string types, but not into numbers or boolean types.
+json.exception.type_error.303 | incompatible ReferenceType for get_ref, actual type is object | To retrieve a reference to a value stored in a @ref basic_json object with @ref get_ref, the type of the reference must match the value type. For instance, for a JSON array, the @a ReferenceType must be @ref array_t &.
+json.exception.type_error.304 | cannot use at() with string | The @ref at() member functions can only be executed for certain JSON types.
+json.exception.type_error.305 | cannot use operator[] with string | The @ref operator[] member functions can only be executed for certain JSON types.
+json.exception.type_error.306 | cannot use value() with string | The @ref value() member functions can only be executed for certain JSON types.
+json.exception.type_error.307 | cannot use erase() with string | The @ref erase() member functions can only be executed for certain JSON types.
+json.exception.type_error.308 | cannot use push_back() with string | The @ref push_back() and @ref operator+= member functions can only be executed for certain JSON types.
+json.exception.type_error.309 | cannot use insert() with | The @ref insert() member functions can only be executed for certain JSON types.
+json.exception.type_error.310 | cannot use swap() with number | The @ref swap() member functions can only be executed for certain JSON types.
+json.exception.type_error.311 | cannot use emplace_back() with string | The @ref emplace_back() member function can only be executed for certain JSON types.
+json.exception.type_error.312 | cannot use update() with string | The @ref update() member functions can only be executed for certain JSON types.
+json.exception.type_error.313 | invalid value to unflatten | The @ref unflatten function converts an object whose keys are JSON Pointers back into an arbitrary nested JSON value. The JSON Pointers must not overlap, because then the resulting value would not be well defined.
+json.exception.type_error.314 | only objects can be unflattened | The @ref unflatten function only works for an object whose keys are JSON Pointers.
+json.exception.type_error.315 | values in object must be primitive | The @ref unflatten function only works for an object whose keys are JSON Pointers and whose values are primitive.
+json.exception.type_error.316 | invalid UTF-8 byte at index 10: 0x7E | The @ref dump function only works with UTF-8 encoded strings; that is, if you assign a `std::string` to a JSON value, make sure it is UTF-8 encoded. |
+json.exception.type_error.317 | JSON value cannot be serialized to requested format | The dynamic type of the object cannot be represented in the requested serialization format (e.g. a raw `true` or `null` JSON object cannot be serialized to BSON) |
+
+@liveexample{The following code shows how a `type_error` exception can be
+caught.,type_error}
+
+@sa - @ref exception for the base class of the library exceptions
+@sa - @ref parse_error for exceptions indicating a parse error
+@sa - @ref invalid_iterator for exceptions indicating errors with iterators
+@sa - @ref out_of_range for exceptions indicating access out of the defined range
+@sa - @ref other_error for exceptions indicating other library errors
+
+@since version 3.0.0
+*/
+class type_error : public exception
+{
+ public:
+ static type_error create(int id_, const std::string& what_arg)
+ {
+ std::string w = exception::name("type_error", id_) + what_arg;
+ return type_error(id_, w.c_str());
+ }
+
+ private:
+ JSON_HEDLEY_NON_NULL(3)
+ type_error(int id_, const char* what_arg) : exception(id_, what_arg) {}
+};
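+
+// Usage sketch (illustrative only): using an object accessor on a string value
+// raises a type_error:
+//
+//     nlohmann::json j = "a string";
+//     try {
+//         j.at("missing");                          // at(key) requires an object
+//     } catch (const nlohmann::json::type_error& e) {
+//         // e.id == 304: "cannot use at() with string"
+//     }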
+
+/*!
+@brief exception indicating access out of the defined range
+
+This exception is thrown in case a library function is called on an input
+parameter that exceeds the expected range, for instance in case of array
+indices or nonexisting object keys.
+
+Exceptions have ids 4xx.
+
+name / id | example message | description
+------------------------------- | --------------- | -------------------------
+json.exception.out_of_range.401 | array index 3 is out of range | The provided array index @a i is larger than @a size-1.
+json.exception.out_of_range.402 | array index '-' (3) is out of range | The special array index `-` in a JSON Pointer never describes a valid element of the array, but the index past the end. That is, it can only be used to add elements at this position, but not to read it.
+json.exception.out_of_range.403 | key 'foo' not found | The provided key was not found in the JSON object.
+json.exception.out_of_range.404 | unresolved reference token 'foo' | A reference token in a JSON Pointer could not be resolved.
+json.exception.out_of_range.405 | JSON pointer has no parent | The JSON Patch operations 'remove' and 'add' cannot be applied to the root element of the JSON value.
+json.exception.out_of_range.406 | number overflow parsing '10E1000' | A parsed number could not be stored without changing it to NaN or INF.
+json.exception.out_of_range.407 | number overflow serializing '9223372036854775808' | UBJSON and BSON only support integer numbers up to 9223372036854775807. (until version 3.8.0) |
+json.exception.out_of_range.408 | excessive array size: 8658170730974374167 | The size (following `#`) of an UBJSON array or object exceeds the maximal capacity. |
+json.exception.out_of_range.409 | BSON key cannot contain code point U+0000 (at byte 2) | Key identifiers to be serialized to BSON cannot contain code point U+0000, since the key is stored as a zero-terminated C string |
+
+@liveexample{The following code shows how an `out_of_range` exception can be
+caught.,out_of_range}
+
+@sa - @ref exception for the base class of the library exceptions
+@sa - @ref parse_error for exceptions indicating a parse error
+@sa - @ref invalid_iterator for exceptions indicating errors with iterators
+@sa - @ref type_error for exceptions indicating executing a member function with
+ a wrong type
+@sa - @ref other_error for exceptions indicating other library errors
+
+@since version 3.0.0
+*/
+class out_of_range : public exception
+{
+ public:
+ static out_of_range create(int id_, const std::string& what_arg)
+ {
+ std::string w = exception::name("out_of_range", id_) + what_arg;
+ return out_of_range(id_, w.c_str());
+ }
+
+ private:
+ JSON_HEDLEY_NON_NULL(3)
+ out_of_range(int id_, const char* what_arg) : exception(id_, what_arg) {}
+};
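+
+// Usage sketch (illustrative only): checked access with at() throws instead of
+// inserting a default value:
+//
+//     nlohmann::json j = {{"a", 1}};
+//     try {
+//         j.at("b");
+//     } catch (const nlohmann::json::out_of_range& e) {
+//         // e.id == 403: "key 'b' not found"
+//     }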
+
+/*!
+@brief exception indicating other library errors
+
+This exception is thrown in case of errors that cannot be classified with the
+other exception types.
+
+Exceptions have ids 5xx.
+
+name / id | example message | description
+------------------------------ | --------------- | -------------------------
+json.exception.other_error.501 | unsuccessful: {"op":"test","path":"/baz", "value":"bar"} | A JSON Patch operation 'test' failed. The unsuccessful operation is also printed.
+
+@sa - @ref exception for the base class of the library exceptions
+@sa - @ref parse_error for exceptions indicating a parse error
+@sa - @ref invalid_iterator for exceptions indicating errors with iterators
+@sa - @ref type_error for exceptions indicating executing a member function with
+ a wrong type
+@sa - @ref out_of_range for exceptions indicating access out of the defined range
+
+@liveexample{The following code shows how an `other_error` exception can be
+caught.,other_error}
+
+@since version 3.0.0
+*/
+class other_error : public exception
+{
+ public:
+ static other_error create(int id_, const std::string& what_arg)
+ {
+ std::string w = exception::name("other_error", id_) + what_arg;
+ return other_error(id_, w.c_str());
+ }
+
+ private:
+ JSON_HEDLEY_NON_NULL(3)
+ other_error(int id_, const char* what_arg) : exception(id_, what_arg) {}
+};
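+
+// Usage sketch (illustrative only): a failing JSON Patch 'test' operation is
+// the typical source of this exception:
+//
+//     nlohmann::json doc = {{"foo", 1}};
+//     auto patch = nlohmann::json::parse(R"([{"op":"test","path":"/foo","value":2}])");
+//     try {
+//         doc.patch(patch);                         // test fails: /foo is 1, not 2
+//     } catch (const nlohmann::json::other_error& e) {
+//         // e.id == 501
+//     }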
+} // namespace detail
+} // namespace nlohmann
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+// #include <nlohmann/detail/meta/cpp_future.hpp>
+
+
+#include <cstddef> // size_t
+#include <type_traits> // conditional, enable_if, false_type, integral_constant, is_constructible, is_integral, is_same, remove_cv, remove_reference, true_type
+
+namespace nlohmann
+{
+namespace detail
+{
+// alias templates to reduce boilerplate
+template<bool B, typename T = void>
+using enable_if_t = typename std::enable_if<B, T>::type;
+
+template<typename T>
+using uncvref_t = typename std::remove_cv<typename std::remove_reference<T>::type>::type;
+
+// implementation of C++14 index_sequence and affiliates
+// source: https://stackoverflow.com/a/32223343
+template<std::size_t... Ints>
+struct index_sequence
+{
+ using type = index_sequence;
+ using value_type = std::size_t;
+ static constexpr std::size_t size() noexcept
+ {
+ return sizeof...(Ints);
+ }
+};
+
+template<class Sequence1, class Sequence2>
+struct merge_and_renumber;
+
+template<std::size_t... I1, std::size_t... I2>
+struct merge_and_renumber<index_sequence<I1...>, index_sequence<I2...>>
+ : index_sequence < I1..., (sizeof...(I1) + I2)... > {};
+
+template<std::size_t N>
+struct make_index_sequence
+ : merge_and_renumber < typename make_index_sequence < N / 2 >::type,
+ typename make_index_sequence < N - N / 2 >::type > {};
+
+template<> struct make_index_sequence<0> : index_sequence<> {};
+template<> struct make_index_sequence<1> : index_sequence<0> {};
+
+template<typename... Ts>
+using index_sequence_for = make_index_sequence<sizeof...(Ts)>;
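+
+// Usage sketch (illustrative only; `call_with_tuple` is a hypothetical helper):
+// make_index_sequence<3> is index_sequence<0, 1, 2>, which is typically used
+// to expand a std::tuple into a parameter pack:
+//
+//     template<class F, class Tuple, std::size_t... I>
+//     auto call_with_tuple(F&& f, const Tuple& t, index_sequence<I...>)
+//     -> decltype(f(std::get<I>(t)...))
+//     {
+//         return f(std::get<I>(t)...);
+//     }
+//     // invoked with make_index_sequence<std::tuple_size<Tuple>::value>{}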
+
+// dispatch utility (taken from ranges-v3)
+template<unsigned N> struct priority_tag : priority_tag < N - 1 > {};
+template<> struct priority_tag<0> {};
+
+// taken from ranges-v3
+template<typename T>
+struct static_const
+{
+ static constexpr T value{};
+};
+
+template<typename T>
+constexpr T static_const<T>::value;
+} // namespace detail
+} // namespace nlohmann
+
+// #include <nlohmann/detail/meta/type_traits.hpp>
+
+
+#include <limits> // numeric_limits
+#include <type_traits> // false_type, is_constructible, is_integral, is_same, true_type
+#include <utility> // declval
+
+// #include <nlohmann/detail/iterators/iterator_traits.hpp>
+
+
+#include <iterator> // random_access_iterator_tag
+
+// #include <nlohmann/detail/meta/void_t.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+template<typename ...Ts> struct make_void
+{
+ using type = void;
+};
+template<typename ...Ts> using void_t = typename make_void<Ts...>::type;
+} // namespace detail
+} // namespace nlohmann
+
+// #include <nlohmann/detail/meta/cpp_future.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+template<typename It, typename = void>
+struct iterator_types {};
+
+template<typename It>
+struct iterator_types <
+ It,
+ void_t<typename It::difference_type, typename It::value_type, typename It::pointer,
+ typename It::reference, typename It::iterator_category >>
+{
+ using difference_type = typename It::difference_type;
+ using value_type = typename It::value_type;
+ using pointer = typename It::pointer;
+ using reference = typename It::reference;
+ using iterator_category = typename It::iterator_category;
+};
+
+// This is required as some compilers implement std::iterator_traits in a way that
+// doesn't work with SFINAE. See https://github.com/nlohmann/json/issues/1341.
+template<typename T, typename = void>
+struct iterator_traits
+{
+};
+
+template<typename T>
+struct iterator_traits < T, enable_if_t < !std::is_pointer<T>::value >>
+ : iterator_types<T>
+{
+};
+
+template<typename T>
+struct iterator_traits<T*, enable_if_t<std::is_object<T>::value>>
+{
+ using iterator_category = std::random_access_iterator_tag;
+ using value_type = T;
+ using difference_type = ptrdiff_t;
+ using pointer = T*;
+ using reference = T&;
+};
+} // namespace detail
+} // namespace nlohmann
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+// #include <nlohmann/detail/meta/cpp_future.hpp>
+
+// #include <nlohmann/detail/meta/detected.hpp>
+
+
+#include <type_traits>
+
+// #include <nlohmann/detail/meta/void_t.hpp>
+
+
+// https://en.cppreference.com/w/cpp/experimental/is_detected
+namespace nlohmann
+{
+namespace detail
+{
+struct nonesuch
+{
+ nonesuch() = delete;
+ ~nonesuch() = delete;
+ nonesuch(nonesuch const&) = delete;
+ nonesuch(nonesuch const&&) = delete;
+ void operator=(nonesuch const&) = delete;
+ void operator=(nonesuch&&) = delete;
+};
+
+template<class Default,
+ class AlwaysVoid,
+ template<class...> class Op,
+ class... Args>
+struct detector
+{
+ using value_t = std::false_type;
+ using type = Default;
+};
+
+template<class Default, template<class...> class Op, class... Args>
+struct detector<Default, void_t<Op<Args...>>, Op, Args...>
+{
+ using value_t = std::true_type;
+ using type = Op<Args...>;
+};
+
+template<template<class...> class Op, class... Args>
+using is_detected = typename detector<nonesuch, void, Op, Args...>::value_t;
+
+template<template<class...> class Op, class... Args>
+using detected_t = typename detector<nonesuch, void, Op, Args...>::type;
+
+template<class Default, template<class...> class Op, class... Args>
+using detected_or = detector<Default, void, Op, Args...>;
+
+template<class Default, template<class...> class Op, class... Args>
+using detected_or_t = typename detected_or<Default, Op, Args...>::type;
+
+template<class Expected, template<class...> class Op, class... Args>
+using is_detected_exact = std::is_same<Expected, detected_t<Op, Args...>>;
+
+template<class To, template<class...> class Op, class... Args>
+using is_detected_convertible =
+ std::is_convertible<detected_t<Op, Args...>, To>;
+} // namespace detail
+} // namespace nlohmann
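+
+// Detection-idiom sketch (illustrative only; the alias `has_size_t` below is
+// hypothetical): is_detected answers "is this expression well-formed for T?"
+//
+//     template<class T>
+//     using has_size_t = decltype(std::declval<const T&>().size());
+//
+//     static_assert(nlohmann::detail::is_detected<has_size_t, std::vector<int>>::value, "");
+//     static_assert(!nlohmann::detail::is_detected<has_size_t, int>::value, "");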
+
+// #include <nlohmann/json_fwd.hpp>
+#ifndef INCLUDE_NLOHMANN_JSON_FWD_HPP_
+#define INCLUDE_NLOHMANN_JSON_FWD_HPP_
+
+#include <cstdint> // int64_t, uint64_t
+#include <map> // map
+#include <memory> // allocator
+#include <string> // string
+#include <vector> // vector
+
+/*!
+@brief namespace for Niels Lohmann
+@see https://github.com/nlohmann
+@since version 1.0.0
+*/
+namespace nlohmann
+{
+/*!
+@brief default JSONSerializer template argument
+
+This serializer ignores the template arguments and uses ADL
+([argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl))
+for serialization.
+*/
+template<typename T = void, typename SFINAE = void>
+struct adl_serializer;
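+
+// Usage sketch (illustrative only; `legacy::point` is a hypothetical type you
+// cannot modify): specializing adl_serializer makes such a type convertible to
+// and from JSON without touching its definition:
+//
+//     namespace nlohmann {
+//     template<> struct adl_serializer<legacy::point> {
+//         static void to_json(json& j, const legacy::point& p) { j = {p.x, p.y}; }
+//         static void from_json(const json& j, legacy::point& p) {
+//             p.x = j.at(0).get<double>();
+//             p.y = j.at(1).get<double>();
+//         }
+//     };
+//     } // namespace nlohmann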
+
+template<template<typename U, typename V, typename... Args> class ObjectType =
+ std::map,
+ template<typename U, typename... Args> class ArrayType = std::vector,
+ class StringType = std::string, class BooleanType = bool,
+ class NumberIntegerType = std::int64_t,
+ class NumberUnsignedType = std::uint64_t,
+ class NumberFloatType = double,
+ template<typename U> class AllocatorType = std::allocator,
+ template<typename T, typename SFINAE = void> class JSONSerializer =
+ adl_serializer,
+ class BinaryType = std::vector<std::uint8_t>>
+class basic_json;
+
+/*!
+@brief JSON Pointer
+
+A JSON pointer defines a string syntax for identifying a specific value
+within a JSON document. It can be used with functions `at` and
+`operator[]`. Furthermore, JSON pointers are the base for JSON patches.
+
+@sa [RFC 6901](https://tools.ietf.org/html/rfc6901)
+
+@since version 2.0.0
+*/
+template<typename BasicJsonType>
+class json_pointer;
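+
+// Usage sketch (illustrative only):
+//
+//     auto j = nlohmann::json::parse(R"({"foo": [1, 2]})");
+//     j.at(nlohmann::json::json_pointer("/foo/1"));   // == 2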
+
+/*!
+@brief default JSON class
+
+This type is the default specialization of the @ref basic_json class which
+uses the standard template types.
+
+@since version 1.0.0
+*/
+using json = basic_json<>;
+
+template<class Key, class T, class IgnoredLess, class Allocator>
+struct ordered_map;
+
+/*!
+@brief ordered JSON class
+
+This type preserves the insertion order of object keys.
+
+@since version 3.9.0
+*/
+using ordered_json = basic_json<nlohmann::ordered_map>;
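+
+// Usage sketch (illustrative only): unlike nlohmann::json (std::map-backed, so
+// keys are sorted), ordered_json keeps the order in which keys were inserted:
+//
+//     nlohmann::ordered_json j;
+//     j["zebra"] = 1;
+//     j["alpha"] = 2;
+//     j.dump();   // {"zebra":1,"alpha":2}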
+
+} // namespace nlohmann
+
+#endif // INCLUDE_NLOHMANN_JSON_FWD_HPP_
+
+
+namespace nlohmann
+{
+/*!
+@brief detail namespace with internal helper functions
+
+This namespace collects functions that should not be exposed,
+implementations of some @ref basic_json methods, and meta-programming helpers.
+
+@since version 2.1.0
+*/
+namespace detail
+{
+/////////////
+// helpers //
+/////////////
+
+// Note to maintainers:
+//
+// Every trait in this file expects a non-CV-qualified type.
+// The only exceptions are in the 'aliases for detected' section
+// (i.e. those of the form: decltype(T::member_function(std::declval<T>())))
+//
+// In this case, T has to be properly CV-qualified to constrain the function arguments
+// (e.g. to_json(BasicJsonType&, const T&))
+
+template<typename> struct is_basic_json : std::false_type {};
+
+NLOHMANN_BASIC_JSON_TPL_DECLARATION
+struct is_basic_json<NLOHMANN_BASIC_JSON_TPL> : std::true_type {};
+
+//////////////////////
+// json_ref helpers //
+//////////////////////
+
+template<typename>
+class json_ref;
+
+template<typename>
+struct is_json_ref : std::false_type {};
+
+template<typename T>
+struct is_json_ref<json_ref<T>> : std::true_type {};
+
+//////////////////////////
+// aliases for detected //
+//////////////////////////
+
+template<typename T>
+using mapped_type_t = typename T::mapped_type;
+
+template<typename T>
+using key_type_t = typename T::key_type;
+
+template<typename T>
+using value_type_t = typename T::value_type;
+
+template<typename T>
+using difference_type_t = typename T::difference_type;
+
+template<typename T>
+using pointer_t = typename T::pointer;
+
+template<typename T>
+using reference_t = typename T::reference;
+
+template<typename T>
+using iterator_category_t = typename T::iterator_category;
+
+template<typename T>
+using iterator_t = typename T::iterator;
+
+template<typename T, typename... Args>
+using to_json_function = decltype(T::to_json(std::declval<Args>()...));
+
+template<typename T, typename... Args>
+using from_json_function = decltype(T::from_json(std::declval<Args>()...));
+
+template<typename T, typename U>
+using get_template_function = decltype(std::declval<T>().template get<U>());
+
+// trait checking if JSONSerializer<T>::from_json(json const&, udt&) exists
+template<typename BasicJsonType, typename T, typename = void>
+struct has_from_json : std::false_type {};
+
+// trait checking if j.get<T> is valid
+// use this trait instead of std::is_constructible or std::is_convertible;
+// both rely on, or make use of, implicit conversions and thus fail when T
+// has several constructors/operator= (see https://github.com/nlohmann/json/issues/958)
+template <typename BasicJsonType, typename T>
+struct is_getable
+{
+ static constexpr bool value = is_detected<get_template_function, const BasicJsonType&, T>::value;
+};
+
+template<typename BasicJsonType, typename T>
+struct has_from_json < BasicJsonType, T,
+ enable_if_t < !is_basic_json<T>::value >>
+{
+ using serializer = typename BasicJsonType::template json_serializer<T, void>;
+
+ static constexpr bool value =
+ is_detected_exact<void, from_json_function, serializer,
+ const BasicJsonType&, T&>::value;
+};
+
+// This trait checks if JSONSerializer<T>::from_json(json const&) exists
+// this overload is used for non-default-constructible user-defined types
+template<typename BasicJsonType, typename T, typename = void>
+struct has_non_default_from_json : std::false_type {};
+
+template<typename BasicJsonType, typename T>
+struct has_non_default_from_json < BasicJsonType, T, enable_if_t < !is_basic_json<T>::value >>
+{
+ using serializer = typename BasicJsonType::template json_serializer<T, void>;
+
+ static constexpr bool value =
+ is_detected_exact<T, from_json_function, serializer,
+ const BasicJsonType&>::value;
+};
+
+// This trait checks if BasicJsonType::json_serializer<T>::to_json exists
+// Do not evaluate the trait when T is a basic_json type, to avoid template instantiation infinite recursion.
+template<typename BasicJsonType, typename T, typename = void>
+struct has_to_json : std::false_type {};
+
+template<typename BasicJsonType, typename T>
+struct has_to_json < BasicJsonType, T, enable_if_t < !is_basic_json<T>::value >>
+{
+ using serializer = typename BasicJsonType::template json_serializer<T, void>;
+
+ static constexpr bool value =
+ is_detected_exact<void, to_json_function, serializer, BasicJsonType&,
+ T>::value;
+};
+
+
+///////////////////
+// is_ functions //
+///////////////////
+
+template<typename T, typename = void>
+struct is_iterator_traits : std::false_type {};
+
+template<typename T>
+struct is_iterator_traits<iterator_traits<T>>
+{
+ private:
+ using traits = iterator_traits<T>;
+
+ public:
+ static constexpr auto value =
+ is_detected<value_type_t, traits>::value &&
+ is_detected<difference_type_t, traits>::value &&
+ is_detected<pointer_t, traits>::value &&
+ is_detected<iterator_category_t, traits>::value &&
+ is_detected<reference_t, traits>::value;
+};
+
+// source: https://stackoverflow.com/a/37193089/4116453
+
+template<typename T, typename = void>
+struct is_complete_type : std::false_type {};
+
+template<typename T>
+struct is_complete_type<T, decltype(void(sizeof(T)))> : std::true_type {};
+
+template<typename BasicJsonType, typename CompatibleObjectType,
+ typename = void>
+struct is_compatible_object_type_impl : std::false_type {};
+
+template<typename BasicJsonType, typename CompatibleObjectType>
+struct is_compatible_object_type_impl <
+ BasicJsonType, CompatibleObjectType,
+ enable_if_t < is_detected<mapped_type_t, CompatibleObjectType>::value&&
+ is_detected<key_type_t, CompatibleObjectType>::value >>
+{
+
+ using object_t = typename BasicJsonType::object_t;
+
+ // macOS's is_constructible does not play well with nonesuch...
+ static constexpr bool value =
+ std::is_constructible<typename object_t::key_type,
+ typename CompatibleObjectType::key_type>::value &&
+ std::is_constructible<typename object_t::mapped_type,
+ typename CompatibleObjectType::mapped_type>::value;
+};
+
+template<typename BasicJsonType, typename CompatibleObjectType>
+struct is_compatible_object_type
+ : is_compatible_object_type_impl<BasicJsonType, CompatibleObjectType> {};
+
+template<typename BasicJsonType, typename ConstructibleObjectType,
+ typename = void>
+struct is_constructible_object_type_impl : std::false_type {};
+
+template<typename BasicJsonType, typename ConstructibleObjectType>
+struct is_constructible_object_type_impl <
+ BasicJsonType, ConstructibleObjectType,
+ enable_if_t < is_detected<mapped_type_t, ConstructibleObjectType>::value&&
+ is_detected<key_type_t, ConstructibleObjectType>::value >>
+{
+ using object_t = typename BasicJsonType::object_t;
+
+ static constexpr bool value =
+ (std::is_default_constructible<ConstructibleObjectType>::value &&
+ (std::is_move_assignable<ConstructibleObjectType>::value ||
+ std::is_copy_assignable<ConstructibleObjectType>::value) &&
+ (std::is_constructible<typename ConstructibleObjectType::key_type,
+ typename object_t::key_type>::value &&
+ std::is_same <
+ typename object_t::mapped_type,
+ typename ConstructibleObjectType::mapped_type >::value)) ||
+ (has_from_json<BasicJsonType,
+ typename ConstructibleObjectType::mapped_type>::value ||
+ has_non_default_from_json <
+ BasicJsonType,
+ typename ConstructibleObjectType::mapped_type >::value);
+};
+
+template<typename BasicJsonType, typename ConstructibleObjectType>
+struct is_constructible_object_type
+ : is_constructible_object_type_impl<BasicJsonType,
+ ConstructibleObjectType> {};
+
+template<typename BasicJsonType, typename CompatibleStringType,
+ typename = void>
+struct is_compatible_string_type_impl : std::false_type {};
+
+template<typename BasicJsonType, typename CompatibleStringType>
+struct is_compatible_string_type_impl <
+ BasicJsonType, CompatibleStringType,
+ enable_if_t<is_detected_exact<typename BasicJsonType::string_t::value_type,
+ value_type_t, CompatibleStringType>::value >>
+{
+ static constexpr auto value =
+ std::is_constructible<typename BasicJsonType::string_t, CompatibleStringType>::value;
+};
+
+template<typename BasicJsonType, typename ConstructibleStringType>
+struct is_compatible_string_type
+ : is_compatible_string_type_impl<BasicJsonType, ConstructibleStringType> {};
+
+template<typename BasicJsonType, typename ConstructibleStringType,
+ typename = void>
+struct is_constructible_string_type_impl : std::false_type {};
+
+template<typename BasicJsonType, typename ConstructibleStringType>
+struct is_constructible_string_type_impl <
+ BasicJsonType, ConstructibleStringType,
+ enable_if_t<is_detected_exact<typename BasicJsonType::string_t::value_type,
+ value_type_t, ConstructibleStringType>::value >>
+{
+ static constexpr auto value =
+ std::is_constructible<ConstructibleStringType,
+ typename BasicJsonType::string_t>::value;
+};
+
+template<typename BasicJsonType, typename ConstructibleStringType>
+struct is_constructible_string_type
+ : is_constructible_string_type_impl<BasicJsonType, ConstructibleStringType> {};
+
+template<typename BasicJsonType, typename CompatibleArrayType, typename = void>
+struct is_compatible_array_type_impl : std::false_type {};
+
+template<typename BasicJsonType, typename CompatibleArrayType>
+struct is_compatible_array_type_impl <
+ BasicJsonType, CompatibleArrayType,
+ enable_if_t < is_detected<value_type_t, CompatibleArrayType>::value&&
+ is_detected<iterator_t, CompatibleArrayType>::value&&
+// This is needed because json_reverse_iterator has a ::iterator type...
+// Therefore it is detected as a CompatibleArrayType.
+// The real fix would be to have an Iterable concept.
+ !is_iterator_traits <
+ iterator_traits<CompatibleArrayType >>::value >>
+{
+ static constexpr bool value =
+ std::is_constructible<BasicJsonType,
+ typename CompatibleArrayType::value_type>::value;
+};
+
+template<typename BasicJsonType, typename CompatibleArrayType>
+struct is_compatible_array_type
+ : is_compatible_array_type_impl<BasicJsonType, CompatibleArrayType> {};
+
+template<typename BasicJsonType, typename ConstructibleArrayType, typename = void>
+struct is_constructible_array_type_impl : std::false_type {};
+
+template<typename BasicJsonType, typename ConstructibleArrayType>
+struct is_constructible_array_type_impl <
+ BasicJsonType, ConstructibleArrayType,
+ enable_if_t<std::is_same<ConstructibleArrayType,
+ typename BasicJsonType::value_type>::value >>
+ : std::true_type {};
+
+template<typename BasicJsonType, typename ConstructibleArrayType>
+struct is_constructible_array_type_impl <
+ BasicJsonType, ConstructibleArrayType,
+ enable_if_t < !std::is_same<ConstructibleArrayType,
+ typename BasicJsonType::value_type>::value&&
+ std::is_default_constructible<ConstructibleArrayType>::value&&
+(std::is_move_assignable<ConstructibleArrayType>::value ||
+ std::is_copy_assignable<ConstructibleArrayType>::value)&&
+is_detected<value_type_t, ConstructibleArrayType>::value&&
+is_detected<iterator_t, ConstructibleArrayType>::value&&
+is_complete_type <
+detected_t<value_type_t, ConstructibleArrayType >>::value >>
+{
+ static constexpr bool value =
+ // This is needed because json_reverse_iterator has a ::iterator type,
+ // furthermore, std::back_insert_iterator (and other iterators) have a
+ // base class `iterator`... Therefore it is detected as a
+ // ConstructibleArrayType. The real fix would be to have an Iterable
+ // concept.
+ !is_iterator_traits<iterator_traits<ConstructibleArrayType>>::value &&
+
+ (std::is_same<typename ConstructibleArrayType::value_type,
+ typename BasicJsonType::array_t::value_type>::value ||
+ has_from_json<BasicJsonType,
+ typename ConstructibleArrayType::value_type>::value ||
+ has_non_default_from_json <
+ BasicJsonType, typename ConstructibleArrayType::value_type >::value);
+};
+
+template<typename BasicJsonType, typename ConstructibleArrayType>
+struct is_constructible_array_type
+ : is_constructible_array_type_impl<BasicJsonType, ConstructibleArrayType> {};
+
+template<typename RealIntegerType, typename CompatibleNumberIntegerType,
+ typename = void>
+struct is_compatible_integer_type_impl : std::false_type {};
+
+template<typename RealIntegerType, typename CompatibleNumberIntegerType>
+struct is_compatible_integer_type_impl <
+ RealIntegerType, CompatibleNumberIntegerType,
+ enable_if_t < std::is_integral<RealIntegerType>::value&&
+ std::is_integral<CompatibleNumberIntegerType>::value&&
+ !std::is_same<bool, CompatibleNumberIntegerType>::value >>
+{
+ // is there an assert somewhere on overflows?
+ using RealLimits = std::numeric_limits<RealIntegerType>;
+ using CompatibleLimits = std::numeric_limits<CompatibleNumberIntegerType>;
+
+ static constexpr auto value =
+ std::is_constructible<RealIntegerType,
+ CompatibleNumberIntegerType>::value &&
+ CompatibleLimits::is_integer &&
+ RealLimits::is_signed == CompatibleLimits::is_signed;
+};
+
+template<typename RealIntegerType, typename CompatibleNumberIntegerType>
+struct is_compatible_integer_type
+ : is_compatible_integer_type_impl<RealIntegerType,
+ CompatibleNumberIntegerType> {};
+
+template<typename BasicJsonType, typename CompatibleType, typename = void>
+struct is_compatible_type_impl: std::false_type {};
+
+template<typename BasicJsonType, typename CompatibleType>
+struct is_compatible_type_impl <
+ BasicJsonType, CompatibleType,
+ enable_if_t<is_complete_type<CompatibleType>::value >>
+{
+ static constexpr bool value =
+ has_to_json<BasicJsonType, CompatibleType>::value;
+};
+
+template<typename BasicJsonType, typename CompatibleType>
+struct is_compatible_type
+ : is_compatible_type_impl<BasicJsonType, CompatibleType> {};
+
+// https://en.cppreference.com/w/cpp/types/conjunction
+template<class...> struct conjunction : std::true_type { };
+template<class B1> struct conjunction<B1> : B1 { };
+template<class B1, class... Bn>
+struct conjunction<B1, Bn...>
+: std::conditional<bool(B1::value), conjunction<Bn...>, B1>::type {};
+
+template<typename T1, typename T2>
+struct is_constructible_tuple : std::false_type {};
+
+template<typename T1, typename... Args>
+struct is_constructible_tuple<T1, std::tuple<Args...>> : conjunction<std::is_constructible<T1, Args>...> {};
+} // namespace detail
+} // namespace nlohmann
+
+// #include <nlohmann/detail/value_t.hpp>
+
+
+#include <array> // array
+#include <cstddef> // size_t
+#include <cstdint> // uint8_t
+#include <string> // string
+
+namespace nlohmann
+{
+namespace detail
+{
+///////////////////////////
+// JSON type enumeration //
+///////////////////////////
+
+/*!
+@brief the JSON type enumeration
+
+This enumeration collects the different JSON types. It is internally used to
+distinguish the stored values, and the functions @ref basic_json::is_null(),
+@ref basic_json::is_object(), @ref basic_json::is_array(),
+@ref basic_json::is_string(), @ref basic_json::is_boolean(),
+@ref basic_json::is_number() (with @ref basic_json::is_number_integer(),
+@ref basic_json::is_number_unsigned(), and @ref basic_json::is_number_float()),
+@ref basic_json::is_discarded(), @ref basic_json::is_primitive(), and
+@ref basic_json::is_structured() rely on it.
+
+@note There are three enumeration entries (number_integer, number_unsigned, and
+number_float), because the library distinguishes these three types for numbers:
+@ref basic_json::number_unsigned_t is used for unsigned integers,
+@ref basic_json::number_integer_t is used for signed integers, and
+@ref basic_json::number_float_t is used for floating-point numbers or to
+approximate integers which do not fit in the limits of their respective type.
+
+@sa @ref basic_json::basic_json(const value_t value_type) -- create a JSON
+value with the default value for a given type
+
+@since version 1.0.0
+*/
+enum class value_t : std::uint8_t
+{
+ null, ///< null value
+ object, ///< object (unordered set of name/value pairs)
+ array, ///< array (ordered collection of values)
+ string, ///< string value
+ boolean, ///< boolean value
+ number_integer, ///< number value (signed integer)
+ number_unsigned, ///< number value (unsigned integer)
+ number_float, ///< number value (floating-point)
+ binary, ///< binary array (ordered collection of bytes)
+ discarded ///< discarded by the parser callback function
+};
+
+/*!
+@brief comparison operator for JSON types
+
+Returns an ordering that is similar to Python:
+- order: null < boolean < number < object < array < string < binary
+- furthermore, each type is not smaller than itself
+- discarded values are not comparable
+- binary is represented as a b"" string in Python and directly comparable to a
+ string; however, making a binary array directly comparable with a string would
+ be surprising behavior in a JSON file.
+
+@since version 1.0.0
+*/
+inline bool operator<(const value_t lhs, const value_t rhs) noexcept
+{
+ static constexpr std::array<std::uint8_t, 9> order = {{
+ 0 /* null */, 3 /* object */, 4 /* array */, 5 /* string */,
+ 1 /* boolean */, 2 /* integer */, 2 /* unsigned */, 2 /* float */,
+ 6 /* binary */
+ }
+ };
+
+ const auto l_index = static_cast<std::size_t>(lhs);
+ const auto r_index = static_cast<std::size_t>(rhs);
+ return l_index < order.size() && r_index < order.size() && order[l_index] < order[r_index];
+}
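+
+// Behaviour sketch (illustrative only): with the order table above,
+// operator<(value_t::null, value_t::string) is true, the reverse is false, and
+// any comparison involving value_t::discarded yields false because its index
+// falls outside the table.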
+} // namespace detail
+} // namespace nlohmann
+
+
+namespace nlohmann
+{
+namespace detail
+{
+template<typename BasicJsonType>
+void from_json(const BasicJsonType& j, typename std::nullptr_t& n)
+{
+ if (JSON_HEDLEY_UNLIKELY(!j.is_null()))
+ {
+ JSON_THROW(type_error::create(302, "type must be null, but is " + std::string(j.type_name())));
+ }
+ n = nullptr;
+}
+
+// overloads for basic_json template parameters
+template < typename BasicJsonType, typename ArithmeticType,
+ enable_if_t < std::is_arithmetic<ArithmeticType>::value&&
+ !std::is_same<ArithmeticType, typename BasicJsonType::boolean_t>::value,
+ int > = 0 >
+void get_arithmetic_value(const BasicJsonType& j, ArithmeticType& val)
+{
+ switch (static_cast<value_t>(j))
+ {
+ case value_t::number_unsigned:
+ {
+ val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_unsigned_t*>());
+ break;
+ }
+ case value_t::number_integer:
+ {
+ val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_integer_t*>());
+ break;
+ }
+ case value_t::number_float:
+ {
+ val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_float_t*>());
+ break;
+ }
+
+ default:
+ JSON_THROW(type_error::create(302, "type must be number, but is " + std::string(j.type_name())));
+ }
+}
+
+template<typename BasicJsonType>
+void from_json(const BasicJsonType& j, typename BasicJsonType::boolean_t& b)
+{
+ if (JSON_HEDLEY_UNLIKELY(!j.is_boolean()))
+ {
+ JSON_THROW(type_error::create(302, "type must be boolean, but is " + std::string(j.type_name())));
+ }
+ b = *j.template get_ptr<const typename BasicJsonType::boolean_t*>();
+}
+
+template<typename BasicJsonType>
+void from_json(const BasicJsonType& j, typename BasicJsonType::string_t& s)
+{
+ if (JSON_HEDLEY_UNLIKELY(!j.is_string()))
+ {
+ JSON_THROW(type_error::create(302, "type must be string, but is " + std::string(j.type_name())));
+ }
+ s = *j.template get_ptr<const typename BasicJsonType::string_t*>();
+}
+
+template <
+ typename BasicJsonType, typename ConstructibleStringType,
+ enable_if_t <
+ is_constructible_string_type<BasicJsonType, ConstructibleStringType>::value&&
+ !std::is_same<typename BasicJsonType::string_t,
+ ConstructibleStringType>::value,
+ int > = 0 >
+void from_json(const BasicJsonType& j, ConstructibleStringType& s)
+{
+ if (JSON_HEDLEY_UNLIKELY(!j.is_string()))
+ {
+ JSON_THROW(type_error::create(302, "type must be string, but is " + std::string(j.type_name())));
+ }
+
+ s = *j.template get_ptr<const typename BasicJsonType::string_t*>();
+}
+
+template<typename BasicJsonType>
+void from_json(const BasicJsonType& j, typename BasicJsonType::number_float_t& val)
+{
+ get_arithmetic_value(j, val);
+}
+
+template<typename BasicJsonType>
+void from_json(const BasicJsonType& j, typename BasicJsonType::number_unsigned_t& val)
+{
+ get_arithmetic_value(j, val);
+}
+
+template<typename BasicJsonType>
+void from_json(const BasicJsonType& j, typename BasicJsonType::number_integer_t& val)
+{
+ get_arithmetic_value(j, val);
+}
+
+template<typename BasicJsonType, typename EnumType,
+ enable_if_t<std::is_enum<EnumType>::value, int> = 0>
+void from_json(const BasicJsonType& j, EnumType& e)
+{
+ typename std::underlying_type<EnumType>::type val;
+ get_arithmetic_value(j, val);
+ e = static_cast<EnumType>(val);
+}
+
+// forward_list doesn't have an insert method
+template<typename BasicJsonType, typename T, typename Allocator,
+ enable_if_t<is_getable<BasicJsonType, T>::value, int> = 0>
+void from_json(const BasicJsonType& j, std::forward_list<T, Allocator>& l)
+{
+ if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
+ {
+ JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name())));
+ }
+ l.clear();
+ std::transform(j.rbegin(), j.rend(),
+ std::front_inserter(l), [](const BasicJsonType & i)
+ {
+ return i.template get<T>();
+ });
+}
+
+// valarray doesn't have an insert method
+template<typename BasicJsonType, typename T,
+ enable_if_t<is_getable<BasicJsonType, T>::value, int> = 0>
+void from_json(const BasicJsonType& j, std::valarray<T>& l)
+{
+ if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
+ {
+ JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name())));
+ }
+ l.resize(j.size());
+ std::transform(j.begin(), j.end(), std::begin(l),
+ [](const BasicJsonType & elem)
+ {
+ return elem.template get<T>();
+ });
+}
+
+template<typename BasicJsonType, typename T, std::size_t N>
+auto from_json(const BasicJsonType& j, T (&arr)[N])
+-> decltype(j.template get<T>(), void())
+{
+ for (std::size_t i = 0; i < N; ++i)
+ {
+ arr[i] = j.at(i).template get<T>();
+ }
+}
+
+template<typename BasicJsonType>
+void from_json_array_impl(const BasicJsonType& j, typename BasicJsonType::array_t& arr, priority_tag<3> /*unused*/)
+{
+ arr = *j.template get_ptr<const typename BasicJsonType::array_t*>();
+}
+
+template<typename BasicJsonType, typename T, std::size_t N>
+auto from_json_array_impl(const BasicJsonType& j, std::array<T, N>& arr,
+ priority_tag<2> /*unused*/)
+-> decltype(j.template get<T>(), void())
+{
+ for (std::size_t i = 0; i < N; ++i)
+ {
+ arr[i] = j.at(i).template get<T>();
+ }
+}
+
+template<typename BasicJsonType, typename ConstructibleArrayType>
+auto from_json_array_impl(const BasicJsonType& j, ConstructibleArrayType& arr, priority_tag<1> /*unused*/)
+-> decltype(
+ arr.reserve(std::declval<typename ConstructibleArrayType::size_type>()),
+ j.template get<typename ConstructibleArrayType::value_type>(),
+ void())
+{
+ using std::end;
+
+ ConstructibleArrayType ret;
+ ret.reserve(j.size());
+ std::transform(j.begin(), j.end(),
+ std::inserter(ret, end(ret)), [](const BasicJsonType & i)
+ {
+ // get<BasicJsonType>() returns *this, this won't call a from_json
+ // method when value_type is BasicJsonType
+ return i.template get<typename ConstructibleArrayType::value_type>();
+ });
+ arr = std::move(ret);
+}
+
+template<typename BasicJsonType, typename ConstructibleArrayType>
+void from_json_array_impl(const BasicJsonType& j, ConstructibleArrayType& arr,
+ priority_tag<0> /*unused*/)
+{
+ using std::end;
+
+ ConstructibleArrayType ret;
+ std::transform(
+ j.begin(), j.end(), std::inserter(ret, end(ret)),
+ [](const BasicJsonType & i)
+ {
+ // get<BasicJsonType>() returns *this, this won't call a from_json
+ // method when value_type is BasicJsonType
+ return i.template get<typename ConstructibleArrayType::value_type>();
+ });
+ arr = std::move(ret);
+}
+
+template < typename BasicJsonType, typename ConstructibleArrayType,
+ enable_if_t <
+ is_constructible_array_type<BasicJsonType, ConstructibleArrayType>::value&&
+ !is_constructible_object_type<BasicJsonType, ConstructibleArrayType>::value&&
+ !is_constructible_string_type<BasicJsonType, ConstructibleArrayType>::value&&
+ !std::is_same<ConstructibleArrayType, typename BasicJsonType::binary_t>::value&&
+ !is_basic_json<ConstructibleArrayType>::value,
+ int > = 0 >
+auto from_json(const BasicJsonType& j, ConstructibleArrayType& arr)
+-> decltype(from_json_array_impl(j, arr, priority_tag<3> {}),
+j.template get<typename ConstructibleArrayType::value_type>(),
+void())
+{
+ if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
+ {
+ JSON_THROW(type_error::create(302, "type must be array, but is " +
+ std::string(j.type_name())));
+ }
+
+ from_json_array_impl(j, arr, priority_tag<3> {});
+}
+
+template<typename BasicJsonType>
+void from_json(const BasicJsonType& j, typename BasicJsonType::binary_t& bin)
+{
+ if (JSON_HEDLEY_UNLIKELY(!j.is_binary()))
+ {
+ JSON_THROW(type_error::create(302, "type must be binary, but is " + std::string(j.type_name())));
+ }
+
+ bin = *j.template get_ptr<const typename BasicJsonType::binary_t*>();
+}
+
+template<typename BasicJsonType, typename ConstructibleObjectType,
+ enable_if_t<is_constructible_object_type<BasicJsonType, ConstructibleObjectType>::value, int> = 0>
+void from_json(const BasicJsonType& j, ConstructibleObjectType& obj)
+{
+ if (JSON_HEDLEY_UNLIKELY(!j.is_object()))
+ {
+ JSON_THROW(type_error::create(302, "type must be object, but is " + std::string(j.type_name())));
+ }
+
+ ConstructibleObjectType ret;
+ auto inner_object = j.template get_ptr<const typename BasicJsonType::object_t*>();
+ using value_type = typename ConstructibleObjectType::value_type;
+ std::transform(
+ inner_object->begin(), inner_object->end(),
+ std::inserter(ret, ret.begin()),
+ [](typename BasicJsonType::object_t::value_type const & p)
+ {
+ return value_type(p.first, p.second.template get<typename ConstructibleObjectType::mapped_type>());
+ });
+ obj = std::move(ret);
+}
+
+// overload for arithmetic types, not chosen for basic_json template arguments
+// (BooleanType, etc..); note: Is it really necessary to provide explicit
+// overloads for boolean_t etc. in case of a custom BooleanType which is not
+// an arithmetic type?
+template < typename BasicJsonType, typename ArithmeticType,
+ enable_if_t <
+ std::is_arithmetic<ArithmeticType>::value&&
+ !std::is_same<ArithmeticType, typename BasicJsonType::number_unsigned_t>::value&&
+ !std::is_same<ArithmeticType, typename BasicJsonType::number_integer_t>::value&&
+ !std::is_same<ArithmeticType, typename BasicJsonType::number_float_t>::value&&
+ !std::is_same<ArithmeticType, typename BasicJsonType::boolean_t>::value,
+ int > = 0 >
+void from_json(const BasicJsonType& j, ArithmeticType& val)
+{
+ switch (static_cast<value_t>(j))
+ {
+ case value_t::number_unsigned:
+ {
+ val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_unsigned_t*>());
+ break;
+ }
+ case value_t::number_integer:
+ {
+ val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_integer_t*>());
+ break;
+ }
+ case value_t::number_float:
+ {
+ val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_float_t*>());
+ break;
+ }
+ case value_t::boolean:
+ {
+ val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::boolean_t*>());
+ break;
+ }
+
+ default:
+ JSON_THROW(type_error::create(302, "type must be number, but is " + std::string(j.type_name())));
+ }
+}
+
+template<typename BasicJsonType, typename A1, typename A2>
+void from_json(const BasicJsonType& j, std::pair<A1, A2>& p)
+{
+ p = {j.at(0).template get<A1>(), j.at(1).template get<A2>()};
+}
+
+template<typename BasicJsonType, typename Tuple, std::size_t... Idx>
+void from_json_tuple_impl(const BasicJsonType& j, Tuple& t, index_sequence<Idx...> /*unused*/)
+{
+ t = std::make_tuple(j.at(Idx).template get<typename std::tuple_element<Idx, Tuple>::type>()...);
+}
+
+template<typename BasicJsonType, typename... Args>
+void from_json(const BasicJsonType& j, std::tuple<Args...>& t)
+{
+ from_json_tuple_impl(j, t, index_sequence_for<Args...> {});
+}
+
+template < typename BasicJsonType, typename Key, typename Value, typename Compare, typename Allocator,
+ typename = enable_if_t < !std::is_constructible <
+ typename BasicJsonType::string_t, Key >::value >>
+void from_json(const BasicJsonType& j, std::map<Key, Value, Compare, Allocator>& m)
+{
+ if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
+ {
+ JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name())));
+ }
+ m.clear();
+ for (const auto& p : j)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!p.is_array()))
+ {
+ JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(p.type_name())));
+ }
+ m.emplace(p.at(0).template get<Key>(), p.at(1).template get<Value>());
+ }
+}
+
+template < typename BasicJsonType, typename Key, typename Value, typename Hash, typename KeyEqual, typename Allocator,
+ typename = enable_if_t < !std::is_constructible <
+ typename BasicJsonType::string_t, Key >::value >>
+void from_json(const BasicJsonType& j, std::unordered_map<Key, Value, Hash, KeyEqual, Allocator>& m)
+{
+ if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
+ {
+ JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name())));
+ }
+ m.clear();
+ for (const auto& p : j)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!p.is_array()))
+ {
+ JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(p.type_name())));
+ }
+ m.emplace(p.at(0).template get<Key>(), p.at(1).template get<Value>());
+ }
+}
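+
+// Usage sketch for the two map overloads above: a map whose key type cannot be
+// constructed from the JSON string type is expected as an array of
+// [key, value] pairs rather than as a JSON object. A minimal example,
+// assuming the usual single-header include path:
+//
+// @code{.cpp}
+// #include <map>
+// #include <string>
+// #include <nlohmann/json.hpp>
+//
+// // non-string keys round-trip as an array of pairs
+// nlohmann::json j = nlohmann::json::parse(R"([[1, "one"], [2, "two"]])");
+// auto m = j.get<std::map<int, std::string>>();  // {1: "one", 2: "two"}
+// @endcode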
+
+struct from_json_fn
+{
+ template<typename BasicJsonType, typename T>
+ auto operator()(const BasicJsonType& j, T& val) const
+ noexcept(noexcept(from_json(j, val)))
+ -> decltype(from_json(j, val), void())
+ {
+ return from_json(j, val);
+ }
+};
+} // namespace detail
+
+/// namespace to hold default `from_json` function
+/// to see why this is required:
+/// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4381.html
+namespace
+{
+constexpr const auto& from_json = detail::static_const<detail::from_json_fn>::value;
+} // namespace
+} // namespace nlohmann
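+
+// Usage sketch: because detail::from_json_fn::operator() performs an
+// unqualified call, a free from_json found by ADL in the value type's own
+// namespace participates in conversion (this is what the N4381 pattern above
+// enables). The type ns::person below is a hypothetical example.
+//
+// @code{.cpp}
+// namespace ns
+// {
+// struct person { std::string name; int age; };
+//
+// void from_json(const nlohmann::json& j, person& p)
+// {
+//     j.at("name").get_to(p.name);
+//     j.at("age").get_to(p.age);
+// }
+// } // namespace ns
+//
+// auto p = nlohmann::json::parse(R"({"name": "Ada", "age": 36})").get<ns::person>();
+// @endcode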
+
+// #include <nlohmann/detail/conversions/to_json.hpp>
+
+
+#include <algorithm> // copy
+#include <iterator> // begin, end
+#include <string> // string
+#include <tuple> // tuple, get
+#include <type_traits> // is_same, is_constructible, is_floating_point, is_enum, underlying_type
+#include <utility> // move, forward, declval, pair
+#include <valarray> // valarray
+#include <vector> // vector
+
+// #include <nlohmann/detail/iterators/iteration_proxy.hpp>
+
+
+#include <cstddef> // size_t
+#include <iterator> // input_iterator_tag
+#include <string> // string, to_string
+#include <tuple> // tuple_size, get, tuple_element
+
+// #include <nlohmann/detail/meta/type_traits.hpp>
+
+// #include <nlohmann/detail/value_t.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+template<typename string_type>
+void int_to_string( string_type& target, std::size_t value )
+{
+ // For ADL
+ using std::to_string;
+ target = to_string(value);
+}
+template<typename IteratorType> class iteration_proxy_value
+{
+ public:
+ using difference_type = std::ptrdiff_t;
+ using value_type = iteration_proxy_value;
+ using pointer = value_type * ;
+ using reference = value_type & ;
+ using iterator_category = std::input_iterator_tag;
+ using string_type = typename std::remove_cv< typename std::remove_reference<decltype( std::declval<IteratorType>().key() ) >::type >::type;
+
+ private:
+ /// the iterator
+ IteratorType anchor;
+ /// an index for arrays (used to create key names)
+ std::size_t array_index = 0;
+ /// last stringified array index
+ mutable std::size_t array_index_last = 0;
+ /// a string representation of the array index
+ mutable string_type array_index_str = "0";
+ /// an empty string (to return a reference for primitive values)
+ const string_type empty_str = "";
+
+ public:
+ explicit iteration_proxy_value(IteratorType it) noexcept : anchor(it) {}
+
+ /// dereference operator (needed for range-based for)
+ iteration_proxy_value& operator*()
+ {
+ return *this;
+ }
+
+ /// increment operator (needed for range-based for)
+ iteration_proxy_value& operator++()
+ {
+ ++anchor;
+ ++array_index;
+
+ return *this;
+ }
+
+ /// equality operator (needed for InputIterator)
+ bool operator==(const iteration_proxy_value& o) const
+ {
+ return anchor == o.anchor;
+ }
+
+ /// inequality operator (needed for range-based for)
+ bool operator!=(const iteration_proxy_value& o) const
+ {
+ return anchor != o.anchor;
+ }
+
+ /// return key of the iterator
+ const string_type& key() const
+ {
+ JSON_ASSERT(anchor.m_object != nullptr);
+
+ switch (anchor.m_object->type())
+ {
+ // use integer array index as key
+ case value_t::array:
+ {
+ if (array_index != array_index_last)
+ {
+ int_to_string( array_index_str, array_index );
+ array_index_last = array_index;
+ }
+ return array_index_str;
+ }
+
+ // use key from the object
+ case value_t::object:
+ return anchor.key();
+
+ // use an empty key for all primitive types
+ default:
+ return empty_str;
+ }
+ }
+
+ /// return value of the iterator
+ typename IteratorType::reference value() const
+ {
+ return anchor.value();
+ }
+};
+
+/// proxy class for the items() function
+template<typename IteratorType> class iteration_proxy
+{
+ private:
+ /// the container to iterate
+ typename IteratorType::reference container;
+
+ public:
+ /// construct iteration proxy from a container
+ explicit iteration_proxy(typename IteratorType::reference cont) noexcept
+ : container(cont) {}
+
+ /// return iterator begin (needed for range-based for)
+ iteration_proxy_value<IteratorType> begin() noexcept
+ {
+ return iteration_proxy_value<IteratorType>(container.begin());
+ }
+
+ /// return iterator end (needed for range-based for)
+ iteration_proxy_value<IteratorType> end() noexcept
+ {
+ return iteration_proxy_value<IteratorType>(container.end());
+ }
+};
+// Structured Bindings Support
+// For further reference see https://blog.tartanllama.xyz/structured-bindings/
+// And see https://github.com/nlohmann/json/pull/1391
+template<std::size_t N, typename IteratorType, enable_if_t<N == 0, int> = 0>
+auto get(const nlohmann::detail::iteration_proxy_value<IteratorType>& i) -> decltype(i.key())
+{
+ return i.key();
+}
+// Structured Bindings Support
+// For further reference see https://blog.tartanllama.xyz/structured-bindings/
+// And see https://github.com/nlohmann/json/pull/1391
+template<std::size_t N, typename IteratorType, enable_if_t<N == 1, int> = 0>
+auto get(const nlohmann::detail::iteration_proxy_value<IteratorType>& i) -> decltype(i.value())
+{
+ return i.value();
+}
+} // namespace detail
+} // namespace nlohmann
+
+// The Addition to the STD Namespace is required to add
+// Structured Bindings Support to the iteration_proxy_value class
+// For further reference see https://blog.tartanllama.xyz/structured-bindings/
+// And see https://github.com/nlohmann/json/pull/1391
+namespace std
+{
+#if defined(__clang__)
+ // Fix: https://github.com/nlohmann/json/issues/1401
+ #pragma clang diagnostic push
+ #pragma clang diagnostic ignored "-Wmismatched-tags"
+#endif
+template<typename IteratorType>
+class tuple_size<::nlohmann::detail::iteration_proxy_value<IteratorType>>
+ : public std::integral_constant<std::size_t, 2> {};
+
+template<std::size_t N, typename IteratorType>
+class tuple_element<N, ::nlohmann::detail::iteration_proxy_value<IteratorType >>
+{
+ public:
+ using type = decltype(
+ get<N>(std::declval <
+ ::nlohmann::detail::iteration_proxy_value<IteratorType >> ()));
+};
+#if defined(__clang__)
+ #pragma clang diagnostic pop
+#endif
+} // namespace std
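+
+// Usage sketch enabled by the std::tuple_size / std::tuple_element
+// specializations above: with C++17 or later, items() can be unpacked with
+// structured bindings. For objects the key is the member name; for arrays it
+// is the stringified index produced by int_to_string().
+//
+// @code{.cpp}
+// #include <iostream>
+//
+// nlohmann::json j = {{"one", 1}, {"two", 2}};
+// for (auto& [key, value] : j.items())
+// {
+//     std::cout << key << " : " << value << '\n';
+// }
+// @endcode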
+
+// #include <nlohmann/detail/meta/cpp_future.hpp>
+
+// #include <nlohmann/detail/meta/type_traits.hpp>
+
+// #include <nlohmann/detail/value_t.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+//////////////////
+// constructors //
+//////////////////
+
+template<value_t> struct external_constructor;
+
+template<>
+struct external_constructor<value_t::boolean>
+{
+ template<typename BasicJsonType>
+ static void construct(BasicJsonType& j, typename BasicJsonType::boolean_t b) noexcept
+ {
+ j.m_type = value_t::boolean;
+ j.m_value = b;
+ j.assert_invariant();
+ }
+};
+
+template<>
+struct external_constructor<value_t::string>
+{
+ template<typename BasicJsonType>
+ static void construct(BasicJsonType& j, const typename BasicJsonType::string_t& s)
+ {
+ j.m_type = value_t::string;
+ j.m_value = s;
+ j.assert_invariant();
+ }
+
+ template<typename BasicJsonType>
+ static void construct(BasicJsonType& j, typename BasicJsonType::string_t&& s)
+ {
+ j.m_type = value_t::string;
+ j.m_value = std::move(s);
+ j.assert_invariant();
+ }
+
+ template < typename BasicJsonType, typename CompatibleStringType,
+ enable_if_t < !std::is_same<CompatibleStringType, typename BasicJsonType::string_t>::value,
+ int > = 0 >
+ static void construct(BasicJsonType& j, const CompatibleStringType& str)
+ {
+ j.m_type = value_t::string;
+ j.m_value.string = j.template create<typename BasicJsonType::string_t>(str);
+ j.assert_invariant();
+ }
+};
+
+template<>
+struct external_constructor<value_t::binary>
+{
+ template<typename BasicJsonType>
+ static void construct(BasicJsonType& j, const typename BasicJsonType::binary_t& b)
+ {
+ j.m_type = value_t::binary;
+ typename BasicJsonType::binary_t value{b};
+ j.m_value = value;
+ j.assert_invariant();
+ }
+
+ template<typename BasicJsonType>
+ static void construct(BasicJsonType& j, typename BasicJsonType::binary_t&& b)
+ {
+ j.m_type = value_t::binary;
+ typename BasicJsonType::binary_t value{std::move(b)};
+ j.m_value = value;
+ j.assert_invariant();
+ }
+};
+
+template<>
+struct external_constructor<value_t::number_float>
+{
+ template<typename BasicJsonType>
+ static void construct(BasicJsonType& j, typename BasicJsonType::number_float_t val) noexcept
+ {
+ j.m_type = value_t::number_float;
+ j.m_value = val;
+ j.assert_invariant();
+ }
+};
+
+template<>
+struct external_constructor<value_t::number_unsigned>
+{
+ template<typename BasicJsonType>
+ static void construct(BasicJsonType& j, typename BasicJsonType::number_unsigned_t val) noexcept
+ {
+ j.m_type = value_t::number_unsigned;
+ j.m_value = val;
+ j.assert_invariant();
+ }
+};
+
+template<>
+struct external_constructor<value_t::number_integer>
+{
+ template<typename BasicJsonType>
+ static void construct(BasicJsonType& j, typename BasicJsonType::number_integer_t val) noexcept
+ {
+ j.m_type = value_t::number_integer;
+ j.m_value = val;
+ j.assert_invariant();
+ }
+};
+
+template<>
+struct external_constructor<value_t::array>
+{
+ template<typename BasicJsonType>
+ static void construct(BasicJsonType& j, const typename BasicJsonType::array_t& arr)
+ {
+ j.m_type = value_t::array;
+ j.m_value = arr;
+ j.assert_invariant();
+ }
+
+ template<typename BasicJsonType>
+ static void construct(BasicJsonType& j, typename BasicJsonType::array_t&& arr)
+ {
+ j.m_type = value_t::array;
+ j.m_value = std::move(arr);
+ j.assert_invariant();
+ }
+
+ template < typename BasicJsonType, typename CompatibleArrayType,
+ enable_if_t < !std::is_same<CompatibleArrayType, typename BasicJsonType::array_t>::value,
+ int > = 0 >
+ static void construct(BasicJsonType& j, const CompatibleArrayType& arr)
+ {
+ using std::begin;
+ using std::end;
+ j.m_type = value_t::array;
+ j.m_value.array = j.template create<typename BasicJsonType::array_t>(begin(arr), end(arr));
+ j.assert_invariant();
+ }
+
+ template<typename BasicJsonType>
+ static void construct(BasicJsonType& j, const std::vector<bool>& arr)
+ {
+ j.m_type = value_t::array;
+ j.m_value = value_t::array;
+ j.m_value.array->reserve(arr.size());
+ for (const bool x : arr)
+ {
+ j.m_value.array->push_back(x);
+ }
+ j.assert_invariant();
+ }
+
+ template<typename BasicJsonType, typename T,
+ enable_if_t<std::is_convertible<T, BasicJsonType>::value, int> = 0>
+ static void construct(BasicJsonType& j, const std::valarray<T>& arr)
+ {
+ j.m_type = value_t::array;
+ j.m_value = value_t::array;
+ j.m_value.array->resize(arr.size());
+ if (arr.size() > 0)
+ {
+ std::copy(std::begin(arr), std::end(arr), j.m_value.array->begin());
+ }
+ j.assert_invariant();
+ }
+};
+
+template<>
+struct external_constructor<value_t::object>
+{
+ template<typename BasicJsonType>
+ static void construct(BasicJsonType& j, const typename BasicJsonType::object_t& obj)
+ {
+ j.m_type = value_t::object;
+ j.m_value = obj;
+ j.assert_invariant();
+ }
+
+ template<typename BasicJsonType>
+ static void construct(BasicJsonType& j, typename BasicJsonType::object_t&& obj)
+ {
+ j.m_type = value_t::object;
+ j.m_value = std::move(obj);
+ j.assert_invariant();
+ }
+
+ template < typename BasicJsonType, typename CompatibleObjectType,
+ enable_if_t < !std::is_same<CompatibleObjectType, typename BasicJsonType::object_t>::value, int > = 0 >
+ static void construct(BasicJsonType& j, const CompatibleObjectType& obj)
+ {
+ using std::begin;
+ using std::end;
+
+ j.m_type = value_t::object;
+ j.m_value.object = j.template create<typename BasicJsonType::object_t>(begin(obj), end(obj));
+ j.assert_invariant();
+ }
+};
+
+/////////////
+// to_json //
+/////////////
+
+template<typename BasicJsonType, typename T,
+ enable_if_t<std::is_same<T, typename BasicJsonType::boolean_t>::value, int> = 0>
+void to_json(BasicJsonType& j, T b) noexcept
+{
+ external_constructor<value_t::boolean>::construct(j, b);
+}
+
+template<typename BasicJsonType, typename CompatibleString,
+ enable_if_t<std::is_constructible<typename BasicJsonType::string_t, CompatibleString>::value, int> = 0>
+void to_json(BasicJsonType& j, const CompatibleString& s)
+{
+ external_constructor<value_t::string>::construct(j, s);
+}
+
+template<typename BasicJsonType>
+void to_json(BasicJsonType& j, typename BasicJsonType::string_t&& s)
+{
+ external_constructor<value_t::string>::construct(j, std::move(s));
+}
+
+template<typename BasicJsonType, typename FloatType,
+ enable_if_t<std::is_floating_point<FloatType>::value, int> = 0>
+void to_json(BasicJsonType& j, FloatType val) noexcept
+{
+ external_constructor<value_t::number_float>::construct(j, static_cast<typename BasicJsonType::number_float_t>(val));
+}
+
+template<typename BasicJsonType, typename CompatibleNumberUnsignedType,
+ enable_if_t<is_compatible_integer_type<typename BasicJsonType::number_unsigned_t, CompatibleNumberUnsignedType>::value, int> = 0>
+void to_json(BasicJsonType& j, CompatibleNumberUnsignedType val) noexcept
+{
+ external_constructor<value_t::number_unsigned>::construct(j, static_cast<typename BasicJsonType::number_unsigned_t>(val));
+}
+
+template<typename BasicJsonType, typename CompatibleNumberIntegerType,
+ enable_if_t<is_compatible_integer_type<typename BasicJsonType::number_integer_t, CompatibleNumberIntegerType>::value, int> = 0>
+void to_json(BasicJsonType& j, CompatibleNumberIntegerType val) noexcept
+{
+ external_constructor<value_t::number_integer>::construct(j, static_cast<typename BasicJsonType::number_integer_t>(val));
+}
+
+template<typename BasicJsonType, typename EnumType,
+ enable_if_t<std::is_enum<EnumType>::value, int> = 0>
+void to_json(BasicJsonType& j, EnumType e) noexcept
+{
+ using underlying_type = typename std::underlying_type<EnumType>::type;
+ external_constructor<value_t::number_integer>::construct(j, static_cast<underlying_type>(e));
+}
+
+template<typename BasicJsonType>
+void to_json(BasicJsonType& j, const std::vector<bool>& e)
+{
+ external_constructor<value_t::array>::construct(j, e);
+}
+
+template < typename BasicJsonType, typename CompatibleArrayType,
+ enable_if_t < is_compatible_array_type<BasicJsonType,
+ CompatibleArrayType>::value&&
+ !is_compatible_object_type<BasicJsonType, CompatibleArrayType>::value&&
+ !is_compatible_string_type<BasicJsonType, CompatibleArrayType>::value&&
+ !std::is_same<typename BasicJsonType::binary_t, CompatibleArrayType>::value&&
+ !is_basic_json<CompatibleArrayType>::value,
+ int > = 0 >
+void to_json(BasicJsonType& j, const CompatibleArrayType& arr)
+{
+ external_constructor<value_t::array>::construct(j, arr);
+}
+
+template<typename BasicJsonType>
+void to_json(BasicJsonType& j, const typename BasicJsonType::binary_t& bin)
+{
+ external_constructor<value_t::binary>::construct(j, bin);
+}
+
+template<typename BasicJsonType, typename T,
+ enable_if_t<std::is_convertible<T, BasicJsonType>::value, int> = 0>
+void to_json(BasicJsonType& j, const std::valarray<T>& arr)
+{
+ external_constructor<value_t::array>::construct(j, std::move(arr));
+}
+
+template<typename BasicJsonType>
+void to_json(BasicJsonType& j, typename BasicJsonType::array_t&& arr)
+{
+ external_constructor<value_t::array>::construct(j, std::move(arr));
+}
+
+template < typename BasicJsonType, typename CompatibleObjectType,
+ enable_if_t < is_compatible_object_type<BasicJsonType, CompatibleObjectType>::value&& !is_basic_json<CompatibleObjectType>::value, int > = 0 >
+void to_json(BasicJsonType& j, const CompatibleObjectType& obj)
+{
+ external_constructor<value_t::object>::construct(j, obj);
+}
+
+template<typename BasicJsonType>
+void to_json(BasicJsonType& j, typename BasicJsonType::object_t&& obj)
+{
+ external_constructor<value_t::object>::construct(j, std::move(obj));
+}
+
+template <
+ typename BasicJsonType, typename T, std::size_t N,
+ enable_if_t < !std::is_constructible<typename BasicJsonType::string_t,
+ const T(&)[N]>::value,
+ int > = 0 >
+void to_json(BasicJsonType& j, const T(&arr)[N])
+{
+ external_constructor<value_t::array>::construct(j, arr);
+}
+
+template < typename BasicJsonType, typename T1, typename T2, enable_if_t < std::is_constructible<BasicJsonType, T1>::value&& std::is_constructible<BasicJsonType, T2>::value, int > = 0 >
+void to_json(BasicJsonType& j, const std::pair<T1, T2>& p)
+{
+ j = { p.first, p.second };
+}
+
+// for https://github.com/nlohmann/json/pull/1134
+template<typename BasicJsonType, typename T,
+ enable_if_t<std::is_same<T, iteration_proxy_value<typename BasicJsonType::iterator>>::value, int> = 0>
+void to_json(BasicJsonType& j, const T& b)
+{
+ j = { {b.key(), b.value()} };
+}
+
+template<typename BasicJsonType, typename Tuple, std::size_t... Idx>
+void to_json_tuple_impl(BasicJsonType& j, const Tuple& t, index_sequence<Idx...> /*unused*/)
+{
+ j = { std::get<Idx>(t)... };
+}
+
+template<typename BasicJsonType, typename T, enable_if_t<is_constructible_tuple<BasicJsonType, T>::value, int > = 0>
+void to_json(BasicJsonType& j, const T& t)
+{
+ to_json_tuple_impl(j, t, make_index_sequence<std::tuple_size<T>::value> {});
+}
+
+struct to_json_fn
+{
+ template<typename BasicJsonType, typename T>
+ auto operator()(BasicJsonType& j, T&& val) const noexcept(noexcept(to_json(j, std::forward<T>(val))))
+ -> decltype(to_json(j, std::forward<T>(val)), void())
+ {
+ return to_json(j, std::forward<T>(val));
+ }
+};
+} // namespace detail
+
+/// namespace to hold default `to_json` function
+namespace
+{
+constexpr const auto& to_json = detail::static_const<detail::to_json_fn>::value;
+} // namespace
+} // namespace nlohmann
+
+
+namespace nlohmann
+{
+
+template<typename, typename>
+struct adl_serializer
+{
+ /*!
+ @brief convert a JSON value to any value type
+
+ This function is usually called by the `get()` function of the
+ @ref basic_json class (either explicit or via conversion operators).
+
+ @param[in] j JSON value to read from
+ @param[in,out] val value to write to
+ */
+ template<typename BasicJsonType, typename ValueType>
+ static auto from_json(BasicJsonType&& j, ValueType& val) noexcept(
+ noexcept(::nlohmann::from_json(std::forward<BasicJsonType>(j), val)))
+ -> decltype(::nlohmann::from_json(std::forward<BasicJsonType>(j), val), void())
+ {
+ ::nlohmann::from_json(std::forward<BasicJsonType>(j), val);
+ }
+
+ /*!
+ @brief convert any value type to a JSON value
+
+ This function is usually called by the constructors of the @ref basic_json
+ class.
+
+ @param[in,out] j JSON value to write to
+ @param[in] val value to read from
+ */
+ template<typename BasicJsonType, typename ValueType>
+ static auto to_json(BasicJsonType& j, ValueType&& val) noexcept(
+ noexcept(::nlohmann::to_json(j, std::forward<ValueType>(val))))
+ -> decltype(::nlohmann::to_json(j, std::forward<ValueType>(val)), void())
+ {
+ ::nlohmann::to_json(j, std::forward<ValueType>(val));
+ }
+};
+
+} // namespace nlohmann
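+
+// Usage sketch: the primary template above simply forwards to ADL-found
+// to_json / from_json. For a type that cannot provide those functions in its
+// own namespace, adl_serializer can be specialized instead. third_party::vec3
+// is a hypothetical example type with members x, y, and z.
+//
+// @code{.cpp}
+// namespace nlohmann
+// {
+// template<>
+// struct adl_serializer<third_party::vec3>
+// {
+//     static void to_json(json& j, const third_party::vec3& v)
+//     {
+//         j = json{v.x, v.y, v.z};
+//     }
+//
+//     static void from_json(const json& j, third_party::vec3& v)
+//     {
+//         j.at(0).get_to(v.x);
+//         j.at(1).get_to(v.y);
+//         j.at(2).get_to(v.z);
+//     }
+// };
+// } // namespace nlohmann
+// @endcode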
+
+// #include <nlohmann/byte_container_with_subtype.hpp>
+
+
+#include <cstdint> // uint8_t
+#include <tuple> // tie
+#include <utility> // move
+
+namespace nlohmann
+{
+
+/*!
+@brief an internal type for a backed binary type
+
+This type extends the template parameter @a BinaryType provided to `basic_json`
+with a subtype used by BSON and MessagePack. This type exists so that the user
+does not have to specify a type themselves with a specific naming scheme in
+order to override the binary type.
+
+@tparam BinaryType container to store bytes (`std::vector<std::uint8_t>` by
+ default)
+
+@since version 3.8.0
+*/
+template<typename BinaryType>
+class byte_container_with_subtype : public BinaryType
+{
+ public:
+ /// the type of the underlying container
+ using container_type = BinaryType;
+
+ byte_container_with_subtype() noexcept(noexcept(container_type()))
+ : container_type()
+ {}
+
+ byte_container_with_subtype(const container_type& b) noexcept(noexcept(container_type(b)))
+ : container_type(b)
+ {}
+
+ byte_container_with_subtype(container_type&& b) noexcept(noexcept(container_type(std::move(b))))
+ : container_type(std::move(b))
+ {}
+
+ byte_container_with_subtype(const container_type& b, std::uint8_t subtype) noexcept(noexcept(container_type(b)))
+ : container_type(b)
+ , m_subtype(subtype)
+ , m_has_subtype(true)
+ {}
+
+ byte_container_with_subtype(container_type&& b, std::uint8_t subtype) noexcept(noexcept(container_type(std::move(b))))
+ : container_type(std::move(b))
+ , m_subtype(subtype)
+ , m_has_subtype(true)
+ {}
+
+ bool operator==(const byte_container_with_subtype& rhs) const
+ {
+ return std::tie(static_cast<const BinaryType&>(*this), m_subtype, m_has_subtype) ==
+ std::tie(static_cast<const BinaryType&>(rhs), rhs.m_subtype, rhs.m_has_subtype);
+ }
+
+ bool operator!=(const byte_container_with_subtype& rhs) const
+ {
+ return !(rhs == *this);
+ }
+
+ /*!
+ @brief sets the binary subtype
+
+ Sets the binary subtype of the value, also flags a binary JSON value as
+ having a subtype, which has implications for serialization.
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @sa @ref subtype() -- return the binary subtype
+ @sa @ref clear_subtype() -- clears the binary subtype
+ @sa @ref has_subtype() -- returns whether or not the binary value has a
+ subtype
+
+ @since version 3.8.0
+ */
+ void set_subtype(std::uint8_t subtype) noexcept
+ {
+ m_subtype = subtype;
+ m_has_subtype = true;
+ }
+
+ /*!
+ @brief return the binary subtype
+
+    Returns the numerical subtype of the value if it has a subtype. If it does
+    not have a subtype, this function returns 0; use @ref has_subtype() to
+    distinguish that case from an explicit subtype of 0.
+
+ @return the numerical subtype of the binary value
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @sa @ref set_subtype() -- sets the binary subtype
+ @sa @ref clear_subtype() -- clears the binary subtype
+ @sa @ref has_subtype() -- returns whether or not the binary value has a
+ subtype
+
+ @since version 3.8.0
+ */
+ constexpr std::uint8_t subtype() const noexcept
+ {
+ return m_subtype;
+ }
+
+ /*!
+ @brief return whether the value has a subtype
+
+ @return whether the value has a subtype
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @sa @ref subtype() -- return the binary subtype
+ @sa @ref set_subtype() -- sets the binary subtype
+ @sa @ref clear_subtype() -- clears the binary subtype
+
+ @since version 3.8.0
+ */
+ constexpr bool has_subtype() const noexcept
+ {
+ return m_has_subtype;
+ }
+
+ /*!
+ @brief clears the binary subtype
+
+ Clears the binary subtype and flags the value as not having a subtype, which
+ has implications for serialization; for instance MessagePack will prefer the
+ bin family over the ext family.
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @sa @ref subtype() -- return the binary subtype
+ @sa @ref set_subtype() -- sets the binary subtype
+ @sa @ref has_subtype() -- returns whether or not the binary value has a
+ subtype
+
+ @since version 3.8.0
+ */
+ void clear_subtype() noexcept
+ {
+ m_subtype = 0;
+ m_has_subtype = false;
+ }
+
+ private:
+ std::uint8_t m_subtype = 0;
+ bool m_has_subtype = false;
+};
+
+} // namespace nlohmann
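+
+// Usage sketch: basic_json::binary_t is an instantiation of this wrapper, and
+// the subtype decides how binary serializers encode the value (e.g.
+// MessagePack ext vs. bin, as noted in clear_subtype() above). The subtype
+// value 42 below is arbitrary.
+//
+// @code{.cpp}
+// nlohmann::json j = nlohmann::json::binary({0xCA, 0xFE, 0xBA, 0xBE}, 42);
+// j.get_binary().has_subtype();  // true
+// j.get_binary().subtype();      // 42
+// j.get_binary().clear_subtype();
+// @endcode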
+
+// #include <nlohmann/detail/conversions/from_json.hpp>
+
+// #include <nlohmann/detail/conversions/to_json.hpp>
+
+// #include <nlohmann/detail/exceptions.hpp>
+
+// #include <nlohmann/detail/hash.hpp>
+
+
+#include <cstddef> // size_t, uint8_t
+#include <functional> // hash
+
+namespace nlohmann
+{
+namespace detail
+{
+
+// boost::hash_combine
+inline std::size_t combine(std::size_t seed, std::size_t h) noexcept
+{
+ seed ^= h + 0x9e3779b9 + (seed << 6U) + (seed >> 2U);
+ return seed;
+}
+
+/*!
+@brief hash a JSON value
+
+The hash function tries to rely on std::hash where possible. Furthermore, the
+type of the JSON value is taken into account to have different hash values for
+null, 0, 0U, and false, etc.
+
+@tparam BasicJsonType basic_json specialization
+@param j JSON value to hash
+@return hash value of j
+*/
+template<typename BasicJsonType>
+std::size_t hash(const BasicJsonType& j)
+{
+ using string_t = typename BasicJsonType::string_t;
+ using number_integer_t = typename BasicJsonType::number_integer_t;
+ using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+ using number_float_t = typename BasicJsonType::number_float_t;
+
+ const auto type = static_cast<std::size_t>(j.type());
+ switch (j.type())
+ {
+ case BasicJsonType::value_t::null:
+ case BasicJsonType::value_t::discarded:
+ {
+ return combine(type, 0);
+ }
+
+ case BasicJsonType::value_t::object:
+ {
+ auto seed = combine(type, j.size());
+ for (const auto& element : j.items())
+ {
+ const auto h = std::hash<string_t> {}(element.key());
+ seed = combine(seed, h);
+ seed = combine(seed, hash(element.value()));
+ }
+ return seed;
+ }
+
+ case BasicJsonType::value_t::array:
+ {
+ auto seed = combine(type, j.size());
+ for (const auto& element : j)
+ {
+ seed = combine(seed, hash(element));
+ }
+ return seed;
+ }
+
+ case BasicJsonType::value_t::string:
+ {
+ const auto h = std::hash<string_t> {}(j.template get_ref<const string_t&>());
+ return combine(type, h);
+ }
+
+ case BasicJsonType::value_t::boolean:
+ {
+ const auto h = std::hash<bool> {}(j.template get<bool>());
+ return combine(type, h);
+ }
+
+ case BasicJsonType::value_t::number_integer:
+ {
+ const auto h = std::hash<number_integer_t> {}(j.template get<number_integer_t>());
+ return combine(type, h);
+ }
+
+ case nlohmann::detail::value_t::number_unsigned:
+ {
+ const auto h = std::hash<number_unsigned_t> {}(j.template get<number_unsigned_t>());
+ return combine(type, h);
+ }
+
+ case nlohmann::detail::value_t::number_float:
+ {
+ const auto h = std::hash<number_float_t> {}(j.template get<number_float_t>());
+ return combine(type, h);
+ }
+
+ case nlohmann::detail::value_t::binary:
+ {
+ auto seed = combine(type, j.get_binary().size());
+ const auto h = std::hash<bool> {}(j.get_binary().has_subtype());
+ seed = combine(seed, h);
+ seed = combine(seed, j.get_binary().subtype());
+ for (const auto byte : j.get_binary())
+ {
+ seed = combine(seed, std::hash<std::uint8_t> {}(byte));
+ }
+ return seed;
+ }
+
+ default: // LCOV_EXCL_LINE
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
+ }
+}
+
+} // namespace detail
+} // namespace nlohmann
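+
+// Usage sketch: this function backs the std::hash specialization for
+// nlohmann::json provided near the end of this header, so JSON values can be
+// stored directly in unordered containers.
+//
+// @code{.cpp}
+// #include <unordered_set>
+//
+// std::unordered_set<nlohmann::json> seen;
+// seen.insert(nlohmann::json{{"id", 1}});
+// seen.count(nlohmann::json{{"id", 1}});  // 1
+// @endcode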
+
+// #include <nlohmann/detail/input/binary_reader.hpp>
+
+
+#include <algorithm> // generate_n
+#include <array> // array
+#include <cmath> // ldexp
+#include <cstddef> // size_t
+#include <cstdint> // uint8_t, uint16_t, uint32_t, uint64_t
+#include <cstdio> // snprintf
+#include <cstring> // memcpy
+#include <iterator> // back_inserter
+#include <limits> // numeric_limits
+#include <string> // char_traits, string
+#include <utility> // make_pair, move
+
+// #include <nlohmann/detail/exceptions.hpp>
+
+// #include <nlohmann/detail/input/input_adapters.hpp>
+
+
+#include <array> // array
+#include <cstddef> // size_t
+#include <cstdio> //FILE *
+#include <cstring> // strlen
+#include <istream> // istream
+#include <iterator> // begin, end, iterator_traits, random_access_iterator_tag, distance, next
+#include <memory> // shared_ptr, make_shared, addressof
+#include <numeric> // accumulate
+#include <string> // string, char_traits
+#include <type_traits> // enable_if, is_base_of, is_pointer, is_integral, remove_pointer
+#include <utility> // pair, declval
+
+// #include <nlohmann/detail/iterators/iterator_traits.hpp>
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+/// the supported input formats
+enum class input_format_t { json, cbor, msgpack, ubjson, bson };
+
+////////////////////
+// input adapters //
+////////////////////
+
+/*!
+Input adapter for stdio file access. This adapter reads only one byte at a time
+and does not use any buffer. It is a very low-level adapter.
+*/
+class file_input_adapter
+{
+ public:
+ using char_type = char;
+
+ JSON_HEDLEY_NON_NULL(2)
+ explicit file_input_adapter(std::FILE* f) noexcept
+ : m_file(f)
+ {}
+
+ // make class move-only
+ file_input_adapter(const file_input_adapter&) = delete;
+ file_input_adapter(file_input_adapter&&) = default;
+ file_input_adapter& operator=(const file_input_adapter&) = delete;
+ file_input_adapter& operator=(file_input_adapter&&) = delete;
+
+ std::char_traits<char>::int_type get_character() noexcept
+ {
+ return std::fgetc(m_file);
+ }
+
+ private:
+ /// the file pointer to read from
+ std::FILE* m_file;
+};
+
+
+/*!
+Input adapter for a (caching) istream. Ignores a UTF Byte Order Mark at the
+beginning of input. Does not support changing the underlying std::streambuf
+in mid-input. Maintains underlying std::istream and std::streambuf to support
+subsequent use of standard std::istream operations to process any input
+characters following those used in parsing the JSON input. Clears the
+std::istream flags; any input errors (e.g., EOF) will be detected by the first
+subsequent call for input from the std::istream.
+*/
+class input_stream_adapter
+{
+ public:
+ using char_type = char;
+
+ ~input_stream_adapter()
+ {
+ // clear stream flags; we use underlying streambuf I/O, do not
+ // maintain ifstream flags, except eof
+ if (is != nullptr)
+ {
+ is->clear(is->rdstate() & std::ios::eofbit);
+ }
+ }
+
+ explicit input_stream_adapter(std::istream& i)
+ : is(&i), sb(i.rdbuf())
+ {}
+
+ // delete because of pointer members
+ input_stream_adapter(const input_stream_adapter&) = delete;
+ input_stream_adapter& operator=(input_stream_adapter&) = delete;
+ input_stream_adapter& operator=(input_stream_adapter&& rhs) = delete;
+
+ input_stream_adapter(input_stream_adapter&& rhs) noexcept : is(rhs.is), sb(rhs.sb)
+ {
+ rhs.is = nullptr;
+ rhs.sb = nullptr;
+ }
+
+    // std::istream/std::streambuf use std::char_traits<char>::to_int_type to
+    // ensure that std::char_traits<char>::eof() and the character 0xFF do not
+    // end up as the same value, e.g. 0xFFFFFFFF.
+ std::char_traits<char>::int_type get_character()
+ {
+ auto res = sb->sbumpc();
+ // set eof manually, as we don't use the istream interface.
+ if (JSON_HEDLEY_UNLIKELY(res == EOF))
+ {
+ is->clear(is->rdstate() | std::ios::eofbit);
+ }
+ return res;
+ }
+
+ private:
+ /// the associated input stream
+ std::istream* is = nullptr;
+ std::streambuf* sb = nullptr;
+};
+
+// General-purpose iterator-based adapter. It might not be as fast as
+// theoretically possible for some containers, but it is extremely versatile.
+template<typename IteratorType>
+class iterator_input_adapter
+{
+ public:
+ using char_type = typename std::iterator_traits<IteratorType>::value_type;
+
+ iterator_input_adapter(IteratorType first, IteratorType last)
+ : current(std::move(first)), end(std::move(last)) {}
+
+ typename std::char_traits<char_type>::int_type get_character()
+ {
+ if (JSON_HEDLEY_LIKELY(current != end))
+ {
+ auto result = std::char_traits<char_type>::to_int_type(*current);
+ std::advance(current, 1);
+ return result;
+ }
+ else
+ {
+ return std::char_traits<char_type>::eof();
+ }
+ }
+
+ private:
+ IteratorType current;
+ IteratorType end;
+
+ template<typename BaseInputAdapter, size_t T>
+ friend struct wide_string_input_helper;
+
+ bool empty() const
+ {
+ return current == end;
+ }
+
+};
+
+
+template<typename BaseInputAdapter, size_t T>
+struct wide_string_input_helper;
+
+template<typename BaseInputAdapter>
+struct wide_string_input_helper<BaseInputAdapter, 4>
+{
+ // UTF-32
+ static void fill_buffer(BaseInputAdapter& input,
+ std::array<std::char_traits<char>::int_type, 4>& utf8_bytes,
+ size_t& utf8_bytes_index,
+ size_t& utf8_bytes_filled)
+ {
+ utf8_bytes_index = 0;
+
+ if (JSON_HEDLEY_UNLIKELY(input.empty()))
+ {
+ utf8_bytes[0] = std::char_traits<char>::eof();
+ utf8_bytes_filled = 1;
+ }
+ else
+ {
+ // get the current character
+ const auto wc = input.get_character();
+
+ // UTF-32 to UTF-8 encoding
+ if (wc < 0x80)
+ {
+ utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(wc);
+ utf8_bytes_filled = 1;
+ }
+ else if (wc <= 0x7FF)
+ {
+ utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xC0u | ((static_cast<unsigned int>(wc) >> 6u) & 0x1Fu));
+ utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
+ utf8_bytes_filled = 2;
+ }
+ else if (wc <= 0xFFFF)
+ {
+ utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xE0u | ((static_cast<unsigned int>(wc) >> 12u) & 0x0Fu));
+ utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | ((static_cast<unsigned int>(wc) >> 6u) & 0x3Fu));
+ utf8_bytes[2] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
+ utf8_bytes_filled = 3;
+ }
+ else if (wc <= 0x10FFFF)
+ {
+ utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xF0u | ((static_cast<unsigned int>(wc) >> 18u) & 0x07u));
+ utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | ((static_cast<unsigned int>(wc) >> 12u) & 0x3Fu));
+ utf8_bytes[2] = static_cast<std::char_traits<char>::int_type>(0x80u | ((static_cast<unsigned int>(wc) >> 6u) & 0x3Fu));
+ utf8_bytes[3] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
+ utf8_bytes_filled = 4;
+ }
+ else
+ {
+ // unknown character
+ utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(wc);
+ utf8_bytes_filled = 1;
+ }
+ }
+ }
+};
+
+template<typename BaseInputAdapter>
+struct wide_string_input_helper<BaseInputAdapter, 2>
+{
+ // UTF-16
+ static void fill_buffer(BaseInputAdapter& input,
+ std::array<std::char_traits<char>::int_type, 4>& utf8_bytes,
+ size_t& utf8_bytes_index,
+ size_t& utf8_bytes_filled)
+ {
+ utf8_bytes_index = 0;
+
+ if (JSON_HEDLEY_UNLIKELY(input.empty()))
+ {
+ utf8_bytes[0] = std::char_traits<char>::eof();
+ utf8_bytes_filled = 1;
+ }
+ else
+ {
+ // get the current character
+ const auto wc = input.get_character();
+
+ // UTF-16 to UTF-8 encoding
+ if (wc < 0x80)
+ {
+ utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(wc);
+ utf8_bytes_filled = 1;
+ }
+ else if (wc <= 0x7FF)
+ {
+ utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xC0u | ((static_cast<unsigned int>(wc) >> 6u)));
+ utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
+ utf8_bytes_filled = 2;
+ }
+ else if (0xD800 > wc || wc >= 0xE000)
+ {
+ utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xE0u | ((static_cast<unsigned int>(wc) >> 12u)));
+ utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | ((static_cast<unsigned int>(wc) >> 6u) & 0x3Fu));
+ utf8_bytes[2] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
+ utf8_bytes_filled = 3;
+ }
+ else
+ {
+ if (JSON_HEDLEY_UNLIKELY(!input.empty()))
+ {
+ const auto wc2 = static_cast<unsigned int>(input.get_character());
+ const auto charcode = 0x10000u + (((static_cast<unsigned int>(wc) & 0x3FFu) << 10u) | (wc2 & 0x3FFu));
+ utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xF0u | (charcode >> 18u));
+ utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | ((charcode >> 12u) & 0x3Fu));
+ utf8_bytes[2] = static_cast<std::char_traits<char>::int_type>(0x80u | ((charcode >> 6u) & 0x3Fu));
+ utf8_bytes[3] = static_cast<std::char_traits<char>::int_type>(0x80u | (charcode & 0x3Fu));
+ utf8_bytes_filled = 4;
+ }
+ else
+ {
+ utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(wc);
+ utf8_bytes_filled = 1;
+ }
+ }
+ }
+ }
+};
+
+// Wraps another input adapter to convert wide character types into individual bytes.
+template<typename BaseInputAdapter, typename WideCharType>
+class wide_string_input_adapter
+{
+ public:
+ using char_type = char;
+
+ wide_string_input_adapter(BaseInputAdapter base)
+ : base_adapter(base) {}
+
+ typename std::char_traits<char>::int_type get_character() noexcept
+ {
+ // check if buffer needs to be filled
+ if (utf8_bytes_index == utf8_bytes_filled)
+ {
+ fill_buffer<sizeof(WideCharType)>();
+
+ JSON_ASSERT(utf8_bytes_filled > 0);
+ JSON_ASSERT(utf8_bytes_index == 0);
+ }
+
+ // use buffer
+ JSON_ASSERT(utf8_bytes_filled > 0);
+ JSON_ASSERT(utf8_bytes_index < utf8_bytes_filled);
+ return utf8_bytes[utf8_bytes_index++];
+ }
+
+ private:
+ BaseInputAdapter base_adapter;
+
+ template<size_t T>
+ void fill_buffer()
+ {
+ wide_string_input_helper<BaseInputAdapter, T>::fill_buffer(base_adapter, utf8_bytes, utf8_bytes_index, utf8_bytes_filled);
+ }
+
+ /// a buffer for UTF-8 bytes
+ std::array<std::char_traits<char>::int_type, 4> utf8_bytes = {{0, 0, 0, 0}};
+
+    /// index into the utf8_bytes array for the next valid byte
+ std::size_t utf8_bytes_index = 0;
+    /// number of valid bytes in the utf8_bytes array
+ std::size_t utf8_bytes_filled = 0;
+};
+
+
+template<typename IteratorType, typename Enable = void>
+struct iterator_input_adapter_factory
+{
+ using iterator_type = IteratorType;
+ using char_type = typename std::iterator_traits<iterator_type>::value_type;
+ using adapter_type = iterator_input_adapter<iterator_type>;
+
+ static adapter_type create(IteratorType first, IteratorType last)
+ {
+ return adapter_type(std::move(first), std::move(last));
+ }
+};
+
+template<typename T>
+struct is_iterator_of_multibyte
+{
+ using value_type = typename std::iterator_traits<T>::value_type;
+ enum
+ {
+ value = sizeof(value_type) > 1
+ };
+};
+
+template<typename IteratorType>
+struct iterator_input_adapter_factory<IteratorType, enable_if_t<is_iterator_of_multibyte<IteratorType>::value>>
+{
+ using iterator_type = IteratorType;
+ using char_type = typename std::iterator_traits<iterator_type>::value_type;
+ using base_adapter_type = iterator_input_adapter<iterator_type>;
+ using adapter_type = wide_string_input_adapter<base_adapter_type, char_type>;
+
+ static adapter_type create(IteratorType first, IteratorType last)
+ {
+ return adapter_type(base_adapter_type(std::move(first), std::move(last)));
+ }
+};
+
+// General purpose iterator-based input
+template<typename IteratorType>
+typename iterator_input_adapter_factory<IteratorType>::adapter_type input_adapter(IteratorType first, IteratorType last)
+{
+ using factory_type = iterator_input_adapter_factory<IteratorType>;
+ return factory_type::create(first, last);
+}
+
+// Convenience shorthand from container to iterator
+template<typename ContainerType>
+auto input_adapter(const ContainerType& container) -> decltype(input_adapter(begin(container), end(container)))
+{
+ // Enable ADL
+ using std::begin;
+ using std::end;
+
+ return input_adapter(begin(container), end(container));
+}
+
+// Special cases with fast paths
+inline file_input_adapter input_adapter(std::FILE* file)
+{
+ return file_input_adapter(file);
+}
+
+inline input_stream_adapter input_adapter(std::istream& stream)
+{
+ return input_stream_adapter(stream);
+}
+
+inline input_stream_adapter input_adapter(std::istream&& stream)
+{
+ return input_stream_adapter(stream);
+}
+
+using contiguous_bytes_input_adapter = decltype(input_adapter(std::declval<const char*>(), std::declval<const char*>()));
+
+// Null-terminated strings, and the like.
+template < typename CharT,
+ typename std::enable_if <
+ std::is_pointer<CharT>::value&&
+ !std::is_array<CharT>::value&&
+ std::is_integral<typename std::remove_pointer<CharT>::type>::value&&
+ sizeof(typename std::remove_pointer<CharT>::type) == 1,
+ int >::type = 0 >
+contiguous_bytes_input_adapter input_adapter(CharT b)
+{
+ auto length = std::strlen(reinterpret_cast<const char*>(b));
+ const auto* ptr = reinterpret_cast<const char*>(b);
+ return input_adapter(ptr, ptr + length);
+}
+
+template<typename T, std::size_t N>
+auto input_adapter(T (&array)[N]) -> decltype(input_adapter(array, array + N))
+{
+ return input_adapter(array, array + N);
+}
+
+// This class only handles inputs of the contiguous_bytes_input_adapter type.
+// It's required so that expressions like {ptr, len} can be implicitly converted
+// to the correct adapter.
+class span_input_adapter
+{
+ public:
+ template < typename CharT,
+ typename std::enable_if <
+ std::is_pointer<CharT>::value&&
+ std::is_integral<typename std::remove_pointer<CharT>::type>::value&&
+ sizeof(typename std::remove_pointer<CharT>::type) == 1,
+ int >::type = 0 >
+ span_input_adapter(CharT b, std::size_t l)
+ : ia(reinterpret_cast<const char*>(b), reinterpret_cast<const char*>(b) + l) {}
+
+ template<class IteratorType,
+ typename std::enable_if<
+ std::is_same<typename iterator_traits<IteratorType>::iterator_category, std::random_access_iterator_tag>::value,
+ int>::type = 0>
+ span_input_adapter(IteratorType first, IteratorType last)
+ : ia(input_adapter(first, last)) {}
+
+ contiguous_bytes_input_adapter&& get()
+ {
+ return std::move(ia);
+ }
+
+ private:
+ contiguous_bytes_input_adapter ia;
+};
+} // namespace detail
+} // namespace nlohmann
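+
+// Usage sketch: these adapters back the public parse / sax_parse overloads;
+// the argument type decides which adapter is selected. "data.json" is a
+// hypothetical file name.
+//
+// @code{.cpp}
+// #include <cstdio>
+// #include <fstream>
+// #include <string>
+//
+// std::FILE* f = std::fopen("data.json", "r");
+// auto j1 = nlohmann::json::parse(f);                  // file_input_adapter
+// std::fclose(f);
+//
+// std::ifstream is("data.json");
+// auto j2 = nlohmann::json::parse(is);                 // input_stream_adapter
+//
+// std::string s = R"({"ok": true})";
+// auto j3 = nlohmann::json::parse(s);                  // iterator_input_adapter
+// auto j4 = nlohmann::json::parse(s.begin(), s.end()); // iterator_input_adapter
+// @endcode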
+
+// #include <nlohmann/detail/input/json_sax.hpp>
+
+
+#include <cstddef>
+#include <string> // string
+#include <utility> // move
+#include <vector> // vector
+
+// #include <nlohmann/detail/exceptions.hpp>
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+
+namespace nlohmann
+{
+
+/*!
+@brief SAX interface
+
+This class describes the SAX interface used by @ref nlohmann::json::sax_parse.
+Each function is called in different situations while the input is parsed. The
+boolean return value informs the parser whether to continue processing the
+input.
+*/
+template<typename BasicJsonType>
+struct json_sax
+{
+ using number_integer_t = typename BasicJsonType::number_integer_t;
+ using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+ using number_float_t = typename BasicJsonType::number_float_t;
+ using string_t = typename BasicJsonType::string_t;
+ using binary_t = typename BasicJsonType::binary_t;
+
+ /*!
+ @brief a null value was read
+ @return whether parsing should proceed
+ */
+ virtual bool null() = 0;
+
+ /*!
+ @brief a boolean value was read
+ @param[in] val boolean value
+ @return whether parsing should proceed
+ */
+ virtual bool boolean(bool val) = 0;
+
+ /*!
+ @brief an integer number was read
+ @param[in] val integer value
+ @return whether parsing should proceed
+ */
+ virtual bool number_integer(number_integer_t val) = 0;
+
+ /*!
+ @brief an unsigned integer number was read
+ @param[in] val unsigned integer value
+ @return whether parsing should proceed
+ */
+ virtual bool number_unsigned(number_unsigned_t val) = 0;
+
+ /*!
+    @brief a floating-point number was read
+ @param[in] val floating-point value
+ @param[in] s raw token value
+ @return whether parsing should proceed
+ */
+ virtual bool number_float(number_float_t val, const string_t& s) = 0;
+
+ /*!
+ @brief a string was read
+ @param[in] val string value
+ @return whether parsing should proceed
+ @note It is safe to move the passed string.
+ */
+ virtual bool string(string_t& val) = 0;
+
+ /*!
+ @brief a binary string was read
+ @param[in] val binary value
+ @return whether parsing should proceed
+ @note It is safe to move the passed binary.
+ */
+ virtual bool binary(binary_t& val) = 0;
+
+ /*!
+ @brief the beginning of an object was read
+ @param[in] elements number of object elements or -1 if unknown
+ @return whether parsing should proceed
+ @note binary formats may report the number of elements
+ */
+ virtual bool start_object(std::size_t elements) = 0;
+
+ /*!
+ @brief an object key was read
+ @param[in] val object key
+ @return whether parsing should proceed
+ @note It is safe to move the passed string.
+ */
+ virtual bool key(string_t& val) = 0;
+
+ /*!
+ @brief the end of an object was read
+ @return whether parsing should proceed
+ */
+ virtual bool end_object() = 0;
+
+ /*!
+ @brief the beginning of an array was read
+ @param[in] elements number of array elements or -1 if unknown
+ @return whether parsing should proceed
+ @note binary formats may report the number of elements
+ */
+ virtual bool start_array(std::size_t elements) = 0;
+
+ /*!
+ @brief the end of an array was read
+ @return whether parsing should proceed
+ */
+ virtual bool end_array() = 0;
+
+ /*!
+ @brief a parse error occurred
+ @param[in] position the position in the input where the error occurs
+ @param[in] last_token the last read token
+ @param[in] ex an exception object describing the error
+ @return whether parsing should proceed (must return false)
+ */
+ virtual bool parse_error(std::size_t position,
+ const std::string& last_token,
+ const detail::exception& ex) = 0;
+
+ virtual ~json_sax() = default;
+};
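+
+// Usage sketch: a handler implementing this interface can be driven via
+// basic_json::sax_parse. The key_counter type below is a hypothetical,
+// minimal example that merely counts object keys.
+//
+// @code{.cpp}
+// struct key_counter : nlohmann::json::json_sax_t
+// {
+//     std::size_t keys = 0;
+//
+//     bool key(string_t& /*val*/) override { ++keys; return true; }
+//
+//     // accept everything else unchanged
+//     bool null() override { return true; }
+//     bool boolean(bool) override { return true; }
+//     bool number_integer(number_integer_t) override { return true; }
+//     bool number_unsigned(number_unsigned_t) override { return true; }
+//     bool number_float(number_float_t, const string_t&) override { return true; }
+//     bool string(string_t&) override { return true; }
+//     bool binary(binary_t&) override { return true; }
+//     bool start_object(std::size_t) override { return true; }
+//     bool end_object() override { return true; }
+//     bool start_array(std::size_t) override { return true; }
+//     bool end_array() override { return true; }
+//     bool parse_error(std::size_t, const std::string&,
+//                      const nlohmann::detail::exception&) override { return false; }
+// };
+//
+// key_counter handler;
+// nlohmann::json::sax_parse(R"({"a": 1, "b": {"c": 2}})", &handler);
+// // handler.keys == 3
+// @endcode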
+
+
+namespace detail
+{
+/*!
+@brief SAX implementation to create a JSON value from SAX events
+
+This class implements the @ref json_sax interface and processes the SAX events
+to create a JSON value which makes it basically a DOM parser. The structure or
+hierarchy of the JSON value is managed by the stack `ref_stack` which contains
+a pointer to the respective array or object for each recursion depth.
+
+After successful parsing, the value that is passed by reference to the
+constructor contains the parsed value.
+
+@tparam BasicJsonType the JSON type
+*/
+template<typename BasicJsonType>
+class json_sax_dom_parser
+{
+ public:
+ using number_integer_t = typename BasicJsonType::number_integer_t;
+ using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+ using number_float_t = typename BasicJsonType::number_float_t;
+ using string_t = typename BasicJsonType::string_t;
+ using binary_t = typename BasicJsonType::binary_t;
+
+ /*!
+ @param[in, out] r reference to a JSON value that is manipulated while
+ parsing
+ @param[in] allow_exceptions_ whether parse errors yield exceptions
+ */
+ explicit json_sax_dom_parser(BasicJsonType& r, const bool allow_exceptions_ = true)
+ : root(r), allow_exceptions(allow_exceptions_)
+ {}
+
+ // make class move-only
+ json_sax_dom_parser(const json_sax_dom_parser&) = delete;
+ json_sax_dom_parser(json_sax_dom_parser&&) = default;
+ json_sax_dom_parser& operator=(const json_sax_dom_parser&) = delete;
+ json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default;
+ ~json_sax_dom_parser() = default;
+
+ bool null()
+ {
+ handle_value(nullptr);
+ return true;
+ }
+
+ bool boolean(bool val)
+ {
+ handle_value(val);
+ return true;
+ }
+
+ bool number_integer(number_integer_t val)
+ {
+ handle_value(val);
+ return true;
+ }
+
+ bool number_unsigned(number_unsigned_t val)
+ {
+ handle_value(val);
+ return true;
+ }
+
+ bool number_float(number_float_t val, const string_t& /*unused*/)
+ {
+ handle_value(val);
+ return true;
+ }
+
+ bool string(string_t& val)
+ {
+ handle_value(val);
+ return true;
+ }
+
+ bool binary(binary_t& val)
+ {
+ handle_value(std::move(val));
+ return true;
+ }
+
+ bool start_object(std::size_t len)
+ {
+ ref_stack.push_back(handle_value(BasicJsonType::value_t::object));
+
+ if (JSON_HEDLEY_UNLIKELY(len != std::size_t(-1) && len > ref_stack.back()->max_size()))
+ {
+ JSON_THROW(out_of_range::create(408,
+ "excessive object size: " + std::to_string(len)));
+ }
+
+ return true;
+ }
+
+ bool key(string_t& val)
+ {
+ // add null at given key and store the reference for later
+ object_element = &(ref_stack.back()->m_value.object->operator[](val));
+ return true;
+ }
+
+ bool end_object()
+ {
+ ref_stack.pop_back();
+ return true;
+ }
+
+ bool start_array(std::size_t len)
+ {
+ ref_stack.push_back(handle_value(BasicJsonType::value_t::array));
+
+ if (JSON_HEDLEY_UNLIKELY(len != std::size_t(-1) && len > ref_stack.back()->max_size()))
+ {
+ JSON_THROW(out_of_range::create(408,
+ "excessive array size: " + std::to_string(len)));
+ }
+
+ return true;
+ }
+
+ bool end_array()
+ {
+ ref_stack.pop_back();
+ return true;
+ }
+
+ template<class Exception>
+ bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/,
+ const Exception& ex)
+ {
+ errored = true;
+ static_cast<void>(ex);
+ if (allow_exceptions)
+ {
+ JSON_THROW(ex);
+ }
+ return false;
+ }
+
+ constexpr bool is_errored() const
+ {
+ return errored;
+ }
+
+ private:
+ /*!
+ @invariant If the ref stack is empty, then the passed value will be the new
+ root.
+ @invariant If the ref stack contains a value, then it is an array or an
+ object to which we can add elements
+ */
+ template<typename Value>
+ JSON_HEDLEY_RETURNS_NON_NULL
+ BasicJsonType* handle_value(Value&& v)
+ {
+ if (ref_stack.empty())
+ {
+ root = BasicJsonType(std::forward<Value>(v));
+ return &root;
+ }
+
+ JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object());
+
+ if (ref_stack.back()->is_array())
+ {
+ ref_stack.back()->m_value.array->emplace_back(std::forward<Value>(v));
+ return &(ref_stack.back()->m_value.array->back());
+ }
+
+ JSON_ASSERT(ref_stack.back()->is_object());
+ JSON_ASSERT(object_element);
+ *object_element = BasicJsonType(std::forward<Value>(v));
+ return object_element;
+ }
+
+ /// the parsed JSON value
+ BasicJsonType& root;
+ /// stack to model hierarchy of values
+ std::vector<BasicJsonType*> ref_stack {};
+ /// helper to hold the reference for the next object element
+ BasicJsonType* object_element = nullptr;
+ /// whether a syntax error occurred
+ bool errored = false;
+ /// whether to throw exceptions in case of errors
+ const bool allow_exceptions = true;
+};
+
+template<typename BasicJsonType>
+class json_sax_dom_callback_parser
+{
+ public:
+ using number_integer_t = typename BasicJsonType::number_integer_t;
+ using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+ using number_float_t = typename BasicJsonType::number_float_t;
+ using string_t = typename BasicJsonType::string_t;
+ using binary_t = typename BasicJsonType::binary_t;
+ using parser_callback_t = typename BasicJsonType::parser_callback_t;
+ using parse_event_t = typename BasicJsonType::parse_event_t;
+
+ json_sax_dom_callback_parser(BasicJsonType& r,
+ const parser_callback_t cb,
+ const bool allow_exceptions_ = true)
+ : root(r), callback(cb), allow_exceptions(allow_exceptions_)
+ {
+ keep_stack.push_back(true);
+ }
+
+ // make class move-only
+ json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = delete;
+ json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default;
+ json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_parser&) = delete;
+ json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default;
+ ~json_sax_dom_callback_parser() = default;
+
+ bool null()
+ {
+ handle_value(nullptr);
+ return true;
+ }
+
+ bool boolean(bool val)
+ {
+ handle_value(val);
+ return true;
+ }
+
+ bool number_integer(number_integer_t val)
+ {
+ handle_value(val);
+ return true;
+ }
+
+ bool number_unsigned(number_unsigned_t val)
+ {
+ handle_value(val);
+ return true;
+ }
+
+ bool number_float(number_float_t val, const string_t& /*unused*/)
+ {
+ handle_value(val);
+ return true;
+ }
+
+ bool string(string_t& val)
+ {
+ handle_value(val);
+ return true;
+ }
+
+ bool binary(binary_t& val)
+ {
+ handle_value(std::move(val));
+ return true;
+ }
+
+ bool start_object(std::size_t len)
+ {
+ // check callback for object start
+ const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::object_start, discarded);
+ keep_stack.push_back(keep);
+
+ auto val = handle_value(BasicJsonType::value_t::object, true);
+ ref_stack.push_back(val.second);
+
+ // check object limit
+ if (ref_stack.back() && JSON_HEDLEY_UNLIKELY(len != std::size_t(-1) && len > ref_stack.back()->max_size()))
+ {
+ JSON_THROW(out_of_range::create(408, "excessive object size: " + std::to_string(len)));
+ }
+
+ return true;
+ }
+
+ bool key(string_t& val)
+ {
+ BasicJsonType k = BasicJsonType(val);
+
+ // check callback for key
+ const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::key, k);
+ key_keep_stack.push_back(keep);
+
+ // add discarded value at given key and store the reference for later
+ if (keep && ref_stack.back())
+ {
+ object_element = &(ref_stack.back()->m_value.object->operator[](val) = discarded);
+ }
+
+ return true;
+ }
+
+ bool end_object()
+ {
+ if (ref_stack.back() && !callback(static_cast<int>(ref_stack.size()) - 1, parse_event_t::object_end, *ref_stack.back()))
+ {
+ // discard object
+ *ref_stack.back() = discarded;
+ }
+
+ JSON_ASSERT(!ref_stack.empty());
+ JSON_ASSERT(!keep_stack.empty());
+ ref_stack.pop_back();
+ keep_stack.pop_back();
+
+ if (!ref_stack.empty() && ref_stack.back() && ref_stack.back()->is_structured())
+ {
+ // remove discarded value
+ for (auto it = ref_stack.back()->begin(); it != ref_stack.back()->end(); ++it)
+ {
+ if (it->is_discarded())
+ {
+ ref_stack.back()->erase(it);
+ break;
+ }
+ }
+ }
+
+ return true;
+ }
+
+ bool start_array(std::size_t len)
+ {
+ const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::array_start, discarded);
+ keep_stack.push_back(keep);
+
+ auto val = handle_value(BasicJsonType::value_t::array, true);
+ ref_stack.push_back(val.second);
+
+ // check array limit
+ if (ref_stack.back() && JSON_HEDLEY_UNLIKELY(len != std::size_t(-1) && len > ref_stack.back()->max_size()))
+ {
+ JSON_THROW(out_of_range::create(408, "excessive array size: " + std::to_string(len)));
+ }
+
+ return true;
+ }
+
+ bool end_array()
+ {
+ bool keep = true;
+
+ if (ref_stack.back())
+ {
+ keep = callback(static_cast<int>(ref_stack.size()) - 1, parse_event_t::array_end, *ref_stack.back());
+ if (!keep)
+ {
+ // discard array
+ *ref_stack.back() = discarded;
+ }
+ }
+
+ JSON_ASSERT(!ref_stack.empty());
+ JSON_ASSERT(!keep_stack.empty());
+ ref_stack.pop_back();
+ keep_stack.pop_back();
+
+ // remove discarded value
+ if (!keep && !ref_stack.empty() && ref_stack.back()->is_array())
+ {
+ ref_stack.back()->m_value.array->pop_back();
+ }
+
+ return true;
+ }
+
+ template<class Exception>
+ bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/,
+ const Exception& ex)
+ {
+ errored = true;
+ static_cast<void>(ex);
+ if (allow_exceptions)
+ {
+ JSON_THROW(ex);
+ }
+ return false;
+ }
+
+ constexpr bool is_errored() const
+ {
+ return errored;
+ }
+
+ private:
+ /*!
+ @param[in] v value to add to the JSON value we build during parsing
+ @param[in] skip_callback whether we should skip calling the callback
+ function; this is required after start_array() and
+ start_object() SAX events, because otherwise we would call the
+ callback function with an empty array or object, respectively.
+
+ @invariant If the ref stack is empty, then the passed value will be the new
+ root.
+ @invariant If the ref stack contains a value, then it is an array or an
+ object to which we can add elements
+
+ @return pair of boolean (whether value should be kept) and pointer (to the
+ passed value in the ref_stack hierarchy; nullptr if not kept)
+ */
+ template<typename Value>
+ std::pair<bool, BasicJsonType*> handle_value(Value&& v, const bool skip_callback = false)
+ {
+ JSON_ASSERT(!keep_stack.empty());
+
+ // do not handle this value if we know it would be added to a discarded
+ // container
+ if (!keep_stack.back())
+ {
+ return {false, nullptr};
+ }
+
+ // create value
+ auto value = BasicJsonType(std::forward<Value>(v));
+
+ // check callback
+ const bool keep = skip_callback || callback(static_cast<int>(ref_stack.size()), parse_event_t::value, value);
+
+ // do not handle this value if we just learnt it shall be discarded
+ if (!keep)
+ {
+ return {false, nullptr};
+ }
+
+ if (ref_stack.empty())
+ {
+ root = std::move(value);
+ return {true, &root};
+ }
+
+ // skip this value if we already decided to skip the parent
+ // (https://github.com/nlohmann/json/issues/971#issuecomment-413678360)
+ if (!ref_stack.back())
+ {
+ return {false, nullptr};
+ }
+
+ // we now only expect arrays and objects
+ JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object());
+
+ // array
+ if (ref_stack.back()->is_array())
+ {
+ ref_stack.back()->m_value.array->push_back(std::move(value));
+ return {true, &(ref_stack.back()->m_value.array->back())};
+ }
+
+ // object
+ JSON_ASSERT(ref_stack.back()->is_object());
+ // check if we should store an element for the current key
+ JSON_ASSERT(!key_keep_stack.empty());
+ const bool store_element = key_keep_stack.back();
+ key_keep_stack.pop_back();
+
+ if (!store_element)
+ {
+ return {false, nullptr};
+ }
+
+ JSON_ASSERT(object_element);
+ *object_element = std::move(value);
+ return {true, object_element};
+ }
+
+ /// the parsed JSON value
+ BasicJsonType& root;
+ /// stack to model hierarchy of values
+ std::vector<BasicJsonType*> ref_stack {};
+ /// stack to manage which values to keep
+ std::vector<bool> keep_stack {};
+ /// stack to manage which object keys to keep
+ std::vector<bool> key_keep_stack {};
+ /// helper to hold the reference for the next object element
+ BasicJsonType* object_element = nullptr;
+ /// whether a syntax error occurred
+ bool errored = false;
+ /// callback function
+ const parser_callback_t callback = nullptr;
+ /// whether to throw exceptions in case of errors
+ const bool allow_exceptions = true;
+ /// a discarded value for the callback
+ BasicJsonType discarded = BasicJsonType::value_t::discarded;
+};
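+
+/*
+A minimal usage sketch (illustrative, assuming the default nlohmann::json
+specialization): this callback parser backs the public parse(input, callback)
+overload, so a filter that drops every object member named "debug" could be
+written as:
+
+    nlohmann::json::parser_callback_t cb =
+        [](int depth, nlohmann::json::parse_event_t event, nlohmann::json& parsed)
+    {
+        // returning false for a key event discards that key and its value
+        if (event == nlohmann::json::parse_event_t::key && parsed == "debug")
+        {
+            return false;
+        }
+        return true;
+    };
+
+    // j contains {"x": 1}; the "debug" member is never materialized
+    nlohmann::json j = nlohmann::json::parse(R"({"x": 1, "debug": true})", cb);
+*/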
+
+template<typename BasicJsonType>
+class json_sax_acceptor
+{
+ public:
+ using number_integer_t = typename BasicJsonType::number_integer_t;
+ using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+ using number_float_t = typename BasicJsonType::number_float_t;
+ using string_t = typename BasicJsonType::string_t;
+ using binary_t = typename BasicJsonType::binary_t;
+
+ bool null()
+ {
+ return true;
+ }
+
+ bool boolean(bool /*unused*/)
+ {
+ return true;
+ }
+
+ bool number_integer(number_integer_t /*unused*/)
+ {
+ return true;
+ }
+
+ bool number_unsigned(number_unsigned_t /*unused*/)
+ {
+ return true;
+ }
+
+ bool number_float(number_float_t /*unused*/, const string_t& /*unused*/)
+ {
+ return true;
+ }
+
+ bool string(string_t& /*unused*/)
+ {
+ return true;
+ }
+
+ bool binary(binary_t& /*unused*/)
+ {
+ return true;
+ }
+
+ bool start_object(std::size_t /*unused*/ = std::size_t(-1))
+ {
+ return true;
+ }
+
+ bool key(string_t& /*unused*/)
+ {
+ return true;
+ }
+
+ bool end_object()
+ {
+ return true;
+ }
+
+ bool start_array(std::size_t /*unused*/ = std::size_t(-1))
+ {
+ return true;
+ }
+
+ bool end_array()
+ {
+ return true;
+ }
+
+ bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const detail::exception& /*unused*/)
+ {
+ return false;
+ }
+};
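+
+/*
+A minimal usage sketch (illustrative): json_sax_acceptor is the no-op handler
+behind the public accept() interface, which validates input without building
+a DOM:
+
+    bool valid  = nlohmann::json::accept(R"({"pi": 3.141})");   // true
+    bool broken = nlohmann::json::accept("{\"pi\": }");         // false
+*/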
+} // namespace detail
+
+} // namespace nlohmann
+
+// #include <nlohmann/detail/input/lexer.hpp>
+
+
+#include <array> // array
+#include <clocale> // localeconv
+#include <cstddef> // size_t
+#include <cstdio> // snprintf
+#include <cstdlib> // strtof, strtod, strtold, strtoll, strtoull
+#include <initializer_list> // initializer_list
+#include <string> // char_traits, string
+#include <utility> // move
+#include <vector> // vector
+
+// #include <nlohmann/detail/input/input_adapters.hpp>
+
+// #include <nlohmann/detail/input/position_t.hpp>
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+///////////
+// lexer //
+///////////
+
+template<typename BasicJsonType>
+class lexer_base
+{
+ public:
+ /// token types for the parser
+ enum class token_type
+ {
+ uninitialized, ///< indicating the scanner is uninitialized
+ literal_true, ///< the `true` literal
+ literal_false, ///< the `false` literal
+ literal_null, ///< the `null` literal
+ value_string, ///< a string -- use get_string() for actual value
+ value_unsigned, ///< an unsigned integer -- use get_number_unsigned() for actual value
+ value_integer, ///< a signed integer -- use get_number_integer() for actual value
+        value_float,      ///< a floating point number -- use get_number_float() for actual value
+ begin_array, ///< the character for array begin `[`
+ begin_object, ///< the character for object begin `{`
+ end_array, ///< the character for array end `]`
+ end_object, ///< the character for object end `}`
+ name_separator, ///< the name separator `:`
+ value_separator, ///< the value separator `,`
+ parse_error, ///< indicating a parse error
+ end_of_input, ///< indicating the end of the input buffer
+ literal_or_value ///< a literal or the begin of a value (only for diagnostics)
+ };
+
+ /// return name of values of type token_type (only used for errors)
+ JSON_HEDLEY_RETURNS_NON_NULL
+ JSON_HEDLEY_CONST
+ static const char* token_type_name(const token_type t) noexcept
+ {
+ switch (t)
+ {
+ case token_type::uninitialized:
+ return "<uninitialized>";
+ case token_type::literal_true:
+ return "true literal";
+ case token_type::literal_false:
+ return "false literal";
+ case token_type::literal_null:
+ return "null literal";
+ case token_type::value_string:
+ return "string literal";
+ case token_type::value_unsigned:
+ case token_type::value_integer:
+ case token_type::value_float:
+ return "number literal";
+ case token_type::begin_array:
+ return "'['";
+ case token_type::begin_object:
+ return "'{'";
+ case token_type::end_array:
+ return "']'";
+ case token_type::end_object:
+ return "'}'";
+ case token_type::name_separator:
+ return "':'";
+ case token_type::value_separator:
+ return "','";
+ case token_type::parse_error:
+ return "<parse error>";
+ case token_type::end_of_input:
+ return "end of input";
+ case token_type::literal_or_value:
+ return "'[', '{', or a literal";
+ // LCOV_EXCL_START
+ default: // catch non-enum values
+ return "unknown token";
+ // LCOV_EXCL_STOP
+ }
+ }
+};
+/*!
+@brief lexical analysis
+
+This class organizes the lexical analysis during JSON deserialization.
+*/
+template<typename BasicJsonType, typename InputAdapterType>
+class lexer : public lexer_base<BasicJsonType>
+{
+ using number_integer_t = typename BasicJsonType::number_integer_t;
+ using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+ using number_float_t = typename BasicJsonType::number_float_t;
+ using string_t = typename BasicJsonType::string_t;
+ using char_type = typename InputAdapterType::char_type;
+ using char_int_type = typename std::char_traits<char_type>::int_type;
+
+ public:
+ using token_type = typename lexer_base<BasicJsonType>::token_type;
+
+ explicit lexer(InputAdapterType&& adapter, bool ignore_comments_ = false)
+ : ia(std::move(adapter))
+ , ignore_comments(ignore_comments_)
+ , decimal_point_char(static_cast<char_int_type>(get_decimal_point()))
+ {}
+
+ // delete because of pointer members
+ lexer(const lexer&) = delete;
+ lexer(lexer&&) = default;
+ lexer& operator=(lexer&) = delete;
+ lexer& operator=(lexer&&) = default;
+ ~lexer() = default;
+
+ private:
+ /////////////////////
+ // locales
+ /////////////////////
+
+ /// return the locale-dependent decimal point
+ JSON_HEDLEY_PURE
+ static char get_decimal_point() noexcept
+ {
+ const auto* loc = localeconv();
+ JSON_ASSERT(loc != nullptr);
+ return (loc->decimal_point == nullptr) ? '.' : *(loc->decimal_point);
+ }
+
+ /////////////////////
+ // scan functions
+ /////////////////////
+
+ /*!
+ @brief get codepoint from 4 hex characters following `\u`
+
+ For input "\u c1 c2 c3 c4" the codepoint is:
+ (c1 * 0x1000) + (c2 * 0x0100) + (c3 * 0x0010) + c4
+ = (c1 << 12) + (c2 << 8) + (c3 << 4) + (c4 << 0)
+
+ Furthermore, the possible characters '0'..'9', 'A'..'F', and 'a'..'f'
+ must be converted to the integers 0x0..0x9, 0xA..0xF, 0xA..0xF, resp. The
+ conversion is done by subtracting the offset (0x30, 0x37, and 0x57)
+ between the ASCII value of the character and the desired integer value.
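+
+    For example (illustrative), the input "\u00E9" yields
+    (0x0 << 12) + (0x0 << 8) + (0xE << 4) + 0x9 = 0x00E9.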
+
+ @return codepoint (0x0000..0xFFFF) or -1 in case of an error (e.g. EOF or
+ non-hex character)
+ */
+ int get_codepoint()
+ {
+ // this function only makes sense after reading `\u`
+ JSON_ASSERT(current == 'u');
+ int codepoint = 0;
+
+ const auto factors = { 12u, 8u, 4u, 0u };
+ for (const auto factor : factors)
+ {
+ get();
+
+ if (current >= '0' && current <= '9')
+ {
+ codepoint += static_cast<int>((static_cast<unsigned int>(current) - 0x30u) << factor);
+ }
+ else if (current >= 'A' && current <= 'F')
+ {
+ codepoint += static_cast<int>((static_cast<unsigned int>(current) - 0x37u) << factor);
+ }
+ else if (current >= 'a' && current <= 'f')
+ {
+ codepoint += static_cast<int>((static_cast<unsigned int>(current) - 0x57u) << factor);
+ }
+ else
+ {
+ return -1;
+ }
+ }
+
+ JSON_ASSERT(0x0000 <= codepoint && codepoint <= 0xFFFF);
+ return codepoint;
+ }
+
+ /*!
+ @brief check if the next byte(s) are inside a given range
+
+ Adds the current byte and, for each passed range, reads a new byte and
+ checks if it is inside the range. If a violation was detected, set up an
+ error message and return false. Otherwise, return true.
+
+ @param[in] ranges list of integers; interpreted as list of pairs of
+ inclusive lower and upper bound, respectively
+
+ @pre The passed list @a ranges must have 2, 4, or 6 elements; that is,
+ 1, 2, or 3 pairs. This precondition is enforced by an assertion.
+
+ @return true if and only if no range violation was detected
+ */
+ bool next_byte_in_range(std::initializer_list<char_int_type> ranges)
+ {
+ JSON_ASSERT(ranges.size() == 2 || ranges.size() == 4 || ranges.size() == 6);
+ add(current);
+
+ for (auto range = ranges.begin(); range != ranges.end(); ++range)
+ {
+ get();
+ if (JSON_HEDLEY_LIKELY(*range <= current && current <= *(++range)))
+ {
+ add(current);
+ }
+ else
+ {
+ error_message = "invalid string: ill-formed UTF-8 byte";
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ /*!
+ @brief scan a string literal
+
+ This function scans a string according to Sect. 7 of RFC 7159. While
+    scanning, bytes are escaped and copied into buffer token_buffer. When the
+ function returns successfully, token_buffer is *not* null-terminated (as it
+ may contain \0 bytes), and token_buffer.size() is the number of bytes in the
+ string.
+
+ @return token_type::value_string if string could be successfully scanned,
+ token_type::parse_error otherwise
+
+ @note In case of errors, variable error_message contains a textual
+ description.
+ */
+ token_type scan_string()
+ {
+ // reset token_buffer (ignore opening quote)
+ reset();
+
+ // we entered the function by reading an open quote
+ JSON_ASSERT(current == '\"');
+
+ while (true)
+ {
+ // get next character
+ switch (get())
+ {
+ // end of file while parsing string
+ case std::char_traits<char_type>::eof():
+ {
+ error_message = "invalid string: missing closing quote";
+ return token_type::parse_error;
+ }
+
+ // closing quote
+ case '\"':
+ {
+ return token_type::value_string;
+ }
+
+ // escapes
+ case '\\':
+ {
+ switch (get())
+ {
+ // quotation mark
+ case '\"':
+ add('\"');
+ break;
+ // reverse solidus
+ case '\\':
+ add('\\');
+ break;
+ // solidus
+ case '/':
+ add('/');
+ break;
+ // backspace
+ case 'b':
+ add('\b');
+ break;
+ // form feed
+ case 'f':
+ add('\f');
+ break;
+ // line feed
+ case 'n':
+ add('\n');
+ break;
+ // carriage return
+ case 'r':
+ add('\r');
+ break;
+ // tab
+ case 't':
+ add('\t');
+ break;
+
+ // unicode escapes
+ case 'u':
+ {
+ const int codepoint1 = get_codepoint();
+ int codepoint = codepoint1; // start with codepoint1
+
+ if (JSON_HEDLEY_UNLIKELY(codepoint1 == -1))
+ {
+ error_message = "invalid string: '\\u' must be followed by 4 hex digits";
+ return token_type::parse_error;
+ }
+
+ // check if code point is a high surrogate
+ if (0xD800 <= codepoint1 && codepoint1 <= 0xDBFF)
+ {
+ // expect next \uxxxx entry
+ if (JSON_HEDLEY_LIKELY(get() == '\\' && get() == 'u'))
+ {
+ const int codepoint2 = get_codepoint();
+
+ if (JSON_HEDLEY_UNLIKELY(codepoint2 == -1))
+ {
+ error_message = "invalid string: '\\u' must be followed by 4 hex digits";
+ return token_type::parse_error;
+ }
+
+ // check if codepoint2 is a low surrogate
+ if (JSON_HEDLEY_LIKELY(0xDC00 <= codepoint2 && codepoint2 <= 0xDFFF))
+ {
+ // overwrite codepoint
+ codepoint = static_cast<int>(
+ // high surrogate occupies the most significant 22 bits
+ (static_cast<unsigned int>(codepoint1) << 10u)
+ // low surrogate occupies the least significant 15 bits
+ + static_cast<unsigned int>(codepoint2)
+ // there is still the 0xD800, 0xDC00 and 0x10000 noise
+ // in the result so we have to subtract with:
+                                    // (0xD800 << 10) + 0xDC00 - 0x10000 = 0x35FDC00
+ - 0x35FDC00u);
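+                                    // worked example (illustrative): for
+                                    // "\uD83D\uDE00", (0xD83D << 10) + 0xDE00
+                                    // - 0x35FDC00 = 0x1F600 (U+1F600)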
+ }
+ else
+ {
+ error_message = "invalid string: surrogate U+D800..U+DBFF must be followed by U+DC00..U+DFFF";
+ return token_type::parse_error;
+ }
+ }
+ else
+ {
+ error_message = "invalid string: surrogate U+D800..U+DBFF must be followed by U+DC00..U+DFFF";
+ return token_type::parse_error;
+ }
+ }
+ else
+ {
+ if (JSON_HEDLEY_UNLIKELY(0xDC00 <= codepoint1 && codepoint1 <= 0xDFFF))
+ {
+ error_message = "invalid string: surrogate U+DC00..U+DFFF must follow U+D800..U+DBFF";
+ return token_type::parse_error;
+ }
+ }
+
+ // result of the above calculation yields a proper codepoint
+ JSON_ASSERT(0x00 <= codepoint && codepoint <= 0x10FFFF);
+
+ // translate codepoint into bytes
+ if (codepoint < 0x80)
+ {
+ // 1-byte characters: 0xxxxxxx (ASCII)
+ add(static_cast<char_int_type>(codepoint));
+ }
+ else if (codepoint <= 0x7FF)
+ {
+ // 2-byte characters: 110xxxxx 10xxxxxx
+ add(static_cast<char_int_type>(0xC0u | (static_cast<unsigned int>(codepoint) >> 6u)));
+ add(static_cast<char_int_type>(0x80u | (static_cast<unsigned int>(codepoint) & 0x3Fu)));
+ }
+ else if (codepoint <= 0xFFFF)
+ {
+ // 3-byte characters: 1110xxxx 10xxxxxx 10xxxxxx
+ add(static_cast<char_int_type>(0xE0u | (static_cast<unsigned int>(codepoint) >> 12u)));
+ add(static_cast<char_int_type>(0x80u | ((static_cast<unsigned int>(codepoint) >> 6u) & 0x3Fu)));
+ add(static_cast<char_int_type>(0x80u | (static_cast<unsigned int>(codepoint) & 0x3Fu)));
+ }
+ else
+ {
+ // 4-byte characters: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ add(static_cast<char_int_type>(0xF0u | (static_cast<unsigned int>(codepoint) >> 18u)));
+ add(static_cast<char_int_type>(0x80u | ((static_cast<unsigned int>(codepoint) >> 12u) & 0x3Fu)));
+ add(static_cast<char_int_type>(0x80u | ((static_cast<unsigned int>(codepoint) >> 6u) & 0x3Fu)));
+ add(static_cast<char_int_type>(0x80u | (static_cast<unsigned int>(codepoint) & 0x3Fu)));
+ }
+
+ break;
+ }
+
+ // other characters after escape
+ default:
+ error_message = "invalid string: forbidden character after backslash";
+ return token_type::parse_error;
+ }
+
+ break;
+ }
+
+ // invalid control characters
+ case 0x00:
+ {
+ error_message = "invalid string: control character U+0000 (NUL) must be escaped to \\u0000";
+ return token_type::parse_error;
+ }
+
+ case 0x01:
+ {
+ error_message = "invalid string: control character U+0001 (SOH) must be escaped to \\u0001";
+ return token_type::parse_error;
+ }
+
+ case 0x02:
+ {
+ error_message = "invalid string: control character U+0002 (STX) must be escaped to \\u0002";
+ return token_type::parse_error;
+ }
+
+ case 0x03:
+ {
+ error_message = "invalid string: control character U+0003 (ETX) must be escaped to \\u0003";
+ return token_type::parse_error;
+ }
+
+ case 0x04:
+ {
+ error_message = "invalid string: control character U+0004 (EOT) must be escaped to \\u0004";
+ return token_type::parse_error;
+ }
+
+ case 0x05:
+ {
+ error_message = "invalid string: control character U+0005 (ENQ) must be escaped to \\u0005";
+ return token_type::parse_error;
+ }
+
+ case 0x06:
+ {
+ error_message = "invalid string: control character U+0006 (ACK) must be escaped to \\u0006";
+ return token_type::parse_error;
+ }
+
+ case 0x07:
+ {
+ error_message = "invalid string: control character U+0007 (BEL) must be escaped to \\u0007";
+ return token_type::parse_error;
+ }
+
+ case 0x08:
+ {
+ error_message = "invalid string: control character U+0008 (BS) must be escaped to \\u0008 or \\b";
+ return token_type::parse_error;
+ }
+
+ case 0x09:
+ {
+ error_message = "invalid string: control character U+0009 (HT) must be escaped to \\u0009 or \\t";
+ return token_type::parse_error;
+ }
+
+ case 0x0A:
+ {
+ error_message = "invalid string: control character U+000A (LF) must be escaped to \\u000A or \\n";
+ return token_type::parse_error;
+ }
+
+ case 0x0B:
+ {
+ error_message = "invalid string: control character U+000B (VT) must be escaped to \\u000B";
+ return token_type::parse_error;
+ }
+
+ case 0x0C:
+ {
+ error_message = "invalid string: control character U+000C (FF) must be escaped to \\u000C or \\f";
+ return token_type::parse_error;
+ }
+
+ case 0x0D:
+ {
+ error_message = "invalid string: control character U+000D (CR) must be escaped to \\u000D or \\r";
+ return token_type::parse_error;
+ }
+
+ case 0x0E:
+ {
+ error_message = "invalid string: control character U+000E (SO) must be escaped to \\u000E";
+ return token_type::parse_error;
+ }
+
+ case 0x0F:
+ {
+ error_message = "invalid string: control character U+000F (SI) must be escaped to \\u000F";
+ return token_type::parse_error;
+ }
+
+ case 0x10:
+ {
+ error_message = "invalid string: control character U+0010 (DLE) must be escaped to \\u0010";
+ return token_type::parse_error;
+ }
+
+ case 0x11:
+ {
+ error_message = "invalid string: control character U+0011 (DC1) must be escaped to \\u0011";
+ return token_type::parse_error;
+ }
+
+ case 0x12:
+ {
+ error_message = "invalid string: control character U+0012 (DC2) must be escaped to \\u0012";
+ return token_type::parse_error;
+ }
+
+ case 0x13:
+ {
+ error_message = "invalid string: control character U+0013 (DC3) must be escaped to \\u0013";
+ return token_type::parse_error;
+ }
+
+ case 0x14:
+ {
+ error_message = "invalid string: control character U+0014 (DC4) must be escaped to \\u0014";
+ return token_type::parse_error;
+ }
+
+ case 0x15:
+ {
+ error_message = "invalid string: control character U+0015 (NAK) must be escaped to \\u0015";
+ return token_type::parse_error;
+ }
+
+ case 0x16:
+ {
+ error_message = "invalid string: control character U+0016 (SYN) must be escaped to \\u0016";
+ return token_type::parse_error;
+ }
+
+ case 0x17:
+ {
+ error_message = "invalid string: control character U+0017 (ETB) must be escaped to \\u0017";
+ return token_type::parse_error;
+ }
+
+ case 0x18:
+ {
+ error_message = "invalid string: control character U+0018 (CAN) must be escaped to \\u0018";
+ return token_type::parse_error;
+ }
+
+ case 0x19:
+ {
+ error_message = "invalid string: control character U+0019 (EM) must be escaped to \\u0019";
+ return token_type::parse_error;
+ }
+
+ case 0x1A:
+ {
+ error_message = "invalid string: control character U+001A (SUB) must be escaped to \\u001A";
+ return token_type::parse_error;
+ }
+
+ case 0x1B:
+ {
+ error_message = "invalid string: control character U+001B (ESC) must be escaped to \\u001B";
+ return token_type::parse_error;
+ }
+
+ case 0x1C:
+ {
+ error_message = "invalid string: control character U+001C (FS) must be escaped to \\u001C";
+ return token_type::parse_error;
+ }
+
+ case 0x1D:
+ {
+ error_message = "invalid string: control character U+001D (GS) must be escaped to \\u001D";
+ return token_type::parse_error;
+ }
+
+ case 0x1E:
+ {
+ error_message = "invalid string: control character U+001E (RS) must be escaped to \\u001E";
+ return token_type::parse_error;
+ }
+
+ case 0x1F:
+ {
+ error_message = "invalid string: control character U+001F (US) must be escaped to \\u001F";
+ return token_type::parse_error;
+ }
+
+                // U+0020..U+007F (except U+0022 (quote) and U+005C (backslash))
+ case 0x20:
+ case 0x21:
+ case 0x23:
+ case 0x24:
+ case 0x25:
+ case 0x26:
+ case 0x27:
+ case 0x28:
+ case 0x29:
+ case 0x2A:
+ case 0x2B:
+ case 0x2C:
+ case 0x2D:
+ case 0x2E:
+ case 0x2F:
+ case 0x30:
+ case 0x31:
+ case 0x32:
+ case 0x33:
+ case 0x34:
+ case 0x35:
+ case 0x36:
+ case 0x37:
+ case 0x38:
+ case 0x39:
+ case 0x3A:
+ case 0x3B:
+ case 0x3C:
+ case 0x3D:
+ case 0x3E:
+ case 0x3F:
+ case 0x40:
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x44:
+ case 0x45:
+ case 0x46:
+ case 0x47:
+ case 0x48:
+ case 0x49:
+ case 0x4A:
+ case 0x4B:
+ case 0x4C:
+ case 0x4D:
+ case 0x4E:
+ case 0x4F:
+ case 0x50:
+ case 0x51:
+ case 0x52:
+ case 0x53:
+ case 0x54:
+ case 0x55:
+ case 0x56:
+ case 0x57:
+ case 0x58:
+ case 0x59:
+ case 0x5A:
+ case 0x5B:
+ case 0x5D:
+ case 0x5E:
+ case 0x5F:
+ case 0x60:
+ case 0x61:
+ case 0x62:
+ case 0x63:
+ case 0x64:
+ case 0x65:
+ case 0x66:
+ case 0x67:
+ case 0x68:
+ case 0x69:
+ case 0x6A:
+ case 0x6B:
+ case 0x6C:
+ case 0x6D:
+ case 0x6E:
+ case 0x6F:
+ case 0x70:
+ case 0x71:
+ case 0x72:
+ case 0x73:
+ case 0x74:
+ case 0x75:
+ case 0x76:
+ case 0x77:
+ case 0x78:
+ case 0x79:
+ case 0x7A:
+ case 0x7B:
+ case 0x7C:
+ case 0x7D:
+ case 0x7E:
+ case 0x7F:
+ {
+ add(current);
+ break;
+ }
+
+ // U+0080..U+07FF: bytes C2..DF 80..BF
+ case 0xC2:
+ case 0xC3:
+ case 0xC4:
+ case 0xC5:
+ case 0xC6:
+ case 0xC7:
+ case 0xC8:
+ case 0xC9:
+ case 0xCA:
+ case 0xCB:
+ case 0xCC:
+ case 0xCD:
+ case 0xCE:
+ case 0xCF:
+ case 0xD0:
+ case 0xD1:
+ case 0xD2:
+ case 0xD3:
+ case 0xD4:
+ case 0xD5:
+ case 0xD6:
+ case 0xD7:
+ case 0xD8:
+ case 0xD9:
+ case 0xDA:
+ case 0xDB:
+ case 0xDC:
+ case 0xDD:
+ case 0xDE:
+ case 0xDF:
+ {
+ if (JSON_HEDLEY_UNLIKELY(!next_byte_in_range({0x80, 0xBF})))
+ {
+ return token_type::parse_error;
+ }
+ break;
+ }
+
+ // U+0800..U+0FFF: bytes E0 A0..BF 80..BF
+ case 0xE0:
+ {
+ if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0xA0, 0xBF, 0x80, 0xBF}))))
+ {
+ return token_type::parse_error;
+ }
+ break;
+ }
+
+ // U+1000..U+CFFF: bytes E1..EC 80..BF 80..BF
+ // U+E000..U+FFFF: bytes EE..EF 80..BF 80..BF
+ case 0xE1:
+ case 0xE2:
+ case 0xE3:
+ case 0xE4:
+ case 0xE5:
+ case 0xE6:
+ case 0xE7:
+ case 0xE8:
+ case 0xE9:
+ case 0xEA:
+ case 0xEB:
+ case 0xEC:
+ case 0xEE:
+ case 0xEF:
+ {
+ if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0x80, 0xBF, 0x80, 0xBF}))))
+ {
+ return token_type::parse_error;
+ }
+ break;
+ }
+
+ // U+D000..U+D7FF: bytes ED 80..9F 80..BF
+ case 0xED:
+ {
+ if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0x80, 0x9F, 0x80, 0xBF}))))
+ {
+ return token_type::parse_error;
+ }
+ break;
+ }
+
+ // U+10000..U+3FFFF F0 90..BF 80..BF 80..BF
+ case 0xF0:
+ {
+ if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0x90, 0xBF, 0x80, 0xBF, 0x80, 0xBF}))))
+ {
+ return token_type::parse_error;
+ }
+ break;
+ }
+
+ // U+40000..U+FFFFF F1..F3 80..BF 80..BF 80..BF
+ case 0xF1:
+ case 0xF2:
+ case 0xF3:
+ {
+ if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0x80, 0xBF, 0x80, 0xBF, 0x80, 0xBF}))))
+ {
+ return token_type::parse_error;
+ }
+ break;
+ }
+
+ // U+100000..U+10FFFF F4 80..8F 80..BF 80..BF
+ case 0xF4:
+ {
+ if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0x80, 0x8F, 0x80, 0xBF, 0x80, 0xBF}))))
+ {
+ return token_type::parse_error;
+ }
+ break;
+ }
+
+ // remaining bytes (80..C1 and F5..FF) are ill-formed
+ default:
+ {
+ error_message = "invalid string: ill-formed UTF-8 byte";
+ return token_type::parse_error;
+ }
+ }
+ }
+ }
+
+ /*!
+ * @brief scan a comment
+ * @return whether comment could be scanned successfully
+ */
+ bool scan_comment()
+ {
+ switch (get())
+ {
+ // single-line comments skip input until a newline or EOF is read
+ case '/':
+ {
+ while (true)
+ {
+ switch (get())
+ {
+ case '\n':
+ case '\r':
+ case std::char_traits<char_type>::eof():
+ case '\0':
+ return true;
+
+ default:
+ break;
+ }
+ }
+ }
+
+ // multi-line comments skip input until */ is read
+ case '*':
+ {
+ while (true)
+ {
+ switch (get())
+ {
+ case std::char_traits<char_type>::eof():
+ case '\0':
+ {
+ error_message = "invalid comment; missing closing '*/'";
+ return false;
+ }
+
+ case '*':
+ {
+ switch (get())
+ {
+ case '/':
+ return true;
+
+ default:
+ {
+ unget();
+ continue;
+ }
+ }
+ }
+
+ default:
+ continue;
+ }
+ }
+ }
+
+ // unexpected character after reading '/'
+ default:
+ {
+ error_message = "invalid comment; expecting '/' or '*' after '/'";
+ return false;
+ }
+ }
+ }
+
+ JSON_HEDLEY_NON_NULL(2)
+ static void strtof(float& f, const char* str, char** endptr) noexcept
+ {
+ f = std::strtof(str, endptr);
+ }
+
+ JSON_HEDLEY_NON_NULL(2)
+ static void strtof(double& f, const char* str, char** endptr) noexcept
+ {
+ f = std::strtod(str, endptr);
+ }
+
+ JSON_HEDLEY_NON_NULL(2)
+ static void strtof(long double& f, const char* str, char** endptr) noexcept
+ {
+ f = std::strtold(str, endptr);
+ }
+
+ /*!
+ @brief scan a number literal
+
+    This function scans a number according to Sect. 6 of RFC 7159.
+
+ The function is realized with a deterministic finite state machine derived
+ from the grammar described in RFC 7159. Starting in state "init", the
+    input is read and used to determine the next state. Only state "done"
+ accepts the number. State "error" is a trap state to model errors. In the
+ table below, "anything" means any character but the ones listed before.
+
+ state | 0 | 1-9 | e E | + | - | . | anything
+ ---------|----------|----------|----------|---------|---------|----------|-----------
+ init | zero | any1 | [error] | [error] | minus | [error] | [error]
+ minus | zero | any1 | [error] | [error] | [error] | [error] | [error]
+ zero | done | done | exponent | done | done | decimal1 | done
+ any1 | any1 | any1 | exponent | done | done | decimal1 | done
+ decimal1 | decimal2 | decimal2 | [error] | [error] | [error] | [error] | [error]
+ decimal2 | decimal2 | decimal2 | exponent | done | done | done | done
+ exponent | any2 | any2 | [error] | sign | sign | [error] | [error]
+ sign | any2 | any2 | [error] | [error] | [error] | [error] | [error]
+ any2 | any2 | any2 | done | done | done | done | done
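+
+    For example (illustrative), the input "-3.14e2" is accepted via the path
+    init -> minus -> any1 -> decimal1 -> decimal2 -> exponent -> any2 -> done,
+    with decimal2 looping once on the second fractional digit.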
+
+ The state machine is realized with one label per state (prefixed with
+ "scan_number_") and `goto` statements between them. The state machine
+ contains cycles, but any cycle can be left when EOF is read. Therefore,
+ the function is guaranteed to terminate.
+
+ During scanning, the read bytes are stored in token_buffer. This string is
+ then converted to a signed integer, an unsigned integer, or a
+ floating-point number.
+
+ @return token_type::value_unsigned, token_type::value_integer, or
+ token_type::value_float if number could be successfully scanned,
+ token_type::parse_error otherwise
+
+ @note The scanner is independent of the current locale. Internally, the
+ locale's decimal point is used instead of `.` to work with the
+ locale-dependent converters.
+ */
+ token_type scan_number() // lgtm [cpp/use-of-goto]
+ {
+ // reset token_buffer to store the number's bytes
+ reset();
+
+ // the type of the parsed number; initially set to unsigned; will be
+ // changed if minus sign, decimal point or exponent is read
+ token_type number_type = token_type::value_unsigned;
+
+ // state (init): we just found out we need to scan a number
+ switch (current)
+ {
+ case '-':
+ {
+ add(current);
+ goto scan_number_minus;
+ }
+
+ case '0':
+ {
+ add(current);
+ goto scan_number_zero;
+ }
+
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ {
+ add(current);
+ goto scan_number_any1;
+ }
+
+ // all other characters are rejected outside scan_number()
+ default: // LCOV_EXCL_LINE
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
+ }
+
+scan_number_minus:
+ // state: we just parsed a leading minus sign
+ number_type = token_type::value_integer;
+ switch (get())
+ {
+ case '0':
+ {
+ add(current);
+ goto scan_number_zero;
+ }
+
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ {
+ add(current);
+ goto scan_number_any1;
+ }
+
+ default:
+ {
+ error_message = "invalid number; expected digit after '-'";
+ return token_type::parse_error;
+ }
+ }
+
+scan_number_zero:
+    // state: we just parsed a zero (maybe with a leading minus sign)
+ switch (get())
+ {
+ case '.':
+ {
+ add(decimal_point_char);
+ goto scan_number_decimal1;
+ }
+
+ case 'e':
+ case 'E':
+ {
+ add(current);
+ goto scan_number_exponent;
+ }
+
+ default:
+ goto scan_number_done;
+ }
+
+scan_number_any1:
+ // state: we just parsed a number 0-9 (maybe with a leading minus sign)
+ switch (get())
+ {
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ {
+ add(current);
+ goto scan_number_any1;
+ }
+
+ case '.':
+ {
+ add(decimal_point_char);
+ goto scan_number_decimal1;
+ }
+
+ case 'e':
+ case 'E':
+ {
+ add(current);
+ goto scan_number_exponent;
+ }
+
+ default:
+ goto scan_number_done;
+ }
+
+scan_number_decimal1:
+ // state: we just parsed a decimal point
+ number_type = token_type::value_float;
+ switch (get())
+ {
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ {
+ add(current);
+ goto scan_number_decimal2;
+ }
+
+ default:
+ {
+ error_message = "invalid number; expected digit after '.'";
+ return token_type::parse_error;
+ }
+ }
+
+scan_number_decimal2:
+    // we just parsed at least one digit after the decimal point
+ switch (get())
+ {
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ {
+ add(current);
+ goto scan_number_decimal2;
+ }
+
+ case 'e':
+ case 'E':
+ {
+ add(current);
+ goto scan_number_exponent;
+ }
+
+ default:
+ goto scan_number_done;
+ }
+
+scan_number_exponent:
+ // we just parsed an exponent
+ number_type = token_type::value_float;
+ switch (get())
+ {
+ case '+':
+ case '-':
+ {
+ add(current);
+ goto scan_number_sign;
+ }
+
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ {
+ add(current);
+ goto scan_number_any2;
+ }
+
+ default:
+ {
+ error_message =
+ "invalid number; expected '+', '-', or digit after exponent";
+ return token_type::parse_error;
+ }
+ }
+
+scan_number_sign:
+ // we just parsed an exponent sign
+ switch (get())
+ {
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ {
+ add(current);
+ goto scan_number_any2;
+ }
+
+ default:
+ {
+ error_message = "invalid number; expected digit after exponent sign";
+ return token_type::parse_error;
+ }
+ }
+
+scan_number_any2:
+    // we just parsed a digit after the exponent or exponent sign
+ switch (get())
+ {
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ {
+ add(current);
+ goto scan_number_any2;
+ }
+
+ default:
+ goto scan_number_done;
+ }
+
+scan_number_done:
+ // unget the character after the number (we only read it to know that
+ // we are done scanning a number)
+ unget();
+
+ char* endptr = nullptr;
+ errno = 0;
+
+ // try to parse integers first and fall back to floats
+ if (number_type == token_type::value_unsigned)
+ {
+ const auto x = std::strtoull(token_buffer.data(), &endptr, 10);
+
+ // we checked the number format before
+ JSON_ASSERT(endptr == token_buffer.data() + token_buffer.size());
+
+ if (errno == 0)
+ {
+ value_unsigned = static_cast<number_unsigned_t>(x);
+ if (value_unsigned == x)
+ {
+ return token_type::value_unsigned;
+ }
+ }
+ }
+ else if (number_type == token_type::value_integer)
+ {
+ const auto x = std::strtoll(token_buffer.data(), &endptr, 10);
+
+ // we checked the number format before
+ JSON_ASSERT(endptr == token_buffer.data() + token_buffer.size());
+
+ if (errno == 0)
+ {
+ value_integer = static_cast<number_integer_t>(x);
+ if (value_integer == x)
+ {
+ return token_type::value_integer;
+ }
+ }
+ }
+
+ // this code is reached if we parse a floating-point number or if an
+ // integer conversion above failed
+ strtof(value_float, token_buffer.data(), &endptr);
+
+ // we checked the number format before
+ JSON_ASSERT(endptr == token_buffer.data() + token_buffer.size());
+
+ return token_type::value_float;
+ }
+
+ /*!
+ @param[in] literal_text the literal text to expect
+ @param[in] length the length of the passed literal text
+ @param[in] return_type the token type to return on success
+ */
+ JSON_HEDLEY_NON_NULL(2)
+ token_type scan_literal(const char_type* literal_text, const std::size_t length,
+ token_type return_type)
+ {
+ JSON_ASSERT(std::char_traits<char_type>::to_char_type(current) == literal_text[0]);
+ for (std::size_t i = 1; i < length; ++i)
+ {
+ if (JSON_HEDLEY_UNLIKELY(std::char_traits<char_type>::to_char_type(get()) != literal_text[i]))
+ {
+ error_message = "invalid literal";
+ return token_type::parse_error;
+ }
+ }
+ return return_type;
+ }
+
+ /////////////////////
+ // input management
+ /////////////////////
+
+ /// reset token_buffer; current character is beginning of token
+ void reset() noexcept
+ {
+ token_buffer.clear();
+ token_string.clear();
+ token_string.push_back(std::char_traits<char_type>::to_char_type(current));
+ }
+
+ /*
+ @brief get next character from the input
+
+ This function provides the interface to the used input adapter. It does
+ not throw in case the input reached EOF, but returns a
+ `std::char_traits<char>::eof()` in that case. Stores the scanned characters
+ for use in error messages.
+
+ @return character read from the input
+ */
+ char_int_type get()
+ {
+ ++position.chars_read_total;
+ ++position.chars_read_current_line;
+
+ if (next_unget)
+ {
+ // just reset the next_unget variable and work with current
+ next_unget = false;
+ }
+ else
+ {
+ current = ia.get_character();
+ }
+
+ if (JSON_HEDLEY_LIKELY(current != std::char_traits<char_type>::eof()))
+ {
+ token_string.push_back(std::char_traits<char_type>::to_char_type(current));
+ }
+
+ if (current == '\n')
+ {
+ ++position.lines_read;
+ position.chars_read_current_line = 0;
+ }
+
+ return current;
+ }
+
+ /*!
+ @brief unget current character (read it again on next get)
+
+ We implement unget by setting variable next_unget to true. The input is not
+ changed - we just simulate ungetting by modifying chars_read_total,
+ chars_read_current_line, and token_string. The next call to get() will
+ behave as if the unget character is read again.
+ */
+ void unget()
+ {
+ next_unget = true;
+
+ --position.chars_read_total;
+
+ // in case we "unget" a newline, we have to also decrement the lines_read
+ if (position.chars_read_current_line == 0)
+ {
+ if (position.lines_read > 0)
+ {
+ --position.lines_read;
+ }
+ }
+ else
+ {
+ --position.chars_read_current_line;
+ }
+
+ if (JSON_HEDLEY_LIKELY(current != std::char_traits<char_type>::eof()))
+ {
+ JSON_ASSERT(!token_string.empty());
+ token_string.pop_back();
+ }
+ }
+
+ /// add a character to token_buffer
+ void add(char_int_type c)
+ {
+ token_buffer.push_back(static_cast<typename string_t::value_type>(c));
+ }
+
+ public:
+ /////////////////////
+ // value getters
+ /////////////////////
+
+ /// return integer value
+ constexpr number_integer_t get_number_integer() const noexcept
+ {
+ return value_integer;
+ }
+
+ /// return unsigned integer value
+ constexpr number_unsigned_t get_number_unsigned() const noexcept
+ {
+ return value_unsigned;
+ }
+
+ /// return floating-point value
+ constexpr number_float_t get_number_float() const noexcept
+ {
+ return value_float;
+ }
+
+ /// return current string value (implicitly resets the token; useful only once)
+ string_t& get_string()
+ {
+ return token_buffer;
+ }
+
+ /////////////////////
+ // diagnostics
+ /////////////////////
+
+ /// return position of last read token
+ constexpr position_t get_position() const noexcept
+ {
+ return position;
+ }
+
+ /// return the last read token (for errors only). Will never contain EOF
+ /// (an arbitrary value that is not a valid char value, often -1), because
+ /// 255 may legitimately occur. May contain NUL, which should be escaped.
+ std::string get_token_string() const
+ {
+ // escape control characters
+ std::string result;
+ for (const auto c : token_string)
+ {
+ if (static_cast<unsigned char>(c) <= '\x1F')
+ {
+ // escape control characters
+ std::array<char, 9> cs{{}};
+ (std::snprintf)(cs.data(), cs.size(), "<U+%.4X>", static_cast<unsigned char>(c));
+ result += cs.data();
+ }
+ else
+ {
+ // add character as is
+ result.push_back(static_cast<std::string::value_type>(c));
+ }
+ }
+
+ return result;
+ }
+
+ /// return syntax error message
+ JSON_HEDLEY_RETURNS_NON_NULL
+ constexpr const char* get_error_message() const noexcept
+ {
+ return error_message;
+ }
+
+ /////////////////////
+ // actual scanner
+ /////////////////////
+
+ /*!
+ @brief skip the UTF-8 byte order mark
+ @return true iff there is no BOM or the correct BOM has been skipped
+ */
+ bool skip_bom()
+ {
+ if (get() == 0xEF)
+ {
+ // check if we completely parse the BOM
+ return get() == 0xBB && get() == 0xBF;
+ }
+
+ // the first character is not the beginning of the BOM; unget it to
+        // process it later
+ unget();
+ return true;
+ }
+
+ void skip_whitespace()
+ {
+ do
+ {
+ get();
+ }
+ while (current == ' ' || current == '\t' || current == '\n' || current == '\r');
+ }
+
+ token_type scan()
+ {
+ // initially, skip the BOM
+ if (position.chars_read_total == 0 && !skip_bom())
+ {
+ error_message = "invalid BOM; must be 0xEF 0xBB 0xBF if given";
+ return token_type::parse_error;
+ }
+
+ // read next character and ignore whitespace
+ skip_whitespace();
+
+ // ignore comments
+ while (ignore_comments && current == '/')
+ {
+ if (!scan_comment())
+ {
+ return token_type::parse_error;
+ }
+
+ // skip following whitespace
+ skip_whitespace();
+ }
+
+ switch (current)
+ {
+ // structural characters
+ case '[':
+ return token_type::begin_array;
+ case ']':
+ return token_type::end_array;
+ case '{':
+ return token_type::begin_object;
+ case '}':
+ return token_type::end_object;
+ case ':':
+ return token_type::name_separator;
+ case ',':
+ return token_type::value_separator;
+
+ // literals
+ case 't':
+ {
+ std::array<char_type, 4> true_literal = {{'t', 'r', 'u', 'e'}};
+ return scan_literal(true_literal.data(), true_literal.size(), token_type::literal_true);
+ }
+ case 'f':
+ {
+ std::array<char_type, 5> false_literal = {{'f', 'a', 'l', 's', 'e'}};
+ return scan_literal(false_literal.data(), false_literal.size(), token_type::literal_false);
+ }
+ case 'n':
+ {
+ std::array<char_type, 4> null_literal = {{'n', 'u', 'l', 'l'}};
+ return scan_literal(null_literal.data(), null_literal.size(), token_type::literal_null);
+ }
+
+ // string
+ case '\"':
+ return scan_string();
+
+ // number
+ case '-':
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ return scan_number();
+
+ // end of input (the null byte is needed when parsing from
+ // string literals)
+ case '\0':
+ case std::char_traits<char_type>::eof():
+ return token_type::end_of_input;
+
+ // error
+ default:
+ error_message = "invalid literal";
+ return token_type::parse_error;
+ }
+ }
+
+ private:
+ /// input adapter
+ InputAdapterType ia;
+
+ /// whether comments should be ignored (true) or signaled as errors (false)
+ const bool ignore_comments = false;
+
+ /// the current character
+ char_int_type current = std::char_traits<char_type>::eof();
+
+ /// whether the next get() call should just return current
+ bool next_unget = false;
+
+ /// the start position of the current token
+ position_t position {};
+
+ /// raw input token string (for error messages)
+ std::vector<char_type> token_string {};
+
+ /// buffer for variable-length tokens (numbers, strings)
+ string_t token_buffer {};
+
+ /// a description of occurred lexer errors
+ const char* error_message = "";
+
+ // number values
+ number_integer_t value_integer = 0;
+ number_unsigned_t value_unsigned = 0;
+ number_float_t value_float = 0;
+
+ /// the decimal point
+ const char_int_type decimal_point_char = '.';
+};
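+
+/*
+Usage sketch (illustrative): comment support in this lexer is opt-in and is
+reached through the ignore_comments flag of the public parse() interface:
+
+    // accepts // and C-style comments instead of rejecting them
+    auto j = nlohmann::json::parse("{\"x\": 1 // comment\n}",
+                                   nullptr, true, true);
+*/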
+} // namespace detail
+} // namespace nlohmann
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+// #include <nlohmann/detail/meta/is_sax.hpp>
+
+
+#include <cstddef> // size_t
+#include <utility> // declval
+#include <string> // string
+
+// #include <nlohmann/detail/meta/detected.hpp>
+
+// #include <nlohmann/detail/meta/type_traits.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+template<typename T>
+using null_function_t = decltype(std::declval<T&>().null());
+
+template<typename T>
+using boolean_function_t =
+ decltype(std::declval<T&>().boolean(std::declval<bool>()));
+
+template<typename T, typename Integer>
+using number_integer_function_t =
+ decltype(std::declval<T&>().number_integer(std::declval<Integer>()));
+
+template<typename T, typename Unsigned>
+using number_unsigned_function_t =
+ decltype(std::declval<T&>().number_unsigned(std::declval<Unsigned>()));
+
+template<typename T, typename Float, typename String>
+using number_float_function_t = decltype(std::declval<T&>().number_float(
+ std::declval<Float>(), std::declval<const String&>()));
+
+template<typename T, typename String>
+using string_function_t =
+ decltype(std::declval<T&>().string(std::declval<String&>()));
+
+template<typename T, typename Binary>
+using binary_function_t =
+ decltype(std::declval<T&>().binary(std::declval<Binary&>()));
+
+template<typename T>
+using start_object_function_t =
+ decltype(std::declval<T&>().start_object(std::declval<std::size_t>()));
+
+template<typename T, typename String>
+using key_function_t =
+ decltype(std::declval<T&>().key(std::declval<String&>()));
+
+template<typename T>
+using end_object_function_t = decltype(std::declval<T&>().end_object());
+
+template<typename T>
+using start_array_function_t =
+ decltype(std::declval<T&>().start_array(std::declval<std::size_t>()));
+
+template<typename T>
+using end_array_function_t = decltype(std::declval<T&>().end_array());
+
+template<typename T, typename Exception>
+using parse_error_function_t = decltype(std::declval<T&>().parse_error(
+ std::declval<std::size_t>(), std::declval<const std::string&>(),
+ std::declval<const Exception&>()));
+
+template<typename SAX, typename BasicJsonType>
+struct is_sax
+{
+ private:
+ static_assert(is_basic_json<BasicJsonType>::value,
+ "BasicJsonType must be of type basic_json<...>");
+
+ using number_integer_t = typename BasicJsonType::number_integer_t;
+ using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+ using number_float_t = typename BasicJsonType::number_float_t;
+ using string_t = typename BasicJsonType::string_t;
+ using binary_t = typename BasicJsonType::binary_t;
+ using exception_t = typename BasicJsonType::exception;
+
+ public:
+ static constexpr bool value =
+ is_detected_exact<bool, null_function_t, SAX>::value &&
+ is_detected_exact<bool, boolean_function_t, SAX>::value &&
+ is_detected_exact<bool, number_integer_function_t, SAX, number_integer_t>::value &&
+ is_detected_exact<bool, number_unsigned_function_t, SAX, number_unsigned_t>::value &&
+ is_detected_exact<bool, number_float_function_t, SAX, number_float_t, string_t>::value &&
+ is_detected_exact<bool, string_function_t, SAX, string_t>::value &&
+ is_detected_exact<bool, binary_function_t, SAX, binary_t>::value &&
+ is_detected_exact<bool, start_object_function_t, SAX>::value &&
+ is_detected_exact<bool, key_function_t, SAX, string_t>::value &&
+ is_detected_exact<bool, end_object_function_t, SAX>::value &&
+ is_detected_exact<bool, start_array_function_t, SAX>::value &&
+ is_detected_exact<bool, end_array_function_t, SAX>::value &&
+ is_detected_exact<bool, parse_error_function_t, SAX, exception_t>::value;
+};
+
+template<typename SAX, typename BasicJsonType>
+struct is_sax_static_asserts
+{
+ private:
+ static_assert(is_basic_json<BasicJsonType>::value,
+ "BasicJsonType must be of type basic_json<...>");
+
+ using number_integer_t = typename BasicJsonType::number_integer_t;
+ using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+ using number_float_t = typename BasicJsonType::number_float_t;
+ using string_t = typename BasicJsonType::string_t;
+ using binary_t = typename BasicJsonType::binary_t;
+ using exception_t = typename BasicJsonType::exception;
+
+ public:
+ static_assert(is_detected_exact<bool, null_function_t, SAX>::value,
+ "Missing/invalid function: bool null()");
+ static_assert(is_detected_exact<bool, boolean_function_t, SAX>::value,
+ "Missing/invalid function: bool boolean(bool)");
+ static_assert(is_detected_exact<bool, boolean_function_t, SAX>::value,
+ "Missing/invalid function: bool boolean(bool)");
+ static_assert(
+ is_detected_exact<bool, number_integer_function_t, SAX,
+ number_integer_t>::value,
+ "Missing/invalid function: bool number_integer(number_integer_t)");
+ static_assert(
+ is_detected_exact<bool, number_unsigned_function_t, SAX,
+ number_unsigned_t>::value,
+ "Missing/invalid function: bool number_unsigned(number_unsigned_t)");
+ static_assert(is_detected_exact<bool, number_float_function_t, SAX,
+ number_float_t, string_t>::value,
+ "Missing/invalid function: bool number_float(number_float_t, const string_t&)");
+ static_assert(
+ is_detected_exact<bool, string_function_t, SAX, string_t>::value,
+ "Missing/invalid function: bool string(string_t&)");
+ static_assert(
+ is_detected_exact<bool, binary_function_t, SAX, binary_t>::value,
+ "Missing/invalid function: bool binary(binary_t&)");
+ static_assert(is_detected_exact<bool, start_object_function_t, SAX>::value,
+ "Missing/invalid function: bool start_object(std::size_t)");
+ static_assert(is_detected_exact<bool, key_function_t, SAX, string_t>::value,
+ "Missing/invalid function: bool key(string_t&)");
+ static_assert(is_detected_exact<bool, end_object_function_t, SAX>::value,
+ "Missing/invalid function: bool end_object()");
+ static_assert(is_detected_exact<bool, start_array_function_t, SAX>::value,
+ "Missing/invalid function: bool start_array(std::size_t)");
+ static_assert(is_detected_exact<bool, end_array_function_t, SAX>::value,
+ "Missing/invalid function: bool end_array()");
+ static_assert(
+ is_detected_exact<bool, parse_error_function_t, SAX, exception_t>::value,
+ "Missing/invalid function: bool parse_error(std::size_t, const "
+ "std::string&, const exception&)");
+};
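+
+/*
+Sketch of a user-defined SAX consumer satisfying the interface checked above
+(illustrative, assuming the default nlohmann::json specialization); it merely
+counts events and can be fed to the public sax_parse() interface:
+
+    struct event_counter
+    {
+        std::size_t events = 0;
+        bool null() { ++events; return true; }
+        bool boolean(bool) { ++events; return true; }
+        bool number_integer(nlohmann::json::number_integer_t) { ++events; return true; }
+        bool number_unsigned(nlohmann::json::number_unsigned_t) { ++events; return true; }
+        bool number_float(nlohmann::json::number_float_t, const std::string&) { ++events; return true; }
+        bool string(std::string&) { ++events; return true; }
+        bool binary(nlohmann::json::binary_t&) { ++events; return true; }
+        bool start_object(std::size_t) { ++events; return true; }
+        bool key(std::string&) { ++events; return true; }
+        bool end_object() { ++events; return true; }
+        bool start_array(std::size_t) { ++events; return true; }
+        bool end_array() { ++events; return true; }
+        bool parse_error(std::size_t, const std::string&, const nlohmann::detail::exception&) { return false; }
+    };
+
+    event_counter counter;
+    bool ok = nlohmann::json::sax_parse(R"([1, "two", null])", &counter);
+*/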
+} // namespace detail
+} // namespace nlohmann
+
+// #include <nlohmann/detail/value_t.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+
+/// how to treat CBOR tags
+enum class cbor_tag_handler_t
+{
+ error, ///< throw a parse_error exception in case of a tag
+ ignore ///< ignore tags
+};
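+
+/*
+Usage sketch (illustrative): the handler is selected through the public
+from_cbor() interface, e.g. to skip tags instead of failing on them (where
+`data` stands for any supported byte container):
+
+    auto j = nlohmann::json::from_cbor(data, true, true,
+                                       nlohmann::json::cbor_tag_handler_t::ignore);
+*/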
+
+/*!
+@brief determine system byte order
+
+@return true if and only if system's byte order is little endian
+
+@note from https://stackoverflow.com/a/1001328/266378
+*/
+static inline bool little_endianess(int num = 1) noexcept
+{
+ return *reinterpret_cast<char*>(&num) == 1;
+}
+
+
+///////////////////
+// binary reader //
+///////////////////
+
+/*!
+@brief deserialization of CBOR, MessagePack, and UBJSON values
+*/
+template<typename BasicJsonType, typename InputAdapterType, typename SAX = json_sax_dom_parser<BasicJsonType>>
+class binary_reader
+{
+ using number_integer_t = typename BasicJsonType::number_integer_t;
+ using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+ using number_float_t = typename BasicJsonType::number_float_t;
+ using string_t = typename BasicJsonType::string_t;
+ using binary_t = typename BasicJsonType::binary_t;
+ using json_sax_t = SAX;
+ using char_type = typename InputAdapterType::char_type;
+ using char_int_type = typename std::char_traits<char_type>::int_type;
+
+ public:
+ /*!
+ @brief create a binary reader
+
+ @param[in] adapter input adapter to read from
+ */
+ explicit binary_reader(InputAdapterType&& adapter) : ia(std::move(adapter))
+ {
+ (void)detail::is_sax_static_asserts<SAX, BasicJsonType> {};
+ }
+
+ // make class move-only
+ binary_reader(const binary_reader&) = delete;
+ binary_reader(binary_reader&&) = default;
+ binary_reader& operator=(const binary_reader&) = delete;
+ binary_reader& operator=(binary_reader&&) = default;
+ ~binary_reader() = default;
+
+ /*!
+ @param[in] format the binary format to parse
+ @param[in] sax_ a SAX event processor
+    @param[in] strict  whether to expect the input to be consumed completely
+ @param[in] tag_handler how to treat CBOR tags
+
+    @return whether parsing was successful
+ */
+ JSON_HEDLEY_NON_NULL(3)
+ bool sax_parse(const input_format_t format,
+ json_sax_t* sax_,
+ const bool strict = true,
+ const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error)
+ {
+ sax = sax_;
+ bool result = false;
+
+ switch (format)
+ {
+ case input_format_t::bson:
+ result = parse_bson_internal();
+ break;
+
+ case input_format_t::cbor:
+ result = parse_cbor_internal(true, tag_handler);
+ break;
+
+ case input_format_t::msgpack:
+ result = parse_msgpack_internal();
+ break;
+
+ case input_format_t::ubjson:
+ result = parse_ubjson_internal();
+ break;
+
+ default: // LCOV_EXCL_LINE
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
+ }
+
+ // strict mode: next byte must be EOF
+ if (result && strict)
+ {
+ if (format == input_format_t::ubjson)
+ {
+ get_ignore_noop();
+ }
+ else
+ {
+ get();
+ }
+
+ if (JSON_HEDLEY_UNLIKELY(current != std::char_traits<char_type>::eof()))
+ {
+ return sax->parse_error(chars_read, get_token_string(),
+ parse_error::create(110, chars_read, exception_message(format, "expected end of input; last byte: 0x" + get_token_string(), "value")));
+ }
+ }
+
+ return result;
+ }
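+
+    /*
+    Usage sketch (illustrative): this reader backs the public from_cbor(),
+    from_msgpack(), from_ubjson() and from_bson() interfaces, e.g.:
+
+        std::vector<std::uint8_t> bytes = nlohmann::json::to_msgpack(j);
+        nlohmann::json round_tripped = nlohmann::json::from_msgpack(bytes);
+    */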
+
+ private:
+ //////////
+ // BSON //
+ //////////
+
+ /*!
+ @brief Reads in a BSON-object and passes it to the SAX-parser.
+ @return whether a valid BSON-value was passed to the SAX parser
+ */
+ bool parse_bson_internal()
+ {
+ std::int32_t document_size{};
+ get_number<std::int32_t, true>(input_format_t::bson, document_size);
+
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_object(std::size_t(-1))))
+ {
+ return false;
+ }
+
+ if (JSON_HEDLEY_UNLIKELY(!parse_bson_element_list(/*is_array*/false)))
+ {
+ return false;
+ }
+
+ return sax->end_object();
+ }
+
+ /*!
+ @brief Parses a C-style string from the BSON input.
+ @param[in, out] result A reference to the string variable where the read
+ string is to be stored.
+ @return `true` if the \x00-byte indicating the end of the string was
+            encountered before the EOF; `false` indicates an unexpected EOF.
+ */
+ bool get_bson_cstr(string_t& result)
+ {
+ auto out = std::back_inserter(result);
+ while (true)
+ {
+ get();
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::bson, "cstring")))
+ {
+ return false;
+ }
+ if (current == 0x00)
+ {
+ return true;
+ }
+ *out++ = static_cast<typename string_t::value_type>(current);
+ }
+ }
+
+ /*!
+ @brief Parses a zero-terminated string of length @a len from the BSON
+ input.
+ @param[in] len The length (including the zero-byte at the end) of the
+ string to be read.
+ @param[in, out] result A reference to the string variable where the read
+ string is to be stored.
+ @tparam NumberType The type of the length @a len
+ @pre len >= 1
+ @return `true` if the string was successfully parsed
+ */
+ template<typename NumberType>
+ bool get_bson_string(const NumberType len, string_t& result)
+ {
+ if (JSON_HEDLEY_UNLIKELY(len < 1))
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::bson, "string length must be at least 1, is " + std::to_string(len), "string")));
+ }
+
+ return get_string(input_format_t::bson, len - static_cast<NumberType>(1), result) && get() != std::char_traits<char_type>::eof();
+ }
+
+ /*!
+ @brief Parses a byte array input of length @a len from the BSON input.
+ @param[in] len The length of the byte array to be read.
+ @param[in, out] result A reference to the binary variable where the read
+ array is to be stored.
+ @tparam NumberType The type of the length @a len
+ @pre len >= 0
+ @return `true` if the byte array was successfully parsed
+ */
+ template<typename NumberType>
+ bool get_bson_binary(const NumberType len, binary_t& result)
+ {
+ if (JSON_HEDLEY_UNLIKELY(len < 0))
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::bson, "byte array length cannot be negative, is " + std::to_string(len), "binary")));
+ }
+
+ // All BSON binary values have a subtype
+ std::uint8_t subtype{};
+ get_number<std::uint8_t>(input_format_t::bson, subtype);
+ result.set_subtype(subtype);
+
+ return get_binary(input_format_t::bson, len, result);
+ }
+
+ /*!
+ @brief Read a BSON document element of the given @a element_type.
+ @param[in] element_type The BSON element type, c.f. http://bsonspec.org/spec.html
+ @param[in] element_type_parse_position The position in the input stream,
+ where the `element_type` was read.
+ @warning Not all BSON element types are supported yet. An unsupported
+ @a element_type will give rise to a parse_error.114:
+ Unsupported BSON record type 0x...
+ @return whether a valid BSON-object/array was passed to the SAX parser
+ */
+ bool parse_bson_element_internal(const char_int_type element_type,
+ const std::size_t element_type_parse_position)
+ {
+ switch (element_type)
+ {
+ case 0x01: // double
+ {
+ double number{};
+ return get_number<double, true>(input_format_t::bson, number) && sax->number_float(static_cast<number_float_t>(number), "");
+ }
+
+ case 0x02: // string
+ {
+ std::int32_t len{};
+ string_t value;
+ return get_number<std::int32_t, true>(input_format_t::bson, len) && get_bson_string(len, value) && sax->string(value);
+ }
+
+ case 0x03: // object
+ {
+ return parse_bson_internal();
+ }
+
+ case 0x04: // array
+ {
+ return parse_bson_array();
+ }
+
+ case 0x05: // binary
+ {
+ std::int32_t len{};
+ binary_t value;
+ return get_number<std::int32_t, true>(input_format_t::bson, len) && get_bson_binary(len, value) && sax->binary(value);
+ }
+
+ case 0x08: // boolean
+ {
+ return sax->boolean(get() != 0);
+ }
+
+ case 0x0A: // null
+ {
+ return sax->null();
+ }
+
+ case 0x10: // int32
+ {
+ std::int32_t value{};
+ return get_number<std::int32_t, true>(input_format_t::bson, value) && sax->number_integer(value);
+ }
+
+ case 0x12: // int64
+ {
+ std::int64_t value{};
+ return get_number<std::int64_t, true>(input_format_t::bson, value) && sax->number_integer(value);
+ }
+
+ default: // anything else not supported (yet)
+ {
+ std::array<char, 3> cr{{}};
+ (std::snprintf)(cr.data(), cr.size(), "%.2hhX", static_cast<unsigned char>(element_type));
+ return sax->parse_error(element_type_parse_position, std::string(cr.data()), parse_error::create(114, element_type_parse_position, "Unsupported BSON record type 0x" + std::string(cr.data())));
+ }
+ }
+ }
+
+ /*!
+ @brief Read a BSON element list (as specified in the BSON-spec)
+
+ The same binary layout is used for objects and arrays, hence it must be
+ indicated with the argument @a is_array which one is expected
+ (true --> array, false --> object).
+
+ @param[in] is_array Determines if the element list being read is to be
+ treated as an object (@a is_array == false), or as an
+ array (@a is_array == true).
+ @return whether a valid BSON-object/array was passed to the SAX parser
+ */
+ bool parse_bson_element_list(const bool is_array)
+ {
+ string_t key;
+
+ while (auto element_type = get())
+ {
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::bson, "element list")))
+ {
+ return false;
+ }
+
+ const std::size_t element_type_parse_position = chars_read;
+ if (JSON_HEDLEY_UNLIKELY(!get_bson_cstr(key)))
+ {
+ return false;
+ }
+
+ if (!is_array && !sax->key(key))
+ {
+ return false;
+ }
+
+ if (JSON_HEDLEY_UNLIKELY(!parse_bson_element_internal(element_type, element_type_parse_position)))
+ {
+ return false;
+ }
+
+ // get_bson_cstr only appends
+ key.clear();
+ }
+
+ return true;
+ }
+
+ /*!
+ @brief Reads an array from the BSON input and passes it to the SAX-parser.
+ @return whether a valid BSON-array was passed to the SAX parser
+ */
+ bool parse_bson_array()
+ {
+ std::int32_t document_size{};
+ get_number<std::int32_t, true>(input_format_t::bson, document_size);
+
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_array(std::size_t(-1))))
+ {
+ return false;
+ }
+
+ if (JSON_HEDLEY_UNLIKELY(!parse_bson_element_list(/*is_array*/true)))
+ {
+ return false;
+ }
+
+ return sax->end_array();
+ }
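+
+    /*
+    Worked example (illustrative): the 12-byte BSON document
+
+        0x0C 0x00 0x00 0x00        total document size (12, int32 LE)
+        0x10 'a' 0x00              element type 0x10 (int32) with key "a"
+        0x01 0x00 0x00 0x00        value 1 (int32 LE)
+        0x00                       end-of-document marker
+
+    is decoded by the functions above into the JSON object {"a": 1}, e.g. via
+    the public interface nlohmann::json::from_bson(bytes).
+    */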
+
+ //////////
+ // CBOR //
+ //////////
+
+ /*!
+ @param[in] get_char whether a new character should be retrieved from the
+ input (true) or whether the last read character should
+ be considered instead (false)
+ @param[in] tag_handler how CBOR tags should be treated
+
+ @return whether a valid CBOR value was passed to the SAX parser
+ */
+ bool parse_cbor_internal(const bool get_char,
+ const cbor_tag_handler_t tag_handler)
+ {
+ switch (get_char ? get() : current)
+ {
+ // EOF
+ case std::char_traits<char_type>::eof():
+ return unexpect_eof(input_format_t::cbor, "value");
+
+ // Integer 0x00..0x17 (0..23)
+ case 0x00:
+ case 0x01:
+ case 0x02:
+ case 0x03:
+ case 0x04:
+ case 0x05:
+ case 0x06:
+ case 0x07:
+ case 0x08:
+ case 0x09:
+ case 0x0A:
+ case 0x0B:
+ case 0x0C:
+ case 0x0D:
+ case 0x0E:
+ case 0x0F:
+ case 0x10:
+ case 0x11:
+ case 0x12:
+ case 0x13:
+ case 0x14:
+ case 0x15:
+ case 0x16:
+ case 0x17:
+ return sax->number_unsigned(static_cast<number_unsigned_t>(current));
+
+ case 0x18: // Unsigned integer (one-byte uint8_t follows)
+ {
+ std::uint8_t number{};
+ return get_number(input_format_t::cbor, number) && sax->number_unsigned(number);
+ }
+
+ case 0x19: // Unsigned integer (two-byte uint16_t follows)
+ {
+ std::uint16_t number{};
+ return get_number(input_format_t::cbor, number) && sax->number_unsigned(number);
+ }
+
+ case 0x1A: // Unsigned integer (four-byte uint32_t follows)
+ {
+ std::uint32_t number{};
+ return get_number(input_format_t::cbor, number) && sax->number_unsigned(number);
+ }
+
+ case 0x1B: // Unsigned integer (eight-byte uint64_t follows)
+ {
+ std::uint64_t number{};
+ return get_number(input_format_t::cbor, number) && sax->number_unsigned(number);
+ }
+
+ // Negative integer -1-0x00..-1-0x17 (-1..-24)
+ case 0x20:
+ case 0x21:
+ case 0x22:
+ case 0x23:
+ case 0x24:
+ case 0x25:
+ case 0x26:
+ case 0x27:
+ case 0x28:
+ case 0x29:
+ case 0x2A:
+ case 0x2B:
+ case 0x2C:
+ case 0x2D:
+ case 0x2E:
+ case 0x2F:
+ case 0x30:
+ case 0x31:
+ case 0x32:
+ case 0x33:
+ case 0x34:
+ case 0x35:
+ case 0x36:
+ case 0x37:
+ return sax->number_integer(static_cast<std::int8_t>(0x20 - 1 - current));
+
+ case 0x38: // Negative integer (one-byte uint8_t follows)
+ {
+ std::uint8_t number{};
+ return get_number(input_format_t::cbor, number) && sax->number_integer(static_cast<number_integer_t>(-1) - number);
+ }
+
+ case 0x39: // Negative integer -1-n (two-byte uint16_t follows)
+ {
+ std::uint16_t number{};
+ return get_number(input_format_t::cbor, number) && sax->number_integer(static_cast<number_integer_t>(-1) - number);
+ }
+
+ case 0x3A: // Negative integer -1-n (four-byte uint32_t follows)
+ {
+ std::uint32_t number{};
+ return get_number(input_format_t::cbor, number) && sax->number_integer(static_cast<number_integer_t>(-1) - number);
+ }
+
+ case 0x3B: // Negative integer -1-n (eight-byte uint64_t follows)
+ {
+ std::uint64_t number{};
+ return get_number(input_format_t::cbor, number) && sax->number_integer(static_cast<number_integer_t>(-1)
+ - static_cast<number_integer_t>(number));
+ }
+
+ // Binary data (0x00..0x17 bytes follow)
+ case 0x40:
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x44:
+ case 0x45:
+ case 0x46:
+ case 0x47:
+ case 0x48:
+ case 0x49:
+ case 0x4A:
+ case 0x4B:
+ case 0x4C:
+ case 0x4D:
+ case 0x4E:
+ case 0x4F:
+ case 0x50:
+ case 0x51:
+ case 0x52:
+ case 0x53:
+ case 0x54:
+ case 0x55:
+ case 0x56:
+ case 0x57:
+ case 0x58: // Binary data (one-byte uint8_t for n follows)
+ case 0x59: // Binary data (two-byte uint16_t for n follow)
+ case 0x5A: // Binary data (four-byte uint32_t for n follow)
+ case 0x5B: // Binary data (eight-byte uint64_t for n follow)
+ case 0x5F: // Binary data (indefinite length)
+ {
+ binary_t b;
+ return get_cbor_binary(b) && sax->binary(b);
+ }
+
+ // UTF-8 string (0x00..0x17 bytes follow)
+ case 0x60:
+ case 0x61:
+ case 0x62:
+ case 0x63:
+ case 0x64:
+ case 0x65:
+ case 0x66:
+ case 0x67:
+ case 0x68:
+ case 0x69:
+ case 0x6A:
+ case 0x6B:
+ case 0x6C:
+ case 0x6D:
+ case 0x6E:
+ case 0x6F:
+ case 0x70:
+ case 0x71:
+ case 0x72:
+ case 0x73:
+ case 0x74:
+ case 0x75:
+ case 0x76:
+ case 0x77:
+ case 0x78: // UTF-8 string (one-byte uint8_t for n follows)
+ case 0x79: // UTF-8 string (two-byte uint16_t for n follow)
+ case 0x7A: // UTF-8 string (four-byte uint32_t for n follow)
+ case 0x7B: // UTF-8 string (eight-byte uint64_t for n follow)
+ case 0x7F: // UTF-8 string (indefinite length)
+ {
+ string_t s;
+ return get_cbor_string(s) && sax->string(s);
+ }
+
+ // array (0x00..0x17 data items follow)
+ case 0x80:
+ case 0x81:
+ case 0x82:
+ case 0x83:
+ case 0x84:
+ case 0x85:
+ case 0x86:
+ case 0x87:
+ case 0x88:
+ case 0x89:
+ case 0x8A:
+ case 0x8B:
+ case 0x8C:
+ case 0x8D:
+ case 0x8E:
+ case 0x8F:
+ case 0x90:
+ case 0x91:
+ case 0x92:
+ case 0x93:
+ case 0x94:
+ case 0x95:
+ case 0x96:
+ case 0x97:
+ return get_cbor_array(static_cast<std::size_t>(static_cast<unsigned int>(current) & 0x1Fu), tag_handler);
+
+ case 0x98: // array (one-byte uint8_t for n follows)
+ {
+ std::uint8_t len{};
+ return get_number(input_format_t::cbor, len) && get_cbor_array(static_cast<std::size_t>(len), tag_handler);
+ }
+
+ case 0x99: // array (two-byte uint16_t for n follow)
+ {
+ std::uint16_t len{};
+ return get_number(input_format_t::cbor, len) && get_cbor_array(static_cast<std::size_t>(len), tag_handler);
+ }
+
+ case 0x9A: // array (four-byte uint32_t for n follow)
+ {
+ std::uint32_t len{};
+ return get_number(input_format_t::cbor, len) && get_cbor_array(static_cast<std::size_t>(len), tag_handler);
+ }
+
+ case 0x9B: // array (eight-byte uint64_t for n follow)
+ {
+ std::uint64_t len{};
+ return get_number(input_format_t::cbor, len) && get_cbor_array(static_cast<std::size_t>(len), tag_handler);
+ }
+
+ case 0x9F: // array (indefinite length)
+ return get_cbor_array(std::size_t(-1), tag_handler);
+
+ // map (0x00..0x17 pairs of data items follow)
+ case 0xA0:
+ case 0xA1:
+ case 0xA2:
+ case 0xA3:
+ case 0xA4:
+ case 0xA5:
+ case 0xA6:
+ case 0xA7:
+ case 0xA8:
+ case 0xA9:
+ case 0xAA:
+ case 0xAB:
+ case 0xAC:
+ case 0xAD:
+ case 0xAE:
+ case 0xAF:
+ case 0xB0:
+ case 0xB1:
+ case 0xB2:
+ case 0xB3:
+ case 0xB4:
+ case 0xB5:
+ case 0xB6:
+ case 0xB7:
+ return get_cbor_object(static_cast<std::size_t>(static_cast<unsigned int>(current) & 0x1Fu), tag_handler);
+
+ case 0xB8: // map (one-byte uint8_t for n follows)
+ {
+ std::uint8_t len{};
+ return get_number(input_format_t::cbor, len) && get_cbor_object(static_cast<std::size_t>(len), tag_handler);
+ }
+
+ case 0xB9: // map (two-byte uint16_t for n follow)
+ {
+ std::uint16_t len{};
+ return get_number(input_format_t::cbor, len) && get_cbor_object(static_cast<std::size_t>(len), tag_handler);
+ }
+
+ case 0xBA: // map (four-byte uint32_t for n follow)
+ {
+ std::uint32_t len{};
+ return get_number(input_format_t::cbor, len) && get_cbor_object(static_cast<std::size_t>(len), tag_handler);
+ }
+
+ case 0xBB: // map (eight-byte uint64_t for n follow)
+ {
+ std::uint64_t len{};
+ return get_number(input_format_t::cbor, len) && get_cbor_object(static_cast<std::size_t>(len), tag_handler);
+ }
+
+ case 0xBF: // map (indefinite length)
+ return get_cbor_object(std::size_t(-1), tag_handler);
+
+ case 0xC6: // tagged item
+ case 0xC7:
+ case 0xC8:
+ case 0xC9:
+ case 0xCA:
+ case 0xCB:
+ case 0xCC:
+ case 0xCD:
+ case 0xCE:
+ case 0xCF:
+ case 0xD0:
+ case 0xD1:
+ case 0xD2:
+ case 0xD3:
+ case 0xD4:
+ case 0xD8: // tagged item (1 bytes follow)
+ case 0xD9: // tagged item (2 bytes follow)
+ case 0xDA: // tagged item (4 bytes follow)
+ case 0xDB: // tagged item (8 bytes follow)
+ {
+ switch (tag_handler)
+ {
+ case cbor_tag_handler_t::error:
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::cbor, "invalid byte: 0x" + last_token, "value")));
+ }
+
+ case cbor_tag_handler_t::ignore:
+ {
+ switch (current)
+ {
+ case 0xD8:
+ {
+ std::uint8_t len{};
+ get_number(input_format_t::cbor, len);
+ break;
+ }
+ case 0xD9:
+ {
+ std::uint16_t len{};
+ get_number(input_format_t::cbor, len);
+ break;
+ }
+ case 0xDA:
+ {
+ std::uint32_t len{};
+ get_number(input_format_t::cbor, len);
+ break;
+ }
+ case 0xDB:
+ {
+ std::uint64_t len{};
+ get_number(input_format_t::cbor, len);
+ break;
+ }
+ default:
+ break;
+ }
+ return parse_cbor_internal(true, tag_handler);
+ }
+
+ default: // LCOV_EXCL_LINE
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
+ }
+ }
+
+ case 0xF4: // false
+ return sax->boolean(false);
+
+ case 0xF5: // true
+ return sax->boolean(true);
+
+ case 0xF6: // null
+ return sax->null();
+
+ case 0xF9: // Half-Precision Float (two-byte IEEE 754)
+ {
+ const auto byte1_raw = get();
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::cbor, "number")))
+ {
+ return false;
+ }
+ const auto byte2_raw = get();
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::cbor, "number")))
+ {
+ return false;
+ }
+
+ const auto byte1 = static_cast<unsigned char>(byte1_raw);
+ const auto byte2 = static_cast<unsigned char>(byte2_raw);
+
+ // code from RFC 7049, Appendix D, Figure 3:
+ // As half-precision floating-point numbers were only added
+ // to IEEE 754 in 2008, today's programming platforms often
+ // still only have limited support for them. It is very
+ // easy to include at least decoding support for them even
+ // without such support. An example of a small decoder for
+ // half-precision floating-point numbers in the C language
+ // is shown in Fig. 3.
+ const auto half = static_cast<unsigned int>((byte1 << 8u) + byte2);
+ const double val = [&half]
+ {
+ const int exp = (half >> 10u) & 0x1Fu;
+ const unsigned int mant = half & 0x3FFu;
+ JSON_ASSERT(0 <= exp && exp <= 32);
+ JSON_ASSERT(mant <= 1024);
+ switch (exp)
+ {
+ case 0:
+ return std::ldexp(mant, -24);
+ case 31:
+ return (mant == 0)
+ ? std::numeric_limits<double>::infinity()
+ : std::numeric_limits<double>::quiet_NaN();
+ default:
+ return std::ldexp(mant + 1024, exp - 25);
+ }
+ }();
+ return sax->number_float((half & 0x8000u) != 0
+ ? static_cast<number_float_t>(-val)
+ : static_cast<number_float_t>(val), "");
+ }
+
+ case 0xFA: // Single-Precision Float (four-byte IEEE 754)
+ {
+ float number{};
+ return get_number(input_format_t::cbor, number) && sax->number_float(static_cast<number_float_t>(number), "");
+ }
+
+ case 0xFB: // Double-Precision Float (eight-byte IEEE 754)
+ {
+ double number{};
+ return get_number(input_format_t::cbor, number) && sax->number_float(static_cast<number_float_t>(number), "");
+ }
+
+ default: // anything else (0xFF is handled inside the other types)
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::cbor, "invalid byte: 0x" + last_token, "value")));
+ }
+ }
+ }
+
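+ /* Usage sketch (illustrative only): this dispatcher backs the public
+    basic_json::from_cbor() helper. For example, 0x83 0x01 0x02 0x03 encodes
+    the array [1,2,3], and 0xF9 0x3C 0x00 is the half-precision 1.0 handled
+    in case 0xF9. The tag-handler argument assumed below corresponds to the
+    cbor_tag_handler_t parameter used above.
+
+    @code{.cpp}
+    #include <nlohmann/json.hpp>
+    #include <cassert>
+    #include <cstdint>
+    #include <vector>
+    using nlohmann::json;
+
+    int main()
+    {
+        const std::vector<std::uint8_t> arr = {0x83, 0x01, 0x02, 0x03};
+        assert(json::from_cbor(arr) == json({1, 2, 3}));
+
+        const std::vector<std::uint8_t> half = {0xF9, 0x3C, 0x00};
+        assert(json::from_cbor(half) == json(1.0));
+
+        // tag 42 (0xD8 0x2A) wrapping the integer 1; skipped with ::ignore
+        const std::vector<std::uint8_t> tagged = {0xD8, 0x2A, 0x01};
+        assert(json::from_cbor(tagged, true, true,
+                               json::cbor_tag_handler_t::ignore) == json(1));
+    }
+    @endcode
+ */
+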
+ /*!
+ @brief reads a CBOR string
+
+ This function first reads starting bytes to determine the expected
+ string length and then copies this number of bytes into a string.
+ Additionally, CBOR's strings with indefinite lengths are supported.
+
+ @param[out] result created string
+
+ @return whether string creation completed
+ */
+ bool get_cbor_string(string_t& result)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::cbor, "string")))
+ {
+ return false;
+ }
+
+ switch (current)
+ {
+ // UTF-8 string (0x00..0x17 bytes follow)
+ case 0x60:
+ case 0x61:
+ case 0x62:
+ case 0x63:
+ case 0x64:
+ case 0x65:
+ case 0x66:
+ case 0x67:
+ case 0x68:
+ case 0x69:
+ case 0x6A:
+ case 0x6B:
+ case 0x6C:
+ case 0x6D:
+ case 0x6E:
+ case 0x6F:
+ case 0x70:
+ case 0x71:
+ case 0x72:
+ case 0x73:
+ case 0x74:
+ case 0x75:
+ case 0x76:
+ case 0x77:
+ {
+ return get_string(input_format_t::cbor, static_cast<unsigned int>(current) & 0x1Fu, result);
+ }
+
+ case 0x78: // UTF-8 string (one-byte uint8_t for n follows)
+ {
+ std::uint8_t len{};
+ return get_number(input_format_t::cbor, len) && get_string(input_format_t::cbor, len, result);
+ }
+
+ case 0x79: // UTF-8 string (two-byte uint16_t for n follow)
+ {
+ std::uint16_t len{};
+ return get_number(input_format_t::cbor, len) && get_string(input_format_t::cbor, len, result);
+ }
+
+ case 0x7A: // UTF-8 string (four-byte uint32_t for n follow)
+ {
+ std::uint32_t len{};
+ return get_number(input_format_t::cbor, len) && get_string(input_format_t::cbor, len, result);
+ }
+
+ case 0x7B: // UTF-8 string (eight-byte uint64_t for n follow)
+ {
+ std::uint64_t len{};
+ return get_number(input_format_t::cbor, len) && get_string(input_format_t::cbor, len, result);
+ }
+
+ case 0x7F: // UTF-8 string (indefinite length)
+ {
+ while (get() != 0xFF)
+ {
+ string_t chunk;
+ if (!get_cbor_string(chunk))
+ {
+ return false;
+ }
+ result.append(chunk);
+ }
+ return true;
+ }
+
+ default:
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::cbor, "expected length specification (0x60-0x7B) or indefinite string type (0x7F); last byte: 0x" + last_token, "string")));
+ }
+ }
+ }
+
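+ /* Usage sketch (illustrative only, reusing the includes from the previous
+    sketch): case 0x7F above concatenates chunks of an indefinite-length
+    string until the 0xFF "break" byte, so 0x7F 0x62 'a' 'b' 0x61 'c' 0xFF
+    decodes to "abc".
+
+    @code{.cpp}
+    const std::vector<std::uint8_t> bytes = {0x7F, 0x62, 'a', 'b', 0x61, 'c', 0xFF};
+    assert(nlohmann::json::from_cbor(bytes) == "abc");
+    @endcode
+ */
+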
+ /*!
+ @brief reads a CBOR byte array
+
+ This function first reads starting bytes to determine the expected
+ byte array length and then copies this number of bytes into the byte array.
+ Additionally, CBOR's byte arrays with indefinite lengths are supported.
+
+ @param[out] result created byte array
+
+ @return whether byte array creation completed
+ */
+ bool get_cbor_binary(binary_t& result)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::cbor, "binary")))
+ {
+ return false;
+ }
+
+ switch (current)
+ {
+ // Binary data (0x00..0x17 bytes follow)
+ case 0x40:
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x44:
+ case 0x45:
+ case 0x46:
+ case 0x47:
+ case 0x48:
+ case 0x49:
+ case 0x4A:
+ case 0x4B:
+ case 0x4C:
+ case 0x4D:
+ case 0x4E:
+ case 0x4F:
+ case 0x50:
+ case 0x51:
+ case 0x52:
+ case 0x53:
+ case 0x54:
+ case 0x55:
+ case 0x56:
+ case 0x57:
+ {
+ return get_binary(input_format_t::cbor, static_cast<unsigned int>(current) & 0x1Fu, result);
+ }
+
+ case 0x58: // Binary data (one-byte uint8_t for n follows)
+ {
+ std::uint8_t len{};
+ return get_number(input_format_t::cbor, len) &&
+ get_binary(input_format_t::cbor, len, result);
+ }
+
+ case 0x59: // Binary data (two-byte uint16_t for n follow)
+ {
+ std::uint16_t len{};
+ return get_number(input_format_t::cbor, len) &&
+ get_binary(input_format_t::cbor, len, result);
+ }
+
+ case 0x5A: // Binary data (four-byte uint32_t for n follow)
+ {
+ std::uint32_t len{};
+ return get_number(input_format_t::cbor, len) &&
+ get_binary(input_format_t::cbor, len, result);
+ }
+
+ case 0x5B: // Binary data (eight-byte uint64_t for n follow)
+ {
+ std::uint64_t len{};
+ return get_number(input_format_t::cbor, len) &&
+ get_binary(input_format_t::cbor, len, result);
+ }
+
+ case 0x5F: // Binary data (indefinite length)
+ {
+ while (get() != 0xFF)
+ {
+ binary_t chunk;
+ if (!get_cbor_binary(chunk))
+ {
+ return false;
+ }
+ result.insert(result.end(), chunk.begin(), chunk.end());
+ }
+ return true;
+ }
+
+ default:
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::cbor, "expected length specification (0x40-0x5B) or indefinite binary array type (0x5F); last byte: 0x" + last_token, "binary")));
+ }
+ }
+ }
+
+ /*!
+ @param[in] len the length of the array or std::size_t(-1) for an
+ array of indefinite size
+ @param[in] tag_handler how CBOR tags should be treated
+ @return whether array creation completed
+ */
+ bool get_cbor_array(const std::size_t len,
+ const cbor_tag_handler_t tag_handler)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_array(len)))
+ {
+ return false;
+ }
+
+ if (len != std::size_t(-1))
+ {
+ for (std::size_t i = 0; i < len; ++i)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!parse_cbor_internal(true, tag_handler)))
+ {
+ return false;
+ }
+ }
+ }
+ else
+ {
+ while (get() != 0xFF)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!parse_cbor_internal(false, tag_handler)))
+ {
+ return false;
+ }
+ }
+ }
+
+ return sax->end_array();
+ }
+
+ /*!
+ @param[in] len the length of the object or std::size_t(-1) for an
+ object of indefinite size
+ @param[in] tag_handler how CBOR tags should be treated
+ @return whether object creation completed
+ */
+ bool get_cbor_object(const std::size_t len,
+ const cbor_tag_handler_t tag_handler)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_object(len)))
+ {
+ return false;
+ }
+
+ string_t key;
+ if (len != std::size_t(-1))
+ {
+ for (std::size_t i = 0; i < len; ++i)
+ {
+ get();
+ if (JSON_HEDLEY_UNLIKELY(!get_cbor_string(key) || !sax->key(key)))
+ {
+ return false;
+ }
+
+ if (JSON_HEDLEY_UNLIKELY(!parse_cbor_internal(true, tag_handler)))
+ {
+ return false;
+ }
+ key.clear();
+ }
+ }
+ else
+ {
+ while (get() != 0xFF)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!get_cbor_string(key) || !sax->key(key)))
+ {
+ return false;
+ }
+
+ if (JSON_HEDLEY_UNLIKELY(!parse_cbor_internal(true, tag_handler)))
+ {
+ return false;
+ }
+ key.clear();
+ }
+ }
+
+ return sax->end_object();
+ }
+
+ /////////////
+ // MsgPack //
+ /////////////
+
+ /*!
+ @return whether a valid MessagePack value was passed to the SAX parser
+ */
+ bool parse_msgpack_internal()
+ {
+ switch (get())
+ {
+ // EOF
+ case std::char_traits<char_type>::eof():
+ return unexpect_eof(input_format_t::msgpack, "value");
+
+ // positive fixint
+ case 0x00:
+ case 0x01:
+ case 0x02:
+ case 0x03:
+ case 0x04:
+ case 0x05:
+ case 0x06:
+ case 0x07:
+ case 0x08:
+ case 0x09:
+ case 0x0A:
+ case 0x0B:
+ case 0x0C:
+ case 0x0D:
+ case 0x0E:
+ case 0x0F:
+ case 0x10:
+ case 0x11:
+ case 0x12:
+ case 0x13:
+ case 0x14:
+ case 0x15:
+ case 0x16:
+ case 0x17:
+ case 0x18:
+ case 0x19:
+ case 0x1A:
+ case 0x1B:
+ case 0x1C:
+ case 0x1D:
+ case 0x1E:
+ case 0x1F:
+ case 0x20:
+ case 0x21:
+ case 0x22:
+ case 0x23:
+ case 0x24:
+ case 0x25:
+ case 0x26:
+ case 0x27:
+ case 0x28:
+ case 0x29:
+ case 0x2A:
+ case 0x2B:
+ case 0x2C:
+ case 0x2D:
+ case 0x2E:
+ case 0x2F:
+ case 0x30:
+ case 0x31:
+ case 0x32:
+ case 0x33:
+ case 0x34:
+ case 0x35:
+ case 0x36:
+ case 0x37:
+ case 0x38:
+ case 0x39:
+ case 0x3A:
+ case 0x3B:
+ case 0x3C:
+ case 0x3D:
+ case 0x3E:
+ case 0x3F:
+ case 0x40:
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x44:
+ case 0x45:
+ case 0x46:
+ case 0x47:
+ case 0x48:
+ case 0x49:
+ case 0x4A:
+ case 0x4B:
+ case 0x4C:
+ case 0x4D:
+ case 0x4E:
+ case 0x4F:
+ case 0x50:
+ case 0x51:
+ case 0x52:
+ case 0x53:
+ case 0x54:
+ case 0x55:
+ case 0x56:
+ case 0x57:
+ case 0x58:
+ case 0x59:
+ case 0x5A:
+ case 0x5B:
+ case 0x5C:
+ case 0x5D:
+ case 0x5E:
+ case 0x5F:
+ case 0x60:
+ case 0x61:
+ case 0x62:
+ case 0x63:
+ case 0x64:
+ case 0x65:
+ case 0x66:
+ case 0x67:
+ case 0x68:
+ case 0x69:
+ case 0x6A:
+ case 0x6B:
+ case 0x6C:
+ case 0x6D:
+ case 0x6E:
+ case 0x6F:
+ case 0x70:
+ case 0x71:
+ case 0x72:
+ case 0x73:
+ case 0x74:
+ case 0x75:
+ case 0x76:
+ case 0x77:
+ case 0x78:
+ case 0x79:
+ case 0x7A:
+ case 0x7B:
+ case 0x7C:
+ case 0x7D:
+ case 0x7E:
+ case 0x7F:
+ return sax->number_unsigned(static_cast<number_unsigned_t>(current));
+
+ // fixmap
+ case 0x80:
+ case 0x81:
+ case 0x82:
+ case 0x83:
+ case 0x84:
+ case 0x85:
+ case 0x86:
+ case 0x87:
+ case 0x88:
+ case 0x89:
+ case 0x8A:
+ case 0x8B:
+ case 0x8C:
+ case 0x8D:
+ case 0x8E:
+ case 0x8F:
+ return get_msgpack_object(static_cast<std::size_t>(static_cast<unsigned int>(current) & 0x0Fu));
+
+ // fixarray
+ case 0x90:
+ case 0x91:
+ case 0x92:
+ case 0x93:
+ case 0x94:
+ case 0x95:
+ case 0x96:
+ case 0x97:
+ case 0x98:
+ case 0x99:
+ case 0x9A:
+ case 0x9B:
+ case 0x9C:
+ case 0x9D:
+ case 0x9E:
+ case 0x9F:
+ return get_msgpack_array(static_cast<std::size_t>(static_cast<unsigned int>(current) & 0x0Fu));
+
+ // fixstr
+ case 0xA0:
+ case 0xA1:
+ case 0xA2:
+ case 0xA3:
+ case 0xA4:
+ case 0xA5:
+ case 0xA6:
+ case 0xA7:
+ case 0xA8:
+ case 0xA9:
+ case 0xAA:
+ case 0xAB:
+ case 0xAC:
+ case 0xAD:
+ case 0xAE:
+ case 0xAF:
+ case 0xB0:
+ case 0xB1:
+ case 0xB2:
+ case 0xB3:
+ case 0xB4:
+ case 0xB5:
+ case 0xB6:
+ case 0xB7:
+ case 0xB8:
+ case 0xB9:
+ case 0xBA:
+ case 0xBB:
+ case 0xBC:
+ case 0xBD:
+ case 0xBE:
+ case 0xBF:
+ case 0xD9: // str 8
+ case 0xDA: // str 16
+ case 0xDB: // str 32
+ {
+ string_t s;
+ return get_msgpack_string(s) && sax->string(s);
+ }
+
+ case 0xC0: // nil
+ return sax->null();
+
+ case 0xC2: // false
+ return sax->boolean(false);
+
+ case 0xC3: // true
+ return sax->boolean(true);
+
+ case 0xC4: // bin 8
+ case 0xC5: // bin 16
+ case 0xC6: // bin 32
+ case 0xC7: // ext 8
+ case 0xC8: // ext 16
+ case 0xC9: // ext 32
+ case 0xD4: // fixext 1
+ case 0xD5: // fixext 2
+ case 0xD6: // fixext 4
+ case 0xD7: // fixext 8
+ case 0xD8: // fixext 16
+ {
+ binary_t b;
+ return get_msgpack_binary(b) && sax->binary(b);
+ }
+
+ case 0xCA: // float 32
+ {
+ float number{};
+ return get_number(input_format_t::msgpack, number) && sax->number_float(static_cast<number_float_t>(number), "");
+ }
+
+ case 0xCB: // float 64
+ {
+ double number{};
+ return get_number(input_format_t::msgpack, number) && sax->number_float(static_cast<number_float_t>(number), "");
+ }
+
+ case 0xCC: // uint 8
+ {
+ std::uint8_t number{};
+ return get_number(input_format_t::msgpack, number) && sax->number_unsigned(number);
+ }
+
+ case 0xCD: // uint 16
+ {
+ std::uint16_t number{};
+ return get_number(input_format_t::msgpack, number) && sax->number_unsigned(number);
+ }
+
+ case 0xCE: // uint 32
+ {
+ std::uint32_t number{};
+ return get_number(input_format_t::msgpack, number) && sax->number_unsigned(number);
+ }
+
+ case 0xCF: // uint 64
+ {
+ std::uint64_t number{};
+ return get_number(input_format_t::msgpack, number) && sax->number_unsigned(number);
+ }
+
+ case 0xD0: // int 8
+ {
+ std::int8_t number{};
+ return get_number(input_format_t::msgpack, number) && sax->number_integer(number);
+ }
+
+ case 0xD1: // int 16
+ {
+ std::int16_t number{};
+ return get_number(input_format_t::msgpack, number) && sax->number_integer(number);
+ }
+
+ case 0xD2: // int 32
+ {
+ std::int32_t number{};
+ return get_number(input_format_t::msgpack, number) && sax->number_integer(number);
+ }
+
+ case 0xD3: // int 64
+ {
+ std::int64_t number{};
+ return get_number(input_format_t::msgpack, number) && sax->number_integer(number);
+ }
+
+ case 0xDC: // array 16
+ {
+ std::uint16_t len{};
+ return get_number(input_format_t::msgpack, len) && get_msgpack_array(static_cast<std::size_t>(len));
+ }
+
+ case 0xDD: // array 32
+ {
+ std::uint32_t len{};
+ return get_number(input_format_t::msgpack, len) && get_msgpack_array(static_cast<std::size_t>(len));
+ }
+
+ case 0xDE: // map 16
+ {
+ std::uint16_t len{};
+ return get_number(input_format_t::msgpack, len) && get_msgpack_object(static_cast<std::size_t>(len));
+ }
+
+ case 0xDF: // map 32
+ {
+ std::uint32_t len{};
+ return get_number(input_format_t::msgpack, len) && get_msgpack_object(static_cast<std::size_t>(len));
+ }
+
+ // negative fixint
+ case 0xE0:
+ case 0xE1:
+ case 0xE2:
+ case 0xE3:
+ case 0xE4:
+ case 0xE5:
+ case 0xE6:
+ case 0xE7:
+ case 0xE8:
+ case 0xE9:
+ case 0xEA:
+ case 0xEB:
+ case 0xEC:
+ case 0xED:
+ case 0xEE:
+ case 0xEF:
+ case 0xF0:
+ case 0xF1:
+ case 0xF2:
+ case 0xF3:
+ case 0xF4:
+ case 0xF5:
+ case 0xF6:
+ case 0xF7:
+ case 0xF8:
+ case 0xF9:
+ case 0xFA:
+ case 0xFB:
+ case 0xFC:
+ case 0xFD:
+ case 0xFE:
+ case 0xFF:
+ return sax->number_integer(static_cast<std::int8_t>(current));
+
+ default: // anything else
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::msgpack, "invalid byte: 0x" + last_token, "value")));
+ }
+ }
+ }
+
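+ /* Usage sketch (illustrative only): this dispatcher backs the public
+    basic_json::from_msgpack() helper. The canonical MessagePack example
+    {"compact": true, "schema": 0} is the 18-byte fixmap below.
+
+    @code{.cpp}
+    const std::vector<std::uint8_t> bytes =
+    {
+        0x82, 0xA7, 'c', 'o', 'm', 'p', 'a', 'c', 't', 0xC3,
+        0xA6, 's', 'c', 'h', 'e', 'm', 'a', 0x00
+    };
+    const nlohmann::json j = nlohmann::json::from_msgpack(bytes);
+    assert(j == nlohmann::json({{"compact", true}, {"schema", 0}}));
+    @endcode
+ */
+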
+ /*!
+ @brief reads a MessagePack string
+
+ This function first reads starting bytes to determine the expected
+ string length and then copies this number of bytes into a string.
+
+ @param[out] result created string
+
+ @return whether string creation completed
+ */
+ bool get_msgpack_string(string_t& result)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::msgpack, "string")))
+ {
+ return false;
+ }
+
+ switch (current)
+ {
+ // fixstr
+ case 0xA0:
+ case 0xA1:
+ case 0xA2:
+ case 0xA3:
+ case 0xA4:
+ case 0xA5:
+ case 0xA6:
+ case 0xA7:
+ case 0xA8:
+ case 0xA9:
+ case 0xAA:
+ case 0xAB:
+ case 0xAC:
+ case 0xAD:
+ case 0xAE:
+ case 0xAF:
+ case 0xB0:
+ case 0xB1:
+ case 0xB2:
+ case 0xB3:
+ case 0xB4:
+ case 0xB5:
+ case 0xB6:
+ case 0xB7:
+ case 0xB8:
+ case 0xB9:
+ case 0xBA:
+ case 0xBB:
+ case 0xBC:
+ case 0xBD:
+ case 0xBE:
+ case 0xBF:
+ {
+ return get_string(input_format_t::msgpack, static_cast<unsigned int>(current) & 0x1Fu, result);
+ }
+
+ case 0xD9: // str 8
+ {
+ std::uint8_t len{};
+ return get_number(input_format_t::msgpack, len) && get_string(input_format_t::msgpack, len, result);
+ }
+
+ case 0xDA: // str 16
+ {
+ std::uint16_t len{};
+ return get_number(input_format_t::msgpack, len) && get_string(input_format_t::msgpack, len, result);
+ }
+
+ case 0xDB: // str 32
+ {
+ std::uint32_t len{};
+ return get_number(input_format_t::msgpack, len) && get_string(input_format_t::msgpack, len, result);
+ }
+
+ default:
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::msgpack, "expected length specification (0xA0-0xBF, 0xD9-0xDB); last byte: 0x" + last_token, "string")));
+ }
+ }
+ }
+
+ /*!
+ @brief reads a MessagePack byte array
+
+ This function first reads starting bytes to determine the expected
+ byte array length and then copies this number of bytes into a byte array.
+
+ @param[out] result created byte array
+
+ @return whether byte array creation completed
+ */
+ bool get_msgpack_binary(binary_t& result)
+ {
+ // helper function to set the subtype
+ auto assign_and_return_true = [&result](std::int8_t subtype)
+ {
+ result.set_subtype(static_cast<std::uint8_t>(subtype));
+ return true;
+ };
+
+ switch (current)
+ {
+ case 0xC4: // bin 8
+ {
+ std::uint8_t len{};
+ return get_number(input_format_t::msgpack, len) &&
+ get_binary(input_format_t::msgpack, len, result);
+ }
+
+ case 0xC5: // bin 16
+ {
+ std::uint16_t len{};
+ return get_number(input_format_t::msgpack, len) &&
+ get_binary(input_format_t::msgpack, len, result);
+ }
+
+ case 0xC6: // bin 32
+ {
+ std::uint32_t len{};
+ return get_number(input_format_t::msgpack, len) &&
+ get_binary(input_format_t::msgpack, len, result);
+ }
+
+ case 0xC7: // ext 8
+ {
+ std::uint8_t len{};
+ std::int8_t subtype{};
+ return get_number(input_format_t::msgpack, len) &&
+ get_number(input_format_t::msgpack, subtype) &&
+ get_binary(input_format_t::msgpack, len, result) &&
+ assign_and_return_true(subtype);
+ }
+
+ case 0xC8: // ext 16
+ {
+ std::uint16_t len{};
+ std::int8_t subtype{};
+ return get_number(input_format_t::msgpack, len) &&
+ get_number(input_format_t::msgpack, subtype) &&
+ get_binary(input_format_t::msgpack, len, result) &&
+ assign_and_return_true(subtype);
+ }
+
+ case 0xC9: // ext 32
+ {
+ std::uint32_t len{};
+ std::int8_t subtype{};
+ return get_number(input_format_t::msgpack, len) &&
+ get_number(input_format_t::msgpack, subtype) &&
+ get_binary(input_format_t::msgpack, len, result) &&
+ assign_and_return_true(subtype);
+ }
+
+ case 0xD4: // fixext 1
+ {
+ std::int8_t subtype{};
+ return get_number(input_format_t::msgpack, subtype) &&
+ get_binary(input_format_t::msgpack, 1, result) &&
+ assign_and_return_true(subtype);
+ }
+
+ case 0xD5: // fixext 2
+ {
+ std::int8_t subtype{};
+ return get_number(input_format_t::msgpack, subtype) &&
+ get_binary(input_format_t::msgpack, 2, result) &&
+ assign_and_return_true(subtype);
+ }
+
+ case 0xD6: // fixext 4
+ {
+ std::int8_t subtype{};
+ return get_number(input_format_t::msgpack, subtype) &&
+ get_binary(input_format_t::msgpack, 4, result) &&
+ assign_and_return_true(subtype);
+ }
+
+ case 0xD7: // fixext 8
+ {
+ std::int8_t subtype{};
+ return get_number(input_format_t::msgpack, subtype) &&
+ get_binary(input_format_t::msgpack, 8, result) &&
+ assign_and_return_true(subtype);
+ }
+
+ case 0xD8: // fixext 16
+ {
+ std::int8_t subtype{};
+ return get_number(input_format_t::msgpack, subtype) &&
+ get_binary(input_format_t::msgpack, 16, result) &&
+ assign_and_return_true(subtype);
+ }
+
+ default: // LCOV_EXCL_LINE
+ return false; // LCOV_EXCL_LINE
+ }
+ }
+
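+ /* Usage sketch (illustrative only): the ext/fixext families map to binary
+    values whose subtype carries the MessagePack extension type. For example,
+    fixext 1 (0xD4) with type 0x01 and payload 0x2A yields a one-byte binary
+    value with subtype 1.
+
+    @code{.cpp}
+    const std::vector<std::uint8_t> bytes = {0xD4, 0x01, 0x2A};
+    const nlohmann::json j = nlohmann::json::from_msgpack(bytes);
+    assert(j.is_binary());
+    assert(j.get_binary().subtype() == 1);
+    assert(j.get_binary().size() == 1 && j.get_binary()[0] == 0x2A);
+    @endcode
+ */
+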
+ /*!
+ @param[in] len the length of the array
+ @return whether array creation completed
+ */
+ bool get_msgpack_array(const std::size_t len)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_array(len)))
+ {
+ return false;
+ }
+
+ for (std::size_t i = 0; i < len; ++i)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!parse_msgpack_internal()))
+ {
+ return false;
+ }
+ }
+
+ return sax->end_array();
+ }
+
+ /*!
+ @param[in] len the length of the object
+ @return whether object creation completed
+ */
+ bool get_msgpack_object(const std::size_t len)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_object(len)))
+ {
+ return false;
+ }
+
+ string_t key;
+ for (std::size_t i = 0; i < len; ++i)
+ {
+ get();
+ if (JSON_HEDLEY_UNLIKELY(!get_msgpack_string(key) || !sax->key(key)))
+ {
+ return false;
+ }
+
+ if (JSON_HEDLEY_UNLIKELY(!parse_msgpack_internal()))
+ {
+ return false;
+ }
+ key.clear();
+ }
+
+ return sax->end_object();
+ }
+
+ ////////////
+ // UBJSON //
+ ////////////
+
+ /*!
+ @param[in] get_char whether a new character should be retrieved from the
+ input (true, default) or whether the last read
+ character should be considered instead
+
+ @return whether a valid UBJSON value was passed to the SAX parser
+ */
+ bool parse_ubjson_internal(const bool get_char = true)
+ {
+ return get_ubjson_value(get_char ? get_ignore_noop() : current);
+ }
+
+ /*!
+ @brief reads a UBJSON string
+
+ This function is either called after reading the 'S' byte explicitly
+ indicating a string, or in case of an object key where the 'S' byte can be
+ left out.
+
+ @param[out] result created string
+ @param[in] get_char whether a new character should be retrieved from the
+ input (true, default) or whether the last read
+ character should be considered instead
+
+ @return whether string creation completed
+ */
+ bool get_ubjson_string(string_t& result, const bool get_char = true)
+ {
+ if (get_char)
+ {
+ get(); // TODO(niels): may we ignore N here?
+ }
+
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::ubjson, "value")))
+ {
+ return false;
+ }
+
+ switch (current)
+ {
+ case 'U':
+ {
+ std::uint8_t len{};
+ return get_number(input_format_t::ubjson, len) && get_string(input_format_t::ubjson, len, result);
+ }
+
+ case 'i':
+ {
+ std::int8_t len{};
+ return get_number(input_format_t::ubjson, len) && get_string(input_format_t::ubjson, len, result);
+ }
+
+ case 'I':
+ {
+ std::int16_t len{};
+ return get_number(input_format_t::ubjson, len) && get_string(input_format_t::ubjson, len, result);
+ }
+
+ case 'l':
+ {
+ std::int32_t len{};
+ return get_number(input_format_t::ubjson, len) && get_string(input_format_t::ubjson, len, result);
+ }
+
+ case 'L':
+ {
+ std::int64_t len{};
+ return get_number(input_format_t::ubjson, len) && get_string(input_format_t::ubjson, len, result);
+ }
+
+ default:
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "expected length type specification (U, i, I, l, L); last byte: 0x" + last_token, "string")));
+ }
+ }
+ }
+
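+ /* Usage sketch (illustrative only): a UBJSON string is a length-typed
+    payload, e.g. 'S' 'i' 5 'h' 'e' 'l' 'l' 'o'.
+
+    @code{.cpp}
+    const std::vector<std::uint8_t> bytes = {'S', 'i', 0x05, 'h', 'e', 'l', 'l', 'o'};
+    assert(nlohmann::json::from_ubjson(bytes) == "hello");
+    @endcode
+ */
+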
+ /*!
+ @param[out] result determined size
+ @return whether size determination completed
+ */
+ bool get_ubjson_size_value(std::size_t& result)
+ {
+ switch (get_ignore_noop())
+ {
+ case 'U':
+ {
+ std::uint8_t number{};
+ if (JSON_HEDLEY_UNLIKELY(!get_number(input_format_t::ubjson, number)))
+ {
+ return false;
+ }
+ result = static_cast<std::size_t>(number);
+ return true;
+ }
+
+ case 'i':
+ {
+ std::int8_t number{};
+ if (JSON_HEDLEY_UNLIKELY(!get_number(input_format_t::ubjson, number)))
+ {
+ return false;
+ }
+ result = static_cast<std::size_t>(number);
+ return true;
+ }
+
+ case 'I':
+ {
+ std::int16_t number{};
+ if (JSON_HEDLEY_UNLIKELY(!get_number(input_format_t::ubjson, number)))
+ {
+ return false;
+ }
+ result = static_cast<std::size_t>(number);
+ return true;
+ }
+
+ case 'l':
+ {
+ std::int32_t number{};
+ if (JSON_HEDLEY_UNLIKELY(!get_number(input_format_t::ubjson, number)))
+ {
+ return false;
+ }
+ result = static_cast<std::size_t>(number);
+ return true;
+ }
+
+ case 'L':
+ {
+ std::int64_t number{};
+ if (JSON_HEDLEY_UNLIKELY(!get_number(input_format_t::ubjson, number)))
+ {
+ return false;
+ }
+ result = static_cast<std::size_t>(number);
+ return true;
+ }
+
+ default:
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "expected length type specification (U, i, I, l, L) after '#'; last byte: 0x" + last_token, "size")));
+ }
+ }
+ }
+
+ /*!
+ @brief determine the type and size for a container
+
+ In the optimized UBJSON format, a type and a size can be provided to allow
+ for a more compact representation.
+
+ @param[out] result pair of the size and the type
+
+ @return whether pair creation completed
+ */
+ bool get_ubjson_size_type(std::pair<std::size_t, char_int_type>& result)
+ {
+ result.first = string_t::npos; // size
+ result.second = 0; // type
+
+ get_ignore_noop();
+
+ if (current == '$')
+ {
+ result.second = get(); // must not ignore 'N', because 'N' may be the type
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::ubjson, "type")))
+ {
+ return false;
+ }
+
+ get_ignore_noop();
+ if (JSON_HEDLEY_UNLIKELY(current != '#'))
+ {
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::ubjson, "value")))
+ {
+ return false;
+ }
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::ubjson, "expected '#' after type information; last byte: 0x" + last_token, "size")));
+ }
+
+ return get_ubjson_size_value(result.first);
+ }
+
+ if (current == '#')
+ {
+ return get_ubjson_size_value(result.first);
+ }
+
+ return true;
+ }
+
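+ /* Usage sketch (illustrative only): an optimized UBJSON container announces
+    its element type with '$' and its count with '#', so [1,2,3] can be
+    written as '[' '$' 'i' '#' 'i' 3 1 2 3 with no per-element type markers.
+
+    @code{.cpp}
+    const std::vector<std::uint8_t> bytes =
+        {'[', '$', 'i', '#', 'i', 0x03, 0x01, 0x02, 0x03};
+    const nlohmann::json j = nlohmann::json::from_ubjson(bytes);
+    assert(j == nlohmann::json({1, 2, 3}));
+
+    // the reverse direction: request size ('#') and type ('$') annotations
+    const auto optimized = nlohmann::json::to_ubjson(j, /*use_size*/true, /*use_type*/true);
+    @endcode
+ */
+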
+ /*!
+ @param prefix the previously read or set type prefix
+ @return whether value creation completed
+ */
+ bool get_ubjson_value(const char_int_type prefix)
+ {
+ switch (prefix)
+ {
+ case std::char_traits<char_type>::eof(): // EOF
+ return unexpect_eof(input_format_t::ubjson, "value");
+
+ case 'T': // true
+ return sax->boolean(true);
+ case 'F': // false
+ return sax->boolean(false);
+
+ case 'Z': // null
+ return sax->null();
+
+ case 'U':
+ {
+ std::uint8_t number{};
+ return get_number(input_format_t::ubjson, number) && sax->number_unsigned(number);
+ }
+
+ case 'i':
+ {
+ std::int8_t number{};
+ return get_number(input_format_t::ubjson, number) && sax->number_integer(number);
+ }
+
+ case 'I':
+ {
+ std::int16_t number{};
+ return get_number(input_format_t::ubjson, number) && sax->number_integer(number);
+ }
+
+ case 'l':
+ {
+ std::int32_t number{};
+ return get_number(input_format_t::ubjson, number) && sax->number_integer(number);
+ }
+
+ case 'L':
+ {
+ std::int64_t number{};
+ return get_number(input_format_t::ubjson, number) && sax->number_integer(number);
+ }
+
+ case 'd':
+ {
+ float number{};
+ return get_number(input_format_t::ubjson, number) && sax->number_float(static_cast<number_float_t>(number), "");
+ }
+
+ case 'D':
+ {
+ double number{};
+ return get_number(input_format_t::ubjson, number) && sax->number_float(static_cast<number_float_t>(number), "");
+ }
+
+ case 'H':
+ {
+ return get_ubjson_high_precision_number();
+ }
+
+ case 'C': // char
+ {
+ get();
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::ubjson, "char")))
+ {
+ return false;
+ }
+ if (JSON_HEDLEY_UNLIKELY(current > 127))
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "byte after 'C' must be in range 0x00..0x7F; last byte: 0x" + last_token, "char")));
+ }
+ string_t s(1, static_cast<typename string_t::value_type>(current));
+ return sax->string(s);
+ }
+
+ case 'S': // string
+ {
+ string_t s;
+ return get_ubjson_string(s) && sax->string(s);
+ }
+
+ case '[': // array
+ return get_ubjson_array();
+
+ case '{': // object
+ return get_ubjson_object();
+
+ default: // anything else
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::ubjson, "invalid byte: 0x" + last_token, "value")));
+ }
+ }
+ }
+
+ /*!
+ @return whether array creation completed
+ */
+ bool get_ubjson_array()
+ {
+ std::pair<std::size_t, char_int_type> size_and_type;
+ if (JSON_HEDLEY_UNLIKELY(!get_ubjson_size_type(size_and_type)))
+ {
+ return false;
+ }
+
+ if (size_and_type.first != string_t::npos)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_array(size_and_type.first)))
+ {
+ return false;
+ }
+
+ if (size_and_type.second != 0)
+ {
+ if (size_and_type.second != 'N')
+ {
+ for (std::size_t i = 0; i < size_and_type.first; ++i)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!get_ubjson_value(size_and_type.second)))
+ {
+ return false;
+ }
+ }
+ }
+ }
+ else
+ {
+ for (std::size_t i = 0; i < size_and_type.first; ++i)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!parse_ubjson_internal()))
+ {
+ return false;
+ }
+ }
+ }
+ }
+ else
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_array(std::size_t(-1))))
+ {
+ return false;
+ }
+
+ while (current != ']')
+ {
+ if (JSON_HEDLEY_UNLIKELY(!parse_ubjson_internal(false)))
+ {
+ return false;
+ }
+ get_ignore_noop();
+ }
+ }
+
+ return sax->end_array();
+ }
+
+ /*!
+ @return whether object creation completed
+ */
+ bool get_ubjson_object()
+ {
+ std::pair<std::size_t, char_int_type> size_and_type;
+ if (JSON_HEDLEY_UNLIKELY(!get_ubjson_size_type(size_and_type)))
+ {
+ return false;
+ }
+
+ string_t key;
+ if (size_and_type.first != string_t::npos)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_object(size_and_type.first)))
+ {
+ return false;
+ }
+
+ if (size_and_type.second != 0)
+ {
+ for (std::size_t i = 0; i < size_and_type.first; ++i)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!get_ubjson_string(key) || !sax->key(key)))
+ {
+ return false;
+ }
+ if (JSON_HEDLEY_UNLIKELY(!get_ubjson_value(size_and_type.second)))
+ {
+ return false;
+ }
+ key.clear();
+ }
+ }
+ else
+ {
+ for (std::size_t i = 0; i < size_and_type.first; ++i)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!get_ubjson_string(key) || !sax->key(key)))
+ {
+ return false;
+ }
+ if (JSON_HEDLEY_UNLIKELY(!parse_ubjson_internal()))
+ {
+ return false;
+ }
+ key.clear();
+ }
+ }
+ }
+ else
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_object(std::size_t(-1))))
+ {
+ return false;
+ }
+
+ while (current != '}')
+ {
+ if (JSON_HEDLEY_UNLIKELY(!get_ubjson_string(key, false) || !sax->key(key)))
+ {
+ return false;
+ }
+ if (JSON_HEDLEY_UNLIKELY(!parse_ubjson_internal()))
+ {
+ return false;
+ }
+ get_ignore_noop();
+ key.clear();
+ }
+ }
+
+ return sax->end_object();
+ }
+
+ // Note: no reader for UBJSON binary types is implemented, because the
+ // format does not define any
+
+ bool get_ubjson_high_precision_number()
+ {
+ // get size of following number string
+ std::size_t size{};
+ auto res = get_ubjson_size_value(size);
+ if (JSON_HEDLEY_UNLIKELY(!res))
+ {
+ return res;
+ }
+
+ // get number string
+ std::vector<char> number_vector;
+ for (std::size_t i = 0; i < size; ++i)
+ {
+ get();
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::ubjson, "number")))
+ {
+ return false;
+ }
+ number_vector.push_back(static_cast<char>(current));
+ }
+
+ // parse number string
+ auto number_ia = detail::input_adapter(std::forward<decltype(number_vector)>(number_vector));
+ auto number_lexer = detail::lexer<BasicJsonType, decltype(number_ia)>(std::move(number_ia), false);
+ const auto result_number = number_lexer.scan();
+ const auto number_string = number_lexer.get_token_string();
+ const auto result_remainder = number_lexer.scan();
+
+ using token_type = typename detail::lexer_base<BasicJsonType>::token_type;
+
+ if (JSON_HEDLEY_UNLIKELY(result_remainder != token_type::end_of_input))
+ {
+ return sax->parse_error(chars_read, number_string, parse_error::create(115, chars_read, exception_message(input_format_t::ubjson, "invalid number text: " + number_lexer.get_token_string(), "high-precision number")));
+ }
+
+ switch (result_number)
+ {
+ case token_type::value_integer:
+ return sax->number_integer(number_lexer.get_number_integer());
+ case token_type::value_unsigned:
+ return sax->number_unsigned(number_lexer.get_number_unsigned());
+ case token_type::value_float:
+ return sax->number_float(number_lexer.get_number_float(), std::move(number_string));
+ default:
+ return sax->parse_error(chars_read, number_string, parse_error::create(115, chars_read, exception_message(input_format_t::ubjson, "invalid number text: " + number_lexer.get_token_string(), "high-precision number")));
+ }
+ }
+
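+ /* Usage sketch (illustrative only): a high-precision number is an 'H'
+    marker followed by a length-prefixed ASCII number text, which is re-lexed
+    by the JSON lexer as shown above.
+
+    @code{.cpp}
+    const std::vector<std::uint8_t> bytes = {'H', 'i', 0x04, '3', '.', '1', '4'};
+    assert(nlohmann::json::from_ubjson(bytes) == 3.14);
+    @endcode
+ */
+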
+ ///////////////////////
+ // Utility functions //
+ ///////////////////////
+
+ /*!
+ @brief get next character from the input
+
+ This function provides the interface to the used input adapter. It does
+ not throw when the input reaches EOF, but returns the negative value
+ `std::char_traits<char_type>::eof()` in that case.
+
+ @return character read from the input
+ */
+ char_int_type get()
+ {
+ ++chars_read;
+ return current = ia.get_character();
+ }
+
+ /*!
+ @return character read from the input after ignoring all 'N' entries
+ */
+ char_int_type get_ignore_noop()
+ {
+ do
+ {
+ get();
+ }
+ while (current == 'N');
+
+ return current;
+ }
+
+ /*
+ @brief read a number from the input
+
+ @tparam NumberType the type of the number
+ @param[in] format the current format (for diagnostics)
+ @param[out] result number of type @a NumberType
+
+ @return whether conversion completed
+
+ @note This function needs to respect the system's endianness, because
+ bytes in CBOR, MessagePack, and UBJSON are stored in network order
+ (big endian) and therefore need reordering on little endian systems.
+ */
+ template<typename NumberType, bool InputIsLittleEndian = false>
+ bool get_number(const input_format_t format, NumberType& result)
+ {
+ // step 1: read input into array with system's byte order
+ std::array<std::uint8_t, sizeof(NumberType)> vec;
+ for (std::size_t i = 0; i < sizeof(NumberType); ++i)
+ {
+ get();
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(format, "number")))
+ {
+ return false;
+ }
+
+ // reverse byte order prior to conversion if necessary
+ if (is_little_endian != InputIsLittleEndian)
+ {
+ vec[sizeof(NumberType) - i - 1] = static_cast<std::uint8_t>(current);
+ }
+ else
+ {
+ vec[i] = static_cast<std::uint8_t>(current); // LCOV_EXCL_LINE
+ }
+ }
+
+ // step 2: convert array into number of type T and return
+ std::memcpy(&result, vec.data(), sizeof(NumberType));
+ return true;
+ }
+
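+ /* Illustrative sketch of the byte-swapping idea used by get_number() above:
+    multi-byte numbers arrive in network (big-endian) order, so on a
+    little-endian host each byte is written into the staging buffer in
+    reverse before a single memcpy produces the native value. The helper
+    below is hypothetical and not part of this header.
+
+    @code{.cpp}
+    #include <array>
+    #include <cstddef>
+    #include <cstdint>
+    #include <cstring>
+
+    std::uint32_t read_big_endian_u32(const std::uint8_t* in, bool host_is_little_endian)
+    {
+        std::array<std::uint8_t, 4> buf{};
+        for (std::size_t i = 0; i < 4; ++i)
+        {
+            // reverse the byte order while copying if host and wire differ
+            buf[host_is_little_endian ? 4 - i - 1 : i] = in[i];
+        }
+        std::uint32_t result;
+        std::memcpy(&result, buf.data(), 4);
+        return result;
+    }
+    @endcode
+ */
+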
+ /*!
+ @brief create a string by reading characters from the input
+
+ @tparam NumberType the type of the number
+ @param[in] format the current format (for diagnostics)
+ @param[in] len number of characters to read
+ @param[out] result string created by reading @a len bytes
+
+ @return whether string creation completed
+
+ @note We cannot reserve @a len bytes for the result, because @a len
+ may be too large. Usually, @ref unexpect_eof() detects the end of
+ the input before we run out of string memory.
+ */
+ template<typename NumberType>
+ bool get_string(const input_format_t format,
+ const NumberType len,
+ string_t& result)
+ {
+ bool success = true;
+ for (NumberType i = 0; i < len; i++)
+ {
+ get();
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(format, "string")))
+ {
+ success = false;
+ break;
+ }
+ result.push_back(static_cast<typename string_t::value_type>(current));
+ }
+ return success;
+ }
+
+ /*!
+ @brief create a byte array by reading bytes from the input
+
+ @tparam NumberType the type of the number
+ @param[in] format the current format (for diagnostics)
+ @param[in] len number of bytes to read
+ @param[out] result byte array created by reading @a len bytes
+
+ @return whether byte array creation completed
+
+ @note We cannot reserve @a len bytes for the result, because @a len
+ may be too large. Usually, @ref unexpect_eof() detects the end of
+ the input before we run out of memory.
+ */
+ template<typename NumberType>
+ bool get_binary(const input_format_t format,
+ const NumberType len,
+ binary_t& result)
+ {
+ bool success = true;
+ for (NumberType i = 0; i < len; i++)
+ {
+ get();
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(format, "binary")))
+ {
+ success = false;
+ break;
+ }
+ result.push_back(static_cast<std::uint8_t>(current));
+ }
+ return success;
+ }
+
+ /*!
+ @param[in] format the current format (for diagnostics)
+ @param[in] context further context information (for diagnostics)
+ @return whether the last read character is not EOF
+ */
+ JSON_HEDLEY_NON_NULL(3)
+ bool unexpect_eof(const input_format_t format, const char* context) const
+ {
+ if (JSON_HEDLEY_UNLIKELY(current == std::char_traits<char_type>::eof()))
+ {
+ return sax->parse_error(chars_read, "<end of file>",
+ parse_error::create(110, chars_read, exception_message(format, "unexpected end of input", context)));
+ }
+ return true;
+ }
+
+ /*!
+ @return a string representation of the last read byte
+ */
+ std::string get_token_string() const
+ {
+ std::array<char, 3> cr{{}};
+ (std::snprintf)(cr.data(), cr.size(), "%.2hhX", static_cast<unsigned char>(current));
+ return std::string{cr.data()};
+ }
+
+ /*!
+ @param[in] format the current format
+ @param[in] detail a detailed error message
+ @param[in] context further context information
+ @return a message string to use in the parse_error exceptions
+ */
+ std::string exception_message(const input_format_t format,
+ const std::string& detail,
+ const std::string& context) const
+ {
+ std::string error_msg = "syntax error while parsing ";
+
+ switch (format)
+ {
+ case input_format_t::cbor:
+ error_msg += "CBOR";
+ break;
+
+ case input_format_t::msgpack:
+ error_msg += "MessagePack";
+ break;
+
+ case input_format_t::ubjson:
+ error_msg += "UBJSON";
+ break;
+
+ case input_format_t::bson:
+ error_msg += "BSON";
+ break;
+
+ default: // LCOV_EXCL_LINE
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
+ }
+
+ return error_msg + " " + context + ": " + detail;
+ }
+
+ private:
+ /// input adapter
+ InputAdapterType ia;
+
+ /// the current character
+ char_int_type current = std::char_traits<char_type>::eof();
+
+ /// the number of characters read
+ std::size_t chars_read = 0;
+
+ /// whether we can assume little endianness
+ const bool is_little_endian = little_endianess();
+
+ /// the SAX parser
+ json_sax_t* sax = nullptr;
+};
+} // namespace detail
+} // namespace nlohmann
+
+// #include <nlohmann/detail/input/input_adapters.hpp>
+
+// #include <nlohmann/detail/input/lexer.hpp>
+
+// #include <nlohmann/detail/input/parser.hpp>
+
+
+#include <cmath> // isfinite
+#include <cstdint> // uint8_t
+#include <functional> // function
+#include <string> // string
+#include <utility> // move
+#include <vector> // vector
+
+// #include <nlohmann/detail/exceptions.hpp>
+
+// #include <nlohmann/detail/input/input_adapters.hpp>
+
+// #include <nlohmann/detail/input/json_sax.hpp>
+
+// #include <nlohmann/detail/input/lexer.hpp>
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+// #include <nlohmann/detail/meta/is_sax.hpp>
+
+// #include <nlohmann/detail/value_t.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+////////////
+// parser //
+////////////
+
+enum class parse_event_t : uint8_t
+{
+ /// the parser read `{` and started to process a JSON object
+ object_start,
+ /// the parser read `}` and finished processing a JSON object
+ object_end,
+ /// the parser read `[` and started to process a JSON array
+ array_start,
+ /// the parser read `]` and finished processing a JSON array
+ array_end,
+ /// the parser read a key of a value in an object
+ key,
+ /// the parser finished reading a JSON value
+ value
+};
+
+template<typename BasicJsonType>
+using parser_callback_t =
+ std::function<bool(int depth, parse_event_t event, BasicJsonType& parsed)>;
+
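+/* Usage sketch (illustrative only): a parser_callback_t can filter or
+   observe the DOM while it is being built. The public basic_json::parse()
+   overload that takes a callback forwards it to the parser below; for
+   example, dropping every "comment" key while parsing:
+
+   @code{.cpp}
+   using nlohmann::json;
+
+   const auto text = R"({"value": 1, "comment": "drop me"})";
+   const json j = json::parse(text,
+       [](int /*depth*/, json::parse_event_t event, json& parsed)
+   {
+       // returning false discards the current key and its value
+       return !(event == json::parse_event_t::key && parsed == json("comment"));
+   });
+   // j now equals {"value": 1}
+   @endcode
+*/
+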
+/*!
+@brief syntax analysis
+
+This class implements a recursive descent parser.
+*/
+template<typename BasicJsonType, typename InputAdapterType>
+class parser
+{
+ using number_integer_t = typename BasicJsonType::number_integer_t;
+ using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+ using number_float_t = typename BasicJsonType::number_float_t;
+ using string_t = typename BasicJsonType::string_t;
+ using lexer_t = lexer<BasicJsonType, InputAdapterType>;
+ using token_type = typename lexer_t::token_type;
+
+ public:
+ /// a parser reading from an input adapter
+ explicit parser(InputAdapterType&& adapter,
+ const parser_callback_t<BasicJsonType> cb = nullptr,
+ const bool allow_exceptions_ = true,
+ const bool skip_comments = false)
+ : callback(cb)
+ , m_lexer(std::move(adapter), skip_comments)
+ , allow_exceptions(allow_exceptions_)
+ {
+ // read first token
+ get_token();
+ }
+
+ /*!
+ @brief public parser interface
+
+ @param[in] strict whether to expect the last token to be EOF
+ @param[in,out] result parsed JSON value
+
+ @throw parse_error.101 in case of an unexpected token
+ @throw parse_error.102 if to_unicode fails or a surrogate error occurs
+ @throw parse_error.103 if to_unicode fails
+ */
+ void parse(const bool strict, BasicJsonType& result)
+ {
+ if (callback)
+ {
+ json_sax_dom_callback_parser<BasicJsonType> sdp(result, callback, allow_exceptions);
+ sax_parse_internal(&sdp);
+ result.assert_invariant();
+
+ // in strict mode, input must be completely read
+ if (strict && (get_token() != token_type::end_of_input))
+ {
+ sdp.parse_error(m_lexer.get_position(),
+ m_lexer.get_token_string(),
+ parse_error::create(101, m_lexer.get_position(),
+ exception_message(token_type::end_of_input, "value")));
+ }
+
+ // in case of an error, return discarded value
+ if (sdp.is_errored())
+ {
+ result = value_t::discarded;
+ return;
+ }
+
+ // set top-level value to null if it was discarded by the callback
+ // function
+ if (result.is_discarded())
+ {
+ result = nullptr;
+ }
+ }
+ else
+ {
+ json_sax_dom_parser<BasicJsonType> sdp(result, allow_exceptions);
+ sax_parse_internal(&sdp);
+ result.assert_invariant();
+
+ // in strict mode, input must be completely read
+ if (strict && (get_token() != token_type::end_of_input))
+ {
+ sdp.parse_error(m_lexer.get_position(),
+ m_lexer.get_token_string(),
+ parse_error::create(101, m_lexer.get_position(),
+ exception_message(token_type::end_of_input, "value")));
+ }
+
+ // in case of an error, return discarded value
+ if (sdp.is_errored())
+ {
+ result = value_t::discarded;
+ return;
+ }
+ }
+ }
+
+ /*!
+ @brief public accept interface
+
+ @param[in] strict whether to expect the last token to be EOF
+ @return whether the input is a proper JSON text
+ */
+ bool accept(const bool strict = true)
+ {
+ json_sax_acceptor<BasicJsonType> sax_acceptor;
+ return sax_parse(&sax_acceptor, strict);
+ }
+
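+ /* Usage sketch (illustrative only): accept() runs the same parser against a
+    json_sax_acceptor, so it reports validity without building a DOM.
+
+    @code{.cpp}
+    assert(nlohmann::json::accept(R"({"valid": true})"));
+    assert(!nlohmann::json::accept(R"({"valid": )"));
+    @endcode
+ */
+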
+ template<typename SAX>
+ JSON_HEDLEY_NON_NULL(2)
+ bool sax_parse(SAX* sax, const bool strict = true)
+ {
+ (void)detail::is_sax_static_asserts<SAX, BasicJsonType> {};
+ const bool result = sax_parse_internal(sax);
+
+ // strict mode: next byte must be EOF
+ if (result && strict && (get_token() != token_type::end_of_input))
+ {
+ return sax->parse_error(m_lexer.get_position(),
+ m_lexer.get_token_string(),
+ parse_error::create(101, m_lexer.get_position(),
+ exception_message(token_type::end_of_input, "value")));
+ }
+
+ return result;
+ }
+
+ private:
+ template<typename SAX>
+ JSON_HEDLEY_NON_NULL(2)
+ bool sax_parse_internal(SAX* sax)
+ {
+ // stack to remember the hierarchy of structured values we are parsing
+ // true = array; false = object
+ std::vector<bool> states;
+ // value to avoid a goto (see comment where set to true)
+ bool skip_to_state_evaluation = false;
+
+ while (true)
+ {
+ if (!skip_to_state_evaluation)
+ {
+ // invariant: get_token() was called before each iteration
+ switch (last_token)
+ {
+ case token_type::begin_object:
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_object(std::size_t(-1))))
+ {
+ return false;
+ }
+
+ // closing } -> we are done
+ if (get_token() == token_type::end_object)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->end_object()))
+ {
+ return false;
+ }
+ break;
+ }
+
+ // parse key
+ if (JSON_HEDLEY_UNLIKELY(last_token != token_type::value_string))
+ {
+ return sax->parse_error(m_lexer.get_position(),
+ m_lexer.get_token_string(),
+ parse_error::create(101, m_lexer.get_position(),
+ exception_message(token_type::value_string, "object key")));
+ }
+ if (JSON_HEDLEY_UNLIKELY(!sax->key(m_lexer.get_string())))
+ {
+ return false;
+ }
+
+ // parse separator (:)
+ if (JSON_HEDLEY_UNLIKELY(get_token() != token_type::name_separator))
+ {
+ return sax->parse_error(m_lexer.get_position(),
+ m_lexer.get_token_string(),
+ parse_error::create(101, m_lexer.get_position(),
+ exception_message(token_type::name_separator, "object separator")));
+ }
+
+ // remember we are now inside an object
+ states.push_back(false);
+
+ // parse values
+ get_token();
+ continue;
+ }
+
+ case token_type::begin_array:
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_array(std::size_t(-1))))
+ {
+ return false;
+ }
+
+ // closing ] -> we are done
+ if (get_token() == token_type::end_array)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->end_array()))
+ {
+ return false;
+ }
+ break;
+ }
+
+ // remember we are now inside an array
+ states.push_back(true);
+
+ // parse values (no need to call get_token)
+ continue;
+ }
+
+ case token_type::value_float:
+ {
+ const auto res = m_lexer.get_number_float();
+
+ if (JSON_HEDLEY_UNLIKELY(!std::isfinite(res)))
+ {
+ return sax->parse_error(m_lexer.get_position(),
+ m_lexer.get_token_string(),
+ out_of_range::create(406, "number overflow parsing '" + m_lexer.get_token_string() + "'"));
+ }
+
+ if (JSON_HEDLEY_UNLIKELY(!sax->number_float(res, m_lexer.get_string())))
+ {
+ return false;
+ }
+
+ break;
+ }
+
+ case token_type::literal_false:
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->boolean(false)))
+ {
+ return false;
+ }
+ break;
+ }
+
+ case token_type::literal_null:
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->null()))
+ {
+ return false;
+ }
+ break;
+ }
+
+ case token_type::literal_true:
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->boolean(true)))
+ {
+ return false;
+ }
+ break;
+ }
+
+ case token_type::value_integer:
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->number_integer(m_lexer.get_number_integer())))
+ {
+ return false;
+ }
+ break;
+ }
+
+ case token_type::value_string:
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->string(m_lexer.get_string())))
+ {
+ return false;
+ }
+ break;
+ }
+
+ case token_type::value_unsigned:
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->number_unsigned(m_lexer.get_number_unsigned())))
+ {
+ return false;
+ }
+ break;
+ }
+
+ case token_type::parse_error:
+ {
+ // using "uninitialized" to avoid "expected" message
+ return sax->parse_error(m_lexer.get_position(),
+ m_lexer.get_token_string(),
+ parse_error::create(101, m_lexer.get_position(),
+ exception_message(token_type::uninitialized, "value")));
+ }
+
+ default: // the last token was unexpected
+ {
+ return sax->parse_error(m_lexer.get_position(),
+ m_lexer.get_token_string(),
+ parse_error::create(101, m_lexer.get_position(),
+ exception_message(token_type::literal_or_value, "value")));
+ }
+ }
+ }
+ else
+ {
+ skip_to_state_evaluation = false;
+ }
+
+ // we reached this line after we successfully parsed a value
+ if (states.empty())
+ {
+ // empty stack: we reached the end of the hierarchy: done
+ return true;
+ }
+
+ if (states.back()) // array
+ {
+ // comma -> next value
+ if (get_token() == token_type::value_separator)
+ {
+ // parse a new value
+ get_token();
+ continue;
+ }
+
+ // closing ]
+ if (JSON_HEDLEY_LIKELY(last_token == token_type::end_array))
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->end_array()))
+ {
+ return false;
+ }
+
+ // We are done with this array. Before we can parse a
+ // new value, we need to evaluate the new state first.
+ // By setting skip_to_state_evaluation to true, we
+ // are effectively jumping to the beginning of this if.
+ JSON_ASSERT(!states.empty());
+ states.pop_back();
+ skip_to_state_evaluation = true;
+ continue;
+ }
+
+ return sax->parse_error(m_lexer.get_position(),
+ m_lexer.get_token_string(),
+ parse_error::create(101, m_lexer.get_position(),
+ exception_message(token_type::end_array, "array")));
+ }
+ else // object
+ {
+ // comma -> next value
+ if (get_token() == token_type::value_separator)
+ {
+ // parse key
+ if (JSON_HEDLEY_UNLIKELY(get_token() != token_type::value_string))
+ {
+ return sax->parse_error(m_lexer.get_position(),
+ m_lexer.get_token_string(),
+ parse_error::create(101, m_lexer.get_position(),
+ exception_message(token_type::value_string, "object key")));
+ }
+
+ if (JSON_HEDLEY_UNLIKELY(!sax->key(m_lexer.get_string())))
+ {
+ return false;
+ }
+
+ // parse separator (:)
+ if (JSON_HEDLEY_UNLIKELY(get_token() != token_type::name_separator))
+ {
+ return sax->parse_error(m_lexer.get_position(),
+ m_lexer.get_token_string(),
+ parse_error::create(101, m_lexer.get_position(),
+ exception_message(token_type::name_separator, "object separator")));
+ }
+
+ // parse values
+ get_token();
+ continue;
+ }
+
+ // closing }
+ if (JSON_HEDLEY_LIKELY(last_token == token_type::end_object))
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->end_object()))
+ {
+ return false;
+ }
+
+ // We are done with this object. Before we can parse a
+ // new value, we need to evaluate the new state first.
+                    // By setting skip_to_state_evaluation to true here, the
+                    // next loop iteration skips the value switch and falls
+                    // through directly to this state evaluation.
+ JSON_ASSERT(!states.empty());
+ states.pop_back();
+ skip_to_state_evaluation = true;
+ continue;
+ }
+
+ return sax->parse_error(m_lexer.get_position(),
+ m_lexer.get_token_string(),
+ parse_error::create(101, m_lexer.get_position(),
+ exception_message(token_type::end_object, "object")));
+ }
+ }
+ }
+
+ /// get next token from lexer
+ token_type get_token()
+ {
+ return last_token = m_lexer.scan();
+ }
+
+ std::string exception_message(const token_type expected, const std::string& context)
+ {
+ std::string error_msg = "syntax error ";
+
+ if (!context.empty())
+ {
+ error_msg += "while parsing " + context + " ";
+ }
+
+ error_msg += "- ";
+
+ if (last_token == token_type::parse_error)
+ {
+ error_msg += std::string(m_lexer.get_error_message()) + "; last read: '" +
+ m_lexer.get_token_string() + "'";
+ }
+ else
+ {
+ error_msg += "unexpected " + std::string(lexer_t::token_type_name(last_token));
+ }
+
+ if (expected != token_type::uninitialized)
+ {
+ error_msg += "; expected " + std::string(lexer_t::token_type_name(expected));
+ }
+
+ return error_msg;
+ }
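+
+    // For illustration (token names assumed to match lexer_t::token_type_name):
+    // a missing key after a comma in an object could yield a message such as
+    //   "syntax error while parsing object key - unexpected '}'; expected string literal"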
+
+ private:
+ /// callback function
+ const parser_callback_t<BasicJsonType> callback = nullptr;
+ /// the type of the last read token
+ token_type last_token = token_type::uninitialized;
+ /// the lexer
+ lexer_t m_lexer;
+ /// whether to throw exceptions in case of errors
+ const bool allow_exceptions = true;
+};
+} // namespace detail
+} // namespace nlohmann
+
+// #include <nlohmann/detail/iterators/internal_iterator.hpp>
+
+
+// #include <nlohmann/detail/iterators/primitive_iterator.hpp>
+
+
+#include <cstddef> // ptrdiff_t
+#include <limits> // numeric_limits
+
+namespace nlohmann
+{
+namespace detail
+{
+/*
+@brief an iterator for primitive JSON types
+
+This class models an iterator for primitive JSON types (boolean, number,
+string). Its only purpose is to allow the iterator/const_iterator classes
+to "iterate" over primitive values. Internally, the iterator is modeled by
+a `difference_type` variable: begin_value (`0`) models the begin iterator,
+end_value (`1`) models the past-the-end iterator.
+*/
+class primitive_iterator_t
+{
+ private:
+ using difference_type = std::ptrdiff_t;
+ static constexpr difference_type begin_value = 0;
+ static constexpr difference_type end_value = begin_value + 1;
+
+ /// iterator as signed integer type
+ difference_type m_it = (std::numeric_limits<std::ptrdiff_t>::min)();
+
+ public:
+ constexpr difference_type get_value() const noexcept
+ {
+ return m_it;
+ }
+
+ /// set iterator to a defined beginning
+ void set_begin() noexcept
+ {
+ m_it = begin_value;
+ }
+
+ /// set iterator to a defined past the end
+ void set_end() noexcept
+ {
+ m_it = end_value;
+ }
+
+ /// return whether the iterator can be dereferenced
+ constexpr bool is_begin() const noexcept
+ {
+ return m_it == begin_value;
+ }
+
+ /// return whether the iterator is at end
+ constexpr bool is_end() const noexcept
+ {
+ return m_it == end_value;
+ }
+
+ friend constexpr bool operator==(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept
+ {
+ return lhs.m_it == rhs.m_it;
+ }
+
+ friend constexpr bool operator<(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept
+ {
+ return lhs.m_it < rhs.m_it;
+ }
+
+ primitive_iterator_t operator+(difference_type n) noexcept
+ {
+ auto result = *this;
+ result += n;
+ return result;
+ }
+
+ friend constexpr difference_type operator-(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept
+ {
+ return lhs.m_it - rhs.m_it;
+ }
+
+ primitive_iterator_t& operator++() noexcept
+ {
+ ++m_it;
+ return *this;
+ }
+
+ primitive_iterator_t const operator++(int) noexcept
+ {
+ auto result = *this;
+ ++m_it;
+ return result;
+ }
+
+ primitive_iterator_t& operator--() noexcept
+ {
+ --m_it;
+ return *this;
+ }
+
+ primitive_iterator_t const operator--(int) noexcept
+ {
+ auto result = *this;
+ --m_it;
+ return result;
+ }
+
+ primitive_iterator_t& operator+=(difference_type n) noexcept
+ {
+ m_it += n;
+ return *this;
+ }
+
+ primitive_iterator_t& operator-=(difference_type n) noexcept
+ {
+ m_it -= n;
+ return *this;
+ }
+};
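+
+// A minimal usage sketch: the primitive iterator is just a signed counter, so
+// the whole "range" over a primitive value is the single step from begin_value
+// to end_value.
+//
+//   primitive_iterator_t it;   // default-constructed: neither begin nor end
+//   it.set_begin();            // is_begin() == true  -> dereferenceable
+//   ++it;                      // is_end()   == true  -> past the end
+//   it -= 1;                   // back at the begin position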
+} // namespace detail
+} // namespace nlohmann
+
+
+namespace nlohmann
+{
+namespace detail
+{
+/*!
+@brief an iterator value
+
+@note This structure could easily be a union, but MSVC currently does not allow
+union members with complex constructors, see https://github.com/nlohmann/json/pull/105.
+*/
+template<typename BasicJsonType> struct internal_iterator
+{
+ /// iterator for JSON objects
+ typename BasicJsonType::object_t::iterator object_iterator {};
+ /// iterator for JSON arrays
+ typename BasicJsonType::array_t::iterator array_iterator {};
+ /// generic iterator for all other types
+ primitive_iterator_t primitive_iterator {};
+};
+} // namespace detail
+} // namespace nlohmann
+
+// #include <nlohmann/detail/iterators/iter_impl.hpp>
+
+
+#include <iterator> // iterator, random_access_iterator_tag, bidirectional_iterator_tag, advance, next
+#include <type_traits> // conditional, is_const, remove_const
+
+// #include <nlohmann/detail/exceptions.hpp>
+
+// #include <nlohmann/detail/iterators/internal_iterator.hpp>
+
+// #include <nlohmann/detail/iterators/primitive_iterator.hpp>
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+// #include <nlohmann/detail/meta/cpp_future.hpp>
+
+// #include <nlohmann/detail/meta/type_traits.hpp>
+
+// #include <nlohmann/detail/value_t.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+// forward declare, to be able to friend it later on
+template<typename IteratorType> class iteration_proxy;
+template<typename IteratorType> class iteration_proxy_value;
+
+/*!
+@brief a template for a bidirectional iterator for the @ref basic_json class
+This class implements both iterators (iterator and const_iterator) for the
+@ref basic_json class.
+@note An iterator is called *initialized* when a pointer to a JSON value has
+ been set (e.g., by a constructor or a copy assignment). If the iterator is
+ default-constructed, it is *uninitialized* and most methods are undefined.
+ **The library uses assertions to detect calls on uninitialized iterators.**
+@requirement The class satisfies the following concept requirements:
+-
+[BidirectionalIterator](https://en.cppreference.com/w/cpp/named_req/BidirectionalIterator):
+  The iterator can be moved in both directions (i.e. incremented and
+  decremented).
+@since version 1.0.0, simplified in version 2.0.9, changed to bidirectional
+ iterators in version 3.0.0 (see https://github.com/nlohmann/json/issues/593)
+*/
+template<typename BasicJsonType>
+class iter_impl
+{
+ /// allow basic_json to access private members
+ friend iter_impl<typename std::conditional<std::is_const<BasicJsonType>::value, typename std::remove_const<BasicJsonType>::type, const BasicJsonType>::type>;
+ friend BasicJsonType;
+ friend iteration_proxy<iter_impl>;
+ friend iteration_proxy_value<iter_impl>;
+
+ using object_t = typename BasicJsonType::object_t;
+ using array_t = typename BasicJsonType::array_t;
+ // make sure BasicJsonType is basic_json or const basic_json
+ static_assert(is_basic_json<typename std::remove_const<BasicJsonType>::type>::value,
+ "iter_impl only accepts (const) basic_json");
+
+ public:
+
+ /// The std::iterator class template (used as a base class to provide typedefs) is deprecated in C++17.
+ /// The C++ Standard has never required user-defined iterators to derive from std::iterator.
+ /// A user-defined iterator should provide publicly accessible typedefs named
+ /// iterator_category, value_type, difference_type, pointer, and reference.
+ /// Note that value_type is required to be non-const, even for constant iterators.
+ using iterator_category = std::bidirectional_iterator_tag;
+
+ /// the type of the values when the iterator is dereferenced
+ using value_type = typename BasicJsonType::value_type;
+ /// a type to represent differences between iterators
+ using difference_type = typename BasicJsonType::difference_type;
+ /// defines a pointer to the type iterated over (value_type)
+ using pointer = typename std::conditional<std::is_const<BasicJsonType>::value,
+ typename BasicJsonType::const_pointer,
+ typename BasicJsonType::pointer>::type;
+ /// defines a reference to the type iterated over (value_type)
+ using reference =
+ typename std::conditional<std::is_const<BasicJsonType>::value,
+ typename BasicJsonType::const_reference,
+ typename BasicJsonType::reference>::type;
+
+ /// default constructor
+ iter_impl() = default;
+
+ /*!
+ @brief constructor for a given JSON instance
+ @param[in] object pointer to a JSON object for this iterator
+ @pre object != nullptr
+ @post The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ explicit iter_impl(pointer object) noexcept : m_object(object)
+ {
+ JSON_ASSERT(m_object != nullptr);
+
+ switch (m_object->m_type)
+ {
+ case value_t::object:
+ {
+ m_it.object_iterator = typename object_t::iterator();
+ break;
+ }
+
+ case value_t::array:
+ {
+ m_it.array_iterator = typename array_t::iterator();
+ break;
+ }
+
+ default:
+ {
+ m_it.primitive_iterator = primitive_iterator_t();
+ break;
+ }
+ }
+ }
+
+ /*!
+ @note The conventional copy constructor and copy assignment are implicitly
+ defined. Combined with the following converting constructor and
+ assignment, they support: (1) copy from iterator to iterator, (2)
+ copy from const iterator to const iterator, and (3) conversion from
+ iterator to const iterator. However conversion from const iterator
+          iterator to const iterator. However, conversion from const iterator
+ */
+
+ /*!
+ @brief const copy constructor
+ @param[in] other const iterator to copy from
+ @note This copy constructor had to be defined explicitly to circumvent a bug
+ occurring on msvc v19.0 compiler (VS 2015) debug build. For more
+ information refer to: https://github.com/nlohmann/json/issues/1608
+ */
+ iter_impl(const iter_impl<const BasicJsonType>& other) noexcept
+ : m_object(other.m_object), m_it(other.m_it)
+ {}
+
+ /*!
+ @brief converting assignment
+ @param[in] other const iterator to copy from
+ @return const/non-const iterator
+ @note It is not checked whether @a other is initialized.
+ */
+ iter_impl& operator=(const iter_impl<const BasicJsonType>& other) noexcept
+ {
+ m_object = other.m_object;
+ m_it = other.m_it;
+ return *this;
+ }
+
+ /*!
+ @brief converting constructor
+ @param[in] other non-const iterator to copy from
+ @note It is not checked whether @a other is initialized.
+ */
+ iter_impl(const iter_impl<typename std::remove_const<BasicJsonType>::type>& other) noexcept
+ : m_object(other.m_object), m_it(other.m_it)
+ {}
+
+ /*!
+ @brief converting assignment
+ @param[in] other non-const iterator to copy from
+ @return const/non-const iterator
+ @note It is not checked whether @a other is initialized.
+ */
+ iter_impl& operator=(const iter_impl<typename std::remove_const<BasicJsonType>::type>& other) noexcept
+ {
+ m_object = other.m_object;
+ m_it = other.m_it;
+ return *this;
+ }
+
+ private:
+ /*!
+ @brief set the iterator to the first value
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ void set_begin() noexcept
+ {
+ JSON_ASSERT(m_object != nullptr);
+
+ switch (m_object->m_type)
+ {
+ case value_t::object:
+ {
+ m_it.object_iterator = m_object->m_value.object->begin();
+ break;
+ }
+
+ case value_t::array:
+ {
+ m_it.array_iterator = m_object->m_value.array->begin();
+ break;
+ }
+
+ case value_t::null:
+ {
+ // set to end so begin()==end() is true: null is empty
+ m_it.primitive_iterator.set_end();
+ break;
+ }
+
+ default:
+ {
+ m_it.primitive_iterator.set_begin();
+ break;
+ }
+ }
+ }
+
+ /*!
+ @brief set the iterator past the last value
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ void set_end() noexcept
+ {
+ JSON_ASSERT(m_object != nullptr);
+
+ switch (m_object->m_type)
+ {
+ case value_t::object:
+ {
+ m_it.object_iterator = m_object->m_value.object->end();
+ break;
+ }
+
+ case value_t::array:
+ {
+ m_it.array_iterator = m_object->m_value.array->end();
+ break;
+ }
+
+ default:
+ {
+ m_it.primitive_iterator.set_end();
+ break;
+ }
+ }
+ }
+
+ public:
+ /*!
+ @brief return a reference to the value pointed to by the iterator
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ reference operator*() const
+ {
+ JSON_ASSERT(m_object != nullptr);
+
+ switch (m_object->m_type)
+ {
+ case value_t::object:
+ {
+ JSON_ASSERT(m_it.object_iterator != m_object->m_value.object->end());
+ return m_it.object_iterator->second;
+ }
+
+ case value_t::array:
+ {
+ JSON_ASSERT(m_it.array_iterator != m_object->m_value.array->end());
+ return *m_it.array_iterator;
+ }
+
+ case value_t::null:
+ JSON_THROW(invalid_iterator::create(214, "cannot get value"));
+
+ default:
+ {
+ if (JSON_HEDLEY_LIKELY(m_it.primitive_iterator.is_begin()))
+ {
+ return *m_object;
+ }
+
+ JSON_THROW(invalid_iterator::create(214, "cannot get value"));
+ }
+ }
+ }
+
+ /*!
+ @brief dereference the iterator
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ pointer operator->() const
+ {
+ JSON_ASSERT(m_object != nullptr);
+
+ switch (m_object->m_type)
+ {
+ case value_t::object:
+ {
+ JSON_ASSERT(m_it.object_iterator != m_object->m_value.object->end());
+ return &(m_it.object_iterator->second);
+ }
+
+ case value_t::array:
+ {
+ JSON_ASSERT(m_it.array_iterator != m_object->m_value.array->end());
+ return &*m_it.array_iterator;
+ }
+
+ default:
+ {
+ if (JSON_HEDLEY_LIKELY(m_it.primitive_iterator.is_begin()))
+ {
+ return m_object;
+ }
+
+ JSON_THROW(invalid_iterator::create(214, "cannot get value"));
+ }
+ }
+ }
+
+ /*!
+ @brief post-increment (it++)
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ iter_impl const operator++(int)
+ {
+ auto result = *this;
+ ++(*this);
+ return result;
+ }
+
+ /*!
+ @brief pre-increment (++it)
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ iter_impl& operator++()
+ {
+ JSON_ASSERT(m_object != nullptr);
+
+ switch (m_object->m_type)
+ {
+ case value_t::object:
+ {
+ std::advance(m_it.object_iterator, 1);
+ break;
+ }
+
+ case value_t::array:
+ {
+ std::advance(m_it.array_iterator, 1);
+ break;
+ }
+
+ default:
+ {
+ ++m_it.primitive_iterator;
+ break;
+ }
+ }
+
+ return *this;
+ }
+
+ /*!
+ @brief post-decrement (it--)
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ iter_impl const operator--(int)
+ {
+ auto result = *this;
+ --(*this);
+ return result;
+ }
+
+ /*!
+ @brief pre-decrement (--it)
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ iter_impl& operator--()
+ {
+ JSON_ASSERT(m_object != nullptr);
+
+ switch (m_object->m_type)
+ {
+ case value_t::object:
+ {
+ std::advance(m_it.object_iterator, -1);
+ break;
+ }
+
+ case value_t::array:
+ {
+ std::advance(m_it.array_iterator, -1);
+ break;
+ }
+
+ default:
+ {
+ --m_it.primitive_iterator;
+ break;
+ }
+ }
+
+ return *this;
+ }
+
+ /*!
+ @brief comparison: equal
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ bool operator==(const iter_impl& other) const
+ {
+ // if objects are not the same, the comparison is undefined
+ if (JSON_HEDLEY_UNLIKELY(m_object != other.m_object))
+ {
+ JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers"));
+ }
+
+ JSON_ASSERT(m_object != nullptr);
+
+ switch (m_object->m_type)
+ {
+ case value_t::object:
+ return (m_it.object_iterator == other.m_it.object_iterator);
+
+ case value_t::array:
+ return (m_it.array_iterator == other.m_it.array_iterator);
+
+ default:
+ return (m_it.primitive_iterator == other.m_it.primitive_iterator);
+ }
+ }
+
+ /*!
+ @brief comparison: not equal
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ bool operator!=(const iter_impl& other) const
+ {
+ return !operator==(other);
+ }
+
+ /*!
+ @brief comparison: smaller
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ bool operator<(const iter_impl& other) const
+ {
+ // if objects are not the same, the comparison is undefined
+ if (JSON_HEDLEY_UNLIKELY(m_object != other.m_object))
+ {
+ JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers"));
+ }
+
+ JSON_ASSERT(m_object != nullptr);
+
+ switch (m_object->m_type)
+ {
+ case value_t::object:
+ JSON_THROW(invalid_iterator::create(213, "cannot compare order of object iterators"));
+
+ case value_t::array:
+ return (m_it.array_iterator < other.m_it.array_iterator);
+
+ default:
+ return (m_it.primitive_iterator < other.m_it.primitive_iterator);
+ }
+ }
+
+ /*!
+ @brief comparison: less than or equal
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ bool operator<=(const iter_impl& other) const
+ {
+ return !other.operator < (*this);
+ }
+
+ /*!
+ @brief comparison: greater than
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ bool operator>(const iter_impl& other) const
+ {
+ return !operator<=(other);
+ }
+
+ /*!
+ @brief comparison: greater than or equal
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ bool operator>=(const iter_impl& other) const
+ {
+ return !operator<(other);
+ }
+
+ /*!
+ @brief add to iterator
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ iter_impl& operator+=(difference_type i)
+ {
+ JSON_ASSERT(m_object != nullptr);
+
+ switch (m_object->m_type)
+ {
+ case value_t::object:
+ JSON_THROW(invalid_iterator::create(209, "cannot use offsets with object iterators"));
+
+ case value_t::array:
+ {
+ std::advance(m_it.array_iterator, i);
+ break;
+ }
+
+ default:
+ {
+ m_it.primitive_iterator += i;
+ break;
+ }
+ }
+
+ return *this;
+ }
+
+ /*!
+ @brief subtract from iterator
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ iter_impl& operator-=(difference_type i)
+ {
+ return operator+=(-i);
+ }
+
+ /*!
+ @brief add to iterator
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ iter_impl operator+(difference_type i) const
+ {
+ auto result = *this;
+ result += i;
+ return result;
+ }
+
+ /*!
+ @brief addition of distance and iterator
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ friend iter_impl operator+(difference_type i, const iter_impl& it)
+ {
+ auto result = it;
+ result += i;
+ return result;
+ }
+
+ /*!
+ @brief subtract from iterator
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ iter_impl operator-(difference_type i) const
+ {
+ auto result = *this;
+ result -= i;
+ return result;
+ }
+
+ /*!
+ @brief return difference
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ difference_type operator-(const iter_impl& other) const
+ {
+ JSON_ASSERT(m_object != nullptr);
+
+ switch (m_object->m_type)
+ {
+ case value_t::object:
+ JSON_THROW(invalid_iterator::create(209, "cannot use offsets with object iterators"));
+
+ case value_t::array:
+ return m_it.array_iterator - other.m_it.array_iterator;
+
+ default:
+ return m_it.primitive_iterator - other.m_it.primitive_iterator;
+ }
+ }
+
+ /*!
+ @brief access to successor
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ reference operator[](difference_type n) const
+ {
+ JSON_ASSERT(m_object != nullptr);
+
+ switch (m_object->m_type)
+ {
+ case value_t::object:
+ JSON_THROW(invalid_iterator::create(208, "cannot use operator[] for object iterators"));
+
+ case value_t::array:
+ return *std::next(m_it.array_iterator, n);
+
+ case value_t::null:
+ JSON_THROW(invalid_iterator::create(214, "cannot get value"));
+
+ default:
+ {
+ if (JSON_HEDLEY_LIKELY(m_it.primitive_iterator.get_value() == -n))
+ {
+ return *m_object;
+ }
+
+ JSON_THROW(invalid_iterator::create(214, "cannot get value"));
+ }
+ }
+ }
+
+ /*!
+ @brief return the key of an object iterator
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ const typename object_t::key_type& key() const
+ {
+ JSON_ASSERT(m_object != nullptr);
+
+ if (JSON_HEDLEY_LIKELY(m_object->is_object()))
+ {
+ return m_it.object_iterator->first;
+ }
+
+ JSON_THROW(invalid_iterator::create(207, "cannot use key() for non-object iterators"));
+ }
+
+ /*!
+ @brief return the value of an iterator
+ @pre The iterator is initialized; i.e. `m_object != nullptr`.
+ */
+ reference value() const
+ {
+ return operator*();
+ }
+
+ private:
+ /// associated JSON instance
+ pointer m_object = nullptr;
+ /// the actual iterator of the associated instance
+ internal_iterator<typename std::remove_const<BasicJsonType>::type> m_it {};
+};
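+
+// How iter_impl is typically used (conceptual, based on the constructor and
+// set_begin()/set_end() above): basic_json::begin() and end() are assumed to
+// construct an iter_impl bound to the value and then call set_begin() or
+// set_end(). Dereferencing then dispatches on the value's type: object
+// iterators yield the mapped value, array iterators the element, and
+// primitive iterators the value itself.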
+} // namespace detail
+} // namespace nlohmann
+
+// #include <nlohmann/detail/iterators/iteration_proxy.hpp>
+
+// #include <nlohmann/detail/iterators/json_reverse_iterator.hpp>
+
+
+#include <cstddef> // ptrdiff_t
+#include <iterator> // reverse_iterator
+#include <utility> // declval
+
+namespace nlohmann
+{
+namespace detail
+{
+//////////////////////
+// reverse_iterator //
+//////////////////////
+
+/*!
+@brief a template for a reverse iterator class
+
+@tparam Base the base iterator type to reverse. Valid types are @ref
+iterator (to create @ref reverse_iterator) and @ref const_iterator (to
+create @ref const_reverse_iterator).
+
+@requirement The class satisfies the following concept requirements:
+-
+[BidirectionalIterator](https://en.cppreference.com/w/cpp/named_req/BidirectionalIterator):
+  The iterator can be moved in both directions (i.e. incremented and
+  decremented).
+- [OutputIterator](https://en.cppreference.com/w/cpp/named_req/OutputIterator):
+ It is possible to write to the pointed-to element (only if @a Base is
+ @ref iterator).
+
+@since version 1.0.0
+*/
+template<typename Base>
+class json_reverse_iterator : public std::reverse_iterator<Base>
+{
+ public:
+ using difference_type = std::ptrdiff_t;
+ /// shortcut to the reverse iterator adapter
+ using base_iterator = std::reverse_iterator<Base>;
+ /// the reference type for the pointed-to element
+ using reference = typename Base::reference;
+
+ /// create reverse iterator from iterator
+ explicit json_reverse_iterator(const typename base_iterator::iterator_type& it) noexcept
+ : base_iterator(it) {}
+
+ /// create reverse iterator from base class
+ explicit json_reverse_iterator(const base_iterator& it) noexcept : base_iterator(it) {}
+
+ /// post-increment (it++)
+ json_reverse_iterator const operator++(int)
+ {
+ return static_cast<json_reverse_iterator>(base_iterator::operator++(1));
+ }
+
+ /// pre-increment (++it)
+ json_reverse_iterator& operator++()
+ {
+ return static_cast<json_reverse_iterator&>(base_iterator::operator++());
+ }
+
+ /// post-decrement (it--)
+ json_reverse_iterator const operator--(int)
+ {
+ return static_cast<json_reverse_iterator>(base_iterator::operator--(1));
+ }
+
+ /// pre-decrement (--it)
+ json_reverse_iterator& operator--()
+ {
+ return static_cast<json_reverse_iterator&>(base_iterator::operator--());
+ }
+
+ /// add to iterator
+ json_reverse_iterator& operator+=(difference_type i)
+ {
+ return static_cast<json_reverse_iterator&>(base_iterator::operator+=(i));
+ }
+
+ /// add to iterator
+ json_reverse_iterator operator+(difference_type i) const
+ {
+ return static_cast<json_reverse_iterator>(base_iterator::operator+(i));
+ }
+
+ /// subtract from iterator
+ json_reverse_iterator operator-(difference_type i) const
+ {
+ return static_cast<json_reverse_iterator>(base_iterator::operator-(i));
+ }
+
+ /// return difference
+ difference_type operator-(const json_reverse_iterator& other) const
+ {
+ return base_iterator(*this) - base_iterator(other);
+ }
+
+ /// access to successor
+ reference operator[](difference_type n) const
+ {
+ return *(this->operator+(n));
+ }
+
+ /// return the key of an object iterator
+ auto key() const -> decltype(std::declval<Base>().key())
+ {
+ auto it = --this->base();
+ return it.key();
+ }
+
+ /// return the value of an iterator
+ reference value() const
+ {
+ auto it = --this->base();
+ return it.operator * ();
+ }
+};
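+
+// Note on key()/value() above: a std::reverse_iterator's base iterator points
+// one element past the element the reverse iterator refers to, so
+// `--this->base()` recovers the underlying forward iterator for the current
+// element before forwarding the call.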
+} // namespace detail
+} // namespace nlohmann
+
+// #include <nlohmann/detail/iterators/primitive_iterator.hpp>
+
+// #include <nlohmann/detail/json_pointer.hpp>
+
+
+#include <algorithm> // all_of
+#include <cctype> // isdigit
+#include <limits> // max
+#include <numeric> // accumulate
+#include <string> // string
+#include <utility> // move
+#include <vector> // vector
+
+// #include <nlohmann/detail/exceptions.hpp>
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+// #include <nlohmann/detail/value_t.hpp>
+
+
+namespace nlohmann
+{
+template<typename BasicJsonType>
+class json_pointer
+{
+ // allow basic_json to access private members
+ NLOHMANN_BASIC_JSON_TPL_DECLARATION
+ friend class basic_json;
+
+ public:
+ /*!
+ @brief create JSON pointer
+
+ Create a JSON pointer according to the syntax described in
+ [Section 3 of RFC6901](https://tools.ietf.org/html/rfc6901#section-3).
+
+ @param[in] s string representing the JSON pointer; if omitted, the empty
+ string is assumed which references the whole JSON value
+
+ @throw parse_error.107 if the given JSON pointer @a s is nonempty and does
+ not begin with a slash (`/`); see example below
+
+ @throw parse_error.108 if a tilde (`~`) in the given JSON pointer @a s is
+ not followed by `0` (representing `~`) or `1` (representing `/`); see
+ example below
+
+    @liveexample{The example shows the construction of several valid JSON pointers
+ as well as the exceptional behavior.,json_pointer}
+
+ @since version 2.0.0
+ */
+ explicit json_pointer(const std::string& s = "")
+ : reference_tokens(split(s))
+ {}
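+
+    // For illustration, a few pointers and the reference tokens they split into
+    // (escaping per RFC 6901):
+    //   json_pointer("")          -> no tokens (refers to the whole document)
+    //   json_pointer("/foo/0")    -> "foo", "0"
+    //   json_pointer("/a~1b/~0")  -> "a/b", "~"
+    //   json_pointer("foo")       -> throws parse_error.107 (missing leading '/')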
+
+ /*!
+ @brief return a string representation of the JSON pointer
+
+ @invariant For each JSON pointer `ptr`, it holds:
+ @code {.cpp}
+ ptr == json_pointer(ptr.to_string());
+ @endcode
+
+ @return a string representation of the JSON pointer
+
+ @liveexample{The example shows the result of `to_string`.,json_pointer__to_string}
+
+ @since version 2.0.0
+ */
+ std::string to_string() const
+ {
+ return std::accumulate(reference_tokens.begin(), reference_tokens.end(),
+ std::string{},
+ [](const std::string & a, const std::string & b)
+ {
+ return a + "/" + escape(b);
+ });
+ }
+
+ /// @copydoc to_string()
+ operator std::string() const
+ {
+ return to_string();
+ }
+
+ /*!
+ @brief append another JSON pointer at the end of this JSON pointer
+
+ @param[in] ptr JSON pointer to append
+ @return JSON pointer with @a ptr appended
+
+ @liveexample{The example shows the usage of `operator/=`.,json_pointer__operator_add}
+
+ @complexity Linear in the length of @a ptr.
+
+ @sa @ref operator/=(std::string) to append a reference token
+ @sa @ref operator/=(std::size_t) to append an array index
+ @sa @ref operator/(const json_pointer&, const json_pointer&) for a binary operator
+
+ @since version 3.6.0
+ */
+ json_pointer& operator/=(const json_pointer& ptr)
+ {
+ reference_tokens.insert(reference_tokens.end(),
+ ptr.reference_tokens.begin(),
+ ptr.reference_tokens.end());
+ return *this;
+ }
+
+ /*!
+ @brief append an unescaped reference token at the end of this JSON pointer
+
+ @param[in] token reference token to append
+ @return JSON pointer with @a token appended without escaping @a token
+
+ @liveexample{The example shows the usage of `operator/=`.,json_pointer__operator_add}
+
+ @complexity Amortized constant.
+
+ @sa @ref operator/=(const json_pointer&) to append a JSON pointer
+ @sa @ref operator/=(std::size_t) to append an array index
+ @sa @ref operator/(const json_pointer&, std::size_t) for a binary operator
+
+ @since version 3.6.0
+ */
+ json_pointer& operator/=(std::string token)
+ {
+ push_back(std::move(token));
+ return *this;
+ }
+
+ /*!
+ @brief append an array index at the end of this JSON pointer
+
+ @param[in] array_idx array index to append
+ @return JSON pointer with @a array_idx appended
+
+ @liveexample{The example shows the usage of `operator/=`.,json_pointer__operator_add}
+
+ @complexity Amortized constant.
+
+ @sa @ref operator/=(const json_pointer&) to append a JSON pointer
+ @sa @ref operator/=(std::string) to append a reference token
+ @sa @ref operator/(const json_pointer&, std::string) for a binary operator
+
+ @since version 3.6.0
+ */
+ json_pointer& operator/=(std::size_t array_idx)
+ {
+ return *this /= std::to_string(array_idx);
+ }
+
+ /*!
+ @brief create a new JSON pointer by appending the right JSON pointer at the end of the left JSON pointer
+
+ @param[in] lhs JSON pointer
+ @param[in] rhs JSON pointer
+ @return a new JSON pointer with @a rhs appended to @a lhs
+
+ @liveexample{The example shows the usage of `operator/`.,json_pointer__operator_add_binary}
+
+ @complexity Linear in the length of @a lhs and @a rhs.
+
+ @sa @ref operator/=(const json_pointer&) to append a JSON pointer
+
+ @since version 3.6.0
+ */
+ friend json_pointer operator/(const json_pointer& lhs,
+ const json_pointer& rhs)
+ {
+ return json_pointer(lhs) /= rhs;
+ }
+
+ /*!
+ @brief create a new JSON pointer by appending the unescaped token at the end of the JSON pointer
+
+ @param[in] ptr JSON pointer
+ @param[in] token reference token
+ @return a new JSON pointer with unescaped @a token appended to @a ptr
+
+ @liveexample{The example shows the usage of `operator/`.,json_pointer__operator_add_binary}
+
+ @complexity Linear in the length of @a ptr.
+
+ @sa @ref operator/=(std::string) to append a reference token
+
+ @since version 3.6.0
+ */
+ friend json_pointer operator/(const json_pointer& ptr, std::string token)
+ {
+ return json_pointer(ptr) /= std::move(token);
+ }
+
+ /*!
+ @brief create a new JSON pointer by appending the array-index-token at the end of the JSON pointer
+
+ @param[in] ptr JSON pointer
+ @param[in] array_idx array index
+ @return a new JSON pointer with @a array_idx appended to @a ptr
+
+ @liveexample{The example shows the usage of `operator/`.,json_pointer__operator_add_binary}
+
+ @complexity Linear in the length of @a ptr.
+
+ @sa @ref operator/=(std::size_t) to append an array index
+
+ @since version 3.6.0
+ */
+ friend json_pointer operator/(const json_pointer& ptr, std::size_t array_idx)
+ {
+ return json_pointer(ptr) /= array_idx;
+ }
+
+ /*!
+ @brief returns the parent of this JSON pointer
+
+ @return parent of this JSON pointer; in case this JSON pointer is the root,
+ the root itself is returned
+
+ @complexity Linear in the length of the JSON pointer.
+
+ @liveexample{The example shows the result of `parent_pointer` for different
+ JSON Pointers.,json_pointer__parent_pointer}
+
+ @since version 3.6.0
+ */
+ json_pointer parent_pointer() const
+ {
+ if (empty())
+ {
+ return *this;
+ }
+
+ json_pointer res = *this;
+ res.pop_back();
+ return res;
+ }
+
+ /*!
+ @brief remove last reference token
+
+ @pre not `empty()`
+
+ @liveexample{The example shows the usage of `pop_back`.,json_pointer__pop_back}
+
+ @complexity Constant.
+
+ @throw out_of_range.405 if JSON pointer has no parent
+
+ @since version 3.6.0
+ */
+ void pop_back()
+ {
+ if (JSON_HEDLEY_UNLIKELY(empty()))
+ {
+ JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent"));
+ }
+
+ reference_tokens.pop_back();
+ }
+
+ /*!
+ @brief return last reference token
+
+ @pre not `empty()`
+ @return last reference token
+
+ @liveexample{The example shows the usage of `back`.,json_pointer__back}
+
+ @complexity Constant.
+
+ @throw out_of_range.405 if JSON pointer has no parent
+
+ @since version 3.6.0
+ */
+ const std::string& back() const
+ {
+ if (JSON_HEDLEY_UNLIKELY(empty()))
+ {
+ JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent"));
+ }
+
+ return reference_tokens.back();
+ }
+
+ /*!
+ @brief append an unescaped token at the end of the reference pointer
+
+ @param[in] token token to add
+
+ @complexity Amortized constant.
+
+ @liveexample{The example shows the result of `push_back` for different
+ JSON Pointers.,json_pointer__push_back}
+
+ @since version 3.6.0
+ */
+ void push_back(const std::string& token)
+ {
+ reference_tokens.push_back(token);
+ }
+
+ /// @copydoc push_back(const std::string&)
+ void push_back(std::string&& token)
+ {
+ reference_tokens.push_back(std::move(token));
+ }
+
+ /*!
+ @brief return whether pointer points to the root document
+
+ @return true iff the JSON pointer points to the root document
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+ @liveexample{The example shows the result of `empty` for different JSON
+ Pointers.,json_pointer__empty}
+
+ @since version 3.6.0
+ */
+ bool empty() const noexcept
+ {
+ return reference_tokens.empty();
+ }
+
+ private:
+ /*!
+ @param[in] s reference token to be converted into an array index
+
+ @return integer representation of @a s
+
+ @throw parse_error.106 if an array index begins with '0'
+    @throw parse_error.109  if an array index does not begin with a digit
+ @throw out_of_range.404 if string @a s could not be converted to an integer
+ @throw out_of_range.410 if an array index exceeds size_type
+ */
+ static typename BasicJsonType::size_type array_index(const std::string& s)
+ {
+ using size_type = typename BasicJsonType::size_type;
+
+ // error condition (cf. RFC 6901, Sect. 4)
+ if (JSON_HEDLEY_UNLIKELY(s.size() > 1 && s[0] == '0'))
+ {
+ JSON_THROW(detail::parse_error::create(106, 0,
+ "array index '" + s +
+ "' must not begin with '0'"));
+ }
+
+ // error condition (cf. RFC 6901, Sect. 4)
+ if (JSON_HEDLEY_UNLIKELY(s.size() > 1 && !(s[0] >= '1' && s[0] <= '9')))
+ {
+ JSON_THROW(detail::parse_error::create(109, 0, "array index '" + s + "' is not a number"));
+ }
+
+ std::size_t processed_chars = 0;
+ unsigned long long res = 0;
+ JSON_TRY
+ {
+ res = std::stoull(s, &processed_chars);
+ }
+ JSON_CATCH(std::out_of_range&)
+ {
+ JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + s + "'"));
+ }
+
+ // check if the string was completely read
+ if (JSON_HEDLEY_UNLIKELY(processed_chars != s.size()))
+ {
+ JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + s + "'"));
+ }
+
+ // only triggered on special platforms (like 32bit), see also
+ // https://github.com/nlohmann/json/pull/2203
+ if (res >= static_cast<unsigned long long>((std::numeric_limits<size_type>::max)()))
+ {
+ JSON_THROW(detail::out_of_range::create(410, "array index " + s + " exceeds size_type")); // LCOV_EXCL_LINE
+ }
+
+ return static_cast<size_type>(res);
+ }
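+
+    // Illustrative behaviour of array_index:
+    //   array_index("0")   -> 0
+    //   array_index("42")  -> 42
+    //   array_index("007") -> throws parse_error.106 (leading zero)
+    //   array_index("x7")  -> throws parse_error.109 (not a number)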
+
+ json_pointer top() const
+ {
+ if (JSON_HEDLEY_UNLIKELY(empty()))
+ {
+ JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent"));
+ }
+
+ json_pointer result = *this;
+ result.reference_tokens = {reference_tokens[0]};
+ return result;
+ }
+
+ /*!
+ @brief create and return a reference to the pointed to value
+
+ @complexity Linear in the number of reference tokens.
+
+ @throw parse_error.109 if array index is not a number
+ @throw type_error.313 if value cannot be unflattened
+ */
+ BasicJsonType& get_and_create(BasicJsonType& j) const
+ {
+ auto result = &j;
+
+ // in case no reference tokens exist, return a reference to the JSON value
+ // j which will be overwritten by a primitive value
+ for (const auto& reference_token : reference_tokens)
+ {
+ switch (result->type())
+ {
+ case detail::value_t::null:
+ {
+ if (reference_token == "0")
+ {
+ // start a new array if reference token is 0
+ result = &result->operator[](0);
+ }
+ else
+ {
+ // start a new object otherwise
+ result = &result->operator[](reference_token);
+ }
+ break;
+ }
+
+ case detail::value_t::object:
+ {
+ // create an entry in the object
+ result = &result->operator[](reference_token);
+ break;
+ }
+
+ case detail::value_t::array:
+ {
+ // create an entry in the array
+ result = &result->operator[](array_index(reference_token));
+ break;
+ }
+
+ /*
+ The following code is only reached if there exists a reference
+ token _and_ the current value is primitive. In this case, we have
+ an error situation, because primitive values may only occur as
+ single value; that is, with an empty list of reference tokens.
+ */
+ default:
+ JSON_THROW(detail::type_error::create(313, "invalid value to unflatten"));
+ }
+ }
+
+ return *result;
+ }
+
+ /*!
+ @brief return a reference to the pointed to value
+
+ @note This version does not throw if a value is not present, but tries to
+ create nested values instead. For instance, calling this function
+ with pointer `"/this/that"` on a null value is equivalent to calling
+ `operator[]("this").operator[]("that")` on that value, effectively
+ changing the null value to an object.
+
+ @param[in] ptr a JSON value
+
+ @return reference to the JSON value pointed to by the JSON pointer
+
+ @complexity Linear in the length of the JSON pointer.
+
+ @throw parse_error.106 if an array index begins with '0'
+ @throw parse_error.109 if an array index was not a number
+ @throw out_of_range.404 if the JSON pointer can not be resolved
+ */
+ BasicJsonType& get_unchecked(BasicJsonType* ptr) const
+ {
+ for (const auto& reference_token : reference_tokens)
+ {
+ // convert null values to arrays or objects before continuing
+ if (ptr->is_null())
+ {
+ // check if reference token is a number
+ const bool nums =
+ std::all_of(reference_token.begin(), reference_token.end(),
+ [](const unsigned char x)
+ {
+ return std::isdigit(x);
+ });
+
+ // change value to array for numbers or "-" or to object otherwise
+ *ptr = (nums || reference_token == "-")
+ ? detail::value_t::array
+ : detail::value_t::object;
+ }
+
+ switch (ptr->type())
+ {
+ case detail::value_t::object:
+ {
+ // use unchecked object access
+ ptr = &ptr->operator[](reference_token);
+ break;
+ }
+
+ case detail::value_t::array:
+ {
+ if (reference_token == "-")
+ {
+ // explicitly treat "-" as index beyond the end
+ ptr = &ptr->operator[](ptr->m_value.array->size());
+ }
+ else
+ {
+ // convert array index to number; unchecked access
+ ptr = &ptr->operator[](array_index(reference_token));
+ }
+ break;
+ }
+
+ default:
+ JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'"));
+ }
+ }
+
+ return *ptr;
+ }
+
+ /*!
+ @throw parse_error.106 if an array index begins with '0'
+ @throw parse_error.109 if an array index was not a number
+ @throw out_of_range.402 if the array index '-' is used
+ @throw out_of_range.404 if the JSON pointer can not be resolved
+ */
+ BasicJsonType& get_checked(BasicJsonType* ptr) const
+ {
+ for (const auto& reference_token : reference_tokens)
+ {
+ switch (ptr->type())
+ {
+ case detail::value_t::object:
+ {
+ // note: at performs range check
+ ptr = &ptr->at(reference_token);
+ break;
+ }
+
+ case detail::value_t::array:
+ {
+ if (JSON_HEDLEY_UNLIKELY(reference_token == "-"))
+ {
+ // "-" always fails the range check
+ JSON_THROW(detail::out_of_range::create(402,
+ "array index '-' (" + std::to_string(ptr->m_value.array->size()) +
+ ") is out of range"));
+ }
+
+ // note: at performs range check
+ ptr = &ptr->at(array_index(reference_token));
+ break;
+ }
+
+ default:
+ JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'"));
+ }
+ }
+
+ return *ptr;
+ }
+
+ /*!
+ @brief return a const reference to the pointed to value
+
+ @param[in] ptr a JSON value
+
+ @return const reference to the JSON value pointed to by the JSON
+ pointer
+
+ @throw parse_error.106 if an array index begins with '0'
+ @throw parse_error.109 if an array index was not a number
+ @throw out_of_range.402 if the array index '-' is used
+ @throw out_of_range.404 if the JSON pointer can not be resolved
+ */
+ const BasicJsonType& get_unchecked(const BasicJsonType* ptr) const
+ {
+ for (const auto& reference_token : reference_tokens)
+ {
+ switch (ptr->type())
+ {
+ case detail::value_t::object:
+ {
+ // use unchecked object access
+ ptr = &ptr->operator[](reference_token);
+ break;
+ }
+
+ case detail::value_t::array:
+ {
+ if (JSON_HEDLEY_UNLIKELY(reference_token == "-"))
+ {
+ // "-" cannot be used for const access
+ JSON_THROW(detail::out_of_range::create(402,
+ "array index '-' (" + std::to_string(ptr->m_value.array->size()) +
+ ") is out of range"));
+ }
+
+ // use unchecked array access
+ ptr = &ptr->operator[](array_index(reference_token));
+ break;
+ }
+
+ default:
+ JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'"));
+ }
+ }
+
+ return *ptr;
+ }
+
+ /*!
+ @throw parse_error.106 if an array index begins with '0'
+ @throw parse_error.109 if an array index was not a number
+ @throw out_of_range.402 if the array index '-' is used
+ @throw out_of_range.404 if the JSON pointer can not be resolved
+ */
+ const BasicJsonType& get_checked(const BasicJsonType* ptr) const
+ {
+ for (const auto& reference_token : reference_tokens)
+ {
+ switch (ptr->type())
+ {
+ case detail::value_t::object:
+ {
+ // note: at performs range check
+ ptr = &ptr->at(reference_token);
+ break;
+ }
+
+ case detail::value_t::array:
+ {
+ if (JSON_HEDLEY_UNLIKELY(reference_token == "-"))
+ {
+ // "-" always fails the range check
+ JSON_THROW(detail::out_of_range::create(402,
+ "array index '-' (" + std::to_string(ptr->m_value.array->size()) +
+ ") is out of range"));
+ }
+
+ // note: at performs range check
+ ptr = &ptr->at(array_index(reference_token));
+ break;
+ }
+
+ default:
+ JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'"));
+ }
+ }
+
+ return *ptr;
+ }
+
+ /*!
+ @throw parse_error.106 if an array index begins with '0'
+ @throw parse_error.109 if an array index was not a number
+ */
+ bool contains(const BasicJsonType* ptr) const
+ {
+ for (const auto& reference_token : reference_tokens)
+ {
+ switch (ptr->type())
+ {
+ case detail::value_t::object:
+ {
+ if (!ptr->contains(reference_token))
+ {
+ // we did not find the key in the object
+ return false;
+ }
+
+ ptr = &ptr->operator[](reference_token);
+ break;
+ }
+
+ case detail::value_t::array:
+ {
+ if (JSON_HEDLEY_UNLIKELY(reference_token == "-"))
+ {
+ // "-" always fails the range check
+ return false;
+ }
+ if (JSON_HEDLEY_UNLIKELY(reference_token.size() == 1 && !("0" <= reference_token && reference_token <= "9")))
+ {
+ // invalid char
+ return false;
+ }
+ if (JSON_HEDLEY_UNLIKELY(reference_token.size() > 1))
+ {
+ if (JSON_HEDLEY_UNLIKELY(!('1' <= reference_token[0] && reference_token[0] <= '9')))
+ {
+ // first char should be between '1' and '9'
+ return false;
+ }
+ for (std::size_t i = 1; i < reference_token.size(); i++)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!('0' <= reference_token[i] && reference_token[i] <= '9')))
+ {
+ // other char should be between '0' and '9'
+ return false;
+ }
+ }
+ }
+
+ const auto idx = array_index(reference_token);
+ if (idx >= ptr->size())
+ {
+ // index out of range
+ return false;
+ }
+
+ ptr = &ptr->operator[](idx);
+ break;
+ }
+
+ default:
+ {
+ // we do not expect primitive values if there is still a
+ // reference token to process
+ return false;
+ }
+ }
+ }
+
+ // no reference token left means we found a primitive value
+ return true;
+ }
+
+ /*!
+ @brief split the string input to reference tokens
+
+ @note This function is only called by the json_pointer constructor.
+ All exceptions below are documented there.
+
+    @throw parse_error.107 if the pointer is nonempty and does not begin with '/'
+ @throw parse_error.108 if character '~' is not followed by '0' or '1'
+ */
+ static std::vector<std::string> split(const std::string& reference_string)
+ {
+ std::vector<std::string> result;
+
+ // special case: empty reference string -> no reference tokens
+ if (reference_string.empty())
+ {
+ return result;
+ }
+
+ // check if nonempty reference string begins with slash
+ if (JSON_HEDLEY_UNLIKELY(reference_string[0] != '/'))
+ {
+ JSON_THROW(detail::parse_error::create(107, 1,
+ "JSON pointer must be empty or begin with '/' - was: '" +
+ reference_string + "'"));
+ }
+
+ // extract the reference tokens:
+ // - slash: position of the last read slash (or end of string)
+ // - start: position after the previous slash
+ for (
+ // search for the first slash after the first character
+ std::size_t slash = reference_string.find_first_of('/', 1),
+ // set the beginning of the first reference token
+ start = 1;
+ // we can stop if start == 0 (if slash == std::string::npos)
+ start != 0;
+ // set the beginning of the next reference token
+ // (will eventually be 0 if slash == std::string::npos)
+ start = (slash == std::string::npos) ? 0 : slash + 1,
+ // find next slash
+ slash = reference_string.find_first_of('/', start))
+ {
+ // use the text between the beginning of the reference token
+ // (start) and the last slash (slash).
+ auto reference_token = reference_string.substr(start, slash - start);
+
+ // check reference tokens are properly escaped
+ for (std::size_t pos = reference_token.find_first_of('~');
+ pos != std::string::npos;
+ pos = reference_token.find_first_of('~', pos + 1))
+ {
+ JSON_ASSERT(reference_token[pos] == '~');
+
+ // ~ must be followed by 0 or 1
+ if (JSON_HEDLEY_UNLIKELY(pos == reference_token.size() - 1 ||
+ (reference_token[pos + 1] != '0' &&
+ reference_token[pos + 1] != '1')))
+ {
+ JSON_THROW(detail::parse_error::create(108, 0, "escape character '~' must be followed with '0' or '1'"));
+ }
+ }
+
+ // finally, store the reference token
+ unescape(reference_token);
+ result.push_back(reference_token);
+ }
+
+ return result;
+ }
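+
+    // For illustration, split("/a~1b/c~0d/-") yields the tokens "a/b", "c~d"
+    // and "-": the string is cut at every '/', and each token is unescaped
+    // afterwards.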
+
+ /*!
+ @brief replace all occurrences of a substring by another string
+
+ @param[in,out] s the string to manipulate; changed so that all
+ occurrences of @a f are replaced with @a t
+ @param[in] f the substring to replace with @a t
+ @param[in] t the string to replace @a f
+
+ @pre The search string @a f must not be empty. **This precondition is
+ enforced with an assertion.**
+
+ @since version 2.0.0
+ */
+ static void replace_substring(std::string& s, const std::string& f,
+ const std::string& t)
+ {
+ JSON_ASSERT(!f.empty());
+ for (auto pos = s.find(f); // find first occurrence of f
+ pos != std::string::npos; // make sure f was found
+ s.replace(pos, f.size(), t), // replace with t, and
+ pos = s.find(f, pos + t.size())) // find next occurrence of f
+ {}
+ }
+
+ /// escape "~" to "~0" and "/" to "~1"
+ static std::string escape(std::string s)
+ {
+ replace_substring(s, "~", "~0");
+ replace_substring(s, "/", "~1");
+ return s;
+ }
+
+    /// unescape "~1" to slash and "~0" to tilde (order is important!)
+ static void unescape(std::string& s)
+ {
+ replace_substring(s, "~1", "/");
+ replace_substring(s, "~0", "~");
+ }
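+
+    // The replacement order in unescape() matters: "~1" must be resolved before
+    // "~0". Otherwise a token such as "~01" (which encodes the literal "~1")
+    // would first become "~1" and then incorrectly turn into "/".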
+
+ /*!
+ @param[in] reference_string the reference string to the current value
+ @param[in] value the value to consider
+ @param[in,out] result the result object to insert values to
+
+ @note Empty objects or arrays are flattened to `null`.
+ */
+ static void flatten(const std::string& reference_string,
+ const BasicJsonType& value,
+ BasicJsonType& result)
+ {
+ switch (value.type())
+ {
+ case detail::value_t::array:
+ {
+ if (value.m_value.array->empty())
+ {
+ // flatten empty array as null
+ result[reference_string] = nullptr;
+ }
+ else
+ {
+ // iterate array and use index as reference string
+ for (std::size_t i = 0; i < value.m_value.array->size(); ++i)
+ {
+ flatten(reference_string + "/" + std::to_string(i),
+ value.m_value.array->operator[](i), result);
+ }
+ }
+ break;
+ }
+
+ case detail::value_t::object:
+ {
+ if (value.m_value.object->empty())
+ {
+ // flatten empty object as null
+ result[reference_string] = nullptr;
+ }
+ else
+ {
+ // iterate object and use keys as reference string
+ for (const auto& element : *value.m_value.object)
+ {
+ flatten(reference_string + "/" + escape(element.first), element.second, result);
+ }
+ }
+ break;
+ }
+
+ default:
+ {
+ // add primitive value with its reference string
+ result[reference_string] = value;
+ break;
+ }
+ }
+ }
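+
+    // For illustration, flattening {"a": {"b": 1, "c": [2, 3]}, "d": []} yields
+    // {"/a/b": 1, "/a/c/0": 2, "/a/c/1": 3, "/d": null}: every key of the
+    // result is a JSON pointer string, and empty containers collapse to null.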
+
+ /*!
+ @param[in] value flattened JSON
+
+ @return unflattened JSON
+
+ @throw parse_error.109 if array index is not a number
+ @throw type_error.314 if value is not an object
+ @throw type_error.315 if object values are not primitive
+ @throw type_error.313 if value cannot be unflattened
+ */
+ static BasicJsonType
+ unflatten(const BasicJsonType& value)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!value.is_object()))
+ {
+ JSON_THROW(detail::type_error::create(314, "only objects can be unflattened"));
+ }
+
+ BasicJsonType result;
+
+ // iterate the JSON object values
+ for (const auto& element : *value.m_value.object)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!element.second.is_primitive()))
+ {
+ JSON_THROW(detail::type_error::create(315, "values in object must be primitive"));
+ }
+
+ // assign value to reference pointed to by JSON pointer; Note that if
+ // the JSON pointer is "" (i.e., points to the whole value), function
+ // get_and_create returns a reference to result itself. An assignment
+ // will then create a primitive value.
+ json_pointer(element.first).get_and_create(result) = element.second;
+ }
+
+ return result;
+ }
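+
+    // Note that flatten() followed by unflatten() is not a perfect round trip:
+    // empty arrays and objects are flattened to null and therefore come back as
+    // null values.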
+
+ /*!
+ @brief compares two JSON pointers for equality
+
+ @param[in] lhs JSON pointer to compare
+ @param[in] rhs JSON pointer to compare
+ @return whether @a lhs is equal to @a rhs
+
+ @complexity Linear in the length of the JSON pointer
+
+ @exceptionsafety No-throw guarantee: this function never throws exceptions.
+ */
+ friend bool operator==(json_pointer const& lhs,
+ json_pointer const& rhs) noexcept
+ {
+ return lhs.reference_tokens == rhs.reference_tokens;
+ }
+
+ /*!
+ @brief compares two JSON pointers for inequality
+
+ @param[in] lhs JSON pointer to compare
+ @param[in] rhs JSON pointer to compare
+ @return whether @a lhs is not equal @a rhs
+
+ @complexity Linear in the length of the JSON pointer
+
+ @exceptionsafety No-throw guarantee: this function never throws exceptions.
+ */
+ friend bool operator!=(json_pointer const& lhs,
+ json_pointer const& rhs) noexcept
+ {
+ return !(lhs == rhs);
+ }
+
+ /// the reference tokens
+ std::vector<std::string> reference_tokens;
+};
+} // namespace nlohmann
+
+// #include <nlohmann/detail/json_ref.hpp>
+
+
+#include <initializer_list>
+#include <utility>
+
+// #include <nlohmann/detail/meta/type_traits.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+template<typename BasicJsonType>
+class json_ref
+{
+ public:
+ using value_type = BasicJsonType;
+
+ json_ref(value_type&& value)
+ : owned_value(std::move(value))
+ , value_ref(&owned_value)
+ , is_rvalue(true)
+ {}
+
+ json_ref(const value_type& value)
+ : value_ref(const_cast<value_type*>(&value))
+ , is_rvalue(false)
+ {}
+
+ json_ref(std::initializer_list<json_ref> init)
+ : owned_value(init)
+ , value_ref(&owned_value)
+ , is_rvalue(true)
+ {}
+
+ template <
+ class... Args,
+ enable_if_t<std::is_constructible<value_type, Args...>::value, int> = 0 >
+ json_ref(Args && ... args)
+ : owned_value(std::forward<Args>(args)...)
+ , value_ref(&owned_value)
+ , is_rvalue(true)
+ {}
+
+ // class should be movable only
+ json_ref(json_ref&&) = default;
+ json_ref(const json_ref&) = delete;
+ json_ref& operator=(const json_ref&) = delete;
+ json_ref& operator=(json_ref&&) = delete;
+ ~json_ref() = default;
+
+ value_type moved_or_copied() const
+ {
+ if (is_rvalue)
+ {
+ return std::move(*value_ref);
+ }
+ return *value_ref;
+ }
+
+ value_type const& operator*() const
+ {
+ return *static_cast<value_type const*>(value_ref);
+ }
+
+ value_type const* operator->() const
+ {
+ return static_cast<value_type const*>(value_ref);
+ }
+
+ private:
+ mutable value_type owned_value = nullptr;
+ value_type* value_ref = nullptr;
+ const bool is_rvalue = true;
+};
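+
+// Usage note: json_ref lets initializer-list construction refer to existing
+// values without copying them eagerly. moved_or_copied() moves from
+// temporaries (is_rvalue == true) and copies when the json_ref merely points
+// at an existing lvalue.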
+} // namespace detail
+} // namespace nlohmann
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+// #include <nlohmann/detail/meta/cpp_future.hpp>
+
+// #include <nlohmann/detail/meta/type_traits.hpp>
+
+// #include <nlohmann/detail/output/binary_writer.hpp>
+
+
+#include <algorithm> // reverse
+#include <array> // array
+#include <cstdint> // uint8_t, uint16_t, uint32_t, uint64_t
+#include <cstring> // memcpy
+#include <limits> // numeric_limits
+#include <string> // string
+#include <cmath> // isnan, isinf
+
+// #include <nlohmann/detail/input/binary_reader.hpp>
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+// #include <nlohmann/detail/output/output_adapters.hpp>
+
+
+#include <algorithm> // copy
+#include <cstddef> // size_t
+#include <ios> // streamsize
+#include <iterator> // back_inserter
+#include <memory> // shared_ptr, make_shared
+#include <ostream> // basic_ostream
+#include <string> // basic_string
+#include <vector> // vector
+// #include <nlohmann/detail/macro_scope.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+/// abstract output adapter interface
+template<typename CharType> struct output_adapter_protocol
+{
+ virtual void write_character(CharType c) = 0;
+ virtual void write_characters(const CharType* s, std::size_t length) = 0;
+ virtual ~output_adapter_protocol() = default;
+};
+
+/// a type to simplify interfaces
+template<typename CharType>
+using output_adapter_t = std::shared_ptr<output_adapter_protocol<CharType>>;
+
+/// output adapter for byte vectors
+template<typename CharType>
+class output_vector_adapter : public output_adapter_protocol<CharType>
+{
+ public:
+ explicit output_vector_adapter(std::vector<CharType>& vec) noexcept
+ : v(vec)
+ {}
+
+ void write_character(CharType c) override
+ {
+ v.push_back(c);
+ }
+
+ JSON_HEDLEY_NON_NULL(2)
+ void write_characters(const CharType* s, std::size_t length) override
+ {
+ std::copy(s, s + length, std::back_inserter(v));
+ }
+
+ private:
+ std::vector<CharType>& v;
+};
+
+/// output adapter for output streams
+template<typename CharType>
+class output_stream_adapter : public output_adapter_protocol<CharType>
+{
+ public:
+ explicit output_stream_adapter(std::basic_ostream<CharType>& s) noexcept
+ : stream(s)
+ {}
+
+ void write_character(CharType c) override
+ {
+ stream.put(c);
+ }
+
+ JSON_HEDLEY_NON_NULL(2)
+ void write_characters(const CharType* s, std::size_t length) override
+ {
+ stream.write(s, static_cast<std::streamsize>(length));
+ }
+
+ private:
+ std::basic_ostream<CharType>& stream;
+};
+
+/// output adapter for basic_string
+template<typename CharType, typename StringType = std::basic_string<CharType>>
+class output_string_adapter : public output_adapter_protocol<CharType>
+{
+ public:
+ explicit output_string_adapter(StringType& s) noexcept
+ : str(s)
+ {}
+
+ void write_character(CharType c) override
+ {
+ str.push_back(c);
+ }
+
+ JSON_HEDLEY_NON_NULL(2)
+ void write_characters(const CharType* s, std::size_t length) override
+ {
+ str.append(s, length);
+ }
+
+ private:
+ StringType& str;
+};
+
+template<typename CharType, typename StringType = std::basic_string<CharType>>
+class output_adapter
+{
+ public:
+ output_adapter(std::vector<CharType>& vec)
+ : oa(std::make_shared<output_vector_adapter<CharType>>(vec)) {}
+
+ output_adapter(std::basic_ostream<CharType>& s)
+ : oa(std::make_shared<output_stream_adapter<CharType>>(s)) {}
+
+ output_adapter(StringType& s)
+ : oa(std::make_shared<output_string_adapter<CharType, StringType>>(s)) {}
+
+ operator output_adapter_t<CharType>()
+ {
+ return oa;
+ }
+
+ private:
+ output_adapter_t<CharType> oa = nullptr;
+};
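+
+// A minimal usage sketch: output_adapter erases the concrete sink behind the
+// shared output_adapter_t protocol pointer.
+//
+//   std::string out;
+//   detail::output_adapter<char> adapter(out);    // wraps output_string_adapter
+//   detail::output_adapter_t<char> oa = adapter;  // implicit conversion
+//   oa->write_characters("abc", 3);               // appends to `out`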
+} // namespace detail
+} // namespace nlohmann
+
+
+namespace nlohmann
+{
+namespace detail
+{
+///////////////////
+// binary writer //
+///////////////////
+
+/*!
+@brief serialization to CBOR and MessagePack values
+*/
+template<typename BasicJsonType, typename CharType>
+class binary_writer
+{
+ using string_t = typename BasicJsonType::string_t;
+ using binary_t = typename BasicJsonType::binary_t;
+ using number_float_t = typename BasicJsonType::number_float_t;
+
+ public:
+ /*!
+ @brief create a binary writer
+
+ @param[in] adapter output adapter to write to
+ */
+ explicit binary_writer(output_adapter_t<CharType> adapter) : oa(adapter)
+ {
+ JSON_ASSERT(oa);
+ }
+
+ /*!
+ @param[in] j JSON value to serialize
+ @pre j.type() == value_t::object
+ */
+ void write_bson(const BasicJsonType& j)
+ {
+ switch (j.type())
+ {
+ case value_t::object:
+ {
+ write_bson_object(*j.m_value.object);
+ break;
+ }
+
+ default:
+ {
+ JSON_THROW(type_error::create(317, "to serialize to BSON, top-level type must be object, but is " + std::string(j.type_name())));
+ }
+ }
+ }
+
+ /*!
+ @param[in] j JSON value to serialize
+ */
+ void write_cbor(const BasicJsonType& j)
+ {
+ switch (j.type())
+ {
+ case value_t::null:
+ {
+ oa->write_character(to_char_type(0xF6));
+ break;
+ }
+
+ case value_t::boolean:
+ {
+ oa->write_character(j.m_value.boolean
+ ? to_char_type(0xF5)
+ : to_char_type(0xF4));
+ break;
+ }
+
+ case value_t::number_integer:
+ {
+ if (j.m_value.number_integer >= 0)
+ {
+ // CBOR does not differentiate between positive signed
+ // integers and unsigned integers. Therefore, we use the
+ // code from the value_t::number_unsigned case here.
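+ // For example, 500 falls into the uint16 branch below and is
+ // emitted as the three bytes 0x19 0x01 0xF4.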
+ if (j.m_value.number_integer <= 0x17)
+ {
+ write_number(static_cast<std::uint8_t>(j.m_value.number_integer));
+ }
+ else if (j.m_value.number_integer <= (std::numeric_limits<std::uint8_t>::max)())
+ {
+ oa->write_character(to_char_type(0x18));
+ write_number(static_cast<std::uint8_t>(j.m_value.number_integer));
+ }
+ else if (j.m_value.number_integer <= (std::numeric_limits<std::uint16_t>::max)())
+ {
+ oa->write_character(to_char_type(0x19));
+ write_number(static_cast<std::uint16_t>(j.m_value.number_integer));
+ }
+ else if (j.m_value.number_integer <= (std::numeric_limits<std::uint32_t>::max)())
+ {
+ oa->write_character(to_char_type(0x1A));
+ write_number(static_cast<std::uint32_t>(j.m_value.number_integer));
+ }
+ else
+ {
+ oa->write_character(to_char_type(0x1B));
+ write_number(static_cast<std::uint64_t>(j.m_value.number_integer));
+ }
+ }
+ else
+ {
+ // The conversions below encode the sign in the first
+ // byte, and the value is converted to a positive number.
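+ // For example, -500 yields positive_number = 499 and is emitted
+ // as 0x39 0x01 0xF3 (major type 1, uint16 payload).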
+ const auto positive_number = -1 - j.m_value.number_integer;
+ if (j.m_value.number_integer >= -24)
+ {
+ write_number(static_cast<std::uint8_t>(0x20 + positive_number));
+ }
+ else if (positive_number <= (std::numeric_limits<std::uint8_t>::max)())
+ {
+ oa->write_character(to_char_type(0x38));
+ write_number(static_cast<std::uint8_t>(positive_number));
+ }
+ else if (positive_number <= (std::numeric_limits<std::uint16_t>::max)())
+ {
+ oa->write_character(to_char_type(0x39));
+ write_number(static_cast<std::uint16_t>(positive_number));
+ }
+ else if (positive_number <= (std::numeric_limits<std::uint32_t>::max)())
+ {
+ oa->write_character(to_char_type(0x3A));
+ write_number(static_cast<std::uint32_t>(positive_number));
+ }
+ else
+ {
+ oa->write_character(to_char_type(0x3B));
+ write_number(static_cast<std::uint64_t>(positive_number));
+ }
+ }
+ break;
+ }
+
+ case value_t::number_unsigned:
+ {
+ if (j.m_value.number_unsigned <= 0x17)
+ {
+ write_number(static_cast<std::uint8_t>(j.m_value.number_unsigned));
+ }
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint8_t>::max)())
+ {
+ oa->write_character(to_char_type(0x18));
+ write_number(static_cast<std::uint8_t>(j.m_value.number_unsigned));
+ }
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint16_t>::max)())
+ {
+ oa->write_character(to_char_type(0x19));
+ write_number(static_cast<std::uint16_t>(j.m_value.number_unsigned));
+ }
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint32_t>::max)())
+ {
+ oa->write_character(to_char_type(0x1A));
+ write_number(static_cast<std::uint32_t>(j.m_value.number_unsigned));
+ }
+ else
+ {
+ oa->write_character(to_char_type(0x1B));
+ write_number(static_cast<std::uint64_t>(j.m_value.number_unsigned));
+ }
+ break;
+ }
+
+ case value_t::number_float:
+ {
+ if (std::isnan(j.m_value.number_float))
+ {
+ // NaN is 0xf97e00 in CBOR
+ oa->write_character(to_char_type(0xF9));
+ oa->write_character(to_char_type(0x7E));
+ oa->write_character(to_char_type(0x00));
+ }
+ else if (std::isinf(j.m_value.number_float))
+ {
+ // Infinity is 0xf97c00, -Infinity is 0xf9fc00
+ oa->write_character(to_char_type(0xf9));
+ oa->write_character(j.m_value.number_float > 0 ? to_char_type(0x7C) : to_char_type(0xFC));
+ oa->write_character(to_char_type(0x00));
+ }
+ else
+ {
+ write_compact_float(j.m_value.number_float, detail::input_format_t::cbor);
+ }
+ break;
+ }
+
+ case value_t::string:
+ {
+ // step 1: write control byte and the string length
+ const auto N = j.m_value.string->size();
+ if (N <= 0x17)
+ {
+ write_number(static_cast<std::uint8_t>(0x60 + N));
+ }
+ else if (N <= (std::numeric_limits<std::uint8_t>::max)())
+ {
+ oa->write_character(to_char_type(0x78));
+ write_number(static_cast<std::uint8_t>(N));
+ }
+ else if (N <= (std::numeric_limits<std::uint16_t>::max)())
+ {
+ oa->write_character(to_char_type(0x79));
+ write_number(static_cast<std::uint16_t>(N));
+ }
+ else if (N <= (std::numeric_limits<std::uint32_t>::max)())
+ {
+ oa->write_character(to_char_type(0x7A));
+ write_number(static_cast<std::uint32_t>(N));
+ }
+ // LCOV_EXCL_START
+ else if (N <= (std::numeric_limits<std::uint64_t>::max)())
+ {
+ oa->write_character(to_char_type(0x7B));
+ write_number(static_cast<std::uint64_t>(N));
+ }
+ // LCOV_EXCL_STOP
+
+ // step 2: write the string
+ oa->write_characters(
+ reinterpret_cast<const CharType*>(j.m_value.string->c_str()),
+ j.m_value.string->size());
+ break;
+ }
+
+ case value_t::array:
+ {
+ // step 1: write control byte and the array size
+ const auto N = j.m_value.array->size();
+ if (N <= 0x17)
+ {
+ write_number(static_cast<std::uint8_t>(0x80 + N));
+ }
+ else if (N <= (std::numeric_limits<std::uint8_t>::max)())
+ {
+ oa->write_character(to_char_type(0x98));
+ write_number(static_cast<std::uint8_t>(N));
+ }
+ else if (N <= (std::numeric_limits<std::uint16_t>::max)())
+ {
+ oa->write_character(to_char_type(0x99));
+ write_number(static_cast<std::uint16_t>(N));
+ }
+ else if (N <= (std::numeric_limits<std::uint32_t>::max)())
+ {
+ oa->write_character(to_char_type(0x9A));
+ write_number(static_cast<std::uint32_t>(N));
+ }
+ // LCOV_EXCL_START
+ else if (N <= (std::numeric_limits<std::uint64_t>::max)())
+ {
+ oa->write_character(to_char_type(0x9B));
+ write_number(static_cast<std::uint64_t>(N));
+ }
+ // LCOV_EXCL_STOP
+
+ // step 2: write each element
+ for (const auto& el : *j.m_value.array)
+ {
+ write_cbor(el);
+ }
+ break;
+ }
+
+ case value_t::binary:
+ {
+ if (j.m_value.binary->has_subtype())
+ {
+ write_number(static_cast<std::uint8_t>(0xd8));
+ write_number(j.m_value.binary->subtype());
+ }
+
+ // step 1: write control byte and the binary array size
+ const auto N = j.m_value.binary->size();
+ if (N <= 0x17)
+ {
+ write_number(static_cast<std::uint8_t>(0x40 + N));
+ }
+ else if (N <= (std::numeric_limits<std::uint8_t>::max)())
+ {
+ oa->write_character(to_char_type(0x58));
+ write_number(static_cast<std::uint8_t>(N));
+ }
+ else if (N <= (std::numeric_limits<std::uint16_t>::max)())
+ {
+ oa->write_character(to_char_type(0x59));
+ write_number(static_cast<std::uint16_t>(N));
+ }
+ else if (N <= (std::numeric_limits<std::uint32_t>::max)())
+ {
+ oa->write_character(to_char_type(0x5A));
+ write_number(static_cast<std::uint32_t>(N));
+ }
+ // LCOV_EXCL_START
+ else if (N <= (std::numeric_limits<std::uint64_t>::max)())
+ {
+ oa->write_character(to_char_type(0x5B));
+ write_number(static_cast<std::uint64_t>(N));
+ }
+ // LCOV_EXCL_STOP
+
+ // step 2: write each element
+ oa->write_characters(
+ reinterpret_cast<const CharType*>(j.m_value.binary->data()),
+ N);
+
+ break;
+ }
+
+ case value_t::object:
+ {
+ // step 1: write control byte and the object size
+ const auto N = j.m_value.object->size();
+ if (N <= 0x17)
+ {
+ write_number(static_cast<std::uint8_t>(0xA0 + N));
+ }
+ else if (N <= (std::numeric_limits<std::uint8_t>::max)())
+ {
+ oa->write_character(to_char_type(0xB8));
+ write_number(static_cast<std::uint8_t>(N));
+ }
+ else if (N <= (std::numeric_limits<std::uint16_t>::max)())
+ {
+ oa->write_character(to_char_type(0xB9));
+ write_number(static_cast<std::uint16_t>(N));
+ }
+ else if (N <= (std::numeric_limits<std::uint32_t>::max)())
+ {
+ oa->write_character(to_char_type(0xBA));
+ write_number(static_cast<std::uint32_t>(N));
+ }
+ // LCOV_EXCL_START
+ else if (N <= (std::numeric_limits<std::uint64_t>::max)())
+ {
+ oa->write_character(to_char_type(0xBB));
+ write_number(static_cast<std::uint64_t>(N));
+ }
+ // LCOV_EXCL_STOP
+
+ // step 2: write each element
+ for (const auto& el : *j.m_value.object)
+ {
+ write_cbor(el.first);
+ write_cbor(el.second);
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ /*!
+ @param[in] j JSON value to serialize
+ */
+ void write_msgpack(const BasicJsonType& j)
+ {
+ switch (j.type())
+ {
+ case value_t::null: // nil
+ {
+ oa->write_character(to_char_type(0xC0));
+ break;
+ }
+
+ case value_t::boolean: // true and false
+ {
+ oa->write_character(j.m_value.boolean
+ ? to_char_type(0xC3)
+ : to_char_type(0xC2));
+ break;
+ }
+
+ case value_t::number_integer:
+ {
+ if (j.m_value.number_integer >= 0)
+ {
+ // MessagePack does not differentiate between positive
+ // signed integers and unsigned integers. Therefore, we use
+ // the code from the value_t::number_unsigned case here.
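+ // For example, 300 is emitted as 0xCD 0x01 0x2C (uint 16).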
+ if (j.m_value.number_unsigned < 128)
+ {
+ // positive fixnum
+ write_number(static_cast<std::uint8_t>(j.m_value.number_integer));
+ }
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint8_t>::max)())
+ {
+ // uint 8
+ oa->write_character(to_char_type(0xCC));
+ write_number(static_cast<std::uint8_t>(j.m_value.number_integer));
+ }
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint16_t>::max)())
+ {
+ // uint 16
+ oa->write_character(to_char_type(0xCD));
+ write_number(static_cast<std::uint16_t>(j.m_value.number_integer));
+ }
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint32_t>::max)())
+ {
+ // uint 32
+ oa->write_character(to_char_type(0xCE));
+ write_number(static_cast<std::uint32_t>(j.m_value.number_integer));
+ }
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint64_t>::max)())
+ {
+ // uint 64
+ oa->write_character(to_char_type(0xCF));
+ write_number(static_cast<std::uint64_t>(j.m_value.number_integer));
+ }
+ }
+ else
+ {
+ if (j.m_value.number_integer >= -32)
+ {
+ // negative fixnum
+ write_number(static_cast<std::int8_t>(j.m_value.number_integer));
+ }
+ else if (j.m_value.number_integer >= (std::numeric_limits<std::int8_t>::min)() &&
+ j.m_value.number_integer <= (std::numeric_limits<std::int8_t>::max)())
+ {
+ // int 8
+ oa->write_character(to_char_type(0xD0));
+ write_number(static_cast<std::int8_t>(j.m_value.number_integer));
+ }
+ else if (j.m_value.number_integer >= (std::numeric_limits<std::int16_t>::min)() &&
+ j.m_value.number_integer <= (std::numeric_limits<std::int16_t>::max)())
+ {
+ // int 16
+ oa->write_character(to_char_type(0xD1));
+ write_number(static_cast<std::int16_t>(j.m_value.number_integer));
+ }
+ else if (j.m_value.number_integer >= (std::numeric_limits<std::int32_t>::min)() &&
+ j.m_value.number_integer <= (std::numeric_limits<std::int32_t>::max)())
+ {
+ // int 32
+ oa->write_character(to_char_type(0xD2));
+ write_number(static_cast<std::int32_t>(j.m_value.number_integer));
+ }
+ else if (j.m_value.number_integer >= (std::numeric_limits<std::int64_t>::min)() &&
+ j.m_value.number_integer <= (std::numeric_limits<std::int64_t>::max)())
+ {
+ // int 64
+ oa->write_character(to_char_type(0xD3));
+ write_number(static_cast<std::int64_t>(j.m_value.number_integer));
+ }
+ }
+ break;
+ }
+
+ case value_t::number_unsigned:
+ {
+ if (j.m_value.number_unsigned < 128)
+ {
+ // positive fixnum
+ write_number(static_cast<std::uint8_t>(j.m_value.number_integer));
+ }
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint8_t>::max)())
+ {
+ // uint 8
+ oa->write_character(to_char_type(0xCC));
+ write_number(static_cast<std::uint8_t>(j.m_value.number_integer));
+ }
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint16_t>::max)())
+ {
+ // uint 16
+ oa->write_character(to_char_type(0xCD));
+ write_number(static_cast<std::uint16_t>(j.m_value.number_integer));
+ }
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint32_t>::max)())
+ {
+ // uint 32
+ oa->write_character(to_char_type(0xCE));
+ write_number(static_cast<std::uint32_t>(j.m_value.number_integer));
+ }
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint64_t>::max)())
+ {
+ // uint 64
+ oa->write_character(to_char_type(0xCF));
+ write_number(static_cast<std::uint64_t>(j.m_value.number_integer));
+ }
+ break;
+ }
+
+ case value_t::number_float:
+ {
+ write_compact_float(j.m_value.number_float, detail::input_format_t::msgpack);
+ break;
+ }
+
+ case value_t::string:
+ {
+ // step 1: write control byte and the string length
+ const auto N = j.m_value.string->size();
+ if (N <= 31)
+ {
+ // fixstr
+ write_number(static_cast<std::uint8_t>(0xA0 | N));
+ }
+ else if (N <= (std::numeric_limits<std::uint8_t>::max)())
+ {
+ // str 8
+ oa->write_character(to_char_type(0xD9));
+ write_number(static_cast<std::uint8_t>(N));
+ }
+ else if (N <= (std::numeric_limits<std::uint16_t>::max)())
+ {
+ // str 16
+ oa->write_character(to_char_type(0xDA));
+ write_number(static_cast<std::uint16_t>(N));
+ }
+ else if (N <= (std::numeric_limits<std::uint32_t>::max)())
+ {
+ // str 32
+ oa->write_character(to_char_type(0xDB));
+ write_number(static_cast<std::uint32_t>(N));
+ }
+
+ // step 2: write the string
+ oa->write_characters(
+ reinterpret_cast<const CharType*>(j.m_value.string->c_str()),
+ j.m_value.string->size());
+ break;
+ }
+
+ case value_t::array:
+ {
+ // step 1: write control byte and the array size
+ const auto N = j.m_value.array->size();
+ if (N <= 15)
+ {
+ // fixarray
+ write_number(static_cast<std::uint8_t>(0x90 | N));
+ }
+ else if (N <= (std::numeric_limits<std::uint16_t>::max)())
+ {
+ // array 16
+ oa->write_character(to_char_type(0xDC));
+ write_number(static_cast<std::uint16_t>(N));
+ }
+ else if (N <= (std::numeric_limits<std::uint32_t>::max)())
+ {
+ // array 32
+ oa->write_character(to_char_type(0xDD));
+ write_number(static_cast<std::uint32_t>(N));
+ }
+
+ // step 2: write each element
+ for (const auto& el : *j.m_value.array)
+ {
+ write_msgpack(el);
+ }
+ break;
+ }
+
+ case value_t::binary:
+ {
+ // step 0: determine whether the binary type has a subtype; if so,
+ // the ext or fixext families are used instead of the bin types
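+ // For example, a 4-byte blob with subtype 7 is emitted as 0xD6 0x07
+ // followed by the 4 data bytes (fixext 4), while the same blob without
+ // a subtype is emitted as 0xC4 0x04 followed by the data (bin 8).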
+ const bool use_ext = j.m_value.binary->has_subtype();
+
+ // step 1: write control byte and the byte string length
+ const auto N = j.m_value.binary->size();
+ if (N <= (std::numeric_limits<std::uint8_t>::max)())
+ {
+ std::uint8_t output_type{};
+ bool fixed = true;
+ if (use_ext)
+ {
+ switch (N)
+ {
+ case 1:
+ output_type = 0xD4; // fixext 1
+ break;
+ case 2:
+ output_type = 0xD5; // fixext 2
+ break;
+ case 4:
+ output_type = 0xD6; // fixext 4
+ break;
+ case 8:
+ output_type = 0xD7; // fixext 8
+ break;
+ case 16:
+ output_type = 0xD8; // fixext 16
+ break;
+ default:
+ output_type = 0xC7; // ext 8
+ fixed = false;
+ break;
+ }
+
+ }
+ else
+ {
+ output_type = 0xC4; // bin 8
+ fixed = false;
+ }
+
+ oa->write_character(to_char_type(output_type));
+ if (!fixed)
+ {
+ write_number(static_cast<std::uint8_t>(N));
+ }
+ }
+ else if (N <= (std::numeric_limits<std::uint16_t>::max)())
+ {
+ std::uint8_t output_type = use_ext
+ ? 0xC8 // ext 16
+ : 0xC5; // bin 16
+
+ oa->write_character(to_char_type(output_type));
+ write_number(static_cast<std::uint16_t>(N));
+ }
+ else if (N <= (std::numeric_limits<std::uint32_t>::max)())
+ {
+ std::uint8_t output_type = use_ext
+ ? 0xC9 // ext 32
+ : 0xC6; // bin 32
+
+ oa->write_character(to_char_type(output_type));
+ write_number(static_cast<std::uint32_t>(N));
+ }
+
+ // step 1.5: if this is an ext type, write the subtype
+ if (use_ext)
+ {
+ write_number(static_cast<std::int8_t>(j.m_value.binary->subtype()));
+ }
+
+ // step 2: write the byte string
+ oa->write_characters(
+ reinterpret_cast<const CharType*>(j.m_value.binary->data()),
+ N);
+
+ break;
+ }
+
+ case value_t::object:
+ {
+ // step 1: write control byte and the object size
+ const auto N = j.m_value.object->size();
+ if (N <= 15)
+ {
+ // fixmap
+ write_number(static_cast<std::uint8_t>(0x80 | (N & 0xF)));
+ }
+ else if (N <= (std::numeric_limits<std::uint16_t>::max)())
+ {
+ // map 16
+ oa->write_character(to_char_type(0xDE));
+ write_number(static_cast<std::uint16_t>(N));
+ }
+ else if (N <= (std::numeric_limits<std::uint32_t>::max)())
+ {
+ // map 32
+ oa->write_character(to_char_type(0xDF));
+ write_number(static_cast<std::uint32_t>(N));
+ }
+
+ // step 2: write each element
+ for (const auto& el : *j.m_value.object)
+ {
+ write_msgpack(el.first);
+ write_msgpack(el.second);
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ /*!
+ @param[in] j JSON value to serialize
+ @param[in] use_count whether to use '#' prefixes (optimized format)
+ @param[in] use_type whether to use '$' prefixes (optimized format)
+ @param[in] add_prefix whether prefixes need to be used for this value
+ */
+ void write_ubjson(const BasicJsonType& j, const bool use_count,
+ const bool use_type, const bool add_prefix = true)
+ {
+ switch (j.type())
+ {
+ case value_t::null:
+ {
+ if (add_prefix)
+ {
+ oa->write_character(to_char_type('Z'));
+ }
+ break;
+ }
+
+ case value_t::boolean:
+ {
+ if (add_prefix)
+ {
+ oa->write_character(j.m_value.boolean
+ ? to_char_type('T')
+ : to_char_type('F'));
+ }
+ break;
+ }
+
+ case value_t::number_integer:
+ {
+ write_number_with_ubjson_prefix(j.m_value.number_integer, add_prefix);
+ break;
+ }
+
+ case value_t::number_unsigned:
+ {
+ write_number_with_ubjson_prefix(j.m_value.number_unsigned, add_prefix);
+ break;
+ }
+
+ case value_t::number_float:
+ {
+ write_number_with_ubjson_prefix(j.m_value.number_float, add_prefix);
+ break;
+ }
+
+ case value_t::string:
+ {
+ if (add_prefix)
+ {
+ oa->write_character(to_char_type('S'));
+ }
+ write_number_with_ubjson_prefix(j.m_value.string->size(), true);
+ oa->write_characters(
+ reinterpret_cast<const CharType*>(j.m_value.string->c_str()),
+ j.m_value.string->size());
+ break;
+ }
+
+ case value_t::array:
+ {
+ if (add_prefix)
+ {
+ oa->write_character(to_char_type('['));
+ }
+
+ bool prefix_required = true;
+ if (use_type && !j.m_value.array->empty())
+ {
+ JSON_ASSERT(use_count);
+ const CharType first_prefix = ubjson_prefix(j.front());
+ const bool same_prefix = std::all_of(j.begin() + 1, j.end(),
+ [this, first_prefix](const BasicJsonType & v)
+ {
+ return ubjson_prefix(v) == first_prefix;
+ });
+
+ if (same_prefix)
+ {
+ prefix_required = false;
+ oa->write_character(to_char_type('$'));
+ oa->write_character(first_prefix);
+ }
+ }
+
+ if (use_count)
+ {
+ oa->write_character(to_char_type('#'));
+ write_number_with_ubjson_prefix(j.m_value.array->size(), true);
+ }
+
+ for (const auto& el : *j.m_value.array)
+ {
+ write_ubjson(el, use_count, use_type, prefix_required);
+ }
+
+ if (!use_count)
+ {
+ oa->write_character(to_char_type(']'));
+ }
+
+ break;
+ }
+
+ case value_t::binary:
+ {
+ if (add_prefix)
+ {
+ oa->write_character(to_char_type('['));
+ }
+
+ if (use_type && !j.m_value.binary->empty())
+ {
+ JSON_ASSERT(use_count);
+ oa->write_character(to_char_type('$'));
+ oa->write_character('U');
+ }
+
+ if (use_count)
+ {
+ oa->write_character(to_char_type('#'));
+ write_number_with_ubjson_prefix(j.m_value.binary->size(), true);
+ }
+
+ if (use_type)
+ {
+ oa->write_characters(
+ reinterpret_cast<const CharType*>(j.m_value.binary->data()),
+ j.m_value.binary->size());
+ }
+ else
+ {
+ for (size_t i = 0; i < j.m_value.binary->size(); ++i)
+ {
+ oa->write_character(to_char_type('U'));
+ oa->write_character(j.m_value.binary->data()[i]);
+ }
+ }
+
+ if (!use_count)
+ {
+ oa->write_character(to_char_type(']'));
+ }
+
+ break;
+ }
+
+ case value_t::object:
+ {
+ if (add_prefix)
+ {
+ oa->write_character(to_char_type('{'));
+ }
+
+ bool prefix_required = true;
+ if (use_type && !j.m_value.object->empty())
+ {
+ JSON_ASSERT(use_count);
+ const CharType first_prefix = ubjson_prefix(j.front());
+ const bool same_prefix = std::all_of(j.begin(), j.end(),
+ [this, first_prefix](const BasicJsonType & v)
+ {
+ return ubjson_prefix(v) == first_prefix;
+ });
+
+ if (same_prefix)
+ {
+ prefix_required = false;
+ oa->write_character(to_char_type('$'));
+ oa->write_character(first_prefix);
+ }
+ }
+
+ if (use_count)
+ {
+ oa->write_character(to_char_type('#'));
+ write_number_with_ubjson_prefix(j.m_value.object->size(), true);
+ }
+
+ for (const auto& el : *j.m_value.object)
+ {
+ write_number_with_ubjson_prefix(el.first.size(), true);
+ oa->write_characters(
+ reinterpret_cast<const CharType*>(el.first.c_str()),
+ el.first.size());
+ write_ubjson(el.second, use_count, use_type, prefix_required);
+ }
+
+ if (!use_count)
+ {
+ oa->write_character(to_char_type('}'));
+ }
+
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ private:
+ //////////
+ // BSON //
+ //////////
+
+ /*!
+ @return The size of a BSON document entry header, including the id marker
+ and the entry name size (and its null-terminator).
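+ For a key such as "age" this amounts to 1 + 3 + 1 = 5 bytes.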
+ */
+ static std::size_t calc_bson_entry_header_size(const string_t& name)
+ {
+ const auto it = name.find(static_cast<typename string_t::value_type>(0));
+ if (JSON_HEDLEY_UNLIKELY(it != BasicJsonType::string_t::npos))
+ {
+ JSON_THROW(out_of_range::create(409,
+ "BSON key cannot contain code point U+0000 (at byte " + std::to_string(it) + ")"));
+ }
+
+ return /*id*/ 1ul + name.size() + /*zero-terminator*/1u;
+ }
+
+ /*!
+ @brief Writes the given @a element_type and @a name to the output adapter
+ */
+ void write_bson_entry_header(const string_t& name,
+ const std::uint8_t element_type)
+ {
+ oa->write_character(to_char_type(element_type)); // element type marker
+ oa->write_characters(
+ reinterpret_cast<const CharType*>(name.c_str()),
+ name.size() + 1u);
+ }
+
+ /*!
+ @brief Writes a BSON element with key @a name and boolean value @a value
+ */
+ void write_bson_boolean(const string_t& name,
+ const bool value)
+ {
+ write_bson_entry_header(name, 0x08);
+ oa->write_character(value ? to_char_type(0x01) : to_char_type(0x00));
+ }
+
+ /*!
+ @brief Writes a BSON element with key @a name and double value @a value
+ */
+ void write_bson_double(const string_t& name,
+ const double value)
+ {
+ write_bson_entry_header(name, 0x01);
+ write_number<double, true>(value);
+ }
+
+ /*!
+ @return The size of the BSON-encoded string in @a value
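+ For example, the string "hi" occupies sizeof(std::int32_t) + 2 + 1 = 7 bytes.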
+ */
+ static std::size_t calc_bson_string_size(const string_t& value)
+ {
+ return sizeof(std::int32_t) + value.size() + 1ul;
+ }
+
+ /*!
+ @brief Writes a BSON element with key @a name and string value @a value
+ */
+ void write_bson_string(const string_t& name,
+ const string_t& value)
+ {
+ write_bson_entry_header(name, 0x02);
+
+ write_number<std::int32_t, true>(static_cast<std::int32_t>(value.size() + 1ul));
+ oa->write_characters(
+ reinterpret_cast<const CharType*>(value.c_str()),
+ value.size() + 1);
+ }
+
+ /*!
+ @brief Writes a BSON element with key @a name and null value
+ */
+ void write_bson_null(const string_t& name)
+ {
+ write_bson_entry_header(name, 0x0A);
+ }
+
+ /*!
+ @return The size of the BSON-encoded integer @a value
+ */
+ static std::size_t calc_bson_integer_size(const std::int64_t value)
+ {
+ return (std::numeric_limits<std::int32_t>::min)() <= value && value <= (std::numeric_limits<std::int32_t>::max)()
+ ? sizeof(std::int32_t)
+ : sizeof(std::int64_t);
+ }
+
+ /*!
+ @brief Writes a BSON element with key @a name and integer @a value
+ */
+ void write_bson_integer(const string_t& name,
+ const std::int64_t value)
+ {
+ if ((std::numeric_limits<std::int32_t>::min)() <= value && value <= (std::numeric_limits<std::int32_t>::max)())
+ {
+ write_bson_entry_header(name, 0x10); // int32
+ write_number<std::int32_t, true>(static_cast<std::int32_t>(value));
+ }
+ else
+ {
+ write_bson_entry_header(name, 0x12); // int64
+ write_number<std::int64_t, true>(static_cast<std::int64_t>(value));
+ }
+ }
+
+ /*!
+ @return The size of the BSON-encoded unsigned integer @a value
+ */
+ static constexpr std::size_t calc_bson_unsigned_size(const std::uint64_t value) noexcept
+ {
+ return (value <= static_cast<std::uint64_t>((std::numeric_limits<std::int32_t>::max)()))
+ ? sizeof(std::int32_t)
+ : sizeof(std::int64_t);
+ }
+
+ /*!
+ @brief Writes a BSON element with key @a name and unsigned @a value
+ */
+ void write_bson_unsigned(const string_t& name,
+ const std::uint64_t value)
+ {
+ if (value <= static_cast<std::uint64_t>((std::numeric_limits<std::int32_t>::max)()))
+ {
+ write_bson_entry_header(name, 0x10 /* int32 */);
+ write_number<std::int32_t, true>(static_cast<std::int32_t>(value));
+ }
+ else if (value <= static_cast<std::uint64_t>((std::numeric_limits<std::int64_t>::max)()))
+ {
+ write_bson_entry_header(name, 0x12 /* int64 */);
+ write_number<std::int64_t, true>(static_cast<std::int64_t>(value));
+ }
+ else
+ {
+ JSON_THROW(out_of_range::create(407, "integer number " + std::to_string(value) + " cannot be represented by BSON as it does not fit int64"));
+ }
+ }
+
+ /*!
+ @brief Writes a BSON element with key @a name and object @a value
+ */
+ void write_bson_object_entry(const string_t& name,
+ const typename BasicJsonType::object_t& value)
+ {
+ write_bson_entry_header(name, 0x03); // object
+ write_bson_object(value);
+ }
+
+ /*!
+ @return The size of the BSON-encoded array @a value
+ */
+ static std::size_t calc_bson_array_size(const typename BasicJsonType::array_t& value)
+ {
+ std::size_t array_index = 0ul;
+
+ const std::size_t embedded_document_size = std::accumulate(std::begin(value), std::end(value), std::size_t(0), [&array_index](std::size_t result, const typename BasicJsonType::array_t::value_type & el)
+ {
+ return result + calc_bson_element_size(std::to_string(array_index++), el);
+ });
+
+ return sizeof(std::int32_t) + embedded_document_size + 1ul;
+ }
+
+ /*!
+ @return The size of the BSON-encoded binary array @a value
+ */
+ static std::size_t calc_bson_binary_size(const typename BasicJsonType::binary_t& value)
+ {
+ return sizeof(std::int32_t) + value.size() + 1ul;
+ }
+
+ /*!
+ @brief Writes a BSON element with key @a name and array @a value
+ */
+ void write_bson_array(const string_t& name,
+ const typename BasicJsonType::array_t& value)
+ {
+ write_bson_entry_header(name, 0x04); // array
+ write_number<std::int32_t, true>(static_cast<std::int32_t>(calc_bson_array_size(value)));
+
+ std::size_t array_index = 0ul;
+
+ for (const auto& el : value)
+ {
+ write_bson_element(std::to_string(array_index++), el);
+ }
+
+ oa->write_character(to_char_type(0x00));
+ }
+
+ /*!
+ @brief Writes a BSON element with key @a name and binary value @a value
+ */
+ void write_bson_binary(const string_t& name,
+ const binary_t& value)
+ {
+ write_bson_entry_header(name, 0x05);
+
+ write_number<std::int32_t, true>(static_cast<std::int32_t>(value.size()));
+ write_number(value.has_subtype() ? value.subtype() : std::uint8_t(0x00));
+
+ oa->write_characters(reinterpret_cast<const CharType*>(value.data()), value.size());
+ }
+
+ /*!
+ @brief Calculates the size necessary to serialize the JSON value @a j with its @a name
+ @return The calculated size for the BSON document entry for @a j with the given @a name.
+ */
+ static std::size_t calc_bson_element_size(const string_t& name,
+ const BasicJsonType& j)
+ {
+ const auto header_size = calc_bson_entry_header_size(name);
+ switch (j.type())
+ {
+ case value_t::object:
+ return header_size + calc_bson_object_size(*j.m_value.object);
+
+ case value_t::array:
+ return header_size + calc_bson_array_size(*j.m_value.array);
+
+ case value_t::binary:
+ return header_size + calc_bson_binary_size(*j.m_value.binary);
+
+ case value_t::boolean:
+ return header_size + 1ul;
+
+ case value_t::number_float:
+ return header_size + 8ul;
+
+ case value_t::number_integer:
+ return header_size + calc_bson_integer_size(j.m_value.number_integer);
+
+ case value_t::number_unsigned:
+ return header_size + calc_bson_unsigned_size(j.m_value.number_unsigned);
+
+ case value_t::string:
+ return header_size + calc_bson_string_size(*j.m_value.string);
+
+ case value_t::null:
+ return header_size + 0ul;
+
+ // LCOV_EXCL_START
+ default:
+ JSON_ASSERT(false);
+ return 0ul;
+ // LCOV_EXCL_STOP
+ }
+ }
+
+ /*!
+ @brief Serializes the JSON value @a j to BSON and associates it with the
+ key @a name.
+ @param name The name to associate with the JSON entity @a j within the
+ current BSON document
+ */
+ void write_bson_element(const string_t& name,
+ const BasicJsonType& j)
+ {
+ switch (j.type())
+ {
+ case value_t::object:
+ return write_bson_object_entry(name, *j.m_value.object);
+
+ case value_t::array:
+ return write_bson_array(name, *j.m_value.array);
+
+ case value_t::binary:
+ return write_bson_binary(name, *j.m_value.binary);
+
+ case value_t::boolean:
+ return write_bson_boolean(name, j.m_value.boolean);
+
+ case value_t::number_float:
+ return write_bson_double(name, j.m_value.number_float);
+
+ case value_t::number_integer:
+ return write_bson_integer(name, j.m_value.number_integer);
+
+ case value_t::number_unsigned:
+ return write_bson_unsigned(name, j.m_value.number_unsigned);
+
+ case value_t::string:
+ return write_bson_string(name, *j.m_value.string);
+
+ case value_t::null:
+ return write_bson_null(name);
+
+ // LCOV_EXCL_START
+ default:
+ JSON_ASSERT(false);
+ return;
+ // LCOV_EXCL_STOP
+ }
+ }
+
+ /*!
+ @brief Calculates the size of the BSON serialization of the given
+ JSON object @a value.
+ @param[in] value JSON object to serialize
+ */
+ static std::size_t calc_bson_object_size(const typename BasicJsonType::object_t& value)
+ {
+ std::size_t document_size = std::accumulate(value.begin(), value.end(), std::size_t(0),
+ [](size_t result, const typename BasicJsonType::object_t::value_type & el)
+ {
+ return result += calc_bson_element_size(el.first, el.second);
+ });
+
+ return sizeof(std::int32_t) + document_size + 1ul;
+ }
+
+ /*!
+ @param[in] value JSON object to serialize
+ */
+ void write_bson_object(const typename BasicJsonType::object_t& value)
+ {
+ write_number<std::int32_t, true>(static_cast<std::int32_t>(calc_bson_object_size(value)));
+
+ for (const auto& el : value)
+ {
+ write_bson_element(el.first, el.second);
+ }
+
+ oa->write_character(to_char_type(0x00));
+ }
+
+ //////////
+ // CBOR //
+ //////////
+
+ static constexpr CharType get_cbor_float_prefix(float /*unused*/)
+ {
+ return to_char_type(0xFA); // Single-Precision Float
+ }
+
+ static constexpr CharType get_cbor_float_prefix(double /*unused*/)
+ {
+ return to_char_type(0xFB); // Double-Precision Float
+ }
+
+ /////////////
+ // MsgPack //
+ /////////////
+
+ static constexpr CharType get_msgpack_float_prefix(float /*unused*/)
+ {
+ return to_char_type(0xCA); // float 32
+ }
+
+ static constexpr CharType get_msgpack_float_prefix(double /*unused*/)
+ {
+ return to_char_type(0xCB); // float 64
+ }
+
+ ////////////
+ // UBJSON //
+ ////////////
+
+ // UBJSON: write number (floating point)
+ template<typename NumberType, typename std::enable_if<
+ std::is_floating_point<NumberType>::value, int>::type = 0>
+ void write_number_with_ubjson_prefix(const NumberType n,
+ const bool add_prefix)
+ {
+ if (add_prefix)
+ {
+ oa->write_character(get_ubjson_float_prefix(n));
+ }
+ write_number(n);
+ }
+
+ // UBJSON: write number (unsigned integer)
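+ // (e.g. 200 is written as 'U' 0xC8, and 1000 as 'I' 0x03 0xE8)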
+ template<typename NumberType, typename std::enable_if<
+ std::is_unsigned<NumberType>::value, int>::type = 0>
+ void write_number_with_ubjson_prefix(const NumberType n,
+ const bool add_prefix)
+ {
+ if (n <= static_cast<std::uint64_t>((std::numeric_limits<std::int8_t>::max)()))
+ {
+ if (add_prefix)
+ {
+ oa->write_character(to_char_type('i')); // int8
+ }
+ write_number(static_cast<std::uint8_t>(n));
+ }
+ else if (n <= (std::numeric_limits<std::uint8_t>::max)())
+ {
+ if (add_prefix)
+ {
+ oa->write_character(to_char_type('U')); // uint8
+ }
+ write_number(static_cast<std::uint8_t>(n));
+ }
+ else if (n <= static_cast<std::uint64_t>((std::numeric_limits<std::int16_t>::max)()))
+ {
+ if (add_prefix)
+ {
+ oa->write_character(to_char_type('I')); // int16
+ }
+ write_number(static_cast<std::int16_t>(n));
+ }
+ else if (n <= static_cast<std::uint64_t>((std::numeric_limits<std::int32_t>::max)()))
+ {
+ if (add_prefix)
+ {
+ oa->write_character(to_char_type('l')); // int32
+ }
+ write_number(static_cast<std::int32_t>(n));
+ }
+ else if (n <= static_cast<std::uint64_t>((std::numeric_limits<std::int64_t>::max)()))
+ {
+ if (add_prefix)
+ {
+ oa->write_character(to_char_type('L')); // int64
+ }
+ write_number(static_cast<std::int64_t>(n));
+ }
+ else
+ {
+ if (add_prefix)
+ {
+ oa->write_character(to_char_type('H')); // high-precision number
+ }
+
+ const auto number = BasicJsonType(n).dump();
+ write_number_with_ubjson_prefix(number.size(), true);
+ for (std::size_t i = 0; i < number.size(); ++i)
+ {
+ oa->write_character(to_char_type(static_cast<std::uint8_t>(number[i])));
+ }
+ }
+ }
+
+ // UBJSON: write number (signed integer)
+ template < typename NumberType, typename std::enable_if <
+ std::is_signed<NumberType>::value&&
+ !std::is_floating_point<NumberType>::value, int >::type = 0 >
+ void write_number_with_ubjson_prefix(const NumberType n,
+ const bool add_prefix)
+ {
+ if ((std::numeric_limits<std::int8_t>::min)() <= n && n <= (std::numeric_limits<std::int8_t>::max)())
+ {
+ if (add_prefix)
+ {
+ oa->write_character(to_char_type('i')); // int8
+ }
+ write_number(static_cast<std::int8_t>(n));
+ }
+ else if (static_cast<std::int64_t>((std::numeric_limits<std::uint8_t>::min)()) <= n && n <= static_cast<std::int64_t>((std::numeric_limits<std::uint8_t>::max)()))
+ {
+ if (add_prefix)
+ {
+ oa->write_character(to_char_type('U')); // uint8
+ }
+ write_number(static_cast<std::uint8_t>(n));
+ }
+ else if ((std::numeric_limits<std::int16_t>::min)() <= n && n <= (std::numeric_limits<std::int16_t>::max)())
+ {
+ if (add_prefix)
+ {
+ oa->write_character(to_char_type('I')); // int16
+ }
+ write_number(static_cast<std::int16_t>(n));
+ }
+ else if ((std::numeric_limits<std::int32_t>::min)() <= n && n <= (std::numeric_limits<std::int32_t>::max)())
+ {
+ if (add_prefix)
+ {
+ oa->write_character(to_char_type('l')); // int32
+ }
+ write_number(static_cast<std::int32_t>(n));
+ }
+ else if ((std::numeric_limits<std::int64_t>::min)() <= n && n <= (std::numeric_limits<std::int64_t>::max)())
+ {
+ if (add_prefix)
+ {
+ oa->write_character(to_char_type('L')); // int64
+ }
+ write_number(static_cast<std::int64_t>(n));
+ }
+ // LCOV_EXCL_START
+ else
+ {
+ if (add_prefix)
+ {
+ oa->write_character(to_char_type('H')); // high-precision number
+ }
+
+ const auto number = BasicJsonType(n).dump();
+ write_number_with_ubjson_prefix(number.size(), true);
+ for (std::size_t i = 0; i < number.size(); ++i)
+ {
+ oa->write_character(to_char_type(static_cast<std::uint8_t>(number[i])));
+ }
+ }
+ // LCOV_EXCL_STOP
+ }
+
+ /*!
+ @brief determine the type prefix of container values
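+ (e.g. every element of the array [1, 2, 3] yields 'i', enabling the
+ optimized '$' 'i' container type header)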
+ */
+ CharType ubjson_prefix(const BasicJsonType& j) const noexcept
+ {
+ switch (j.type())
+ {
+ case value_t::null:
+ return 'Z';
+
+ case value_t::boolean:
+ return j.m_value.boolean ? 'T' : 'F';
+
+ case value_t::number_integer:
+ {
+ if ((std::numeric_limits<std::int8_t>::min)() <= j.m_value.number_integer && j.m_value.number_integer <= (std::numeric_limits<std::int8_t>::max)())
+ {
+ return 'i';
+ }
+ if ((std::numeric_limits<std::uint8_t>::min)() <= j.m_value.number_integer && j.m_value.number_integer <= (std::numeric_limits<std::uint8_t>::max)())
+ {
+ return 'U';
+ }
+ if ((std::numeric_limits<std::int16_t>::min)() <= j.m_value.number_integer && j.m_value.number_integer <= (std::numeric_limits<std::int16_t>::max)())
+ {
+ return 'I';
+ }
+ if ((std::numeric_limits<std::int32_t>::min)() <= j.m_value.number_integer && j.m_value.number_integer <= (std::numeric_limits<std::int32_t>::max)())
+ {
+ return 'l';
+ }
+ if ((std::numeric_limits<std::int64_t>::min)() <= j.m_value.number_integer && j.m_value.number_integer <= (std::numeric_limits<std::int64_t>::max)())
+ {
+ return 'L';
+ }
+ // anything else is treated as high-precision number
+ return 'H'; // LCOV_EXCL_LINE
+ }
+
+ case value_t::number_unsigned:
+ {
+ if (j.m_value.number_unsigned <= static_cast<std::uint64_t>((std::numeric_limits<std::int8_t>::max)()))
+ {
+ return 'i';
+ }
+ if (j.m_value.number_unsigned <= static_cast<std::uint64_t>((std::numeric_limits<std::uint8_t>::max)()))
+ {
+ return 'U';
+ }
+ if (j.m_value.number_unsigned <= static_cast<std::uint64_t>((std::numeric_limits<std::int16_t>::max)()))
+ {
+ return 'I';
+ }
+ if (j.m_value.number_unsigned <= static_cast<std::uint64_t>((std::numeric_limits<std::int32_t>::max)()))
+ {
+ return 'l';
+ }
+ if (j.m_value.number_unsigned <= static_cast<std::uint64_t>((std::numeric_limits<std::int64_t>::max)()))
+ {
+ return 'L';
+ }
+ // anything else is treated as high-precision number
+ return 'H'; // LCOV_EXCL_LINE
+ }
+
+ case value_t::number_float:
+ return get_ubjson_float_prefix(j.m_value.number_float);
+
+ case value_t::string:
+ return 'S';
+
+ case value_t::array: // fallthrough
+ case value_t::binary:
+ return '[';
+
+ case value_t::object:
+ return '{';
+
+ default: // discarded values
+ return 'N';
+ }
+ }
+
+ static constexpr CharType get_ubjson_float_prefix(float /*unused*/)
+ {
+ return 'd'; // float 32
+ }
+
+ static constexpr CharType get_ubjson_float_prefix(double /*unused*/)
+ {
+ return 'D'; // float 64
+ }
+
+ ///////////////////////
+ // Utility functions //
+ ///////////////////////
+
+ /*
+ @brief write a number to the output
+ @param[in] n number of type @a NumberType
+ @tparam NumberType the type of the number
+ @tparam OutputIsLittleEndian Set to true if output data is
+ required to be little endian
+
+ @note This function needs to respect the system's endianness, because bytes
+ in CBOR, MessagePack, and UBJSON are stored in network order (big
+ endian) and therefore need reordering on little endian systems.
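+
+ For example, write_number(static_cast<std::uint16_t>(0x1234)) emits the
+ bytes 0x12 0x34 on a little-endian host, because the local copy of the
+ value is byte-reversed before being handed to the output adapter.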
+ */
+ template<typename NumberType, bool OutputIsLittleEndian = false>
+ void write_number(const NumberType n)
+ {
+ // step 1: write number to array of length NumberType
+ std::array<CharType, sizeof(NumberType)> vec;
+ std::memcpy(vec.data(), &n, sizeof(NumberType));
+
+ // step 2: write array to output (with possible reordering)
+ if (is_little_endian != OutputIsLittleEndian)
+ {
+ // reverse byte order prior to conversion if necessary
+ std::reverse(vec.begin(), vec.end());
+ }
+
+ oa->write_characters(vec.data(), sizeof(NumberType));
+ }
+
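+ // Writes n with the 32-bit float prefix when it fits into a float and
+ // survives a double -> float -> double round trip (e.g. 1.5 needs only
+ // 4 payload bytes, while 0.1 needs the full 8).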
+ void write_compact_float(const number_float_t n, detail::input_format_t format)
+ {
+ if (static_cast<double>(n) >= static_cast<double>(std::numeric_limits<float>::lowest()) &&
+ static_cast<double>(n) <= static_cast<double>((std::numeric_limits<float>::max)()) &&
+ static_cast<double>(static_cast<float>(n)) == static_cast<double>(n))
+ {
+ oa->write_character(format == detail::input_format_t::cbor
+ ? get_cbor_float_prefix(static_cast<float>(n))
+ : get_msgpack_float_prefix(static_cast<float>(n)));
+ write_number(static_cast<float>(n));
+ }
+ else
+ {
+ oa->write_character(format == detail::input_format_t::cbor
+ ? get_cbor_float_prefix(n)
+ : get_msgpack_float_prefix(n));
+ write_number(n);
+ }
+ }
+
+ public:
+ // The following to_char_type functions implement the conversion
+ // between uint8_t and CharType. In case CharType is not unsigned,
+ // such a conversion is required to allow values greater than 128.
+ // See <https://github.com/nlohmann/json/issues/1286> for a discussion.
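+ // For example, to_char_type(std::uint8_t{0xF6}) yields the char value -10
+ // on platforms where char is signed, preserving the bit pattern 0xF6.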
+ template < typename C = CharType,
+ enable_if_t < std::is_signed<C>::value && std::is_signed<char>::value > * = nullptr >
+ static constexpr CharType to_char_type(std::uint8_t x) noexcept
+ {
+ return *reinterpret_cast<char*>(&x);
+ }
+
+ template < typename C = CharType,
+ enable_if_t < std::is_signed<C>::value && std::is_unsigned<char>::value > * = nullptr >
+ static CharType to_char_type(std::uint8_t x) noexcept
+ {
+ static_assert(sizeof(std::uint8_t) == sizeof(CharType), "size of CharType must be equal to std::uint8_t");
+ static_assert(std::is_trivial<CharType>::value, "CharType must be trivial");
+ CharType result;
+ std::memcpy(&result, &x, sizeof(x));
+ return result;
+ }
+
+ template<typename C = CharType,
+ enable_if_t<std::is_unsigned<C>::value>* = nullptr>
+ static constexpr CharType to_char_type(std::uint8_t x) noexcept
+ {
+ return x;
+ }
+
+ template < typename InputCharType, typename C = CharType,
+ enable_if_t <
+ std::is_signed<C>::value &&
+ std::is_signed<char>::value &&
+ std::is_same<char, typename std::remove_cv<InputCharType>::type>::value
+ > * = nullptr >
+ static constexpr CharType to_char_type(InputCharType x) noexcept
+ {
+ return x;
+ }
+
+ private:
+ /// whether we can assume little endianness
+ const bool is_little_endian = little_endianess();
+
+ /// the output
+ output_adapter_t<CharType> oa = nullptr;
+};
+} // namespace detail
+} // namespace nlohmann
+
+// #include <nlohmann/detail/output/output_adapters.hpp>
+
+// #include <nlohmann/detail/output/serializer.hpp>
+
+
+#include <algorithm> // reverse, remove, fill, find, none_of
+#include <array> // array
+#include <clocale> // localeconv, lconv
+#include <cmath> // labs, isfinite, isnan, signbit
+#include <cstddef> // size_t, ptrdiff_t
+#include <cstdint> // uint8_t
+#include <cstdio> // snprintf
+#include <limits> // numeric_limits
+#include <string> // string, char_traits
+#include <type_traits> // is_same
+#include <utility> // move
+
+// #include <nlohmann/detail/conversions/to_chars.hpp>
+
+
+#include <array> // array
+#include <cmath> // signbit, isfinite
+#include <cstdint> // intN_t, uintN_t
+#include <cstring> // memcpy, memmove
+#include <limits> // numeric_limits
+#include <type_traits> // conditional
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+
+/*!
+@brief implements the Grisu2 algorithm for binary to decimal floating-point
+conversion.
+
+This implementation is a slightly modified version of the reference
+implementation which may be obtained from
+http://florian.loitsch.com/publications (bench.tar.gz).
+
+The code is distributed under the MIT license, Copyright (c) 2009 Florian Loitsch.
+
+For a detailed description of the algorithm see:
+
+[1] Loitsch, "Printing Floating-Point Numbers Quickly and Accurately with
+ Integers", Proceedings of the ACM SIGPLAN 2010 Conference on Programming
+ Language Design and Implementation, PLDI 2010
+[2] Burger, Dybvig, "Printing Floating-Point Numbers Quickly and Accurately",
+ Proceedings of the ACM SIGPLAN 1996 Conference on Programming Language
+ Design and Implementation, PLDI 1996
+*/
+namespace dtoa_impl
+{
+
+template<typename Target, typename Source>
+Target reinterpret_bits(const Source source)
+{
+ static_assert(sizeof(Target) == sizeof(Source), "size mismatch");
+
+ Target target;
+ std::memcpy(&target, &source, sizeof(Source));
+ return target;
+}
+
+struct diyfp // f * 2^e
+{
+ static constexpr int kPrecision = 64; // = q
+
+ std::uint64_t f = 0;
+ int e = 0;
+
+ constexpr diyfp(std::uint64_t f_, int e_) noexcept : f(f_), e(e_) {}
+
+ /*!
+ @brief returns x - y
+ @pre x.e == y.e and x.f >= y.f
+ */
+ static diyfp sub(const diyfp& x, const diyfp& y) noexcept
+ {
+ JSON_ASSERT(x.e == y.e);
+ JSON_ASSERT(x.f >= y.f);
+
+ return {x.f - y.f, x.e};
+ }
+
+ /*!
+ @brief returns x * y
+ @note The result is rounded. (Only the upper q bits are returned.)
+ */
+ static diyfp mul(const diyfp& x, const diyfp& y) noexcept
+ {
+ static_assert(kPrecision == 64, "internal error");
+
+ // Computes:
+ // f = round((x.f * y.f) / 2^q)
+ // e = x.e + y.e + q
+
+ // Emulate the 64-bit * 64-bit multiplication:
+ //
+ // p = u * v
+ // = (u_lo + 2^32 u_hi) (v_lo + 2^32 v_hi)
+ // = (u_lo v_lo ) + 2^32 ((u_lo v_hi ) + (u_hi v_lo )) + 2^64 (u_hi v_hi )
+ // = (p0 ) + 2^32 ((p1 ) + (p2 )) + 2^64 (p3 )
+ // = (p0_lo + 2^32 p0_hi) + 2^32 ((p1_lo + 2^32 p1_hi) + (p2_lo + 2^32 p2_hi)) + 2^64 (p3 )
+ // = (p0_lo ) + 2^32 (p0_hi + p1_lo + p2_lo ) + 2^64 (p1_hi + p2_hi + p3)
+ // = (p0_lo ) + 2^32 (Q ) + 2^64 (H )
+ // = (p0_lo ) + 2^32 (Q_lo + 2^32 Q_hi ) + 2^64 (H )
+ //
+ // (Since Q might be larger than 2^32 - 1)
+ //
+ // = (p0_lo + 2^32 Q_lo) + 2^64 (Q_hi + H)
+ //
+ // (Q_hi + H does not overflow a 64-bit int)
+ //
+ // = p_lo + 2^64 p_hi
+
+ const std::uint64_t u_lo = x.f & 0xFFFFFFFFu;
+ const std::uint64_t u_hi = x.f >> 32u;
+ const std::uint64_t v_lo = y.f & 0xFFFFFFFFu;
+ const std::uint64_t v_hi = y.f >> 32u;
+
+ const std::uint64_t p0 = u_lo * v_lo;
+ const std::uint64_t p1 = u_lo * v_hi;
+ const std::uint64_t p2 = u_hi * v_lo;
+ const std::uint64_t p3 = u_hi * v_hi;
+
+ const std::uint64_t p0_hi = p0 >> 32u;
+ const std::uint64_t p1_lo = p1 & 0xFFFFFFFFu;
+ const std::uint64_t p1_hi = p1 >> 32u;
+ const std::uint64_t p2_lo = p2 & 0xFFFFFFFFu;
+ const std::uint64_t p2_hi = p2 >> 32u;
+
+ std::uint64_t Q = p0_hi + p1_lo + p2_lo;
+
+ // The full product might now be computed as
+ //
+ // p_hi = p3 + p2_hi + p1_hi + (Q >> 32)
+ // p_lo = p0_lo + (Q << 32)
+ //
+ // But in this particular case here, the full p_lo is not required.
+ // Effectively we only need to add the highest bit in p_lo to p_hi (and
+ // Q_hi + 1 does not overflow).
+
+ Q += std::uint64_t{1} << (64u - 32u - 1u); // round, ties up
+
+ const std::uint64_t h = p3 + p2_hi + p1_hi + (Q >> 32u);
+
+ return {h, x.e + y.e + 64};
+ }
+
+ /*!
+ @brief normalize x such that the significand is >= 2^(q-1)
+ @pre x.f != 0
+ */
+ static diyfp normalize(diyfp x) noexcept
+ {
+ JSON_ASSERT(x.f != 0);
+
+ while ((x.f >> 63u) == 0)
+ {
+ x.f <<= 1u;
+ x.e--;
+ }
+
+ return x;
+ }
+
+ /*!
+ @brief normalize x such that the result has the exponent target_exponent
+ @pre x.e >= target_exponent and the upper x.e - target_exponent bits of x.f must be zero.
+ */
+ static diyfp normalize_to(const diyfp& x, const int target_exponent) noexcept
+ {
+ const int delta = x.e - target_exponent;
+
+ JSON_ASSERT(delta >= 0);
+ JSON_ASSERT(((x.f << delta) >> delta) == x.f);
+
+ return {x.f << delta, target_exponent};
+ }
+};
+
+struct boundaries
+{
+ diyfp w;
+ diyfp minus;
+ diyfp plus;
+};
+
+/*!
+Compute the (normalized) diyfp representing the input number 'value' and its
+boundaries.
+
+@pre value must be finite and positive
+*/
+template<typename FloatType>
+boundaries compute_boundaries(FloatType value)
+{
+ JSON_ASSERT(std::isfinite(value));
+ JSON_ASSERT(value > 0);
+
+ // Convert the IEEE representation into a diyfp.
+ //
+ // If v is denormal:
+ // value = 0.F * 2^(1 - bias) = ( F) * 2^(1 - bias - (p-1))
+ // If v is normalized:
+ // value = 1.F * 2^(E - bias) = (2^(p-1) + F) * 2^(E - bias - (p-1))
+
+ static_assert(std::numeric_limits<FloatType>::is_iec559,
+ "internal error: dtoa_short requires an IEEE-754 floating-point implementation");
+
+ constexpr int kPrecision = std::numeric_limits<FloatType>::digits; // = p (includes the hidden bit)
+ constexpr int kBias = std::numeric_limits<FloatType>::max_exponent - 1 + (kPrecision - 1);
+ constexpr int kMinExp = 1 - kBias;
+ constexpr std::uint64_t kHiddenBit = std::uint64_t{1} << (kPrecision - 1); // = 2^(p-1)
+
+ using bits_type = typename std::conditional<kPrecision == 24, std::uint32_t, std::uint64_t >::type;
+
+ const std::uint64_t bits = reinterpret_bits<bits_type>(value);
+ const std::uint64_t E = bits >> (kPrecision - 1);
+ const std::uint64_t F = bits & (kHiddenBit - 1);
+
+ const bool is_denormal = E == 0;
+ const diyfp v = is_denormal
+ ? diyfp(F, kMinExp)
+ : diyfp(F + kHiddenBit, static_cast<int>(E) - kBias);
+
+ // Compute the boundaries m- and m+ of the floating-point value
+ // v = f * 2^e.
+ //
+ // Determine v- and v+, the floating-point predecessor and successor of v,
+ // respectively.
+ //
+ // v- = v - 2^e if f != 2^(p-1) or e == e_min (A)
+ // = v - 2^(e-1) if f == 2^(p-1) and e > e_min (B)
+ //
+ // v+ = v + 2^e
+ //
+ // Let m- = (v- + v) / 2 and m+ = (v + v+) / 2. All real numbers _strictly_
+ // between m- and m+ round to v, regardless of how the input rounding
+ // algorithm breaks ties.
+ //
+ // ---+-------------+-------------+-------------+-------------+--- (A)
+ // v- m- v m+ v+
+ //
+ // -----------------+------+------+-------------+-------------+--- (B)
+ // v- m- v m+ v+
+
+ const bool lower_boundary_is_closer = F == 0 && E > 1;
+ const diyfp m_plus = diyfp(2 * v.f + 1, v.e - 1);
+ const diyfp m_minus = lower_boundary_is_closer
+ ? diyfp(4 * v.f - 1, v.e - 2) // (B)
+ : diyfp(2 * v.f - 1, v.e - 1); // (A)
+
+ // Determine the normalized w+ = m+.
+ const diyfp w_plus = diyfp::normalize(m_plus);
+
+ // Determine w- = m- such that e_(w-) = e_(w+).
+ const diyfp w_minus = diyfp::normalize_to(m_minus, w_plus.e);
+
+ return {diyfp::normalize(v), w_minus, w_plus};
+}
+
+// Given normalized diyfp w, Grisu needs to find a (normalized) cached
+// power-of-ten c, such that the exponent of the product c * w = f * 2^e lies
+// within a certain range [alpha, gamma] (Definition 3.2 from [1])
+//
+// alpha <= e = e_c + e_w + q <= gamma
+//
+// or
+//
+// f_c * f_w * 2^alpha <= f_c 2^(e_c) * f_w 2^(e_w) * 2^q
+// <= f_c * f_w * 2^gamma
+//
+// Since c and w are normalized, i.e. 2^(q-1) <= f < 2^q, this implies
+//
+// 2^(q-1) * 2^(q-1) * 2^alpha <= c * w * 2^q < 2^q * 2^q * 2^gamma
+//
+// or
+//
+// 2^(q - 2 + alpha) <= c * w < 2^(q + gamma)
+//
+// The choice of (alpha,gamma) determines the size of the table and the form of
+// the digit generation procedure. Using (alpha,gamma)=(-60,-32) works out well
+// in practice:
+//
+// The idea is to cut the number c * w = f * 2^e into two parts, which can be
+// processed independently: An integral part p1, and a fractional part p2:
+//
+// f * 2^e = ( (f div 2^-e) * 2^-e + (f mod 2^-e) ) * 2^e
+// = (f div 2^-e) + (f mod 2^-e) * 2^e
+// = p1 + p2 * 2^e
+//
+// The conversion of p1 into decimal form requires a series of divisions and
+// modulos by (a power of) 10. These operations are faster for 32-bit than for
+// 64-bit integers, so p1 should ideally fit into a 32-bit integer. This can be
+// achieved by choosing
+//
+// -e >= 32 or e <= -32 := gamma
+//
+// In order to convert the fractional part
+//
+// p2 * 2^e = p2 / 2^-e = d[-1] / 10^1 + d[-2] / 10^2 + ...
+//
+// into decimal form, the fraction is repeatedly multiplied by 10 and the digits
+// d[-i] are extracted in order:
+//
+// (10 * p2) div 2^-e = d[-1]
+// (10 * p2) mod 2^-e = d[-2] / 10^1 + ...
+//
+// The multiplication by 10 must not overflow. It is sufficient to choose
+//
+// 10 * p2 < 16 * p2 = 2^4 * p2 <= 2^64.
+//
+// Since p2 = f mod 2^-e < 2^-e,
+//
+// -e <= 60 or e >= -60 := alpha
+
+constexpr int kAlpha = -60;
+constexpr int kGamma = -32;
+
+struct cached_power // c = f * 2^e ~= 10^k
+{
+ std::uint64_t f;
+ int e;
+ int k;
+};
+
+/*!
+For a normalized diyfp w = f * 2^e, this function returns a (normalized) cached
+power-of-ten c = f_c * 2^e_c, such that the exponent of the product w * c
+satisfies (Definition 3.2 from [1])
+
+ alpha <= e_c + e + q <= gamma.
+*/
+inline cached_power get_cached_power_for_binary_exponent(int e)
+{
+ // Now
+ //
+ // alpha <= e_c + e + q <= gamma (1)
+ // ==> f_c * 2^alpha <= c * 2^e * 2^q
+ //
+ // and since the c's are normalized, 2^(q-1) <= f_c,
+ //
+ // ==> 2^(q - 1 + alpha) <= c * 2^(e + q)
+ // ==> 2^(alpha - e - 1) <= c
+ //
+ // If c were an exact power of ten, i.e. c = 10^k, one may determine k as
+ //
+ // k = ceil( log_10( 2^(alpha - e - 1) ) )
+ // = ceil( (alpha - e - 1) * log_10(2) )
+ //
+ // From the paper:
+ // "In theory the result of the procedure could be wrong since c is rounded,
+ // and the computation itself is approximated [...]. In practice, however,
+ // this simple function is sufficient."
+ //
+ // For IEEE double precision floating-point numbers converted into
+ // normalized diyfp's w = f * 2^e, with q = 64,
+ //
+ // e >= -1022 (min IEEE exponent)
+ // -52 (p - 1)
+ // -52 (p - 1, possibly normalize denormal IEEE numbers)
+ // -11 (normalize the diyfp)
+ // = -1137
+ //
+ // and
+ //
+ // e <= +1023 (max IEEE exponent)
+ // -52 (p - 1)
+ // -11 (normalize the diyfp)
+ // = 960
+ //
+ // This binary exponent range [-1137,960] results in a decimal exponent
+ // range [-307,324]. One does not need to store a cached power for each
+ // k in this range. For each such k it suffices to find a cached power
+ // such that the exponent of the product lies in [alpha,gamma].
+ // This implies that the difference of the decimal exponents of adjacent
+ // table entries must be less than or equal to
+ //
+ // floor( (gamma - alpha) * log_10(2) ) = 8.
+ //
+ // (A smaller distance gamma-alpha would require a larger table.)
+
+ // NB:
+ // Actually this function returns c, such that -60 <= e_c + e + 64 <= -34.
+
+ constexpr int kCachedPowersMinDecExp = -300;
+ constexpr int kCachedPowersDecStep = 8;
+
+ static constexpr std::array<cached_power, 79> kCachedPowers =
+ {
+ {
+ { 0xAB70FE17C79AC6CA, -1060, -300 },
+ { 0xFF77B1FCBEBCDC4F, -1034, -292 },
+ { 0xBE5691EF416BD60C, -1007, -284 },
+ { 0x8DD01FAD907FFC3C, -980, -276 },
+ { 0xD3515C2831559A83, -954, -268 },
+ { 0x9D71AC8FADA6C9B5, -927, -260 },
+ { 0xEA9C227723EE8BCB, -901, -252 },
+ { 0xAECC49914078536D, -874, -244 },
+ { 0x823C12795DB6CE57, -847, -236 },
+ { 0xC21094364DFB5637, -821, -228 },
+ { 0x9096EA6F3848984F, -794, -220 },
+ { 0xD77485CB25823AC7, -768, -212 },
+ { 0xA086CFCD97BF97F4, -741, -204 },
+ { 0xEF340A98172AACE5, -715, -196 },
+ { 0xB23867FB2A35B28E, -688, -188 },
+ { 0x84C8D4DFD2C63F3B, -661, -180 },
+ { 0xC5DD44271AD3CDBA, -635, -172 },
+ { 0x936B9FCEBB25C996, -608, -164 },
+ { 0xDBAC6C247D62A584, -582, -156 },
+ { 0xA3AB66580D5FDAF6, -555, -148 },
+ { 0xF3E2F893DEC3F126, -529, -140 },
+ { 0xB5B5ADA8AAFF80B8, -502, -132 },
+ { 0x87625F056C7C4A8B, -475, -124 },
+ { 0xC9BCFF6034C13053, -449, -116 },
+ { 0x964E858C91BA2655, -422, -108 },
+ { 0xDFF9772470297EBD, -396, -100 },
+ { 0xA6DFBD9FB8E5B88F, -369, -92 },
+ { 0xF8A95FCF88747D94, -343, -84 },
+ { 0xB94470938FA89BCF, -316, -76 },
+ { 0x8A08F0F8BF0F156B, -289, -68 },
+ { 0xCDB02555653131B6, -263, -60 },
+ { 0x993FE2C6D07B7FAC, -236, -52 },
+ { 0xE45C10C42A2B3B06, -210, -44 },
+ { 0xAA242499697392D3, -183, -36 },
+ { 0xFD87B5F28300CA0E, -157, -28 },
+ { 0xBCE5086492111AEB, -130, -20 },
+ { 0x8CBCCC096F5088CC, -103, -12 },
+ { 0xD1B71758E219652C, -77, -4 },
+ { 0x9C40000000000000, -50, 4 },
+ { 0xE8D4A51000000000, -24, 12 },
+ { 0xAD78EBC5AC620000, 3, 20 },
+ { 0x813F3978F8940984, 30, 28 },
+ { 0xC097CE7BC90715B3, 56, 36 },
+ { 0x8F7E32CE7BEA5C70, 83, 44 },
+ { 0xD5D238A4ABE98068, 109, 52 },
+ { 0x9F4F2726179A2245, 136, 60 },
+ { 0xED63A231D4C4FB27, 162, 68 },
+ { 0xB0DE65388CC8ADA8, 189, 76 },
+ { 0x83C7088E1AAB65DB, 216, 84 },
+ { 0xC45D1DF942711D9A, 242, 92 },
+ { 0x924D692CA61BE758, 269, 100 },
+ { 0xDA01EE641A708DEA, 295, 108 },
+ { 0xA26DA3999AEF774A, 322, 116 },
+ { 0xF209787BB47D6B85, 348, 124 },
+ { 0xB454E4A179DD1877, 375, 132 },
+ { 0x865B86925B9BC5C2, 402, 140 },
+ { 0xC83553C5C8965D3D, 428, 148 },
+ { 0x952AB45CFA97A0B3, 455, 156 },
+ { 0xDE469FBD99A05FE3, 481, 164 },
+ { 0xA59BC234DB398C25, 508, 172 },
+ { 0xF6C69A72A3989F5C, 534, 180 },
+ { 0xB7DCBF5354E9BECE, 561, 188 },
+ { 0x88FCF317F22241E2, 588, 196 },
+ { 0xCC20CE9BD35C78A5, 614, 204 },
+ { 0x98165AF37B2153DF, 641, 212 },
+ { 0xE2A0B5DC971F303A, 667, 220 },
+ { 0xA8D9D1535CE3B396, 694, 228 },
+ { 0xFB9B7CD9A4A7443C, 720, 236 },
+ { 0xBB764C4CA7A44410, 747, 244 },
+ { 0x8BAB8EEFB6409C1A, 774, 252 },
+ { 0xD01FEF10A657842C, 800, 260 },
+ { 0x9B10A4E5E9913129, 827, 268 },
+ { 0xE7109BFBA19C0C9D, 853, 276 },
+ { 0xAC2820D9623BF429, 880, 284 },
+ { 0x80444B5E7AA7CF85, 907, 292 },
+ { 0xBF21E44003ACDD2D, 933, 300 },
+ { 0x8E679C2F5E44FF8F, 960, 308 },
+ { 0xD433179D9C8CB841, 986, 316 },
+ { 0x9E19DB92B4E31BA9, 1013, 324 },
+ }
+ };
+
+ // This computation gives exactly the same results for k as
+ // k = ceil((kAlpha - e - 1) * 0.30102999566398114)
+ // for |e| <= 1500, but doesn't require floating-point operations.
+ // NB: log_10(2) ~= 78913 / 2^18
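+ // e.g. e = -100 gives f = 39 and k = (39 * 78913) / 2^18 + 1 = 12,
+ // matching ceil(39 * log_10(2)) = ceil(11.74...) = 12.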
+ JSON_ASSERT(e >= -1500);
+ JSON_ASSERT(e <= 1500);
+ const int f = kAlpha - e - 1;
+ const int k = (f * 78913) / (1 << 18) + static_cast<int>(f > 0);
+
+ const int index = (-kCachedPowersMinDecExp + k + (kCachedPowersDecStep - 1)) / kCachedPowersDecStep;
+ JSON_ASSERT(index >= 0);
+ JSON_ASSERT(static_cast<std::size_t>(index) < kCachedPowers.size());
+
+ const cached_power cached = kCachedPowers[static_cast<std::size_t>(index)];
+ JSON_ASSERT(kAlpha <= cached.e + e + 64);
+ JSON_ASSERT(kGamma >= cached.e + e + 64);
+
+ return cached;
+}
+
+/*!
+For n != 0, returns k, such that pow10 := 10^(k-1) <= n < 10^k.
+For n == 0, returns 1 and sets pow10 := 1.
+*/
+inline int find_largest_pow10(const std::uint32_t n, std::uint32_t& pow10)
+{
+ // LCOV_EXCL_START
+ if (n >= 1000000000)
+ {
+ pow10 = 1000000000;
+ return 10;
+ }
+ // LCOV_EXCL_STOP
+ else if (n >= 100000000)
+ {
+ pow10 = 100000000;
+ return 9;
+ }
+ else if (n >= 10000000)
+ {
+ pow10 = 10000000;
+ return 8;
+ }
+ else if (n >= 1000000)
+ {
+ pow10 = 1000000;
+ return 7;
+ }
+ else if (n >= 100000)
+ {
+ pow10 = 100000;
+ return 6;
+ }
+ else if (n >= 10000)
+ {
+ pow10 = 10000;
+ return 5;
+ }
+ else if (n >= 1000)
+ {
+ pow10 = 1000;
+ return 4;
+ }
+ else if (n >= 100)
+ {
+ pow10 = 100;
+ return 3;
+ }
+ else if (n >= 10)
+ {
+ pow10 = 10;
+ return 2;
+ }
+ else
+ {
+ pow10 = 1;
+ return 1;
+ }
+}
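In other words, the return value is the number of decimal digits of n, and pow10 receives the largest power of ten not exceeding n. A quick illustration with a hand-picked value:

    std::uint32_t pow10 = 0;
    const int k = find_largest_pow10(4711u, pow10);   // k == 4, pow10 == 1000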
+
+inline void grisu2_round(char* buf, int len, std::uint64_t dist, std::uint64_t delta,
+ std::uint64_t rest, std::uint64_t ten_k)
+{
+ JSON_ASSERT(len >= 1);
+ JSON_ASSERT(dist <= delta);
+ JSON_ASSERT(rest <= delta);
+ JSON_ASSERT(ten_k > 0);
+
+ // <--------------------------- delta ---->
+ // <---- dist --------->
+ // --------------[------------------+-------------------]--------------
+ // M- w M+
+ //
+ // ten_k
+ // <------>
+ // <---- rest ---->
+ // --------------[------------------+----+--------------]--------------
+ // w V
+ // = buf * 10^k
+ //
+ // ten_k represents a unit-in-the-last-place in the decimal representation
+ // stored in buf.
+ // Decrement buf by ten_k while this takes buf closer to w.
+
+ // The tests are written in this order to avoid overflow in unsigned
+ // integer arithmetic.
+
+ while (rest < dist
+ && delta - rest >= ten_k
+ && (rest + ten_k < dist || dist - rest > rest + ten_k - dist))
+ {
+ JSON_ASSERT(buf[len - 1] != '0');
+ buf[len - 1]--;
+ rest += ten_k;
+ }
+}
+
+/*!
+Generates V = buffer * 10^decimal_exponent, such that M- <= V <= M+.
+M- and M+ must be normalized and share the same exponent -60 <= e <= -32.
+*/
+inline void grisu2_digit_gen(char* buffer, int& length, int& decimal_exponent,
+ diyfp M_minus, diyfp w, diyfp M_plus)
+{
+ static_assert(kAlpha >= -60, "internal error");
+ static_assert(kGamma <= -32, "internal error");
+
+ // Generates the digits (and the exponent) of a decimal floating-point
+ // number V = buffer * 10^decimal_exponent in the range [M-, M+]. The diyfp's
+ // w, M- and M+ share the same exponent e, which satisfies alpha <= e <= gamma.
+ //
+ // <--------------------------- delta ---->
+ // <---- dist --------->
+ // --------------[------------------+-------------------]--------------
+ // M- w M+
+ //
+ // Grisu2 generates the digits of M+ from left to right and stops as soon as
+ // V is in [M-,M+].
+
+ JSON_ASSERT(M_plus.e >= kAlpha);
+ JSON_ASSERT(M_plus.e <= kGamma);
+
+ std::uint64_t delta = diyfp::sub(M_plus, M_minus).f; // (significand of (M+ - M-), implicit exponent is e)
+ std::uint64_t dist = diyfp::sub(M_plus, w ).f; // (significand of (M+ - w ), implicit exponent is e)
+
+ // Split M+ = f * 2^e into two parts p1 and p2 (note: e < 0):
+ //
+ // M+ = f * 2^e
+ // = ((f div 2^-e) * 2^-e + (f mod 2^-e)) * 2^e
+ // = ((p1 ) * 2^-e + (p2 )) * 2^e
+ // = p1 + p2 * 2^e
+
+ const diyfp one(std::uint64_t{1} << -M_plus.e, M_plus.e);
+
+ auto p1 = static_cast<std::uint32_t>(M_plus.f >> -one.e); // p1 = f div 2^-e (Since -e >= 32, p1 fits into a 32-bit int.)
+ std::uint64_t p2 = M_plus.f & (one.f - 1); // p2 = f mod 2^-e
+
+ // 1)
+ //
+ // Generate the digits of the integral part p1 = d[n-1]...d[1]d[0]
+
+ JSON_ASSERT(p1 > 0);
+
+ std::uint32_t pow10;
+ const int k = find_largest_pow10(p1, pow10);
+
+ // 10^(k-1) <= p1 < 10^k, pow10 = 10^(k-1)
+ //
+ // p1 = (p1 div 10^(k-1)) * 10^(k-1) + (p1 mod 10^(k-1))
+ // = (d[k-1] ) * 10^(k-1) + (p1 mod 10^(k-1))
+ //
+ // M+ = p1 + p2 * 2^e
+ // = d[k-1] * 10^(k-1) + (p1 mod 10^(k-1)) + p2 * 2^e
+ // = d[k-1] * 10^(k-1) + ((p1 mod 10^(k-1)) * 2^-e + p2) * 2^e
+ // = d[k-1] * 10^(k-1) + ( rest) * 2^e
+ //
+ // Now generate the digits d[n] of p1 from left to right (n = k-1,...,0)
+ //
+ // p1 = d[k-1]...d[n] * 10^n + d[n-1]...d[0]
+ //
+ // but stop as soon as
+ //
+ // rest * 2^e = (d[n-1]...d[0] * 2^-e + p2) * 2^e <= delta * 2^e
+
+ int n = k;
+ while (n > 0)
+ {
+ // Invariants:
+ // M+ = buffer * 10^n + (p1 + p2 * 2^e) (buffer = 0 for n = k)
+ // pow10 = 10^(n-1) <= p1 < 10^n
+ //
+ const std::uint32_t d = p1 / pow10; // d = p1 div 10^(n-1)
+ const std::uint32_t r = p1 % pow10; // r = p1 mod 10^(n-1)
+ //
+ // M+ = buffer * 10^n + (d * 10^(n-1) + r) + p2 * 2^e
+ // = (buffer * 10 + d) * 10^(n-1) + (r + p2 * 2^e)
+ //
+ JSON_ASSERT(d <= 9);
+ buffer[length++] = static_cast<char>('0' + d); // buffer := buffer * 10 + d
+ //
+ // M+ = buffer * 10^(n-1) + (r + p2 * 2^e)
+ //
+ p1 = r;
+ n--;
+ //
+ // M+ = buffer * 10^n + (p1 + p2 * 2^e)
+ // pow10 = 10^n
+ //
+
+ // Now check if enough digits have been generated.
+ // Compute
+ //
+ // p1 + p2 * 2^e = (p1 * 2^-e + p2) * 2^e = rest * 2^e
+ //
+ // Note:
+ // Since rest and delta share the same exponent e, it suffices to
+ // compare the significands.
+ const std::uint64_t rest = (std::uint64_t{p1} << -one.e) + p2;
+ if (rest <= delta)
+ {
+ // V = buffer * 10^n, with M- <= V <= M+.
+
+ decimal_exponent += n;
+
+ // We may now just stop. But instead look if the buffer could be
+ // decremented to bring V closer to w.
+ //
+ // pow10 = 10^n is now 1 ulp in the decimal representation V.
+ // The rounding procedure works with diyfp's with an implicit
+ // exponent of e.
+ //
+ // 10^n = (10^n * 2^-e) * 2^e = ulp * 2^e
+ //
+ const std::uint64_t ten_n = std::uint64_t{pow10} << -one.e;
+ grisu2_round(buffer, length, dist, delta, rest, ten_n);
+
+ return;
+ }
+
+ pow10 /= 10;
+ //
+ // pow10 = 10^(n-1) <= p1 < 10^n
+ // Invariants restored.
+ }
+
+ // 2)
+ //
+ // The digits of the integral part have been generated:
+ //
+ // M+ = d[k-1]...d[1]d[0] + p2 * 2^e
+ // = buffer + p2 * 2^e
+ //
+ // Now generate the digits of the fractional part p2 * 2^e.
+ //
+ // Note:
+ // No decimal point is generated: the exponent is adjusted instead.
+ //
+ // p2 actually represents the fraction
+ //
+ // p2 * 2^e
+ // = p2 / 2^-e
+ // = d[-1] / 10^1 + d[-2] / 10^2 + ...
+ //
+    // Now generate the digits d[-m] of p2 from left to right (m = 1,2,...)
+ //
+ // p2 * 2^e = d[-1]d[-2]...d[-m] * 10^-m
+ // + 10^-m * (d[-m-1] / 10^1 + d[-m-2] / 10^2 + ...)
+ //
+ // using
+ //
+ // 10^m * p2 = ((10^m * p2) div 2^-e) * 2^-e + ((10^m * p2) mod 2^-e)
+ // = ( d) * 2^-e + ( r)
+ //
+ // or
+ // 10^m * p2 * 2^e = d + r * 2^e
+ //
+ // i.e.
+ //
+ // M+ = buffer + p2 * 2^e
+ // = buffer + 10^-m * (d + r * 2^e)
+ // = (buffer * 10^m + d) * 10^-m + 10^-m * r * 2^e
+ //
+ // and stop as soon as 10^-m * r * 2^e <= delta * 2^e
+
+ JSON_ASSERT(p2 > delta);
+
+ int m = 0;
+ for (;;)
+ {
+ // Invariant:
+ // M+ = buffer * 10^-m + 10^-m * (d[-m-1] / 10 + d[-m-2] / 10^2 + ...) * 2^e
+ // = buffer * 10^-m + 10^-m * (p2 ) * 2^e
+ // = buffer * 10^-m + 10^-m * (1/10 * (10 * p2) ) * 2^e
+ // = buffer * 10^-m + 10^-m * (1/10 * ((10*p2 div 2^-e) * 2^-e + (10*p2 mod 2^-e)) * 2^e
+ //
+ JSON_ASSERT(p2 <= (std::numeric_limits<std::uint64_t>::max)() / 10);
+ p2 *= 10;
+ const std::uint64_t d = p2 >> -one.e; // d = (10 * p2) div 2^-e
+ const std::uint64_t r = p2 & (one.f - 1); // r = (10 * p2) mod 2^-e
+ //
+ // M+ = buffer * 10^-m + 10^-m * (1/10 * (d * 2^-e + r) * 2^e
+ // = buffer * 10^-m + 10^-m * (1/10 * (d + r * 2^e))
+ // = (buffer * 10 + d) * 10^(-m-1) + 10^(-m-1) * r * 2^e
+ //
+ JSON_ASSERT(d <= 9);
+ buffer[length++] = static_cast<char>('0' + d); // buffer := buffer * 10 + d
+ //
+ // M+ = buffer * 10^(-m-1) + 10^(-m-1) * r * 2^e
+ //
+ p2 = r;
+ m++;
+ //
+ // M+ = buffer * 10^-m + 10^-m * p2 * 2^e
+ // Invariant restored.
+
+ // Check if enough digits have been generated.
+ //
+ // 10^-m * p2 * 2^e <= delta * 2^e
+ // p2 * 2^e <= 10^m * delta * 2^e
+ // p2 <= 10^m * delta
+ delta *= 10;
+ dist *= 10;
+ if (p2 <= delta)
+ {
+ break;
+ }
+ }
+
+ // V = buffer * 10^-m, with M- <= V <= M+.
+
+ decimal_exponent -= m;
+
+ // 1 ulp in the decimal representation is now 10^-m.
+ // Since delta and dist are now scaled by 10^m, we need to do the
+ // same with ulp in order to keep the units in sync.
+ //
+ // 10^m * 10^-m = 1 = 2^-e * 2^e = ten_m * 2^e
+ //
+ const std::uint64_t ten_m = one.f;
+ grisu2_round(buffer, length, dist, delta, p2, ten_m);
+
+ // By construction this algorithm generates the shortest possible decimal
+ // number (Loitsch, Theorem 6.2) which rounds back to w.
+ // For an input number of precision p, at least
+ //
+ // N = 1 + ceil(p * log_10(2))
+ //
+ // decimal digits are sufficient to identify all binary floating-point
+ // numbers (Matula, "In-and-Out conversions").
+ // This implies that the algorithm does not produce more than N decimal
+ // digits.
+ //
+ // N = 17 for p = 53 (IEEE double precision)
+ // N = 9 for p = 24 (IEEE single precision)
+}
+
+/*!
+v = buf * 10^decimal_exponent
+len is the length of the buffer (number of decimal digits)
+The buffer must be large enough, i.e. >= max_digits10.
+*/
+JSON_HEDLEY_NON_NULL(1)
+inline void grisu2(char* buf, int& len, int& decimal_exponent,
+ diyfp m_minus, diyfp v, diyfp m_plus)
+{
+ JSON_ASSERT(m_plus.e == m_minus.e);
+ JSON_ASSERT(m_plus.e == v.e);
+
+ // --------(-----------------------+-----------------------)-------- (A)
+ // m- v m+
+ //
+ // --------------------(-----------+-----------------------)-------- (B)
+ // m- v m+
+ //
+ // First scale v (and m- and m+) such that the exponent is in the range
+ // [alpha, gamma].
+
+ const cached_power cached = get_cached_power_for_binary_exponent(m_plus.e);
+
+ const diyfp c_minus_k(cached.f, cached.e); // = c ~= 10^-k
+
+ // The exponent of the products is = v.e + c_minus_k.e + q and is in the range [alpha,gamma]
+ const diyfp w = diyfp::mul(v, c_minus_k);
+ const diyfp w_minus = diyfp::mul(m_minus, c_minus_k);
+ const diyfp w_plus = diyfp::mul(m_plus, c_minus_k);
+
+ // ----(---+---)---------------(---+---)---------------(---+---)----
+ // w- w w+
+ // = c*m- = c*v = c*m+
+ //
+ // diyfp::mul rounds its result and c_minus_k is approximated too. w, w- and
+ // w+ are now off by a small amount.
+ // In fact:
+ //
+ // w - v * 10^k < 1 ulp
+ //
+ // To account for this inaccuracy, add resp. subtract 1 ulp.
+ //
+ // --------+---[---------------(---+---)---------------]---+--------
+ // w- M- w M+ w+
+ //
+ // Now any number in [M-, M+] (bounds included) will round to w when input,
+ // regardless of how the input rounding algorithm breaks ties.
+ //
+ // And digit_gen generates the shortest possible such number in [M-, M+].
+ // Note that this does not mean that Grisu2 always generates the shortest
+ // possible number in the interval (m-, m+).
+ const diyfp M_minus(w_minus.f + 1, w_minus.e);
+ const diyfp M_plus (w_plus.f - 1, w_plus.e );
+
+ decimal_exponent = -cached.k; // = -(-k) = k
+
+ grisu2_digit_gen(buf, len, decimal_exponent, M_minus, w, M_plus);
+}
+
+/*!
+v = buf * 10^decimal_exponent
+len is the length of the buffer (number of decimal digits)
+The buffer must be large enough, i.e. >= max_digits10.
+*/
+template<typename FloatType>
+JSON_HEDLEY_NON_NULL(1)
+void grisu2(char* buf, int& len, int& decimal_exponent, FloatType value)
+{
+ static_assert(diyfp::kPrecision >= std::numeric_limits<FloatType>::digits + 3,
+ "internal error: not enough precision");
+
+ JSON_ASSERT(std::isfinite(value));
+ JSON_ASSERT(value > 0);
+
+ // If the neighbors (and boundaries) of 'value' are always computed for double-precision
+    // numbers, all floats can be recovered using strtod (and strtof). However, the resulting
+ // decimal representations are not exactly "short".
+ //
+ // The documentation for 'std::to_chars' (https://en.cppreference.com/w/cpp/utility/to_chars)
+ // says "value is converted to a string as if by std::sprintf in the default ("C") locale"
+    // and since sprintf promotes floats to doubles, I think this is exactly what 'std::to_chars'
+ // does.
+ // On the other hand, the documentation for 'std::to_chars' requires that "parsing the
+ // representation using the corresponding std::from_chars function recovers value exactly". That
+ // indicates that single precision floating-point numbers should be recovered using
+ // 'std::strtof'.
+ //
+ // NB: If the neighbors are computed for single-precision numbers, there is a single float
+ // (7.0385307e-26f) which can't be recovered using strtod. The resulting double precision
+ // value is off by 1 ulp.
+#if 0
+ const boundaries w = compute_boundaries(static_cast<double>(value));
+#else
+ const boundaries w = compute_boundaries(value);
+#endif
+
+ grisu2(buf, len, decimal_exponent, w.minus, w.w, w.plus);
+}
+
+/*!
+@brief appends a decimal representation of e to buf
+@return a pointer to the element following the exponent.
+@pre -1000 < e < 1000
+*/
+JSON_HEDLEY_NON_NULL(1)
+JSON_HEDLEY_RETURNS_NON_NULL
+inline char* append_exponent(char* buf, int e)
+{
+ JSON_ASSERT(e > -1000);
+ JSON_ASSERT(e < 1000);
+
+ if (e < 0)
+ {
+ e = -e;
+ *buf++ = '-';
+ }
+ else
+ {
+ *buf++ = '+';
+ }
+
+ auto k = static_cast<std::uint32_t>(e);
+ if (k < 10)
+ {
+ // Always print at least two digits in the exponent.
+ // This is for compatibility with printf("%g").
+ *buf++ = '0';
+ *buf++ = static_cast<char>('0' + k);
+ }
+ else if (k < 100)
+ {
+ *buf++ = static_cast<char>('0' + k / 10);
+ k %= 10;
+ *buf++ = static_cast<char>('0' + k);
+ }
+ else
+ {
+ *buf++ = static_cast<char>('0' + k / 100);
+ k %= 100;
+ *buf++ = static_cast<char>('0' + k / 10);
+ k %= 10;
+ *buf++ = static_cast<char>('0' + k);
+ }
+
+ return buf;
+}
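For example (buffer size chosen here, not taken from the header), the two-digit minimum mirrors printf("%g"):

    char buf[8];
    char* end = append_exponent(buf, 7);      // writes "+07",  end == buf + 3
    end = append_exponent(buf, -123);         // writes "-123", end == buf + 4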
+
+/*!
+@brief prettify v = buf * 10^decimal_exponent
+
+If v is in the range [10^min_exp, 10^max_exp) it will be printed in fixed-point
+notation. Otherwise it will be printed in exponential notation.
+
+@pre min_exp < 0
+@pre max_exp > 0
+*/
+JSON_HEDLEY_NON_NULL(1)
+JSON_HEDLEY_RETURNS_NON_NULL
+inline char* format_buffer(char* buf, int len, int decimal_exponent,
+ int min_exp, int max_exp)
+{
+ JSON_ASSERT(min_exp < 0);
+ JSON_ASSERT(max_exp > 0);
+
+ const int k = len;
+ const int n = len + decimal_exponent;
+
+ // v = buf * 10^(n-k)
+ // k is the length of the buffer (number of decimal digits)
+ // n is the position of the decimal point relative to the start of the buffer.
+
+ if (k <= n && n <= max_exp)
+ {
+ // digits[000]
+ // len <= max_exp + 2
+
+ std::memset(buf + k, '0', static_cast<size_t>(n) - static_cast<size_t>(k));
+ // Make it look like a floating-point number (#362, #378)
+ buf[n + 0] = '.';
+ buf[n + 1] = '0';
+ return buf + (static_cast<size_t>(n) + 2);
+ }
+
+ if (0 < n && n <= max_exp)
+ {
+ // dig.its
+ // len <= max_digits10 + 1
+
+ JSON_ASSERT(k > n);
+
+ std::memmove(buf + (static_cast<size_t>(n) + 1), buf + n, static_cast<size_t>(k) - static_cast<size_t>(n));
+ buf[n] = '.';
+ return buf + (static_cast<size_t>(k) + 1U);
+ }
+
+ if (min_exp < n && n <= 0)
+ {
+ // 0.[000]digits
+ // len <= 2 + (-min_exp - 1) + max_digits10
+
+ std::memmove(buf + (2 + static_cast<size_t>(-n)), buf, static_cast<size_t>(k));
+ buf[0] = '0';
+ buf[1] = '.';
+ std::memset(buf + 2, '0', static_cast<size_t>(-n));
+ return buf + (2U + static_cast<size_t>(-n) + static_cast<size_t>(k));
+ }
+
+ if (k == 1)
+ {
+ // dE+123
+ // len <= 1 + 5
+
+ buf += 1;
+ }
+ else
+ {
+ // d.igitsE+123
+ // len <= max_digits10 + 1 + 5
+
+ std::memmove(buf + 2, buf + 1, static_cast<size_t>(k) - 1);
+ buf[1] = '.';
+ buf += 1 + static_cast<size_t>(k);
+ }
+
+ *buf++ = 'e';
+ return append_exponent(buf, n - 1);
+}
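As a concrete illustration (values chosen here, not from the header), the four branches produce the following shapes for the digit string "4242" with min_exp = -4 and max_exp = 15:

    char buf[32] = {'4', '2', '4', '2'};      // v = 4242 * 10^decimal_exponent, len == 4
    char* end = nlohmann::detail::dtoa_impl::format_buffer(buf, 4, -2, -4, 15);
    // std::string(buf, end) == "42.42"            (dig.its branch,       n ==  2)
    //
    // With other exponents (fresh buffer each time):
    //   decimal_exponent ==   1  ->  "42420.0"    (digits[000] branch,   n ==  5)
    //   decimal_exponent ==  -5  ->  "0.04242"    (0.[000]digits branch, n == -1)
    //   decimal_exponent == -20  ->  "4.242e-17"  (d.igitsE+123 branch,  n == -16)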
+
+} // namespace dtoa_impl
+
+/*!
+@brief generates a decimal representation of the floating-point number value in [first, last).
+
+The format of the resulting decimal representation is similar to printf's %g
+format. Returns an iterator pointing past-the-end of the decimal representation.
+
+@note The input number must be finite, i.e. NaN's and Inf's are not supported.
+@note The buffer must be large enough.
+@note The result is NOT null-terminated.
+*/
+template<typename FloatType>
+JSON_HEDLEY_NON_NULL(1, 2)
+JSON_HEDLEY_RETURNS_NON_NULL
+char* to_chars(char* first, const char* last, FloatType value)
+{
+ static_cast<void>(last); // maybe unused - fix warning
+ JSON_ASSERT(std::isfinite(value));
+
+ // Use signbit(value) instead of (value < 0) since signbit works for -0.
+ if (std::signbit(value))
+ {
+ value = -value;
+ *first++ = '-';
+ }
+
+ if (value == 0) // +-0
+ {
+ *first++ = '0';
+ // Make it look like a floating-point number (#362, #378)
+ *first++ = '.';
+ *first++ = '0';
+ return first;
+ }
+
+ JSON_ASSERT(last - first >= std::numeric_limits<FloatType>::max_digits10);
+
+ // Compute v = buffer * 10^decimal_exponent.
+ // The decimal digits are stored in the buffer, which needs to be interpreted
+ // as an unsigned decimal integer.
+ // len is the length of the buffer, i.e. the number of decimal digits.
+ int len = 0;
+ int decimal_exponent = 0;
+ dtoa_impl::grisu2(first, len, decimal_exponent, value);
+
+ JSON_ASSERT(len <= std::numeric_limits<FloatType>::max_digits10);
+
+ // Format the buffer like printf("%.*g", prec, value)
+ constexpr int kMinExp = -4;
+ // Use digits10 here to increase compatibility with version 2.
+ constexpr int kMaxExp = std::numeric_limits<FloatType>::digits10;
+
+ JSON_ASSERT(last - first >= kMaxExp + 2);
+ JSON_ASSERT(last - first >= 2 + (-kMinExp - 1) + std::numeric_limits<FloatType>::max_digits10);
+ JSON_ASSERT(last - first >= std::numeric_limits<FloatType>::max_digits10 + 6);
+
+ return dtoa_impl::format_buffer(first, len, decimal_exponent, kMinExp, kMaxExp);
+}
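Assuming the surrounding header is included, a minimal usage sketch of this internal helper:

    std::array<char, 64> buf{};
    char* end = nlohmann::detail::to_chars(buf.data(), buf.data() + buf.size(), 0.1);
    // std::string(buf.data(), end) == "0.1" -- the shortest decimal form
    // that strtod maps back to the original double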
+
+} // namespace detail
+} // namespace nlohmann
+
+// #include <nlohmann/detail/exceptions.hpp>
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+// #include <nlohmann/detail/meta/cpp_future.hpp>
+
+// #include <nlohmann/detail/output/binary_writer.hpp>
+
+// #include <nlohmann/detail/output/output_adapters.hpp>
+
+// #include <nlohmann/detail/value_t.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+///////////////////
+// serialization //
+///////////////////
+
+/// how to treat decoding errors
+enum class error_handler_t
+{
+ strict, ///< throw a type_error exception in case of invalid UTF-8
+ replace, ///< replace invalid UTF-8 sequences with U+FFFD
+ ignore ///< ignore invalid UTF-8 sequences
+};
+
+template<typename BasicJsonType>
+class serializer
+{
+ using string_t = typename BasicJsonType::string_t;
+ using number_float_t = typename BasicJsonType::number_float_t;
+ using number_integer_t = typename BasicJsonType::number_integer_t;
+ using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+ using binary_char_t = typename BasicJsonType::binary_t::value_type;
+ static constexpr std::uint8_t UTF8_ACCEPT = 0;
+ static constexpr std::uint8_t UTF8_REJECT = 1;
+
+ public:
+ /*!
+ @param[in] s output stream to serialize to
+ @param[in] ichar indentation character to use
+ @param[in] error_handler_ how to react on decoding errors
+ */
+ serializer(output_adapter_t<char> s, const char ichar,
+ error_handler_t error_handler_ = error_handler_t::strict)
+ : o(std::move(s))
+ , loc(std::localeconv())
+ , thousands_sep(loc->thousands_sep == nullptr ? '\0' : std::char_traits<char>::to_char_type(* (loc->thousands_sep)))
+ , decimal_point(loc->decimal_point == nullptr ? '\0' : std::char_traits<char>::to_char_type(* (loc->decimal_point)))
+ , indent_char(ichar)
+ , indent_string(512, indent_char)
+ , error_handler(error_handler_)
+ {}
+
+ // delete because of pointer members
+ serializer(const serializer&) = delete;
+ serializer& operator=(const serializer&) = delete;
+ serializer(serializer&&) = delete;
+ serializer& operator=(serializer&&) = delete;
+ ~serializer() = default;
+
+ /*!
+ @brief internal implementation of the serialization function
+
+ This function is called by the public member function dump and organizes
+ the serialization internally. The indentation level is propagated as
+ additional parameter. In case of arrays and objects, the function is
+ called recursively.
+
+ - strings and object keys are escaped using `escape_string()`
+ - integer numbers are converted implicitly via `operator<<`
+ - floating-point numbers are converted to a string using `"%g"` format
+ - binary values are serialized as objects containing the subtype and the
+ byte array
+
+ @param[in] val value to serialize
+ @param[in] pretty_print whether the output shall be pretty-printed
+ @param[in] ensure_ascii If @a ensure_ascii is true, all non-ASCII characters
+ in the output are escaped with `\uXXXX` sequences, and the result consists
+ of ASCII characters only.
+ @param[in] indent_step the indent level
+ @param[in] current_indent the current indent level (only used internally)
+ */
+ void dump(const BasicJsonType& val,
+ const bool pretty_print,
+ const bool ensure_ascii,
+ const unsigned int indent_step,
+ const unsigned int current_indent = 0)
+ {
+ switch (val.m_type)
+ {
+ case value_t::object:
+ {
+ if (val.m_value.object->empty())
+ {
+ o->write_characters("{}", 2);
+ return;
+ }
+
+ if (pretty_print)
+ {
+ o->write_characters("{\n", 2);
+
+ // variable to hold indentation for recursive calls
+ const auto new_indent = current_indent + indent_step;
+ if (JSON_HEDLEY_UNLIKELY(indent_string.size() < new_indent))
+ {
+ indent_string.resize(indent_string.size() * 2, ' ');
+ }
+
+ // first n-1 elements
+ auto i = val.m_value.object->cbegin();
+ for (std::size_t cnt = 0; cnt < val.m_value.object->size() - 1; ++cnt, ++i)
+ {
+ o->write_characters(indent_string.c_str(), new_indent);
+ o->write_character('\"');
+ dump_escaped(i->first, ensure_ascii);
+ o->write_characters("\": ", 3);
+ dump(i->second, true, ensure_ascii, indent_step, new_indent);
+ o->write_characters(",\n", 2);
+ }
+
+ // last element
+ JSON_ASSERT(i != val.m_value.object->cend());
+ JSON_ASSERT(std::next(i) == val.m_value.object->cend());
+ o->write_characters(indent_string.c_str(), new_indent);
+ o->write_character('\"');
+ dump_escaped(i->first, ensure_ascii);
+ o->write_characters("\": ", 3);
+ dump(i->second, true, ensure_ascii, indent_step, new_indent);
+
+ o->write_character('\n');
+ o->write_characters(indent_string.c_str(), current_indent);
+ o->write_character('}');
+ }
+ else
+ {
+ o->write_character('{');
+
+ // first n-1 elements
+ auto i = val.m_value.object->cbegin();
+ for (std::size_t cnt = 0; cnt < val.m_value.object->size() - 1; ++cnt, ++i)
+ {
+ o->write_character('\"');
+ dump_escaped(i->first, ensure_ascii);
+ o->write_characters("\":", 2);
+ dump(i->second, false, ensure_ascii, indent_step, current_indent);
+ o->write_character(',');
+ }
+
+ // last element
+ JSON_ASSERT(i != val.m_value.object->cend());
+ JSON_ASSERT(std::next(i) == val.m_value.object->cend());
+ o->write_character('\"');
+ dump_escaped(i->first, ensure_ascii);
+ o->write_characters("\":", 2);
+ dump(i->second, false, ensure_ascii, indent_step, current_indent);
+
+ o->write_character('}');
+ }
+
+ return;
+ }
+
+ case value_t::array:
+ {
+ if (val.m_value.array->empty())
+ {
+ o->write_characters("[]", 2);
+ return;
+ }
+
+ if (pretty_print)
+ {
+ o->write_characters("[\n", 2);
+
+ // variable to hold indentation for recursive calls
+ const auto new_indent = current_indent + indent_step;
+ if (JSON_HEDLEY_UNLIKELY(indent_string.size() < new_indent))
+ {
+ indent_string.resize(indent_string.size() * 2, ' ');
+ }
+
+ // first n-1 elements
+ for (auto i = val.m_value.array->cbegin();
+ i != val.m_value.array->cend() - 1; ++i)
+ {
+ o->write_characters(indent_string.c_str(), new_indent);
+ dump(*i, true, ensure_ascii, indent_step, new_indent);
+ o->write_characters(",\n", 2);
+ }
+
+ // last element
+ JSON_ASSERT(!val.m_value.array->empty());
+ o->write_characters(indent_string.c_str(), new_indent);
+ dump(val.m_value.array->back(), true, ensure_ascii, indent_step, new_indent);
+
+ o->write_character('\n');
+ o->write_characters(indent_string.c_str(), current_indent);
+ o->write_character(']');
+ }
+ else
+ {
+ o->write_character('[');
+
+ // first n-1 elements
+ for (auto i = val.m_value.array->cbegin();
+ i != val.m_value.array->cend() - 1; ++i)
+ {
+ dump(*i, false, ensure_ascii, indent_step, current_indent);
+ o->write_character(',');
+ }
+
+ // last element
+ JSON_ASSERT(!val.m_value.array->empty());
+ dump(val.m_value.array->back(), false, ensure_ascii, indent_step, current_indent);
+
+ o->write_character(']');
+ }
+
+ return;
+ }
+
+ case value_t::string:
+ {
+ o->write_character('\"');
+ dump_escaped(*val.m_value.string, ensure_ascii);
+ o->write_character('\"');
+ return;
+ }
+
+ case value_t::binary:
+ {
+ if (pretty_print)
+ {
+ o->write_characters("{\n", 2);
+
+ // variable to hold indentation for recursive calls
+ const auto new_indent = current_indent + indent_step;
+ if (JSON_HEDLEY_UNLIKELY(indent_string.size() < new_indent))
+ {
+ indent_string.resize(indent_string.size() * 2, ' ');
+ }
+
+ o->write_characters(indent_string.c_str(), new_indent);
+
+ o->write_characters("\"bytes\": [", 10);
+
+ if (!val.m_value.binary->empty())
+ {
+ for (auto i = val.m_value.binary->cbegin();
+ i != val.m_value.binary->cend() - 1; ++i)
+ {
+ dump_integer(*i);
+ o->write_characters(", ", 2);
+ }
+ dump_integer(val.m_value.binary->back());
+ }
+
+ o->write_characters("],\n", 3);
+ o->write_characters(indent_string.c_str(), new_indent);
+
+ o->write_characters("\"subtype\": ", 11);
+ if (val.m_value.binary->has_subtype())
+ {
+ dump_integer(val.m_value.binary->subtype());
+ }
+ else
+ {
+ o->write_characters("null", 4);
+ }
+ o->write_character('\n');
+ o->write_characters(indent_string.c_str(), current_indent);
+ o->write_character('}');
+ }
+ else
+ {
+ o->write_characters("{\"bytes\":[", 10);
+
+ if (!val.m_value.binary->empty())
+ {
+ for (auto i = val.m_value.binary->cbegin();
+ i != val.m_value.binary->cend() - 1; ++i)
+ {
+ dump_integer(*i);
+ o->write_character(',');
+ }
+ dump_integer(val.m_value.binary->back());
+ }
+
+ o->write_characters("],\"subtype\":", 12);
+ if (val.m_value.binary->has_subtype())
+ {
+ dump_integer(val.m_value.binary->subtype());
+ o->write_character('}');
+ }
+ else
+ {
+ o->write_characters("null}", 5);
+ }
+ }
+ return;
+ }
+
+ case value_t::boolean:
+ {
+ if (val.m_value.boolean)
+ {
+ o->write_characters("true", 4);
+ }
+ else
+ {
+ o->write_characters("false", 5);
+ }
+ return;
+ }
+
+ case value_t::number_integer:
+ {
+ dump_integer(val.m_value.number_integer);
+ return;
+ }
+
+ case value_t::number_unsigned:
+ {
+ dump_integer(val.m_value.number_unsigned);
+ return;
+ }
+
+ case value_t::number_float:
+ {
+ dump_float(val.m_value.number_float);
+ return;
+ }
+
+ case value_t::discarded:
+ {
+ o->write_characters("<discarded>", 11);
+ return;
+ }
+
+ case value_t::null:
+ {
+ o->write_characters("null", 4);
+ return;
+ }
+
+ default: // LCOV_EXCL_LINE
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
+ }
+ }
+
+ private:
+ /*!
+ @brief dump escaped string
+
+ Escape a string by replacing certain special characters by a sequence of an
+ escape character (backslash) and another character and other control
+ characters by a sequence of "\u" followed by a four-digit hex
+ representation. The escaped string is written to output stream @a o.
+
+ @param[in] s the string to escape
+ @param[in] ensure_ascii whether to escape non-ASCII characters with
+ \uXXXX sequences
+
+ @complexity Linear in the length of string @a s.
+ */
+ void dump_escaped(const string_t& s, const bool ensure_ascii)
+ {
+ std::uint32_t codepoint;
+ std::uint8_t state = UTF8_ACCEPT;
+ std::size_t bytes = 0; // number of bytes written to string_buffer
+
+ // number of bytes written at the point of the last valid byte
+ std::size_t bytes_after_last_accept = 0;
+ std::size_t undumped_chars = 0;
+
+ for (std::size_t i = 0; i < s.size(); ++i)
+ {
+ const auto byte = static_cast<uint8_t>(s[i]);
+
+ switch (decode(state, codepoint, byte))
+ {
+ case UTF8_ACCEPT: // decode found a new code point
+ {
+ switch (codepoint)
+ {
+ case 0x08: // backspace
+ {
+ string_buffer[bytes++] = '\\';
+ string_buffer[bytes++] = 'b';
+ break;
+ }
+
+ case 0x09: // horizontal tab
+ {
+ string_buffer[bytes++] = '\\';
+ string_buffer[bytes++] = 't';
+ break;
+ }
+
+ case 0x0A: // newline
+ {
+ string_buffer[bytes++] = '\\';
+ string_buffer[bytes++] = 'n';
+ break;
+ }
+
+ case 0x0C: // formfeed
+ {
+ string_buffer[bytes++] = '\\';
+ string_buffer[bytes++] = 'f';
+ break;
+ }
+
+ case 0x0D: // carriage return
+ {
+ string_buffer[bytes++] = '\\';
+ string_buffer[bytes++] = 'r';
+ break;
+ }
+
+ case 0x22: // quotation mark
+ {
+ string_buffer[bytes++] = '\\';
+ string_buffer[bytes++] = '\"';
+ break;
+ }
+
+ case 0x5C: // reverse solidus
+ {
+ string_buffer[bytes++] = '\\';
+ string_buffer[bytes++] = '\\';
+ break;
+ }
+
+ default:
+ {
+ // escape control characters (0x00..0x1F) or, if
+ // ensure_ascii parameter is used, non-ASCII characters
+ if ((codepoint <= 0x1F) || (ensure_ascii && (codepoint >= 0x7F)))
+ {
+ if (codepoint <= 0xFFFF)
+ {
+ (std::snprintf)(string_buffer.data() + bytes, 7, "\\u%04x",
+ static_cast<std::uint16_t>(codepoint));
+ bytes += 6;
+ }
+ else
+ {
+ (std::snprintf)(string_buffer.data() + bytes, 13, "\\u%04x\\u%04x",
+ static_cast<std::uint16_t>(0xD7C0u + (codepoint >> 10u)),
+ static_cast<std::uint16_t>(0xDC00u + (codepoint & 0x3FFu)));
+ bytes += 12;
+ }
+ }
+ else
+ {
+                                // copy byte to buffer (all previous bytes
+                                // have been copied in the default case above)
+ string_buffer[bytes++] = s[i];
+ }
+ break;
+ }
+ }
+
+ // write buffer and reset index; there must be 13 bytes
+ // left, as this is the maximal number of bytes to be
+ // written ("\uxxxx\uxxxx\0") for one code point
+ if (string_buffer.size() - bytes < 13)
+ {
+ o->write_characters(string_buffer.data(), bytes);
+ bytes = 0;
+ }
+
+ // remember the byte position of this accept
+ bytes_after_last_accept = bytes;
+ undumped_chars = 0;
+ break;
+ }
+
+ case UTF8_REJECT: // decode found invalid UTF-8 byte
+ {
+ switch (error_handler)
+ {
+ case error_handler_t::strict:
+ {
+ std::string sn(3, '\0');
+ (std::snprintf)(&sn[0], sn.size(), "%.2X", byte);
+ JSON_THROW(type_error::create(316, "invalid UTF-8 byte at index " + std::to_string(i) + ": 0x" + sn));
+ }
+
+ case error_handler_t::ignore:
+ case error_handler_t::replace:
+ {
+ // in case we saw this character the first time, we
+ // would like to read it again, because the byte
+ // may be OK for itself, but just not OK for the
+ // previous sequence
+ if (undumped_chars > 0)
+ {
+ --i;
+ }
+
+ // reset length buffer to the last accepted index;
+ // thus removing/ignoring the invalid characters
+ bytes = bytes_after_last_accept;
+
+ if (error_handler == error_handler_t::replace)
+ {
+ // add a replacement character
+ if (ensure_ascii)
+ {
+ string_buffer[bytes++] = '\\';
+ string_buffer[bytes++] = 'u';
+ string_buffer[bytes++] = 'f';
+ string_buffer[bytes++] = 'f';
+ string_buffer[bytes++] = 'f';
+ string_buffer[bytes++] = 'd';
+ }
+ else
+ {
+ string_buffer[bytes++] = detail::binary_writer<BasicJsonType, char>::to_char_type('\xEF');
+ string_buffer[bytes++] = detail::binary_writer<BasicJsonType, char>::to_char_type('\xBF');
+ string_buffer[bytes++] = detail::binary_writer<BasicJsonType, char>::to_char_type('\xBD');
+ }
+
+ // write buffer and reset index; there must be 13 bytes
+ // left, as this is the maximal number of bytes to be
+ // written ("\uxxxx\uxxxx\0") for one code point
+ if (string_buffer.size() - bytes < 13)
+ {
+ o->write_characters(string_buffer.data(), bytes);
+ bytes = 0;
+ }
+
+ bytes_after_last_accept = bytes;
+ }
+
+ undumped_chars = 0;
+
+ // continue processing the string
+ state = UTF8_ACCEPT;
+ break;
+ }
+
+ default: // LCOV_EXCL_LINE
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
+ }
+ break;
+ }
+
+ default: // decode found yet incomplete multi-byte code point
+ {
+ if (!ensure_ascii)
+ {
+ // code point will not be escaped - copy byte to buffer
+ string_buffer[bytes++] = s[i];
+ }
+ ++undumped_chars;
+ break;
+ }
+ }
+ }
+
+ // we finished processing the string
+ if (JSON_HEDLEY_LIKELY(state == UTF8_ACCEPT))
+ {
+ // write buffer
+ if (bytes > 0)
+ {
+ o->write_characters(string_buffer.data(), bytes);
+ }
+ }
+ else
+ {
+ // we finish reading, but do not accept: string was incomplete
+ switch (error_handler)
+ {
+ case error_handler_t::strict:
+ {
+ std::string sn(3, '\0');
+ (std::snprintf)(&sn[0], sn.size(), "%.2X", static_cast<std::uint8_t>(s.back()));
+ JSON_THROW(type_error::create(316, "incomplete UTF-8 string; last byte: 0x" + sn));
+ }
+
+ case error_handler_t::ignore:
+ {
+ // write all accepted bytes
+ o->write_characters(string_buffer.data(), bytes_after_last_accept);
+ break;
+ }
+
+ case error_handler_t::replace:
+ {
+ // write all accepted bytes
+ o->write_characters(string_buffer.data(), bytes_after_last_accept);
+ // add a replacement character
+ if (ensure_ascii)
+ {
+ o->write_characters("\\ufffd", 6);
+ }
+ else
+ {
+ o->write_characters("\xEF\xBF\xBD", 3);
+ }
+ break;
+ }
+
+ default: // LCOV_EXCL_LINE
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
+ }
+ }
+ }
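The three error_handler_t modes are selected through the public dump() interface. A small sketch (assuming the complete nlohmann/json header) of how an invalid byte sequence is handled:

    nlohmann::json j = "ill-formed: \xF0\x80";    // payload is not valid UTF-8
    // j.dump() would throw json.exception.type_error.316 (strict is the default);
    // the replace handler substitutes U+FFFD instead of throwing:
    std::string s = j.dump(-1, ' ', false, nlohmann::json::error_handler_t::replace);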
+
+ /*!
+ @brief count digits
+
+ Count the number of decimal (base 10) digits for an input unsigned integer.
+
+ @param[in] x unsigned integer number to count its digits
+ @return number of decimal digits
+ */
+ inline unsigned int count_digits(number_unsigned_t x) noexcept
+ {
+ unsigned int n_digits = 1;
+ for (;;)
+ {
+ if (x < 10)
+ {
+ return n_digits;
+ }
+ if (x < 100)
+ {
+ return n_digits + 1;
+ }
+ if (x < 1000)
+ {
+ return n_digits + 2;
+ }
+ if (x < 10000)
+ {
+ return n_digits + 3;
+ }
+ x = x / 10000u;
+ n_digits += 4;
+ }
+ }
+
+ /*!
+ @brief dump an integer
+
+ Dump a given integer to output stream @a o. Works internally with
+ @a number_buffer.
+
+ @param[in] x integer number (signed or unsigned) to dump
+ @tparam NumberType either @a number_integer_t or @a number_unsigned_t
+ */
+ template < typename NumberType, detail::enable_if_t <
+ std::is_same<NumberType, number_unsigned_t>::value ||
+ std::is_same<NumberType, number_integer_t>::value ||
+ std::is_same<NumberType, binary_char_t>::value,
+ int > = 0 >
+ void dump_integer(NumberType x)
+ {
+ static constexpr std::array<std::array<char, 2>, 100> digits_to_99
+ {
+ {
+ {{'0', '0'}}, {{'0', '1'}}, {{'0', '2'}}, {{'0', '3'}}, {{'0', '4'}}, {{'0', '5'}}, {{'0', '6'}}, {{'0', '7'}}, {{'0', '8'}}, {{'0', '9'}},
+ {{'1', '0'}}, {{'1', '1'}}, {{'1', '2'}}, {{'1', '3'}}, {{'1', '4'}}, {{'1', '5'}}, {{'1', '6'}}, {{'1', '7'}}, {{'1', '8'}}, {{'1', '9'}},
+ {{'2', '0'}}, {{'2', '1'}}, {{'2', '2'}}, {{'2', '3'}}, {{'2', '4'}}, {{'2', '5'}}, {{'2', '6'}}, {{'2', '7'}}, {{'2', '8'}}, {{'2', '9'}},
+ {{'3', '0'}}, {{'3', '1'}}, {{'3', '2'}}, {{'3', '3'}}, {{'3', '4'}}, {{'3', '5'}}, {{'3', '6'}}, {{'3', '7'}}, {{'3', '8'}}, {{'3', '9'}},
+ {{'4', '0'}}, {{'4', '1'}}, {{'4', '2'}}, {{'4', '3'}}, {{'4', '4'}}, {{'4', '5'}}, {{'4', '6'}}, {{'4', '7'}}, {{'4', '8'}}, {{'4', '9'}},
+ {{'5', '0'}}, {{'5', '1'}}, {{'5', '2'}}, {{'5', '3'}}, {{'5', '4'}}, {{'5', '5'}}, {{'5', '6'}}, {{'5', '7'}}, {{'5', '8'}}, {{'5', '9'}},
+ {{'6', '0'}}, {{'6', '1'}}, {{'6', '2'}}, {{'6', '3'}}, {{'6', '4'}}, {{'6', '5'}}, {{'6', '6'}}, {{'6', '7'}}, {{'6', '8'}}, {{'6', '9'}},
+ {{'7', '0'}}, {{'7', '1'}}, {{'7', '2'}}, {{'7', '3'}}, {{'7', '4'}}, {{'7', '5'}}, {{'7', '6'}}, {{'7', '7'}}, {{'7', '8'}}, {{'7', '9'}},
+ {{'8', '0'}}, {{'8', '1'}}, {{'8', '2'}}, {{'8', '3'}}, {{'8', '4'}}, {{'8', '5'}}, {{'8', '6'}}, {{'8', '7'}}, {{'8', '8'}}, {{'8', '9'}},
+ {{'9', '0'}}, {{'9', '1'}}, {{'9', '2'}}, {{'9', '3'}}, {{'9', '4'}}, {{'9', '5'}}, {{'9', '6'}}, {{'9', '7'}}, {{'9', '8'}}, {{'9', '9'}},
+ }
+ };
+
+ // special case for "0"
+ if (x == 0)
+ {
+ o->write_character('0');
+ return;
+ }
+
+ // use a pointer to fill the buffer
+ auto buffer_ptr = number_buffer.begin();
+
+ const bool is_negative = std::is_same<NumberType, number_integer_t>::value && !(x >= 0); // see issue #755
+ number_unsigned_t abs_value;
+
+ unsigned int n_chars;
+
+ if (is_negative)
+ {
+ *buffer_ptr = '-';
+ abs_value = remove_sign(static_cast<number_integer_t>(x));
+
+ // account one more byte for the minus sign
+ n_chars = 1 + count_digits(abs_value);
+ }
+ else
+ {
+ abs_value = static_cast<number_unsigned_t>(x);
+ n_chars = count_digits(abs_value);
+ }
+
+ // spare 1 byte for '\0'
+ JSON_ASSERT(n_chars < number_buffer.size() - 1);
+
+ // jump to the end to generate the string from backward
+ // so we later avoid reversing the result
+ buffer_ptr += n_chars;
+
+ // Fast int2ascii implementation inspired by "Fastware" talk by Andrei Alexandrescu
+ // See: https://www.youtube.com/watch?v=o4-CwDo2zpg
+ while (abs_value >= 100)
+ {
+ const auto digits_index = static_cast<unsigned>((abs_value % 100));
+ abs_value /= 100;
+ *(--buffer_ptr) = digits_to_99[digits_index][1];
+ *(--buffer_ptr) = digits_to_99[digits_index][0];
+ }
+
+ if (abs_value >= 10)
+ {
+ const auto digits_index = static_cast<unsigned>(abs_value);
+ *(--buffer_ptr) = digits_to_99[digits_index][1];
+ *(--buffer_ptr) = digits_to_99[digits_index][0];
+ }
+ else
+ {
+ *(--buffer_ptr) = static_cast<char>('0' + abs_value);
+ }
+
+ o->write_characters(number_buffer.data(), n_chars);
+ }
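The loop above is the classic two-digits-per-division trick: one modulo by 100 yields two characters from the lookup table, halving the number of divisions. A free-standing sketch of the same idea (simplified, unsigned only, not the serializer's code):

    #include <cstdint>
    #include <string>

    // Convert an unsigned value to decimal, two digits per division step.
    std::string u64_to_string(std::uint64_t v)
    {
        static const char table[] =
            "00010203040506070809101112131415161718192021222324"
            "25262728293031323334353637383940414243444546474849"
            "50515253545556575859606162636465666768697071727374"
            "75767778798081828384858687888990919293949596979899";
        char buf[20];                     // max_digits(uint64_t) == 20
        char* p = buf + sizeof(buf);
        while (v >= 100)
        {
            const std::uint64_t i = (v % 100) * 2;
            v /= 100;
            *--p = table[i + 1];
            *--p = table[i];
        }
        if (v >= 10)
        {
            const std::uint64_t i = v * 2;
            *--p = table[i + 1];
            *--p = table[i];
        }
        else
        {
            *--p = static_cast<char>('0' + v);
        }
        return std::string(p, buf + sizeof(buf));
    }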
+
+ /*!
+ @brief dump a floating-point number
+
+ Dump a given floating-point number to output stream @a o. Works internally
+ with @a number_buffer.
+
+ @param[in] x floating-point number to dump
+ */
+ void dump_float(number_float_t x)
+ {
+ // NaN / inf
+ if (!std::isfinite(x))
+ {
+ o->write_characters("null", 4);
+ return;
+ }
+
+ // If number_float_t is an IEEE-754 single or double precision number,
+ // use the Grisu2 algorithm to produce short numbers which are
+ // guaranteed to round-trip, using strtof and strtod, resp.
+ //
+ // NB: The test below works if <long double> == <double>.
+ static constexpr bool is_ieee_single_or_double
+ = (std::numeric_limits<number_float_t>::is_iec559 && std::numeric_limits<number_float_t>::digits == 24 && std::numeric_limits<number_float_t>::max_exponent == 128) ||
+ (std::numeric_limits<number_float_t>::is_iec559 && std::numeric_limits<number_float_t>::digits == 53 && std::numeric_limits<number_float_t>::max_exponent == 1024);
+
+ dump_float(x, std::integral_constant<bool, is_ieee_single_or_double>());
+ }
+
+ void dump_float(number_float_t x, std::true_type /*is_ieee_single_or_double*/)
+ {
+ char* begin = number_buffer.data();
+ char* end = ::nlohmann::detail::to_chars(begin, begin + number_buffer.size(), x);
+
+ o->write_characters(begin, static_cast<size_t>(end - begin));
+ }
+
+ void dump_float(number_float_t x, std::false_type /*is_ieee_single_or_double*/)
+ {
+ // get number of digits for a float -> text -> float round-trip
+ static constexpr auto d = std::numeric_limits<number_float_t>::max_digits10;
+
+ // the actual conversion
+ std::ptrdiff_t len = (std::snprintf)(number_buffer.data(), number_buffer.size(), "%.*g", d, x);
+
+ // negative value indicates an error
+ JSON_ASSERT(len > 0);
+ // check if buffer was large enough
+ JSON_ASSERT(static_cast<std::size_t>(len) < number_buffer.size());
+
+ // erase thousands separator
+ if (thousands_sep != '\0')
+ {
+ const auto end = std::remove(number_buffer.begin(),
+ number_buffer.begin() + len, thousands_sep);
+ std::fill(end, number_buffer.end(), '\0');
+ JSON_ASSERT((end - number_buffer.begin()) <= len);
+ len = (end - number_buffer.begin());
+ }
+
+ // convert decimal point to '.'
+ if (decimal_point != '\0' && decimal_point != '.')
+ {
+ const auto dec_pos = std::find(number_buffer.begin(), number_buffer.end(), decimal_point);
+ if (dec_pos != number_buffer.end())
+ {
+ *dec_pos = '.';
+ }
+ }
+
+ o->write_characters(number_buffer.data(), static_cast<std::size_t>(len));
+
+ // determine if need to append ".0"
+ const bool value_is_int_like =
+ std::none_of(number_buffer.begin(), number_buffer.begin() + len + 1,
+ [](char c)
+ {
+ return c == '.' || c == 'e';
+ });
+
+ if (value_is_int_like)
+ {
+ o->write_characters(".0", 2);
+ }
+ }
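The thousands-separator and decimal-point fixups exist because snprintf("%g") is locale dependent. A tiny demonstration of that dependency (assumes a de_DE locale is installed on the system):

    #include <clocale>
    #include <cstdio>

    int main()
    {
        std::setlocale(LC_NUMERIC, "de_DE.UTF-8");
        char buf[32];
        std::snprintf(buf, sizeof(buf), "%.6g", 3.14);  // typically "3,14" under de_DE
        std::puts(buf);                                 // the serializer rewrites ',' to '.'
        return 0;
    }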
+
+ /*!
+ @brief check whether a string is UTF-8 encoded
+
+    The function checks each byte of a string for valid UTF-8 encoding. The
+ result of the check is stored in the @a state parameter. The function must
+ be called initially with state 0 (accept). State 1 means the string must
+ be rejected, because the current byte is not allowed. If the string is
+ completely processed, but the state is non-zero, the string ended
+ prematurely; that is, the last byte indicated more bytes should have
+ followed.
+
+ @param[in,out] state the state of the decoding
+ @param[in,out] codep codepoint (valid only if resulting state is UTF8_ACCEPT)
+ @param[in] byte next byte to decode
+ @return new state
+
+ @note The function has been edited: a std::array is used.
+
+ @copyright Copyright (c) 2008-2009 Bjoern Hoehrmann <bjoern@hoehrmann.de>
+ @sa http://bjoern.hoehrmann.de/utf-8/decoder/dfa/
+ */
+ static std::uint8_t decode(std::uint8_t& state, std::uint32_t& codep, const std::uint8_t byte) noexcept
+ {
+ static const std::array<std::uint8_t, 400> utf8d =
+ {
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 00..1F
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20..3F
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 40..5F
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 60..7F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, // 80..9F
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // A0..BF
+ 8, 8, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // C0..DF
+ 0xA, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x4, 0x3, 0x3, // E0..EF
+ 0xB, 0x6, 0x6, 0x6, 0x5, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, // F0..FF
+ 0x0, 0x1, 0x2, 0x3, 0x5, 0x8, 0x7, 0x1, 0x1, 0x1, 0x4, 0x6, 0x1, 0x1, 0x1, 0x1, // s0..s0
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, // s1..s2
+ 1, 2, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, // s3..s4
+ 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, // s5..s6
+ 1, 3, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 // s7..s8
+ }
+ };
+
+ const std::uint8_t type = utf8d[byte];
+
+ codep = (state != UTF8_ACCEPT)
+ ? (byte & 0x3fu) | (codep << 6u)
+ : (0xFFu >> type) & (byte);
+
+ std::size_t index = 256u + static_cast<size_t>(state) * 16u + static_cast<size_t>(type);
+ JSON_ASSERT(index < 400);
+ state = utf8d[index];
+ return state;
+ }
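Since decode() is a private member, here is a free-standing sketch of how such a Hoehrmann-style DFA is typically driven over a whole string. The step function is passed in; all names are illustrative, not part of the header:

    // Returns true if every byte was accepted and no sequence is left incomplete.
    template<typename Step>
    bool is_valid_utf8(const std::string& s, Step step)
    {
        std::uint8_t state = 0;        // UTF8_ACCEPT
        std::uint32_t codepoint = 0;
        for (const char c : s)
        {
            step(state, codepoint, static_cast<std::uint8_t>(c));
            if (state == 1)            // UTF8_REJECT: bail out early
            {
                return false;
            }
        }
        return state == 0;             // a trailing partial sequence also fails
    }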
+
+ /*
+ * Overload to make the compiler happy while it is instantiating
+ * dump_integer for number_unsigned_t.
+ * Must never be called.
+ */
+ number_unsigned_t remove_sign(number_unsigned_t x)
+ {
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
+ return x; // LCOV_EXCL_LINE
+ }
+
+ /*
+ * Helper function for dump_integer
+ *
+ * This function takes a negative signed integer and returns its absolute
+ * value as unsigned integer. The plus/minus shuffling is necessary as we can
+     * value as an unsigned integer. The plus/minus shuffling is necessary as we
+     * cannot directly negate an arbitrary signed integer, because the
+ * #1708 for details.
+ */
+ inline number_unsigned_t remove_sign(number_integer_t x) noexcept
+ {
+ JSON_ASSERT(x < 0 && x < (std::numeric_limits<number_integer_t>::max)());
+ return static_cast<number_unsigned_t>(-(x + 1)) + 1;
+ }
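The detour through -(x + 1) + 1 avoids signed overflow for the most negative value: negating INT64_MIN directly is undefined behaviour, while INT64_MIN + 1 negates safely. A compile-time check of the identity, illustrative and written for the default int64_t/uint64_t configuration:

    static_assert(
        static_cast<std::uint64_t>(-(std::numeric_limits<std::int64_t>::min() + 1)) + 1u
            == 9223372036854775808ull,
        "abs(INT64_MIN) computed without signed overflow");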
+
+ private:
+ /// the output of the serializer
+ output_adapter_t<char> o = nullptr;
+
+ /// a (hopefully) large enough character buffer
+ std::array<char, 64> number_buffer{{}};
+
+ /// the locale
+ const std::lconv* loc = nullptr;
+ /// the locale's thousand separator character
+ const char thousands_sep = '\0';
+ /// the locale's decimal point character
+ const char decimal_point = '\0';
+
+ /// string buffer
+ std::array<char, 512> string_buffer{{}};
+
+ /// the indentation character
+ const char indent_char;
+ /// the indentation string
+ string_t indent_string;
+
+ /// error_handler how to react on decoding errors
+ const error_handler_t error_handler;
+};
+} // namespace detail
+} // namespace nlohmann
+
+// #include <nlohmann/detail/value_t.hpp>
+
+// #include <nlohmann/json_fwd.hpp>
+
+// #include <nlohmann/ordered_map.hpp>
+
+
+#include <functional> // less
+#include <memory> // allocator
+#include <utility> // pair
+#include <vector> // vector
+
+namespace nlohmann
+{
+
+/// ordered_map: a minimal map-like container that preserves insertion order
+/// for use within nlohmann::basic_json<ordered_map>
+template <class Key, class T, class IgnoredLess = std::less<Key>,
+ class Allocator = std::allocator<std::pair<const Key, T>>>
+ struct ordered_map : std::vector<std::pair<const Key, T>, Allocator>
+{
+ using key_type = Key;
+ using mapped_type = T;
+ using Container = std::vector<std::pair<const Key, T>, Allocator>;
+ using typename Container::iterator;
+ using typename Container::const_iterator;
+ using typename Container::size_type;
+ using typename Container::value_type;
+
+ // Explicit constructors instead of `using Container::Container`
+ // otherwise older compilers choke on it (GCC <= 5.5, xcode <= 9.4)
+ ordered_map(const Allocator& alloc = Allocator()) : Container{alloc} {}
+ template <class It>
+ ordered_map(It first, It last, const Allocator& alloc = Allocator())
+ : Container{first, last, alloc} {}
+ ordered_map(std::initializer_list<T> init, const Allocator& alloc = Allocator() )
+ : Container{init, alloc} {}
+
+ std::pair<iterator, bool> emplace(const key_type& key, T&& t)
+ {
+ for (auto it = this->begin(); it != this->end(); ++it)
+ {
+ if (it->first == key)
+ {
+ return {it, false};
+ }
+ }
+ Container::emplace_back(key, t);
+ return {--this->end(), true};
+ }
+
+ T& operator[](const Key& key)
+ {
+ return emplace(key, T{}).first->second;
+ }
+
+ const T& operator[](const Key& key) const
+ {
+ return at(key);
+ }
+
+ T& at(const Key& key)
+ {
+ for (auto it = this->begin(); it != this->end(); ++it)
+ {
+ if (it->first == key)
+ {
+ return it->second;
+ }
+ }
+
+ throw std::out_of_range("key not found");
+ }
+
+ const T& at(const Key& key) const
+ {
+ for (auto it = this->begin(); it != this->end(); ++it)
+ {
+ if (it->first == key)
+ {
+ return it->second;
+ }
+ }
+
+ throw std::out_of_range("key not found");
+ }
+
+ size_type erase(const Key& key)
+ {
+ for (auto it = this->begin(); it != this->end(); ++it)
+ {
+ if (it->first == key)
+ {
+ // Since we cannot move const Keys, re-construct them in place
+ for (auto next = it; ++next != this->end(); ++it)
+ {
+ it->~value_type(); // Destroy but keep allocation
+ new (&*it) value_type{std::move(*next)};
+ }
+ Container::pop_back();
+ return 1;
+ }
+ }
+ return 0;
+ }
+
+ iterator erase(iterator pos)
+ {
+ auto it = pos;
+
+ // Since we cannot move const Keys, re-construct them in place
+ for (auto next = it; ++next != this->end(); ++it)
+ {
+ it->~value_type(); // Destroy but keep allocation
+ new (&*it) value_type{std::move(*next)};
+ }
+ Container::pop_back();
+ return pos;
+ }
+
+ size_type count(const Key& key) const
+ {
+ for (auto it = this->begin(); it != this->end(); ++it)
+ {
+ if (it->first == key)
+ {
+ return 1;
+ }
+ }
+ return 0;
+ }
+
+ iterator find(const Key& key)
+ {
+ for (auto it = this->begin(); it != this->end(); ++it)
+ {
+ if (it->first == key)
+ {
+ return it;
+ }
+ }
+ return Container::end();
+ }
+
+ const_iterator find(const Key& key) const
+ {
+ for (auto it = this->begin(); it != this->end(); ++it)
+ {
+ if (it->first == key)
+ {
+ return it;
+ }
+ }
+ return Container::end();
+ }
+
+ std::pair<iterator, bool> insert( value_type&& value )
+ {
+ return emplace(value.first, std::move(value.second));
+ }
+
+ std::pair<iterator, bool> insert( const value_type& value )
+ {
+ for (auto it = this->begin(); it != this->end(); ++it)
+ {
+ if (it->first == value.first)
+ {
+ return {it, false};
+ }
+ }
+ Container::push_back(value);
+ return {--this->end(), true};
+ }
+};
+
+} // namespace nlohmann
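A short usage sketch (assuming this header is included): ordered_map keeps keys in insertion order, which is what nlohmann::ordered_json relies on, in contrast to the sorted std::map used by the default nlohmann::json.

    #include <iostream>
    #include <string>

    int main()
    {
        nlohmann::ordered_map<std::string, int> m;
        m["zebra"] = 1;
        m["apple"] = 2;
        m["mango"] = 3;
        for (const auto& kv : m)
        {
            std::cout << kv.first << '\n';   // prints: zebra, apple, mango
        }
        return 0;
    }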
+
+
+/*!
+@brief namespace for Niels Lohmann
+@see https://github.com/nlohmann
+@since version 1.0.0
+*/
+namespace nlohmann
+{
+
+/*!
+@brief a class to store JSON values
+
+@tparam ObjectType type for JSON objects (`std::map` by default; will be used
+in @ref object_t)
+@tparam ArrayType type for JSON arrays (`std::vector` by default; will be used
+in @ref array_t)
+@tparam StringType type for JSON strings and object keys (`std::string` by
+default; will be used in @ref string_t)
+@tparam BooleanType type for JSON booleans (`bool` by default; will be used
+in @ref boolean_t)
+@tparam NumberIntegerType type for JSON integer numbers (`int64_t` by
+default; will be used in @ref number_integer_t)
+@tparam NumberUnsignedType type for JSON unsigned integer numbers (@c
+`uint64_t` by default; will be used in @ref number_unsigned_t)
+@tparam NumberFloatType type for JSON floating-point numbers (`double` by
+default; will be used in @ref number_float_t)
+@tparam BinaryType type for packed binary data for compatibility with binary
+serialization formats (`std::vector<std::uint8_t>` by default; will be used in
+@ref binary_t)
+@tparam AllocatorType type of the allocator to use (`std::allocator` by
+default)
+@tparam JSONSerializer the serializer to resolve internal calls to `to_json()`
+and `from_json()` (@ref adl_serializer by default)
+
+@requirement The class satisfies the following concept requirements:
+- Basic
+ - [DefaultConstructible](https://en.cppreference.com/w/cpp/named_req/DefaultConstructible):
+ JSON values can be default constructed. The result will be a JSON null
+ value.
+ - [MoveConstructible](https://en.cppreference.com/w/cpp/named_req/MoveConstructible):
+ A JSON value can be constructed from an rvalue argument.
+ - [CopyConstructible](https://en.cppreference.com/w/cpp/named_req/CopyConstructible):
+ A JSON value can be copy-constructed from an lvalue expression.
+ - [MoveAssignable](https://en.cppreference.com/w/cpp/named_req/MoveAssignable):
+    A JSON value can be assigned from an rvalue argument.
+ - [CopyAssignable](https://en.cppreference.com/w/cpp/named_req/CopyAssignable):
+ A JSON value can be copy-assigned from an lvalue expression.
+ - [Destructible](https://en.cppreference.com/w/cpp/named_req/Destructible):
+ JSON values can be destructed.
+- Layout
+ - [StandardLayoutType](https://en.cppreference.com/w/cpp/named_req/StandardLayoutType):
+ JSON values have
+ [standard layout](https://en.cppreference.com/w/cpp/language/data_members#Standard_layout):
+ All non-static data members are private and standard layout types, the
+ class has no virtual functions or (virtual) base classes.
+- Library-wide
+ - [EqualityComparable](https://en.cppreference.com/w/cpp/named_req/EqualityComparable):
+ JSON values can be compared with `==`, see @ref
+ operator==(const_reference,const_reference).
+ - [LessThanComparable](https://en.cppreference.com/w/cpp/named_req/LessThanComparable):
+ JSON values can be compared with `<`, see @ref
+ operator<(const_reference,const_reference).
+ - [Swappable](https://en.cppreference.com/w/cpp/named_req/Swappable):
+    Any JSON lvalue or rvalue can be swapped with any lvalue or rvalue of
+ other compatible types, using unqualified function call @ref swap().
+ - [NullablePointer](https://en.cppreference.com/w/cpp/named_req/NullablePointer):
+ JSON values can be compared against `std::nullptr_t` objects which are used
+ to model the `null` value.
+- Container
+ - [Container](https://en.cppreference.com/w/cpp/named_req/Container):
+ JSON values can be used like STL containers and provide iterator access.
+  - [ReversibleContainer](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer):
+ JSON values can be used like STL containers and provide reverse iterator
+ access.
+
+@invariant The member variables @a m_value and @a m_type have the following
+relationship:
+- If `m_type == value_t::object`, then `m_value.object != nullptr`.
+- If `m_type == value_t::array`, then `m_value.array != nullptr`.
+- If `m_type == value_t::string`, then `m_value.string != nullptr`.
+The invariants are checked by member function assert_invariant().
+
+@internal
+@note ObjectType trick from https://stackoverflow.com/a/9860911
+@endinternal
+
+@see [RFC 7159: The JavaScript Object Notation (JSON) Data Interchange
+Format](http://rfc7159.net/rfc7159)
+
+@since version 1.0.0
+
+@nosubgrouping
+*/
+NLOHMANN_BASIC_JSON_TPL_DECLARATION
+class basic_json
+{
+ private:
+ template<detail::value_t> friend struct detail::external_constructor;
+ friend ::nlohmann::json_pointer<basic_json>;
+
+ template<typename BasicJsonType, typename InputType>
+ friend class ::nlohmann::detail::parser;
+ friend ::nlohmann::detail::serializer<basic_json>;
+ template<typename BasicJsonType>
+ friend class ::nlohmann::detail::iter_impl;
+ template<typename BasicJsonType, typename CharType>
+ friend class ::nlohmann::detail::binary_writer;
+ template<typename BasicJsonType, typename InputType, typename SAX>
+ friend class ::nlohmann::detail::binary_reader;
+ template<typename BasicJsonType>
+ friend class ::nlohmann::detail::json_sax_dom_parser;
+ template<typename BasicJsonType>
+ friend class ::nlohmann::detail::json_sax_dom_callback_parser;
+
+ /// workaround type for MSVC
+ using basic_json_t = NLOHMANN_BASIC_JSON_TPL;
+
+ // convenience aliases for types residing in namespace detail;
+ using lexer = ::nlohmann::detail::lexer_base<basic_json>;
+
+ template<typename InputAdapterType>
+ static ::nlohmann::detail::parser<basic_json, InputAdapterType> parser(
+ InputAdapterType adapter,
+ detail::parser_callback_t<basic_json>cb = nullptr,
+ const bool allow_exceptions = true,
+ const bool ignore_comments = false
+ )
+ {
+ return ::nlohmann::detail::parser<basic_json, InputAdapterType>(std::move(adapter),
+ std::move(cb), allow_exceptions, ignore_comments);
+ }
+
+ using primitive_iterator_t = ::nlohmann::detail::primitive_iterator_t;
+ template<typename BasicJsonType>
+ using internal_iterator = ::nlohmann::detail::internal_iterator<BasicJsonType>;
+ template<typename BasicJsonType>
+ using iter_impl = ::nlohmann::detail::iter_impl<BasicJsonType>;
+ template<typename Iterator>
+ using iteration_proxy = ::nlohmann::detail::iteration_proxy<Iterator>;
+ template<typename Base> using json_reverse_iterator = ::nlohmann::detail::json_reverse_iterator<Base>;
+
+ template<typename CharType>
+ using output_adapter_t = ::nlohmann::detail::output_adapter_t<CharType>;
+
+ template<typename InputType>
+ using binary_reader = ::nlohmann::detail::binary_reader<basic_json, InputType>;
+ template<typename CharType> using binary_writer = ::nlohmann::detail::binary_writer<basic_json, CharType>;
+
+ using serializer = ::nlohmann::detail::serializer<basic_json>;
+
+ public:
+ using value_t = detail::value_t;
+ /// JSON Pointer, see @ref nlohmann::json_pointer
+ using json_pointer = ::nlohmann::json_pointer<basic_json>;
+ template<typename T, typename SFINAE>
+ using json_serializer = JSONSerializer<T, SFINAE>;
+ /// how to treat decoding errors
+ using error_handler_t = detail::error_handler_t;
+ /// how to treat CBOR tags
+ using cbor_tag_handler_t = detail::cbor_tag_handler_t;
+ /// helper type for initializer lists of basic_json values
+ using initializer_list_t = std::initializer_list<detail::json_ref<basic_json>>;
+
+ using input_format_t = detail::input_format_t;
+ /// SAX interface type, see @ref nlohmann::json_sax
+ using json_sax_t = json_sax<basic_json>;
+
+ ////////////////
+ // exceptions //
+ ////////////////
+
+ /// @name exceptions
+ /// Classes to implement user-defined exceptions.
+ /// @{
+
+ /// @copydoc detail::exception
+ using exception = detail::exception;
+ /// @copydoc detail::parse_error
+ using parse_error = detail::parse_error;
+ /// @copydoc detail::invalid_iterator
+ using invalid_iterator = detail::invalid_iterator;
+ /// @copydoc detail::type_error
+ using type_error = detail::type_error;
+ /// @copydoc detail::out_of_range
+ using out_of_range = detail::out_of_range;
+ /// @copydoc detail::other_error
+ using other_error = detail::other_error;
+
+ /// @}
+
+
+ /////////////////////
+ // container types //
+ /////////////////////
+
+ /// @name container types
+ /// The canonic container types to use @ref basic_json like any other STL
+ /// container.
+ /// @{
+
+ /// the type of elements in a basic_json container
+ using value_type = basic_json;
+
+ /// the type of an element reference
+ using reference = value_type&;
+ /// the type of an element const reference
+ using const_reference = const value_type&;
+
+ /// a type to represent differences between iterators
+ using difference_type = std::ptrdiff_t;
+ /// a type to represent container sizes
+ using size_type = std::size_t;
+
+ /// the allocator type
+ using allocator_type = AllocatorType<basic_json>;
+
+ /// the type of an element pointer
+ using pointer = typename std::allocator_traits<allocator_type>::pointer;
+ /// the type of an element const pointer
+ using const_pointer = typename std::allocator_traits<allocator_type>::const_pointer;
+
+ /// an iterator for a basic_json container
+ using iterator = iter_impl<basic_json>;
+ /// a const iterator for a basic_json container
+ using const_iterator = iter_impl<const basic_json>;
+ /// a reverse iterator for a basic_json container
+ using reverse_iterator = json_reverse_iterator<typename basic_json::iterator>;
+ /// a const reverse iterator for a basic_json container
+ using const_reverse_iterator = json_reverse_iterator<typename basic_json::const_iterator>;
+
+ /// @}
+
+
+ /*!
+ @brief returns the allocator associated with the container
+ */
+ static allocator_type get_allocator()
+ {
+ return allocator_type();
+ }
+
+ /*!
+ @brief returns version information on the library
+
+ This function returns a JSON object with information about the library,
+ including the version number and information on the platform and compiler.
+
+ @return JSON object holding version information
+ key | description
+ ----------- | ---------------
+ `compiler` | Information on the used compiler. It is an object with the following keys: `c++` (the used C++ standard), `family` (the compiler family; possible values are `clang`, `icc`, `gcc`, `ilecpp`, `msvc`, `pgcpp`, `sunpro`, and `unknown`), and `version` (the compiler version).
+ `copyright` | The copyright line for the library as string.
+ `name` | The name of the library as string.
+ `platform` | The used platform as string. Possible values are `win32`, `linux`, `apple`, `unix`, and `unknown`.
+ `url` | The URL of the project as string.
+ `version` | The version of the library. It is an object with the following keys: `major`, `minor`, and `patch` as defined by [Semantic Versioning](http://semver.org), and `string` (the version string).
+
+ @liveexample{The following code shows an example output of the `meta()`
+ function.,meta}
+
+ @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+ changes to any JSON value.
+
+ @complexity Constant.
+
+ @since 2.1.0
+ */
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json meta()
+ {
+ basic_json result;
+
+ result["copyright"] = "(C) 2013-2020 Niels Lohmann";
+ result["name"] = "JSON for Modern C++";
+ result["url"] = "https://github.com/nlohmann/json";
+ result["version"]["string"] =
+ std::to_string(NLOHMANN_JSON_VERSION_MAJOR) + "." +
+ std::to_string(NLOHMANN_JSON_VERSION_MINOR) + "." +
+ std::to_string(NLOHMANN_JSON_VERSION_PATCH);
+ result["version"]["major"] = NLOHMANN_JSON_VERSION_MAJOR;
+ result["version"]["minor"] = NLOHMANN_JSON_VERSION_MINOR;
+ result["version"]["patch"] = NLOHMANN_JSON_VERSION_PATCH;
+
+#ifdef _WIN32
+ result["platform"] = "win32";
+#elif defined __linux__
+ result["platform"] = "linux";
+#elif defined __APPLE__
+ result["platform"] = "apple";
+#elif defined __unix__
+ result["platform"] = "unix";
+#else
+ result["platform"] = "unknown";
+#endif
+
+#if defined(__ICC) || defined(__INTEL_COMPILER)
+ result["compiler"] = {{"family", "icc"}, {"version", __INTEL_COMPILER}};
+#elif defined(__clang__)
+ result["compiler"] = {{"family", "clang"}, {"version", __clang_version__}};
+#elif defined(__GNUC__) || defined(__GNUG__)
+ result["compiler"] = {{"family", "gcc"}, {"version", std::to_string(__GNUC__) + "." + std::to_string(__GNUC_MINOR__) + "." + std::to_string(__GNUC_PATCHLEVEL__)}};
+#elif defined(__HP_cc) || defined(__HP_aCC)
+ result["compiler"] = {{"family", "hp"}, {"version", "unknown"}};
+#elif defined(__IBMCPP__)
+ result["compiler"] = {{"family", "ilecpp"}, {"version", __IBMCPP__}};
+#elif defined(_MSC_VER)
+ result["compiler"] = {{"family", "msvc"}, {"version", _MSC_VER}};
+#elif defined(__PGI)
+ result["compiler"] = {{"family", "pgcpp"}, {"version", __PGI}};
+#elif defined(__SUNPRO_CC)
+ result["compiler"] = {{"family", "sunpro"}, {"version", __SUNPRO_CC}};
+#else
+ result["compiler"] = {{"family", "unknown"}, {"version", "unknown"}};
+#endif
+
+#ifdef __cplusplus
+ result["compiler"]["c++"] = std::to_string(__cplusplus);
+#else
+ result["compiler"]["c++"] = "unknown";
+#endif
+ return result;
+ }
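+
+    // Editorial example (not part of the upstream header): a minimal sketch of
+    // calling meta(), assuming the default `nlohmann::json` alias and the headers
+    // shown; it only prints a few of the keys documented above.
+    //
+    //   #include <nlohmann/json.hpp>
+    //   #include <iostream>
+    //
+    //   int main()
+    //   {
+    //       const nlohmann::json info = nlohmann::json::meta();
+    //       std::cout << info["name"].get<std::string>() << ' '
+    //                 << info["version"]["string"].get<std::string>() << '\n';
+    //   }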
+
+
+ ///////////////////////////
+ // JSON value data types //
+ ///////////////////////////
+
+ /// @name JSON value data types
+ /// The data types to store a JSON value. These types are derived from
+ /// the template arguments passed to class @ref basic_json.
+ /// @{
+
+#if defined(JSON_HAS_CPP_14)
+ // Use transparent comparator if possible, combined with perfect forwarding
+ // on find() and count() calls prevents unnecessary string construction.
+ using object_comparator_t = std::less<>;
+#else
+ using object_comparator_t = std::less<StringType>;
+#endif
+
+ /*!
+ @brief a type for an object
+
+ [RFC 7159](http://rfc7159.net/rfc7159) describes JSON objects as follows:
+ > An object is an unordered collection of zero or more name/value pairs,
+ > where a name is a string and a value is a string, number, boolean, null,
+ > object, or array.
+
+ To store objects in C++, a type is defined by the template parameters
+ described below.
+
+ @tparam ObjectType the container to store objects (e.g., `std::map` or
+ `std::unordered_map`)
+ @tparam StringType the type of the keys or names (e.g., `std::string`).
+ The comparison function `std::less<StringType>` is used to order elements
+ inside the container.
+ @tparam AllocatorType the allocator to use for objects (e.g.,
+ `std::allocator`)
+
+ #### Default type
+
+ With the default values for @a ObjectType (`std::map`), @a StringType
+ (`std::string`), and @a AllocatorType (`std::allocator`), the default
+ value for @a object_t is:
+
+ @code {.cpp}
+ std::map<
+ std::string, // key_type
+ basic_json, // value_type
+ std::less<std::string>, // key_compare
+ std::allocator<std::pair<const std::string, basic_json>> // allocator_type
+ >
+ @endcode
+
+ #### Behavior
+
+ The choice of @a object_t influences the behavior of the JSON class. With
+ the default type, objects have the following behavior:
+
+ - When all names are unique, objects will be interoperable in the sense
+ that all software implementations receiving that object will agree on
+ the name-value mappings.
+ - When the names within an object are not unique, it is unspecified which
+ one of the values for a given key will be chosen. For instance,
+ `{"key": 2, "key": 1}` could be equal to either `{"key": 1}` or
+ `{"key": 2}`.
+ - Internally, name/value pairs are stored in lexicographical order of the
+ names. Objects will also be serialized (see @ref dump) in this order.
+ For instance, `{"b": 1, "a": 2}` and `{"a": 2, "b": 1}` will be stored
+ and serialized as `{"a": 2, "b": 1}`.
+ - When comparing objects, the order of the name/value pairs is irrelevant.
+ This makes objects interoperable in the sense that they will not be
+ affected by these differences. For instance, `{"b": 1, "a": 2}` and
+ `{"a": 2, "b": 1}` will be treated as equal.
+
+ #### Limits
+
+ [RFC 7159](http://rfc7159.net/rfc7159) specifies:
+ > An implementation may set limits on the maximum depth of nesting.
+
+ In this class, the object's limit of nesting is not explicitly constrained.
+ However, a maximum depth of nesting may be introduced by the compiler or
+ runtime environment. A theoretical limit can be queried by calling the
+ @ref max_size function of a JSON object.
+
+ #### Storage
+
+ Objects are stored as pointers in a @ref basic_json type. That is, for any
+ access to object values, a pointer of type `object_t*` must be
+ dereferenced.
+
+ @sa @ref array_t -- type for an array value
+
+ @since version 1.0.0
+
+ @note The order in which name/value pairs are added to the object is *not*
+ preserved by the library. Therefore, iterating an object may return
+ name/value pairs in a different order than they were originally stored. In
+ fact, keys will be traversed in alphabetical order as `std::map` with
+ `std::less` is used by default. Please note this behavior conforms to [RFC
+ 7159](http://rfc7159.net/rfc7159), because any order implements the
+ specified "unordered" nature of JSON objects.
+ */
+ using object_t = ObjectType<StringType,
+ basic_json,
+ object_comparator_t,
+ AllocatorType<std::pair<const StringType,
+ basic_json>>>;
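+
+    // Editorial example (not upstream code): with the default object_t backed by
+    // std::map, duplicate keys collapse and keys are kept in lexicographical
+    // order, as described above. The keys and values are illustrative only.
+    //
+    //   nlohmann::json j = {{"b", 1}, {"a", 2}};
+    //   // j.dump() == R"({"a":2,"b":1})"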
+
+ /*!
+ @brief a type for an array
+
+ [RFC 7159](http://rfc7159.net/rfc7159) describes JSON arrays as follows:
+ > An array is an ordered sequence of zero or more values.
+
+ To store arrays in C++, a type is defined by the template parameters
+ explained below.
+
+ @tparam ArrayType container type to store arrays (e.g., `std::vector` or
+ `std::list`)
+ @tparam AllocatorType allocator to use for arrays (e.g., `std::allocator`)
+
+ #### Default type
+
+ With the default values for @a ArrayType (`std::vector`) and @a
+ AllocatorType (`std::allocator`), the default value for @a array_t is:
+
+ @code {.cpp}
+ std::vector<
+ basic_json, // value_type
+ std::allocator<basic_json> // allocator_type
+ >
+ @endcode
+
+ #### Limits
+
+ [RFC 7159](http://rfc7159.net/rfc7159) specifies:
+ > An implementation may set limits on the maximum depth of nesting.
+
+ In this class, the array's limit of nesting is not explicitly constrained.
+ However, a maximum depth of nesting may be introduced by the compiler or
+ runtime environment. A theoretical limit can be queried by calling the
+ @ref max_size function of a JSON array.
+
+ #### Storage
+
+ Arrays are stored as pointers in a @ref basic_json type. That is, for any
+ access to array values, a pointer of type `array_t*` must be dereferenced.
+
+ @sa @ref object_t -- type for an object value
+
+ @since version 1.0.0
+ */
+ using array_t = ArrayType<basic_json, AllocatorType<basic_json>>;
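+
+    // Editorial example (not upstream code): with the default array_t backed by
+    // std::vector, elements keep their insertion order.
+    //
+    //   nlohmann::json a = nlohmann::json::array({1, 2, 3});
+    //   // a.dump() == "[1,2,3]"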
+
+ /*!
+ @brief a type for a string
+
+ [RFC 7159](http://rfc7159.net/rfc7159) describes JSON strings as follows:
+ > A string is a sequence of zero or more Unicode characters.
+
+ To store strings in C++, a type is defined by the template parameter
+ described below. Unicode values are split by the JSON class into
+ byte-sized characters during deserialization.
+
+ @tparam StringType the container to store strings (e.g., `std::string`).
+ Note this container is used for keys/names in objects, see @ref object_t.
+
+ #### Default type
+
+ With the default values for @a StringType (`std::string`), the default
+ value for @a string_t is:
+
+ @code {.cpp}
+ std::string
+ @endcode
+
+ #### Encoding
+
+ Strings are stored in UTF-8 encoding. Therefore, functions like
+ `std::string::size()` or `std::string::length()` return the number of
+ bytes in the string rather than the number of characters or glyphs.
+
+ #### String comparison
+
+ [RFC 7159](http://rfc7159.net/rfc7159) states:
+ > Software implementations are typically required to test names of object
+ > members for equality. Implementations that transform the textual
+ > representation into sequences of Unicode code units and then perform the
+ > comparison numerically, code unit by code unit, are interoperable in the
+ > sense that implementations will agree in all cases on equality or
+ > inequality of two strings. For example, implementations that compare
+ > strings with escaped characters unconverted may incorrectly find that
+ > `"a\\b"` and `"a\u005Cb"` are not equal.
+
+ This implementation is interoperable as it does compare strings code unit
+ by code unit.
+
+ #### Storage
+
+ String values are stored as pointers in a @ref basic_json type. That is,
+ for any access to string values, a pointer of type `string_t*` must be
+ dereferenced.
+
+ @since version 1.0.0
+ */
+ using string_t = StringType;
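+
+    // Editorial example (not upstream code): because strings are stored as UTF-8,
+    // size() counts bytes rather than characters; the two-byte sequence below is
+    // U+00FC ("ü") and is illustrative only.
+    //
+    //   nlohmann::json s = "\xC3\xBC";
+    //   // s.get<std::string>().size() == 2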
+
+ /*!
+ @brief a type for a boolean
+
+ [RFC 7159](http://rfc7159.net/rfc7159) implicitly describes a boolean as a
+ type which differentiates the two literals `true` and `false`.
+
+ To store booleans in C++, a type is defined by the template parameter @a
+ BooleanType which chooses the type to use.
+
+ #### Default type
+
+ With the default values for @a BooleanType (`bool`), the default value for
+ @a boolean_t is:
+
+ @code {.cpp}
+ bool
+ @endcode
+
+ #### Storage
+
+ Boolean values are stored directly inside a @ref basic_json type.
+
+ @since version 1.0.0
+ */
+ using boolean_t = BooleanType;
+
+ /*!
+ @brief a type for a number (integer)
+
+ [RFC 7159](http://rfc7159.net/rfc7159) describes numbers as follows:
+ > The representation of numbers is similar to that used in most
+ > programming languages. A number is represented in base 10 using decimal
+ > digits. It contains an integer component that may be prefixed with an
+ > optional minus sign, which may be followed by a fraction part and/or an
+ > exponent part. Leading zeros are not allowed. (...) Numeric values that
+ > cannot be represented in the grammar below (such as Infinity and NaN)
+ > are not permitted.
+
+ This description includes both integer and floating-point numbers.
+ However, C++ allows more precise storage if it is known whether the number
+ is a signed integer, an unsigned integer or a floating-point number.
+ Therefore, three different types, @ref number_integer_t, @ref
+ number_unsigned_t and @ref number_float_t are used.
+
+ To store integer numbers in C++, a type is defined by the template
+ parameter @a NumberIntegerType which chooses the type to use.
+
+ #### Default type
+
+ With the default values for @a NumberIntegerType (`int64_t`), the default
+ value for @a number_integer_t is:
+
+ @code {.cpp}
+ int64_t
+ @endcode
+
+ #### Default behavior
+
+ - The restriction about leading zeros is not enforced in C++. Instead,
+ leading zeros in integer literals lead to an interpretation as an octal
+ number. Internally, the value will be stored as a decimal number. For
+ instance, the C++ integer literal `010` will be serialized to `8`.
+ During deserialization, leading zeros yield an error.
+ - Not-a-number (NaN) values will be serialized to `null`.
+
+ #### Limits
+
+ [RFC 7159](http://rfc7159.net/rfc7159) specifies:
+ > An implementation may set limits on the range and precision of numbers.
+
+ When the default type is used, the maximal integer number that can be
+ stored is `9223372036854775807` (INT64_MAX) and the minimal integer number
+ that can be stored is `-9223372036854775808` (INT64_MIN). Integer numbers
+ that are out of range will yield over/underflow when used in a
+ constructor. During deserialization, too large or small integer numbers
+ will be automatically be stored as @ref number_unsigned_t or @ref
+ number_float_t.
+
+ [RFC 7159](http://rfc7159.net/rfc7159) further states:
+ > Note that when such software is used, numbers that are integers and are
+ > in the range \f$[-2^{53}+1, 2^{53}-1]\f$ are interoperable in the sense
+ > that implementations will agree exactly on their numeric values.
+
+ As this range is a subrange of the exactly supported range [INT64_MIN,
+ INT64_MAX], this class's integer type is interoperable.
+
+ #### Storage
+
+ Integer number values are stored directly inside a @ref basic_json type.
+
+ @sa @ref number_float_t -- type for number values (floating-point)
+
+ @sa @ref number_unsigned_t -- type for number values (unsigned integer)
+
+ @since version 1.0.0
+ */
+ using number_integer_t = NumberIntegerType;
+
+ /*!
+ @brief a type for a number (unsigned)
+
+ [RFC 7159](http://rfc7159.net/rfc7159) describes numbers as follows:
+ > The representation of numbers is similar to that used in most
+ > programming languages. A number is represented in base 10 using decimal
+ > digits. It contains an integer component that may be prefixed with an
+ > optional minus sign, which may be followed by a fraction part and/or an
+ > exponent part. Leading zeros are not allowed. (...) Numeric values that
+ > cannot be represented in the grammar below (such as Infinity and NaN)
+ > are not permitted.
+
+ This description includes both integer and floating-point numbers.
+ However, C++ allows more precise storage if it is known whether the number
+ is a signed integer, an unsigned integer or a floating-point number.
+ Therefore, three different types, @ref number_integer_t, @ref
+ number_unsigned_t and @ref number_float_t are used.
+
+ To store unsigned integer numbers in C++, a type is defined by the
+ template parameter @a NumberUnsignedType which chooses the type to use.
+
+ #### Default type
+
+ With the default values for @a NumberUnsignedType (`uint64_t`), the
+ default value for @a number_unsigned_t is:
+
+ @code {.cpp}
+ uint64_t
+ @endcode
+
+ #### Default behavior
+
+ - The restriction about leading zeros is not enforced in C++. Instead,
+ leading zeros in integer literals lead to an interpretation as an octal
+ number. Internally, the value will be stored as a decimal number. For
+ instance, the C++ integer literal `010` will be serialized to `8`.
+ During deserialization, leading zeros yield an error.
+ - Not-a-number (NaN) values will be serialized to `null`.
+
+ #### Limits
+
+ [RFC 7159](http://rfc7159.net/rfc7159) specifies:
+ > An implementation may set limits on the range and precision of numbers.
+
+ When the default type is used, the maximal integer number that can be
+ stored is `18446744073709551615` (UINT64_MAX) and the minimal integer
+ number that can be stored is `0`. Integer numbers that are out of range
+ will yield over/underflow when used in a constructor. During
+ deserialization, too large or small integer numbers will automatically
+ be stored as @ref number_integer_t or @ref number_float_t.
+
+ [RFC 7159](http://rfc7159.net/rfc7159) further states:
+ > Note that when such software is used, numbers that are integers and are
+ > in the range \f$[-2^{53}+1, 2^{53}-1]\f$ are interoperable in the sense
+ > that implementations will agree exactly on their numeric values.
+
+ As this range is a subrange (when considered in conjunction with the
+ number_integer_t type) of the exactly supported range [0, UINT64_MAX],
+ this class's integer type is interoperable.
+
+ #### Storage
+
+ Integer number values are stored directly inside a @ref basic_json type.
+
+ @sa @ref number_float_t -- type for number values (floating-point)
+ @sa @ref number_integer_t -- type for number values (integer)
+
+ @since version 2.0.0
+ */
+ using number_unsigned_t = NumberUnsignedType;
+
+ /*!
+ @brief a type for a number (floating-point)
+
+ [RFC 7159](http://rfc7159.net/rfc7159) describes numbers as follows:
+ > The representation of numbers is similar to that used in most
+ > programming languages. A number is represented in base 10 using decimal
+ > digits. It contains an integer component that may be prefixed with an
+ > optional minus sign, which may be followed by a fraction part and/or an
+ > exponent part. Leading zeros are not allowed. (...) Numeric values that
+ > cannot be represented in the grammar below (such as Infinity and NaN)
+ > are not permitted.
+
+ This description includes both integer and floating-point numbers.
+ However, C++ allows more precise storage if it is known whether the number
+ is a signed integer, an unsigned integer or a floating-point number.
+ Therefore, three different types, @ref number_integer_t, @ref
+ number_unsigned_t and @ref number_float_t are used.
+
+ To store floating-point numbers in C++, a type is defined by the template
+ parameter @a NumberFloatType which chooses the type to use.
+
+ #### Default type
+
+ With the default values for @a NumberFloatType (`double`), the default
+ value for @a number_float_t is:
+
+ @code {.cpp}
+ double
+ @endcode
+
+ #### Default behavior
+
+ - The restriction about leading zeros is not enforced in C++. Instead,
+ leading zeros in floating-point literals will be ignored. Internally,
+ the value will be stored as a decimal number. For instance, the C++
+ floating-point literal `01.2` will be serialized to `1.2`. During
+ deserialization, leading zeros yield an error.
+ - Not-a-number (NaN) values will be serialized to `null`.
+
+ #### Limits
+
+ [RFC 7159](http://rfc7159.net/rfc7159) states:
+ > This specification allows implementations to set limits on the range and
+ > precision of numbers accepted. Since software that implements IEEE
+ > 754-2008 binary64 (double precision) numbers is generally available and
+ > widely used, good interoperability can be achieved by implementations
+ > that expect no more precision or range than these provide, in the sense
+ > that implementations will approximate JSON numbers within the expected
+ > precision.
+
+ This implementation does exactly follow this approach, as it uses double
+ precision floating-point numbers. Note values smaller than
+ `-1.79769313486232e+308` and values greater than `1.79769313486232e+308`
+ will be stored as NaN internally and be serialized to `null`.
+
+ #### Storage
+
+ Floating-point number values are stored directly inside a @ref basic_json
+ type.
+
+ @sa @ref number_integer_t -- type for number values (integer)
+
+ @sa @ref number_unsigned_t -- type for number values (unsigned integer)
+
+ @since version 1.0.0
+ */
+ using number_float_t = NumberFloatType;
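+
+    // Editorial example (not upstream code): NaN and infinities cannot be
+    // represented in JSON text and are serialized to `null`, as documented above
+    // (a real program would additionally include <limits>).
+    //
+    //   nlohmann::json not_a_number = std::numeric_limits<double>::quiet_NaN();
+    //   // not_a_number.dump() == "null"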
+
+ /*!
+ @brief a type for a packed binary type
+
+ This type is a type designed to carry binary data that appears in various
+ serialized formats, such as CBOR's Major Type 2, MessagePack's bin, and
+ BSON's generic binary subtype. This type is NOT a part of standard JSON and
+ exists solely for compatibility with these binary types. As such, it is
+ simply defined as an ordered sequence of zero or more byte values.
+
+ Additionally, as an implementation detail, the subtype of the binary data is
+ carried around as a `std::uint8_t`, which is compatible with both of the
+ binary data formats that use binary subtyping (though the specific
+ numbering differs between the two formats, and it is up to the user to
+ translate between them).
+
+ [CBOR's RFC 7049](https://tools.ietf.org/html/rfc7049) describes this type
+ as:
+ > Major type 2: a byte string. The string's length in bytes is represented
+ > following the rules for positive integers (major type 0).
+
+ [MessagePack's documentation on the bin type
+ family](https://github.com/msgpack/msgpack/blob/master/spec.md#bin-format-family)
+ describes this type as:
+ > Bin format family stores an byte array in 2, 3, or 5 bytes of extra bytes
+ > in addition to the size of the byte array.
+
+ [BSON's specifications](http://bsonspec.org/spec.html) describe several
+ binary types; however, this type is intended to represent the generic binary
+ type which has the description:
+ > Generic binary subtype - This is the most commonly used binary subtype and
+ > should be the 'default' for drivers and tools.
+
+ None of these impose any limitations on the internal representation other
+ than the basic unit of storage be some type of array whose parts are
+ decomposable into bytes.
+
+ The default representation of this binary format is a
+ `std::vector<std::uint8_t>`, which is a very common way to represent a byte
+ array in modern C++.
+
+ #### Default type
+
+ The default value for @a BinaryType is `std::vector<std::uint8_t>`.
+
+ #### Storage
+
+ Binary arrays are stored as pointers in a @ref basic_json type. That is,
+ for any access to binary values, a pointer of the type `binary_t*` must be
+ dereferenced.
+
+ #### Notes on subtypes
+
+ - CBOR
+ - Binary values are represented as byte strings. No subtypes are
+ supported and will be ignored when CBOR is written.
+ - MessagePack
+ - If a subtype is given and the binary array contains exactly 1, 2, 4, 8,
+ or 16 elements, the fixext family (fixext1, fixext2, fixext4, fixext8,
+ fixext16) is used. For other sizes, the ext family (ext8, ext16, ext32)
+ is used. The subtype is then added as a signed 8-bit integer.
+ - If no subtype is given, the bin family (bin8, bin16, bin32) is used.
+ - BSON
+ - If a subtype is given, it is used and added as unsigned 8-bit integer.
+ - If no subtype is given, the generic binary subtype 0x00 is used.
+
+ @sa @ref binary -- create a binary array
+
+ @since version 3.8.0
+ */
+ using binary_t = nlohmann::byte_container_with_subtype<BinaryType>;
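+
+    // Editorial example (not upstream code): creating a binary value with a
+    // subtype via the binary() factory documented below; the byte values and
+    // the subtype 42 are illustrative only.
+    //
+    //   auto b = nlohmann::json::binary({0xCA, 0xFE}, 42);
+    //   // b.is_binary() == true
+    //   // b.get_binary().subtype() == 42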
+ /// @}
+
+ private:
+
+ /// helper for exception-safe object creation
+ template<typename T, typename... Args>
+ JSON_HEDLEY_RETURNS_NON_NULL
+ static T* create(Args&& ... args)
+ {
+ AllocatorType<T> alloc;
+ using AllocatorTraits = std::allocator_traits<AllocatorType<T>>;
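+
+        // Editorial note: the raw allocation below is owned by a unique_ptr whose
+        // deleter only deallocates, so if construct() throws, the storage is
+        // released again and no memory is leaked.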
+
+ auto deleter = [&](T * object)
+ {
+ AllocatorTraits::deallocate(alloc, object, 1);
+ };
+ std::unique_ptr<T, decltype(deleter)> object(AllocatorTraits::allocate(alloc, 1), deleter);
+ AllocatorTraits::construct(alloc, object.get(), std::forward<Args>(args)...);
+ JSON_ASSERT(object != nullptr);
+ return object.release();
+ }
+
+ ////////////////////////
+ // JSON value storage //
+ ////////////////////////
+
+ /*!
+ @brief a JSON value
+
+ The actual storage for a JSON value of the @ref basic_json class. This
+ union combines the different storage types for the JSON value types
+ defined in @ref value_t.
+
+ JSON type | value_t type | used type
+ --------- | --------------- | ------------------------
+ object | object | pointer to @ref object_t
+ array | array | pointer to @ref array_t
+ string | string | pointer to @ref string_t
+ boolean | boolean | @ref boolean_t
+ number | number_integer | @ref number_integer_t
+ number | number_unsigned | @ref number_unsigned_t
+ number | number_float | @ref number_float_t
+ binary | binary | pointer to @ref binary_t
+ null | null | *no value is stored*
+
+ @note Variable-length types (objects, arrays, strings, and binary values)
+ are stored as pointers. The size of the union should not exceed 64 bits
+ if the default value types are used.
+
+ @since version 1.0.0
+ */
+ union json_value
+ {
+ /// object (stored with pointer to save storage)
+ object_t* object;
+ /// array (stored with pointer to save storage)
+ array_t* array;
+ /// string (stored with pointer to save storage)
+ string_t* string;
+ /// binary (stored with pointer to save storage)
+ binary_t* binary;
+ /// boolean
+ boolean_t boolean;
+ /// number (integer)
+ number_integer_t number_integer;
+ /// number (unsigned integer)
+ number_unsigned_t number_unsigned;
+ /// number (floating-point)
+ number_float_t number_float;
+
+ /// default constructor (for null values)
+ json_value() = default;
+ /// constructor for booleans
+ json_value(boolean_t v) noexcept : boolean(v) {}
+ /// constructor for numbers (integer)
+ json_value(number_integer_t v) noexcept : number_integer(v) {}
+ /// constructor for numbers (unsigned)
+ json_value(number_unsigned_t v) noexcept : number_unsigned(v) {}
+ /// constructor for numbers (floating-point)
+ json_value(number_float_t v) noexcept : number_float(v) {}
+ /// constructor for empty values of a given type
+ json_value(value_t t)
+ {
+ switch (t)
+ {
+ case value_t::object:
+ {
+ object = create<object_t>();
+ break;
+ }
+
+ case value_t::array:
+ {
+ array = create<array_t>();
+ break;
+ }
+
+ case value_t::string:
+ {
+ string = create<string_t>("");
+ break;
+ }
+
+ case value_t::binary:
+ {
+ binary = create<binary_t>();
+ break;
+ }
+
+ case value_t::boolean:
+ {
+ boolean = boolean_t(false);
+ break;
+ }
+
+ case value_t::number_integer:
+ {
+ number_integer = number_integer_t(0);
+ break;
+ }
+
+ case value_t::number_unsigned:
+ {
+ number_unsigned = number_unsigned_t(0);
+ break;
+ }
+
+ case value_t::number_float:
+ {
+ number_float = number_float_t(0.0);
+ break;
+ }
+
+ case value_t::null:
+ {
+ object = nullptr; // silence warning, see #821
+ break;
+ }
+
+ default:
+ {
+ object = nullptr; // silence warning, see #821
+ if (JSON_HEDLEY_UNLIKELY(t == value_t::null))
+ {
+ JSON_THROW(other_error::create(500, "961c151d2e87f2686a955a9be24d316f1362bf21 3.9.1")); // LCOV_EXCL_LINE
+ }
+ break;
+ }
+ }
+ }
+
+ /// constructor for strings
+ json_value(const string_t& value)
+ {
+ string = create<string_t>(value);
+ }
+
+ /// constructor for rvalue strings
+ json_value(string_t&& value)
+ {
+ string = create<string_t>(std::move(value));
+ }
+
+ /// constructor for objects
+ json_value(const object_t& value)
+ {
+ object = create<object_t>(value);
+ }
+
+ /// constructor for rvalue objects
+ json_value(object_t&& value)
+ {
+ object = create<object_t>(std::move(value));
+ }
+
+ /// constructor for arrays
+ json_value(const array_t& value)
+ {
+ array = create<array_t>(value);
+ }
+
+ /// constructor for rvalue arrays
+ json_value(array_t&& value)
+ {
+ array = create<array_t>(std::move(value));
+ }
+
+ /// constructor for binary arrays
+ json_value(const typename binary_t::container_type& value)
+ {
+ binary = create<binary_t>(value);
+ }
+
+ /// constructor for rvalue binary arrays
+ json_value(typename binary_t::container_type&& value)
+ {
+ binary = create<binary_t>(std::move(value));
+ }
+
+ /// constructor for binary arrays (internal type)
+ json_value(const binary_t& value)
+ {
+ binary = create<binary_t>(value);
+ }
+
+ /// constructor for rvalue binary arrays (internal type)
+ json_value(binary_t&& value)
+ {
+ binary = create<binary_t>(std::move(value));
+ }
+
+ void destroy(value_t t) noexcept
+ {
+ // flatten the current json_value to a heap-allocated stack
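+            // (editorial note: using an explicit stack instead of recursion keeps
+            // destruction of deeply nested values from overflowing the call stack)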
+ std::vector<basic_json> stack;
+
+ // move the top-level items to stack
+ if (t == value_t::array)
+ {
+ stack.reserve(array->size());
+ std::move(array->begin(), array->end(), std::back_inserter(stack));
+ }
+ else if (t == value_t::object)
+ {
+ stack.reserve(object->size());
+ for (auto&& it : *object)
+ {
+ stack.push_back(std::move(it.second));
+ }
+ }
+
+ while (!stack.empty())
+ {
+ // move the last item to local variable to be processed
+ basic_json current_item(std::move(stack.back()));
+ stack.pop_back();
+
+ // if current_item is array/object, move
+ // its children to the stack to be processed later
+ if (current_item.is_array())
+ {
+ std::move(current_item.m_value.array->begin(), current_item.m_value.array->end(),
+ std::back_inserter(stack));
+
+ current_item.m_value.array->clear();
+ }
+ else if (current_item.is_object())
+ {
+ for (auto&& it : *current_item.m_value.object)
+ {
+ stack.push_back(std::move(it.second));
+ }
+
+ current_item.m_value.object->clear();
+ }
+
+ // it is now safe for current_item to be destroyed here,
+ // since it no longer has any children
+ }
+
+ switch (t)
+ {
+ case value_t::object:
+ {
+ AllocatorType<object_t> alloc;
+ std::allocator_traits<decltype(alloc)>::destroy(alloc, object);
+ std::allocator_traits<decltype(alloc)>::deallocate(alloc, object, 1);
+ break;
+ }
+
+ case value_t::array:
+ {
+ AllocatorType<array_t> alloc;
+ std::allocator_traits<decltype(alloc)>::destroy(alloc, array);
+ std::allocator_traits<decltype(alloc)>::deallocate(alloc, array, 1);
+ break;
+ }
+
+ case value_t::string:
+ {
+ AllocatorType<string_t> alloc;
+ std::allocator_traits<decltype(alloc)>::destroy(alloc, string);
+ std::allocator_traits<decltype(alloc)>::deallocate(alloc, string, 1);
+ break;
+ }
+
+ case value_t::binary:
+ {
+ AllocatorType<binary_t> alloc;
+ std::allocator_traits<decltype(alloc)>::destroy(alloc, binary);
+ std::allocator_traits<decltype(alloc)>::deallocate(alloc, binary, 1);
+ break;
+ }
+
+ default:
+ {
+ break;
+ }
+ }
+ }
+ };
+
+ /*!
+ @brief checks the class invariants
+
+ This function asserts the class invariants. It needs to be called at the
+ end of every constructor to make sure that created objects respect the
+ invariant. Furthermore, it has to be called each time the type of a JSON
+ value is changed, because the invariant expresses a relationship between
+ @a m_type and @a m_value.
+ */
+ void assert_invariant() const noexcept
+ {
+ JSON_ASSERT(m_type != value_t::object || m_value.object != nullptr);
+ JSON_ASSERT(m_type != value_t::array || m_value.array != nullptr);
+ JSON_ASSERT(m_type != value_t::string || m_value.string != nullptr);
+ JSON_ASSERT(m_type != value_t::binary || m_value.binary != nullptr);
+ }
+
+ public:
+ //////////////////////////
+ // JSON parser callback //
+ //////////////////////////
+
+ /*!
+ @brief parser event types
+
+ The parser callback distinguishes the following events:
+ - `object_start`: the parser read `{` and started to process a JSON object
+ - `key`: the parser read a key of a value in an object
+ - `object_end`: the parser read `}` and finished processing a JSON object
+ - `array_start`: the parser read `[` and started to process a JSON array
+ - `array_end`: the parser read `]` and finished processing a JSON array
+ - `value`: the parser finished reading a JSON value
+
+ @image html callback_events.png "Example when certain parse events are triggered"
+
+ @sa @ref parser_callback_t for more information and examples
+ */
+ using parse_event_t = detail::parse_event_t;
+
+ /*!
+ @brief per-element parser callback type
+
+ With a parser callback function, the result of parsing a JSON text can be
+ influenced. When passed to @ref parse, it is called on certain events
+ (passed as @ref parse_event_t via parameter @a event) with a set recursion
+ depth @a depth and context JSON value @a parsed. The return value of the
+ callback function is a boolean indicating whether the element that emitted
+ the callback shall be kept or not.
+
+ We distinguish six scenarios (determined by the event type) in which the
+ callback function can be called. The following table describes the values
+ of the parameters @a depth, @a event, and @a parsed.
+
+ parameter @a event | description | parameter @a depth | parameter @a parsed
+ ------------------ | ----------- | ------------------ | -------------------
+ parse_event_t::object_start | the parser read `{` and started to process a JSON object | depth of the parent of the JSON object | a JSON value with type discarded
+ parse_event_t::key | the parser read a key of a value in an object | depth of the currently parsed JSON object | a JSON string containing the key
+ parse_event_t::object_end | the parser read `}` and finished processing a JSON object | depth of the parent of the JSON object | the parsed JSON object
+ parse_event_t::array_start | the parser read `[` and started to process a JSON array | depth of the parent of the JSON array | a JSON value with type discarded
+ parse_event_t::array_end | the parser read `]` and finished processing a JSON array | depth of the parent of the JSON array | the parsed JSON array
+ parse_event_t::value | the parser finished reading a JSON value | depth of the value | the parsed JSON value
+
+ @image html callback_events.png "Example when certain parse events are triggered"
+
+ Discarding a value (i.e., returning `false`) has different effects
+ depending on the context in which function was called:
+
+ - Discarded values in structured types are skipped. That is, the parser
+ will behave as if the discarded value was never read.
+ - In case a value outside a structured type is skipped, it is replaced
+ with `null`. This case happens if the top-level element is skipped.
+
+ @param[in] depth the depth of the recursion during parsing
+
+ @param[in] event an event of type parse_event_t indicating the context in
+ which the callback function has been called
+
+ @param[in,out] parsed the current intermediate parse result; note that
+ writing to this value has no effect for parse_event_t::key events
+
+ @return Whether the JSON value which called the function during parsing
+ should be kept (`true`) or not (`false`). In the latter case, it is either
+ skipped completely or replaced by an empty discarded object.
+
+ @sa @ref parse for examples
+
+ @since version 1.0.0
+ */
+ using parser_callback_t = detail::parser_callback_t<basic_json>;
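+
+    // Editorial example (not part of the upstream header): a sketch of a parser
+    // callback that drops every object member whose key starts with an
+    // underscore; the input string and the filter rule are illustrative only.
+    //
+    //   nlohmann::json::parser_callback_t cb =
+    //       [](int /*depth*/, nlohmann::json::parse_event_t event, nlohmann::json& parsed)
+    //   {
+    //       if (event == nlohmann::json::parse_event_t::key)
+    //       {
+    //           const auto key = parsed.get<std::string>();
+    //           return key.empty() || key.front() != '_';   // false discards the pair
+    //       }
+    //       return true;                                    // keep everything else
+    //   };
+    //   auto j = nlohmann::json::parse(R"({"_internal": 1, "visible": 2})", cb);
+    //   // j.dump() == R"({"visible":2})"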
+
+ //////////////////
+ // constructors //
+ //////////////////
+
+ /// @name constructors and destructors
+ /// Constructors of class @ref basic_json, copy/move constructor, copy
+ /// assignment, static functions creating objects, and the destructor.
+ /// @{
+
+ /*!
+ @brief create an empty value with a given type
+
+ Create an empty JSON value with a given type. The value will be default
+ initialized with an empty value which depends on the type:
+
+ Value type | initial value
+ ----------- | -------------
+ null | `null`
+ boolean | `false`
+ string | `""`
+ number | `0`
+ object | `{}`
+ array | `[]`
+ binary | empty array
+
+ @param[in] v the type of the value to create
+
+ @complexity Constant.
+
+ @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+ changes to any JSON value.
+
+ @liveexample{The following code shows the constructor for different @ref
+ value_t values,basic_json__value_t}
+
+ @sa @ref clear() -- restores the postcondition of this constructor
+
+ @since version 1.0.0
+ */
+ basic_json(const value_t v)
+ : m_type(v), m_value(v)
+ {
+ assert_invariant();
+ }
+
+ /*!
+ @brief create a null object
+
+ Create a `null` JSON value. It either takes a null pointer as parameter
+ (explicitly creating `null`) or no parameter (implicitly creating `null`).
+ The passed null pointer itself is not read -- it is only used to choose
+ the right constructor.
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this constructor never throws
+ exceptions.
+
+ @liveexample{The following code shows the constructor with and without a
+ null pointer parameter.,basic_json__nullptr_t}
+
+ @since version 1.0.0
+ */
+ basic_json(std::nullptr_t = nullptr) noexcept
+ : basic_json(value_t::null)
+ {
+ assert_invariant();
+ }
+
+ /*!
+ @brief create a JSON value
+
+ This is a "catch all" constructor for all compatible JSON types; that is,
+ types for which a `to_json()` method exists. The constructor forwards the
+ parameter @a val to that method (to `json_serializer<U>::to_json` method
+ with `U = uncvref_t<CompatibleType>`, to be exact).
+
+ Template type @a CompatibleType includes, but is not limited to, the
+ following types:
+ - **arrays**: @ref array_t and all kinds of compatible containers such as
+ `std::vector`, `std::deque`, `std::list`, `std::forward_list`,
+ `std::array`, `std::valarray`, `std::set`, `std::unordered_set`,
+ `std::multiset`, and `std::unordered_multiset` with a `value_type` from
+ which a @ref basic_json value can be constructed.
+ - **objects**: @ref object_t and all kinds of compatible associative
+ containers such as `std::map`, `std::unordered_map`, `std::multimap`,
+ and `std::unordered_multimap` with a `key_type` compatible to
+ @ref string_t and a `value_type` from which a @ref basic_json value can
+ be constructed.
+ - **strings**: @ref string_t, string literals, and all compatible string
+ containers can be used.
+ - **numbers**: @ref number_integer_t, @ref number_unsigned_t,
+ @ref number_float_t, and all convertible number types such as `int`,
+ `size_t`, `int64_t`, `float` or `double` can be used.
+ - **boolean**: @ref boolean_t / `bool` can be used.
+ - **binary**: @ref binary_t / `std::vector<uint8_t>` may be used;
+ unfortunately, because string literals cannot be distinguished from binary
+ character arrays by the C++ type system, all types compatible with `const
+ char*` will be directed to the string constructor instead. This is both
+ for backwards compatibility and due to the fact that a binary type is not
+ a standard JSON type.
+
+ See the examples below.
+
+ @tparam CompatibleType a type such that:
+ - @a CompatibleType is not derived from `std::istream`,
+ - @a CompatibleType is not @ref basic_json (to avoid hijacking copy/move
+ constructors),
+ - @a CompatibleType is not a different @ref basic_json type (i.e. with different template arguments)
+ - @a CompatibleType is not a @ref basic_json nested type (e.g.,
+ @ref json_pointer, @ref iterator, etc ...)
+ - @ref json_serializer<U> has a
+ `to_json(basic_json_t&, CompatibleType&&)` method
+
+ @tparam U = `uncvref_t<CompatibleType>`
+
+ @param[in] val the value to be forwarded to the respective constructor
+
+ @complexity Usually linear in the size of the passed @a val, also
+ depending on the implementation of the called `to_json()`
+ method.
+
+ @exceptionsafety Depends on the called constructor. For types directly
+ supported by the library (i.e., all types for which no `to_json()` function
+ was provided), strong guarantee holds: if an exception is thrown, there are
+ no changes to any JSON value.
+
+ @liveexample{The following code shows the constructor with several
+ compatible types.,basic_json__CompatibleType}
+
+ @since version 2.1.0
+ */
+ template < typename CompatibleType,
+ typename U = detail::uncvref_t<CompatibleType>,
+ detail::enable_if_t <
+ !detail::is_basic_json<U>::value && detail::is_compatible_type<basic_json_t, U>::value, int > = 0 >
+ basic_json(CompatibleType && val) noexcept(noexcept(
+ JSONSerializer<U>::to_json(std::declval<basic_json_t&>(),
+ std::forward<CompatibleType>(val))))
+ {
+ JSONSerializer<U>::to_json(*this, std::forward<CompatibleType>(val));
+ assert_invariant();
+ }
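+
+    // Editorial example (not upstream code): a few of the compatible types listed
+    // above converted implicitly through this constructor (a real program would
+    // include <vector>, <map>, and <string>); the contents are illustrative only.
+    //
+    //   std::vector<int> v = {1, 2, 3};
+    //   std::map<std::string, double> m = {{"pi", 3.141}};
+    //   nlohmann::json jv = v;   // array:  [1,2,3]
+    //   nlohmann::json jm = m;   // object: {"pi":3.141}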
+
+ /*!
+ @brief create a JSON value from an existing one
+
+ This is a constructor for existing @ref basic_json types.
+ It does not hijack copy/move constructors, since the parameter has different
+ template arguments than the current ones.
+
+ The constructor tries to convert the internal @ref m_value of the parameter.
+
+ @tparam BasicJsonType a type such that:
+ - @a BasicJsonType is a @ref basic_json type.
+ - @a BasicJsonType has different template arguments than @ref basic_json_t.
+
+ @param[in] val the @ref basic_json value to be converted.
+
+ @complexity Usually linear in the size of the passed @a val, also
+ depending on the implementation of the called `to_json()`
+ method.
+
+ @exceptionsafety Depends on the called constructor. For types directly
+ supported by the library (i.e., all types for which no `to_json()` function
+ was provided), strong guarantee holds: if an exception is thrown, there are
+ no changes to any JSON value.
+
+ @since version 3.2.0
+ */
+ template < typename BasicJsonType,
+ detail::enable_if_t <
+ detail::is_basic_json<BasicJsonType>::value&& !std::is_same<basic_json, BasicJsonType>::value, int > = 0 >
+ basic_json(const BasicJsonType& val)
+ {
+ using other_boolean_t = typename BasicJsonType::boolean_t;
+ using other_number_float_t = typename BasicJsonType::number_float_t;
+ using other_number_integer_t = typename BasicJsonType::number_integer_t;
+ using other_number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+ using other_string_t = typename BasicJsonType::string_t;
+ using other_object_t = typename BasicJsonType::object_t;
+ using other_array_t = typename BasicJsonType::array_t;
+ using other_binary_t = typename BasicJsonType::binary_t;
+
+ switch (val.type())
+ {
+ case value_t::boolean:
+ JSONSerializer<other_boolean_t>::to_json(*this, val.template get<other_boolean_t>());
+ break;
+ case value_t::number_float:
+ JSONSerializer<other_number_float_t>::to_json(*this, val.template get<other_number_float_t>());
+ break;
+ case value_t::number_integer:
+ JSONSerializer<other_number_integer_t>::to_json(*this, val.template get<other_number_integer_t>());
+ break;
+ case value_t::number_unsigned:
+ JSONSerializer<other_number_unsigned_t>::to_json(*this, val.template get<other_number_unsigned_t>());
+ break;
+ case value_t::string:
+ JSONSerializer<other_string_t>::to_json(*this, val.template get_ref<const other_string_t&>());
+ break;
+ case value_t::object:
+ JSONSerializer<other_object_t>::to_json(*this, val.template get_ref<const other_object_t&>());
+ break;
+ case value_t::array:
+ JSONSerializer<other_array_t>::to_json(*this, val.template get_ref<const other_array_t&>());
+ break;
+ case value_t::binary:
+ JSONSerializer<other_binary_t>::to_json(*this, val.template get_ref<const other_binary_t&>());
+ break;
+ case value_t::null:
+ *this = nullptr;
+ break;
+ case value_t::discarded:
+ m_type = value_t::discarded;
+ break;
+ default: // LCOV_EXCL_LINE
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
+ }
+ assert_invariant();
+ }
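+
+    // Editorial example (not upstream code): converting between two basic_json
+    // specializations via this constructor, using nlohmann::ordered_json (which
+    // preserves insertion order) as the source type.
+    //
+    //   nlohmann::ordered_json oj = {{"b", 1}, {"a", 2}};   // keeps insertion order
+    //   nlohmann::json j = oj;                              // keys re-sorted: {"a":2,"b":1}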
+
+ /*!
+ @brief create a container (array or object) from an initializer list
+
+ Creates a JSON value of type array or object from the passed initializer
+ list @a init. In case @a type_deduction is `true` (default), the type of
+ the JSON value to be created is deducted from the initializer list @a init
+ according to the following rules:
+
+ 1. If the list is empty, an empty JSON object value `{}` is created.
+ 2. If the list consists of pairs whose first element is a string, a JSON
+ object value is created where the first elements of the pairs are
+ treated as keys and the second elements as values.
+ 3. In all other cases, an array is created.
+
+ The rules aim to create the best fit between a C++ initializer list and
+ JSON values. The rationale is as follows:
+
+ 1. The empty initializer list is written as `{}` which is exactly an empty
+ JSON object.
+ 2. C++ has no way of describing mapped types other than as a list of
+ pairs. As JSON requires that keys must be of type string, rule 2 is the
+ weakest constraint one can pose on initializer lists to interpret them
+ as an object.
+ 3. In all other cases, the initializer list could not be interpreted as
+ JSON object type, so interpreting it as JSON array type is safe.
+
+ With the rules described above, the following JSON values cannot be
+ expressed by an initializer list:
+
+ - the empty array (`[]`): use @ref array(initializer_list_t)
+ with an empty initializer list in this case
+ - arrays whose elements satisfy rule 2: use @ref
+ array(initializer_list_t) with the same initializer list
+ in this case
+
+ @note When used without parentheses around an empty initializer list, @ref
+ basic_json() is called instead of this function, yielding the JSON null
+ value.
+
+ @param[in] init initializer list with JSON values
+
+ @param[in] type_deduction internal parameter; when set to `true`, the type
+ of the JSON value is deducted from the initializer list @a init; when set
+ to `false`, the type provided via @a manual_type is forced. This mode is
+ used by the functions @ref array(initializer_list_t) and
+ @ref object(initializer_list_t).
+
+ @param[in] manual_type internal parameter; when @a type_deduction is set
+ to `false`, the created JSON value will use the provided type (only @ref
+ value_t::array and @ref value_t::object are valid); when @a type_deduction
+ is set to `true`, this parameter has no effect
+
+ @throw type_error.301 if @a type_deduction is `false`, @a manual_type is
+ `value_t::object`, but @a init contains an element which is not a pair
+ whose first element is a string. In this case, the constructor could not
+ create an object. If @a type_deduction had been `true`, an array
+ would have been created. See @ref object(initializer_list_t)
+ for an example.
+
+ @complexity Linear in the size of the initializer list @a init.
+
+ @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+ changes to any JSON value.
+
+ @liveexample{The example below shows how JSON values are created from
+ initializer lists.,basic_json__list_init_t}
+
+ @sa @ref array(initializer_list_t) -- create a JSON array
+ value from an initializer list
+ @sa @ref object(initializer_list_t) -- create a JSON object
+ value from an initializer list
+
+ @since version 1.0.0
+ */
+ basic_json(initializer_list_t init,
+ bool type_deduction = true,
+ value_t manual_type = value_t::array)
+ {
+ // check if each element is an array with two elements whose first
+ // element is a string
+ bool is_an_object = std::all_of(init.begin(), init.end(),
+ [](const detail::json_ref<basic_json>& element_ref)
+ {
+ return element_ref->is_array() && element_ref->size() == 2 && (*element_ref)[0].is_string();
+ });
+
+ // adjust type if type deduction is not wanted
+ if (!type_deduction)
+ {
+ // if an array is wanted, do not create an object even though it would be possible
+ if (manual_type == value_t::array)
+ {
+ is_an_object = false;
+ }
+
+ // if object is wanted but impossible, throw an exception
+ if (JSON_HEDLEY_UNLIKELY(manual_type == value_t::object && !is_an_object))
+ {
+ JSON_THROW(type_error::create(301, "cannot create object from initializer list"));
+ }
+ }
+
+ if (is_an_object)
+ {
+ // the initializer list is a list of pairs -> create object
+ m_type = value_t::object;
+ m_value = value_t::object;
+
+ std::for_each(init.begin(), init.end(), [this](const detail::json_ref<basic_json>& element_ref)
+ {
+ auto element = element_ref.moved_or_copied();
+ m_value.object->emplace(
+ std::move(*((*element.m_value.array)[0].m_value.string)),
+ std::move((*element.m_value.array)[1]));
+ });
+ }
+ else
+ {
+ // the initializer list describes an array -> create array
+ m_type = value_t::array;
+ m_value.array = create<array_t>(init.begin(), init.end());
+ }
+
+ assert_invariant();
+ }
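+
+    // Editorial example (not upstream code): the type-deduction rules above in
+    // action; the keys and values are illustrative only.
+    //
+    //   nlohmann::json obj = {{"one", 1}, {"two", 2}};   // rule 2: (string, value) pairs -> object
+    //   nlohmann::json arr = {1, "two", 3.0, false};     // rule 3: mixed elements -> array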
+
+ /*!
+ @brief explicitly create a binary array (without subtype)
+
+ Creates a JSON binary array value from a given binary container. Binary
+ values are part of various binary formats, such as CBOR, MessagePack, and
+ BSON. This constructor is used to create a value for serialization to those
+ formats.
+
+ @note This function exists because of the difficulty of specifying the
+ correct template overload in the standard value ctor, as both JSON arrays
+ and JSON binary arrays are backed with some form of a `std::vector`.
+ Because JSON binary arrays are a non-standard extension, it was decided
+ that it would be best to prevent automatic initialization of a binary
+ array type, both for backwards compatibility and so it does not happen by
+ accident.
+
+ @param[in] init container containing bytes to use as binary type
+
+ @return JSON binary array value
+
+ @complexity Linear in the size of @a init.
+
+ @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+ changes to any JSON value.
+
+ @since version 3.8.0
+ */
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json binary(const typename binary_t::container_type& init)
+ {
+ auto res = basic_json();
+ res.m_type = value_t::binary;
+ res.m_value = init;
+ return res;
+ }
+
+ /*!
+ @brief explicitly create a binary array (with subtype)
+
+ Creates a JSON binary array value from a given binary container. Binary
+ values are part of various binary formats, such as CBOR, MessagePack, and
+ BSON. This constructor is used to create a value for serialization to those
+ formats.
+
+ @note This function exists because of the difficulty of specifying the
+ correct template overload in the standard value ctor, as both JSON arrays
+ and JSON binary arrays are backed with some form of a `std::vector`.
+ Because JSON binary arrays are a non-standard extension, it was decided
+ that it would be best to prevent automatic initialization of a binary
+ array type, both for backwards compatibility and so it does not happen by
+ accident.
+
+ @param[in] init container containing bytes to use as binary type
+ @param[in] subtype subtype to use in MessagePack and BSON
+
+ @return JSON binary array value
+
+ @complexity Linear in the size of @a init.
+
+ @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+ changes to any JSON value.
+
+ @since version 3.8.0
+ */
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json binary(const typename binary_t::container_type& init, std::uint8_t subtype)
+ {
+ auto res = basic_json();
+ res.m_type = value_t::binary;
+ res.m_value = binary_t(init, subtype);
+ return res;
+ }
+
+ /// @copydoc binary(const typename binary_t::container_type&)
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json binary(typename binary_t::container_type&& init)
+ {
+ auto res = basic_json();
+ res.m_type = value_t::binary;
+ res.m_value = std::move(init);
+ return res;
+ }
+
+ /// @copydoc binary(const typename binary_t::container_type&, std::uint8_t)
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json binary(typename binary_t::container_type&& init, std::uint8_t subtype)
+ {
+ auto res = basic_json();
+ res.m_type = value_t::binary;
+ res.m_value = binary_t(std::move(init), subtype);
+ return res;
+ }
+
+ /*!
+ @brief explicitly create an array from an initializer list
+
+ Creates a JSON array value from a given initializer list. That is, given a
+ list of values `a, b, c`, creates the JSON value `[a, b, c]`. If the
+ initializer list is empty, the empty array `[]` is created.
+
+ @note This function is only needed to express two edge cases that cannot
+ be realized with the initializer list constructor (@ref
+ basic_json(initializer_list_t, bool, value_t)). These cases
+ are:
+ 1. creating an array whose elements are all pairs whose first element is a
+ string -- in this case, the initializer list constructor would create an
+ object, taking the first elements as keys
+ 2. creating an empty array -- passing the empty initializer list to the
+ initializer list constructor yields an empty object
+
+ @param[in] init initializer list with JSON values to create an array from
+ (optional)
+
+ @return JSON array value
+
+ @complexity Linear in the size of @a init.
+
+ @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+ changes to any JSON value.
+
+ @liveexample{The following code shows an example for the `array`
+ function.,array}
+
+ @sa @ref basic_json(initializer_list_t, bool, value_t) --
+ create a JSON value from an initializer list
+ @sa @ref object(initializer_list_t) -- create a JSON object
+ value from an initializer list
+
+ @since version 1.0.0
+ */
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json array(initializer_list_t init = {})
+ {
+ return basic_json(init, false, value_t::array);
+ }
+
+ /*!
+ @brief explicitly create an object from an initializer list
+
+ Creates a JSON object value from a given initializer list. The initializer
+ list's elements must be pairs, and their first elements must be strings. If
+ the initializer list is empty, the empty object `{}` is created.
+
+ @note This function is only added for symmetry reasons. In contrast to the
+ related function @ref array(initializer_list_t), there are
+ no cases which can only be expressed by this function. That is, any
+ initializer list @a init can also be passed to the initializer list
+ constructor @ref basic_json(initializer_list_t, bool, value_t).
+
+ @param[in] init initializer list to create an object from (optional)
+
+ @return JSON object value
+
+ @throw type_error.301 if @a init is not a list of pairs whose first
+ elements are strings. In this case, no object can be created. When such a
+ value is passed to @ref basic_json(initializer_list_t, bool, value_t),
+ an array would have been created from the passed initializer list @a init.
+ See example below.
+
+ @complexity Linear in the size of @a init.
+
+ @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+ changes to any JSON value.
+
+ @liveexample{The following code shows an example for the `object`
+ function.,object}
+
+ @sa @ref basic_json(initializer_list_t, bool, value_t) --
+ create a JSON value from an initializer list
+ @sa @ref array(initializer_list_t) -- create a JSON array
+ value from an initializer list
+
+ @since version 1.0.0
+ */
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json object(initializer_list_t init = {})
+ {
+ return basic_json(init, false, value_t::object);
+ }
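+
+    // Editorial example (not upstream code): the two edge cases that need the
+    // explicit array() factory, plus the symmetric object() call.
+    //
+    //   auto a1 = nlohmann::json::array();                      // []
+    //   auto a2 = nlohmann::json::array({{"x", 1}, {"y", 2}});  // [["x",1],["y",2]], stays an array
+    //   auto o1 = nlohmann::json::object();                     // {}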
+
+ /*!
+ @brief construct an array with count copies of given value
+
+ Constructs a JSON array value by creating @a cnt copies of a passed value.
+ In case @a cnt is `0`, an empty array is created.
+
+ @param[in] cnt the number of JSON copies of @a val to create
+ @param[in] val the JSON value to copy
+
+ @post `std::distance(begin(),end()) == cnt` holds.
+
+ @complexity Linear in @a cnt.
+
+ @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+ changes to any JSON value.
+
+ @liveexample{The following code shows examples for the @ref
+ basic_json(size_type\, const basic_json&)
+ constructor.,basic_json__size_type_basic_json}
+
+ @since version 1.0.0
+ */
+ basic_json(size_type cnt, const basic_json& val)
+ : m_type(value_t::array)
+ {
+ m_value.array = create<array_t>(cnt, val);
+ assert_invariant();
+ }
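+
+    // Editorial example (not upstream code): an array built from three copies of
+    // one value; the string "x" is illustrative only.
+    //
+    //   nlohmann::json filled(3, nlohmann::json("x"));
+    //   // filled.dump() == R"(["x","x","x"])"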
+
+ /*!
+ @brief construct a JSON container given an iterator range
+
+ Constructs the JSON value with the contents of the range `[first, last)`.
+ The semantics depends on the different types a JSON value can have:
+ - In case of a null type, invalid_iterator.206 is thrown.
+ - In case of other primitive types (number, boolean, or string), @a first
+ must be `begin()` and @a last must be `end()`. In this case, the value is
+ copied. Otherwise, invalid_iterator.204 is thrown.
+ - In case of structured types (array, object), the constructor behaves as
+ similar versions for `std::vector` or `std::map`; that is, a JSON array
+ or object is constructed from the values in the range.
+
+ @tparam InputIT an input iterator type (@ref iterator or @ref
+ const_iterator)
+
+ @param[in] first begin of the range to copy from (included)
+ @param[in] last end of the range to copy from (excluded)
+
+ @pre Iterators @a first and @a last must be initialized. **This
+ precondition is enforced with an assertion (see warning).** If
+ assertions are switched off, a violation of this precondition yields
+ undefined behavior.
+
+ @pre Range `[first, last)` is valid. Usually, this precondition cannot be
+ checked efficiently. Only certain edge cases are detected; see the
+ description of the exceptions below. A violation of this precondition
+ yields undefined behavior.
+
+ @warning A precondition is enforced with a runtime assertion that will
+ result in calling `std::abort` if this precondition is not met.
+ Assertions can be disabled by defining `NDEBUG` at compile time.
+ See https://en.cppreference.com/w/cpp/error/assert for more
+ information.
+
+ @throw invalid_iterator.201 if iterators @a first and @a last are not
+ compatible (i.e., do not belong to the same JSON value). In this case,
+ the range `[first, last)` is undefined.
+ @throw invalid_iterator.204 if iterators @a first and @a last belong to a
+ primitive type (number, boolean, or string), but @a first does not point
+ to the first element any more. In this case, the range `[first, last)` is
+ undefined. See example code below.
+ @throw invalid_iterator.206 if iterators @a first and @a last belong to a
+ null value. In this case, the range `[first, last)` is undefined.
+
+ @complexity Linear in distance between @a first and @a last.
+
+ @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+ changes to any JSON value.
+
+ @liveexample{The example below shows several ways to create JSON values by
+ specifying a subrange with iterators.,basic_json__InputIt_InputIt}
+
+ @since version 1.0.0
+ */
+ template < class InputIT, typename std::enable_if <
+ std::is_same<InputIT, typename basic_json_t::iterator>::value ||
+ std::is_same<InputIT, typename basic_json_t::const_iterator>::value, int >::type = 0 >
+ basic_json(InputIT first, InputIT last)
+ {
+ JSON_ASSERT(first.m_object != nullptr);
+ JSON_ASSERT(last.m_object != nullptr);
+
+ // make sure iterator fits the current value
+ if (JSON_HEDLEY_UNLIKELY(first.m_object != last.m_object))
+ {
+ JSON_THROW(invalid_iterator::create(201, "iterators are not compatible"));
+ }
+
+ // copy type from first iterator
+ m_type = first.m_object->m_type;
+
+ // check if iterator range is complete for primitive values
+ switch (m_type)
+ {
+ case value_t::boolean:
+ case value_t::number_float:
+ case value_t::number_integer:
+ case value_t::number_unsigned:
+ case value_t::string:
+ {
+ if (JSON_HEDLEY_UNLIKELY(!first.m_it.primitive_iterator.is_begin()
+ || !last.m_it.primitive_iterator.is_end()))
+ {
+ JSON_THROW(invalid_iterator::create(204, "iterators out of range"));
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ switch (m_type)
+ {
+ case value_t::number_integer:
+ {
+ m_value.number_integer = first.m_object->m_value.number_integer;
+ break;
+ }
+
+ case value_t::number_unsigned:
+ {
+ m_value.number_unsigned = first.m_object->m_value.number_unsigned;
+ break;
+ }
+
+ case value_t::number_float:
+ {
+ m_value.number_float = first.m_object->m_value.number_float;
+ break;
+ }
+
+ case value_t::boolean:
+ {
+ m_value.boolean = first.m_object->m_value.boolean;
+ break;
+ }
+
+ case value_t::string:
+ {
+ m_value = *first.m_object->m_value.string;
+ break;
+ }
+
+ case value_t::object:
+ {
+ m_value.object = create<object_t>(first.m_it.object_iterator,
+ last.m_it.object_iterator);
+ break;
+ }
+
+ case value_t::array:
+ {
+ m_value.array = create<array_t>(first.m_it.array_iterator,
+ last.m_it.array_iterator);
+ break;
+ }
+
+ case value_t::binary:
+ {
+ m_value = *first.m_object->m_value.binary;
+ break;
+ }
+
+ default:
+ JSON_THROW(invalid_iterator::create(206, "cannot construct with iterators from " +
+ std::string(first.m_object->type_name())));
+ }
+
+ assert_invariant();
+ }
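+
+ // Usage sketch (illustrative), assuming `using json = nlohmann::json;`:
+ // building a value from an iterator range; primitives need the full range.
+ //
+ //     json arr = {1, 2, 3, 4, 5};
+ //     json sub(arr.begin() + 1, arr.begin() + 3);   // [2,3]
+ //     json num = 17;
+ //     json cpy(num.begin(), num.end());             // 17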
+
+
+ ///////////////////////////////////////
+ // other constructors and destructor //
+ ///////////////////////////////////////
+
+ template<typename JsonRef,
+ detail::enable_if_t<detail::conjunction<detail::is_json_ref<JsonRef>,
+ std::is_same<typename JsonRef::value_type, basic_json>>::value, int> = 0 >
+ basic_json(const JsonRef& ref) : basic_json(ref.moved_or_copied()) {}
+
+ /*!
+ @brief copy constructor
+
+ Creates a copy of a given JSON value.
+
+ @param[in] other the JSON value to copy
+
+ @post `*this == other`
+
+ @complexity Linear in the size of @a other.
+
+ @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+ changes to any JSON value.
+
+ @requirement This function helps `basic_json` satisfying the
+ [Container](https://en.cppreference.com/w/cpp/named_req/Container)
+ requirements:
+ - The complexity is linear.
+ - As postcondition, it holds: `other == basic_json(other)`.
+
+ @liveexample{The following code shows an example for the copy
+ constructor.,basic_json__basic_json}
+
+ @since version 1.0.0
+ */
+ basic_json(const basic_json& other)
+ : m_type(other.m_type)
+ {
+ // check that the passed value is valid
+ other.assert_invariant();
+
+ switch (m_type)
+ {
+ case value_t::object:
+ {
+ m_value = *other.m_value.object;
+ break;
+ }
+
+ case value_t::array:
+ {
+ m_value = *other.m_value.array;
+ break;
+ }
+
+ case value_t::string:
+ {
+ m_value = *other.m_value.string;
+ break;
+ }
+
+ case value_t::boolean:
+ {
+ m_value = other.m_value.boolean;
+ break;
+ }
+
+ case value_t::number_integer:
+ {
+ m_value = other.m_value.number_integer;
+ break;
+ }
+
+ case value_t::number_unsigned:
+ {
+ m_value = other.m_value.number_unsigned;
+ break;
+ }
+
+ case value_t::number_float:
+ {
+ m_value = other.m_value.number_float;
+ break;
+ }
+
+ case value_t::binary:
+ {
+ m_value = *other.m_value.binary;
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ assert_invariant();
+ }
+
+ /*!
+ @brief move constructor
+
+ Move constructor. Constructs a JSON value with the contents of the given
+ value @a other using move semantics. It "steals" the resources from @a
+ other and leaves it as a JSON null value.
+
+ @param[in,out] other value to move to this object
+
+ @post `*this` has the same value as @a other before the call.
+ @post @a other is a JSON null value.
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this constructor never throws
+ exceptions.
+
+ @requirement This function helps `basic_json` satisfying the
+ [MoveConstructible](https://en.cppreference.com/w/cpp/named_req/MoveConstructible)
+ requirements.
+
+ @liveexample{The code below shows the move constructor explicitly called
+ via std::move.,basic_json__moveconstructor}
+
+ @since version 1.0.0
+ */
+ basic_json(basic_json&& other) noexcept
+ : m_type(std::move(other.m_type)),
+ m_value(std::move(other.m_value))
+ {
+ // check that passed value is valid
+ other.assert_invariant();
+
+ // invalidate payload
+ other.m_type = value_t::null;
+ other.m_value = {};
+
+ assert_invariant();
+ }
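+
+ // Usage sketch (illustrative), assuming `using json = nlohmann::json;`:
+ // moving leaves the source as a JSON null value.
+ //
+ //     json a = {{"key", "value"}};
+ //     json b(std::move(a));        // b == {"key":"value"}
+ //     // a.is_null() == true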
+
+ /*!
+ @brief copy assignment
+
+ Copy assignment operator. Copies a JSON value via the "copy and swap"
+ strategy: It is expressed in terms of the copy constructor, destructor,
+ and the `swap()` member function.
+
+ @param[in] other value to copy from
+
+ @complexity Linear.
+
+ @requirement This function helps `basic_json` satisfying the
+ [Container](https://en.cppreference.com/w/cpp/named_req/Container)
+ requirements:
+ - The complexity is linear.
+
+ @liveexample{The code below shows an example for the copy assignment. It
+ creates a copy of value `a` which is then swapped with `b`. Finally\, the
+ copy of `a` (which is the null value after the swap) is
+ destroyed.,basic_json__copyassignment}
+
+ @since version 1.0.0
+ */
+ basic_json& operator=(basic_json other) noexcept (
+ std::is_nothrow_move_constructible<value_t>::value&&
+ std::is_nothrow_move_assignable<value_t>::value&&
+ std::is_nothrow_move_constructible<json_value>::value&&
+ std::is_nothrow_move_assignable<json_value>::value
+ )
+ {
+ // check that passed value is valid
+ other.assert_invariant();
+
+ using std::swap;
+ swap(m_type, other.m_type);
+ swap(m_value, other.m_value);
+
+ assert_invariant();
+ return *this;
+ }
+
+ /*!
+ @brief destructor
+
+ Destroys the JSON value and frees all allocated memory.
+
+ @complexity Linear.
+
+ @requirement This function helps `basic_json` satisfying the
+ [Container](https://en.cppreference.com/w/cpp/named_req/Container)
+ requirements:
+ - The complexity is linear.
+ - All stored elements are destroyed and all memory is freed.
+
+ @since version 1.0.0
+ */
+ ~basic_json() noexcept
+ {
+ assert_invariant();
+ m_value.destroy(m_type);
+ }
+
+ /// @}
+
+ public:
+ ///////////////////////
+ // object inspection //
+ ///////////////////////
+
+ /// @name object inspection
+ /// Functions to inspect the type of a JSON value.
+ /// @{
+
+ /*!
+ @brief serialization
+
+ Serialization function for JSON values. The function tries to mimic
+ Python's `json.dumps()` function, and currently supports its @a indent
+ and @a ensure_ascii parameters.
+
+ @param[in] indent If indent is nonnegative, then array elements and object
+ members will be pretty-printed with that indent level. An indent level of
+ `0` will only insert newlines. `-1` (the default) selects the most compact
+ representation.
+ @param[in] indent_char The character to use for indentation if @a indent is
+ greater than `0`. The default is ` ` (space).
+ @param[in] ensure_ascii If @a ensure_ascii is true, all non-ASCII characters
+ in the output are escaped with `\uXXXX` sequences, and the result consists
+ of ASCII characters only.
+ @param[in] error_handler how to react on decoding errors; there are three
+ possible values: `strict` (throws an exception in case a decoding error
+ occurs; default), `replace` (replace invalid UTF-8 sequences with U+FFFD),
+ and `ignore` (ignore invalid UTF-8 sequences during serialization; all
+ bytes are copied to the output unchanged).
+
+ @return string containing the serialization of the JSON value
+
+ @throw type_error.316 if a string stored inside the JSON value is not
+ UTF-8 encoded and @a error_handler is set to strict
+
+ @note Binary values are serialized as an object containing two keys:
+ - "bytes": an array of bytes as integers
+ - "subtype": the subtype as integer or "null" if the binary has no subtype
+
+ @complexity Linear.
+
+ @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+ changes in the JSON value.
+
+ @liveexample{The following example shows the effect of different @a indent\,
+ @a indent_char\, and @a ensure_ascii parameters to the result of the
+ serialization.,dump}
+
+ @see https://docs.python.org/2/library/json.html#json.dump
+
+ @since version 1.0.0; indentation character @a indent_char, option
+ @a ensure_ascii and exceptions added in version 3.0.0; error
+ handlers added in version 3.4.0; serialization of binary values added
+ in version 3.8.0.
+ */
+ string_t dump(const int indent = -1,
+ const char indent_char = ' ',
+ const bool ensure_ascii = false,
+ const error_handler_t error_handler = error_handler_t::strict) const
+ {
+ string_t result;
+ serializer s(detail::output_adapter<char, string_t>(result), indent_char, error_handler);
+
+ if (indent >= 0)
+ {
+ s.dump(*this, true, ensure_ascii, static_cast<unsigned int>(indent));
+ }
+ else
+ {
+ s.dump(*this, false, ensure_ascii, 0);
+ }
+
+ return result;
+ }
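+
+ // Usage sketch (illustrative), assuming `using json = nlohmann::json;`:
+ // compact, pretty-printed, and ASCII-escaped serialization.
+ //
+ //     json j = {{"happy", true}, {"name", "Jürgen"}};
+ //     j.dump();               // {"happy":true,"name":"Jürgen"}
+ //     j.dump(4);              // pretty-printed with a 4-space indent
+ //     j.dump(-1, ' ', true);  // {"happy":true,"name":"J\u00fcrgen"}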
+
+ /*!
+ @brief return the type of the JSON value (explicit)
+
+ Return the type of the JSON value as a value from the @ref value_t
+ enumeration.
+
+ @return the type of the JSON value
+ Value type | return value
+ ------------------------- | -------------------------
+ null | value_t::null
+ boolean | value_t::boolean
+ string | value_t::string
+ number (integer) | value_t::number_integer
+ number (unsigned integer) | value_t::number_unsigned
+ number (floating-point) | value_t::number_float
+ object | value_t::object
+ array | value_t::array
+ binary | value_t::binary
+ discarded | value_t::discarded
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @liveexample{The following code exemplifies `type()` for all JSON
+ types.,type}
+
+ @sa @ref operator value_t() -- return the type of the JSON value (implicit)
+ @sa @ref type_name() -- return the type as string
+
+ @since version 1.0.0
+ */
+ constexpr value_t type() const noexcept
+ {
+ return m_type;
+ }
+
+ /*!
+ @brief return whether type is primitive
+
+ This function returns true if and only if the JSON type is primitive
+ (string, number, boolean, or null).
+
+ @return `true` if type is primitive (string, number, boolean, or null),
+ `false` otherwise.
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @liveexample{The following code exemplifies `is_primitive()` for all JSON
+ types.,is_primitive}
+
+ @sa @ref is_structured() -- returns whether JSON value is structured
+ @sa @ref is_null() -- returns whether JSON value is `null`
+ @sa @ref is_string() -- returns whether JSON value is a string
+ @sa @ref is_boolean() -- returns whether JSON value is a boolean
+ @sa @ref is_number() -- returns whether JSON value is a number
+ @sa @ref is_binary() -- returns whether JSON value is a binary array
+
+ @since version 1.0.0
+ */
+ constexpr bool is_primitive() const noexcept
+ {
+ return is_null() || is_string() || is_boolean() || is_number() || is_binary();
+ }
+
+ /*!
+ @brief return whether type is structured
+
+ This function returns true if and only if the JSON type is structured
+ (array or object).
+
+ @return `true` if type is structured (array or object), `false` otherwise.
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @liveexample{The following code exemplifies `is_structured()` for all JSON
+ types.,is_structured}
+
+ @sa @ref is_primitive() -- returns whether value is primitive
+ @sa @ref is_array() -- returns whether value is an array
+ @sa @ref is_object() -- returns whether value is an object
+
+ @since version 1.0.0
+ */
+ constexpr bool is_structured() const noexcept
+ {
+ return is_array() || is_object();
+ }
+
+ /*!
+ @brief return whether value is null
+
+ This function returns true if and only if the JSON value is null.
+
+ @return `true` if type is null, `false` otherwise.
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @liveexample{The following code exemplifies `is_null()` for all JSON
+ types.,is_null}
+
+ @since version 1.0.0
+ */
+ constexpr bool is_null() const noexcept
+ {
+ return m_type == value_t::null;
+ }
+
+ /*!
+ @brief return whether value is a boolean
+
+ This function returns true if and only if the JSON value is a boolean.
+
+ @return `true` if type is boolean, `false` otherwise.
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @liveexample{The following code exemplifies `is_boolean()` for all JSON
+ types.,is_boolean}
+
+ @since version 1.0.0
+ */
+ constexpr bool is_boolean() const noexcept
+ {
+ return m_type == value_t::boolean;
+ }
+
+ /*!
+ @brief return whether value is a number
+
+ This function returns true if and only if the JSON value is a number. This
+ includes both integer (signed and unsigned) and floating-point values.
+
+ @return `true` if type is a number (regardless of whether it is an integer,
+ unsigned integer, or floating-point number), `false` otherwise.
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @liveexample{The following code exemplifies `is_number()` for all JSON
+ types.,is_number}
+
+ @sa @ref is_number_integer() -- check if value is an integer or unsigned
+ integer number
+ @sa @ref is_number_unsigned() -- check if value is an unsigned integer
+ number
+ @sa @ref is_number_float() -- check if value is a floating-point number
+
+ @since version 1.0.0
+ */
+ constexpr bool is_number() const noexcept
+ {
+ return is_number_integer() || is_number_float();
+ }
+
+ /*!
+ @brief return whether value is an integer number
+
+ This function returns true if and only if the JSON value is a signed or
+ unsigned integer number. This excludes floating-point values.
+
+ @return `true` if type is an integer or unsigned integer number, `false`
+ otherwise.
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @liveexample{The following code exemplifies `is_number_integer()` for all
+ JSON types.,is_number_integer}
+
+ @sa @ref is_number() -- check if value is a number
+ @sa @ref is_number_unsigned() -- check if value is an unsigned integer
+ number
+ @sa @ref is_number_float() -- check if value is a floating-point number
+
+ @since version 1.0.0
+ */
+ constexpr bool is_number_integer() const noexcept
+ {
+ return m_type == value_t::number_integer || m_type == value_t::number_unsigned;
+ }
+
+ /*!
+ @brief return whether value is an unsigned integer number
+
+ This function returns true if and only if the JSON value is an unsigned
+ integer number. This excludes floating-point and signed integer values.
+
+ @return `true` if type is an unsigned integer number, `false` otherwise.
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @liveexample{The following code exemplifies `is_number_unsigned()` for all
+ JSON types.,is_number_unsigned}
+
+ @sa @ref is_number() -- check if value is a number
+ @sa @ref is_number_integer() -- check if value is an integer or unsigned
+ integer number
+ @sa @ref is_number_float() -- check if value is a floating-point number
+
+ @since version 2.0.0
+ */
+ constexpr bool is_number_unsigned() const noexcept
+ {
+ return m_type == value_t::number_unsigned;
+ }
+
+ /*!
+ @brief return whether value is a floating-point number
+
+ This function returns true if and only if the JSON value is a
+ floating-point number. This excludes signed and unsigned integer values.
+
+ @return `true` if type is a floating-point number, `false` otherwise.
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @liveexample{The following code exemplifies `is_number_float()` for all
+ JSON types.,is_number_float}
+
+ @sa @ref is_number() -- check if value is number
+ @sa @ref is_number_integer() -- check if value is an integer number
+ @sa @ref is_number_unsigned() -- check if value is an unsigned integer
+ number
+
+ @since version 1.0.0
+ */
+ constexpr bool is_number_float() const noexcept
+ {
+ return m_type == value_t::number_float;
+ }
+
+ /*!
+ @brief return whether value is an object
+
+ This function returns true if and only if the JSON value is an object.
+
+ @return `true` if type is object, `false` otherwise.
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @liveexample{The following code exemplifies `is_object()` for all JSON
+ types.,is_object}
+
+ @since version 1.0.0
+ */
+ constexpr bool is_object() const noexcept
+ {
+ return m_type == value_t::object;
+ }
+
+ /*!
+ @brief return whether value is an array
+
+ This function returns true if and only if the JSON value is an array.
+
+ @return `true` if type is array, `false` otherwise.
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @liveexample{The following code exemplifies `is_array()` for all JSON
+ types.,is_array}
+
+ @since version 1.0.0
+ */
+ constexpr bool is_array() const noexcept
+ {
+ return m_type == value_t::array;
+ }
+
+ /*!
+ @brief return whether value is a string
+
+ This function returns true if and only if the JSON value is a string.
+
+ @return `true` if type is string, `false` otherwise.
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @liveexample{The following code exemplifies `is_string()` for all JSON
+ types.,is_string}
+
+ @since version 1.0.0
+ */
+ constexpr bool is_string() const noexcept
+ {
+ return m_type == value_t::string;
+ }
+
+ /*!
+ @brief return whether value is a binary array
+
+ This function returns true if and only if the JSON value is a binary array.
+
+ @return `true` if type is binary array, `false` otherwise.
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @liveexample{The following code exemplifies `is_binary()` for all JSON
+ types.,is_binary}
+
+ @since version 3.8.0
+ */
+ constexpr bool is_binary() const noexcept
+ {
+ return m_type == value_t::binary;
+ }
+
+ /*!
+ @brief return whether value is discarded
+
+ This function returns true if and only if the JSON value was discarded
+ during parsing with a callback function (see @ref parser_callback_t).
+
+ @note This function will always be `false` for JSON values after parsing.
+ That is, discarded values can only occur during parsing, but will be
+ removed when inside a structured value or replaced by null in other cases.
+
+ @return `true` if type is discarded, `false` otherwise.
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @liveexample{The following code exemplifies `is_discarded()` for all JSON
+ types.,is_discarded}
+
+ @since version 1.0.0
+ */
+ constexpr bool is_discarded() const noexcept
+ {
+ return m_type == value_t::discarded;
+ }
+
+ /*!
+ @brief return the type of the JSON value (implicit)
+
+ Implicitly return the type of the JSON value as a value from the @ref
+ value_t enumeration.
+
+ @return the type of the JSON value
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @liveexample{The following code exemplifies the @ref value_t operator for
+ all JSON types.,operator__value_t}
+
+ @sa @ref type() -- return the type of the JSON value (explicit)
+ @sa @ref type_name() -- return the type as string
+
+ @since version 1.0.0
+ */
+ constexpr operator value_t() const noexcept
+ {
+ return m_type;
+ }
+
+ /// @}
+
+ private:
+ //////////////////
+ // value access //
+ //////////////////
+
+ /// get a boolean (explicit)
+ boolean_t get_impl(boolean_t* /*unused*/) const
+ {
+ if (JSON_HEDLEY_LIKELY(is_boolean()))
+ {
+ return m_value.boolean;
+ }
+
+ JSON_THROW(type_error::create(302, "type must be boolean, but is " + std::string(type_name())));
+ }
+
+ /// get a pointer to the value (object)
+ object_t* get_impl_ptr(object_t* /*unused*/) noexcept
+ {
+ return is_object() ? m_value.object : nullptr;
+ }
+
+ /// get a pointer to the value (object)
+ constexpr const object_t* get_impl_ptr(const object_t* /*unused*/) const noexcept
+ {
+ return is_object() ? m_value.object : nullptr;
+ }
+
+ /// get a pointer to the value (array)
+ array_t* get_impl_ptr(array_t* /*unused*/) noexcept
+ {
+ return is_array() ? m_value.array : nullptr;
+ }
+
+ /// get a pointer to the value (array)
+ constexpr const array_t* get_impl_ptr(const array_t* /*unused*/) const noexcept
+ {
+ return is_array() ? m_value.array : nullptr;
+ }
+
+ /// get a pointer to the value (string)
+ string_t* get_impl_ptr(string_t* /*unused*/) noexcept
+ {
+ return is_string() ? m_value.string : nullptr;
+ }
+
+ /// get a pointer to the value (string)
+ constexpr const string_t* get_impl_ptr(const string_t* /*unused*/) const noexcept
+ {
+ return is_string() ? m_value.string : nullptr;
+ }
+
+ /// get a pointer to the value (boolean)
+ boolean_t* get_impl_ptr(boolean_t* /*unused*/) noexcept
+ {
+ return is_boolean() ? &m_value.boolean : nullptr;
+ }
+
+ /// get a pointer to the value (boolean)
+ constexpr const boolean_t* get_impl_ptr(const boolean_t* /*unused*/) const noexcept
+ {
+ return is_boolean() ? &m_value.boolean : nullptr;
+ }
+
+ /// get a pointer to the value (integer number)
+ number_integer_t* get_impl_ptr(number_integer_t* /*unused*/) noexcept
+ {
+ return is_number_integer() ? &m_value.number_integer : nullptr;
+ }
+
+ /// get a pointer to the value (integer number)
+ constexpr const number_integer_t* get_impl_ptr(const number_integer_t* /*unused*/) const noexcept
+ {
+ return is_number_integer() ? &m_value.number_integer : nullptr;
+ }
+
+ /// get a pointer to the value (unsigned number)
+ number_unsigned_t* get_impl_ptr(number_unsigned_t* /*unused*/) noexcept
+ {
+ return is_number_unsigned() ? &m_value.number_unsigned : nullptr;
+ }
+
+ /// get a pointer to the value (unsigned number)
+ constexpr const number_unsigned_t* get_impl_ptr(const number_unsigned_t* /*unused*/) const noexcept
+ {
+ return is_number_unsigned() ? &m_value.number_unsigned : nullptr;
+ }
+
+ /// get a pointer to the value (floating-point number)
+ number_float_t* get_impl_ptr(number_float_t* /*unused*/) noexcept
+ {
+ return is_number_float() ? &m_value.number_float : nullptr;
+ }
+
+ /// get a pointer to the value (floating-point number)
+ constexpr const number_float_t* get_impl_ptr(const number_float_t* /*unused*/) const noexcept
+ {
+ return is_number_float() ? &m_value.number_float : nullptr;
+ }
+
+ /// get a pointer to the value (binary)
+ binary_t* get_impl_ptr(binary_t* /*unused*/) noexcept
+ {
+ return is_binary() ? m_value.binary : nullptr;
+ }
+
+ /// get a pointer to the value (binary)
+ constexpr const binary_t* get_impl_ptr(const binary_t* /*unused*/) const noexcept
+ {
+ return is_binary() ? m_value.binary : nullptr;
+ }
+
+ /*!
+ @brief helper function to implement get_ref()
+
+ This function helps to implement get_ref() without code duplication for
+ const and non-const overloads
+
+ @tparam ThisType will be deduced as `basic_json` or `const basic_json`
+
+ @throw type_error.303 if ReferenceType does not match underlying value
+ type of the current JSON
+ */
+ template<typename ReferenceType, typename ThisType>
+ static ReferenceType get_ref_impl(ThisType& obj)
+ {
+ // delegate the call to get_ptr<>()
+ auto ptr = obj.template get_ptr<typename std::add_pointer<ReferenceType>::type>();
+
+ if (JSON_HEDLEY_LIKELY(ptr != nullptr))
+ {
+ return *ptr;
+ }
+
+ JSON_THROW(type_error::create(303, "incompatible ReferenceType for get_ref, actual type is " + std::string(obj.type_name())));
+ }
+
+ public:
+ /// @name value access
+ /// Direct access to the stored value of a JSON value.
+ /// @{
+
+ /*!
+ @brief get special-case overload
+
+ This overload avoids a lot of template boilerplate; it can be seen as the
+ identity method.
+
+ @tparam BasicJsonType == @ref basic_json
+
+ @return a copy of *this
+
+ @complexity Constant.
+
+ @since version 2.1.0
+ */
+ template<typename BasicJsonType, detail::enable_if_t<
+ std::is_same<typename std::remove_const<BasicJsonType>::type, basic_json_t>::value,
+ int> = 0>
+ basic_json get() const
+ {
+ return *this;
+ }
+
+ /*!
+ @brief get special-case overload
+
+ This overload converts the current @ref basic_json into a different
+ @ref basic_json type
+
+ @tparam BasicJsonType == @ref basic_json
+
+ @return a copy of *this, converted into @tparam BasicJsonType
+
+ @complexity Depending on the implementation of the called `from_json()`
+ method.
+
+ @since version 3.2.0
+ */
+ template < typename BasicJsonType, detail::enable_if_t <
+ !std::is_same<BasicJsonType, basic_json>::value&&
+ detail::is_basic_json<BasicJsonType>::value, int > = 0 >
+ BasicJsonType get() const
+ {
+ return *this;
+ }
+
+ /*!
+ @brief get a value (explicit)
+
+ Explicit type conversion between the JSON value and a compatible value
+ which is [CopyConstructible](https://en.cppreference.com/w/cpp/named_req/CopyConstructible)
+ and [DefaultConstructible](https://en.cppreference.com/w/cpp/named_req/DefaultConstructible).
+ The value is converted by calling the @ref json_serializer<ValueType>
+ `from_json()` method.
+
+ The function is equivalent to executing
+ @code {.cpp}
+ ValueType ret;
+ JSONSerializer<ValueType>::from_json(*this, ret);
+ return ret;
+ @endcode
+
+ This overload is chosen if:
+ - @a ValueType is not @ref basic_json,
+ - @ref json_serializer<ValueType> has a `from_json()` method of the form
+ `void from_json(const basic_json&, ValueType&)`, and
+ - @ref json_serializer<ValueType> does not have a `from_json()` method of
+ the form `ValueType from_json(const basic_json&)`
+
+ @tparam ValueTypeCV the provided value type
+ @tparam ValueType the returned value type
+
+ @return copy of the JSON value, converted to @a ValueType
+
+ @throw what @ref json_serializer<ValueType> `from_json()` method throws
+
+ @liveexample{The example below shows several conversions from JSON values
+ to other types. There are a few things to note: (1) Floating-point numbers can
+ be converted to integers\, (2) A JSON array can be converted to a standard
+ `std::vector<short>`\, (3) A JSON object can be converted to C++
+ associative containers such as `std::unordered_map<std::string\,
+ json>`.,get__ValueType_const}
+
+ @since version 2.1.0
+ */
+ template < typename ValueTypeCV, typename ValueType = detail::uncvref_t<ValueTypeCV>,
+ detail::enable_if_t <
+ !detail::is_basic_json<ValueType>::value &&
+ detail::has_from_json<basic_json_t, ValueType>::value &&
+ !detail::has_non_default_from_json<basic_json_t, ValueType>::value,
+ int > = 0 >
+ ValueType get() const noexcept(noexcept(
+ JSONSerializer<ValueType>::from_json(std::declval<const basic_json_t&>(), std::declval<ValueType&>())))
+ {
+ // we cannot static_assert on ValueTypeCV being non-const, because
+ // there is support for get<const basic_json_t>(), which is why we
+ // still need the uncvref
+ static_assert(!std::is_reference<ValueTypeCV>::value,
+ "get() cannot be used with reference types, you might want to use get_ref()");
+ static_assert(std::is_default_constructible<ValueType>::value,
+ "types must be DefaultConstructible when used with get()");
+
+ ValueType ret;
+ JSONSerializer<ValueType>::from_json(*this, ret);
+ return ret;
+ }
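+
+ // Usage sketch (illustrative), assuming `using json = nlohmann::json;`
+ // and <vector> is included: explicit conversions via get<>().
+ //
+ //     json j = 3.14;
+ //     double d = j.get<double>();               // 3.14
+ //     int    n = j.get<int>();                  // 3 (floating-point truncated)
+ //     json arr = {1, 2, 3};
+ //     auto   v = arr.get<std::vector<int>>();   // std::vector<int>{1, 2, 3}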
+
+ /*!
+ @brief get a value (explicit); special case
+
+ Explicit type conversion between the JSON value and a compatible value
+ which is **not** [CopyConstructible](https://en.cppreference.com/w/cpp/named_req/CopyConstructible)
+ and **not** [DefaultConstructible](https://en.cppreference.com/w/cpp/named_req/DefaultConstructible).
+ The value is converted by calling the @ref json_serializer<ValueType>
+ `from_json()` method.
+
+ The function is equivalent to executing
+ @code {.cpp}
+ return JSONSerializer<ValueTypeCV>::from_json(*this);
+ @endcode
+
+ This overload is chosen if:
+ - @a ValueType is not @ref basic_json and
+ - @ref json_serializer<ValueType> has a `from_json()` method of the form
+ `ValueType from_json(const basic_json&)`
+
+ @note If @ref json_serializer<ValueType> has both overloads of
+ `from_json()`, this one is chosen.
+
+ @tparam ValueTypeCV the provided value type
+ @tparam ValueType the returned value type
+
+ @return copy of the JSON value, converted to @a ValueType
+
+ @throw what @ref json_serializer<ValueType> `from_json()` method throws
+
+ @since version 2.1.0
+ */
+ template < typename ValueTypeCV, typename ValueType = detail::uncvref_t<ValueTypeCV>,
+ detail::enable_if_t < !std::is_same<basic_json_t, ValueType>::value &&
+ detail::has_non_default_from_json<basic_json_t, ValueType>::value,
+ int > = 0 >
+ ValueType get() const noexcept(noexcept(
+ JSONSerializer<ValueType>::from_json(std::declval<const basic_json_t&>())))
+ {
+ static_assert(!std::is_reference<ValueTypeCV>::value,
+ "get() cannot be used with reference types, you might want to use get_ref()");
+ return JSONSerializer<ValueType>::from_json(*this);
+ }
+
+ /*!
+ @brief get a value (explicit)
+
+ Explicit type conversion between the JSON value and a compatible value.
+ The value is filled into the input parameter by calling the @ref json_serializer<ValueType>
+ `from_json()` method.
+
+ The function is equivalent to executing
+ @code {.cpp}
+ ValueType v;
+ JSONSerializer<ValueType>::from_json(*this, v);
+ @endcode
+
+ This overload is chosen if:
+ - @a ValueType is not @ref basic_json, and
+ - @ref json_serializer<ValueType> has a `from_json()` method of the form
+ `void from_json(const basic_json&, ValueType&)`.
+
+ @tparam ValueType the input parameter type.
+
+ @return the input parameter, allowing chained calls.
+
+ @throw what @ref json_serializer<ValueType> `from_json()` method throws
+
+ @liveexample{The example below shows several conversions from JSON values
+ to other types. There are a few things to note: (1) Floating-point numbers can
+ be converted to integers\, (2) A JSON array can be converted to a standard
+ `std::vector<short>`\, (3) A JSON object can be converted to C++
+ associative containers such as `std::unordered_map<std::string\,
+ json>`.,get_to}
+
+ @since version 3.3.0
+ */
+ template < typename ValueType,
+ detail::enable_if_t <
+ !detail::is_basic_json<ValueType>::value&&
+ detail::has_from_json<basic_json_t, ValueType>::value,
+ int > = 0 >
+ ValueType & get_to(ValueType& v) const noexcept(noexcept(
+ JSONSerializer<ValueType>::from_json(std::declval<const basic_json_t&>(), v)))
+ {
+ JSONSerializer<ValueType>::from_json(*this, v);
+ return v;
+ }
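+
+ // Usage sketch (illustrative), assuming `using json = nlohmann::json;`
+ // and <vector> is included: filling an existing variable with get_to().
+ //
+ //     json j = {1, 2, 3};
+ //     std::vector<int> v;
+ //     j.get_to(v);   // v == {1, 2, 3}; returns a reference to v for chaining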
+
+ // specialization to allow calling get_to with a basic_json value
+ // see https://github.com/nlohmann/json/issues/2175
+ template<typename ValueType,
+ detail::enable_if_t <
+ detail::is_basic_json<ValueType>::value,
+ int> = 0>
+ ValueType & get_to(ValueType& v) const
+ {
+ v = *this;
+ return v;
+ }
+
+ template <
+ typename T, std::size_t N,
+ typename Array = T (&)[N],
+ detail::enable_if_t <
+ detail::has_from_json<basic_json_t, Array>::value, int > = 0 >
+ Array get_to(T (&v)[N]) const
+ noexcept(noexcept(JSONSerializer<Array>::from_json(
+ std::declval<const basic_json_t&>(), v)))
+ {
+ JSONSerializer<Array>::from_json(*this, v);
+ return v;
+ }
+
+
+ /*!
+ @brief get a pointer value (implicit)
+
+ Implicit pointer access to the internally stored JSON value. No copies are
+ made.
+
+ @warning Writing data to the pointee of the result yields an undefined
+ state.
+
+ @tparam PointerType pointer type; must be a pointer to @ref array_t, @ref
+ object_t, @ref string_t, @ref boolean_t, @ref number_integer_t,
+ @ref number_unsigned_t, or @ref number_float_t. Enforced by a static
+ assertion.
+
+ @return pointer to the internally stored JSON value if the requested
+ pointer type @a PointerType fits to the JSON value; `nullptr` otherwise
+
+ @complexity Constant.
+
+ @liveexample{The example below shows how pointers to internal values of a
+ JSON value can be requested. Note that no type conversions are made and a
+ `nullptr` is returned if the value and the requested pointer type do not
+ match.,get_ptr}
+
+ @since version 1.0.0
+ */
+ template<typename PointerType, typename std::enable_if<
+ std::is_pointer<PointerType>::value, int>::type = 0>
+ auto get_ptr() noexcept -> decltype(std::declval<basic_json_t&>().get_impl_ptr(std::declval<PointerType>()))
+ {
+ // delegate the call to get_impl_ptr<>()
+ return get_impl_ptr(static_cast<PointerType>(nullptr));
+ }
+
+ /*!
+ @brief get a pointer value (implicit)
+ @copydoc get_ptr()
+ */
+ template < typename PointerType, typename std::enable_if <
+ std::is_pointer<PointerType>::value&&
+ std::is_const<typename std::remove_pointer<PointerType>::type>::value, int >::type = 0 >
+ constexpr auto get_ptr() const noexcept -> decltype(std::declval<const basic_json_t&>().get_impl_ptr(std::declval<PointerType>()))
+ {
+ // delegate the call to get_impl_ptr<>() const
+ return get_impl_ptr(static_cast<PointerType>(nullptr));
+ }
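+
+ // Usage sketch (illustrative), assuming `using json = nlohmann::json;`:
+ // pointer access returns nullptr on a type mismatch instead of throwing.
+ //
+ //     json j = 17;
+ //     auto* ip = j.get_ptr<json::number_integer_t*>();   // points at the stored integer
+ //     auto* sp = j.get_ptr<json::string_t*>();           // nullptr (type mismatch)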
+
+ /*!
+ @brief get a pointer value (explicit)
+
+ Explicit pointer access to the internally stored JSON value. No copies are
+ made.
+
+ @warning The pointer becomes invalid if the underlying JSON object
+ changes.
+
+ @tparam PointerType pointer type; must be a pointer to @ref array_t, @ref
+ object_t, @ref string_t, @ref boolean_t, @ref number_integer_t,
+ @ref number_unsigned_t, or @ref number_float_t.
+
+ @return pointer to the internally stored JSON value if the requested
+ pointer type @a PointerType fits to the JSON value; `nullptr` otherwise
+
+ @complexity Constant.
+
+ @liveexample{The example below shows how pointers to internal values of a
+ JSON value can be requested. Note that no type conversions are made and a
+ `nullptr` is returned if the value and the requested pointer type do not
+ match.,get__PointerType}
+
+ @sa @ref get_ptr() for explicit pointer-member access
+
+ @since version 1.0.0
+ */
+ template<typename PointerType, typename std::enable_if<
+ std::is_pointer<PointerType>::value, int>::type = 0>
+ auto get() noexcept -> decltype(std::declval<basic_json_t&>().template get_ptr<PointerType>())
+ {
+ // delegate the call to get_ptr
+ return get_ptr<PointerType>();
+ }
+
+ /*!
+ @brief get a pointer value (explicit)
+ @copydoc get()
+ */
+ template<typename PointerType, typename std::enable_if<
+ std::is_pointer<PointerType>::value, int>::type = 0>
+ constexpr auto get() const noexcept -> decltype(std::declval<const basic_json_t&>().template get_ptr<PointerType>())
+ {
+ // delegate the call to get_ptr
+ return get_ptr<PointerType>();
+ }
+
+ /*!
+ @brief get a reference value (implicit)
+
+ Implicit reference access to the internally stored JSON value. No copies
+ are made.
+
+ @warning Writing data to the referenced value of the result yields an undefined
+ state.
+
+ @tparam ReferenceType reference type; must be a reference to @ref array_t,
+ @ref object_t, @ref string_t, @ref boolean_t, @ref number_integer_t, or
+ @ref number_float_t. Enforced by static assertion.
+
+ @return reference to the internally stored JSON value if the requested
+ reference type @a ReferenceType fits to the JSON value; throws
+ type_error.303 otherwise
+
+ @throw type_error.303 in case passed type @a ReferenceType is incompatible
+ with the stored JSON value; see example below
+
+ @complexity Constant.
+
+ @liveexample{The example shows several calls to `get_ref()`.,get_ref}
+
+ @since version 1.1.0
+ */
+ template<typename ReferenceType, typename std::enable_if<
+ std::is_reference<ReferenceType>::value, int>::type = 0>
+ ReferenceType get_ref()
+ {
+ // delegate call to get_ref_impl
+ return get_ref_impl<ReferenceType>(*this);
+ }
+
+ /*!
+ @brief get a reference value (implicit)
+ @copydoc get_ref()
+ */
+ template < typename ReferenceType, typename std::enable_if <
+ std::is_reference<ReferenceType>::value&&
+ std::is_const<typename std::remove_reference<ReferenceType>::type>::value, int >::type = 0 >
+ ReferenceType get_ref() const
+ {
+ // delegate call to get_ref_impl
+ return get_ref_impl<ReferenceType>(*this);
+ }
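+
+ // Usage sketch (illustrative), assuming `using json = nlohmann::json;`:
+ // reference access throws on a type mismatch.
+ //
+ //     json j = "hello";
+ //     auto& s = j.get_ref<json::string_t&>();   // reference to the stored string
+ //     // j.get_ref<json::boolean_t&>() would throw type_error.303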
+
+ /*!
+ @brief get a value (implicit)
+
+ Implicit type conversion between the JSON value and a compatible value.
+ The call is realized by calling @ref get() const.
+
+ @tparam ValueType non-pointer type compatible to the JSON value, for
+ instance `int` for JSON integer numbers, `bool` for JSON booleans, or
+ `std::vector` types for JSON arrays. The character type of @ref string_t
+ as well as an initializer list of this type is excluded to avoid
+ ambiguities as these types implicitly convert to `std::string`.
+
+ @return copy of the JSON value, converted to type @a ValueType
+
+ @throw type_error.302 in case passed type @a ValueType is incompatible
+ to the JSON value type (e.g., the JSON value is of type boolean, but a
+ string is requested); see example below
+
+ @complexity Linear in the size of the JSON value.
+
+ @liveexample{The example below shows several conversions from JSON values
+ to other types. There are a few things to note: (1) Floating-point numbers can
+ be converted to integers\, (2) A JSON array can be converted to a standard
+ `std::vector<short>`\, (3) A JSON object can be converted to C++
+ associative containers such as `std::unordered_map<std::string\,
+ json>`.,operator__ValueType}
+
+ @since version 1.0.0
+ */
+ template < typename ValueType, typename std::enable_if <
+ !std::is_pointer<ValueType>::value&&
+ !std::is_same<ValueType, detail::json_ref<basic_json>>::value&&
+ !std::is_same<ValueType, typename string_t::value_type>::value&&
+ !detail::is_basic_json<ValueType>::value
+ && !std::is_same<ValueType, std::initializer_list<typename string_t::value_type>>::value
+#if defined(JSON_HAS_CPP_17) && (defined(__GNUC__) || (defined(_MSC_VER) && _MSC_VER >= 1910 && _MSC_VER <= 1914))
+ && !std::is_same<ValueType, typename std::string_view>::value
+#endif
+ && detail::is_detected<detail::get_template_function, const basic_json_t&, ValueType>::value
+ , int >::type = 0 >
+ JSON_EXPLICIT operator ValueType() const
+ {
+ // delegate the call to get<>() const
+ return get<ValueType>();
+ }
+
+ /*!
+ @return reference to the binary value
+
+ @throw type_error.302 if the value is not binary
+
+ @sa @ref is_binary() to check if the value is binary
+
+ @since version 3.8.0
+ */
+ binary_t& get_binary()
+ {
+ if (!is_binary())
+ {
+ JSON_THROW(type_error::create(302, "type must be binary, but is " + std::string(type_name())));
+ }
+
+ return *get_ptr<binary_t*>();
+ }
+
+ /// @copydoc get_binary()
+ const binary_t& get_binary() const
+ {
+ if (!is_binary())
+ {
+ JSON_THROW(type_error::create(302, "type must be binary, but is " + std::string(type_name())));
+ }
+
+ return *get_ptr<const binary_t*>();
+ }
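+
+ // Usage sketch (illustrative), assuming `using json = nlohmann::json;`:
+ // accessing the byte container of a binary value.
+ //
+ //     json j = json::binary({0xCA, 0xFE}, 42);
+ //     auto& bin = j.get_binary();   // bin.size() == 2, bin.subtype() == 42
+ //     // json(true).get_binary() would throw type_error.302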
+
+ /// @}
+
+
+ ////////////////////
+ // element access //
+ ////////////////////
+
+ /// @name element access
+ /// Access to the JSON value.
+ /// @{
+
+ /*!
+ @brief access specified array element with bounds checking
+
+ Returns a reference to the element at specified location @a idx, with
+ bounds checking.
+
+ @param[in] idx index of the element to access
+
+ @return reference to the element at index @a idx
+
+ @throw type_error.304 if the JSON value is not an array; in this case,
+ calling `at` with an index makes no sense. See example below.
+ @throw out_of_range.401 if the index @a idx is out of range of the array;
+ that is, `idx >= size()`. See example below.
+
+ @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+ changes in the JSON value.
+
+ @complexity Constant.
+
+ @since version 1.0.0
+
+ @liveexample{The example below shows how array elements can be read and
+ written using `at()`. It also demonstrates the different exceptions that
+ can be thrown.,at__size_type}
+ */
+ reference at(size_type idx)
+ {
+ // at only works for arrays
+ if (JSON_HEDLEY_LIKELY(is_array()))
+ {
+ JSON_TRY
+ {
+ return m_value.array->at(idx);
+ }
+ JSON_CATCH (std::out_of_range&)
+ {
+ // create better exception explanation
+ JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range"));
+ }
+ }
+ else
+ {
+ JSON_THROW(type_error::create(304, "cannot use at() with " + std::string(type_name())));
+ }
+ }
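+
+ // Usage sketch (illustrative), assuming `using json = nlohmann::json;`:
+ // checked element access for arrays.
+ //
+ //     json arr = {"first", "second"};
+ //     arr.at(1) = "SECOND";   // checked write access
+ //     // arr.at(5) throws out_of_range.401; json(42).at(0) throws type_error.304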
+
+ /*!
+ @brief access specified array element with bounds checking
+
+ Returns a const reference to the element at specified location @a idx,
+ with bounds checking.
+
+ @param[in] idx index of the element to access
+
+ @return const reference to the element at index @a idx
+
+ @throw type_error.304 if the JSON value is not an array; in this case,
+ calling `at` with an index makes no sense. See example below.
+ @throw out_of_range.401 if the index @a idx is out of range of the array;
+ that is, `idx >= size()`. See example below.
+
+ @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+ changes in the JSON value.
+
+ @complexity Constant.
+
+ @since version 1.0.0
+
+ @liveexample{The example below shows how array elements can be read using
+ `at()`. It also demonstrates the different exceptions that can be thrown.,
+ at__size_type_const}
+ */
+ const_reference at(size_type idx) const
+ {
+ // at only works for arrays
+ if (JSON_HEDLEY_LIKELY(is_array()))
+ {
+ JSON_TRY
+ {
+ return m_value.array->at(idx);
+ }
+ JSON_CATCH (std::out_of_range&)
+ {
+ // create better exception explanation
+ JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range"));
+ }
+ }
+ else
+ {
+ JSON_THROW(type_error::create(304, "cannot use at() with " + std::string(type_name())));
+ }
+ }
+
+ /*!
+ @brief access specified object element with bounds checking
+
+ Returns a reference to the element with the specified key @a key, with
+ bounds checking.
+
+ @param[in] key key of the element to access
+
+ @return reference to the element at key @a key
+
+ @throw type_error.304 if the JSON value is not an object; in this case,
+ calling `at` with a key makes no sense. See example below.
+ @throw out_of_range.403 if the key @a key is not stored in the object;
+ that is, `find(key) == end()`. See example below.
+
+ @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+ changes in the JSON value.
+
+ @complexity Logarithmic in the size of the container.
+
+ @sa @ref operator[](const typename object_t::key_type&) for unchecked
+ access by reference
+ @sa @ref value() for access by value with a default value
+
+ @since version 1.0.0
+
+ @liveexample{The example below shows how object elements can be read and
+ written using `at()`. It also demonstrates the different exceptions that
+ can be thrown.,at__object_t_key_type}
+ */
+ reference at(const typename object_t::key_type& key)
+ {
+ // at only works for objects
+ if (JSON_HEDLEY_LIKELY(is_object()))
+ {
+ JSON_TRY
+ {
+ return m_value.object->at(key);
+ }
+ JSON_CATCH (std::out_of_range&)
+ {
+ // create better exception explanation
+ JSON_THROW(out_of_range::create(403, "key '" + key + "' not found"));
+ }
+ }
+ else
+ {
+ JSON_THROW(type_error::create(304, "cannot use at() with " + std::string(type_name())));
+ }
+ }
+
+ /*!
+ @brief access specified object element with bounds checking
+
+ Returns a const reference to the element with the specified key @a key,
+ with bounds checking.
+
+ @param[in] key key of the element to access
+
+ @return const reference to the element at key @a key
+
+ @throw type_error.304 if the JSON value is not an object; in this case,
+ calling `at` with a key makes no sense. See example below.
+ @throw out_of_range.403 if the key @a key is not stored in the object;
+ that is, `find(key) == end()`. See example below.
+
+ @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+ changes in the JSON value.
+
+ @complexity Logarithmic in the size of the container.
+
+ @sa @ref operator[](const typename object_t::key_type&) for unchecked
+ access by reference
+ @sa @ref value() for access by value with a default value
+
+ @since version 1.0.0
+
+ @liveexample{The example below shows how object elements can be read using
+ `at()`. It also demonstrates the different exceptions that can be thrown.,
+ at__object_t_key_type_const}
+ */
+ const_reference at(const typename object_t::key_type& key) const
+ {
+ // at only works for objects
+ if (JSON_HEDLEY_LIKELY(is_object()))
+ {
+ JSON_TRY
+ {
+ return m_value.object->at(key);
+ }
+ JSON_CATCH (std::out_of_range&)
+ {
+ // create better exception explanation
+ JSON_THROW(out_of_range::create(403, "key '" + key + "' not found"));
+ }
+ }
+ else
+ {
+ JSON_THROW(type_error::create(304, "cannot use at() with " + std::string(type_name())));
+ }
+ }
+
+ /*!
+ @brief access specified array element
+
+ Returns a reference to the element at specified location @a idx.
+
+ @note If @a idx is beyond the range of the array (i.e., `idx >= size()`),
+ then the array is silently filled up with `null` values so that `idx` becomes
+ a valid index referring to the last stored element.
+
+ @param[in] idx index of the element to access
+
+ @return reference to the element at index @a idx
+
+ @throw type_error.305 if the JSON value is not an array or null; in that
+ case, using the [] operator with an index makes no sense.
+
+ @complexity Constant if @a idx is in the range of the array. Otherwise
+ linear in `idx - size()`.
+
+ @liveexample{The example below shows how array elements can be read and
+ written using `[]` operator. Note the addition of `null`
+ values.,operatorarray__size_type}
+
+ @since version 1.0.0
+ */
+ reference operator[](size_type idx)
+ {
+ // implicitly convert null value to an empty array
+ if (is_null())
+ {
+ m_type = value_t::array;
+ m_value.array = create<array_t>();
+ assert_invariant();
+ }
+
+ // operator[] only works for arrays
+ if (JSON_HEDLEY_LIKELY(is_array()))
+ {
+ // fill up array with null values if given idx is outside range
+ if (idx >= m_value.array->size())
+ {
+ m_value.array->insert(m_value.array->end(),
+ idx - m_value.array->size() + 1,
+ basic_json());
+ }
+
+ return m_value.array->operator[](idx);
+ }
+
+ JSON_THROW(type_error::create(305, "cannot use operator[] with a numeric argument with " + std::string(type_name())));
+ }
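+
+ // Usage sketch (illustrative), assuming `using json = nlohmann::json;`:
+ // writing past the end pads the array with null values.
+ //
+ //     json arr = {1, 2, 3};
+ //     arr[5] = 99;   // arr == [1,2,3,null,null,99]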
+
+ /*!
+ @brief access specified array element
+
+ Returns a const reference to the element at specified location @a idx.
+
+ @param[in] idx index of the element to access
+
+ @return const reference to the element at index @a idx
+
+ @throw type_error.305 if the JSON value is not an array; in that case,
+ using the [] operator with an index makes no sense.
+
+ @complexity Constant.
+
+ @liveexample{The example below shows how array elements can be read using
+ the `[]` operator.,operatorarray__size_type_const}
+
+ @since version 1.0.0
+ */
+ const_reference operator[](size_type idx) const
+ {
+ // const operator[] only works for arrays
+ if (JSON_HEDLEY_LIKELY(is_array()))
+ {
+ return m_value.array->operator[](idx);
+ }
+
+ JSON_THROW(type_error::create(305, "cannot use operator[] with a numeric argument with " + std::string(type_name())));
+ }
+
+ /*!
+ @brief access specified object element
+
+ Returns a reference to the element with the specified key @a key.
+
+ @note If @a key is not found in the object, then it is silently added to
+ the object and filled with a `null` value to make `key` a valid reference.
+ In case the value was `null` before, it is converted to an object.
+
+ @param[in] key key of the element to access
+
+ @return reference to the element at key @a key
+
+ @throw type_error.305 if the JSON value is not an object or null; in that
+ case, using the [] operator with a key makes no sense.
+
+ @complexity Logarithmic in the size of the container.
+
+ @liveexample{The example below shows how object elements can be read and
+ written using the `[]` operator.,operatorarray__key_type}
+
+ @sa @ref at(const typename object_t::key_type&) for access by reference
+ with range checking
+ @sa @ref value() for access by value with a default value
+
+ @since version 1.0.0
+ */
+ reference operator[](const typename object_t::key_type& key)
+ {
+ // implicitly convert null value to an empty object
+ if (is_null())
+ {
+ m_type = value_t::object;
+ m_value.object = create<object_t>();
+ assert_invariant();
+ }
+
+ // operator[] only works for objects
+ if (JSON_HEDLEY_LIKELY(is_object()))
+ {
+ return m_value.object->operator[](key);
+ }
+
+ JSON_THROW(type_error::create(305, "cannot use operator[] with a string argument with " + std::string(type_name())));
+ }
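+
+ // Usage sketch (illustrative), assuming `using json = nlohmann::json;`:
+ // operator[] converts null to an object and inserts missing keys.
+ //
+ //     json j;              // null
+ //     j["answer"] = 42;    // j == {"answer":42}
+ //     j["answer"] = 43;    // existing key is overwritten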
+
+ /*!
+ @brief read-only access specified object element
+
+ Returns a const reference to the element with the specified key @a key. No
+ bounds checking is performed.
+
+ @warning If the element with key @a key does not exist, the behavior is
+ undefined.
+
+ @param[in] key key of the element to access
+
+ @return const reference to the element at key @a key
+
+ @pre The element with key @a key must exist. **This precondition is
+ enforced with an assertion.**
+
+ @throw type_error.305 if the JSON value is not an object; in that case,
+ using the [] operator with a key makes no sense.
+
+ @complexity Logarithmic in the size of the container.
+
+ @liveexample{The example below shows how object elements can be read using
+ the `[]` operator.,operatorarray__key_type_const}
+
+ @sa @ref at(const typename object_t::key_type&) for access by reference
+ with range checking
+ @sa @ref value() for access by value with a default value
+
+ @since version 1.0.0
+ */
+ const_reference operator[](const typename object_t::key_type& key) const
+ {
+ // const operator[] only works for objects
+ if (JSON_HEDLEY_LIKELY(is_object()))
+ {
+ JSON_ASSERT(m_value.object->find(key) != m_value.object->end());
+ return m_value.object->find(key)->second;
+ }
+
+ JSON_THROW(type_error::create(305, "cannot use operator[] with a string argument with " + std::string(type_name())));
+ }
+
+ /*!
+ @brief access specified object element
+
+ Returns a reference to the element with the specified key @a key.
+
+ @note If @a key is not found in the object, then it is silently added to
+ the object and filled with a `null` value to make `key` a valid reference.
+ In case the value was `null` before, it is converted to an object.
+
+ @param[in] key key of the element to access
+
+ @return reference to the element at key @a key
+
+ @throw type_error.305 if the JSON value is not an object or null; in that
+ case, using the [] operator with a key makes no sense.
+
+ @complexity Logarithmic in the size of the container.
+
+ @liveexample{The example below shows how object elements can be read and
+ written using the `[]` operator.,operatorarray__key_type}
+
+ @sa @ref at(const typename object_t::key_type&) for access by reference
+ with range checking
+ @sa @ref value() for access by value with a default value
+
+ @since version 1.1.0
+ */
+ template<typename T>
+ JSON_HEDLEY_NON_NULL(2)
+ reference operator[](T* key)
+ {
+ // implicitly convert null to object
+ if (is_null())
+ {
+ m_type = value_t::object;
+ m_value = value_t::object;
+ assert_invariant();
+ }
+
+ // at only works for objects
+ if (JSON_HEDLEY_LIKELY(is_object()))
+ {
+ return m_value.object->operator[](key);
+ }
+
+ JSON_THROW(type_error::create(305, "cannot use operator[] with a string argument with " + std::string(type_name())));
+ }
+
+ /*!
+ @brief read-only access specified object element
+
+ Returns a const reference to the element with the specified key @a key. No
+ bounds checking is performed.
+
+ @warning If the element with key @a key does not exist, the behavior is
+ undefined.
+
+ @param[in] key key of the element to access
+
+ @return const reference to the element at key @a key
+
+ @pre The element with key @a key must exist. **This precondition is
+ enforced with an assertion.**
+
+ @throw type_error.305 if the JSON value is not an object; in that case,
+ using the [] operator with a key makes no sense.
+
+ @complexity Logarithmic in the size of the container.
+
+ @liveexample{The example below shows how object elements can be read using
+ the `[]` operator.,operatorarray__key_type_const}
+
+ @sa @ref at(const typename object_t::key_type&) for access by reference
+ with range checking
+ @sa @ref value() for access by value with a default value
+
+ @since version 1.1.0
+ */
+ template<typename T>
+ JSON_HEDLEY_NON_NULL(2)
+ const_reference operator[](T* key) const
+ {
+ // at only works for objects
+ if (JSON_HEDLEY_LIKELY(is_object()))
+ {
+ JSON_ASSERT(m_value.object->find(key) != m_value.object->end());
+ return m_value.object->find(key)->second;
+ }
+
+ JSON_THROW(type_error::create(305, "cannot use operator[] with a string argument with " + std::string(type_name())));
+ }
+
+ /*!
+ @brief access specified object element with default value
+
+ Returns either a copy of an object's element at the specified key @a key
+ or a given default value if no element with key @a key exists.
+
+ The function is basically equivalent to executing
+ @code {.cpp}
+ try {
+ return at(key);
+ } catch(out_of_range) {
+ return default_value;
+ }
+ @endcode
+
+ @note Unlike @ref at(const typename object_t::key_type&), this function
+ does not throw if the given key @a key was not found.
+
+ @note Unlike @ref operator[](const typename object_t::key_type& key), this
+ function does not implicitly add an element to the position defined by @a
+ key. Furthermore, this function is also applicable to const objects.
+
+ @param[in] key key of the element to access
+ @param[in] default_value the value to return if @a key is not found
+
+ @tparam ValueType type compatible to JSON values, for instance `int` for
+ JSON integer numbers, `bool` for JSON booleans, or `std::vector` types for
+ JSON arrays. Note the type of the expected value at @a key and the default
+ value @a default_value must be compatible.
+
+ @return copy of the element at key @a key or @a default_value if @a key
+ is not found
+
+ @throw type_error.302 if @a default_value does not match the type of the
+ value at @a key
+ @throw type_error.306 if the JSON value is not an object; in that case,
+ using `value()` with a key makes no sense.
+
+ @complexity Logarithmic in the size of the container.
+
+ @liveexample{The example below shows how object elements can be queried
+ with a default value.,basic_json__value}
+
+ @sa @ref at(const typename object_t::key_type&) for access by reference
+ with range checking
+ @sa @ref operator[](const typename object_t::key_type&) for unchecked
+ access by reference
+
+ @since version 1.0.0
+ */
+ // using std::is_convertible in a std::enable_if will fail when using explicit conversions
+ template < class ValueType, typename std::enable_if <
+ detail::is_getable<basic_json_t, ValueType>::value
+ && !std::is_same<value_t, ValueType>::value, int >::type = 0 >
+ ValueType value(const typename object_t::key_type& key, const ValueType& default_value) const
+ {
+ // at only works for objects
+ if (JSON_HEDLEY_LIKELY(is_object()))
+ {
+ // if the key is found, return its value; otherwise return the given default value
+ const auto it = find(key);
+ if (it != end())
+ {
+ return it->template get<ValueType>();
+ }
+
+ return default_value;
+ }
+
+ JSON_THROW(type_error::create(306, "cannot use value() with " + std::string(type_name())));
+ }
+
+ /*!
+ @brief overload for a default value of type const char*
+ @copydoc basic_json::value(const typename object_t::key_type&, const ValueType&) const
+ */
+ string_t value(const typename object_t::key_type& key, const char* default_value) const
+ {
+ return value(key, string_t(default_value));
+ }
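+
+ // A minimal usage sketch for value() (assuming the common `using json = nlohmann::json;`
+ // alias); the key names are illustrative only:
+ //
+ //     json j = {{"port", 5665}};
+ //     int port    = j.value("port", 0);       // key exists  -> 5665
+ //     int retries = j.value("retries", 3);    // key missing -> default 3
+ //     std::string host = j.value("host", "localhost");   // const char* default -> string_t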
+
+ /*!
+ @brief access specified object element via JSON Pointer with default value
+
+ Returns either a copy of the element addressed by the JSON pointer @a ptr
+ or a given default value if @a ptr does not resolve to an element.
+
+ The function is basically equivalent to executing
+ @code {.cpp}
+ try {
+ return at(ptr);
+ } catch(out_of_range) {
+ return default_value;
+ }
+ @endcode
+
+ @note Unlike @ref at(const json_pointer&), this function does not throw
+ if the given pointer @a ptr does not resolve to a value.
+
+ @param[in] ptr a JSON pointer to the element to access
+ @param[in] default_value the value to return if @a ptr found no value
+
+ @tparam ValueType type compatible to JSON values, for instance `int` for
+ JSON integer numbers, `bool` for JSON booleans, or `std::vector` types for
+ JSON arrays. Note that the type of the expected value at @a ptr and the default
+ value @a default_value must be compatible.
+
+ @return copy of the element at @a ptr or @a default_value if @a ptr
+ does not resolve to a value
+
+ @throw type_error.302 if @a default_value does not match the type of the
+ value at @a ptr
+ @throw type_error.306 if the JSON value is not an object; in that case,
+ using `value()` with a key makes no sense.
+
+ @complexity Logarithmic in the size of the container.
+
+ @liveexample{The example below shows how object elements can be queried
+ with a default value.,basic_json__value_ptr}
+
+ @sa @ref operator[](const json_pointer&) for unchecked access by reference
+
+ @since version 2.0.2
+ */
+ template<class ValueType, typename std::enable_if<
+ detail::is_getable<basic_json_t, ValueType>::value, int>::type = 0>
+ ValueType value(const json_pointer& ptr, const ValueType& default_value) const
+ {
+ // at only works for objects
+ if (JSON_HEDLEY_LIKELY(is_object()))
+ {
+ // if pointer resolves a value, return it or use default value
+ JSON_TRY
+ {
+ return ptr.get_checked(this).template get<ValueType>();
+ }
+ JSON_INTERNAL_CATCH (out_of_range&)
+ {
+ return default_value;
+ }
+ }
+
+ JSON_THROW(type_error::create(306, "cannot use value() with " + std::string(type_name())));
+ }
+
+ /*!
+ @brief overload for a default value of type const char*
+ @copydoc basic_json::value(const json_pointer&, ValueType) const
+ */
+ JSON_HEDLEY_NON_NULL(3)
+ string_t value(const json_pointer& ptr, const char* default_value) const
+ {
+ return value(ptr, string_t(default_value));
+ }
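+
+ // A minimal usage sketch for the JSON pointer overload of value() (assuming
+ // `using json = nlohmann::json;`); the pointer paths are illustrative only:
+ //
+ //     json j = {{"tls", {{"port", 5665}}}};
+ //     int port    = j.value(json::json_pointer("/tls/port"), 0);      // 5665
+ //     int timeout = j.value(json::json_pointer("/tls/timeout"), 30);  // not found -> 30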
+
+ /*!
+ @brief access the first element
+
+ Returns a reference to the first element in the container. For a JSON
+ container `c`, the expression `c.front()` is equivalent to `*c.begin()`.
+
+ @return In case of a structured type (array or object), a reference to the
+ first element is returned. In case of number, string, boolean, or binary
+ values, a reference to the value is returned.
+
+ @complexity Constant.
+
+ @pre The JSON value must not be `null` (would throw `invalid_iterator.214`)
+ or an empty array or object (undefined behavior, **guarded by
+ assertions**).
+ @post The JSON value remains unchanged.
+
+ @throw invalid_iterator.214 when called on `null` value
+
+ @liveexample{The following code shows an example for `front()`.,front}
+
+ @sa @ref back() -- access the last element
+
+ @since version 1.0.0
+ */
+ reference front()
+ {
+ return *begin();
+ }
+
+ /*!
+ @copydoc basic_json::front()
+ */
+ const_reference front() const
+ {
+ return *cbegin();
+ }
+
+ /*!
+ @brief access the last element
+
+ Returns a reference to the last element in the container. For a JSON
+ container `c`, the expression `c.back()` is equivalent to
+ @code {.cpp}
+ auto tmp = c.end();
+ --tmp;
+ return *tmp;
+ @endcode
+
+ @return In case of a structured type (array or object), a reference to the
+ last element is returned. In case of number, string, boolean, or binary
+ values, a reference to the value is returned.
+
+ @complexity Constant.
+
+ @pre The JSON value must not be `null` (would throw `invalid_iterator.214`)
+ or an empty array or object (undefined behavior, **guarded by
+ assertions**).
+ @post The JSON value remains unchanged.
+
+ @throw invalid_iterator.214 when called on a `null` value. See example
+ below.
+
+ @liveexample{The following code shows an example for `back()`.,back}
+
+ @sa @ref front() -- access the first element
+
+ @since version 1.0.0
+ */
+ reference back()
+ {
+ auto tmp = end();
+ --tmp;
+ return *tmp;
+ }
+
+ /*!
+ @copydoc basic_json::back()
+ */
+ const_reference back() const
+ {
+ auto tmp = cend();
+ --tmp;
+ return *tmp;
+ }
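+
+ // A minimal usage sketch for front() and back() (assuming `using json = nlohmann::json;`):
+ //
+ //     json arr = {1, 2, 3};
+ //     arr.front();   // 1
+ //     arr.back();    // 3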
+
+ /*!
+ @brief remove element given an iterator
+
+ Removes the element specified by iterator @a pos. The iterator @a pos must
+ be valid and dereferenceable. Thus the `end()` iterator (which is valid,
+ but is not dereferenceable) cannot be used as a value for @a pos.
+
+ If called on a primitive type other than `null`, the resulting JSON value
+ will be `null`.
+
+ @param[in] pos iterator to the element to remove
+ @return Iterator following the last removed element. If the iterator @a
+ pos refers to the last element, the `end()` iterator is returned.
+
+ @tparam IteratorType an @ref iterator or @ref const_iterator
+
+ @post Invalidates iterators and references at or after the point of the
+ erase, including the `end()` iterator.
+
+ @throw type_error.307 if called on a `null` value; example: `"cannot use
+ erase() with null"`
+ @throw invalid_iterator.202 if called on an iterator which does not belong
+ to the current JSON value; example: `"iterator does not fit current
+ value"`
+ @throw invalid_iterator.205 if called on a primitive type with invalid
+ iterator (i.e., any iterator which is not `begin()`); example: `"iterator
+ out of range"`
+
+ @complexity The complexity depends on the type:
+ - objects: amortized constant
+ - arrays: linear in distance between @a pos and the end of the container
+ - strings and binary: linear in the length of the member
+ - other types: constant
+
+ @liveexample{The example shows the result of `erase()` for different JSON
+ types.,erase__IteratorType}
+
+ @sa @ref erase(IteratorType, IteratorType) -- removes the elements in
+ the given range
+ @sa @ref erase(const typename object_t::key_type&) -- removes the element
+ from an object at the given key
+ @sa @ref erase(const size_type) -- removes the element from an array at
+ the given index
+
+ @since version 1.0.0
+ */
+ template < class IteratorType, typename std::enable_if <
+ std::is_same<IteratorType, typename basic_json_t::iterator>::value ||
+ std::is_same<IteratorType, typename basic_json_t::const_iterator>::value, int >::type
+ = 0 >
+ IteratorType erase(IteratorType pos)
+ {
+ // make sure iterator fits the current value
+ if (JSON_HEDLEY_UNLIKELY(this != pos.m_object))
+ {
+ JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value"));
+ }
+
+ IteratorType result = end();
+
+ switch (m_type)
+ {
+ case value_t::boolean:
+ case value_t::number_float:
+ case value_t::number_integer:
+ case value_t::number_unsigned:
+ case value_t::string:
+ case value_t::binary:
+ {
+ if (JSON_HEDLEY_UNLIKELY(!pos.m_it.primitive_iterator.is_begin()))
+ {
+ JSON_THROW(invalid_iterator::create(205, "iterator out of range"));
+ }
+
+ if (is_string())
+ {
+ AllocatorType<string_t> alloc;
+ std::allocator_traits<decltype(alloc)>::destroy(alloc, m_value.string);
+ std::allocator_traits<decltype(alloc)>::deallocate(alloc, m_value.string, 1);
+ m_value.string = nullptr;
+ }
+ else if (is_binary())
+ {
+ AllocatorType<binary_t> alloc;
+ std::allocator_traits<decltype(alloc)>::destroy(alloc, m_value.binary);
+ std::allocator_traits<decltype(alloc)>::deallocate(alloc, m_value.binary, 1);
+ m_value.binary = nullptr;
+ }
+
+ m_type = value_t::null;
+ assert_invariant();
+ break;
+ }
+
+ case value_t::object:
+ {
+ result.m_it.object_iterator = m_value.object->erase(pos.m_it.object_iterator);
+ break;
+ }
+
+ case value_t::array:
+ {
+ result.m_it.array_iterator = m_value.array->erase(pos.m_it.array_iterator);
+ break;
+ }
+
+ default:
+ JSON_THROW(type_error::create(307, "cannot use erase() with " + std::string(type_name())));
+ }
+
+ return result;
+ }
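+
+ // A minimal usage sketch for the iterator overload of erase() (assuming
+ // `using json = nlohmann::json;`):
+ //
+ //     json arr = {"a", "b", "c"};
+ //     auto it = arr.erase(arr.begin() + 1);   // removes "b"; it now points to "c"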
+
+ /*!
+ @brief remove elements given an iterator range
+
+ Removes the element specified by the range `[first; last)`. The iterator
+ @a first does not need to be dereferenceable if `first == last`: erasing
+ an empty range is a no-op.
+
+ If called on a primitive type other than `null`, the resulting JSON value
+ will be `null`.
+
+ @param[in] first iterator to the beginning of the range to remove
+ @param[in] last iterator past the end of the range to remove
+ @return Iterator following the last removed element. If the iterator @a
+ second refers to the last element, the `end()` iterator is returned.
+
+ @tparam IteratorType an @ref iterator or @ref const_iterator
+
+ @post Invalidates iterators and references at or after the point of the
+ erase, including the `end()` iterator.
+
+ @throw type_error.307 if called on a `null` value; example: `"cannot use
+ erase() with null"`
+ @throw invalid_iterator.203 if called on iterators that do not belong
+ to the current JSON value; example: `"iterators do not fit current value"`
+ @throw invalid_iterator.204 if called on a primitive type with invalid
+ iterators (i.e., if `first != begin()` or `last != end()`); example:
+ `"iterators out of range"`
+
+ @complexity The complexity depends on the type:
+ - objects: `log(size()) + std::distance(first, last)`
+ - arrays: linear in the distance between @a first and @a last, plus linear
+ in the distance between @a last and end of the container
+ - strings and binary: linear in the length of the member
+ - other types: constant
+
+ @liveexample{The example shows the result of `erase()` for different JSON
+ types.,erase__IteratorType_IteratorType}
+
+ @sa @ref erase(IteratorType) -- removes the element at a given position
+ @sa @ref erase(const typename object_t::key_type&) -- removes the element
+ from an object at the given key
+ @sa @ref erase(const size_type) -- removes the element from an array at
+ the given index
+
+ @since version 1.0.0
+ */
+ template < class IteratorType, typename std::enable_if <
+ std::is_same<IteratorType, typename basic_json_t::iterator>::value ||
+ std::is_same<IteratorType, typename basic_json_t::const_iterator>::value, int >::type
+ = 0 >
+ IteratorType erase(IteratorType first, IteratorType last)
+ {
+ // make sure iterator fits the current value
+ if (JSON_HEDLEY_UNLIKELY(this != first.m_object || this != last.m_object))
+ {
+ JSON_THROW(invalid_iterator::create(203, "iterators do not fit current value"));
+ }
+
+ IteratorType result = end();
+
+ switch (m_type)
+ {
+ case value_t::boolean:
+ case value_t::number_float:
+ case value_t::number_integer:
+ case value_t::number_unsigned:
+ case value_t::string:
+ case value_t::binary:
+ {
+ if (JSON_HEDLEY_UNLIKELY(!first.m_it.primitive_iterator.is_begin()
+ || !last.m_it.primitive_iterator.is_end()))
+ {
+ JSON_THROW(invalid_iterator::create(204, "iterators out of range"));
+ }
+
+ if (is_string())
+ {
+ AllocatorType<string_t> alloc;
+ std::allocator_traits<decltype(alloc)>::destroy(alloc, m_value.string);
+ std::allocator_traits<decltype(alloc)>::deallocate(alloc, m_value.string, 1);
+ m_value.string = nullptr;
+ }
+ else if (is_binary())
+ {
+ AllocatorType<binary_t> alloc;
+ std::allocator_traits<decltype(alloc)>::destroy(alloc, m_value.binary);
+ std::allocator_traits<decltype(alloc)>::deallocate(alloc, m_value.binary, 1);
+ m_value.binary = nullptr;
+ }
+
+ m_type = value_t::null;
+ assert_invariant();
+ break;
+ }
+
+ case value_t::object:
+ {
+ result.m_it.object_iterator = m_value.object->erase(first.m_it.object_iterator,
+ last.m_it.object_iterator);
+ break;
+ }
+
+ case value_t::array:
+ {
+ result.m_it.array_iterator = m_value.array->erase(first.m_it.array_iterator,
+ last.m_it.array_iterator);
+ break;
+ }
+
+ default:
+ JSON_THROW(type_error::create(307, "cannot use erase() with " + std::string(type_name())));
+ }
+
+ return result;
+ }
+
+ /*!
+ @brief remove element from a JSON object given a key
+
+ Removes elements from a JSON object with the key value @a key.
+
+ @param[in] key value of the elements to remove
+
+ @return Number of elements removed. If @a ObjectType is the default
+ `std::map` type, the return value will always be `0` (@a key was not
+ found) or `1` (@a key was found).
+
+ @post References and iterators to the erased elements are invalidated.
+ Other references and iterators are not affected.
+
+ @throw type_error.307 when called on a type other than JSON object;
+ example: `"cannot use erase() with null"`
+
+ @complexity `log(size()) + count(key)`
+
+ @liveexample{The example shows the effect of `erase()`.,erase__key_type}
+
+ @sa @ref erase(IteratorType) -- removes the element at a given position
+ @sa @ref erase(IteratorType, IteratorType) -- removes the elements in
+ the given range
+ @sa @ref erase(const size_type) -- removes the element from an array at
+ the given index
+
+ @since version 1.0.0
+ */
+ size_type erase(const typename object_t::key_type& key)
+ {
+ // this erase only works for objects
+ if (JSON_HEDLEY_LIKELY(is_object()))
+ {
+ return m_value.object->erase(key);
+ }
+
+ JSON_THROW(type_error::create(307, "cannot use erase() with " + std::string(type_name())));
+ }
+
+ /*!
+ @brief remove element from a JSON array given an index
+
+ Removes element from a JSON array at the index @a idx.
+
+ @param[in] idx index of the element to remove
+
+ @throw type_error.307 when called on a type other than JSON array;
+ example: `"cannot use erase() with null"`
+ @throw out_of_range.401 when `idx >= size()`; example: `"array index 17
+ is out of range"`
+
+ @complexity Linear in distance between @a idx and the end of the container.
+
+ @liveexample{The example shows the effect of `erase()`.,erase__size_type}
+
+ @sa @ref erase(IteratorType) -- removes the element at a given position
+ @sa @ref erase(IteratorType, IteratorType) -- removes the elements in
+ the given range
+ @sa @ref erase(const typename object_t::key_type&) -- removes the element
+ from an object at the given key
+
+ @since version 1.0.0
+ */
+ void erase(const size_type idx)
+ {
+ // this erase only works for arrays
+ if (JSON_HEDLEY_LIKELY(is_array()))
+ {
+ if (JSON_HEDLEY_UNLIKELY(idx >= size()))
+ {
+ JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range"));
+ }
+
+ m_value.array->erase(m_value.array->begin() + static_cast<difference_type>(idx));
+ }
+ else
+ {
+ JSON_THROW(type_error::create(307, "cannot use erase() with " + std::string(type_name())));
+ }
+ }
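+
+ // A minimal usage sketch for the key and index overloads of erase() (assuming
+ // `using json = nlohmann::json;`); the keys are illustrative only:
+ //
+ //     json obj = {{"a", 1}, {"b", 2}};
+ //     obj.erase("a");          // returns 1; obj == {"b": 2}
+ //
+ //     json arr = {10, 20, 30};
+ //     arr.erase(1);            // arr == [10, 30]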
+
+ /// @}
+
+
+ ////////////
+ // lookup //
+ ////////////
+
+ /// @name lookup
+ /// @{
+
+ /*!
+ @brief find an element in a JSON object
+
+ Finds an element in a JSON object with key equivalent to @a key. If the
+ element is not found or the JSON value is not an object, end() is
+ returned.
+
+ @note This method always returns @ref end() when executed on a JSON type
+ that is not an object.
+
+ @param[in] key key value of the element to search for.
+
+ @return Iterator to an element with key equivalent to @a key. If no such
+ element is found or the JSON value is not an object, past-the-end (see
+ @ref end()) iterator is returned.
+
+ @complexity Logarithmic in the size of the JSON object.
+
+ @liveexample{The example shows how `find()` is used.,find__key_type}
+
+ @sa @ref contains(KeyT&&) const -- checks whether a key exists
+
+ @since version 1.0.0
+ */
+ template<typename KeyT>
+ iterator find(KeyT&& key)
+ {
+ auto result = end();
+
+ if (is_object())
+ {
+ result.m_it.object_iterator = m_value.object->find(std::forward<KeyT>(key));
+ }
+
+ return result;
+ }
+
+ /*!
+ @brief find an element in a JSON object
+ @copydoc find(KeyT&&)
+ */
+ template<typename KeyT>
+ const_iterator find(KeyT&& key) const
+ {
+ auto result = cend();
+
+ if (is_object())
+ {
+ result.m_it.object_iterator = m_value.object->find(std::forward<KeyT>(key));
+ }
+
+ return result;
+ }
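+
+ // A minimal usage sketch for find() (assuming `using json = nlohmann::json;`);
+ // the key is illustrative only:
+ //
+ //     json obj = {{"a", 1}};
+ //     auto it = obj.find("a");
+ //     if (it != obj.end())
+ //     {
+ //         // it.key() == "a", it.value() == 1
+ //     }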
+
+ /*!
+ @brief returns the number of occurrences of a key in a JSON object
+
+ Returns the number of elements with key @a key. If ObjectType is the
+ default `std::map` type, the return value will always be `0` (@a key was
+ not found) or `1` (@a key was found).
+
+ @note This method always returns `0` when executed on a JSON type that is
+ not an object.
+
+ @param[in] key key value of the element to count
+
+ @return Number of elements with key @a key. If the JSON value is not an
+ object, the return value will be `0`.
+
+ @complexity Logarithmic in the size of the JSON object.
+
+ @liveexample{The example shows how `count()` is used.,count}
+
+ @since version 1.0.0
+ */
+ template<typename KeyT>
+ size_type count(KeyT&& key) const
+ {
+ // return 0 for all nonobject types
+ return is_object() ? m_value.object->count(std::forward<KeyT>(key)) : 0;
+ }
+
+ /*!
+ @brief check the existence of an element in a JSON object
+
+ Check whether an element exists in a JSON object with key equivalent to
+ @a key. If the element is not found or the JSON value is not an object,
+ false is returned.
+
+ @note This method always returns false when executed on a JSON type
+ that is not an object.
+
+ @param[in] key key value whose existence is checked.
+
+ @return true if an element with the specified @a key exists. If no such
+ element is found or the JSON value is not an object, false is
+ returned.
+
+ @complexity Logarithmic in the size of the JSON object.
+
+ @liveexample{The following code shows an example for `contains()`.,contains}
+
+ @sa @ref find(KeyT&&) -- returns an iterator to an object element
+ @sa @ref contains(const json_pointer&) const -- checks the existence for a JSON pointer
+
+ @since version 3.6.0
+ */
+ template < typename KeyT, typename std::enable_if <
+ !std::is_same<typename std::decay<KeyT>::type, json_pointer>::value, int >::type = 0 >
+ bool contains(KeyT && key) const
+ {
+ return is_object() && m_value.object->find(std::forward<KeyT>(key)) != m_value.object->end();
+ }
+
+ /*!
+ @brief check the existence of an element in a JSON object given a JSON pointer
+
+ Check whether the given JSON pointer @a ptr can be resolved in the current
+ JSON value.
+
+ @note This method can be executed on any JSON value type.
+
+ @param[in] ptr JSON pointer whose existence is checked.
+
+ @return true if the JSON pointer can be resolved to a stored value, false
+ otherwise.
+
+ @post If `j.contains(ptr)` returns true, it is safe to call `j[ptr]`.
+
+ @throw parse_error.106 if an array index begins with '0'
+ @throw parse_error.109 if an array index was not a number
+
+ @complexity Logarithmic in the size of the JSON object.
+
+ @liveexample{The following code shows an example for `contains()`.,contains_json_pointer}
+
+ @sa @ref contains(KeyT &&) const -- checks the existence of a key
+
+ @since version 3.7.0
+ */
+ bool contains(const json_pointer& ptr) const
+ {
+ return ptr.contains(this);
+ }
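+
+ // A minimal usage sketch for both contains() overloads (assuming
+ // `using json = nlohmann::json;`); the keys and pointer path are illustrative only:
+ //
+ //     json obj = {{"tls", {{"port", 5665}}}};
+ //     obj.contains("tls");                             // true
+ //     obj.contains("api");                             // false
+ //     obj.contains(json::json_pointer("/tls/port"));   // true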
+
+ /// @}
+
+
+ ///////////////
+ // iterators //
+ ///////////////
+
+ /// @name iterators
+ /// @{
+
+ /*!
+ @brief returns an iterator to the first element
+
+ Returns an iterator to the first element.
+
+ @image html range-begin-end.svg "Illustration from cppreference.com"
+
+ @return iterator to the first element
+
+ @complexity Constant.
+
+ @requirement This function helps `basic_json` satisfying the
+ [Container](https://en.cppreference.com/w/cpp/named_req/Container)
+ requirements:
+ - The complexity is constant.
+
+ @liveexample{The following code shows an example for `begin()`.,begin}
+
+ @sa @ref cbegin() -- returns a const iterator to the beginning
+ @sa @ref end() -- returns an iterator to the end
+ @sa @ref cend() -- returns a const iterator to the end
+
+ @since version 1.0.0
+ */
+ iterator begin() noexcept
+ {
+ iterator result(this);
+ result.set_begin();
+ return result;
+ }
+
+ /*!
+ @copydoc basic_json::cbegin()
+ */
+ const_iterator begin() const noexcept
+ {
+ return cbegin();
+ }
+
+ /*!
+ @brief returns a const iterator to the first element
+
+ Returns a const iterator to the first element.
+
+ @image html range-begin-end.svg "Illustration from cppreference.com"
+
+ @return const iterator to the first element
+
+ @complexity Constant.
+
+ @requirement This function helps `basic_json` satisfying the
+ [Container](https://en.cppreference.com/w/cpp/named_req/Container)
+ requirements:
+ - The complexity is constant.
+ - Has the semantics of `const_cast<const basic_json&>(*this).begin()`.
+
+ @liveexample{The following code shows an example for `cbegin()`.,cbegin}
+
+ @sa @ref begin() -- returns an iterator to the beginning
+ @sa @ref end() -- returns an iterator to the end
+ @sa @ref cend() -- returns a const iterator to the end
+
+ @since version 1.0.0
+ */
+ const_iterator cbegin() const noexcept
+ {
+ const_iterator result(this);
+ result.set_begin();
+ return result;
+ }
+
+ /*!
+ @brief returns an iterator to one past the last element
+
+ Returns an iterator to one past the last element.
+
+ @image html range-begin-end.svg "Illustration from cppreference.com"
+
+ @return iterator one past the last element
+
+ @complexity Constant.
+
+ @requirement This function helps `basic_json` satisfying the
+ [Container](https://en.cppreference.com/w/cpp/named_req/Container)
+ requirements:
+ - The complexity is constant.
+
+ @liveexample{The following code shows an example for `end()`.,end}
+
+ @sa @ref cend() -- returns a const iterator to the end
+ @sa @ref begin() -- returns an iterator to the beginning
+ @sa @ref cbegin() -- returns a const iterator to the beginning
+
+ @since version 1.0.0
+ */
+ iterator end() noexcept
+ {
+ iterator result(this);
+ result.set_end();
+ return result;
+ }
+
+ /*!
+ @copydoc basic_json::cend()
+ */
+ const_iterator end() const noexcept
+ {
+ return cend();
+ }
+
+ /*!
+ @brief returns a const iterator to one past the last element
+
+ Returns a const iterator to one past the last element.
+
+ @image html range-begin-end.svg "Illustration from cppreference.com"
+
+ @return const iterator one past the last element
+
+ @complexity Constant.
+
+ @requirement This function helps `basic_json` satisfying the
+ [Container](https://en.cppreference.com/w/cpp/named_req/Container)
+ requirements:
+ - The complexity is constant.
+ - Has the semantics of `const_cast<const basic_json&>(*this).end()`.
+
+ @liveexample{The following code shows an example for `cend()`.,cend}
+
+ @sa @ref end() -- returns an iterator to the end
+ @sa @ref begin() -- returns an iterator to the beginning
+ @sa @ref cbegin() -- returns a const iterator to the beginning
+
+ @since version 1.0.0
+ */
+ const_iterator cend() const noexcept
+ {
+ const_iterator result(this);
+ result.set_end();
+ return result;
+ }
+
+ /*!
+ @brief returns an iterator to the reverse-beginning
+
+ Returns an iterator to the reverse-beginning; that is, the last element.
+
+ @image html range-rbegin-rend.svg "Illustration from cppreference.com"
+
+ @complexity Constant.
+
+ @requirement This function helps `basic_json` satisfying the
+ [ReversibleContainer](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer)
+ requirements:
+ - The complexity is constant.
+ - Has the semantics of `reverse_iterator(end())`.
+
+ @liveexample{The following code shows an example for `rbegin()`.,rbegin}
+
+ @sa @ref crbegin() -- returns a const reverse iterator to the beginning
+ @sa @ref rend() -- returns a reverse iterator to the end
+ @sa @ref crend() -- returns a const reverse iterator to the end
+
+ @since version 1.0.0
+ */
+ reverse_iterator rbegin() noexcept
+ {
+ return reverse_iterator(end());
+ }
+
+ /*!
+ @copydoc basic_json::crbegin()
+ */
+ const_reverse_iterator rbegin() const noexcept
+ {
+ return crbegin();
+ }
+
+ /*!
+ @brief returns an iterator to the reverse-end
+
+ Returns an iterator to the reverse-end; that is, one before the first
+ element.
+
+ @image html range-rbegin-rend.svg "Illustration from cppreference.com"
+
+ @complexity Constant.
+
+ @requirement This function helps `basic_json` satisfying the
+ [ReversibleContainer](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer)
+ requirements:
+ - The complexity is constant.
+ - Has the semantics of `reverse_iterator(begin())`.
+
+ @liveexample{The following code shows an example for `rend()`.,rend}
+
+ @sa @ref crend() -- returns a const reverse iterator to the end
+ @sa @ref rbegin() -- returns a reverse iterator to the beginning
+ @sa @ref crbegin() -- returns a const reverse iterator to the beginning
+
+ @since version 1.0.0
+ */
+ reverse_iterator rend() noexcept
+ {
+ return reverse_iterator(begin());
+ }
+
+ /*!
+ @copydoc basic_json::crend()
+ */
+ const_reverse_iterator rend() const noexcept
+ {
+ return crend();
+ }
+
+ /*!
+ @brief returns a const reverse iterator to the last element
+
+ Returns a const iterator to the reverse-beginning; that is, the last
+ element.
+
+ @image html range-rbegin-rend.svg "Illustration from cppreference.com"
+
+ @complexity Constant.
+
+ @requirement This function helps `basic_json` satisfying the
+ [ReversibleContainer](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer)
+ requirements:
+ - The complexity is constant.
+ - Has the semantics of `const_cast<const basic_json&>(*this).rbegin()`.
+
+ @liveexample{The following code shows an example for `crbegin()`.,crbegin}
+
+ @sa @ref rbegin() -- returns a reverse iterator to the beginning
+ @sa @ref rend() -- returns a reverse iterator to the end
+ @sa @ref crend() -- returns a const reverse iterator to the end
+
+ @since version 1.0.0
+ */
+ const_reverse_iterator crbegin() const noexcept
+ {
+ return const_reverse_iterator(cend());
+ }
+
+ /*!
+ @brief returns a const reverse iterator to one before the first
+
+ Returns a const reverse iterator to the reverse-end; that is, one before
+ the first element.
+
+ @image html range-rbegin-rend.svg "Illustration from cppreference.com"
+
+ @complexity Constant.
+
+ @requirement This function helps `basic_json` satisfying the
+ [ReversibleContainer](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer)
+ requirements:
+ - The complexity is constant.
+ - Has the semantics of `const_cast<const basic_json&>(*this).rend()`.
+
+ @liveexample{The following code shows an example for `crend()`.,crend}
+
+ @sa @ref rend() -- returns a reverse iterator to the end
+ @sa @ref rbegin() -- returns a reverse iterator to the beginning
+ @sa @ref crbegin() -- returns a const reverse iterator to the beginning
+
+ @since version 1.0.0
+ */
+ const_reverse_iterator crend() const noexcept
+ {
+ return const_reverse_iterator(cbegin());
+ }
+
+ public:
+ /*!
+ @brief wrapper to access iterator member functions in range-based for
+
+ This function allows accessing @ref iterator::key() and @ref
+ iterator::value() during range-based for loops. In these loops, a
+ reference to the JSON values is returned, so there is no access to the
+ underlying iterator.
+
+ For loop without iterator_wrapper:
+
+ @code{cpp}
+ for (auto it = j_object.begin(); it != j_object.end(); ++it)
+ {
+ std::cout << "key: " << it.key() << ", value:" << it.value() << '\n';
+ }
+ @endcode
+
+ Range-based for loop without iterator proxy:
+
+ @code{cpp}
+ for (auto it : j_object)
+ {
+ // "it" is of type json::reference and has no key() member
+ std::cout << "value: " << it << '\n';
+ }
+ @endcode
+
+ Range-based for loop with iterator proxy:
+
+ @code{cpp}
+ for (auto it : json::iterator_wrapper(j_object))
+ {
+ std::cout << "key: " << it.key() << ", value:" << it.value() << '\n';
+ }
+ @endcode
+
+ @note When iterating over an array, `key()` will return the index of the
+ element as string (see example).
+
+ @param[in] ref reference to a JSON value
+ @return iteration proxy object wrapping @a ref with an interface to use in
+ range-based for loops
+
+ @liveexample{The following code shows how the wrapper is used,iterator_wrapper}
+
+ @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+ changes in the JSON value.
+
+ @complexity Constant.
+
+ @note The name of this function is not yet final and may change in the
+ future.
+
+ @deprecated This function is deprecated and will be removed in
+ version 4.0.0 of the library. Please use @ref items() instead;
+ that is, replace `json::iterator_wrapper(j)` with `j.items()`.
+ */
+ JSON_HEDLEY_DEPRECATED_FOR(3.1.0, items())
+ static iteration_proxy<iterator> iterator_wrapper(reference ref) noexcept
+ {
+ return ref.items();
+ }
+
+ /*!
+ @copydoc iterator_wrapper(reference)
+ */
+ JSON_HEDLEY_DEPRECATED_FOR(3.1.0, items())
+ static iteration_proxy<const_iterator> iterator_wrapper(const_reference ref) noexcept
+ {
+ return ref.items();
+ }
+
+ /*!
+ @brief helper to access iterator member functions in range-based for
+
+ This function allows accessing @ref iterator::key() and @ref
+ iterator::value() during range-based for loops. In these loops, a
+ reference to the JSON values is returned, so there is no access to the
+ underlying iterator.
+
+ For loop without `items()` function:
+
+ @code{cpp}
+ for (auto it = j_object.begin(); it != j_object.end(); ++it)
+ {
+ std::cout << "key: " << it.key() << ", value:" << it.value() << '\n';
+ }
+ @endcode
+
+ Range-based for loop without `items()` function:
+
+ @code{cpp}
+ for (auto it : j_object)
+ {
+ // "it" is of type json::reference and has no key() member
+ std::cout << "value: " << it << '\n';
+ }
+ @endcode
+
+ Range-based for loop with `items()` function:
+
+ @code{cpp}
+ for (auto& el : j_object.items())
+ {
+ std::cout << "key: " << el.key() << ", value:" << el.value() << '\n';
+ }
+ @endcode
+
+ The `items()` function also allows using
+ [structured bindings](https://en.cppreference.com/w/cpp/language/structured_binding)
+ (C++17):
+
+ @code{cpp}
+ for (auto& [key, val] : j_object.items())
+ {
+ std::cout << "key: " << key << ", value:" << val << '\n';
+ }
+ @endcode
+
+ @note When iterating over an array, `key()` will return the index of the
+ element as string (see example). For primitive types (e.g., numbers),
+ `key()` returns an empty string.
+
+ @warning Using `items()` on temporary objects is dangerous. Make sure the
+ object's lifetime exceeds the iteration. See
+ <https://github.com/nlohmann/json/issues/2040> for more
+ information.
+
+ @return iteration proxy object wrapping @a ref with an interface to use in
+ range-based for loops
+
+ @liveexample{The following code shows how the function is used.,items}
+
+ @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+ changes in the JSON value.
+
+ @complexity Constant.
+
+ @since version 3.1.0, structured bindings support since 3.5.0.
+ */
+ iteration_proxy<iterator> items() noexcept
+ {
+ return iteration_proxy<iterator>(*this);
+ }
+
+ /*!
+ @copydoc items()
+ */
+ iteration_proxy<const_iterator> items() const noexcept
+ {
+ return iteration_proxy<const_iterator>(*this);
+ }
+
+ /// @}
+
+
+ //////////////
+ // capacity //
+ //////////////
+
+ /// @name capacity
+ /// @{
+
+ /*!
+ @brief checks whether the container is empty.
+
+ Checks if a JSON value has no elements (i.e. whether its @ref size is `0`).
+
+ @return The return value depends on the different types and is
+ defined as follows:
+ Value type | return value
+ ----------- | -------------
+ null | `true`
+ boolean | `false`
+ string | `false`
+ number | `false`
+ binary | `false`
+ object | result of function `object_t::empty()`
+ array | result of function `array_t::empty()`
+
+ @liveexample{The following code uses `empty()` to check if a JSON
+ object contains any elements.,empty}
+
+ @complexity Constant, as long as @ref array_t and @ref object_t satisfy
+ the Container concept; that is, their `empty()` functions have constant
+ complexity.
+
+ @iterators No changes.
+
+ @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+ @note This function does not return whether a string stored as a JSON value
+ is empty - it returns whether the JSON container itself is empty, which is
+ false in the case of a string.
+
+ @requirement This function helps `basic_json` satisfying the
+ [Container](https://en.cppreference.com/w/cpp/named_req/Container)
+ requirements:
+ - The complexity is constant.
+ - Has the semantics of `begin() == end()`.
+
+ @sa @ref size() -- returns the number of elements
+
+ @since version 1.0.0
+ */
+ bool empty() const noexcept
+ {
+ switch (m_type)
+ {
+ case value_t::null:
+ {
+ // null values are empty
+ return true;
+ }
+
+ case value_t::array:
+ {
+ // delegate call to array_t::empty()
+ return m_value.array->empty();
+ }
+
+ case value_t::object:
+ {
+ // delegate call to object_t::empty()
+ return m_value.object->empty();
+ }
+
+ default:
+ {
+ // all other types are nonempty
+ return false;
+ }
+ }
+ }
+
+ /*!
+ @brief returns the number of elements
+
+ Returns the number of elements in a JSON value.
+
+ @return The return value depends on the different types and is
+ defined as follows:
+ Value type | return value
+ ----------- | -------------
+ null | `0`
+ boolean | `1`
+ string | `1`
+ number | `1`
+ binary | `1`
+ object | result of function object_t::size()
+ array | result of function array_t::size()
+
+ @liveexample{The following code calls `size()` on the different value
+ types.,size}
+
+ @complexity Constant, as long as @ref array_t and @ref object_t satisfy
+ the Container concept; that is, their size() functions have constant
+ complexity.
+
+ @iterators No changes.
+
+ @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+ @note This function does not return the length of a string stored as a JSON
+ value - it returns the number of elements in the JSON value, which is 1 in
+ the case of a string.
+
+ @requirement This function helps `basic_json` satisfying the
+ [Container](https://en.cppreference.com/w/cpp/named_req/Container)
+ requirements:
+ - The complexity is constant.
+ - Has the semantics of `std::distance(begin(), end())`.
+
+ @sa @ref empty() -- checks whether the container is empty
+ @sa @ref max_size() -- returns the maximal number of elements
+
+ @since version 1.0.0
+ */
+ size_type size() const noexcept
+ {
+ switch (m_type)
+ {
+ case value_t::null:
+ {
+ // null values are empty
+ return 0;
+ }
+
+ case value_t::array:
+ {
+ // delegate call to array_t::size()
+ return m_value.array->size();
+ }
+
+ case value_t::object:
+ {
+ // delegate call to object_t::size()
+ return m_value.object->size();
+ }
+
+ default:
+ {
+ // all other types have size 1
+ return 1;
+ }
+ }
+ }
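+
+ // A minimal usage sketch for empty() and size() (assuming `using json = nlohmann::json;`):
+ //
+ //     json null_value;           // null
+ //     null_value.empty();        // true, size() == 0
+ //     json str = "text";
+ //     str.empty();               // false, size() == 1
+ //     json arr = {1, 2, 3};
+ //     arr.size();                // 3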
+
+ /*!
+ @brief returns the maximum possible number of elements
+
+ Returns the maximum number of elements a JSON value is able to hold due to
+ system or library implementation limitations, i.e. `std::distance(begin(),
+ end())` for the JSON value.
+
+ @return The return value depends on the different types and is
+ defined as follows:
+ Value type | return value
+ ----------- | -------------
+ null | `0` (same as `size()`)
+ boolean | `1` (same as `size()`)
+ string | `1` (same as `size()`)
+ number | `1` (same as `size()`)
+ binary | `1` (same as `size()`)
+ object | result of function `object_t::max_size()`
+ array | result of function `array_t::max_size()`
+
+ @liveexample{The following code calls `max_size()` on the different value
+ types. Note the output is implementation specific.,max_size}
+
+ @complexity Constant, as long as @ref array_t and @ref object_t satisfy
+ the Container concept; that is, their `max_size()` functions have constant
+ complexity.
+
+ @iterators No changes.
+
+ @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+ @requirement This function helps `basic_json` satisfying the
+ [Container](https://en.cppreference.com/w/cpp/named_req/Container)
+ requirements:
+ - The complexity is constant.
+ - Has the semantics of returning `b.size()` where `b` is the largest
+ possible JSON value.
+
+ @sa @ref size() -- returns the number of elements
+
+ @since version 1.0.0
+ */
+ size_type max_size() const noexcept
+ {
+ switch (m_type)
+ {
+ case value_t::array:
+ {
+ // delegate call to array_t::max_size()
+ return m_value.array->max_size();
+ }
+
+ case value_t::object:
+ {
+ // delegate call to object_t::max_size()
+ return m_value.object->max_size();
+ }
+
+ default:
+ {
+ // all other types have max_size() == size()
+ return size();
+ }
+ }
+ }
+
+ /// @}
+
+
+ ///////////////
+ // modifiers //
+ ///////////////
+
+ /// @name modifiers
+ /// @{
+
+ /*!
+ @brief clears the contents
+
+ Clears the content of a JSON value and resets it to the default value as
+ if @ref basic_json(value_t) would have been called with the current value
+ type from @ref type():
+
+ Value type | initial value
+ ----------- | -------------
+ null | `null`
+ boolean | `false`
+ string | `""`
+ number | `0`
+ binary | An empty byte vector
+ object | `{}`
+ array | `[]`
+
+ @post Has the same effect as calling
+ @code {.cpp}
+ *this = basic_json(type());
+ @endcode
+
+ @liveexample{The example below shows the effect of `clear()` to different
+ JSON types.,clear}
+
+ @complexity Linear in the size of the JSON value.
+
+ @iterators All iterators, pointers and references related to this container
+ are invalidated.
+
+ @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+ @sa @ref basic_json(value_t) -- constructor that creates an object with the
+ same value as calling `clear()`
+
+ @since version 1.0.0
+ */
+ void clear() noexcept
+ {
+ switch (m_type)
+ {
+ case value_t::number_integer:
+ {
+ m_value.number_integer = 0;
+ break;
+ }
+
+ case value_t::number_unsigned:
+ {
+ m_value.number_unsigned = 0;
+ break;
+ }
+
+ case value_t::number_float:
+ {
+ m_value.number_float = 0.0;
+ break;
+ }
+
+ case value_t::boolean:
+ {
+ m_value.boolean = false;
+ break;
+ }
+
+ case value_t::string:
+ {
+ m_value.string->clear();
+ break;
+ }
+
+ case value_t::binary:
+ {
+ m_value.binary->clear();
+ break;
+ }
+
+ case value_t::array:
+ {
+ m_value.array->clear();
+ break;
+ }
+
+ case value_t::object:
+ {
+ m_value.object->clear();
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
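+
+ // A minimal usage sketch for clear() (assuming `using json = nlohmann::json;`);
+ // note that the stored type is kept and only the value is reset:
+ //
+ //     json str = "text";
+ //     str.clear();               // str == ""
+ //     json arr = {1, 2, 3};
+ //     arr.clear();               // arr == []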
+
+ /*!
+ @brief add an object to an array
+
+ Appends the given element @a val to the end of the JSON value. If the
+ function is called on a JSON null value, an empty array is created before
+ appending @a val.
+
+ @param[in] val the value to add to the JSON array
+
+ @throw type_error.308 when called on a type other than JSON array or
+ null; example: `"cannot use push_back() with number"`
+
+ @complexity Amortized constant.
+
+ @liveexample{The example shows how `push_back()` and `+=` can be used to
+ add elements to a JSON array. Note how the `null` value was silently
+ converted to a JSON array.,push_back}
+
+ @since version 1.0.0
+ */
+ void push_back(basic_json&& val)
+ {
+ // push_back only works for null objects or arrays
+ if (JSON_HEDLEY_UNLIKELY(!(is_null() || is_array())))
+ {
+ JSON_THROW(type_error::create(308, "cannot use push_back() with " + std::string(type_name())));
+ }
+
+ // transform null object into an array
+ if (is_null())
+ {
+ m_type = value_t::array;
+ m_value = value_t::array;
+ assert_invariant();
+ }
+
+ // add element to array (move semantics)
+ m_value.array->push_back(std::move(val));
+ // if val is moved from, basic_json move constructor marks it null so we do not call the destructor
+ }
+
+ /*!
+ @brief add an object to an array
+ @copydoc push_back(basic_json&&)
+ */
+ reference operator+=(basic_json&& val)
+ {
+ push_back(std::move(val));
+ return *this;
+ }
+
+ /*!
+ @brief add an object to an array
+ @copydoc push_back(basic_json&&)
+ */
+ void push_back(const basic_json& val)
+ {
+ // push_back only works for null objects or arrays
+ if (JSON_HEDLEY_UNLIKELY(!(is_null() || is_array())))
+ {
+ JSON_THROW(type_error::create(308, "cannot use push_back() with " + std::string(type_name())));
+ }
+
+ // transform null object into an array
+ if (is_null())
+ {
+ m_type = value_t::array;
+ m_value = value_t::array;
+ assert_invariant();
+ }
+
+ // add element to array
+ m_value.array->push_back(val);
+ }
+
+ /*!
+ @brief add an object to an array
+ @copydoc push_back(basic_json&&)
+ */
+ reference operator+=(const basic_json& val)
+ {
+ push_back(val);
+ return *this;
+ }
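+
+ // A minimal usage sketch for push_back() and operator+= on arrays (assuming
+ // `using json = nlohmann::json;`):
+ //
+ //     json arr;                  // null
+ //     arr.push_back(1);          // null is turned into an array: [1]
+ //     arr += 2;                  // [1, 2]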
+
+ /*!
+ @brief add an object to an object
+
+ Inserts the given element @a val to the JSON object. If the function is
+ called on a JSON null value, an empty object is created before inserting
+ @a val.
+
+ @param[in] val the value to add to the JSON object
+
+ @throw type_error.308 when called on a type other than JSON object or
+ null; example: `"cannot use push_back() with number"`
+
+ @complexity Logarithmic in the size of the container, O(log(`size()`)).
+
+ @liveexample{The example shows how `push_back()` and `+=` can be used to
+ add elements to a JSON object. Note how the `null` value was silently
+ converted to a JSON object.,push_back__object_t__value}
+
+ @since version 1.0.0
+ */
+ void push_back(const typename object_t::value_type& val)
+ {
+ // push_back only works for null objects or objects
+ if (JSON_HEDLEY_UNLIKELY(!(is_null() || is_object())))
+ {
+ JSON_THROW(type_error::create(308, "cannot use push_back() with " + std::string(type_name())));
+ }
+
+ // transform null object into an object
+ if (is_null())
+ {
+ m_type = value_t::object;
+ m_value = value_t::object;
+ assert_invariant();
+ }
+
+ // add element to object
+ m_value.object->insert(val);
+ }
+
+ /*!
+ @brief add an object to an object
+ @copydoc push_back(const typename object_t::value_type&)
+ */
+ reference operator+=(const typename object_t::value_type& val)
+ {
+ push_back(val);
+ return *this;
+ }
+
+ /*!
+ @brief add an object to an object
+
+ This function allows using `push_back` with an initializer list. In case
+
+ 1. the current value is an object,
+ 2. the initializer list @a init contains only two elements, and
+ 3. the first element of @a init is a string,
+
+ @a init is converted into an object element and added using
+ @ref push_back(const typename object_t::value_type&). Otherwise, @a init
+ is converted to a JSON value and added using @ref push_back(basic_json&&).
+
+ @param[in] init an initializer list
+
+ @complexity Linear in the size of the initializer list @a init.
+
+ @note This function is required to resolve an ambiguous overload error,
+ because pairs like `{"key", "value"}` can be both interpreted as
+ `object_t::value_type` or `std::initializer_list<basic_json>`, see
+ https://github.com/nlohmann/json/issues/235 for more information.
+
+ @liveexample{The example shows how initializer lists are treated as
+ objects when possible.,push_back__initializer_list}
+ */
+ void push_back(initializer_list_t init)
+ {
+ if (is_object() && init.size() == 2 && (*init.begin())->is_string())
+ {
+ basic_json&& key = init.begin()->moved_or_copied();
+ push_back(typename object_t::value_type(
+ std::move(key.get_ref<string_t&>()), (init.begin() + 1)->moved_or_copied()));
+ }
+ else
+ {
+ push_back(basic_json(init));
+ }
+ }
+
+ /*!
+ @brief add an object to an object
+ @copydoc push_back(initializer_list_t)
+ */
+ reference operator+=(initializer_list_t init)
+ {
+ push_back(init);
+ return *this;
+ }
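+
+ // A minimal usage sketch for push_back() on objects (assuming
+ // `using json = nlohmann::json;`); the keys are illustrative only:
+ //
+ //     json obj = {{"a", 1}};
+ //     obj.push_back({"b", 2});   // two-element list with a string first -> key/value pair
+ //                                // obj == {"a": 1, "b": 2}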
+
+ /*!
+ @brief add an object to an array
+
+ Creates a JSON value from the passed parameters @a args to the end of the
+ JSON value. If the function is called on a JSON null value, an empty array
+ is created before appending the value created from @a args.
+
+ @param[in] args arguments to forward to a constructor of @ref basic_json
+ @tparam Args compatible types to create a @ref basic_json object
+
+ @return reference to the inserted element
+
+ @throw type_error.311 when called on a type other than JSON array or
+ null; example: `"cannot use emplace_back() with number"`
+
+ @complexity Amortized constant.
+
+ @liveexample{The example shows how `push_back()` can be used to add
+ elements to a JSON array. Note how the `null` value was silently converted
+ to a JSON array.,emplace_back}
+
+ @since version 2.0.8, returns reference since 3.7.0
+ */
+ template<class... Args>
+ reference emplace_back(Args&& ... args)
+ {
+ // emplace_back only works for null objects or arrays
+ if (JSON_HEDLEY_UNLIKELY(!(is_null() || is_array())))
+ {
+ JSON_THROW(type_error::create(311, "cannot use emplace_back() with " + std::string(type_name())));
+ }
+
+ // transform null object into an array
+ if (is_null())
+ {
+ m_type = value_t::array;
+ m_value = value_t::array;
+ assert_invariant();
+ }
+
+ // add element to array (perfect forwarding)
+#ifdef JSON_HAS_CPP_17
+ return m_value.array->emplace_back(std::forward<Args>(args)...);
+#else
+ m_value.array->emplace_back(std::forward<Args>(args)...);
+ return m_value.array->back();
+#endif
+ }
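+
+ // A minimal usage sketch for emplace_back() (assuming `using json = nlohmann::json;`):
+ //
+ //     json arr = {1, 2};
+ //     json& ref = arr.emplace_back(3);   // arr == [1, 2, 3]; ref refers to the new element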
+
+ /*!
+ @brief add an object to an object if key does not exist
+
+ Inserts a new element into a JSON object constructed in-place with the
+ given @a args if there is no element with the key in the container. If the
+ function is called on a JSON null value, an empty object is created before
+ appending the value created from @a args.
+
+ @param[in] args arguments to forward to a constructor of @ref basic_json
+ @tparam Args compatible types to create a @ref basic_json object
+
+ @return a pair consisting of an iterator to the inserted element, or the
+ already-existing element if no insertion happened, and a bool
+ denoting whether the insertion took place.
+
+ @throw type_error.311 when called on a type other than JSON object or
+ null; example: `"cannot use emplace() with number"`
+
+ @complexity Logarithmic in the size of the container, O(log(`size()`)).
+
+ @liveexample{The example shows how `emplace()` can be used to add elements
+ to a JSON object. Note how the `null` value was silently converted to a
+ JSON object. Further note how no value is added if there was already one
+ value stored with the same key.,emplace}
+
+ @since version 2.0.8
+ */
+ template<class... Args>
+ std::pair<iterator, bool> emplace(Args&& ... args)
+ {
+ // emplace only works for null values or objects
+ if (JSON_HEDLEY_UNLIKELY(!(is_null() || is_object())))
+ {
+ JSON_THROW(type_error::create(311, "cannot use emplace() with " + std::string(type_name())));
+ }
+
+ // transform null object into an object
+ if (is_null())
+ {
+ m_type = value_t::object;
+ m_value = value_t::object;
+ assert_invariant();
+ }
+
+ // add element to object (perfect forwarding)
+ auto res = m_value.object->emplace(std::forward<Args>(args)...);
+ // create result iterator and set iterator to the result of emplace
+ auto it = begin();
+ it.m_it.object_iterator = res.first;
+
+ // return pair of iterator and boolean
+ return {it, res.second};
+ }
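+
+ // A minimal usage sketch for emplace() (assuming `using json = nlohmann::json;`);
+ // the key is illustrative only:
+ //
+ //     json obj;                               // null
+ //     auto res = obj.emplace("port", 5665);   // inserted, res.second == true
+ //     obj.emplace("port", 8080);              // key exists, value unchanged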
+
+ /// Helper for insertion of an iterator
+ /// @note: This uses std::distance to support GCC 4.8,
+ /// see https://github.com/nlohmann/json/pull/1257
+ template<typename... Args>
+ iterator insert_iterator(const_iterator pos, Args&& ... args)
+ {
+ iterator result(this);
+ JSON_ASSERT(m_value.array != nullptr);
+
+ auto insert_pos = std::distance(m_value.array->begin(), pos.m_it.array_iterator);
+ m_value.array->insert(pos.m_it.array_iterator, std::forward<Args>(args)...);
+ result.m_it.array_iterator = m_value.array->begin() + insert_pos;
+
+ // This could have been written as:
+ // result.m_it.array_iterator = m_value.array->insert(pos.m_it.array_iterator, cnt, val);
+ // but the return value of insert is missing in GCC 4.8, so it is written this way instead.
+
+ return result;
+ }
+
+ /*!
+ @brief inserts element
+
+ Inserts element @a val before iterator @a pos.
+
+ @param[in] pos iterator before which the content will be inserted; may be
+ the end() iterator
+ @param[in] val element to insert
+ @return iterator pointing to the inserted @a val.
+
+ @throw type_error.309 if called on JSON values other than arrays;
+ example: `"cannot use insert() with string"`
+ @throw invalid_iterator.202 if @a pos is not an iterator of *this;
+ example: `"iterator does not fit current value"`
+
+ @complexity Constant plus linear in the distance between @a pos and end of
+ the container.
+
+ @liveexample{The example shows how `insert()` is used.,insert}
+
+ @since version 1.0.0
+ */
+ iterator insert(const_iterator pos, const basic_json& val)
+ {
+ // insert only works for arrays
+ if (JSON_HEDLEY_LIKELY(is_array()))
+ {
+ // check if iterator pos fits to this JSON value
+ if (JSON_HEDLEY_UNLIKELY(pos.m_object != this))
+ {
+ JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value"));
+ }
+
+ // insert to array and return iterator
+ return insert_iterator(pos, val);
+ }
+
+ JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name())));
+ }
+
+ /*!
+ @brief inserts element
+ @copydoc insert(const_iterator, const basic_json&)
+ */
+ iterator insert(const_iterator pos, basic_json&& val)
+ {
+ return insert(pos, val);
+ }
+
+ /*!
+ @brief inserts elements
+
+ Inserts @a cnt copies of @a val before iterator @a pos.
+
+ @param[in] pos iterator before which the content will be inserted; may be
+ the end() iterator
+ @param[in] cnt number of copies of @a val to insert
+ @param[in] val element to insert
+ @return iterator pointing to the first element inserted, or @a pos if
+ `cnt==0`
+
+ @throw type_error.309 if called on JSON values other than arrays; example:
+ `"cannot use insert() with string"`
+ @throw invalid_iterator.202 if @a pos is not an iterator of *this;
+ example: `"iterator does not fit current value"`
+
+ @complexity Linear in @a cnt plus linear in the distance between @a pos
+ and end of the container.
+
+ @liveexample{The example shows how `insert()` is used.,insert__count}
+
+ @since version 1.0.0
+ */
+ iterator insert(const_iterator pos, size_type cnt, const basic_json& val)
+ {
+ // insert only works for arrays
+ if (JSON_HEDLEY_LIKELY(is_array()))
+ {
+ // check if iterator pos fits to this JSON value
+ if (JSON_HEDLEY_UNLIKELY(pos.m_object != this))
+ {
+ JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value"));
+ }
+
+ // insert to array and return iterator
+ return insert_iterator(pos, cnt, val);
+ }
+
+ JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name())));
+ }
+
+ /*!
+ @brief inserts elements
+
+ Inserts elements from range `[first, last)` before iterator @a pos.
+
+ @param[in] pos iterator before which the content will be inserted; may be
+ the end() iterator
+ @param[in] first begin of the range of elements to insert
+ @param[in] last end of the range of elements to insert
+
+ @throw type_error.309 if called on JSON values other than arrays; example:
+ `"cannot use insert() with string"`
+ @throw invalid_iterator.202 if @a pos is not an iterator of *this;
+ example: `"iterator does not fit current value"`
+ @throw invalid_iterator.210 if @a first and @a last do not belong to the
+ same JSON value; example: `"iterators do not fit"`
+ @throw invalid_iterator.211 if @a first or @a last are iterators into
+ container for which insert is called; example: `"passed iterators may not
+ belong to container"`
+
+ @return iterator pointing to the first element inserted, or @a pos if
+ `first==last`
+
+ @complexity Linear in `std::distance(first, last)` plus linear in the
+ distance between @a pos and end of the container.
+
+ @liveexample{The example shows how `insert()` is used.,insert__range}
+
+ @since version 1.0.0
+ */
+ iterator insert(const_iterator pos, const_iterator first, const_iterator last)
+ {
+ // insert only works for arrays
+ if (JSON_HEDLEY_UNLIKELY(!is_array()))
+ {
+ JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name())));
+ }
+
+ // check if iterator pos fits to this JSON value
+ if (JSON_HEDLEY_UNLIKELY(pos.m_object != this))
+ {
+ JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value"));
+ }
+
+ // check if range iterators belong to the same JSON object
+ if (JSON_HEDLEY_UNLIKELY(first.m_object != last.m_object))
+ {
+ JSON_THROW(invalid_iterator::create(210, "iterators do not fit"));
+ }
+
+ if (JSON_HEDLEY_UNLIKELY(first.m_object == this))
+ {
+ JSON_THROW(invalid_iterator::create(211, "passed iterators may not belong to container"));
+ }
+
+ // insert to array and return iterator
+ return insert_iterator(pos, first.m_it.array_iterator, last.m_it.array_iterator);
+ }
+
+ /*!
+ @brief inserts elements
+
+ Inserts elements from initializer list @a ilist before iterator @a pos.
+
+ @param[in] pos iterator before which the content will be inserted; may be
+ the end() iterator
+ @param[in] ilist initializer list to insert the values from
+
+ @throw type_error.309 if called on JSON values other than arrays; example:
+ `"cannot use insert() with string"`
+ @throw invalid_iterator.202 if @a pos is not an iterator of *this;
+ example: `"iterator does not fit current value"`
+
+ @return iterator pointing to the first element inserted, or @a pos if
+ `ilist` is empty
+
+ @complexity Linear in `ilist.size()` plus linear in the distance between
+ @a pos and end of the container.
+
+ @liveexample{The example shows how `insert()` is used.,insert__ilist}
+
+ @since version 1.0.0
+ */
+ iterator insert(const_iterator pos, initializer_list_t ilist)
+ {
+ // insert only works for arrays
+ if (JSON_HEDLEY_UNLIKELY(!is_array()))
+ {
+ JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name())));
+ }
+
+ // check if iterator pos fits to this JSON value
+ if (JSON_HEDLEY_UNLIKELY(pos.m_object != this))
+ {
+ JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value"));
+ }
+
+ // insert to array and return iterator
+ return insert_iterator(pos, ilist.begin(), ilist.end());
+ }
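+
+ // A minimal usage sketch for the array overloads of insert() (assuming
+ // `using json = nlohmann::json;`):
+ //
+ //     json arr = {1, 4};
+ //     arr.insert(arr.begin() + 1, 2);   // single value:     [1, 2, 4]
+ //     arr.insert(arr.end(), 2, 0);      // two copies of 0:  [1, 2, 4, 0, 0]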
+
+ /*!
+ @brief inserts elements
+
+ Inserts elements from range `[first, last)`.
+
+ @param[in] first begin of the range of elements to insert
+ @param[in] last end of the range of elements to insert
+
+ @throw type_error.309 if called on JSON values other than objects; example:
+ `"cannot use insert() with string"`
+ @throw invalid_iterator.202 if iterator @a first or @a last does not
+ point to an object; example: `"iterators first and last must point to
+ objects"`
+ @throw invalid_iterator.210 if @a first and @a last do not belong to the
+ same JSON value; example: `"iterators do not fit"`
+
+ @complexity Logarithmic: `O(N*log(size() + N))`, where `N` is the number
+ of elements to insert.
+
+ @liveexample{The example shows how `insert()` is used.,insert__range_object}
+
+ @since version 3.0.0
+ */
+ void insert(const_iterator first, const_iterator last)
+ {
+ // insert only works for objects
+ if (JSON_HEDLEY_UNLIKELY(!is_object()))
+ {
+ JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name())));
+ }
+
+ // check if range iterators belong to the same JSON object
+ if (JSON_HEDLEY_UNLIKELY(first.m_object != last.m_object))
+ {
+ JSON_THROW(invalid_iterator::create(210, "iterators do not fit"));
+ }
+
+ // passed iterators must belong to objects
+ if (JSON_HEDLEY_UNLIKELY(!first.m_object->is_object()))
+ {
+ JSON_THROW(invalid_iterator::create(202, "iterators first and last must point to objects"));
+ }
+
+ m_value.object->insert(first.m_it.object_iterator, last.m_it.object_iterator);
+ }
+
+ /*!
+ @brief updates a JSON object from another object, overwriting existing keys
+
+ Inserts all values from JSON object @a j and overwrites existing keys.
+
+ @param[in] j JSON object to read values from
+
+ @throw type_error.312 if called on JSON values other than objects; example:
+ `"cannot use update() with string"`
+
+ @complexity O(N*log(size() + N)), where N is the number of elements to
+ insert.
+
+ @liveexample{The example shows how `update()` is used.,update}
+
+ @sa https://docs.python.org/3.6/library/stdtypes.html#dict.update
+
+ @since version 3.0.0
+ */
+ void update(const_reference j)
+ {
+ // implicitly convert null value to an empty object
+ if (is_null())
+ {
+ m_type = value_t::object;
+ m_value.object = create<object_t>();
+ assert_invariant();
+ }
+
+ if (JSON_HEDLEY_UNLIKELY(!is_object()))
+ {
+ JSON_THROW(type_error::create(312, "cannot use update() with " + std::string(type_name())));
+ }
+ if (JSON_HEDLEY_UNLIKELY(!j.is_object()))
+ {
+ JSON_THROW(type_error::create(312, "cannot use update() with " + std::string(j.type_name())));
+ }
+
+ for (auto it = j.cbegin(); it != j.cend(); ++it)
+ {
+ m_value.object->operator[](it.key()) = it.value();
+ }
+ }
+
+ /*!
+ @brief updates a JSON object from another object, overwriting existing keys
+
+ Inserts all values from the range `[first, last)` and overwrites existing
+ keys.
+
+ @param[in] first begin of the range of elements to insert
+ @param[in] last end of the range of elements to insert
+
+ @throw type_error.312 if called on JSON values other than objects; example:
+ `"cannot use update() with string"`
+ @throw invalid_iterator.202 if iterator @a first or @a last does not
+ point to an object; example: `"iterators first and last must point to
+ objects"`
+ @throw invalid_iterator.210 if @a first and @a last do not belong to the
+ same JSON value; example: `"iterators do not fit"`
+
+ @complexity O(N*log(size() + N)), where N is the number of elements to
+ insert.
+
+ @liveexample{The example shows how `update()` is used.,update__range}
+
+ @sa https://docs.python.org/3.6/library/stdtypes.html#dict.update
+
+ @since version 3.0.0
+ */
+ void update(const_iterator first, const_iterator last)
+ {
+ // implicitly convert null value to an empty object
+ if (is_null())
+ {
+ m_type = value_t::object;
+ m_value.object = create<object_t>();
+ assert_invariant();
+ }
+
+ if (JSON_HEDLEY_UNLIKELY(!is_object()))
+ {
+ JSON_THROW(type_error::create(312, "cannot use update() with " + std::string(type_name())));
+ }
+
+ // check if range iterators belong to the same JSON object
+ if (JSON_HEDLEY_UNLIKELY(first.m_object != last.m_object))
+ {
+ JSON_THROW(invalid_iterator::create(210, "iterators do not fit"));
+ }
+
+ // passed iterators must belong to objects
+ if (JSON_HEDLEY_UNLIKELY(!first.m_object->is_object()
+ || !last.m_object->is_object()))
+ {
+ JSON_THROW(invalid_iterator::create(202, "iterators first and last must point to objects"));
+ }
+
+ for (auto it = first; it != last; ++it)
+ {
+ m_value.object->operator[](it.key()) = it.value();
+ }
+ }
+
+ /*!
+ @brief exchanges the values
+
+ Exchanges the contents of the JSON value with those of @a other. Does not
+ invoke any move, copy, or swap operations on individual elements. All
+ iterators and references remain valid. The past-the-end iterator is
+ invalidated.
+
+ @param[in,out] other JSON value to exchange the contents with
+
+ @complexity Constant.
+
+ @liveexample{The example below shows how JSON values can be swapped with
+ `swap()`.,swap__reference}
+
+ @since version 1.0.0
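+
+ @note A minimal usage sketch (illustrative only; assumes `json` is the
+ default `nlohmann::json` specialization):
+ @code {.cpp}
+ json a = {1, 2, 3};
+ json b = {{"key", "value"}};
+ // exchange the two values in constant time
+ a.swap(b);
+ // a is now the object, b is now the array
+ @endcode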
+ */
+ void swap(reference other) noexcept (
+ std::is_nothrow_move_constructible<value_t>::value&&
+ std::is_nothrow_move_assignable<value_t>::value&&
+ std::is_nothrow_move_constructible<json_value>::value&&
+ std::is_nothrow_move_assignable<json_value>::value
+ )
+ {
+ std::swap(m_type, other.m_type);
+ std::swap(m_value, other.m_value);
+ assert_invariant();
+ }
+
+ /*!
+ @brief exchanges the values
+
+ Exchanges the contents of the JSON value @a left with those of @a right. Does not
+ invoke any move, copy, or swap operations on individual elements. All
+ iterators and references remain valid. The past-the-end iterator is
+ invalidated. Implemented as a friend function callable via ADL.
+
+ @param[in,out] left JSON value to exchange the contents with
+ @param[in,out] right JSON value to exchange the contents with
+
+ @complexity Constant.
+
+ @liveexample{The example below shows how JSON values can be swapped with
+ `swap()`.,swap__reference}
+
+ @since version 1.0.0
+ */
+ friend void swap(reference left, reference right) noexcept (
+ std::is_nothrow_move_constructible<value_t>::value&&
+ std::is_nothrow_move_assignable<value_t>::value&&
+ std::is_nothrow_move_constructible<json_value>::value&&
+ std::is_nothrow_move_assignable<json_value>::value
+ )
+ {
+ left.swap(right);
+ }
+
+ /*!
+ @brief exchanges the values
+
+ Exchanges the contents of a JSON array with those of @a other. Does not
+ invoke any move, copy, or swap operations on individual elements. All
+ iterators and references remain valid. The past-the-end iterator is
+ invalidated.
+
+ @param[in,out] other array to exchange the contents with
+
+ @throw type_error.310 when JSON value is not an array; example: `"cannot
+ use swap() with string"`
+
+ @complexity Constant.
+
+ @liveexample{The example below shows how arrays can be swapped with
+ `swap()`.,swap__array_t}
+
+ @since version 1.0.0
+ */
+ void swap(array_t& other)
+ {
+ // swap only works for arrays
+ if (JSON_HEDLEY_LIKELY(is_array()))
+ {
+ std::swap(*(m_value.array), other);
+ }
+ else
+ {
+ JSON_THROW(type_error::create(310, "cannot use swap() with " + std::string(type_name())));
+ }
+ }
+
+ /*!
+ @brief exchanges the values
+
+ Exchanges the contents of a JSON object with those of @a other. Does not
+ invoke any move, copy, or swap operations on individual elements. All
+ iterators and references remain valid. The past-the-end iterator is
+ invalidated.
+
+ @param[in,out] other object to exchange the contents with
+
+ @throw type_error.310 when JSON value is not an object; example:
+ `"cannot use swap() with string"`
+
+ @complexity Constant.
+
+ @liveexample{The example below shows how objects can be swapped with
+ `swap()`.,swap__object_t}
+
+ @since version 1.0.0
+ */
+ void swap(object_t& other)
+ {
+ // swap only works for objects
+ if (JSON_HEDLEY_LIKELY(is_object()))
+ {
+ std::swap(*(m_value.object), other);
+ }
+ else
+ {
+ JSON_THROW(type_error::create(310, "cannot use swap() with " + std::string(type_name())));
+ }
+ }
+
+ /*!
+ @brief exchanges the values
+
+ Exchanges the contents of a JSON string with those of @a other. Does not
+ invoke any move, copy, or swap operations on individual elements. All
+ iterators and references remain valid. The past-the-end iterator is
+ invalidated.
+
+ @param[in,out] other string to exchange the contents with
+
+ @throw type_error.310 when JSON value is not a string; example: `"cannot
+ use swap() with boolean"`
+
+ @complexity Constant.
+
+ @liveexample{The example below shows how strings can be swapped with
+ `swap()`.,swap__string_t}
+
+ @since version 1.0.0
+ */
+ void swap(string_t& other)
+ {
+ // swap only works for strings
+ if (JSON_HEDLEY_LIKELY(is_string()))
+ {
+ std::swap(*(m_value.string), other);
+ }
+ else
+ {
+ JSON_THROW(type_error::create(310, "cannot use swap() with " + std::string(type_name())));
+ }
+ }
+
+ /*!
+ @brief exchanges the values
+
+ Exchanges the contents of a JSON binary value with those of @a other. Does not
+ invoke any move, copy, or swap operations on individual elements. All
+ iterators and references remain valid. The past-the-end iterator is
+ invalidated.
+
+ @param[in,out] other binary to exchange the contents with
+
+ @throw type_error.310 when JSON value is not a binary value; example: `"cannot
+ use swap() with boolean"`
+
+ @complexity Constant.
+
+ @liveexample{The example below shows how binary values can be swapped with
+ `swap()`.,swap__binary_t}
+
+ @since version 3.8.0
+ */
+ void swap(binary_t& other)
+ {
+ // swap only works for binary values
+ if (JSON_HEDLEY_LIKELY(is_binary()))
+ {
+ std::swap(*(m_value.binary), other);
+ }
+ else
+ {
+ JSON_THROW(type_error::create(310, "cannot use swap() with " + std::string(type_name())));
+ }
+ }
+
+ /// @copydoc swap(binary_t)
+ void swap(typename binary_t::container_type& other)
+ {
+ // swap only works for binary values
+ if (JSON_HEDLEY_LIKELY(is_binary()))
+ {
+ std::swap(*(m_value.binary), other);
+ }
+ else
+ {
+ JSON_THROW(type_error::create(310, "cannot use swap() with " + std::string(type_name())));
+ }
+ }
+
+ /// @}
+
+ public:
+ //////////////////////////////////////////
+ // lexicographical comparison operators //
+ //////////////////////////////////////////
+
+ /// @name lexicographical comparison operators
+ /// @{
+
+ /*!
+ @brief comparison: equal
+
+ Compares two JSON values for equality according to the following rules:
+ - Two JSON values are equal if (1) they are of the same type and (2)
+ their stored values are the same according to their respective
+ `operator==`.
+ - Integer and floating-point numbers are automatically converted before
+ comparison. Note that two NaN values are always treated as unequal.
+ - Two JSON null values are equal.
+
+ @note Floating-point numbers inside JSON values are compared with
+ `json::number_float_t::operator==` which is `double::operator==` by
+ default. To compare floating-point values while respecting an epsilon, an alternative
+ [comparison function](https://github.com/mariokonrad/marnav/blob/master/include/marnav/math/floatingpoint.hpp#L34-#L39)
+ could be used, for instance
+ @code {.cpp}
+ template<typename T, typename = typename std::enable_if<std::is_floating_point<T>::value, T>::type>
+ inline bool is_same(T a, T b, T epsilon = std::numeric_limits<T>::epsilon()) noexcept
+ {
+ return std::abs(a - b) <= epsilon;
+ }
+ @endcode
+ Or you can define your own equality comparison function like this:
+ @code {.cpp}
+ bool my_equal(const_reference lhs, const_reference rhs) {
+     const auto lhs_type = lhs.type();
+     const auto rhs_type = rhs.type();
+     if (lhs_type == rhs_type) {
+         switch (lhs_type)
+         {
+             // self-defined case for floating-point numbers
+             case value_t::number_float:
+                 return std::abs(lhs.get<double>() - rhs.get<double>()) <= std::numeric_limits<double>::epsilon();
+             // other cases remain the same as in the original
+             ...
+         }
+     }
+     ...
+ }
+ @endcode
+
+ @note NaN values never compare equal to themselves or to other NaN values.
+
+ @param[in] lhs first JSON value to consider
+ @param[in] rhs second JSON value to consider
+ @return whether the values @a lhs and @a rhs are equal
+
+ @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+ @complexity Linear.
+
+ @liveexample{The example demonstrates comparing several JSON
+ types.,operator__equal}
+
+ @since version 1.0.0
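+
+ @note A minimal sketch of the numeric conversion rule (illustrative only;
+ assumes `json` is the default `nlohmann::json` specialization):
+ @code {.cpp}
+ json i = 17;            // number_integer
+ json f = 17.0;          // number_float
+ bool same = (i == f);   // true: numbers are converted before comparison
+ bool diff = (json(17) == json("17")); // false: different value types
+ @endcode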
+ */
+ friend bool operator==(const_reference lhs, const_reference rhs) noexcept
+ {
+ const auto lhs_type = lhs.type();
+ const auto rhs_type = rhs.type();
+
+ if (lhs_type == rhs_type)
+ {
+ switch (lhs_type)
+ {
+ case value_t::array:
+ return *lhs.m_value.array == *rhs.m_value.array;
+
+ case value_t::object:
+ return *lhs.m_value.object == *rhs.m_value.object;
+
+ case value_t::null:
+ return true;
+
+ case value_t::string:
+ return *lhs.m_value.string == *rhs.m_value.string;
+
+ case value_t::boolean:
+ return lhs.m_value.boolean == rhs.m_value.boolean;
+
+ case value_t::number_integer:
+ return lhs.m_value.number_integer == rhs.m_value.number_integer;
+
+ case value_t::number_unsigned:
+ return lhs.m_value.number_unsigned == rhs.m_value.number_unsigned;
+
+ case value_t::number_float:
+ return lhs.m_value.number_float == rhs.m_value.number_float;
+
+ case value_t::binary:
+ return *lhs.m_value.binary == *rhs.m_value.binary;
+
+ default:
+ return false;
+ }
+ }
+ else if (lhs_type == value_t::number_integer && rhs_type == value_t::number_float)
+ {
+ return static_cast<number_float_t>(lhs.m_value.number_integer) == rhs.m_value.number_float;
+ }
+ else if (lhs_type == value_t::number_float && rhs_type == value_t::number_integer)
+ {
+ return lhs.m_value.number_float == static_cast<number_float_t>(rhs.m_value.number_integer);
+ }
+ else if (lhs_type == value_t::number_unsigned && rhs_type == value_t::number_float)
+ {
+ return static_cast<number_float_t>(lhs.m_value.number_unsigned) == rhs.m_value.number_float;
+ }
+ else if (lhs_type == value_t::number_float && rhs_type == value_t::number_unsigned)
+ {
+ return lhs.m_value.number_float == static_cast<number_float_t>(rhs.m_value.number_unsigned);
+ }
+ else if (lhs_type == value_t::number_unsigned && rhs_type == value_t::number_integer)
+ {
+ return static_cast<number_integer_t>(lhs.m_value.number_unsigned) == rhs.m_value.number_integer;
+ }
+ else if (lhs_type == value_t::number_integer && rhs_type == value_t::number_unsigned)
+ {
+ return lhs.m_value.number_integer == static_cast<number_integer_t>(rhs.m_value.number_unsigned);
+ }
+
+ return false;
+ }
+
+ /*!
+ @brief comparison: equal
+ @copydoc operator==(const_reference, const_reference)
+ */
+ template<typename ScalarType, typename std::enable_if<
+ std::is_scalar<ScalarType>::value, int>::type = 0>
+ friend bool operator==(const_reference lhs, const ScalarType rhs) noexcept
+ {
+ return lhs == basic_json(rhs);
+ }
+
+ /*!
+ @brief comparison: equal
+ @copydoc operator==(const_reference, const_reference)
+ */
+ template<typename ScalarType, typename std::enable_if<
+ std::is_scalar<ScalarType>::value, int>::type = 0>
+ friend bool operator==(const ScalarType lhs, const_reference rhs) noexcept
+ {
+ return basic_json(lhs) == rhs;
+ }
+
+ /*!
+ @brief comparison: not equal
+
+ Compares two JSON values for inequality by calculating `not (lhs == rhs)`.
+
+ @param[in] lhs first JSON value to consider
+ @param[in] rhs second JSON value to consider
+ @return whether the values @a lhs and @a rhs are not equal
+
+ @complexity Linear.
+
+ @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+ @liveexample{The example demonstrates comparing several JSON
+ types.,operator__notequal}
+
+ @since version 1.0.0
+ */
+ friend bool operator!=(const_reference lhs, const_reference rhs) noexcept
+ {
+ return !(lhs == rhs);
+ }
+
+ /*!
+ @brief comparison: not equal
+ @copydoc operator!=(const_reference, const_reference)
+ */
+ template<typename ScalarType, typename std::enable_if<
+ std::is_scalar<ScalarType>::value, int>::type = 0>
+ friend bool operator!=(const_reference lhs, const ScalarType rhs) noexcept
+ {
+ return lhs != basic_json(rhs);
+ }
+
+ /*!
+ @brief comparison: not equal
+ @copydoc operator!=(const_reference, const_reference)
+ */
+ template<typename ScalarType, typename std::enable_if<
+ std::is_scalar<ScalarType>::value, int>::type = 0>
+ friend bool operator!=(const ScalarType lhs, const_reference rhs) noexcept
+ {
+ return basic_json(lhs) != rhs;
+ }
+
+ /*!
+ @brief comparison: less than
+
+ Compares whether one JSON value @a lhs is less than another JSON value @a
+ rhs according to the following rules:
+ - If @a lhs and @a rhs have the same type, the values are compared using
+ the default `<` operator.
+ - Integer and floating-point numbers are automatically converted before
+ comparison
+ - In case @a lhs and @a rhs have different types, the values are ignored
+ and the order of the types is considered, see
+ @ref operator<(const value_t, const value_t).
+
+ @param[in] lhs first JSON value to consider
+ @param[in] rhs second JSON value to consider
+ @return whether @a lhs is less than @a rhs
+
+ @complexity Linear.
+
+ @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+ @liveexample{The example demonstrates comparing several JSON
+ types.,operator__less}
+
+ @since version 1.0.0
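+
+ @note A minimal usage sketch (illustrative only; assumes `json` is the
+ default `nlohmann::json` specialization):
+ @code {.cpp}
+ json small = 1;
+ json big = 2;
+ bool a = (small < big);        // true: same type, values are compared
+ json null_value;               // null
+ bool b = (null_value < small); // true: different types, the type order decides (null sorts first)
+ @endcode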
+ */
+ friend bool operator<(const_reference lhs, const_reference rhs) noexcept
+ {
+ const auto lhs_type = lhs.type();
+ const auto rhs_type = rhs.type();
+
+ if (lhs_type == rhs_type)
+ {
+ switch (lhs_type)
+ {
+ case value_t::array:
+ // note parentheses are necessary, see
+ // https://github.com/nlohmann/json/issues/1530
+ return (*lhs.m_value.array) < (*rhs.m_value.array);
+
+ case value_t::object:
+ return (*lhs.m_value.object) < (*rhs.m_value.object);
+
+ case value_t::null:
+ return false;
+
+ case value_t::string:
+ return (*lhs.m_value.string) < (*rhs.m_value.string);
+
+ case value_t::boolean:
+ return (lhs.m_value.boolean) < (rhs.m_value.boolean);
+
+ case value_t::number_integer:
+ return (lhs.m_value.number_integer) < (rhs.m_value.number_integer);
+
+ case value_t::number_unsigned:
+ return (lhs.m_value.number_unsigned) < (rhs.m_value.number_unsigned);
+
+ case value_t::number_float:
+ return (lhs.m_value.number_float) < (rhs.m_value.number_float);
+
+ case value_t::binary:
+ return (*lhs.m_value.binary) < (*rhs.m_value.binary);
+
+ default:
+ return false;
+ }
+ }
+ else if (lhs_type == value_t::number_integer && rhs_type == value_t::number_float)
+ {
+ return static_cast<number_float_t>(lhs.m_value.number_integer) < rhs.m_value.number_float;
+ }
+ else if (lhs_type == value_t::number_float && rhs_type == value_t::number_integer)
+ {
+ return lhs.m_value.number_float < static_cast<number_float_t>(rhs.m_value.number_integer);
+ }
+ else if (lhs_type == value_t::number_unsigned && rhs_type == value_t::number_float)
+ {
+ return static_cast<number_float_t>(lhs.m_value.number_unsigned) < rhs.m_value.number_float;
+ }
+ else if (lhs_type == value_t::number_float && rhs_type == value_t::number_unsigned)
+ {
+ return lhs.m_value.number_float < static_cast<number_float_t>(rhs.m_value.number_unsigned);
+ }
+ else if (lhs_type == value_t::number_integer && rhs_type == value_t::number_unsigned)
+ {
+ return lhs.m_value.number_integer < static_cast<number_integer_t>(rhs.m_value.number_unsigned);
+ }
+ else if (lhs_type == value_t::number_unsigned && rhs_type == value_t::number_integer)
+ {
+ return static_cast<number_integer_t>(lhs.m_value.number_unsigned) < rhs.m_value.number_integer;
+ }
+
+ // We only reach this line if we cannot compare values. In that case,
+ // we compare types. Note we have to call the operator explicitly,
+ // because MSVC has problems otherwise.
+ return operator<(lhs_type, rhs_type);
+ }
+
+ /*!
+ @brief comparison: less than
+ @copydoc operator<(const_reference, const_reference)
+ */
+ template<typename ScalarType, typename std::enable_if<
+ std::is_scalar<ScalarType>::value, int>::type = 0>
+ friend bool operator<(const_reference lhs, const ScalarType rhs) noexcept
+ {
+ return lhs < basic_json(rhs);
+ }
+
+ /*!
+ @brief comparison: less than
+ @copydoc operator<(const_reference, const_reference)
+ */
+ template<typename ScalarType, typename std::enable_if<
+ std::is_scalar<ScalarType>::value, int>::type = 0>
+ friend bool operator<(const ScalarType lhs, const_reference rhs) noexcept
+ {
+ return basic_json(lhs) < rhs;
+ }
+
+ /*!
+ @brief comparison: less than or equal
+
+ Compares whether one JSON value @a lhs is less than or equal to another
+ JSON value by calculating `not (rhs < lhs)`.
+
+ @param[in] lhs first JSON value to consider
+ @param[in] rhs second JSON value to consider
+ @return whether @a lhs is less than or equal to @a rhs
+
+ @complexity Linear.
+
+ @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+ @liveexample{The example demonstrates comparing several JSON
+ types.,operator__lessequal}
+
+ @since version 1.0.0
+ */
+ friend bool operator<=(const_reference lhs, const_reference rhs) noexcept
+ {
+ return !(rhs < lhs);
+ }
+
+ /*!
+ @brief comparison: less than or equal
+ @copydoc operator<=(const_reference, const_reference)
+ */
+ template<typename ScalarType, typename std::enable_if<
+ std::is_scalar<ScalarType>::value, int>::type = 0>
+ friend bool operator<=(const_reference lhs, const ScalarType rhs) noexcept
+ {
+ return lhs <= basic_json(rhs);
+ }
+
+ /*!
+ @brief comparison: less than or equal
+ @copydoc operator<=(const_reference, const_reference)
+ */
+ template<typename ScalarType, typename std::enable_if<
+ std::is_scalar<ScalarType>::value, int>::type = 0>
+ friend bool operator<=(const ScalarType lhs, const_reference rhs) noexcept
+ {
+ return basic_json(lhs) <= rhs;
+ }
+
+ /*!
+ @brief comparison: greater than
+
+ Compares whether one JSON value @a lhs is greater than another
+ JSON value by calculating `not (lhs <= rhs)`.
+
+ @param[in] lhs first JSON value to consider
+ @param[in] rhs second JSON value to consider
+ @return whether @a lhs is greater than @a rhs
+
+ @complexity Linear.
+
+ @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+ @liveexample{The example demonstrates comparing several JSON
+ types.,operator__greater}
+
+ @since version 1.0.0
+ */
+ friend bool operator>(const_reference lhs, const_reference rhs) noexcept
+ {
+ return !(lhs <= rhs);
+ }
+
+ /*!
+ @brief comparison: greater than
+ @copydoc operator>(const_reference, const_reference)
+ */
+ template<typename ScalarType, typename std::enable_if<
+ std::is_scalar<ScalarType>::value, int>::type = 0>
+ friend bool operator>(const_reference lhs, const ScalarType rhs) noexcept
+ {
+ return lhs > basic_json(rhs);
+ }
+
+ /*!
+ @brief comparison: greater than
+ @copydoc operator>(const_reference, const_reference)
+ */
+ template<typename ScalarType, typename std::enable_if<
+ std::is_scalar<ScalarType>::value, int>::type = 0>
+ friend bool operator>(const ScalarType lhs, const_reference rhs) noexcept
+ {
+ return basic_json(lhs) > rhs;
+ }
+
+ /*!
+ @brief comparison: greater than or equal
+
+ Compares whether one JSON value @a lhs is greater than or equal to another
+ JSON value by calculating `not (lhs < rhs)`.
+
+ @param[in] lhs first JSON value to consider
+ @param[in] rhs second JSON value to consider
+ @return whether @a lhs is greater than or equal to @a rhs
+
+ @complexity Linear.
+
+ @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+ @liveexample{The example demonstrates comparing several JSON
+ types.,operator__greaterequal}
+
+ @since version 1.0.0
+ */
+ friend bool operator>=(const_reference lhs, const_reference rhs) noexcept
+ {
+ return !(lhs < rhs);
+ }
+
+ /*!
+ @brief comparison: greater than or equal
+ @copydoc operator>=(const_reference, const_reference)
+ */
+ template<typename ScalarType, typename std::enable_if<
+ std::is_scalar<ScalarType>::value, int>::type = 0>
+ friend bool operator>=(const_reference lhs, const ScalarType rhs) noexcept
+ {
+ return lhs >= basic_json(rhs);
+ }
+
+ /*!
+ @brief comparison: greater than or equal
+ @copydoc operator>=(const_reference, const_reference)
+ */
+ template<typename ScalarType, typename std::enable_if<
+ std::is_scalar<ScalarType>::value, int>::type = 0>
+ friend bool operator>=(const ScalarType lhs, const_reference rhs) noexcept
+ {
+ return basic_json(lhs) >= rhs;
+ }
+
+ /// @}
+
+ ///////////////////
+ // serialization //
+ ///////////////////
+
+ /// @name serialization
+ /// @{
+
+ /*!
+ @brief serialize to stream
+
+ Serialize the given JSON value @a j to the output stream @a o. The JSON
+ value will be serialized using the @ref dump member function.
+
+ - The indentation of the output can be controlled with the member variable
+ `width` of the output stream @a o. For instance, using the manipulator
+ `std::setw(4)` on @a o sets the indentation level to `4` and the
+ serialization result is the same as calling `dump(4)`.
+
+ - The indentation character can be controlled with the member variable
+ `fill` of the output stream @a o. For instance, the manipulator
+ `std::setfill('\\t')` sets indentation to use a tab character rather than
+ the default space character.
+
+ @param[in,out] o stream to serialize to
+ @param[in] j JSON value to serialize
+
+ @return the stream @a o
+
+ @throw type_error.316 if a string stored inside the JSON value is not
+ UTF-8 encoded
+
+ @complexity Linear.
+
+ @liveexample{The example below shows the serialization with different
+ parameters to `width` to adjust the indentation level.,operator_serialize}
+
+ @since version 1.0.0; indentation character added in version 3.0.0
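+
+ @note A minimal usage sketch (illustrative only; assumes `json` is the
+ default `nlohmann::json` specialization and that `<iostream>` and
+ `<iomanip>` are included):
+ @code {.cpp}
+ json j = {{"name", "Jane"}, {"age", 42}};
+ std::cout << j << '\n';                 // compact output, like dump()
+ std::cout << std::setw(4) << j << '\n'; // pretty-printed with 4 spaces
+ std::cout << std::setw(1) << std::setfill('\t') << j; // indent with one tab
+ @endcode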
+ */
+ friend std::ostream& operator<<(std::ostream& o, const basic_json& j)
+ {
+ // read width member and use it as indentation parameter if nonzero
+ const bool pretty_print = o.width() > 0;
+ const auto indentation = pretty_print ? o.width() : 0;
+
+ // reset width to 0 for subsequent calls to this stream
+ o.width(0);
+
+ // do the actual serialization
+ serializer s(detail::output_adapter<char>(o), o.fill());
+ s.dump(j, pretty_print, false, static_cast<unsigned int>(indentation));
+ return o;
+ }
+
+ /*!
+ @brief serialize to stream
+ @deprecated This stream operator is deprecated and will be removed in
+ version 4.0.0 of the library. Please use
+ @ref operator<<(std::ostream&, const basic_json&)
+ instead; that is, replace calls like `j >> o;` with `o << j;`.
+ @since version 1.0.0; deprecated since version 3.0.0
+ */
+ JSON_HEDLEY_DEPRECATED_FOR(3.0.0, operator<<(std::ostream&, const basic_json&))
+ friend std::ostream& operator>>(const basic_json& j, std::ostream& o)
+ {
+ return o << j;
+ }
+
+ /// @}
+
+
+ /////////////////////
+ // deserialization //
+ /////////////////////
+
+ /// @name deserialization
+ /// @{
+
+ /*!
+ @brief deserialize from a compatible input
+
+ @tparam InputType A compatible input, for instance
+ - an std::istream object
+ - a FILE pointer
+ - a C-style array of characters
+ - a pointer to a null-terminated string of single byte characters
+ - an object obj for which begin(obj) and end(obj) produce a valid pair of
+ iterators.
+
+ @param[in] i input to read from
+ @param[in] cb a parser callback function of type @ref parser_callback_t
+ which is used to control the deserialization by filtering unwanted values
+ (optional)
+ @param[in] allow_exceptions whether to throw exceptions in case of a
+ parse error (optional, true by default)
+ @param[in] ignore_comments whether comments should be ignored and treated
+ like whitespace (true) or yield a parse error (false); (optional, false by
+ default)
+
+ @return deserialized JSON value; in case of a parse error and
+ @a allow_exceptions set to `false`, the return value will be
+ value_t::discarded.
+
+ @throw parse_error.101 if a parse error occurs; example: `"unexpected end
+ of input; expected string literal"`
+ @throw parse_error.102 if to_unicode fails or surrogate error
+ @throw parse_error.103 if to_unicode fails
+
+ @complexity Linear in the length of the input. The parser is a predictive
+ LL(1) parser. The complexity can be higher if the parser callback function
+ @a cb or reading from the input @a i has a super-linear complexity.
+
+ @note A UTF-8 byte order mark is silently ignored.
+
+ @liveexample{The example below demonstrates the `parse()` function reading
+ from an array.,parse__array__parser_callback_t}
+
+ @liveexample{The example below demonstrates the `parse()` function with
+ and without callback function.,parse__string__parser_callback_t}
+
+ @liveexample{The example below demonstrates the `parse()` function with
+ and without callback function.,parse__istream__parser_callback_t}
+
+ @liveexample{The example below demonstrates the `parse()` function reading
+ from a contiguous container.,parse__contiguouscontainer__parser_callback_t}
+
+ @since version 2.0.3 (contiguous containers); support for ignoring comments
+ added in version 3.9.0.
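+
+ @note A minimal usage sketch (illustrative only; assumes `json` is the
+ default `nlohmann::json` specialization):
+ @code {.cpp}
+ // parse from a string literal; throws parse_error on invalid input
+ json j1 = json::parse(R"({"happy": true, "pi": 3.141})");
+
+ // parse without exceptions: invalid input yields a discarded value
+ json j2 = json::parse("not json", nullptr, false);
+ bool ok = !j2.is_discarded(); // false
+ @endcode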
+ */
+ template<typename InputType>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json parse(InputType&& i,
+ const parser_callback_t cb = nullptr,
+ const bool allow_exceptions = true,
+ const bool ignore_comments = false)
+ {
+ basic_json result;
+ parser(detail::input_adapter(std::forward<InputType>(i)), cb, allow_exceptions, ignore_comments).parse(true, result);
+ return result;
+ }
+
+ /*!
+ @brief deserialize from a pair of character iterators
+
+ The value_type of the iterator must be an integral type with size of 1, 2 or
+ 4 bytes, which will be interpreted respectively as UTF-8, UTF-16 and UTF-32.
+
+ @param[in] first iterator to start of character range
+ @param[in] last iterator to end of character range
+ @param[in] cb a parser callback function of type @ref parser_callback_t
+ which is used to control the deserialization by filtering unwanted values
+ (optional)
+ @param[in] allow_exceptions whether to throw exceptions in case of a
+ parse error (optional, true by default)
+ @param[in] ignore_comments whether comments should be ignored and treated
+ like whitespace (true) or yield a parse error (false); (optional, false by
+ default)
+
+ @return deserialized JSON value; in case of a parse error and
+ @a allow_exceptions set to `false`, the return value will be
+ value_t::discarded.
+
+ @throw parse_error.101 if a parse error occurs; example: `"unexpected end
+ of input; expected string literal"`
+ @throw parse_error.102 if to_unicode fails or surrogate error
+ @throw parse_error.103 if to_unicode fails
+ */
+ template<typename IteratorType>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json parse(IteratorType first,
+ IteratorType last,
+ const parser_callback_t cb = nullptr,
+ const bool allow_exceptions = true,
+ const bool ignore_comments = false)
+ {
+ basic_json result;
+ parser(detail::input_adapter(std::move(first), std::move(last)), cb, allow_exceptions, ignore_comments).parse(true, result);
+ return result;
+ }
+
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, parse(ptr, ptr + len))
+ static basic_json parse(detail::span_input_adapter&& i,
+ const parser_callback_t cb = nullptr,
+ const bool allow_exceptions = true,
+ const bool ignore_comments = false)
+ {
+ basic_json result;
+ parser(i.get(), cb, allow_exceptions, ignore_comments).parse(true, result);
+ return result;
+ }
+
+ /*!
+ @brief check if the input is valid JSON
+
+ Unlike the @ref parse(InputType&&, const parser_callback_t,const bool)
+ function, this function neither throws an exception in case of invalid JSON
+ input (i.e., a parse error) nor creates diagnostic information.
+
+ @tparam InputType A compatible input, for instance
+ - an std::istream object
+ - a FILE pointer
+ - a C-style array of characters
+ - a pointer to a null-terminated string of single byte characters
+ - an object obj for which begin(obj) and end(obj) produce a valid pair of
+ iterators.
+
+ @param[in] i input to read from
+ @param[in] ignore_comments whether comments should be ignored and treated
+ like whitespace (true) or yield a parse error (false); (optional, false by
+ default)
+
+ @return Whether the input read from @a i is valid JSON.
+
+ @complexity Linear in the length of the input. The parser is a predictive
+ LL(1) parser.
+
+ @note A UTF-8 byte order mark is silently ignored.
+
+ @liveexample{The example below demonstrates the `accept()` function reading
+ from a string.,accept__string}
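+
+ @note A minimal usage sketch (illustrative only; assumes `json` is the
+ default `nlohmann::json` specialization):
+ @code {.cpp}
+ bool valid = json::accept(R"({"key": "value"})");        // true
+ bool broken = json::accept("{ key: 1 }");                // false: unquoted key
+ bool commented = json::accept("[1, /* two */ 2]", true); // true: comments ignored
+ @endcode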
+ */
+ template<typename InputType>
+ static bool accept(InputType&& i,
+ const bool ignore_comments = false)
+ {
+ return parser(detail::input_adapter(std::forward<InputType>(i)), nullptr, false, ignore_comments).accept(true);
+ }
+
+ template<typename IteratorType>
+ static bool accept(IteratorType first, IteratorType last,
+ const bool ignore_comments = false)
+ {
+ return parser(detail::input_adapter(std::move(first), std::move(last)), nullptr, false, ignore_comments).accept(true);
+ }
+
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, accept(ptr, ptr + len))
+ static bool accept(detail::span_input_adapter&& i,
+ const bool ignore_comments = false)
+ {
+ return parser(i.get(), nullptr, false, ignore_comments).accept(true);
+ }
+
+ /*!
+ @brief generate SAX events
+
+ The SAX event listener must follow the interface of @ref json_sax.
+
+ This function reads from a compatible input. Examples are:
+ - an std::istream object
+ - a FILE pointer
+ - a C-style array of characters
+ - a pointer to a null-terminated string of single byte characters
+ - an object obj for which begin(obj) and end(obj) produce a valid pair of
+ iterators.
+
+ @param[in] i input to read from
+ @param[in,out] sax SAX event listener
+ @param[in] format the format to parse (JSON, CBOR, MessagePack, or UBJSON)
+ @param[in] strict whether the input has to be consumed completely
+ @param[in] ignore_comments whether comments should be ignored and treated
+ like whitespace (true) or yield a parse error (false); (optional, false by
+ default); only applies to the JSON file format.
+
+ @return return value of the last processed SAX event
+
+ @throw parse_error.101 if a parse error occurs; example: `"unexpected end
+ of input; expected string literal"`
+ @throw parse_error.102 if to_unicode fails or surrogate error
+ @throw parse_error.103 if to_unicode fails
+
+ @complexity Linear in the length of the input. The parser is a predictive
+ LL(1) parser. The complexity can be higher if the SAX consumer @a sax has
+ a super-linear complexity.
+
+ @note A UTF-8 byte order mark is silently ignored.
+
+ @liveexample{The example below demonstrates the `sax_parse()` function
+ reading from string and processing the events with a user-defined SAX
+ event consumer.,sax_parse}
+
+ @since version 3.2.0
+ */
+ template <typename InputType, typename SAX>
+ JSON_HEDLEY_NON_NULL(2)
+ static bool sax_parse(InputType&& i, SAX* sax,
+ input_format_t format = input_format_t::json,
+ const bool strict = true,
+ const bool ignore_comments = false)
+ {
+ auto ia = detail::input_adapter(std::forward<InputType>(i));
+ return format == input_format_t::json
+ ? parser(std::move(ia), nullptr, true, ignore_comments).sax_parse(sax, strict)
+ : detail::binary_reader<basic_json, decltype(ia), SAX>(std::move(ia)).sax_parse(format, sax, strict);
+ }
+
+ template<class IteratorType, class SAX>
+ JSON_HEDLEY_NON_NULL(3)
+ static bool sax_parse(IteratorType first, IteratorType last, SAX* sax,
+ input_format_t format = input_format_t::json,
+ const bool strict = true,
+ const bool ignore_comments = false)
+ {
+ auto ia = detail::input_adapter(std::move(first), std::move(last));
+ return format == input_format_t::json
+ ? parser(std::move(ia), nullptr, true, ignore_comments).sax_parse(sax, strict)
+ : detail::binary_reader<basic_json, decltype(ia), SAX>(std::move(ia)).sax_parse(format, sax, strict);
+ }
+
+ template <typename SAX>
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, sax_parse(ptr, ptr + len, ...))
+ JSON_HEDLEY_NON_NULL(2)
+ static bool sax_parse(detail::span_input_adapter&& i, SAX* sax,
+ input_format_t format = input_format_t::json,
+ const bool strict = true,
+ const bool ignore_comments = false)
+ {
+ auto ia = i.get();
+ return format == input_format_t::json
+ ? parser(std::move(ia), nullptr, true, ignore_comments).sax_parse(sax, strict)
+ : detail::binary_reader<basic_json, decltype(ia), SAX>(std::move(ia)).sax_parse(format, sax, strict);
+ }
+
+ /*!
+ @brief deserialize from stream
+ @deprecated This stream operator is deprecated and will be removed in
+ version 4.0.0 of the library. Please use
+ @ref operator>>(std::istream&, basic_json&)
+ instead; that is, replace calls like `j << i;` with `i >> j;`.
+ @since version 1.0.0; deprecated since version 3.0.0
+ */
+ JSON_HEDLEY_DEPRECATED_FOR(3.0.0, operator>>(std::istream&, basic_json&))
+ friend std::istream& operator<<(basic_json& j, std::istream& i)
+ {
+ return operator>>(i, j);
+ }
+
+ /*!
+ @brief deserialize from stream
+
+ Deserializes an input stream to a JSON value.
+
+ @param[in,out] i input stream to read a serialized JSON value from
+ @param[in,out] j JSON value to write the deserialized input to
+
+ @throw parse_error.101 in case of an unexpected token
+ @throw parse_error.102 if to_unicode fails or surrogate error
+ @throw parse_error.103 if to_unicode fails
+
+ @complexity Linear in the length of the input. The parser is a predictive
+ LL(1) parser.
+
+ @note A UTF-8 byte order mark is silently ignored.
+
+ @liveexample{The example below shows how a JSON value is constructed by
+ reading a serialization from a stream.,operator_deserialize}
+
+ @sa parse(std::istream&, const parser_callback_t) for a variant with a
+ parser callback function to filter values while parsing
+
+ @since version 1.0.0
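+
+ @note A minimal usage sketch (illustrative only; assumes `json` is the
+ default `nlohmann::json` specialization and that `<sstream>` is included):
+ @code {.cpp}
+ std::istringstream input(R"({"value": 42})");
+ json j;
+ input >> j; // parse the stream contents into j
+ @endcode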
+ */
+ friend std::istream& operator>>(std::istream& i, basic_json& j)
+ {
+ parser(detail::input_adapter(i)).parse(false, j);
+ return i;
+ }
+
+ /// @}
+
+ ///////////////////////////
+ // convenience functions //
+ ///////////////////////////
+
+ /*!
+ @brief return the type as string
+
+ Returns the type name as string to be used in error messages - usually to
+ indicate that a function was called on a wrong JSON type.
+
+ @return a string representation of the @a m_type member:
+ Value type | return value
+ ----------- | -------------
+ null | `"null"`
+ boolean | `"boolean"`
+ string | `"string"`
+ number | `"number"` (for all number types)
+ object | `"object"`
+ array | `"array"`
+ binary | `"binary"`
+ discarded | `"discarded"`
+
+ @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+ @complexity Constant.
+
+ @liveexample{The following code exemplifies `type_name()` for all JSON
+ types.,type_name}
+
+ @sa @ref type() -- return the type of the JSON value
+ @sa @ref operator value_t() -- return the type of the JSON value (implicit)
+
+ @since version 1.0.0, public since 2.1.0, `const char*` and `noexcept`
+ since 3.0.0
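+
+ @note A minimal usage sketch (illustrative only; assumes `json` is the
+ default `nlohmann::json` specialization):
+ @code {.cpp}
+ json j = {{"key", 1}};
+ const char* t1 = j.type_name();             // "object"
+ const char* t2 = j["key"].type_name();      // "number"
+ const char* t3 = json(nullptr).type_name(); // "null"
+ @endcode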
+ */
+ JSON_HEDLEY_RETURNS_NON_NULL
+ const char* type_name() const noexcept
+ {
+ switch (m_type)
+ {
+ case value_t::null:
+ return "null";
+ case value_t::object:
+ return "object";
+ case value_t::array:
+ return "array";
+ case value_t::string:
+ return "string";
+ case value_t::boolean:
+ return "boolean";
+ case value_t::binary:
+ return "binary";
+ case value_t::discarded:
+ return "discarded";
+ default:
+ return "number";
+ }
+ }
+
+
+ private:
+ //////////////////////
+ // member variables //
+ //////////////////////
+
+ /// the type of the current element
+ value_t m_type = value_t::null;
+
+ /// the value of the current element
+ json_value m_value = {};
+
+ //////////////////////////////////////////
+ // binary serialization/deserialization //
+ //////////////////////////////////////////
+
+ /// @name binary serialization/deserialization support
+ /// @{
+
+ public:
+ /*!
+ @brief create a CBOR serialization of a given JSON value
+
+ Serializes a given JSON value @a j to a byte vector using the CBOR (Concise
+ Binary Object Representation) serialization format. CBOR is a binary
+ serialization format which aims to be more compact than JSON itself, yet
+ more efficient to parse.
+
+ The library uses the following mapping from JSON values types to
+ CBOR types according to the CBOR specification (RFC 7049):
+
+ JSON value type | value/range | CBOR type | first byte
+ --------------- | ------------------------------------------ | ---------------------------------- | ---------------
+ null | `null` | Null | 0xF6
+ boolean | `true` | True | 0xF5
+ boolean | `false` | False | 0xF4
+ number_integer | -9223372036854775808..-2147483649 | Negative integer (8 bytes follow) | 0x3B
+ number_integer | -2147483648..-32769 | Negative integer (4 bytes follow) | 0x3A
+ number_integer | -32768..-129 | Negative integer (2 bytes follow) | 0x39
+ number_integer | -128..-25 | Negative integer (1 byte follow) | 0x38
+ number_integer | -24..-1 | Negative integer | 0x20..0x37
+ number_integer | 0..23 | Integer | 0x00..0x17
+ number_integer | 24..255 | Unsigned integer (1 byte follow) | 0x18
+ number_integer | 256..65535 | Unsigned integer (2 bytes follow) | 0x19
+ number_integer | 65536..4294967295 | Unsigned integer (4 bytes follow) | 0x1A
+ number_integer | 4294967296..18446744073709551615 | Unsigned integer (8 bytes follow) | 0x1B
+ number_unsigned | 0..23 | Integer | 0x00..0x17
+ number_unsigned | 24..255 | Unsigned integer (1 byte follow) | 0x18
+ number_unsigned | 256..65535 | Unsigned integer (2 bytes follow) | 0x19
+ number_unsigned | 65536..4294967295 | Unsigned integer (4 bytes follow) | 0x1A
+ number_unsigned | 4294967296..18446744073709551615 | Unsigned integer (8 bytes follow) | 0x1B
+ number_float | *any value representable by a float* | Single-Precision Float | 0xFA
+ number_float | *any value NOT representable by a float* | Double-Precision Float | 0xFB
+ string | *length*: 0..23 | UTF-8 string | 0x60..0x77
+ string | *length*: 23..255 | UTF-8 string (1 byte follow) | 0x78
+ string | *length*: 256..65535 | UTF-8 string (2 bytes follow) | 0x79
+ string | *length*: 65536..4294967295 | UTF-8 string (4 bytes follow) | 0x7A
+ string | *length*: 4294967296..18446744073709551615 | UTF-8 string (8 bytes follow) | 0x7B
+ array | *size*: 0..23 | array | 0x80..0x97
+ array | *size*: 23..255 | array (1 byte follow) | 0x98
+ array | *size*: 256..65535 | array (2 bytes follow) | 0x99
+ array | *size*: 65536..4294967295 | array (4 bytes follow) | 0x9A
+ array | *size*: 4294967296..18446744073709551615 | array (8 bytes follow) | 0x9B
+ object | *size*: 0..23 | map | 0xA0..0xB7
+ object | *size*: 23..255 | map (1 byte follow) | 0xB8
+ object | *size*: 256..65535 | map (2 bytes follow) | 0xB9
+ object | *size*: 65536..4294967295 | map (4 bytes follow) | 0xBA
+ object | *size*: 4294967296..18446744073709551615 | map (8 bytes follow) | 0xBB
+ binary | *size*: 0..23 | byte string | 0x40..0x57
+ binary | *size*: 23..255 | byte string (1 byte follow) | 0x58
+ binary | *size*: 256..65535 | byte string (2 bytes follow) | 0x59
+ binary | *size*: 65536..4294967295 | byte string (4 bytes follow) | 0x5A
+ binary | *size*: 4294967296..18446744073709551615 | byte string (8 bytes follow) | 0x5B
+
+ @note The mapping is **complete** in the sense that any JSON value type
+ can be converted to a CBOR value.
+
+ @note If NaN or Infinity are stored inside a JSON number, they are
+ serialized properly. This behavior differs from the @ref dump()
+ function which serializes NaN or Infinity to `null`.
+
+ @note The following CBOR types are not used in the conversion:
+ - UTF-8 strings terminated by "break" (0x7F)
+ - arrays terminated by "break" (0x9F)
+ - maps terminated by "break" (0xBF)
+ - byte strings terminated by "break" (0x5F)
+ - date/time (0xC0..0xC1)
+ - bignum (0xC2..0xC3)
+ - decimal fraction (0xC4)
+ - bigfloat (0xC5)
+ - expected conversions (0xD5..0xD7)
+ - simple values (0xE0..0xF3, 0xF8)
+ - undefined (0xF7)
+ - half-precision floats (0xF9)
+ - break (0xFF)
+
+ @param[in] j JSON value to serialize
+ @return CBOR serialization as byte vector
+
+ @complexity Linear in the size of the JSON value @a j.
+
+ @liveexample{The example shows the serialization of a JSON value to a byte
+ vector in CBOR format.,to_cbor}
+
+ @sa http://cbor.io
+ @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
+ analogous deserialization
+ @sa @ref to_msgpack(const basic_json&) for the related MessagePack format
+ @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the
+ related UBJSON format
+
+ @since version 2.0.9; compact representation of floating-point numbers
+ since version 3.8.0
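+
+ @note A minimal usage sketch (illustrative only; assumes `json` is the
+ default `nlohmann::json` specialization):
+ @code {.cpp}
+ json j = {{"compact", true}, {"schema", 0}};
+ // serialize to a byte vector
+ std::vector<std::uint8_t> cbor = json::to_cbor(j);
+ // the bytes can now be written to a file, socket, etc.
+ @endcode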
+ */
+ static std::vector<uint8_t> to_cbor(const basic_json& j)
+ {
+ std::vector<uint8_t> result;
+ to_cbor(j, result);
+ return result;
+ }
+
+ static void to_cbor(const basic_json& j, detail::output_adapter<uint8_t> o)
+ {
+ binary_writer<uint8_t>(o).write_cbor(j);
+ }
+
+ static void to_cbor(const basic_json& j, detail::output_adapter<char> o)
+ {
+ binary_writer<char>(o).write_cbor(j);
+ }
+
+ /*!
+ @brief create a MessagePack serialization of a given JSON value
+
+ Serializes a given JSON value @a j to a byte vector using the MessagePack
+ serialization format. MessagePack is a binary serialization format which
+ aims to be more compact than JSON itself, yet more efficient to parse.
+
+ The library uses the following mapping from JSON values types to
+ MessagePack types according to the MessagePack specification:
+
+ JSON value type | value/range | MessagePack type | first byte
+ --------------- | --------------------------------- | ---------------- | ----------
+ null | `null` | nil | 0xC0
+ boolean | `true` | true | 0xC3
+ boolean | `false` | false | 0xC2
+ number_integer | -9223372036854775808..-2147483649 | int64 | 0xD3
+ number_integer | -2147483648..-32769 | int32 | 0xD2
+ number_integer | -32768..-129 | int16 | 0xD1
+ number_integer | -128..-33 | int8 | 0xD0
+ number_integer | -32..-1 | negative fixint | 0xE0..0xFF
+ number_integer | 0..127 | positive fixint | 0x00..0x7F
+ number_integer | 128..255 | uint 8 | 0xCC
+ number_integer | 256..65535 | uint 16 | 0xCD
+ number_integer | 65536..4294967295 | uint 32 | 0xCE
+ number_integer | 4294967296..18446744073709551615 | uint 64 | 0xCF
+ number_unsigned | 0..127 | positive fixint | 0x00..0x7F
+ number_unsigned | 128..255 | uint 8 | 0xCC
+ number_unsigned | 256..65535 | uint 16 | 0xCD
+ number_unsigned | 65536..4294967295 | uint 32 | 0xCE
+ number_unsigned | 4294967296..18446744073709551615 | uint 64 | 0xCF
+ number_float | *any value representable by a float* | float 32 | 0xCA
+ number_float | *any value NOT representable by a float* | float 64 | 0xCB
+ string | *length*: 0..31 | fixstr | 0xA0..0xBF
+ string | *length*: 32..255 | str 8 | 0xD9
+ string | *length*: 256..65535 | str 16 | 0xDA
+ string | *length*: 65536..4294967295 | str 32 | 0xDB
+ array | *size*: 0..15 | fixarray | 0x90..0x9F
+ array | *size*: 16..65535 | array 16 | 0xDC
+ array | *size*: 65536..4294967295 | array 32 | 0xDD
+ object | *size*: 0..15 | fix map | 0x80..0x8F
+ object | *size*: 16..65535 | map 16 | 0xDE
+ object | *size*: 65536..4294967295 | map 32 | 0xDF
+ binary | *size*: 0..255 | bin 8 | 0xC4
+ binary | *size*: 256..65535 | bin 16 | 0xC5
+ binary | *size*: 65536..4294967295 | bin 32 | 0xC6
+
+ @note The mapping is **complete** in the sense that any JSON value type
+ can be converted to a MessagePack value.
+
+ @note The following values can **not** be converted to a MessagePack value:
+ - strings with more than 4294967295 bytes
+ - byte strings with more than 4294967295 bytes
+ - arrays with more than 4294967295 elements
+ - objects with more than 4294967295 elements
+
+ @note Any MessagePack output created via @ref to_msgpack can be successfully
+ parsed by @ref from_msgpack.
+
+ @note If NaN or Infinity are stored inside a JSON number, they are
+ serialized properly. This behavior differs from the @ref dump()
+ function which serializes NaN or Infinity to `null`.
+
+ @param[in] j JSON value to serialize
+ @return MessagePack serialization as byte vector
+
+ @complexity Linear in the size of the JSON value @a j.
+
+ @liveexample{The example shows the serialization of a JSON value to a byte
+ vector in MessagePack format.,to_msgpack}
+
+ @sa http://msgpack.org
+ @sa @ref from_msgpack for the analogous deserialization
+ @sa @ref to_cbor(const basic_json&) for the related CBOR format
+ @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the
+ related UBJSON format
+
+ @since version 2.0.9
+ */
+ static std::vector<uint8_t> to_msgpack(const basic_json& j)
+ {
+ std::vector<uint8_t> result;
+ to_msgpack(j, result);
+ return result;
+ }
+
+ static void to_msgpack(const basic_json& j, detail::output_adapter<uint8_t> o)
+ {
+ binary_writer<uint8_t>(o).write_msgpack(j);
+ }
+
+ static void to_msgpack(const basic_json& j, detail::output_adapter<char> o)
+ {
+ binary_writer<char>(o).write_msgpack(j);
+ }
+
+ /*!
+ @brief create a UBJSON serialization of a given JSON value
+
+ Serializes a given JSON value @a j to a byte vector using the UBJSON
+ (Universal Binary JSON) serialization format. UBJSON aims to be more compact
+ than JSON itself, yet more efficient to parse.
+
+ The library uses the following mapping from JSON values types to
+ UBJSON types according to the UBJSON specification:
+
+ JSON value type | value/range | UBJSON type | marker
+ --------------- | --------------------------------- | ----------- | ------
+ null | `null` | null | `Z`
+ boolean | `true` | true | `T`
+ boolean | `false` | false | `F`
+ number_integer | -9223372036854775808..-2147483649 | int64 | `L`
+ number_integer | -2147483648..-32769 | int32 | `l`
+ number_integer | -32768..-129 | int16 | `I`
+ number_integer | -128..127 | int8 | `i`
+ number_integer | 128..255 | uint8 | `U`
+ number_integer | 256..32767 | int16 | `I`
+ number_integer | 32768..2147483647 | int32 | `l`
+ number_integer | 2147483648..9223372036854775807 | int64 | `L`
+ number_unsigned | 0..127 | int8 | `i`
+ number_unsigned | 128..255 | uint8 | `U`
+ number_unsigned | 256..32767 | int16 | `I`
+ number_unsigned | 32768..2147483647 | int32 | `l`
+ number_unsigned | 2147483648..9223372036854775807 | int64 | `L`
+ number_unsigned | 9223372036854775808..18446744073709551615 | high-precision | `H`
+ number_float | *any value* | float64 | `D`
+ string | *with shortest length indicator* | string | `S`
+ array | *see notes on optimized format* | array | `[`
+ object | *see notes on optimized format* | map | `{`
+
+ @note The mapping is **complete** in the sense that any JSON value type
+ can be converted to a UBJSON value.
+
+ @note The following values can **not** be converted to a UBJSON value:
+ - strings with more than 9223372036854775807 bytes (theoretical)
+
+ @note The following markers are not used in the conversion:
+ - `Z`: no-op values are not created.
+ - `C`: single-byte strings are serialized with `S` markers.
+
+ @note Any UBJSON output created via @ref to_ubjson can be successfully parsed
+ by @ref from_ubjson.
+
+ @note If NaN or Infinity are stored inside a JSON number, they are
+ serialized properly. This behavior differs from the @ref dump()
+ function which serializes NaN or Infinity to `null`.
+
+ @note The optimized formats for containers are supported: Parameter
+ @a use_size adds size information to the beginning of a container and
+ removes the closing marker. Parameter @a use_type further checks
+ whether all elements of a container have the same type and adds the
+ type marker to the beginning of the container. The @a use_type
+ parameter must only be used together with @a use_size = true. Note
+ that @a use_size = true alone may result in larger representations -
+ the benefit of this parameter is that the receiving side is
+ immediately informed on the number of elements of the container.
+
+ @note If the JSON data contains the binary type, the value stored is a list
+ of integers, as suggested by the UBJSON documentation. In particular,
+ this means that serialization and the deserialization of a JSON
+ containing binary values into UBJSON and back will result in a
+ different JSON object.
+
+ @param[in] j JSON value to serialize
+ @param[in] use_size whether to add size annotations to container types
+ @param[in] use_type whether to add type annotations to container types
+ (must be combined with @a use_size = true)
+ @return UBJSON serialization as byte vector
+
+ @complexity Linear in the size of the JSON value @a j.
+
+ @liveexample{The example shows the serialization of a JSON value to a byte
+ vector in UBJSON format.,to_ubjson}
+
+ @sa http://ubjson.org
+ @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the
+ analogous deserialization
+ @sa @ref to_cbor(const basic_json&) for the related CBOR format
+ @sa @ref to_msgpack(const basic_json&) for the related MessagePack format
+
+ @since version 3.1.0
+ */
+ static std::vector<uint8_t> to_ubjson(const basic_json& j,
+ const bool use_size = false,
+ const bool use_type = false)
+ {
+ std::vector<uint8_t> result;
+ to_ubjson(j, result, use_size, use_type);
+ return result;
+ }
+
+ static void to_ubjson(const basic_json& j, detail::output_adapter<uint8_t> o,
+ const bool use_size = false, const bool use_type = false)
+ {
+ binary_writer<uint8_t>(o).write_ubjson(j, use_size, use_type);
+ }
+
+ static void to_ubjson(const basic_json& j, detail::output_adapter<char> o,
+ const bool use_size = false, const bool use_type = false)
+ {
+ binary_writer<char>(o).write_ubjson(j, use_size, use_type);
+ }
+
+
+ /*!
+ @brief Serializes the given JSON object `j` to BSON and returns a vector
+ containing the corresponding BSON-representation.
+
+ BSON (Binary JSON) is a binary format in which zero or more ordered key/value pairs are
+ stored as a single entity (a so-called document).
+
+ The library uses the following mapping from JSON values types to BSON types:
+
+ JSON value type | value/range | BSON type | marker
+ --------------- | --------------------------------- | ----------- | ------
+ null | `null` | null | 0x0A
+ boolean | `true`, `false` | boolean | 0x08
+ number_integer | -9223372036854775808..-2147483649 | int64 | 0x12
+ number_integer | -2147483648..2147483647 | int32 | 0x10
+ number_integer | 2147483648..9223372036854775807 | int64 | 0x12
+ number_unsigned | 0..2147483647 | int32 | 0x10
+ number_unsigned | 2147483648..9223372036854775807 | int64 | 0x12
+ number_unsigned | 9223372036854775808..18446744073709551615| -- | --
+ number_float | *any value* | double | 0x01
+ string | *any value* | string | 0x02
+ array | *any value* | document | 0x04
+ object | *any value* | document | 0x03
+ binary | *any value* | binary | 0x05
+
+ @warning The mapping is **incomplete**, since only JSON-objects (and things
+ contained therein) can be serialized to BSON.
+ Also, integers larger than 9223372036854775807 cannot be serialized to BSON,
+ and the keys may not contain U+0000, since they are serialized as
+ zero-terminated c-strings.
+
+ @throw out_of_range.407 if `j.is_number_unsigned() && j.get<std::uint64_t>() > 9223372036854775807`
+ @throw out_of_range.409 if a key in `j` contains a NULL (U+0000)
+ @throw type_error.317 if `!j.is_object()`
+
+ @pre The input `j` is required to be an object: `j.is_object() == true`.
+
+ @note Any BSON output created via @ref to_bson can be successfully parsed
+ by @ref from_bson.
+
+ @param[in] j JSON value to serialize
+ @return BSON serialization as byte vector
+
+ @complexity Linear in the size of the JSON value @a j.
+
+ @liveexample{The example shows the serialization of a JSON value to a byte
+ vector in BSON format.,to_bson}
+
+ @sa http://bsonspec.org/spec.html
+ @sa @ref from_bson(detail::input_adapter&&, const bool strict) for the
+ analogous deserialization
+ @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the
+ related UBJSON format
+ @sa @ref to_cbor(const basic_json&) for the related CBOR format
+ @sa @ref to_msgpack(const basic_json&) for the related MessagePack format
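+
+ @note A minimal usage sketch (illustrative only; assumes `json` is the
+ default `nlohmann::json` specialization):
+ @code {.cpp}
+ json doc = {{"user", "jane"}, {"active", true}};
+ // works because doc is an object
+ std::vector<std::uint8_t> bson = json::to_bson(doc);
+ // json::to_bson(json::array({1, 2})) would throw type_error.317
+ @endcode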
+ */
+ static std::vector<uint8_t> to_bson(const basic_json& j)
+ {
+ std::vector<uint8_t> result;
+ to_bson(j, result);
+ return result;
+ }
+
+ /*!
+ @brief Serializes the given JSON object `j` to BSON and forwards the
+ corresponding BSON-representation to the given output_adapter `o`.
+ @param j The JSON object to convert to BSON.
+ @param o The output adapter that receives the binary BSON representation.
+ @pre The input `j` shall be an object: `j.is_object() == true`
+ @sa @ref to_bson(const basic_json&)
+ */
+ static void to_bson(const basic_json& j, detail::output_adapter<uint8_t> o)
+ {
+ binary_writer<uint8_t>(o).write_bson(j);
+ }
+
+ /*!
+ @copydoc to_bson(const basic_json&, detail::output_adapter<uint8_t>)
+ */
+ static void to_bson(const basic_json& j, detail::output_adapter<char> o)
+ {
+ binary_writer<char>(o).write_bson(j);
+ }
+
+
+ /*!
+ @brief create a JSON value from an input in CBOR format
+
+ Deserializes a given input @a i to a JSON value using the CBOR (Concise
+ Binary Object Representation) serialization format.
+
+ The library maps CBOR types to JSON value types as follows:
+
+ CBOR type | JSON value type | first byte
+ ---------------------- | --------------- | ----------
+ Integer | number_unsigned | 0x00..0x17
+ Unsigned integer | number_unsigned | 0x18
+ Unsigned integer | number_unsigned | 0x19
+ Unsigned integer | number_unsigned | 0x1A
+ Unsigned integer | number_unsigned | 0x1B
+ Negative integer | number_integer | 0x20..0x37
+ Negative integer | number_integer | 0x38
+ Negative integer | number_integer | 0x39
+ Negative integer | number_integer | 0x3A
+ Negative integer | number_integer | 0x3B
+ Byte string | binary | 0x40..0x57
+ Byte string | binary | 0x58
+ Byte string | binary | 0x59
+ Byte string | binary | 0x5A
+ Byte string | binary | 0x5B
+ UTF-8 string | string | 0x60..0x77
+ UTF-8 string | string | 0x78
+ UTF-8 string | string | 0x79
+ UTF-8 string | string | 0x7A
+ UTF-8 string | string | 0x7B
+ UTF-8 string | string | 0x7F
+ array | array | 0x80..0x97
+ array | array | 0x98
+ array | array | 0x99
+ array | array | 0x9A
+ array | array | 0x9B
+ array | array | 0x9F
+ map | object | 0xA0..0xB7
+ map | object | 0xB8
+ map | object | 0xB9
+ map | object | 0xBA
+ map | object | 0xBB
+ map | object | 0xBF
+ False | `false` | 0xF4
+ True | `true` | 0xF5
+ Null | `null` | 0xF6
+ Half-Precision Float | number_float | 0xF9
+ Single-Precision Float | number_float | 0xFA
+ Double-Precision Float | number_float | 0xFB
+
+ @warning The mapping is **incomplete** in the sense that not all CBOR
+ types can be converted to a JSON value. The following CBOR types
+ are not supported and will yield parse errors (parse_error.112):
+ - date/time (0xC0..0xC1)
+ - bignum (0xC2..0xC3)
+ - decimal fraction (0xC4)
+ - bigfloat (0xC5)
+ - expected conversions (0xD5..0xD7)
+ - simple values (0xE0..0xF3, 0xF8)
+ - undefined (0xF7)
+
+ @warning CBOR allows map keys of any type, whereas JSON only allows
+ strings as keys in object values. Therefore, CBOR maps with keys
+ other than UTF-8 strings are rejected (parse_error.113).
+
+ @note Any CBOR output created by @ref to_cbor can be successfully parsed by
+ @ref from_cbor.
+
+ @param[in] i an input in CBOR format convertible to an input adapter
+ @param[in] strict whether to expect the input to be consumed until EOF
+ (true by default)
+ @param[in] allow_exceptions whether to throw exceptions in case of a
+ parse error (optional, true by default)
+ @param[in] tag_handler how to treat CBOR tags (optional, error by default)
+
+ @return deserialized JSON value; in case of a parse error and
+ @a allow_exceptions set to `false`, the return value will be
+ value_t::discarded.
+
+ @throw parse_error.110 if the given input ends prematurely or the end of
+ file was not reached when @a strict was set to true
+ @throw parse_error.112 if unsupported features from CBOR were
+ used in the given input @a i or if the input is not valid CBOR
+ @throw parse_error.113 if a string was expected as map key, but not found
+
+ @complexity Linear in the size of the input @a i.
+
+ @liveexample{The example shows the deserialization of a byte vector in CBOR
+ format to a JSON value.,from_cbor}
+
+ @sa http://cbor.io
+ @sa @ref to_cbor(const basic_json&) for the analogous serialization
+ @sa @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for the
+ related MessagePack format
+ @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the
+ related UBJSON format
+
+ @since version 2.0.9; parameter @a start_index since 2.1.1; changed to
+ consume input adapters, removed start_index parameter, and added
+ @a strict parameter since 3.0.0; added @a allow_exceptions parameter
+ since 3.2.0; added @a tag_handler parameter since 3.9.0.
+ */
+ template<typename InputType>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json from_cbor(InputType&& i,
+ const bool strict = true,
+ const bool allow_exceptions = true,
+ const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error)
+ {
+ basic_json result;
+ detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+ auto ia = detail::input_adapter(std::forward<InputType>(i));
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler);
+ return res ? result : basic_json(value_t::discarded);
+ }
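+
+ /* Illustrative round-trip sketch (not part of the library API; assumes
+ `using nlohmann::json;`):
+ @code {.cpp}
+ json j = {{"pi", 3.141}, {"happy", true}};
+ std::vector<std::uint8_t> v = json::to_cbor(j);
+ json parsed = json::from_cbor(v);                 // parsed == j
+ json lenient = json::from_cbor(v, true, false);   // never throws; discarded on error
+ @endcode */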
+
+ /*!
+ @copydoc from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t)
+ */
+ template<typename IteratorType>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json from_cbor(IteratorType first, IteratorType last,
+ const bool strict = true,
+ const bool allow_exceptions = true,
+ const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error)
+ {
+ basic_json result;
+ detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+ auto ia = detail::input_adapter(std::move(first), std::move(last));
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler);
+ return res ? result : basic_json(value_t::discarded);
+ }
+
+ template<typename T>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_cbor(ptr, ptr + len))
+ static basic_json from_cbor(const T* ptr, std::size_t len,
+ const bool strict = true,
+ const bool allow_exceptions = true,
+ const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error)
+ {
+ return from_cbor(ptr, ptr + len, strict, allow_exceptions, tag_handler);
+ }
+
+
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_cbor(ptr, ptr + len))
+ static basic_json from_cbor(detail::span_input_adapter&& i,
+ const bool strict = true,
+ const bool allow_exceptions = true,
+ const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error)
+ {
+ basic_json result;
+ detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+ auto ia = i.get();
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler);
+ return res ? result : basic_json(value_t::discarded);
+ }
+
+ /*!
+ @brief create a JSON value from an input in MessagePack format
+
+ Deserializes a given input @a i to a JSON value using the MessagePack
+ serialization format.
+
+ The library maps MessagePack types to JSON value types as follows:
+
+ MessagePack type | JSON value type | first byte
+ ---------------- | --------------- | ----------
+ positive fixint | number_unsigned | 0x00..0x7F
+ fixmap | object | 0x80..0x8F
+ fixarray | array | 0x90..0x9F
+ fixstr | string | 0xA0..0xBF
+ nil | `null` | 0xC0
+ false | `false` | 0xC2
+ true | `true` | 0xC3
+ float 32 | number_float | 0xCA
+ float 64 | number_float | 0xCB
+ uint 8 | number_unsigned | 0xCC
+ uint 16 | number_unsigned | 0xCD
+ uint 32 | number_unsigned | 0xCE
+ uint 64 | number_unsigned | 0xCF
+ int 8 | number_integer | 0xD0
+ int 16 | number_integer | 0xD1
+ int 32 | number_integer | 0xD2
+ int 64 | number_integer | 0xD3
+ str 8 | string | 0xD9
+ str 16 | string | 0xDA
+ str 32 | string | 0xDB
+ array 16 | array | 0xDC
+ array 32 | array | 0xDD
+ map 16 | object | 0xDE
+ map 32 | object | 0xDF
+ bin 8 | binary | 0xC4
+ bin 16 | binary | 0xC5
+ bin 32 | binary | 0xC6
+ ext 8 | binary | 0xC7
+ ext 16 | binary | 0xC8
+ ext 32 | binary | 0xC9
+ fixext 1 | binary | 0xD4
+ fixext 2 | binary | 0xD5
+ fixext 4 | binary | 0xD6
+ fixext 8 | binary | 0xD7
+ fixext 16 | binary | 0xD8
+ negative fixint | number_integer | 0xE0-0xFF
+
+ @note Any MessagePack output created by @ref to_msgpack can be successfully
+ parsed by @ref from_msgpack.
+
+ @param[in] i an input in MessagePack format convertible to an input
+ adapter
+ @param[in] strict whether to expect the input to be consumed until EOF
+ (true by default)
+ @param[in] allow_exceptions whether to throw exceptions in case of a
+ parse error (optional, true by default)
+
+ @return deserialized JSON value; in case of a parse error and
+ @a allow_exceptions set to `false`, the return value will be
+ value_t::discarded.
+
+ @throw parse_error.110 if the given input ends prematurely or the end of
+ file was not reached when @a strict was set to true
+ @throw parse_error.112 if unsupported features from MessagePack were
+ used in the given input @a i or if the input is not valid MessagePack
+ @throw parse_error.113 if a string was expected as map key, but not found
+
+ @complexity Linear in the size of the input @a i.
+
+ @liveexample{The example shows the deserialization of a byte vector in
+ MessagePack format to a JSON value.,from_msgpack}
+
+ @sa http://msgpack.org
+ @sa @ref to_msgpack(const basic_json&) for the analogous serialization
+ @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
+ related CBOR format
+ @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for
+ the related UBJSON format
+ @sa @ref from_bson(detail::input_adapter&&, const bool, const bool) for
+ the related BSON format
+
+ @since version 2.0.9; parameter @a start_index since 2.1.1; changed to
+ consume input adapters, removed start_index parameter, and added
+ @a strict parameter since 3.0.0; added @a allow_exceptions parameter
+ since 3.2.0
+ */
+ template<typename InputType>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json from_msgpack(InputType&& i,
+ const bool strict = true,
+ const bool allow_exceptions = true)
+ {
+ basic_json result;
+ detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+ auto ia = detail::input_adapter(std::forward<InputType>(i));
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::msgpack, &sdp, strict);
+ return res ? result : basic_json(value_t::discarded);
+ }
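+
+ /* Illustrative round-trip sketch (not part of the library API; assumes
+ `using nlohmann::json;`):
+ @code {.cpp}
+ json j = {{"compact", true}, {"schema", 0}};
+ std::vector<std::uint8_t> v = json::to_msgpack(j);
+ json parsed = json::from_msgpack(v);              // parsed == j
+ @endcode */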
+
+ /*!
+ @copydoc from_msgpack(detail::input_adapter&&, const bool, const bool)
+ */
+ template<typename IteratorType>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json from_msgpack(IteratorType first, IteratorType last,
+ const bool strict = true,
+ const bool allow_exceptions = true)
+ {
+ basic_json result;
+ detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+ auto ia = detail::input_adapter(std::move(first), std::move(last));
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::msgpack, &sdp, strict);
+ return res ? result : basic_json(value_t::discarded);
+ }
+
+
+ template<typename T>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_msgpack(ptr, ptr + len))
+ static basic_json from_msgpack(const T* ptr, std::size_t len,
+ const bool strict = true,
+ const bool allow_exceptions = true)
+ {
+ return from_msgpack(ptr, ptr + len, strict, allow_exceptions);
+ }
+
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_msgpack(ptr, ptr + len))
+ static basic_json from_msgpack(detail::span_input_adapter&& i,
+ const bool strict = true,
+ const bool allow_exceptions = true)
+ {
+ basic_json result;
+ detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+ auto ia = i.get();
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::msgpack, &sdp, strict);
+ return res ? result : basic_json(value_t::discarded);
+ }
+
+
+ /*!
+ @brief create a JSON value from an input in UBJSON format
+
+ Deserializes a given input @a i to a JSON value using the UBJSON (Universal
+ Binary JSON) serialization format.
+
+ The library maps UBJSON types to JSON value types as follows:
+
+ UBJSON type | JSON value type | marker
+ ----------- | --------------------------------------- | ------
+ no-op | *no value, next value is read* | `N`
+ null | `null` | `Z`
+ false | `false` | `F`
+ true | `true` | `T`
+ float32 | number_float | `d`
+ float64 | number_float | `D`
+ uint8 | number_unsigned | `U`
+ int8 | number_integer | `i`
+ int16 | number_integer | `I`
+ int32 | number_integer | `l`
+ int64 | number_integer | `L`
+ high-precision number | number_integer, number_unsigned, or number_float - depends on number string | `H`
+ string | string | `S`
+ char | string | `C`
+ array | array (optimized values are supported) | `[`
+ object | object (optimized values are supported) | `{`
+
+ @note The mapping is **complete** in the sense that any UBJSON value can
+ be converted to a JSON value.
+
+ @param[in] i an input in UBJSON format convertible to an input adapter
+ @param[in] strict whether to expect the input to be consumed until EOF
+ (true by default)
+ @param[in] allow_exceptions whether to throw exceptions in case of a
+ parse error (optional, true by default)
+
+ @return deserialized JSON value; in case of a parse error and
+ @a allow_exceptions set to `false`, the return value will be
+ value_t::discarded.
+
+ @throw parse_error.110 if the given input ends prematurely or the end of
+ file was not reached when @a strict was set to true
+ @throw parse_error.112 if a parse error occurs
+ @throw parse_error.113 if a string could not be parsed successfully
+
+ @complexity Linear in the size of the input @a i.
+
+ @liveexample{The example shows the deserialization of a byte vector in
+ UBJSON format to a JSON value.,from_ubjson}
+
+ @sa http://ubjson.org
+ @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the
+ analogous serialization
+ @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
+ related CBOR format
+ @sa @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for
+ the related MessagePack format
+ @sa @ref from_bson(detail::input_adapter&&, const bool, const bool) for
+ the related BSON format
+
+ @since version 3.1.0; added @a allow_exceptions parameter since 3.2.0
+ */
+ template<typename InputType>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json from_ubjson(InputType&& i,
+ const bool strict = true,
+ const bool allow_exceptions = true)
+ {
+ basic_json result;
+ detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+ auto ia = detail::input_adapter(std::forward<InputType>(i));
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::ubjson, &sdp, strict);
+ return res ? result : basic_json(value_t::discarded);
+ }
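+
+ /* Illustrative round-trip sketch (not part of the library API; assumes
+ `using nlohmann::json;`):
+ @code {.cpp}
+ json j = {{"answer", 42}, {"list", {1, 2, 3}}};
+ std::vector<std::uint8_t> v = json::to_ubjson(j);
+ json parsed = json::from_ubjson(v);               // parsed == j
+ @endcode */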
+
+ /*!
+ @copydoc from_ubjson(detail::input_adapter&&, const bool, const bool)
+ */
+ template<typename IteratorType>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json from_ubjson(IteratorType first, IteratorType last,
+ const bool strict = true,
+ const bool allow_exceptions = true)
+ {
+ basic_json result;
+ detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+ auto ia = detail::input_adapter(std::move(first), std::move(last));
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::ubjson, &sdp, strict);
+ return res ? result : basic_json(value_t::discarded);
+ }
+
+ template<typename T>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_ubjson(ptr, ptr + len))
+ static basic_json from_ubjson(const T* ptr, std::size_t len,
+ const bool strict = true,
+ const bool allow_exceptions = true)
+ {
+ return from_ubjson(ptr, ptr + len, strict, allow_exceptions);
+ }
+
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_ubjson(ptr, ptr + len))
+ static basic_json from_ubjson(detail::span_input_adapter&& i,
+ const bool strict = true,
+ const bool allow_exceptions = true)
+ {
+ basic_json result;
+ detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+ auto ia = i.get();
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::ubjson, &sdp, strict);
+ return res ? result : basic_json(value_t::discarded);
+ }
+
+
+ /*!
+ @brief Create a JSON value from an input in BSON format
+
+ Deserializes a given input @a i to a JSON value using the BSON (Binary JSON)
+ serialization format.
+
+ The library maps BSON record types to JSON value types as follows:
+
+ BSON type | BSON marker byte | JSON value type
+ --------------- | ---------------- | ---------------------------
+ double | 0x01 | number_float
+ string | 0x02 | string
+ document | 0x03 | object
+ array | 0x04 | array
+ binary | 0x05 | still unsupported
+ undefined | 0x06 | still unsupported
+ ObjectId | 0x07 | still unsupported
+ boolean | 0x08 | boolean
+ UTC Date-Time | 0x09 | still unsupported
+ null | 0x0A | null
+ Regular Expr. | 0x0B | still unsupported
+ DB Pointer | 0x0C | still unsupported
+ JavaScript Code | 0x0D | still unsupported
+ Symbol | 0x0E | still unsupported
+ JavaScript Code w/ scope | 0x0F | still unsupported
+ int32 | 0x10 | number_integer
+ Timestamp | 0x11 | still unsupported
+ 128-bit decimal float | 0x13 | still unsupported
+ Max Key | 0x7F | still unsupported
+ Min Key | 0xFF | still unsupported
+
+ @warning The mapping is **incomplete**. The unsupported mappings
+ are indicated in the table above.
+
+ @param[in] i an input in BSON format convertible to an input adapter
+ @param[in] strict whether to expect the input to be consumed until EOF
+ (true by default)
+ @param[in] allow_exceptions whether to throw exceptions in case of a
+ parse error (optional, true by default)
+
+ @return deserialized JSON value; in case of a parse error and
+ @a allow_exceptions set to `false`, the return value will be
+ value_t::discarded.
+
+ @throw parse_error.114 if an unsupported BSON record type is encountered
+
+ @complexity Linear in the size of the input @a i.
+
+ @liveexample{The example shows the deserialization of a byte vector in
+ BSON format to a JSON value.,from_bson}
+
+ @sa http://bsonspec.org/spec.html
+ @sa @ref to_bson(const basic_json&) for the analogous serialization
+ @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
+ related CBOR format
+ @sa @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for
+ the related MessagePack format
+ @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the
+ related UBJSON format
+ */
+ template<typename InputType>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json from_bson(InputType&& i,
+ const bool strict = true,
+ const bool allow_exceptions = true)
+ {
+ basic_json result;
+ detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+ auto ia = detail::input_adapter(std::forward<InputType>(i));
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::bson, &sdp, strict);
+ return res ? result : basic_json(value_t::discarded);
+ }
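+
+ /* Illustrative round-trip sketch (not part of the library API; assumes
+ `using nlohmann::json;`; note that BSON requires a top-level object):
+ @code {.cpp}
+ json j = {{"entry", "value"}, {"count", 1}};
+ std::vector<std::uint8_t> v = json::to_bson(j);
+ json parsed = json::from_bson(v);                 // parsed == j
+ @endcode */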
+
+ /*!
+ @copydoc from_bson(detail::input_adapter&&, const bool, const bool)
+ */
+ template<typename IteratorType>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json from_bson(IteratorType first, IteratorType last,
+ const bool strict = true,
+ const bool allow_exceptions = true)
+ {
+ basic_json result;
+ detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+ auto ia = detail::input_adapter(std::move(first), std::move(last));
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::bson, &sdp, strict);
+ return res ? result : basic_json(value_t::discarded);
+ }
+
+ template<typename T>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_bson(ptr, ptr + len))
+ static basic_json from_bson(const T* ptr, std::size_t len,
+ const bool strict = true,
+ const bool allow_exceptions = true)
+ {
+ return from_bson(ptr, ptr + len, strict, allow_exceptions);
+ }
+
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_bson(ptr, ptr + len))
+ static basic_json from_bson(detail::span_input_adapter&& i,
+ const bool strict = true,
+ const bool allow_exceptions = true)
+ {
+ basic_json result;
+ detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+ auto ia = i.get();
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::bson, &sdp, strict);
+ return res ? result : basic_json(value_t::discarded);
+ }
+ /// @}
+
+ //////////////////////////
+ // JSON Pointer support //
+ //////////////////////////
+
+ /// @name JSON Pointer functions
+ /// @{
+
+ /*!
+ @brief access specified element via JSON Pointer
+
+ Uses a JSON pointer to retrieve a reference to the respective JSON value.
+ No bounds checking is performed. Similar to @ref operator[](const typename
+ object_t::key_type&), `null` values are created in arrays and objects if
+ necessary.
+
+ In particular:
+ - If the JSON pointer points to an object key that does not exist, it
+ is created and filled with a `null` value before a reference to it
+ is returned.
+ - If the JSON pointer points to an array index that does not exist, it
+ is created and filled with a `null` value before a reference to it
+ is returned. All indices between the current maximum and the given
+ index are also filled with `null`.
+ - The special value `-` is treated as a synonym for the index past the
+ end.
+
+ @param[in] ptr a JSON pointer
+
+ @return reference to the element pointed to by @a ptr
+
+ @complexity Constant.
+
+ @throw parse_error.106 if an array index begins with '0'
+ @throw parse_error.109 if an array index was not a number
+ @throw out_of_range.404 if the JSON pointer can not be resolved
+
+ @liveexample{The behavior is shown in the example.,operatorjson_pointer}
+
+ @since version 2.0.0
+ */
+ reference operator[](const json_pointer& ptr)
+ {
+ return ptr.get_unchecked(this);
+ }
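+
+ /* Illustrative usage sketch (not part of the library API; assumes
+ `using nlohmann::json;`):
+ @code {.cpp}
+ json j = {{"numbers", {1, 2, 3}}};
+ j[json::json_pointer("/numbers/3")] = 4;      // extends the array
+ j[json::json_pointer("/new/key")] = "value";  // creates the intermediate object "new"
+ @endcode */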
+
+ /*!
+ @brief access specified element via JSON Pointer
+
+ Uses a JSON pointer to retrieve a reference to the respective JSON value.
+ No bounds checking is performed. The function does not change the JSON
+ value; no `null` values are created. In particular, the special value
+ `-` yields an exception.
+
+ @param[in] ptr JSON pointer to the desired element
+
+ @return const reference to the element pointed to by @a ptr
+
+ @complexity Constant.
+
+ @throw parse_error.106 if an array index begins with '0'
+ @throw parse_error.109 if an array index was not a number
+ @throw out_of_range.402 if the array index '-' is used
+ @throw out_of_range.404 if the JSON pointer can not be resolved
+
+ @liveexample{The behavior is shown in the example.,operatorjson_pointer_const}
+
+ @since version 2.0.0
+ */
+ const_reference operator[](const json_pointer& ptr) const
+ {
+ return ptr.get_unchecked(this);
+ }
+
+ /*!
+ @brief access specified element via JSON Pointer
+
+ Returns a reference to the element at the specified JSON pointer @a ptr,
+ with bounds checking.
+
+ @param[in] ptr JSON pointer to the desired element
+
+ @return reference to the element pointed to by @a ptr
+
+ @throw parse_error.106 if an array index in the passed JSON pointer @a ptr
+ begins with '0'. See example below.
+
+ @throw parse_error.109 if an array index in the passed JSON pointer @a ptr
+ is not a number. See example below.
+
+ @throw out_of_range.401 if an array index in the passed JSON pointer @a ptr
+ is out of range. See example below.
+
+ @throw out_of_range.402 if the array index '-' is used in the passed JSON
+ pointer @a ptr. As `at` provides checked access (and no elements are
+ implicitly inserted), the index '-' is always invalid. See example below.
+
+ @throw out_of_range.403 if the JSON pointer describes a key of an object
+ which cannot be found. See example below.
+
+ @throw out_of_range.404 if the JSON pointer @a ptr can not be resolved.
+ See example below.
+
+ @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+ changes in the JSON value.
+
+ @complexity Constant.
+
+ @since version 2.0.0
+
+ @liveexample{The behavior is shown in the example.,at_json_pointer}
+ */
+ reference at(const json_pointer& ptr)
+ {
+ return ptr.get_checked(this);
+ }
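+
+ /* Illustrative usage sketch (not part of the library API; assumes
+ `using nlohmann::json;`):
+ @code {.cpp}
+ json j = {{"numbers", {1, 2, 3}}};
+ int n = j.at(json::json_pointer("/numbers/1"));   // n == 2
+ // j.at(json::json_pointer("/missing")) would throw out_of_range.403
+ @endcode */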
+
+ /*!
+ @brief access specified element via JSON Pointer
+
+ Returns a const reference to the element at the specified JSON pointer @a
+ ptr, with bounds checking.
+
+ @param[in] ptr JSON pointer to the desired element
+
+ @return reference to the element pointed to by @a ptr
+
+ @throw parse_error.106 if an array index in the passed JSON pointer @a ptr
+ begins with '0'. See example below.
+
+ @throw parse_error.109 if an array index in the passed JSON pointer @a ptr
+ is not a number. See example below.
+
+ @throw out_of_range.401 if an array index in the passed JSON pointer @a ptr
+ is out of range. See example below.
+
+ @throw out_of_range.402 if the array index '-' is used in the passed JSON
+ pointer @a ptr. As `at` provides checked access (and no elements are
+ implicitly inserted), the index '-' is always invalid. See example below.
+
+ @throw out_of_range.403 if the JSON pointer describes a key of an object
+ which cannot be found. See example below.
+
+ @throw out_of_range.404 if the JSON pointer @a ptr can not be resolved.
+ See example below.
+
+ @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+ changes in the JSON value.
+
+ @complexity Constant.
+
+ @since version 2.0.0
+
+ @liveexample{The behavior is shown in the example.,at_json_pointer_const}
+ */
+ const_reference at(const json_pointer& ptr) const
+ {
+ return ptr.get_checked(this);
+ }
+
+ /*!
+ @brief return flattened JSON value
+
+ The function creates a JSON object whose keys are JSON pointers (see [RFC
+ 6901](https://tools.ietf.org/html/rfc6901)) and whose values are all
+ primitive. The original JSON value can be restored using the @ref
+ unflatten() function.
+
+ @return an object that maps JSON pointers to primitive values
+
+ @note Empty objects and arrays are flattened to `null` and will not be
+ reconstructed correctly by the @ref unflatten() function.
+
+ @complexity Linear in the size of the JSON value.
+
+ @liveexample{The following code shows how a JSON object is flattened to an
+ object whose keys consist of JSON pointers.,flatten}
+
+ @sa @ref unflatten() for the reverse function
+
+ @since version 2.0.0
+ */
+ basic_json flatten() const
+ {
+ basic_json result(value_t::object);
+ json_pointer::flatten("", *this, result);
+ return result;
+ }
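+
+ /* Illustrative usage sketch (not part of the library API; assumes
+ `using nlohmann::json;`):
+ @code {.cpp}
+ json j = {{"pi", 3.141}, {"list", {1, 0, 2}}};
+ json f = j.flatten();
+ // f == {"/pi": 3.141, "/list/0": 1, "/list/1": 0, "/list/2": 2}
+ @endcode */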
+
+ /*!
+ @brief unflatten a previously flattened JSON value
+
+ The function restores the arbitrary nesting of a JSON value that has been
+ flattened before using the @ref flatten() function. The JSON value must
+ meet certain constraints:
+ 1. The value must be an object.
+ 2. The keys must be JSON pointers (see
+ [RFC 6901](https://tools.ietf.org/html/rfc6901))
+ 3. The mapped values must be primitive JSON types.
+
+ @return the original JSON from a flattened version
+
+ @note Empty objects and arrays are flattened by @ref flatten() to `null`
+ values and cannot be unflattened to their original type. Apart from
+ this limitation, for a JSON value `j`, the following is always true:
+ `j == j.flatten().unflatten()`.
+
+ @complexity Linear in the size of the JSON value.
+
+ @throw type_error.314 if value is not an object
+ @throw type_error.315 if object values are not primitive
+
+ @liveexample{The following code shows how a flattened JSON object is
+ unflattened into the original nested JSON object.,unflatten}
+
+ @sa @ref flatten() for the reverse function
+
+ @since version 2.0.0
+ */
+ basic_json unflatten() const
+ {
+ return json_pointer::unflatten(*this);
+ }
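+
+ /* Illustrative usage sketch (not part of the library API; assumes
+ `using nlohmann::json;`):
+ @code {.cpp}
+ json f = {{"/pi", 3.141}, {"/name", "Niels"}};
+ json j = f.unflatten();   // j == {"pi": 3.141, "name": "Niels"}
+ @endcode */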
+
+ /// @}
+
+ //////////////////////////
+ // JSON Patch functions //
+ //////////////////////////
+
+ /// @name JSON Patch functions
+ /// @{
+
+ /*!
+ @brief applies a JSON patch
+
+ [JSON Patch](http://jsonpatch.com) defines a JSON document structure for
+ expressing a sequence of operations to apply to a JSON document. With
+ this function, a JSON Patch is applied to the current JSON value by
+ executing all operations from the patch.
+
+ @param[in] json_patch JSON patch document
+ @return patched document
+
+ @note The application of a patch is atomic: Either all operations succeed
+ and the patched document is returned or an exception is thrown. In
+ any case, the original value is not changed: the patch is applied
+ to a copy of the value.
+
+ @throw parse_error.104 if the JSON patch does not consist of an array of
+ objects
+
+ @throw parse_error.105 if the JSON patch is malformed (e.g., mandatory
+ attributes are missing); example: `"operation add must have member path"`
+
+ @throw out_of_range.401 if an array index is out of range.
+
+ @throw out_of_range.403 if a JSON pointer inside the patch could not be
+ resolved successfully in the current JSON value; example: `"key baz not
+ found"`
+
+ @throw out_of_range.405 if JSON pointer has no parent ("add", "remove",
+ "move")
+
+ @throw other_error.501 if "test" operation was unsuccessful
+
+ @complexity Linear in the size of the JSON value and the length of the
+ JSON patch. As usually only a fraction of the JSON value is affected by
+ the patch, the complexity can usually be neglected.
+
+ @liveexample{The following code shows how a JSON patch is applied to a
+ value.,patch}
+
+ @sa @ref diff -- create a JSON patch by comparing two JSON values
+
+ @sa [RFC 6902 (JSON Patch)](https://tools.ietf.org/html/rfc6902)
+ @sa [RFC 6901 (JSON Pointer)](https://tools.ietf.org/html/rfc6901)
+
+ @since version 2.0.0
+ */
+ basic_json patch(const basic_json& json_patch) const
+ {
+ // make a working copy to apply the patch to
+ basic_json result = *this;
+
+ // the valid JSON Patch operations
+ enum class patch_operations {add, remove, replace, move, copy, test, invalid};
+
+ const auto get_op = [](const std::string & op)
+ {
+ if (op == "add")
+ {
+ return patch_operations::add;
+ }
+ if (op == "remove")
+ {
+ return patch_operations::remove;
+ }
+ if (op == "replace")
+ {
+ return patch_operations::replace;
+ }
+ if (op == "move")
+ {
+ return patch_operations::move;
+ }
+ if (op == "copy")
+ {
+ return patch_operations::copy;
+ }
+ if (op == "test")
+ {
+ return patch_operations::test;
+ }
+
+ return patch_operations::invalid;
+ };
+
+ // wrapper for "add" operation; add value at ptr
+ const auto operation_add = [&result](json_pointer & ptr, basic_json val)
+ {
+ // adding to the root of the target document means replacing it
+ if (ptr.empty())
+ {
+ result = val;
+ return;
+ }
+
+ // make sure the top element of the pointer exists
+ json_pointer top_pointer = ptr.top();
+ if (top_pointer != ptr)
+ {
+ result.at(top_pointer);
+ }
+
+ // get reference to parent of JSON pointer ptr
+ const auto last_path = ptr.back();
+ ptr.pop_back();
+ basic_json& parent = result[ptr];
+
+ switch (parent.m_type)
+ {
+ case value_t::null:
+ case value_t::object:
+ {
+ // use operator[] to add value
+ parent[last_path] = val;
+ break;
+ }
+
+ case value_t::array:
+ {
+ if (last_path == "-")
+ {
+ // special case: append to back
+ parent.push_back(val);
+ }
+ else
+ {
+ const auto idx = json_pointer::array_index(last_path);
+ if (JSON_HEDLEY_UNLIKELY(idx > parent.size()))
+ {
+ // avoid undefined behavior
+ JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range"));
+ }
+
+ // default case: insert at the given offset
+ parent.insert(parent.begin() + static_cast<difference_type>(idx), val);
+ }
+ break;
+ }
+
+ // if there exists a parent it cannot be primitive
+ default: // LCOV_EXCL_LINE
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
+ }
+ };
+
+ // wrapper for "remove" operation; remove value at ptr
+ const auto operation_remove = [&result](json_pointer & ptr)
+ {
+ // get reference to parent of JSON pointer ptr
+ const auto last_path = ptr.back();
+ ptr.pop_back();
+ basic_json& parent = result.at(ptr);
+
+ // remove child
+ if (parent.is_object())
+ {
+ // perform range check
+ auto it = parent.find(last_path);
+ if (JSON_HEDLEY_LIKELY(it != parent.end()))
+ {
+ parent.erase(it);
+ }
+ else
+ {
+ JSON_THROW(out_of_range::create(403, "key '" + last_path + "' not found"));
+ }
+ }
+ else if (parent.is_array())
+ {
+ // note erase performs range check
+ parent.erase(json_pointer::array_index(last_path));
+ }
+ };
+
+ // type check: top level value must be an array
+ if (JSON_HEDLEY_UNLIKELY(!json_patch.is_array()))
+ {
+ JSON_THROW(parse_error::create(104, 0, "JSON patch must be an array of objects"));
+ }
+
+ // iterate and apply the operations
+ for (const auto& val : json_patch)
+ {
+ // wrapper to get a value for an operation
+ const auto get_value = [&val](const std::string & op,
+ const std::string & member,
+ bool string_type) -> basic_json &
+ {
+ // find value
+ auto it = val.m_value.object->find(member);
+
+ // context-sensitive error message
+ const auto error_msg = (op == "op") ? "operation" : "operation '" + op + "'";
+
+ // check if desired value is present
+ if (JSON_HEDLEY_UNLIKELY(it == val.m_value.object->end()))
+ {
+ JSON_THROW(parse_error::create(105, 0, error_msg + " must have member '" + member + "'"));
+ }
+
+ // check if result is of type string
+ if (JSON_HEDLEY_UNLIKELY(string_type && !it->second.is_string()))
+ {
+ JSON_THROW(parse_error::create(105, 0, error_msg + " must have string member '" + member + "'"));
+ }
+
+ // no error: return value
+ return it->second;
+ };
+
+ // type check: every element of the array must be an object
+ if (JSON_HEDLEY_UNLIKELY(!val.is_object()))
+ {
+ JSON_THROW(parse_error::create(104, 0, "JSON patch must be an array of objects"));
+ }
+
+ // collect mandatory members
+ const auto op = get_value("op", "op", true).template get<std::string>();
+ const auto path = get_value(op, "path", true).template get<std::string>();
+ json_pointer ptr(path);
+
+ switch (get_op(op))
+ {
+ case patch_operations::add:
+ {
+ operation_add(ptr, get_value("add", "value", false));
+ break;
+ }
+
+ case patch_operations::remove:
+ {
+ operation_remove(ptr);
+ break;
+ }
+
+ case patch_operations::replace:
+ {
+ // the "path" location must exist - use at()
+ result.at(ptr) = get_value("replace", "value", false);
+ break;
+ }
+
+ case patch_operations::move:
+ {
+ const auto from_path = get_value("move", "from", true).template get<std::string>();
+ json_pointer from_ptr(from_path);
+
+ // the "from" location must exist - use at()
+ basic_json v = result.at(from_ptr);
+
+ // The move operation is functionally identical to a
+ // "remove" operation on the "from" location, followed
+ // immediately by an "add" operation at the target
+ // location with the value that was just removed.
+ operation_remove(from_ptr);
+ operation_add(ptr, v);
+ break;
+ }
+
+ case patch_operations::copy:
+ {
+ const auto from_path = get_value("copy", "from", true).template get<std::string>();
+ const json_pointer from_ptr(from_path);
+
+ // the "from" location must exist - use at()
+ basic_json v = result.at(from_ptr);
+
+ // The copy is functionally identical to an "add"
+ // operation at the target location using the value
+ // specified in the "from" member.
+ operation_add(ptr, v);
+ break;
+ }
+
+ case patch_operations::test:
+ {
+ bool success = false;
+ JSON_TRY
+ {
+ // check if "value" matches the one at "path"
+ // the "path" location must exist - use at()
+ success = (result.at(ptr) == get_value("test", "value", false));
+ }
+ JSON_INTERNAL_CATCH (out_of_range&)
+ {
+ // ignore out of range errors: success remains false
+ }
+
+ // throw an exception if test fails
+ if (JSON_HEDLEY_UNLIKELY(!success))
+ {
+ JSON_THROW(other_error::create(501, "unsuccessful: " + val.dump()));
+ }
+
+ break;
+ }
+
+ default:
+ {
+ // op must be "add", "remove", "replace", "move", "copy", or
+ // "test"
+ JSON_THROW(parse_error::create(105, 0, "operation value '" + op + "' is invalid"));
+ }
+ }
+ }
+
+ return result;
+ }
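+
+ /* Illustrative usage sketch (not part of the library API; assumes
+ `using nlohmann::json;` and the `_json` user-defined literal declared later
+ in this header):
+ @code {.cpp}
+ json doc = R"({"baz": "qux", "foo": "bar"})"_json;
+ json p = R"([
+ {"op": "replace", "path": "/baz", "value": "boo"},
+ {"op": "add", "path": "/hello", "value": ["world"]},
+ {"op": "remove", "path": "/foo"}
+ ])"_json;
+ json patched = doc.patch(p);   // {"baz": "boo", "hello": ["world"]}
+ @endcode */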
+
+ /*!
+ @brief creates a diff as a JSON patch
+
+ Creates a [JSON Patch](http://jsonpatch.com) so that value @a source can
+ be changed into the value @a target by calling @ref patch function.
+
+ @invariant For two JSON values @a source and @a target, the following code
+ yields always `true`:
+ @code {.cpp}
+ source.patch(diff(source, target)) == target;
+ @endcode
+
+ @note Currently, only `remove`, `add`, and `replace` operations are
+ generated.
+
+ @param[in] source JSON value to compare from
+ @param[in] target JSON value to compare against
+ @param[in] path helper value to create JSON pointers
+
+ @return a JSON patch to convert the @a source to @a target
+
+ @complexity Linear in the lengths of @a source and @a target.
+
+ @liveexample{The following code shows how a JSON patch is created as a
+ diff for two JSON values.,diff}
+
+ @sa @ref patch -- apply a JSON patch
+ @sa @ref merge_patch -- apply a JSON Merge Patch
+
+ @sa [RFC 6902 (JSON Patch)](https://tools.ietf.org/html/rfc6902)
+
+ @since version 2.0.0
+ */
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json diff(const basic_json& source, const basic_json& target,
+ const std::string& path = "")
+ {
+ // the patch
+ basic_json result(value_t::array);
+
+ // if the values are the same, return empty patch
+ if (source == target)
+ {
+ return result;
+ }
+
+ if (source.type() != target.type())
+ {
+ // different types: replace value
+ result.push_back(
+ {
+ {"op", "replace"}, {"path", path}, {"value", target}
+ });
+ return result;
+ }
+
+ switch (source.type())
+ {
+ case value_t::array:
+ {
+ // first pass: traverse common elements
+ std::size_t i = 0;
+ while (i < source.size() && i < target.size())
+ {
+ // recursive call to compare array values at index i
+ auto temp_diff = diff(source[i], target[i], path + "/" + std::to_string(i));
+ result.insert(result.end(), temp_diff.begin(), temp_diff.end());
+ ++i;
+ }
+
+ // i now reached the end of at least one array
+ // in a second pass, traverse the remaining elements
+
+ // remove the remaining elements of the source array
+ const auto end_index = static_cast<difference_type>(result.size());
+ while (i < source.size())
+ {
+ // add operations in reverse order to avoid invalid
+ // indices
+ result.insert(result.begin() + end_index, object(
+ {
+ {"op", "remove"},
+ {"path", path + "/" + std::to_string(i)}
+ }));
+ ++i;
+ }
+
+ // add the remaining elements of the target array
+ while (i < target.size())
+ {
+ result.push_back(
+ {
+ {"op", "add"},
+ {"path", path + "/-"},
+ {"value", target[i]}
+ });
+ ++i;
+ }
+
+ break;
+ }
+
+ case value_t::object:
+ {
+ // first pass: traverse this object's elements
+ for (auto it = source.cbegin(); it != source.cend(); ++it)
+ {
+ // escape the key name to be used in a JSON patch
+ const auto key = json_pointer::escape(it.key());
+
+ if (target.find(it.key()) != target.end())
+ {
+ // recursive call to compare object values at key it
+ auto temp_diff = diff(it.value(), target[it.key()], path + "/" + key);
+ result.insert(result.end(), temp_diff.begin(), temp_diff.end());
+ }
+ else
+ {
+ // found a key that is not in the target -> remove it
+ result.push_back(object(
+ {
+ {"op", "remove"}, {"path", path + "/" + key}
+ }));
+ }
+ }
+
+ // second pass: traverse other object's elements
+ for (auto it = target.cbegin(); it != target.cend(); ++it)
+ {
+ if (source.find(it.key()) == source.end())
+ {
+ // found a key that is not in the source -> add it
+ const auto key = json_pointer::escape(it.key());
+ result.push_back(
+ {
+ {"op", "add"}, {"path", path + "/" + key},
+ {"value", it.value()}
+ });
+ }
+ }
+
+ break;
+ }
+
+ default:
+ {
+ // both primitive type: replace value
+ result.push_back(
+ {
+ {"op", "replace"}, {"path", path}, {"value", target}
+ });
+ break;
+ }
+ }
+
+ return result;
+ }
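+
+ /* Illustrative usage sketch (not part of the library API; assumes
+ `using nlohmann::json;`):
+ @code {.cpp}
+ json source = {{"a", 1}, {"b", 2}};
+ json target = {{"a", 1}, {"b", 3}, {"c", 4}};
+ json p = json::diff(source, target);
+ // p holds a "replace" operation for "/b" and an "add" operation for "/c";
+ // source.patch(p) == target
+ @endcode */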
+
+ /// @}
+
+ ////////////////////////////////
+ // JSON Merge Patch functions //
+ ////////////////////////////////
+
+ /// @name JSON Merge Patch functions
+ /// @{
+
+ /*!
+ @brief applies a JSON Merge Patch
+
+ The merge patch format is primarily intended for use with the HTTP PATCH
+ method as a means of describing a set of modifications to a target
+ resource's content. This function applies a merge patch to the current
+ JSON value.
+
+ The function implements the following algorithm from Section 2 of
+ [RFC 7396 (JSON Merge Patch)](https://tools.ietf.org/html/rfc7396):
+
+ ```
+ define MergePatch(Target, Patch):
+ if Patch is an Object:
+ if Target is not an Object:
+ Target = {} // Ignore the contents and set it to an empty Object
+ for each Name/Value pair in Patch:
+ if Value is null:
+ if Name exists in Target:
+ remove the Name/Value pair from Target
+ else:
+ Target[Name] = MergePatch(Target[Name], Value)
+ return Target
+ else:
+ return Patch
+ ```
+
+ Here, `Target` is the current value; that is, the patch is applied to
+ the current value.
+
+ @param[in] apply_patch the patch to apply
+
+ @complexity Linear in the size of @a apply_patch.
+
+ @liveexample{The following code shows how a JSON Merge Patch is applied to
+ a JSON document.,merge_patch}
+
+ @sa @ref patch -- apply a JSON patch
+ @sa [RFC 7396 (JSON Merge Patch)](https://tools.ietf.org/html/rfc7396)
+
+ @since version 3.0.0
+ */
+ void merge_patch(const basic_json& apply_patch)
+ {
+ if (apply_patch.is_object())
+ {
+ if (!is_object())
+ {
+ *this = object();
+ }
+ for (auto it = apply_patch.begin(); it != apply_patch.end(); ++it)
+ {
+ if (it.value().is_null())
+ {
+ erase(it.key());
+ }
+ else
+ {
+ operator[](it.key()).merge_patch(it.value());
+ }
+ }
+ }
+ else
+ {
+ *this = apply_patch;
+ }
+ }
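+
+ /* Illustrative usage sketch (not part of the library API; assumes
+ `using nlohmann::json;`):
+ @code {.cpp}
+ json doc = {{"title", "Goodbye!"}, {"author", {{"givenName", "John"}, {"familyName", "Doe"}}}};
+ json mp = {{"title", "Hello!"}, {"author", {{"familyName", nullptr}}}};
+ doc.merge_patch(mp);
+ // doc == {"title": "Hello!", "author": {"givenName": "John"}}
+ @endcode */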
+
+ /// @}
+};
+
+/*!
+@brief user-defined to_string function for JSON values
+
+This function implements a user-defined to_string for JSON objects.
+
+@param[in] j a JSON object
+@return a std::string object
+*/
+
+NLOHMANN_BASIC_JSON_TPL_DECLARATION
+std::string to_string(const NLOHMANN_BASIC_JSON_TPL& j)
+{
+ return j.dump();
+}
+} // namespace nlohmann
+
+///////////////////////
+// nonmember support //
+///////////////////////
+
+// specialization of std::swap, and std::hash
+namespace std
+{
+
+/// hash value for JSON objects
+template<>
+struct hash<nlohmann::json>
+{
+ /*!
+ @brief return a hash value for a JSON object
+
+ @since version 1.0.0
+ */
+ std::size_t operator()(const nlohmann::json& j) const
+ {
+ return nlohmann::detail::hash(j);
+ }
+};
+
+/// specialization for std::less<value_t>
+/// @note: do not remove the space after '<',
+/// see https://github.com/nlohmann/json/pull/679
+template<>
+struct less<::nlohmann::detail::value_t>
+{
+ /*!
+ @brief compare two value_t enum values
+ @since version 3.0.0
+ */
+ bool operator()(nlohmann::detail::value_t lhs,
+ nlohmann::detail::value_t rhs) const noexcept
+ {
+ return nlohmann::detail::operator<(lhs, rhs);
+ }
+};
+
+// C++20 prohibits function specialization in the std namespace.
+#ifndef JSON_HAS_CPP_20
+
+/*!
+@brief exchanges the values of two JSON objects
+
+@since version 1.0.0
+*/
+template<>
+inline void swap<nlohmann::json>(nlohmann::json& j1, nlohmann::json& j2) noexcept(
+ is_nothrow_move_constructible<nlohmann::json>::value&&
+ is_nothrow_move_assignable<nlohmann::json>::value
+ )
+{
+ j1.swap(j2);
+}
+
+#endif
+
+} // namespace std
+
+/*!
+@brief user-defined string literal for JSON values
+
+This operator implements a user-defined string literal for JSON objects. It
+can be used by adding `"_json"` to a string literal and returns a JSON object
+if no parse error occurred.
+
+@param[in] s a string representation of a JSON object
+@param[in] n the length of string @a s
+@return a JSON object
+
+@since version 1.0.0
+*/
+JSON_HEDLEY_NON_NULL(1)
+inline nlohmann::json operator "" _json(const char* s, std::size_t n)
+{
+ return nlohmann::json::parse(s, s + n);
+}
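+
+/* Illustrative usage sketch (not part of the library API):
+@code {.cpp}
+auto j = R"({"happy": true, "pi": 3.141})"_json;   // parsed at run time
+@endcode */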
+
+/*!
+@brief user-defined string literal for JSON pointer
+
+This operator implements a user-defined string literal for JSON Pointers. It
+can be used by adding `"_json_pointer"` to a string literal and returns a JSON pointer
+object if no parse error occurred.
+
+@param[in] s a string representation of a JSON Pointer
+@param[in] n the length of string @a s
+@return a JSON pointer object
+
+@since version 2.0.0
+*/
+JSON_HEDLEY_NON_NULL(1)
+inline nlohmann::json::json_pointer operator "" _json_pointer(const char* s, std::size_t n)
+{
+ return nlohmann::json::json_pointer(std::string(s, n));
+}
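+
+/* Illustrative usage sketch (not part of the library API):
+@code {.cpp}
+auto ptr = "/foo/0"_json_pointer;
+auto j = R"({"foo": [1, 2]})"_json;
+int first = j[ptr];   // first == 1
+@endcode */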
+
+// #include <nlohmann/detail/macro_unscope.hpp>
+
+
+// restore GCC/clang diagnostic settings
+#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__)
+ #pragma GCC diagnostic pop
+#endif
+#if defined(__clang__)
+ #pragma GCC diagnostic pop
+#endif
+
+// clean up
+#undef JSON_ASSERT
+#undef JSON_INTERNAL_CATCH
+#undef JSON_CATCH
+#undef JSON_THROW
+#undef JSON_TRY
+#undef JSON_HAS_CPP_14
+#undef JSON_HAS_CPP_17
+#undef NLOHMANN_BASIC_JSON_TPL_DECLARATION
+#undef NLOHMANN_BASIC_JSON_TPL
+#undef JSON_EXPLICIT
+
+// #include <nlohmann/thirdparty/hedley/hedley_undef.hpp>
+#undef JSON_HEDLEY_ALWAYS_INLINE
+#undef JSON_HEDLEY_ARM_VERSION
+#undef JSON_HEDLEY_ARM_VERSION_CHECK
+#undef JSON_HEDLEY_ARRAY_PARAM
+#undef JSON_HEDLEY_ASSUME
+#undef JSON_HEDLEY_BEGIN_C_DECLS
+#undef JSON_HEDLEY_CLANG_HAS_ATTRIBUTE
+#undef JSON_HEDLEY_CLANG_HAS_BUILTIN
+#undef JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE
+#undef JSON_HEDLEY_CLANG_HAS_DECLSPEC_DECLSPEC_ATTRIBUTE
+#undef JSON_HEDLEY_CLANG_HAS_EXTENSION
+#undef JSON_HEDLEY_CLANG_HAS_FEATURE
+#undef JSON_HEDLEY_CLANG_HAS_WARNING
+#undef JSON_HEDLEY_COMPCERT_VERSION
+#undef JSON_HEDLEY_COMPCERT_VERSION_CHECK
+#undef JSON_HEDLEY_CONCAT
+#undef JSON_HEDLEY_CONCAT3
+#undef JSON_HEDLEY_CONCAT3_EX
+#undef JSON_HEDLEY_CONCAT_EX
+#undef JSON_HEDLEY_CONST
+#undef JSON_HEDLEY_CONSTEXPR
+#undef JSON_HEDLEY_CONST_CAST
+#undef JSON_HEDLEY_CPP_CAST
+#undef JSON_HEDLEY_CRAY_VERSION
+#undef JSON_HEDLEY_CRAY_VERSION_CHECK
+#undef JSON_HEDLEY_C_DECL
+#undef JSON_HEDLEY_DEPRECATED
+#undef JSON_HEDLEY_DEPRECATED_FOR
+#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL
+#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_
+#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED
+#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES
+#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS
+#undef JSON_HEDLEY_DIAGNOSTIC_POP
+#undef JSON_HEDLEY_DIAGNOSTIC_PUSH
+#undef JSON_HEDLEY_DMC_VERSION
+#undef JSON_HEDLEY_DMC_VERSION_CHECK
+#undef JSON_HEDLEY_EMPTY_BASES
+#undef JSON_HEDLEY_EMSCRIPTEN_VERSION
+#undef JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK
+#undef JSON_HEDLEY_END_C_DECLS
+#undef JSON_HEDLEY_FLAGS
+#undef JSON_HEDLEY_FLAGS_CAST
+#undef JSON_HEDLEY_GCC_HAS_ATTRIBUTE
+#undef JSON_HEDLEY_GCC_HAS_BUILTIN
+#undef JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE
+#undef JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE
+#undef JSON_HEDLEY_GCC_HAS_EXTENSION
+#undef JSON_HEDLEY_GCC_HAS_FEATURE
+#undef JSON_HEDLEY_GCC_HAS_WARNING
+#undef JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK
+#undef JSON_HEDLEY_GCC_VERSION
+#undef JSON_HEDLEY_GCC_VERSION_CHECK
+#undef JSON_HEDLEY_GNUC_HAS_ATTRIBUTE
+#undef JSON_HEDLEY_GNUC_HAS_BUILTIN
+#undef JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE
+#undef JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE
+#undef JSON_HEDLEY_GNUC_HAS_EXTENSION
+#undef JSON_HEDLEY_GNUC_HAS_FEATURE
+#undef JSON_HEDLEY_GNUC_HAS_WARNING
+#undef JSON_HEDLEY_GNUC_VERSION
+#undef JSON_HEDLEY_GNUC_VERSION_CHECK
+#undef JSON_HEDLEY_HAS_ATTRIBUTE
+#undef JSON_HEDLEY_HAS_BUILTIN
+#undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE
+#undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS
+#undef JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE
+#undef JSON_HEDLEY_HAS_EXTENSION
+#undef JSON_HEDLEY_HAS_FEATURE
+#undef JSON_HEDLEY_HAS_WARNING
+#undef JSON_HEDLEY_IAR_VERSION
+#undef JSON_HEDLEY_IAR_VERSION_CHECK
+#undef JSON_HEDLEY_IBM_VERSION
+#undef JSON_HEDLEY_IBM_VERSION_CHECK
+#undef JSON_HEDLEY_IMPORT
+#undef JSON_HEDLEY_INLINE
+#undef JSON_HEDLEY_INTEL_VERSION
+#undef JSON_HEDLEY_INTEL_VERSION_CHECK
+#undef JSON_HEDLEY_IS_CONSTANT
+#undef JSON_HEDLEY_IS_CONSTEXPR_
+#undef JSON_HEDLEY_LIKELY
+#undef JSON_HEDLEY_MALLOC
+#undef JSON_HEDLEY_MESSAGE
+#undef JSON_HEDLEY_MSVC_VERSION
+#undef JSON_HEDLEY_MSVC_VERSION_CHECK
+#undef JSON_HEDLEY_NEVER_INLINE
+#undef JSON_HEDLEY_NON_NULL
+#undef JSON_HEDLEY_NO_ESCAPE
+#undef JSON_HEDLEY_NO_RETURN
+#undef JSON_HEDLEY_NO_THROW
+#undef JSON_HEDLEY_NULL
+#undef JSON_HEDLEY_PELLES_VERSION
+#undef JSON_HEDLEY_PELLES_VERSION_CHECK
+#undef JSON_HEDLEY_PGI_VERSION
+#undef JSON_HEDLEY_PGI_VERSION_CHECK
+#undef JSON_HEDLEY_PREDICT
+#undef JSON_HEDLEY_PRINTF_FORMAT
+#undef JSON_HEDLEY_PRIVATE
+#undef JSON_HEDLEY_PUBLIC
+#undef JSON_HEDLEY_PURE
+#undef JSON_HEDLEY_REINTERPRET_CAST
+#undef JSON_HEDLEY_REQUIRE
+#undef JSON_HEDLEY_REQUIRE_CONSTEXPR
+#undef JSON_HEDLEY_REQUIRE_MSG
+#undef JSON_HEDLEY_RESTRICT
+#undef JSON_HEDLEY_RETURNS_NON_NULL
+#undef JSON_HEDLEY_SENTINEL
+#undef JSON_HEDLEY_STATIC_ASSERT
+#undef JSON_HEDLEY_STATIC_CAST
+#undef JSON_HEDLEY_STRINGIFY
+#undef JSON_HEDLEY_STRINGIFY_EX
+#undef JSON_HEDLEY_SUNPRO_VERSION
+#undef JSON_HEDLEY_SUNPRO_VERSION_CHECK
+#undef JSON_HEDLEY_TINYC_VERSION
+#undef JSON_HEDLEY_TINYC_VERSION_CHECK
+#undef JSON_HEDLEY_TI_ARMCL_VERSION
+#undef JSON_HEDLEY_TI_ARMCL_VERSION_CHECK
+#undef JSON_HEDLEY_TI_CL2000_VERSION
+#undef JSON_HEDLEY_TI_CL2000_VERSION_CHECK
+#undef JSON_HEDLEY_TI_CL430_VERSION
+#undef JSON_HEDLEY_TI_CL430_VERSION_CHECK
+#undef JSON_HEDLEY_TI_CL6X_VERSION
+#undef JSON_HEDLEY_TI_CL6X_VERSION_CHECK
+#undef JSON_HEDLEY_TI_CL7X_VERSION
+#undef JSON_HEDLEY_TI_CL7X_VERSION_CHECK
+#undef JSON_HEDLEY_TI_CLPRU_VERSION
+#undef JSON_HEDLEY_TI_CLPRU_VERSION_CHECK
+#undef JSON_HEDLEY_TI_VERSION
+#undef JSON_HEDLEY_TI_VERSION_CHECK
+#undef JSON_HEDLEY_UNAVAILABLE
+#undef JSON_HEDLEY_UNLIKELY
+#undef JSON_HEDLEY_UNPREDICTABLE
+#undef JSON_HEDLEY_UNREACHABLE
+#undef JSON_HEDLEY_UNREACHABLE_RETURN
+#undef JSON_HEDLEY_VERSION
+#undef JSON_HEDLEY_VERSION_DECODE_MAJOR
+#undef JSON_HEDLEY_VERSION_DECODE_MINOR
+#undef JSON_HEDLEY_VERSION_DECODE_REVISION
+#undef JSON_HEDLEY_VERSION_ENCODE
+#undef JSON_HEDLEY_WARNING
+#undef JSON_HEDLEY_WARN_UNUSED_RESULT
+#undef JSON_HEDLEY_WARN_UNUSED_RESULT_MSG
+#undef JSON_HEDLEY_FALL_THROUGH
+
+
+
+#endif // INCLUDE_NLOHMANN_JSON_HPP_
diff --git a/third-party/socketpair/CMakeLists.txt b/third-party/socketpair/CMakeLists.txt
new file mode 100644
index 0000000..89a38f7
--- /dev/null
+++ b/third-party/socketpair/CMakeLists.txt
@@ -0,0 +1,12 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+set(socketpair_SOURCES
+ socketpair.c socketpair.h
+)
+
+add_library(socketpair OBJECT ${socketpair_SOURCES})
+
+set_target_properties (
+ socketpair PROPERTIES
+ FOLDER Lib
+)
diff --git a/third-party/socketpair/socketpair.c b/third-party/socketpair/socketpair.c
new file mode 100644
index 0000000..6ad0e70
--- /dev/null
+++ b/third-party/socketpair/socketpair.c
@@ -0,0 +1,154 @@
+/* socketpair.c
+Copyright 2007, 2010 by Nathan C. Myers <ncm@cantrip.org>
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ The name of the author must not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Changes:
+ * 2014-02-12: merge David Woodhouse, Ger Hobbelt improvements
+ * git.infradead.org/users/dwmw2/openconnect.git/commitdiff/bdeefa54
+ * github.com/GerHobbelt/selectable-socketpair
+ * always init the socks[] to -1/INVALID_SOCKET on error, both on Win32/64
+ * and UNIX/other platforms
+ * 2013-07-18: Change to BSD 3-clause license
+ * 2010-03-31:
+ * set addr to 127.0.0.1 because win32 getsockname does not always set it.
+ * 2010-02-25:
+ * set SO_REUSEADDR option to avoid leaking some windows resource.
+ * Windows System Error 10049, "Event ID 4226 TCP/IP has reached
+ * the security limit imposed on the number of concurrent TCP connect
+ * attempts." Bleah.
+ * 2007-04-25:
+ * preserve value of WSAGetLastError() on all error returns.
+ * 2007-04-22: (Thanks to Matthew Gregan <kinetik@flim.org>)
+ * s/EINVAL/WSAEINVAL/ fix trivial compile failure
+ * s/socket/WSASocket/ enable creation of sockets suitable as stdin/stdout
+ * of a child process.
+ * add argument make_overlapped
+ */
+
+#include <string.h>
+
+#ifdef WIN32
+# include <ws2tcpip.h> /* socklen_t, et al (MSVC20xx) */
+# include <windows.h>
+# include <io.h>
+#else
+# include <sys/types.h>
+# include <sys/socket.h>
+# include <errno.h>
+#endif
+
+#include "socketpair.h"
+
+#ifdef WIN32
+
+/* dumb_socketpair:
+ * If make_overlapped is nonzero, both sockets created will be usable for
+ * "overlapped" operations via WSASend etc. If make_overlapped is zero,
+ * socks[0] (only) will be usable with regular ReadFile etc., and thus
+ * suitable for use as stdin or stdout of a child process. Note that the
+ * sockets must be closed with closesocket() regardless.
+ */
+
+int dumb_socketpair(SOCKET socks[2], int make_overlapped)
+{
+ union {
+ struct sockaddr_in inaddr;
+ struct sockaddr addr;
+ } a;
+ SOCKET listener;
+ int e;
+ socklen_t addrlen = sizeof(a.inaddr);
+ DWORD flags = (make_overlapped ? WSA_FLAG_OVERLAPPED : 0);
+ int reuse = 1;
+
+ if (socks == 0) {
+ WSASetLastError(WSAEINVAL);
+ return SOCKET_ERROR;
+ }
+ socks[0] = socks[1] = -1;
+
+ listener = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+ if (listener == -1)
+ return SOCKET_ERROR;
+
+ memset(&a, 0, sizeof(a));
+ a.inaddr.sin_family = AF_INET;
+ a.inaddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ a.inaddr.sin_port = 0;
+
+ for (;;) {
+ if (setsockopt(listener, SOL_SOCKET, SO_REUSEADDR,
+ (char*) &reuse, (socklen_t) sizeof(reuse)) == -1)
+ break;
+ if (bind(listener, &a.addr, sizeof(a.inaddr)) == SOCKET_ERROR)
+ break;
+
+ memset(&a, 0, sizeof(a));
+ if (getsockname(listener, &a.addr, &addrlen) == SOCKET_ERROR)
+ break;
+ // win32 getsockname may only set the port number, p=0.0005.
+ // ( http://msdn.microsoft.com/library/ms738543.aspx ):
+ a.inaddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ a.inaddr.sin_family = AF_INET;
+
+ if (listen(listener, 1) == SOCKET_ERROR)
+ break;
+
+ socks[0] = WSASocket(AF_INET, SOCK_STREAM, 0, NULL, 0, flags);
+ if (socks[0] == -1)
+ break;
+ if (connect(socks[0], &a.addr, sizeof(a.inaddr)) == SOCKET_ERROR)
+ break;
+
+ socks[1] = accept(listener, NULL, NULL);
+ if (socks[1] == -1)
+ break;
+
+ closesocket(listener);
+ return 0;
+ }
+
+ e = WSAGetLastError();
+ closesocket(listener);
+ closesocket(socks[0]);
+ closesocket(socks[1]);
+ WSASetLastError(e);
+ socks[0] = socks[1] = -1;
+ return SOCKET_ERROR;
+}
+#else
+int dumb_socketpair(int socks[2], int dummy)
+{
+ if (socks == 0) {
+ errno = EINVAL;
+ return -1;
+ }
+ dummy = socketpair(AF_UNIX, SOCK_STREAM, 0, socks);
+ if (dummy)
+ socks[0] = socks[1] = -1;
+ return dummy;
+}
+#endif
diff --git a/third-party/socketpair/socketpair.h b/third-party/socketpair/socketpair.h
new file mode 100644
index 0000000..4b37839
--- /dev/null
+++ b/third-party/socketpair/socketpair.h
@@ -0,0 +1,46 @@
+/* socketpair.h
+Copyright 2007, 2010 by Nathan C. Myers <ncm@cantrip.org>
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ The name of the author must not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SOCKETPAIR_H
+#define SOCKETPAIR_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#ifdef _WIN32
+int dumb_socketpair(SOCKET socks[2], int make_overlapped);
+#else /* _WIN32 */
+int dumb_socketpair(int socks[2], int dummy);
+#endif
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* SOCKETPAIR_H */
+
diff --git a/third-party/utf8cpp/.circleci/config.yml b/third-party/utf8cpp/.circleci/config.yml
new file mode 100644
index 0000000..2588646
--- /dev/null
+++ b/third-party/utf8cpp/.circleci/config.yml
@@ -0,0 +1,13 @@
+version: 2
+
+jobs:
+ build:
+ docker:
+ - image: nemtrif/utf8cpp:3.1.3
+ steps:
+ - checkout
+ - run: git submodule update --init --recursive --remote
+ - run: mkdir build
+ - run: cd build && cmake ..
+ - run: cd build && cmake --build .
+ - run: cd build && ctest -VV
diff --git a/third-party/utf8cpp/.gitignore b/third-party/utf8cpp/.gitignore
new file mode 100644
index 0000000..488d51d
--- /dev/null
+++ b/third-party/utf8cpp/.gitignore
@@ -0,0 +1,4 @@
+# VS Code:
+.vscode/
+# Often used by CMake
+build/ \ No newline at end of file
diff --git a/third-party/utf8cpp/.gitmodules b/third-party/utf8cpp/.gitmodules
new file mode 100644
index 0000000..424f86b
--- /dev/null
+++ b/third-party/utf8cpp/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "extern/ftest"]
+ path = extern/ftest
+ url = https://github.com/nemtrif/ftest
diff --git a/third-party/utf8cpp/CMakeLists.txt b/third-party/utf8cpp/CMakeLists.txt
new file mode 100644
index 0000000..c8d4b7a
--- /dev/null
+++ b/third-party/utf8cpp/CMakeLists.txt
@@ -0,0 +1,62 @@
+cmake_minimum_required (VERSION 3.0.2)
+project (utf8cpp VERSION 3.2.2 LANGUAGES CXX)
+
+if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
+ set(IS_ROOT_PROJECT ON)
+else()
+ set(IS_ROOT_PROJECT OFF)
+endif()
+
+option(UTF8_TESTS "Enable tests for UTF8-CPP" ${IS_ROOT_PROJECT})
+option(UTF8_INSTALL "Enable installation for UTF8-CPP" ${IS_ROOT_PROJECT})
+option(UTF8_SAMPLES "Enable building samples for UTF8-CPP" ${IS_ROOT_PROJECT})
+
+add_library(utf8cpp INTERFACE)
+target_include_directories(utf8cpp INTERFACE
+ "$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/source>"
+ $<INSTALL_INTERFACE:include/utf8cpp>
+)
+add_library(utf8::cpp ALIAS utf8cpp)
+
+if(UTF8_INSTALL)
+ include(CMakePackageConfigHelpers)
+ if(MSVC)
+ set(DEF_INSTALL_CMAKE_DIR CMake)
+ else()
+ include(GNUInstallDirs) # define CMAKE_INSTALL_*
+ set(DEF_INSTALL_CMAKE_DIR ${CMAKE_INSTALL_LIBDIR}/cmake/utf8cpp)
+ endif()
+
+ write_basic_package_version_file(
+ ${CMAKE_CURRENT_BINARY_DIR}/utf8cppConfigVersion.cmake
+ VERSION ${PROJECT_VERSION}
+ COMPATIBILITY SameMajorVersion
+ )
+
+ configure_package_config_file(
+ ${PROJECT_SOURCE_DIR}/utf8cppConfig.cmake.in
+ ${CMAKE_CURRENT_BINARY_DIR}/utf8cppConfig.cmake
+ INSTALL_DESTINATION ${DEF_INSTALL_CMAKE_DIR}
+ )
+
+ install(DIRECTORY source/ DESTINATION include/utf8cpp)
+ install(TARGETS utf8cpp EXPORT utf8cppTargets)
+ install(EXPORT utf8cppTargets DESTINATION ${DEF_INSTALL_CMAKE_DIR})
+ install(
+ FILES
+ ${CMAKE_CURRENT_BINARY_DIR}/utf8cppConfig.cmake
+ ${CMAKE_CURRENT_BINARY_DIR}/utf8cppConfigVersion.cmake
+ DESTINATION
+ ${DEF_INSTALL_CMAKE_DIR}
+ )
+endif()
+
+if(UTF8_SAMPLES)
+ add_executable(docsample ${PROJECT_SOURCE_DIR}/samples/docsample.cpp)
+ target_link_libraries(docsample PRIVATE utf8::cpp)
+endif()
+
+if(UTF8_TESTS)
+ enable_testing()
+ add_subdirectory(tests)
+endif()
diff --git a/third-party/utf8cpp/LICENSE b/third-party/utf8cpp/LICENSE
new file mode 100644
index 0000000..36b7cd9
--- /dev/null
+++ b/third-party/utf8cpp/LICENSE
@@ -0,0 +1,23 @@
+Boost Software License - Version 1.0 - August 17th, 2003
+
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the software and accompanying documentation covered by
+this license (the "Software") to use, reproduce, display, distribute,
+execute, and transmit the Software, and to prepare derivative works of the
+Software, and to permit third-parties to whom the Software is furnished to
+do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including
+the above license grant, this restriction and the following disclaimer,
+must be included in all copies of the Software, in whole or in part, and
+all derivative works of the Software, unless such copies or derivative
+works are solely in the form of machine-executable object code generated by
+a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/third-party/utf8cpp/README.md b/third-party/utf8cpp/README.md
new file mode 100644
index 0000000..a519cdb
--- /dev/null
+++ b/third-party/utf8cpp/README.md
@@ -0,0 +1,1503 @@
+# UTF8-CPP: UTF-8 with C++ in a Portable Way
+
+
+## Introduction
+
+C++ developers miss an easy and portable way of handling Unicode encoded strings. The original C++ Standard (known as C++98 or C++03) is Unicode agnostic. C++11 provides some support for Unicode on core language and library level: u8, u, and U character and string literals, char16_t and char32_t character types, u16string and u32string library classes, and codecvt support for conversions between Unicode encoding forms. In the meantime, developers use third party libraries like ICU, OS specific capabilities, or simply roll their own solutions.
+
+In order to easily handle UTF-8 encoded Unicode strings, I came up with a small, C++98 compatible generic library. For anybody used to working with STL algorithms and iterators, it should be easy and natural to use. The code is freely available for any purpose - check out the [license](./LICENSE). The library has been used heavily in the past ten years, both in commercial and open-source projects, and is considered feature-complete now. If you run into bugs or performance issues, please let me know and I'll do my best to address them.
+
+The purpose of this article is not to offer an introduction to Unicode in general, and UTF-8 in particular. If you are not familiar with Unicode, be sure to check out the [Unicode Home Page](http://www.unicode.org/) or some other source of information about Unicode. Also, it is not my aim to advocate the use of UTF-8 encoded strings in C++ programs; if you want to handle UTF-8 encoded strings from C++, I am sure you have good reasons for it.
+
+## Examples of use
+
+### Introductory Sample
+
+To illustrate the use of the library, let's start with a small but complete program that opens a file containing UTF-8 encoded text, reads it line by line, checks each line for invalid UTF-8 byte sequences, and converts it to UTF-16 encoding and back to UTF-8:
+
+```cpp
+#include <fstream>
+#include <iostream>
+#include <string>
+#include <vector>
+#include "utf8.h"
+using namespace std;
+int main(int argc, char** argv)
+{
+ if (argc != 2) {
+ cout << "\nUsage: docsample filename\n";
+ return 0;
+ }
+ const char* test_file_path = argv[1];
+ // Open the test file (must be UTF-8 encoded)
+ ifstream fs8(test_file_path);
+ if (!fs8.is_open()) {
+ cout << "Could not open " << test_file_path << endl;
+ return 0;
+ }
+
+ unsigned line_count = 1;
+ string line;
+ // Play with all the lines in the file
+ while (getline(fs8, line)) {
+ // check for invalid utf-8 (for a simple yes/no check, there is also utf8::is_valid function)
+#if __cplusplus >= 201103L // C++ 11 or later
+ auto end_it = utf8::find_invalid(line.begin(), line.end());
+#else
+ string::iterator end_it = utf8::find_invalid(line.begin(), line.end());
+#endif // C++ 11
+ if (end_it != line.end()) {
+ cout << "Invalid UTF-8 encoding detected at line " << line_count << "\n";
+ cout << "This part is fine: " << string(line.begin(), end_it) << "\n";
+ }
+ // Get the line length (at least for the valid part)
+ int length = utf8::distance(line.begin(), end_it);
+ cout << "Length of line " << line_count << " is " << length << "\n";
+
+ // Convert it to utf-16
+#if __cplusplus >= 201103L // C++ 11 or later
+ u16string utf16line = utf8::utf8to16(line);
+#else
+ vector<unsigned short> utf16line;
+ utf8::utf8to16(line.begin(), end_it, back_inserter(utf16line));
+#endif // C++ 11
+ // And back to utf-8;
+#if __cplusplus >= 201103L // C++ 11 or later
+ string utf8line = utf8::utf16to8(utf16line);
+#else
+ string utf8line;
+ utf8::utf16to8(utf16line.begin(), utf16line.end(), back_inserter(utf8line));
+#endif // C++ 11
+ // Confirm that the conversion went OK:
+ if (utf8line != string(line.begin(), end_it))
+ cout << "Error in UTF-16 conversion at line: " << line_count << "\n";
+
+ line_count++;
+ }
+
+ return 0;
+}
+```
+
+In the previous code sample, for each line we detected invalid UTF-8 sequences with `find_invalid`; determined the number of characters (more precisely, the number of Unicode code points, including the end of line and even the BOM if there is one) with `utf8::distance`; and finally converted each line to UTF-16 encoding with `utf8to16` and back to UTF-8 with `utf16to8`.
+
+Note the different usage pattern for older compilers. For instance, this is how we convert
+a UTF-8 encoded string to a UTF-16 encoded one with a pre-C++11 compiler:
+```cpp
+ vector<unsigned short> utf16line;
+ utf8::utf8to16(line.begin(), end_it, back_inserter(utf16line));
+```
+
+With a more modern compiler, the same operation would look like:
+```cpp
+ u16string utf16line = utf8::utf8to16(line);
+```
+If the `__cplusplus` macro indicates C++ 11 or later, the library exposes an API that takes
+advantage of C++ standard Unicode strings and move semantics. With an older compiler, the same
+functionality is still available, just in a slightly less convenient way.
+
+In case you do not trust the `__cplusplus` macro or, for instance, do not want to include
+the C++ 11 helper functions even with a modern compiler, define the `UTF_CPP_CPLUSPLUS` macro
+before including `utf8.h` and assign it the value for the standard you want to use - the values are the same as for the `__cplusplus` macro. This can also be useful with compilers that are conservative about setting the `__cplusplus` macro even though they have good support for a recent standard edition - Microsoft's Visual C++ is one example.
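+
+For example, a minimal sketch of forcing the C++ 17 level of the API even when the compiler reports an older `__cplusplus` value; `201703L` is simply the standard `__cplusplus` value for C++ 17:
+
+```cpp
+// Request the C++ 17 level of the UTF8-CPP API before including the header.
+#define UTF_CPP_CPLUSPLUS 201703L
+#include "utf8.h"
+
+#include <string>
+#include <string_view>
+
+std::u16string to_utf16(std::string_view input)
+{
+    // The std::string_view overloads are available because the C++ 17 API was requested.
+    return utf8::utf8to16(input);
+}
+```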
+
+### Checking if a file contains valid UTF-8 text
+
+Here is a function that checks whether the content of a file is valid UTF-8 encoded text without reading the content into memory:
+
+```cpp
+bool valid_utf8_file(const char* file_name)
+{
+ ifstream ifs(file_name);
+ if (!ifs)
+ return false; // even better, throw here
+
+ istreambuf_iterator<char> it(ifs.rdbuf());
+ istreambuf_iterator<char> eos;
+
+ return utf8::is_valid(it, eos);
+}
+```
+
+Because the function `utf8::is_valid()` works with input iterators, we were able to use `istreambuf_iterator`s and read the content of the file directly, without loading it into memory first.
+
+Note that other functions that take input iterator arguments can be used in a similar way. For instance, to read the content of a UTF-8 encoded text file and convert the text to UTF-16, just do something like:
+
+```cpp
+    u16string utf16result;
+    utf8::utf8to16(it, eos, back_inserter(utf16result));
+```
+
+### Ensure that a string contains valid UTF-8 text
+
+If we have some text that "probably" contains UTF-8 encoded text and we want to replace any invalid UTF-8 sequence with a replacement character, something like the following function may be used:
+
+```cpp
+void fix_utf8_string(std::string& str)
+{
+ std::string temp;
+ utf8::replace_invalid(str.begin(), str.end(), back_inserter(temp));
+ str = temp;
+}
+```
+
+The function will replace any invalid UTF-8 sequence with a Unicode replacement character. There is an overloaded function that enables the caller to supply their own replacement character.
+
+
+## Points of interest
+
+#### Design goals and decisions
+
+The library was designed to be:
+
+1. Generic: for better or worse, there are many C++ string classes out there, and the library should work with as many of them as possible.
+2. Portable: the library should be portable both across different platforms and compilers. The only non-portable code is a small section that declares unsigned integers of different sizes: three typedefs. They can be changed by the users of the library if they don't match their platform. The default setting should work for Windows (both 32 and 64 bit), and most 32 bit and 64 bit Unix derivatives. Support for post-C++03 language features is included for modern compilers at the API level only, so the library should work even with pretty old compilers.
+3. Lightweight: follow the "pay only for what you use" guideline.
+4. Unintrusive: avoid forcing any particular design or even programming style on the user. This is a library, not a framework.
+
+#### Alternatives
+
+In case you want to look into other means of working with UTF-8 strings from C++, here is the list of solutions I am aware of:
+
+1. [ICU Library](http://icu.sourceforge.net/). It is very powerful, complete, feature-rich, mature, and widely used. It is also big, intrusive, non-generic, and doesn't play well with the Standard Library. I definitely recommend looking at ICU even if you don't plan to use it.
+2. C++11 language and library features. Still far from complete, and not easy to use.
+3. [Glib::ustring](http://www.gtkmm.org/gtkmm2/docs/tutorial/html/ch03s04.html). A class specifically made to work with UTF-8 strings that also feels like `std::string`. If you prefer to have yet another string class in your code, it may be worth a look. Be aware of the licensing issues, though.
+4. Platform dependent solutions: Windows and POSIX have functions to convert strings from one encoding to another. That is only a subset of what my library offers, but if that is all you need it may be good enough.
+
+
+## Reference
+
+### Functions From utf8 Namespace
+
+#### utf8::append
+
+Available in version 3.0 and later. Requires a C++ 11 compliant compiler.
+
+Encodes a 32 bit code point as a UTF-8 sequence of octets and appends the sequence to a UTF-8 string.
+
+```cpp
+void append(char32_t cp, std::string& s);
+```
+
+`cp`: a code point to append to the string.
+`s`: a utf-8 encoded string to append the code point to.
+
+Example of use:
+
+```cpp
+std::string u;
+append(0x0448, u);
+assert (u[0] == char(0xd1) && u[1] == char(0x88) && u.length() == 2);
+```
+
+In case of an invalid code point, a `utf8::invalid_code_point` exception is thrown.
+
+
+#### utf8::append
+
+Available in version 1.0 and later.
+
+Encodes a 32 bit code point as a UTF-8 sequence of octets and appends the sequence to a UTF-8 string.
+
+```cpp
+template <typename octet_iterator>
+octet_iterator append(uint32_t cp, octet_iterator result);
+```
+
+`octet_iterator`: an output iterator.
+`cp`: a 32 bit integer representing a code point to append to the sequence.
+`result`: an output iterator to the place in the sequence where to append the code point.
+Return value: an iterator pointing to the place after the newly appended sequence.
+
+Example of use:
+
+```cpp
+unsigned char u[5] = {0,0,0,0,0};
+unsigned char* end = append(0x0448, u);
+assert (u[0] == 0xd1 && u[1] == 0x88 && u[2] == 0 && u[3] == 0 && u[4] == 0);
+```
+
+Note that `append` does not allocate any memory - it is the burden of the caller to make sure there is enough memory allocated for the operation. To make things more interesting, `append` can add anywhere between 1 and 4 octets to the sequence. In practice, you would most often want to use `std::back_inserter` to ensure that the necessary memory is allocated.
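+
+For illustration, a minimal sketch of letting `std::back_inserter` take care of the allocation:
+
+```cpp
+std::string s;
+// back_inserter grows the string by one to four octets per appended code point.
+utf8::append(0x0448, std::back_inserter(s));  // U+0448, encoded as two octets
+utf8::append(0x10346, std::back_inserter(s)); // U+10346, encoded as four octets
+assert (s.size() == 6);
+```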
+
+In case of an invalid code point, a `utf8::invalid_code_point` exception is thrown.
+
+#### utf8::next
+
+Available in version 1.0 and later.
+
+Given the iterator to the beginning of the UTF-8 sequence, it returns the code point and moves the iterator to the next position.
+
+```cpp
+template <typename octet_iterator>
+uint32_t next(octet_iterator& it, octet_iterator end);
+```
+
+`octet_iterator`: an input iterator.
+`it`: a reference to an iterator pointing to the beginning of an UTF-8 encoded code point. After the function returns, it is incremented to point to the beginning of the next code point.
+`end`: end of the UTF-8 sequence to be processed. If `it` gets equal to `end` during the extraction of a code point, an `utf8::not_enough_room` exception is thrown.
+Return value: the 32 bit representation of the processed UTF-8 code point.
+
+Example of use:
+
+```cpp
+char* twochars = "\xe6\x97\xa5\xd1\x88";
+char* w = twochars;
+int cp = next(w, twochars + 6);
+assert (cp == 0x65e5);
+assert (w == twochars + 3);
+```
+
+This function is typically used to iterate through a UTF-8 encoded string.
+
+In case of an invalid UTF-8 sequence, a `utf8::invalid_utf8` exception is thrown.
+
+#### utf8::peek_next
+
+Available in version 2.1 and later.
+
+Given the iterator to the beginning of the UTF-8 sequence, it returns the code point for the following sequence without changing the value of the iterator.
+
+```cpp
+template <typename octet_iterator>
+uint32_t peek_next(octet_iterator it, octet_iterator end);
+```
+
+
+`octet_iterator`: an input iterator.
+`it`: an iterator pointing to the beginning of an UTF-8 encoded code point.
+`end`: end of the UTF-8 sequence to be processed. If `it` gets equal to `end` during the extraction of a code point, an `utf8::not_enough_room` exception is thrown.
+Return value: the 32 bit representation of the processed UTF-8 code point.
+
+Example of use:
+
+```cpp
+char* twochars = "\xe6\x97\xa5\xd1\x88";
+char* w = twochars;
+int cp = peek_next(w, twochars + 6);
+assert (cp == 0x65e5);
+assert (w == twochars);
+```
+
+In case of an invalid UTF-8 sequence, a `utf8::invalid_utf8` exception is thrown.
+
+#### utf8::prior
+
+Available in version 1.02 and later.
+
+Given a reference to an iterator pointing to an octet in a UTF-8 sequence, it decreases the iterator until it hits the beginning of the previous UTF-8 encoded code point and returns the 32 bit representation of the code point.
+
+```cpp
+template <typename octet_iterator>
+uint32_t prior(octet_iterator& it, octet_iterator start);
+```
+
+`octet_iterator`: a bidirectional iterator.
+`it`: a reference to an iterator pointing to an octet within a UTF-8 encoded string. After the function returns, it is decremented to point to the beginning of the previous code point.
+`start`: an iterator to the beginning of the sequence where the search for the beginning of a code point is performed. It is a safety measure to prevent passing the beginning of the string in the search for a UTF-8 lead octet.
+ Return value: the 32 bit representation of the previous code point.
+
+Example of use:
+
+```cpp
+char* twochars = "\xe6\x97\xa5\xd1\x88";
+char* w = twochars + 3;
+int cp = prior (w, twochars);
+assert (cp == 0x65e5);
+assert (w == twochars);
+```
+
+This function has two purposes: one is to iterate backwards through a UTF-8 encoded string. Note that it is usually a better idea to iterate forward instead, since `utf8::next` is faster. The second purpose is to find the beginning of a UTF-8 sequence if we have a random position within a string. Note that in that case `utf8::prior` may not detect an invalid UTF-8 sequence in some scenarios: for instance, if there are superfluous trail octets, it will just skip them.
+
+`it` will typically point to the beginning of a code point, and `start` will point to the beginning of the string to ensure we don't go backwards too far. `it` is decreased until it points to a lead UTF-8 octet, and then the UTF-8 sequence beginning with that octet is decoded to a 32 bit representation and returned.
+
+In case `start` is reached before a UTF-8 lead octet is hit, or if an invalid UTF-8 sequence is started by the lead octet, an `invalid_utf8` exception is thrown.
+
+In case `start` equals `it`, a `not_enough_room` exception is thrown.
+
+#### utf8::advance
+Available in version 1.0 and later.
+
+Advances an iterator by the specified number of code points within an UTF-8 sequence.
+
+```cpp
+template <typename octet_iterator, typename distance_type>
+void advance (octet_iterator& it, distance_type n, octet_iterator end);
+```
+
+`octet_iterator`: an input iterator.
+`distance_type`: an integral type convertible to `octet_iterator`'s difference type.
+`it`: a reference to an iterator pointing to the beginning of an UTF-8 encoded code point. After the function returns, it is incremented to point to the nth following code point.
+`n`: number of code points `it` should be advanced. A negative value means decrement.
+`end`: limit of the UTF-8 sequence to be processed. If `n` is positive and `it` gets equal to `end` during the extraction of a code point, a `utf8::not_enough_room` exception is thrown. If `n` is negative and `it` reaches `end` while `it` points to a trail byte of a UTF-8 sequence, a `utf8::invalid_code_point` exception is thrown.
+
+Example of use:
+
+```cpp
+char* twochars = "\xe6\x97\xa5\xd1\x88";
+char* w = twochars;
+advance (w, 2, twochars + 6);
+assert (w == twochars + 5);
+advance (w, -2, twochars);
+assert (w == twochars);
+```
+
+In case of an invalid code point, a `utf8::invalid_code_point` exception is thrown.
+
+#### utf8::distance
+
+Available in version 1.0 and later.
+
+Given the iterators to two UTF-8 encoded code points in a sequence, returns the number of code points between them.
+
+```cpp
+template <typename octet_iterator>
+typename std::iterator_traits<octet_iterator>::difference_type distance (octet_iterator first, octet_iterator last);
+```
+
+`octet_iterator`: an input iterator.
+`first`: an iterator to a beginning of a UTF-8 encoded code point.
+`last`: an iterator to a "post-end" of the last UTF-8 encoded code point in the sequence whose length we are trying to determine. It can be the beginning of a new code point, or not.
+Return value: the distance between the iterators, in code points.
+
+Example of use:
+
+```cpp
+char* twochars = "\xe6\x97\xa5\xd1\x88";
+size_t dist = utf8::distance(twochars, twochars + 5);
+assert (dist == 2);
+```
+
+This function is used to find the length (in code points) of a UTF-8 encoded string. The reason it is called _distance_, rather than, say, _length_, is mainly that developers are used to _length_ being an O(1) function. Computing the length of a UTF-8 string is a linear operation, and it seemed better to model it after the `std::distance` algorithm.
+
+In case of an invalid UTF-8 sequence, a `utf8::invalid_utf8` exception is thrown. If `last` does not point to the past-the-end of a UTF-8 sequence, a `utf8::not_enough_room` exception is thrown.
+
+#### utf8::utf16to8
+
+Available in version 3.0 and later. Requires a C++ 11 compliant compiler.
+
+Converts a UTF-16 encoded string to UTF-8.
+
+```cpp
+std::string utf16to8(const std::u16string& s);
+```
+
+`s`: a UTF-16 encoded string.
+Return value: A UTF-8 encoded string.
+
+Example of use:
+
+```cpp
+ u16string utf16string = {0x41, 0x0448, 0x65e5, 0xd834, 0xdd1e};
+ string u = utf16to8(utf16string);
+ assert (u.size() == 10);
+```
+
+In case of invalid UTF-16 sequence, a `utf8::invalid_utf16` exception is thrown.
+
+#### utf8::utf16to8
+
+Available in version 3.2 and later. Requires a C++ 17 compliant compiler.
+
+Converts a UTF-16 encoded string to UTF-8.
+
+```cpp
+std::string utf16to8(std::u16string_view s);
+```
+
+`s`: a UTF-16 encoded string.
+Return value: A UTF-8 encoded string.
+
+Example of use:
+
+```cpp
+ u16string utf16string = {0x41, 0x0448, 0x65e5, 0xd834, 0xdd1e};
+    u16string_view utf16stringview(utf16string);
+    string u = utf16to8(utf16stringview);
+ assert (u.size() == 10);
+```
+
+In case of invalid UTF-16 sequence, a `utf8::invalid_utf16` exception is thrown.
+
+
+#### utf8::utf16to8
+
+Available in version 1.0 and later.
+
+Converts a UTF-16 encoded string to UTF-8.
+
+```cpp
+template <typename u16bit_iterator, typename octet_iterator>
+octet_iterator utf16to8 (u16bit_iterator start, u16bit_iterator end, octet_iterator result);
+```
+
+`u16bit_iterator`: an input iterator.
+`octet_iterator`: an output iterator.
+`start`: an iterator pointing to the beginning of the UTF-16 encoded string to convert.
+`end`: an iterator pointing to one past the end of the UTF-16 encoded string to convert.
+`result`: an output iterator to the place in the UTF-8 string where to append the result of conversion.
+Return value: An iterator pointing to the place after the appended UTF-8 string.
+
+Example of use:
+
+```cpp
+unsigned short utf16string[] = {0x41, 0x0448, 0x65e5, 0xd834, 0xdd1e};
+vector<unsigned char> utf8result;
+utf16to8(utf16string, utf16string + 5, back_inserter(utf8result));
+assert (utf8result.size() == 10);
+```
+
+In case of invalid UTF-16 sequence, a `utf8::invalid_utf16` exception is thrown.
+
+#### utf8::utf8to16
+
+Available in version 3.0 and later. Requires a C++ 11 compliant compiler.
+
+Converts an UTF-8 encoded string to UTF-16.
+
+```cpp
+std::u16string utf8to16(const std::string& s);
+```
+
+`s`: an UTF-8 encoded string to convert.
+Return value: A UTF-16 encoded string
+
+Example of use:
+
+```cpp
+string utf8_with_surrogates = "\xe6\x97\xa5\xd1\x88\xf0\x9d\x84\x9e";
+u16string utf16result = utf8to16(utf8_with_surrogates);
+assert (utf16result.length() == 4);
+assert (utf16result[2] == 0xd834);
+assert (utf16result[3] == 0xdd1e);
+```
+
+In case of an invalid UTF-8 sequence, a `utf8::invalid_utf8` exception is thrown.
+
+#### utf8::utf8to16
+
+Available in version 3.2 and later. Requires a C++ 17 compliant compiler.
+
+Converts an UTF-8 encoded string to UTF-16.
+
+```cpp
+std::u16string utf8to16(std::string_view s);
+```
+
+`s`: an UTF-8 encoded string to convert.
+Return value: A UTF-16 encoded string
+
+Example of use:
+
+```cpp
+string_view utf8_with_surrogates = "\xe6\x97\xa5\xd1\x88\xf0\x9d\x84\x9e";
+u16string utf16result = utf8to16(utf8_with_surrogates);
+assert (utf16result.length() == 4);
+assert (utf16result[2] == 0xd834);
+assert (utf16result[3] == 0xdd1e);
+```
+
+In case of an invalid UTF-8 sequence, a `utf8::invalid_utf8` exception is thrown.
+
+
+#### utf8::utf8to16
+
+Available in version 1.0 and later.
+
+Converts an UTF-8 encoded string to UTF-16
+
+```cpp
+template <typename u16bit_iterator, typename octet_iterator>
+u16bit_iterator utf8to16 (octet_iterator start, octet_iterator end, u16bit_iterator result);
+```
+
+`octet_iterator`: an input iterator.
+`u16bit_iterator`: an output iterator.
+`start`: an iterator pointing to the beginning of the UTF-8 encoded string to convert.
+`end`: an iterator pointing to one past the end of the UTF-8 encoded string to convert.
+`result`: an output iterator to the place in the UTF-16 string where to append the result of conversion.
+Return value: An iterator pointing to the place after the appended UTF-16 string.
+
+Example of use:
+
+```cpp
+char utf8_with_surrogates[] = "\xe6\x97\xa5\xd1\x88\xf0\x9d\x84\x9e";
+vector <unsigned short> utf16result;
+utf8to16(utf8_with_surrogates, utf8_with_surrogates + 9, back_inserter(utf16result));
+assert (utf16result.size() == 4);
+assert (utf16result[2] == 0xd834);
+assert (utf16result[3] == 0xdd1e);
+```
+
+In case of an invalid UTF-8 sequence, a `utf8::invalid_utf8` exception is thrown. If `end` does not point to the past-the-end of a UTF-8 sequence, a `utf8::not_enough_room` exception is thrown.
+
+#### utf8::utf32to8
+
+Available in version 3.0 and later. Requires a C++ 11 compliant compiler.
+
+Converts a UTF-32 encoded string to UTF-8.
+
+```cpp
+std::string utf32to8(const std::u32string& s);
+```
+
+`s`: a UTF-32 encoded string.
+Return value: a UTF-8 encoded string.
+
+Example of use:
+
+```cpp
+u32string utf32string = {0x448, 0x65E5, 0x10346};
+string utf8result = utf32to8(utf32string);
+assert (utf8result.size() == 9);
+```
+
+In case of invalid UTF-32 string, a `utf8::invalid_code_point` exception is thrown.
+
+#### utf8::utf32to8
+
+Available in version 3.2 and later. Requires a C++ 17 compliant compiler.
+
+Converts a UTF-32 encoded string to UTF-8.
+
+```cpp
+std::string utf32to8(std::u32string_view s);
+```
+
+`s`: a UTF-32 encoded string.
+Return value: a UTF-8 encoded string.
+
+Example of use:
+
+```cpp
+u32string utf32string = {0x448, 0x65E5, 0x10346};
+u32string_view utf32stringview(utf32string);
+string utf8result = utf32to8(utf32stringview);
+assert (utf8result.size() == 9);
+```
+
+In case of invalid UTF-32 string, a `utf8::invalid_code_point` exception is thrown.
+
+
+#### utf8::utf32to8
+
+Available in version 1.0 and later.
+
+Converts a UTF-32 encoded string to UTF-8.
+
+```cpp
+template <typename octet_iterator, typename u32bit_iterator>
+octet_iterator utf32to8 (u32bit_iterator start, u32bit_iterator end, octet_iterator result);
+```
+
+`octet_iterator`: an output iterator.
+`u32bit_iterator`: an input iterator.
+`start`: an iterator pointing to the beginning of the UTF-32 encoded string to convert.
+`end`: an iterator pointing to one past the end of the UTF-32 encoded string to convert.
+`result`: an output iterator to the place in the UTF-8 string where to append the result of conversion.
+Return value: An iterator pointing to the place after the appended UTF-8 string.
+
+Example of use:
+
+```cpp
+int utf32string[] = {0x448, 0x65E5, 0x10346, 0};
+vector<unsigned char> utf8result;
+utf32to8(utf32string, utf32string + 3, back_inserter(utf8result));
+assert (utf8result.size() == 9);
+```
+
+In case of invalid UTF-32 string, a `utf8::invalid_code_point` exception is thrown.
+
+#### utf8::utf8to32
+
+Available in version 3.0 and later. Requires a C++ 11 compliant compiler.
+
+Converts a UTF-8 encoded string to UTF-32.
+
+```cpp
+std::u32string utf8to32(const std::string& s);
+```
+
+`s`: a UTF-8 encoded string.
+Return value: a UTF-32 encoded string.
+
+Example of use:
+
+```cpp
+const char* twochars = "\xe6\x97\xa5\xd1\x88";
+u32string utf32result = utf8to32(twochars);
+assert (utf32result.size() == 2);
+```
+
+In case of an invalid UTF-8 sequence, a `utf8::invalid_utf8` exception is thrown.
+
+#### utf8::utf8to32
+
+Available in version 3.2 and later. Requires a C++ 17 compliant compiler.
+
+Converts a UTF-8 encoded string to UTF-32.
+
+```cpp
+std::u32string utf8to32(std::string_view s);
+```
+
+`s`: a UTF-8 encoded string.
+Return value: a UTF-32 encoded string.
+
+Example of use:
+
+```cpp
+string_view twochars = "\xe6\x97\xa5\xd1\x88";
+u32string utf32result = utf8to32(twochars);
+assert (utf32result.size() == 2);
+```
+
+In case of an invalid UTF-8 sequence, a `utf8::invalid_utf8` exception is thrown.
+
+
+#### utf8::utf8to32
+
+Available in version 1.0 and later.
+
+Converts a UTF-8 encoded string to UTF-32.
+
+```cpp
+template <typename octet_iterator, typename u32bit_iterator>
+u32bit_iterator utf8to32 (octet_iterator start, octet_iterator end, u32bit_iterator result);
+```
+
+`octet_iterator`: an input iterator.
+`u32bit_iterator`: an output iterator.
+`start`: an iterator pointing to the beginning of the UTF-8 encoded string to convert.
+`end`: an iterator pointing to one past the end of the UTF-8 encoded string to convert.
+`result`: an output iterator to the place in the UTF-32 string where to append the result of conversion.
+Return value: An iterator pointing to the place after the appended UTF-32 string.
+
+Example of use:
+
+```cpp
+char* twochars = "\xe6\x97\xa5\xd1\x88";
+vector<int> utf32result;
+utf8to32(twochars, twochars + 5, back_inserter(utf32result));
+assert (utf32result.size() == 2);
+```
+
+In case of an invalid UTF-8 sequence, a `utf8::invalid_utf8` exception is thrown. If `end` does not point to the past-the-end of a UTF-8 sequence, a `utf8::not_enough_room` exception is thrown.
+
+#### utf8::find_invalid
+
+Available in version 3.0 and later. Requires a C++ 11 compliant compiler.
+
+Detects an invalid sequence within a UTF-8 string.
+
+```cpp
+std::size_t find_invalid(const std::string& s);
+```
+
+`s`: a UTF-8 encoded string.
+Return value: the index of the first invalid octet in the UTF-8 string. In case none were found, equals `std::string::npos`.
+
+Example of use:
+
+```cpp
+string utf_invalid = "\xe6\x97\xa5\xd1\x88\xfa";
+auto invalid = find_invalid(utf_invalid);
+assert (invalid == 5);
+```
+
+This function is typically used to make sure a UTF-8 string is valid before processing it with other functions. It is especially important to call it before doing any of the _unchecked_ operations on it.
+
+#### utf8::find_invalid
+
+Available in version 3.2 and later. Requires a C++ 17 compliant compiler.
+
+Detects an invalid sequence within a UTF-8 string.
+
+```cpp
+std::size_t find_invalid(std::string_view s);
+```
+
+`s`: a UTF-8 encoded string.
+Return value: the index of the first invalid octet in the UTF-8 string. In case none were found, equals `std::string_view::npos`.
+
+Example of use:
+
+```cpp
+string_view utf_invalid = "\xe6\x97\xa5\xd1\x88\xfa";
+auto invalid = find_invalid(utf_invalid);
+assert (invalid == 5);
+```
+
+This function is typically used to make sure a UTF-8 string is valid before processing it with other functions. It is especially important to call it before doing any of the _unchecked_ operations on it.
+
+
+#### utf8::find_invalid
+
+Available in version 1.0 and later.
+
+Detects an invalid sequence within a UTF-8 string.
+
+```cpp
+template <typename octet_iterator>
+octet_iterator find_invalid(octet_iterator start, octet_iterator end);
+```
+
+`octet_iterator`: an input iterator.
+`start`: an iterator pointing to the beginning of the UTF-8 string to test for validity.
+`end`: an iterator pointing to one past the end of the UTF-8 string to test for validity.
+Return value: an iterator pointing to the first invalid octet in the UTF-8 string. In case none were found, equals `end`.
+
+Example of use:
+
+```cpp
+char utf_invalid[] = "\xe6\x97\xa5\xd1\x88\xfa";
+char* invalid = find_invalid(utf_invalid, utf_invalid + 6);
+assert (invalid == utf_invalid + 5);
+```
+
+This function is typically used to make sure a UTF-8 string is valid before processing it with other functions. It is especially important to call it before doing any of the _unchecked_ operations on it.
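+
+For instance, a minimal sketch of validating a string once and then iterating over it with the faster functions from the `utf8::unchecked` namespace:
+
+```cpp
+std::string s = "\xe6\x97\xa5\xd1\x88";
+// Validate the whole range up front ...
+if (utf8::find_invalid(s.begin(), s.end()) == s.end()) {
+    // ... then it is safe to use the unchecked functions on it.
+    std::string::iterator it = s.begin();
+    while (it != s.end())
+        std::cout << utf8::unchecked::next(it) << "\n"; // prints the value of each code point
+}
+```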
+
+#### utf8::is_valid
+
+Available in version 3.0 and later. Requires a C++ 11 compliant compiler.
+
+Checks whether a string object contains valid UTF-8 encoded text.
+
+```cpp
+bool is_valid(const std::string& s);
+```
+
+`s`: a UTF-8 encoded string.
+Return value: `true` if the string contains valid UTF-8 encoded text; `false` if not.
+
+Example of use:
+
+```cpp
+char utf_invalid[] = "\xe6\x97\xa5\xd1\x88\xfa";
+bool bvalid = is_valid(utf_invalid);
+assert (bvalid == false);
+```
+
+You may want to use `is_valid` to make sure that a string contains valid UTF-8 text without the need to know where it fails if it is not valid.
+
+#### utf8::is_valid
+
+Available in version 3.2 and later. Requires a C++ 17 compliant compiler.
+
+Checks whether a string object contains valid UTF-8 encoded text.
+
+```cpp
+bool is_valid(std::string_view s);
+```
+
+`s`: a UTF-8 encoded string.
+Return value: `true` if the string contains valid UTF-8 encoded text; `false` if not.
+
+Example of use:
+
+```cpp
+string_view utf_invalid = "\xe6\x97\xa5\xd1\x88\xfa";
+bool bvalid = is_valid(utf_invalid);
+assert (bvalid == false);
+```
+
+You may want to use `is_valid` to make sure that a string contains valid UTF-8 text without the need to know where it fails if it is not valid.
+
+
+#### utf8::is_valid
+
+Available in version 1.0 and later.
+
+Checks whether a sequence of octets is a valid UTF-8 string.
+
+```cpp
+template <typename octet_iterator>
+bool is_valid(octet_iterator start, octet_iterator end);
+```
+
+`octet_iterator`: an input iterator.
+`start`: an iterator pointing to the beginning of the UTF-8 string to test for validity.
+`end`: an iterator pointing to one past the end of the UTF-8 string to test for validity.
+Return value: `true` if the sequence is a valid UTF-8 string; `false` if not.
+
+Example of use:
+
+```cpp
+char utf_invalid[] = "\xe6\x97\xa5\xd1\x88\xfa";
+bool bvalid = is_valid(utf_invalid, utf_invalid + 6);
+assert (bvalid == false);
+```
+
+`is_valid` is a shorthand for `find_invalid(start, end) == end`. You may want to use it to make sure that a byte sequence is a valid UTF-8 string without the need to know where it fails if it is not valid.
+
+#### utf8::replace_invalid
+
+Available in version 3.0 and later. Requires a C++ 11 compliant compiler.
+
+Replaces all invalid UTF-8 sequences within a string with a replacement marker.
+
+```cpp
+std::string replace_invalid(const std::string& s, char32_t replacement);
+std::string replace_invalid(const std::string& s);
+```
+
+`s`: a UTF-8 encoded string.
+`replacement`: A Unicode code point for the replacement marker. The version without this parameter assumes the value `0xfffd`.
+Return value: A UTF-8 encoded string with replaced invalid sequences.
+
+Example of use:
+
+```cpp
+string invalid_sequence = "a\x80\xe0\xa0\xc0\xaf\xed\xa0\x80z";
+string replace_invalid_result = replace_invalid(invalid_sequence, '?');
+bool bvalid = is_valid(replace_invalid_result);
+assert (bvalid);
+const string fixed_invalid_sequence = "a????z";
+assert (fixed_invalid_sequence == replace_invalid_result);
+```
+
+#### utf8::replace_invalid
+
+Available in version 3.2 and later. Requires a C++ 17 compliant compiler.
+
+Replaces all invalid UTF-8 sequences within a string with a replacement marker.
+
+```cpp
+std::string replace_invalid(std::string_view s, char32_t replacement);
+std::string replace_invalid(std::string_view s);
+```
+
+`s`: a UTF-8 encoded string.
+`replacement`: A Unicode code point for the replacement marker. The version without this parameter assumes the value `0xfffd`.
+Return value: A UTF-8 encoded string with replaced invalid sequences.
+
+Example of use:
+
+```cpp
+string_view invalid_sequence = "a\x80\xe0\xa0\xc0\xaf\xed\xa0\x80z";
+string replace_invalid_result = replace_invalid(invalid_sequence, '?');
+bool bvalid = is_valid(replace_invalid_result);
+assert (bvalid);
+const string fixed_invalid_sequence = "a????z";
+assert (fixed_invalid_sequence == replace_invalid_result);
+```
+
+
+#### utf8::replace_invalid
+
+Available in version 2.0 and later.
+
+Replaces all invalid UTF-8 sequences within a string with a replacement marker.
+
+```cpp
+template <typename octet_iterator, typename output_iterator>
+output_iterator replace_invalid(octet_iterator start, octet_iterator end, output_iterator out, uint32_t replacement);
+template <typename octet_iterator, typename output_iterator>
+output_iterator replace_invalid(octet_iterator start, octet_iterator end, output_iterator out);
+```
+
+`octet_iterator`: an input iterator.
+`output_iterator`: an output iterator.
+`start`: an iterator pointing to the beginning of the UTF-8 string to look for invalid UTF-8 sequences.
+`end`: an iterator pointing to one past the end of the UTF-8 string to look for invalid UTF-8 sequences.
+`out`: An output iterator to the range where the result of replacement is stored.
+`replacement`: A Unicode code point for the replacement marker. The version without this parameter assumes the value `0xfffd`.
+Return value: An iterator pointing to the place after the UTF-8 string with replaced invalid sequences.
+
+Example of use:
+
+```cpp
+char invalid_sequence[] = "a\x80\xe0\xa0\xc0\xaf\xed\xa0\x80z";
+vector<char> replace_invalid_result;
+replace_invalid (invalid_sequence, invalid_sequence + sizeof(invalid_sequence), back_inserter(replace_invalid_result), '?');
+bool bvalid = is_valid(replace_invalid_result.begin(), replace_invalid_result.end());
+assert (bvalid);
+const char* fixed_invalid_sequence = "a????z";
+assert (std::equal(replace_invalid_result.begin(), replace_invalid_result.end(), fixed_invalid_sequence));
+```
+
+`replace_invalid` does not perform in-place replacement of invalid sequences. Rather, it produces a copy of the original string with the invalid sequences replaced with a replacement marker. Therefore, `out` must not be in the `[start, end]` range.
+
+#### utf8::starts_with_bom
+
+Available in version 3.0 and later. Requires a C++ 11 compliant compiler.
+
+Checks whether a string starts with a UTF-8 byte order mark (BOM).
+
+```cpp
+bool starts_with_bom(const std::string& s);
+```
+
+`s`: a UTF-8 encoded string.
+Return value: `true` if the string starts with a UTF-8 byte order mark; `false` if not.
+
+Example of use:
+
+```cpp
+string byte_order_mark = {char(0xef), char(0xbb), char(0xbf)};
+bool bbom = starts_with_bom(byte_order_mark);
+assert (bbom == true);
+string threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88";
+bool no_bbom = starts_with_bom(threechars);
+assert (no_bbom == false);
+ ```
+
+The typical use of this function is to check the first three bytes of a file. If they form the UTF-8 BOM, we want to skip them before processing the actual UTF-8 encoded text.
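+
+A minimal sketch of that pattern, assuming the file content has already been read into a string:
+
+```cpp
+// `text` holds the raw bytes of a file that may begin with a UTF-8 BOM.
+std::string text = "\xef\xbb\xbf" "actual content";
+if (utf8::starts_with_bom(text))
+    text.erase(0, 3); // the UTF-8 BOM is exactly three octets: 0xef 0xbb 0xbf
+```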
+
+
+#### utf8::starts_with_bom
+
+Available in version 3.2 and later. Requires a C++ 17 compliant compiler.
+
+Checks whether a string starts with a UTF-8 byte order mark (BOM).
+
+```cpp
+bool starts_with_bom(std::string_view s);
+```
+
+`s`: a UTF-8 encoded string.
+Return value: `true` if the string starts with a UTF-8 byte order mark; `false` if not.
+
+Example of use:
+
+```cpp
+string byte_order_mark = {char(0xef), char(0xbb), char(0xbf)};
+string_view byte_order_mark_view(byte_order_mark);
+bool bbom = starts_with_bom(byte_order_mark_view);
+assert (bbom);
+string_view threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88";
+bool no_bbom = starts_with_bom(threechars);
+assert (!no_bbom);
+ ```
+
+The typical use of this function is to check the first three bytes of a file. If they form the UTF-8 BOM, we want to skip them before processing the actual UTF-8 encoded text.
+
+
+#### utf8::starts_with_bom
+
+Available in version 2.3 and later.
+
+Checks whether an octet sequence starts with a UTF-8 byte order mark (BOM).
+
+```cpp
+template <typename octet_iterator>
+bool starts_with_bom (octet_iterator it, octet_iterator end);
+```
+
+`octet_iterator`: an input iterator.
+`it`: an iterator pointing to the beginning of the octet sequence to check.
+`end`: an iterator pointing to one past the end of the sequence to check.
+Return value: `true` if the sequence starts with a UTF-8 byte order mark; `false` if not.
+
+Example of use:
+
+```cpp
+unsigned char byte_order_mark[] = {0xef, 0xbb, 0xbf};
+bool bbom = starts_with_bom(byte_order_mark, byte_order_mark + sizeof(byte_order_mark));
+assert (bbom == true);
+```
+
+The typical use of this function is to check the first three bytes of a file. If they form the UTF-8 BOM, we want to skip them before processing the actual UTF-8 encoded text.
+
+### Types From utf8 Namespace
+
+#### utf8::exception
+
+Available in version 2.3 and later.
+
+Base class for the exceptions thrown by UTF CPP library functions.
+
+```cpp
+class exception : public std::exception {};
+```
+
+Example of use:
+
+```cpp
+try {
+ code_that_uses_utf_cpp_library();
+}
+catch(const utf8::exception& utfcpp_ex) {
+ cerr << utfcpp_ex.what();
+}
+```
+
+#### utf8::invalid_code_point
+
+Available in version 1.0 and later.
+
+Thrown by UTF8 CPP functions such as `advance` and `next` if a UTF-8 sequence represents an invalid code point.
+
+```cpp
+class invalid_code_point : public exception {
+public:
+ uint32_t code_point() const;
+};
+```
+
+Member function `code_point()` can be used to determine the invalid code point that caused the exception to be thrown.
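+
+A minimal sketch of inspecting the offending value, assuming the C++ 11 overload of `append`:
+
+```cpp
+try {
+    std::string s;
+    utf8::append(char32_t(0x110000), s); // beyond U+10FFFF, the last valid code point
+}
+catch (const utf8::invalid_code_point& ex) {
+    std::cerr << ex.what() << ": " << ex.code_point() << "\n";
+}
+```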
+
+#### utf8::invalid_utf8
+
+Available in version 1.0 and later.
+
+Thrown by UTF8 CPP functions such as `next` and `prior` if an invalid UTF-8 sequence is detected during decoding.
+
+```cpp
+class invalid_utf8 : public exception {
+public:
+ uint8_t utf8_octet() const;
+};
+```
+
+Member function `utf8_octet()` can be used to determine the beginning of the byte sequence that caused the exception to be thrown.
+
+#### utf8::invalid_utf16
+
+Available in version 1.0 and later.
+
+Thrown by UTF8 CPP function `utf16to8` if an invalid UTF-16 sequence is detected during decoding.
+
+```cpp
+class invalid_utf16 : public exception {
+public:
+ uint16_t utf16_word() const;
+};
+```
+
+Member function `utf16_word()` can be used to determine the UTF-16 code unit that caused the exception to be thrown.
+
+#### utf8::not_enough_room
+
+Available in version 1.0 and later.
+
+Thrown by UTF8 CPP functions such as `next` if the end of the decoded UTF-8 sequence was reached before the code point was decoded.
+
+```cpp
+class not_enough_room : public exception {};
+```
+
+#### utf8::iterator
+
+Available in version 2.0 and later.
+
+Adapts the underlying octet iterator to iterate over the sequence of code points, rather than raw octets.
+
+```cpp
+template <typename octet_iterator>
+class iterator;
+```
+
+##### Member functions
+
+`iterator();` the default constructor; the underlying octet_iterator is constructed with its default constructor.
+
+`explicit iterator (const octet_iterator& octet_it, const octet_iterator& range_start, const octet_iterator& range_end);` a constructor that initializes the underlying octet_iterator with octet_it and sets the range in which the iterator is considered valid.
+
+`octet_iterator base () const;` returns the underlying octet_iterator.
+
+`uint32_t operator * () const;` decodes the utf-8 sequence the underlying octet_iterator is pointing to and returns the code point.
+
+`bool operator == (const iterator& rhs) const;` returns `true` if the two underlying iterators are equal.
+
+`bool operator != (const iterator& rhs) const;` returns `true` if the two underlying iterators are not equal.
+
+`iterator& operator ++ ();` the prefix increment - moves the iterator to the next UTF-8 encoded code point.
+
+`iterator operator ++ (int);` the postfix increment - moves the iterator to the next UTF-8 encoded code point and returns the current one.
+
+`iterator& operator -- ();` the prefix decrement - moves the iterator to the previous UTF-8 encoded code point.
+
+`iterator operator -- (int);` the postfix decrement - moves the iterator to the previous UTF-8 encoded code point and returns the current one.
+
+Example of use:
+
+```cpp
+char* threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88";
+utf8::iterator<char*> it(threechars, threechars, threechars + 9);
+utf8::iterator<char*> it2 = it;
+assert (it2 == it);
+assert (*it == 0x10346);
+assert (*(++it) == 0x65e5);
+assert ((*it++) == 0x65e5);
+assert (*it == 0x0448);
+assert (it != it2);
+utf8::iterator<char*> endit (threechars + 9, threechars, threechars + 9);
+assert (++it == endit);
+assert (*(--it) == 0x0448);
+assert ((*it--) == 0x0448);
+assert (*it == 0x65e5);
+assert (--it == utf8::iterator<char*>(threechars, threechars, threechars + 9));
+assert (*it == 0x10346);
+```
+
+The purpose of the `utf8::iterator` adapter is to enable easy iteration as well as the use of STL algorithms with UTF-8 encoded strings. Increment and decrement operators are implemented in terms of the `utf8::next()` and `utf8::prior()` functions.
+
+Note that the `utf8::iterator` adapter is a checked iterator. It operates on the range specified in the constructor; any attempt to go out of that range will result in an exception. Even the comparison operators require both iterator objects to be constructed against the same range - otherwise an exception is thrown. Typically, the range will be determined by the sequence container functions `begin` and `end`, i.e.:
+
+```cpp
+std::string s = "example";
+utf8::iterator<std::string::iterator> i (s.begin(), s.begin(), s.end());
+```
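+
+As a further illustration, a minimal sketch of running a standard algorithm over code points rather than octets:
+
+```cpp
+std::string s = "\xe6\x97\xa5\xd1\x88"; // two code points
+utf8::iterator<std::string::iterator> first(s.begin(), s.begin(), s.end());
+utf8::iterator<std::string::iterator> last(s.end(), s.begin(), s.end());
+// std::count works because the iterator dereferences to the code point value.
+std::ptrdiff_t n = std::count(first, last, 0x65e5);
+assert (n == 1);
+```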
+
+### Functions From utf8::unchecked Namespace
+
+#### utf8::unchecked::append
+
+Available in version 1.0 and later.
+
+Encodes a 32 bit code point as a UTF-8 sequence of octets and appends the sequence to a UTF-8 string.
+
+```cpp
+template <typename octet_iterator>
+octet_iterator append(uint32_t cp, octet_iterator result);
+```
+
+`cp`: A 32 bit integer representing a code point to append to the sequence.
+`result`: An output iterator to the place in the sequence where to append the code point.
+Return value: An iterator pointing to the place after the newly appended sequence.
+
+Example of use:
+
+```cpp
+unsigned char u[5] = {0,0,0,0,0};
+unsigned char* end = unchecked::append(0x0448, u);
+assert (u[0] == 0xd1 && u[1] == 0x88 && u[2] == 0 && u[3] == 0 && u[4] == 0);
+```
+
+This is a faster but less safe version of `utf8::append`. It does not check for validity of the supplied code point, and may produce an invalid UTF-8 sequence.
+
+#### utf8::unchecked::next
+
+Available in version 1.0 and later.
+
+Given the iterator to the beginning of a UTF-8 sequence, it returns the code point and moves the iterator to the next position.
+
+```cpp
+template <typename octet_iterator>
+uint32_t next(octet_iterator& it);
+```
+
+`it`: a reference to an iterator pointing to the beginning of an UTF-8 encoded code point. After the function returns, it is incremented to point to the beginning of the next code point.
+ Return value: the 32 bit representation of the processed UTF-8 code point.
+
+Example of use:
+
+```cpp
+char* twochars = "\xe6\x97\xa5\xd1\x88";
+char* w = twochars;
+int cp = unchecked::next(w);
+assert (cp == 0x65e5);
+assert (w == twochars + 3);
+```
+
+This is a faster but less safe version of `utf8::next`. It does not check for validity of the supplied UTF-8 sequence.
+
+#### utf8::unchecked::peek_next
+
+Available in version 2.1 and later.
+
+Given the iterator to the beginning of a UTF-8 sequence, it returns the code point.
+
+```cpp
+template <typename octet_iterator>
+uint32_t peek_next(octet_iterator it);
+```
+
+`it`: an iterator pointing to the beginning of an UTF-8 encoded code point.
+Return value: the 32 bit representation of the processed UTF-8 code point.
+
+Example of use:
+
+```cpp
+char* twochars = "\xe6\x97\xa5\xd1\x88";
+char* w = twochars;
+int cp = unchecked::peek_next(w);
+assert (cp == 0x65e5);
+assert (w == twochars);
+```
+
+This is a faster but less safe version of `utf8::peek_next`. It does not check for validity of the supplied UTF-8 sequence.
+
+#### utf8::unchecked::prior
+
+Available in version 1.02 and later.
+
+Given a reference to an iterator pointing to an octet in a UTF-8 sequence, it decreases the iterator until it hits the beginning of the previous UTF-8 encoded code point and returns the 32 bit representation of the code point.
+
+```cpp
+template <typename octet_iterator>
+uint32_t prior(octet_iterator& it);
+```
+
+`it`: a reference to an iterator pointing to an octet within a UTF-8 encoded string. After the function returns, it is decremented to point to the beginning of the previous code point.
+ Return value: the 32 bit representation of the previous code point.
+
+Example of use:
+
+```cpp
+char* twochars = "\xe6\x97\xa5\xd1\x88";
+char* w = twochars + 3;
+int cp = unchecked::prior (w);
+assert (cp == 0x65e5);
+assert (w == twochars);
+```
+
+This is a faster but less safe version of `utf8::prior`. It does not check for validity of the supplied UTF-8 sequence and offers no boundary checking.
+
+#### utf8::unchecked::advance
+
+Available in version 1.0 and later.
+
+Advances an iterator by the specified number of code points within an UTF-8 sequence.
+
+```cpp
+template <typename octet_iterator, typename distance_type>
+void advance (octet_iterator& it, distance_type n);
+```
+
+`it`: a reference to an iterator pointing to the beginning of an UTF-8 encoded code point. After the function returns, it is incremented to point to the nth following code point.
+`n`: number of code points `it` should be advanced. A negative value means decrement.
+
+Example of use:
+
+```cpp
+char* twochars = "\xe6\x97\xa5\xd1\x88";
+char* w = twochars;
+unchecked::advance (w, 2);
+assert (w == twochars + 5);
+```
+
+This is a faster but less safe version of `utf8::advance`. It does not check for validity of the supplied UTF-8 sequence and offers no boundary checking.
+
+#### utf8::unchecked::distance
+
+Available in version 1.0 and later.
+
+Given the iterators to two UTF-8 encoded code points in a sequence, returns the number of code points between them.
+
+```cpp
+template <typename octet_iterator>
+typename std::iterator_traits<octet_iterator>::difference_type distance (octet_iterator first, octet_iterator last);
+```
+
+`first`: an iterator to a beginning of a UTF-8 encoded code point.
+`last`: an iterator to a "post-end" of the last UTF-8 encoded code point in the sequence whose length we are trying to determine. It can be the beginning of a new code point, or not.
+Return value: the distance between the iterators, in code points.
+
+Example of use:
+
+```cpp
+char* twochars = "\xe6\x97\xa5\xd1\x88";
+size_t dist = utf8::unchecked::distance(twochars, twochars + 5);
+assert (dist == 2);
+```
+
+This is a faster but less safe version of `utf8::distance`. It does not check for validity of the supplied UTF-8 sequence.
+
+#### utf8::unchecked::utf16to8
+
+Available in version 1.0 and later.
+
+Converts a UTF-16 encoded string to UTF-8.
+
+```cpp
+template <typename u16bit_iterator, typename octet_iterator>
+octet_iterator utf16to8 (u16bit_iterator start, u16bit_iterator end, octet_iterator result);
+```
+
+`start`: an iterator pointing to the beginning of the UTF-16 encoded string to convert.
+`end`: an iterator pointing to one past the end of the UTF-16 encoded string to convert.
+`result`: an output iterator to the place in the UTF-8 string where to append the result of conversion.
+Return value: An iterator pointing to the place after the appended UTF-8 string.
+
+Example of use:
+
+```cpp
+unsigned short utf16string[] = {0x41, 0x0448, 0x65e5, 0xd834, 0xdd1e};
+vector<unsigned char> utf8result;
+unchecked::utf16to8(utf16string, utf16string + 5, back_inserter(utf8result));
+assert (utf8result.size() == 10);
+```
+
+This is a faster but less safe version of `utf8::utf16to8`. It does not check for validity of the supplied UTF-16 sequence.
+
+#### utf8::unchecked::utf8to16
+
+Available in version 1.0 and later.
+
+Converts an UTF-8 encoded string to UTF-16
+
+```cpp
+template <typename u16bit_iterator, typename octet_iterator>
+u16bit_iterator utf8to16 (octet_iterator start, octet_iterator end, u16bit_iterator result);
+```
+
+`start`: an iterator pointing to the beginning of the UTF-8 encoded string to convert.
+`end`: an iterator pointing to one past the end of the UTF-8 encoded string to convert.
+`result`: an output iterator to the place in the UTF-16 string where to append the result of conversion.
+Return value: An iterator pointing to the place after the appended UTF-16 string.
+
+Example of use:
+
+```cpp
+char utf8_with_surrogates[] = "\xe6\x97\xa5\xd1\x88\xf0\x9d\x84\x9e";
+vector <unsigned short> utf16result;
+unchecked::utf8to16(utf8_with_surrogates, utf8_with_surrogates + 9, back_inserter(utf16result));
+assert (utf16result.size() == 4);
+assert (utf16result[2] == 0xd834);
+assert (utf16result[3] == 0xdd1e);
+```
+
+This is a faster but less safe version of `utf8::utf8to16`. It does not check for validity of the supplied UTF-8 sequence.
+
+#### utf8::unchecked::utf32to8
+
+Available in version 1.0 and later.
+
+Converts a UTF-32 encoded string to UTF-8.
+
+```cpp
+template <typename octet_iterator, typename u32bit_iterator>
+octet_iterator utf32to8 (u32bit_iterator start, u32bit_iterator end, octet_iterator result);
+```
+
+`start`: an iterator pointing to the beginning of the UTF-32 encoded string to convert.
+`end`: an iterator pointing to one past the end of the UTF-32 encoded string to convert.
+`result`: an output iterator to the place in the UTF-8 string where to append the result of conversion.
+Return value: An iterator pointing to the place after the appended UTF-8 string.
+
+Example of use:
+
+```cpp
+int utf32string[] = {0x448, 0x65e5, 0x10346, 0};
+vector<unsigned char> utf8result;
+unchecked::utf32to8(utf32string, utf32string + 3, back_inserter(utf8result));
+assert (utf8result.size() == 9);
+```
+
+This is a faster but less safe version of `utf8::utf32to8`. It does not check for validity of the supplied UTF-32 sequence.
+
+#### utf8::unchecked::utf8to32
+
+Available in version 1.0 and later.
+
+Converts a UTF-8 encoded string to UTF-32.
+
+```cpp
+template <typename octet_iterator, typename u32bit_iterator>
+u32bit_iterator utf8to32 (octet_iterator start, octet_iterator end, u32bit_iterator result);
+```
+
+`start`: an iterator pointing to the beginning of the UTF-8 encoded string to convert.
+`end`: an iterator pointing to one past the end of the UTF-8 encoded string to convert.
+`result`: an output iterator to the place in the UTF-32 string where to append the result of conversion.
+Return value: An iterator pointing to the place after the appended UTF-32 string.
+
+Example of use:
+
+```cpp
+char* twochars = "\xe6\x97\xa5\xd1\x88";
+vector<int> utf32result;
+unchecked::utf8to32(twochars, twochars + 5, back_inserter(utf32result));
+assert (utf32result.size() == 2);
+```
+
+This is a faster but less safe version of `utf8::utf8to32`. It does not check for validity of the supplied UTF-8 sequence.
+
+#### utf8::unchecked::replace_invalid
+
+Available in version 3.1 and later.
+
+Replaces all invalid UTF-8 sequences within a string with a replacement marker.
+
+```cpp
+template <typename octet_iterator, typename output_iterator>
+output_iterator replace_invalid(octet_iterator start, octet_iterator end, output_iterator out, uint32_t replacement);
+template <typename octet_iterator, typename output_iterator>
+output_iterator replace_invalid(octet_iterator start, octet_iterator end, output_iterator out);
+```
+
+`octet_iterator`: an input iterator.
+`output_iterator`: an output iterator.
+`start`: an iterator pointing to the beginning of the UTF-8 string to look for invalid UTF-8 sequences.
+`end`: an iterator pointing to one past the end of the UTF-8 string to look for invalid UTF-8 sequences.
+`out`: An output iterator to the range where the result of replacement is stored.
+`replacement`: A Unicode code point for the replacement marker. The version without this parameter assumes the value `0xfffd`.
+Return value: An iterator pointing to the place after the UTF-8 string with replaced invalid sequences.
+
+Example of use:
+
+```cpp
+char invalid_sequence[] = "a\x80\xe0\xa0\xc0\xaf\xed\xa0\x80z";
+vector<char> replace_invalid_result;
+unchecked::replace_invalid (invalid_sequence, invalid_sequence + sizeof(invalid_sequence), back_inserter(replace_invalid_result), '?');
+bool bvalid = utf8::is_valid(replace_invalid_result.begin(), replace_invalid_result.end());
+assert (bvalid);
+const char* fixed_invalid_sequence = "a????z";
+assert (std::equal(replace_invalid_result.begin(), replace_invalid_result.end(), fixed_invalid_sequence));
+```
+
+`replace_invalid` does not perform in-place replacement of invalid sequences. Rather, it produces a copy of the original string with the invalid sequences replaced with a replacement marker. Therefore, `out` must not be in the `[start, end]` range.
+
+Unlike `utf8::replace_invalid`, this function does not verify validity of the replacement marker.
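+
+Because the marker is not checked, a caller-side sketch (the wrapper function is ours, not part of the library) that always passes a known-good marker such as `0xfffd` keeps the output valid:
+
+```cpp
+#include <iterator>
+#include <string>
+#include "utf8.h"
+
+std::string sanitize_utf8(const std::string& s)
+{
+    std::string result;
+    // 0xfffd (the replacement character) is itself a valid code point,
+    // so the unchecked overload cannot produce an invalid marker here.
+    utf8::unchecked::replace_invalid(s.begin(), s.end(),
+                                     std::back_inserter(result), 0xfffd);
+    return result;
+}
+```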
+
+### Types From utf8::unchecked Namespace
+
+#### utf8::unchecked::iterator
+
+Available in version 2.0 and later.
+
+Adapts the underlying octet iterator to iterate over the sequence of code points, rather than raw octets.
+
+```cpp
+template <typename octet_iterator>
+class iterator;
+```
+
+##### Member functions
+
+`iterator();` the default constructor; the underlying octet_iterator is constructed with its default constructor.
+
+`explicit iterator (const octet_iterator& octet_it);` a constructor that initializes the underlying octet_iterator with `octet_it`.
+
+`octet_iterator base () const;` returns the underlying octet_iterator.
+
+`uint32_t operator * () const;` decodes the UTF-8 sequence the underlying octet_iterator is pointing to and returns the code point.
+
+`bool operator == (const iterator& rhs) const;` returns `true` if the two underlying iterators are equal.
+
+`bool operator != (const iterator& rhs) const;` returns `true` if the two underlying iterators are not equal.
+
+`iterator& operator ++ ();` the prefix increment - moves the iterator to the next UTF-8 encoded code point.
+
+`iterator operator ++ (int);` the postfix increment - moves the iterator to the next UTF-8 encoded code point and returns the current one.
+
+`iterator& operator -- ();` the prefix decrement - moves the iterator to the previous UTF-8 encoded code point.
+
+`iterator operator -- (int);` the postfix decrement - moves the iterator to the previous UTF-8 encoded code point and returns the current one.
+
+Example of use:
+
+```cpp
+const char* threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88";
+utf8::unchecked::iterator<const char*> un_it(threechars);
+utf8::unchecked::iterator<const char*> un_it2 = un_it;
+assert (un_it2 == un_it);
+assert (*un_it == 0x10346);
+assert (*(++un_it) == 0x65e5);
+assert ((*un_it++) == 0x65e5);
+assert (*un_it == 0x0448);
+assert (un_it != un_it2);
+utf8::unchecked::iterator<const char*> un_endit (threechars + 9);
+assert (++un_it == un_endit);
+assert (*(--un_it) == 0x0448);
+assert ((*un_it--) == 0x0448);
+assert (*un_it == 0x65e5);
+assert (--un_it == utf8::unchecked::iterator<const char*>(threechars));
+assert (*un_it == 0x10346);
+```
+
+This is an unchecked version of `utf8::iterator`. It is faster in many cases, but offers no validity or range checks.
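+
+As a further sketch (assuming the string has already been validated, for instance with `utf8::is_valid`), a begin/end pair of unchecked iterators can walk a whole string:
+
+```cpp
+// Known-valid UTF-8: three code points encoded in 4 + 3 + 2 octets.
+const char* threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88";
+utf8::unchecked::iterator<const char*> first(threechars);
+utf8::unchecked::iterator<const char*> last(threechars + 9);
+unsigned count = 0;
+for (; first != last; ++first)
+    ++count;
+assert (count == 3);
+```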
+
+## Links
+
+1. [The Unicode Consortium](http://www.unicode.org/).
+2. [ICU Library](http://icu.sourceforge.net/).
+3. [UTF-8 at Wikipedia](http://en.wikipedia.org/wiki/UTF-8)
+4. [UTF-8 and Unicode FAQ for Unix/Linux](http://www.cl.cam.ac.uk/~mgk25/unicode.html)
diff --git a/third-party/utf8cpp/samples/docsample.cpp b/third-party/utf8cpp/samples/docsample.cpp
new file mode 100644
index 0000000..6533887
--- /dev/null
+++ b/third-party/utf8cpp/samples/docsample.cpp
@@ -0,0 +1,64 @@
+#include "../source/utf8.h"
+#include <iostream>
+#include <fstream>
+#include <string>
+#include <vector>
+
+
+using namespace std;
+
+int main(int argc, char** argv)
+{
+ if (argc != 2) {
+ cout << "\nUsage: docsample filename\n";
+ return 0;
+ }
+ const char* test_file_path = argv[1];
+ // Open the test file (must be UTF-8 encoded)
+ ifstream fs8(test_file_path);
+ if (!fs8.is_open()) {
+ cout << "Could not open " << test_file_path << endl;
+ return 0;
+ }
+
+ unsigned line_count = 1;
+ string line;
+ // Play with all the lines in the file
+ while (getline(fs8, line)) {
+ // check for invalid utf-8 (for a simple yes/no check, there is also utf8::is_valid function)
+#if __cplusplus >= 201103L // C++ 11 or later
+ auto end_it = utf8::find_invalid(line.begin(), line.end());
+#else
+ string::iterator end_it = utf8::find_invalid(line.begin(), line.end());
+#endif // C++ 11
+ if (end_it != line.end()) {
+ cout << "Invalid UTF-8 encoding detected at line " << line_count << "\n";
+ cout << "This part is fine: " << string(line.begin(), end_it) << "\n";
+ }
+ // Get the line length (at least for the valid part)
+ ptrdiff_t length = utf8::distance(line.begin(), end_it);
+ cout << "Length of line " << line_count << " is " << length << "\n";
+
+ // Convert it to utf-16
+#if __cplusplus >= 201103L // C++ 11 or later
+ u16string utf16line = utf8::utf8to16(line);
+#else
+ vector<unsigned short> utf16line;
+ utf8::utf8to16(line.begin(), end_it, back_inserter(utf16line));
+#endif // C++ 11
+ // And back to utf-8;
+#if __cplusplus >= 201103L // C++ 11 or later
+ string utf8line = utf8::utf16to8(utf16line);
+#else
+ string utf8line;
+ utf8::utf16to8(utf16line.begin(), utf16line.end(), back_inserter(utf8line));
+#endif // C++ 11
+ // Confirm that the conversion went OK:
+ if (utf8line != string(line.begin(), end_it))
+ cout << "Error in UTF-16 conversion at line: " << line_count << "\n";
+
+ line_count++;
+ }
+
+ return 0;
+}
diff --git a/third-party/utf8cpp/source/utf8.h b/third-party/utf8cpp/source/utf8.h
new file mode 100644
index 0000000..82b13f5
--- /dev/null
+++ b/third-party/utf8cpp/source/utf8.h
@@ -0,0 +1,34 @@
+// Copyright 2006 Nemanja Trifunovic
+
+/*
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the software and accompanying documentation covered by
+this license (the "Software") to use, reproduce, display, distribute,
+execute, and transmit the Software, and to prepare derivative works of the
+Software, and to permit third-parties to whom the Software is furnished to
+do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including
+the above license grant, this restriction and the following disclaimer,
+must be included in all copies of the Software, in whole or in part, and
+all derivative works of the Software, unless such copies or derivative
+works are solely in the form of machine-executable object code generated by
+a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+*/
+
+
+#ifndef UTF8_FOR_CPP_2675DCD0_9480_4c0c_B92A_CC14C027B731
+#define UTF8_FOR_CPP_2675DCD0_9480_4c0c_B92A_CC14C027B731
+
+#include "utf8/checked.h"
+#include "utf8/unchecked.h"
+
+#endif // header guard
diff --git a/third-party/utf8cpp/source/utf8/checked.h b/third-party/utf8cpp/source/utf8/checked.h
new file mode 100644
index 0000000..512dcc2
--- /dev/null
+++ b/third-party/utf8cpp/source/utf8/checked.h
@@ -0,0 +1,319 @@
+// Copyright 2006-2016 Nemanja Trifunovic
+
+/*
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the software and accompanying documentation covered by
+this license (the "Software") to use, reproduce, display, distribute,
+execute, and transmit the Software, and to prepare derivative works of the
+Software, and to permit third-parties to whom the Software is furnished to
+do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including
+the above license grant, this restriction and the following disclaimer,
+must be included in all copies of the Software, in whole or in part, and
+all derivative works of the Software, unless such copies or derivative
+works are solely in the form of machine-executable object code generated by
+a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+*/
+
+
+#ifndef UTF8_FOR_CPP_CHECKED_H_2675DCD0_9480_4c0c_B92A_CC14C027B731
+#define UTF8_FOR_CPP_CHECKED_H_2675DCD0_9480_4c0c_B92A_CC14C027B731
+
+#include "core.h"
+#include <stdexcept>
+
+namespace utf8
+{
+ // Base for the exceptions that may be thrown from the library
+ class exception : public ::std::exception {
+ };
+
+ // Exceptions that may be thrown from the library functions.
+ class invalid_code_point : public exception {
+ uint32_t cp;
+ public:
+ invalid_code_point(uint32_t codepoint) : cp(codepoint) {}
+ virtual const char* what() const UTF_CPP_NOEXCEPT UTF_CPP_OVERRIDE { return "Invalid code point"; }
+ uint32_t code_point() const {return cp;}
+ };
+
+ class invalid_utf8 : public exception {
+ uint8_t u8;
+ public:
+ invalid_utf8 (uint8_t u) : u8(u) {}
+ invalid_utf8 (char c) : u8(static_cast<uint8_t>(c)) {}
+ virtual const char* what() const UTF_CPP_NOEXCEPT UTF_CPP_OVERRIDE { return "Invalid UTF-8"; }
+ uint8_t utf8_octet() const {return u8;}
+ };
+
+ class invalid_utf16 : public exception {
+ uint16_t u16;
+ public:
+ invalid_utf16 (uint16_t u) : u16(u) {}
+ virtual const char* what() const UTF_CPP_NOEXCEPT UTF_CPP_OVERRIDE { return "Invalid UTF-16"; }
+ uint16_t utf16_word() const {return u16;}
+ };
+
+ class not_enough_room : public exception {
+ public:
+ virtual const char* what() const UTF_CPP_NOEXCEPT UTF_CPP_OVERRIDE { return "Not enough space"; }
+ };
+
+ /// The library API - functions intended to be called by the users
+
+ template <typename octet_iterator>
+ octet_iterator append(uint32_t cp, octet_iterator result)
+ {
+ if (!utf8::internal::is_code_point_valid(cp))
+ throw invalid_code_point(cp);
+
+ return internal::append(cp, result);
+ }
+
+ template <typename octet_iterator, typename output_iterator>
+ output_iterator replace_invalid(octet_iterator start, octet_iterator end, output_iterator out, uint32_t replacement)
+ {
+ while (start != end) {
+ octet_iterator sequence_start = start;
+ internal::utf_error err_code = utf8::internal::validate_next(start, end);
+ switch (err_code) {
+ case internal::UTF8_OK :
+ for (octet_iterator it = sequence_start; it != start; ++it)
+ *out++ = *it;
+ break;
+ case internal::NOT_ENOUGH_ROOM:
+ out = utf8::append (replacement, out);
+ start = end;
+ break;
+ case internal::INVALID_LEAD:
+ out = utf8::append (replacement, out);
+ ++start;
+ break;
+ case internal::INCOMPLETE_SEQUENCE:
+ case internal::OVERLONG_SEQUENCE:
+ case internal::INVALID_CODE_POINT:
+ out = utf8::append (replacement, out);
+ ++start;
+ // just one replacement mark for the sequence
+ while (start != end && utf8::internal::is_trail(*start))
+ ++start;
+ break;
+ }
+ }
+ return out;
+ }
+
+ template <typename octet_iterator, typename output_iterator>
+ inline output_iterator replace_invalid(octet_iterator start, octet_iterator end, output_iterator out)
+ {
+ static const uint32_t replacement_marker = utf8::internal::mask16(0xfffd);
+ return utf8::replace_invalid(start, end, out, replacement_marker);
+ }
+
+ template <typename octet_iterator>
+ uint32_t next(octet_iterator& it, octet_iterator end)
+ {
+ uint32_t cp = 0;
+ internal::utf_error err_code = utf8::internal::validate_next(it, end, cp);
+ switch (err_code) {
+ case internal::UTF8_OK :
+ break;
+ case internal::NOT_ENOUGH_ROOM :
+ throw not_enough_room();
+ case internal::INVALID_LEAD :
+ case internal::INCOMPLETE_SEQUENCE :
+ case internal::OVERLONG_SEQUENCE :
+ throw invalid_utf8(static_cast<uint8_t>(*it));
+ case internal::INVALID_CODE_POINT :
+ throw invalid_code_point(cp);
+ }
+ return cp;
+ }
+
+ template <typename octet_iterator>
+ uint32_t peek_next(octet_iterator it, octet_iterator end)
+ {
+ return utf8::next(it, end);
+ }
+
+ template <typename octet_iterator>
+ uint32_t prior(octet_iterator& it, octet_iterator start)
+ {
+ // can't do much if it == start
+ if (it == start)
+ throw not_enough_room();
+
+ octet_iterator end = it;
+ // Go back until we hit either a lead octet or start
+ while (utf8::internal::is_trail(*(--it)))
+ if (it == start)
+ throw invalid_utf8(*it); // error - no lead byte in the sequence
+ return utf8::peek_next(it, end);
+ }
+
+ template <typename octet_iterator, typename distance_type>
+ void advance (octet_iterator& it, distance_type n, octet_iterator end)
+ {
+ const distance_type zero(0);
+ if (n < zero) {
+ // backward
+ for (distance_type i = n; i < zero; ++i)
+ utf8::prior(it, end);
+ } else {
+ // forward
+ for (distance_type i = zero; i < n; ++i)
+ utf8::next(it, end);
+ }
+ }
+
+ template <typename octet_iterator>
+ typename std::iterator_traits<octet_iterator>::difference_type
+ distance (octet_iterator first, octet_iterator last)
+ {
+ typename std::iterator_traits<octet_iterator>::difference_type dist;
+ for (dist = 0; first < last; ++dist)
+ utf8::next(first, last);
+ return dist;
+ }
+
+ template <typename u16bit_iterator, typename octet_iterator>
+ octet_iterator utf16to8 (u16bit_iterator start, u16bit_iterator end, octet_iterator result)
+ {
+ while (start != end) {
+ uint32_t cp = utf8::internal::mask16(*start++);
+ // Take care of surrogate pairs first
+ if (utf8::internal::is_lead_surrogate(cp)) {
+ if (start != end) {
+ uint32_t trail_surrogate = utf8::internal::mask16(*start++);
+ if (utf8::internal::is_trail_surrogate(trail_surrogate))
+ cp = (cp << 10) + trail_surrogate + internal::SURROGATE_OFFSET;
+ else
+ throw invalid_utf16(static_cast<uint16_t>(trail_surrogate));
+ }
+ else
+ throw invalid_utf16(static_cast<uint16_t>(cp));
+
+ }
+ // Lone trail surrogate
+ else if (utf8::internal::is_trail_surrogate(cp))
+ throw invalid_utf16(static_cast<uint16_t>(cp));
+
+ result = utf8::append(cp, result);
+ }
+ return result;
+ }
+
+ template <typename u16bit_iterator, typename octet_iterator>
+ u16bit_iterator utf8to16 (octet_iterator start, octet_iterator end, u16bit_iterator result)
+ {
+ while (start < end) {
+ uint32_t cp = utf8::next(start, end);
+ if (cp > 0xffff) { //make a surrogate pair
+ *result++ = static_cast<uint16_t>((cp >> 10) + internal::LEAD_OFFSET);
+ *result++ = static_cast<uint16_t>((cp & 0x3ff) + internal::TRAIL_SURROGATE_MIN);
+ }
+ else
+ *result++ = static_cast<uint16_t>(cp);
+ }
+ return result;
+ }
+
+ template <typename octet_iterator, typename u32bit_iterator>
+ octet_iterator utf32to8 (u32bit_iterator start, u32bit_iterator end, octet_iterator result)
+ {
+ while (start != end)
+ result = utf8::append(*(start++), result);
+
+ return result;
+ }
+
+ template <typename octet_iterator, typename u32bit_iterator>
+ u32bit_iterator utf8to32 (octet_iterator start, octet_iterator end, u32bit_iterator result)
+ {
+ while (start < end)
+ (*result++) = utf8::next(start, end);
+
+ return result;
+ }
+
+ // The iterator class
+ template <typename octet_iterator>
+ class iterator {
+ octet_iterator it;
+ octet_iterator range_start;
+ octet_iterator range_end;
+ public:
+ typedef uint32_t value_type;
+ typedef uint32_t* pointer;
+ typedef uint32_t& reference;
+ typedef std::ptrdiff_t difference_type;
+ typedef std::bidirectional_iterator_tag iterator_category;
+ iterator () {}
+ explicit iterator (const octet_iterator& octet_it,
+ const octet_iterator& rangestart,
+ const octet_iterator& rangeend) :
+ it(octet_it), range_start(rangestart), range_end(rangeend)
+ {
+ if (it < range_start || it > range_end)
+ throw std::out_of_range("Invalid utf-8 iterator position");
+ }
+ // the default "big three" are OK
+ octet_iterator base () const { return it; }
+ uint32_t operator * () const
+ {
+ octet_iterator temp = it;
+ return utf8::next(temp, range_end);
+ }
+ bool operator == (const iterator& rhs) const
+ {
+ if (range_start != rhs.range_start || range_end != rhs.range_end)
+ throw std::logic_error("Comparing utf-8 iterators defined with different ranges");
+ return (it == rhs.it);
+ }
+ bool operator != (const iterator& rhs) const
+ {
+ return !(operator == (rhs));
+ }
+ iterator& operator ++ ()
+ {
+ utf8::next(it, range_end);
+ return *this;
+ }
+ iterator operator ++ (int)
+ {
+ iterator temp = *this;
+ utf8::next(it, range_end);
+ return temp;
+ }
+ iterator& operator -- ()
+ {
+ utf8::prior(it, range_start);
+ return *this;
+ }
+ iterator operator -- (int)
+ {
+ iterator temp = *this;
+ utf8::prior(it, range_start);
+ return temp;
+ }
+ }; // class iterator
+
+} // namespace utf8
+
+#if UTF_CPP_CPLUSPLUS >= 201703L // C++ 17 or later
+#include "cpp17.h"
+#elif UTF_CPP_CPLUSPLUS >= 201103L // C++ 11 or later
+#include "cpp11.h"
+#endif // C++ 11 or later
+
+#endif //header guard
+
diff --git a/third-party/utf8cpp/source/utf8/core.h b/third-party/utf8cpp/source/utf8/core.h
new file mode 100644
index 0000000..34371ee
--- /dev/null
+++ b/third-party/utf8cpp/source/utf8/core.h
@@ -0,0 +1,387 @@
+// Copyright 2006 Nemanja Trifunovic
+
+/*
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the software and accompanying documentation covered by
+this license (the "Software") to use, reproduce, display, distribute,
+execute, and transmit the Software, and to prepare derivative works of the
+Software, and to permit third-parties to whom the Software is furnished to
+do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including
+the above license grant, this restriction and the following disclaimer,
+must be included in all copies of the Software, in whole or in part, and
+all derivative works of the Software, unless such copies or derivative
+works are solely in the form of machine-executable object code generated by
+a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+*/
+
+
+#ifndef UTF8_FOR_CPP_CORE_H_2675DCD0_9480_4c0c_B92A_CC14C027B731
+#define UTF8_FOR_CPP_CORE_H_2675DCD0_9480_4c0c_B92A_CC14C027B731
+
+#include <iterator>
+
+// Determine the C++ standard version.
+// If the user defines UTF_CPP_CPLUSPLUS, use that.
+// Otherwise, trust the unreliable predefined macro __cplusplus
+
+#if !defined UTF_CPP_CPLUSPLUS
+ #define UTF_CPP_CPLUSPLUS __cplusplus
+#endif
+
+#if UTF_CPP_CPLUSPLUS >= 201103L // C++ 11 or later
+ #define UTF_CPP_OVERRIDE override
+ #define UTF_CPP_NOEXCEPT noexcept
+#else // C++ 98/03
+ #define UTF_CPP_OVERRIDE
+ #define UTF_CPP_NOEXCEPT throw()
+#endif // C++ 11 or later
+
+
+namespace utf8
+{
+ // The typedefs for 8-bit, 16-bit and 32-bit unsigned integers
+ // You may need to change them to match your system.
+ // These typedefs have the same names as ones from cstdint, or boost/cstdint
+ typedef unsigned char uint8_t;
+ typedef unsigned short uint16_t;
+ typedef unsigned int uint32_t;
+
+// Helper code - not intended to be directly called by the library users. May be changed at any time
+namespace internal
+{
+ // Unicode constants
+ // Leading (high) surrogates: 0xd800 - 0xdbff
+ // Trailing (low) surrogates: 0xdc00 - 0xdfff
+ const uint16_t LEAD_SURROGATE_MIN = 0xd800u;
+ const uint16_t LEAD_SURROGATE_MAX = 0xdbffu;
+ const uint16_t TRAIL_SURROGATE_MIN = 0xdc00u;
+ const uint16_t TRAIL_SURROGATE_MAX = 0xdfffu;
+ const uint16_t LEAD_OFFSET = 0xd7c0u; // LEAD_SURROGATE_MIN - (0x10000 >> 10)
+ const uint32_t SURROGATE_OFFSET = 0xfca02400u; // 0x10000u - (LEAD_SURROGATE_MIN << 10) - TRAIL_SURROGATE_MIN
+
+ // Maximum valid value for a Unicode code point
+ const uint32_t CODE_POINT_MAX = 0x0010ffffu;
+
+ template<typename octet_type>
+ inline uint8_t mask8(octet_type oc)
+ {
+ return static_cast<uint8_t>(0xff & oc);
+ }
+ template<typename u16_type>
+ inline uint16_t mask16(u16_type oc)
+ {
+ return static_cast<uint16_t>(0xffff & oc);
+ }
+ template<typename octet_type>
+ inline bool is_trail(octet_type oc)
+ {
+ return ((utf8::internal::mask8(oc) >> 6) == 0x2);
+ }
+
+ template <typename u16>
+ inline bool is_lead_surrogate(u16 cp)
+ {
+ return (cp >= LEAD_SURROGATE_MIN && cp <= LEAD_SURROGATE_MAX);
+ }
+
+ template <typename u16>
+ inline bool is_trail_surrogate(u16 cp)
+ {
+ return (cp >= TRAIL_SURROGATE_MIN && cp <= TRAIL_SURROGATE_MAX);
+ }
+
+ template <typename u16>
+ inline bool is_surrogate(u16 cp)
+ {
+ return (cp >= LEAD_SURROGATE_MIN && cp <= TRAIL_SURROGATE_MAX);
+ }
+
+ template <typename u32>
+ inline bool is_code_point_valid(u32 cp)
+ {
+ return (cp <= CODE_POINT_MAX && !utf8::internal::is_surrogate(cp));
+ }
+
+ template <typename octet_iterator>
+ inline typename std::iterator_traits<octet_iterator>::difference_type
+ sequence_length(octet_iterator lead_it)
+ {
+ uint8_t lead = utf8::internal::mask8(*lead_it);
+ if (lead < 0x80)
+ return 1;
+ else if ((lead >> 5) == 0x6)
+ return 2;
+ else if ((lead >> 4) == 0xe)
+ return 3;
+ else if ((lead >> 3) == 0x1e)
+ return 4;
+ else
+ return 0;
+ }
+
+ template <typename octet_difference_type>
+ inline bool is_overlong_sequence(uint32_t cp, octet_difference_type length)
+ {
+ if (cp < 0x80) {
+ if (length != 1)
+ return true;
+ }
+ else if (cp < 0x800) {
+ if (length != 2)
+ return true;
+ }
+ else if (cp < 0x10000) {
+ if (length != 3)
+ return true;
+ }
+
+ return false;
+ }
+
+ enum utf_error {UTF8_OK, NOT_ENOUGH_ROOM, INVALID_LEAD, INCOMPLETE_SEQUENCE, OVERLONG_SEQUENCE, INVALID_CODE_POINT};
+
+ /// Helper for get_sequence_x
+ template <typename octet_iterator>
+ utf_error increase_safely(octet_iterator& it, octet_iterator end)
+ {
+ if (++it == end)
+ return NOT_ENOUGH_ROOM;
+
+ if (!utf8::internal::is_trail(*it))
+ return INCOMPLETE_SEQUENCE;
+
+ return UTF8_OK;
+ }
+
+ #define UTF8_CPP_INCREASE_AND_RETURN_ON_ERROR(IT, END) {utf_error ret = increase_safely(IT, END); if (ret != UTF8_OK) return ret;}
+
+ /// get_sequence_x functions decode utf-8 sequences of the length x
+ template <typename octet_iterator>
+ utf_error get_sequence_1(octet_iterator& it, octet_iterator end, uint32_t& code_point)
+ {
+ if (it == end)
+ return NOT_ENOUGH_ROOM;
+
+ code_point = utf8::internal::mask8(*it);
+
+ return UTF8_OK;
+ }
+
+ template <typename octet_iterator>
+ utf_error get_sequence_2(octet_iterator& it, octet_iterator end, uint32_t& code_point)
+ {
+ if (it == end)
+ return NOT_ENOUGH_ROOM;
+
+ code_point = utf8::internal::mask8(*it);
+
+ UTF8_CPP_INCREASE_AND_RETURN_ON_ERROR(it, end)
+
+ code_point = ((code_point << 6) & 0x7ff) + ((*it) & 0x3f);
+
+ return UTF8_OK;
+ }
+
+ template <typename octet_iterator>
+ utf_error get_sequence_3(octet_iterator& it, octet_iterator end, uint32_t& code_point)
+ {
+ if (it == end)
+ return NOT_ENOUGH_ROOM;
+
+ code_point = utf8::internal::mask8(*it);
+
+ UTF8_CPP_INCREASE_AND_RETURN_ON_ERROR(it, end)
+
+ code_point = ((code_point << 12) & 0xffff) + ((utf8::internal::mask8(*it) << 6) & 0xfff);
+
+ UTF8_CPP_INCREASE_AND_RETURN_ON_ERROR(it, end)
+
+ code_point += (*it) & 0x3f;
+
+ return UTF8_OK;
+ }
+
+ template <typename octet_iterator>
+ utf_error get_sequence_4(octet_iterator& it, octet_iterator end, uint32_t& code_point)
+ {
+ if (it == end)
+ return NOT_ENOUGH_ROOM;
+
+ code_point = utf8::internal::mask8(*it);
+
+ UTF8_CPP_INCREASE_AND_RETURN_ON_ERROR(it, end)
+
+ code_point = ((code_point << 18) & 0x1fffff) + ((utf8::internal::mask8(*it) << 12) & 0x3ffff);
+
+ UTF8_CPP_INCREASE_AND_RETURN_ON_ERROR(it, end)
+
+ code_point += (utf8::internal::mask8(*it) << 6) & 0xfff;
+
+ UTF8_CPP_INCREASE_AND_RETURN_ON_ERROR(it, end)
+
+ code_point += (*it) & 0x3f;
+
+ return UTF8_OK;
+ }
+
+ #undef UTF8_CPP_INCREASE_AND_RETURN_ON_ERROR
+
+ template <typename octet_iterator>
+ utf_error validate_next(octet_iterator& it, octet_iterator end, uint32_t& code_point)
+ {
+ if (it == end)
+ return NOT_ENOUGH_ROOM;
+
+ // Save the original value of it so we can go back in case of failure
+ // Of course, it does not make much sense with i.e. stream iterators
+ octet_iterator original_it = it;
+
+ uint32_t cp = 0;
+ // Determine the sequence length based on the lead octet
+ typedef typename std::iterator_traits<octet_iterator>::difference_type octet_difference_type;
+ const octet_difference_type length = utf8::internal::sequence_length(it);
+
+ // Get trail octets and calculate the code point
+ utf_error err = UTF8_OK;
+ switch (length) {
+ case 0:
+ return INVALID_LEAD;
+ case 1:
+ err = utf8::internal::get_sequence_1(it, end, cp);
+ break;
+ case 2:
+ err = utf8::internal::get_sequence_2(it, end, cp);
+ break;
+ case 3:
+ err = utf8::internal::get_sequence_3(it, end, cp);
+ break;
+ case 4:
+ err = utf8::internal::get_sequence_4(it, end, cp);
+ break;
+ }
+
+ if (err == UTF8_OK) {
+ // Decoding succeeded. Now, security checks...
+ if (utf8::internal::is_code_point_valid(cp)) {
+ if (!utf8::internal::is_overlong_sequence(cp, length)){
+ // Passed! Return here.
+ code_point = cp;
+ ++it;
+ return UTF8_OK;
+ }
+ else
+ err = OVERLONG_SEQUENCE;
+ }
+ else
+ err = INVALID_CODE_POINT;
+ }
+
+ // Failure branch - restore the original value of the iterator
+ it = original_it;
+ return err;
+ }
+
+ template <typename octet_iterator>
+ inline utf_error validate_next(octet_iterator& it, octet_iterator end) {
+ uint32_t ignored;
+ return utf8::internal::validate_next(it, end, ignored);
+ }
+
+ // Internal implementation of both checked and unchecked append() function
+ // This function will be invoked by the overloads below, as they will know
+ // the octet_type.
+ template <typename octet_iterator, typename octet_type>
+ octet_iterator append(uint32_t cp, octet_iterator result) {
+ if (cp < 0x80) // one octet
+ *(result++) = static_cast<octet_type>(cp);
+ else if (cp < 0x800) { // two octets
+ *(result++) = static_cast<octet_type>((cp >> 6) | 0xc0);
+ *(result++) = static_cast<octet_type>((cp & 0x3f) | 0x80);
+ }
+ else if (cp < 0x10000) { // three octets
+ *(result++) = static_cast<octet_type>((cp >> 12) | 0xe0);
+ *(result++) = static_cast<octet_type>(((cp >> 6) & 0x3f) | 0x80);
+ *(result++) = static_cast<octet_type>((cp & 0x3f) | 0x80);
+ }
+ else { // four octets
+ *(result++) = static_cast<octet_type>((cp >> 18) | 0xf0);
+ *(result++) = static_cast<octet_type>(((cp >> 12) & 0x3f)| 0x80);
+ *(result++) = static_cast<octet_type>(((cp >> 6) & 0x3f) | 0x80);
+ *(result++) = static_cast<octet_type>((cp & 0x3f) | 0x80);
+ }
+ return result;
+ }
+
+ // One of the following overloads will be invoked from the API calls
+
+ // A simple (but dangerous) case: the caller appends byte(s) to a char array
+ inline char* append(uint32_t cp, char* result) {
+ return append<char*, char>(cp, result);
+ }
+
+ // Hopefully, most common case: the caller uses back_inserter
+ // i.e. append(cp, std::back_inserter(str));
+ template<typename container_type>
+ std::back_insert_iterator<container_type> append
+ (uint32_t cp, std::back_insert_iterator<container_type> result) {
+ return append<std::back_insert_iterator<container_type>,
+ typename container_type::value_type>(cp, result);
+ }
+
+ // The caller uses some other kind of output operator - not covered above
+ // Note that in this case we are not able to determine octet_type
+ // so we assume it's uint_8; that can cause a conversion warning if we are wrong.
+ template <typename octet_iterator>
+ octet_iterator append(uint32_t cp, octet_iterator result) {
+ return append<octet_iterator, uint8_t>(cp, result);
+ }
+
+} // namespace internal
+
+ /// The library API - functions intended to be called by the users
+
+ // Byte order mark
+ const uint8_t bom[] = {0xef, 0xbb, 0xbf};
+
+ template <typename octet_iterator>
+ octet_iterator find_invalid(octet_iterator start, octet_iterator end)
+ {
+ octet_iterator result = start;
+ while (result != end) {
+ utf8::internal::utf_error err_code = utf8::internal::validate_next(result, end);
+ if (err_code != internal::UTF8_OK)
+ return result;
+ }
+ return result;
+ }
+
+ template <typename octet_iterator>
+ inline bool is_valid(octet_iterator start, octet_iterator end)
+ {
+ return (utf8::find_invalid(start, end) == end);
+ }
+
+ template <typename octet_iterator>
+ inline bool starts_with_bom (octet_iterator it, octet_iterator end)
+ {
+ return (
+ ((it != end) && (utf8::internal::mask8(*it++)) == bom[0]) &&
+ ((it != end) && (utf8::internal::mask8(*it++)) == bom[1]) &&
+ ((it != end) && (utf8::internal::mask8(*it)) == bom[2])
+ );
+ }
+} // namespace utf8
+
+#endif // header guard
+
+
diff --git a/third-party/utf8cpp/source/utf8/cpp11.h b/third-party/utf8cpp/source/utf8/cpp11.h
new file mode 100644
index 0000000..2366f12
--- /dev/null
+++ b/third-party/utf8cpp/source/utf8/cpp11.h
@@ -0,0 +1,103 @@
+// Copyright 2018 Nemanja Trifunovic
+
+/*
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the software and accompanying documentation covered by
+this license (the "Software") to use, reproduce, display, distribute,
+execute, and transmit the Software, and to prepare derivative works of the
+Software, and to permit third-parties to whom the Software is furnished to
+do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including
+the above license grant, this restriction and the following disclaimer,
+must be included in all copies of the Software, in whole or in part, and
+all derivative works of the Software, unless such copies or derivative
+works are solely in the form of machine-executable object code generated by
+a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+*/
+
+
+#ifndef UTF8_FOR_CPP_a184c22c_d012_11e8_a8d5_f2801f1b9fd1
+#define UTF8_FOR_CPP_a184c22c_d012_11e8_a8d5_f2801f1b9fd1
+
+#include "checked.h"
+#include <string>
+
+namespace utf8
+{
+
+ inline void append(char32_t cp, std::string& s)
+ {
+ append(uint32_t(cp), std::back_inserter(s));
+ }
+
+ inline std::string utf16to8(const std::u16string& s)
+ {
+ std::string result;
+ utf16to8(s.begin(), s.end(), std::back_inserter(result));
+ return result;
+ }
+
+ inline std::u16string utf8to16(const std::string& s)
+ {
+ std::u16string result;
+ utf8to16(s.begin(), s.end(), std::back_inserter(result));
+ return result;
+ }
+
+ inline std::string utf32to8(const std::u32string& s)
+ {
+ std::string result;
+ utf32to8(s.begin(), s.end(), std::back_inserter(result));
+ return result;
+ }
+
+ inline std::u32string utf8to32(const std::string& s)
+ {
+ std::u32string result;
+ utf8to32(s.begin(), s.end(), std::back_inserter(result));
+ return result;
+ }
+
+ inline std::size_t find_invalid(const std::string& s)
+ {
+ std::string::const_iterator invalid = find_invalid(s.begin(), s.end());
+ return (invalid == s.end()) ? std::string::npos : static_cast<std::size_t>(invalid - s.begin());
+ }
+
+ inline bool is_valid(const std::string& s)
+ {
+ return is_valid(s.begin(), s.end());
+ }
+
+ inline std::string replace_invalid(const std::string& s, char32_t replacement)
+ {
+ std::string result;
+ replace_invalid(s.begin(), s.end(), std::back_inserter(result), replacement);
+ return result;
+ }
+
+ inline std::string replace_invalid(const std::string& s)
+ {
+ std::string result;
+ replace_invalid(s.begin(), s.end(), std::back_inserter(result));
+ return result;
+ }
+
+ inline bool starts_with_bom(const std::string& s)
+ {
+ return starts_with_bom(s.begin(), s.end());
+ }
+
+} // namespace utf8
+
+#endif // header guard
+
diff --git a/third-party/utf8cpp/source/utf8/cpp17.h b/third-party/utf8cpp/source/utf8/cpp17.h
new file mode 100644
index 0000000..32a77ce
--- /dev/null
+++ b/third-party/utf8cpp/source/utf8/cpp17.h
@@ -0,0 +1,103 @@
+// Copyright 2018 Nemanja Trifunovic
+
+/*
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the software and accompanying documentation covered by
+this license (the "Software") to use, reproduce, display, distribute,
+execute, and transmit the Software, and to prepare derivative works of the
+Software, and to permit third-parties to whom the Software is furnished to
+do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including
+the above license grant, this restriction and the following disclaimer,
+must be included in all copies of the Software, in whole or in part, and
+all derivative works of the Software, unless such copies or derivative
+works are solely in the form of machine-executable object code generated by
+a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+*/
+
+
+#ifndef UTF8_FOR_CPP_7e906c01_03a3_4daf_b420_ea7ea952b3c9
+#define UTF8_FOR_CPP_7e906c01_03a3_4daf_b420_ea7ea952b3c9
+
+#include "checked.h"
+#include <string>
+
+namespace utf8
+{
+
+ inline void append(char32_t cp, std::string& s)
+ {
+ append(uint32_t(cp), std::back_inserter(s));
+ }
+
+ inline std::string utf16to8(std::u16string_view s)
+ {
+ std::string result;
+ utf16to8(s.begin(), s.end(), std::back_inserter(result));
+ return result;
+ }
+
+ inline std::u16string utf8to16(std::string_view s)
+ {
+ std::u16string result;
+ utf8to16(s.begin(), s.end(), std::back_inserter(result));
+ return result;
+ }
+
+ inline std::string utf32to8(std::u32string_view s)
+ {
+ std::string result;
+ utf32to8(s.begin(), s.end(), std::back_inserter(result));
+ return result;
+ }
+
+ inline std::u32string utf8to32(std::string_view s)
+ {
+ std::u32string result;
+ utf8to32(s.begin(), s.end(), std::back_inserter(result));
+ return result;
+ }
+
+ inline std::size_t find_invalid(std::string_view s)
+ {
+ std::string_view::const_iterator invalid = find_invalid(s.begin(), s.end());
+ return (invalid == s.end()) ? std::string_view::npos : static_cast<std::size_t>(invalid - s.begin());
+ }
+
+ inline bool is_valid(std::string_view s)
+ {
+ return is_valid(s.begin(), s.end());
+ }
+
+ inline std::string replace_invalid(std::string_view s, char32_t replacement)
+ {
+ std::string result;
+ replace_invalid(s.begin(), s.end(), std::back_inserter(result), replacement);
+ return result;
+ }
+
+ inline std::string replace_invalid(std::string_view s)
+ {
+ std::string result;
+ replace_invalid(s.begin(), s.end(), std::back_inserter(result));
+ return result;
+ }
+
+ inline bool starts_with_bom(std::string_view s)
+ {
+ return starts_with_bom(s.begin(), s.end());
+ }
+
+} // namespace utf8
+
+#endif // header guard
+
diff --git a/third-party/utf8cpp/source/utf8/unchecked.h b/third-party/utf8cpp/source/utf8/unchecked.h
new file mode 100644
index 0000000..8fe83c9
--- /dev/null
+++ b/third-party/utf8cpp/source/utf8/unchecked.h
@@ -0,0 +1,257 @@
+// Copyright 2006 Nemanja Trifunovic
+
+/*
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the software and accompanying documentation covered by
+this license (the "Software") to use, reproduce, display, distribute,
+execute, and transmit the Software, and to prepare derivative works of the
+Software, and to permit third-parties to whom the Software is furnished to
+do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including
+the above license grant, this restriction and the following disclaimer,
+must be included in all copies of the Software, in whole or in part, and
+all derivative works of the Software, unless such copies or derivative
+works are solely in the form of machine-executable object code generated by
+a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+*/
+
+
+#ifndef UTF8_FOR_CPP_UNCHECKED_H_2675DCD0_9480_4c0c_B92A_CC14C027B731
+#define UTF8_FOR_CPP_UNCHECKED_H_2675DCD0_9480_4c0c_B92A_CC14C027B731
+
+#include "core.h"
+
+namespace utf8
+{
+ namespace unchecked
+ {
+ template <typename octet_iterator>
+ octet_iterator append(uint32_t cp, octet_iterator result)
+ {
+ return internal::append(cp, result);
+ }
+
+ template <typename octet_iterator, typename output_iterator>
+ output_iterator replace_invalid(octet_iterator start, octet_iterator end, output_iterator out, uint32_t replacement)
+ {
+ while (start != end) {
+ octet_iterator sequence_start = start;
+ internal::utf_error err_code = utf8::internal::validate_next(start, end);
+ switch (err_code) {
+ case internal::UTF8_OK :
+ for (octet_iterator it = sequence_start; it != start; ++it)
+ *out++ = *it;
+ break;
+ case internal::NOT_ENOUGH_ROOM:
+ out = utf8::unchecked::append (replacement, out);
+ start = end;
+ break;
+ case internal::INVALID_LEAD:
+ out = utf8::unchecked::append (replacement, out);
+ ++start;
+ break;
+ case internal::INCOMPLETE_SEQUENCE:
+ case internal::OVERLONG_SEQUENCE:
+ case internal::INVALID_CODE_POINT:
+ out = utf8::unchecked::append (replacement, out);
+ ++start;
+ // just one replacement mark for the sequence
+ while (start != end && utf8::internal::is_trail(*start))
+ ++start;
+ break;
+ }
+ }
+ return out;
+ }
+
+ template <typename octet_iterator, typename output_iterator>
+ inline output_iterator replace_invalid(octet_iterator start, octet_iterator end, output_iterator out)
+ {
+ static const uint32_t replacement_marker = utf8::internal::mask16(0xfffd);
+ return utf8::unchecked::replace_invalid(start, end, out, replacement_marker);
+ }
+
+ template <typename octet_iterator>
+ uint32_t next(octet_iterator& it)
+ {
+ uint32_t cp = utf8::internal::mask8(*it);
+ typename std::iterator_traits<octet_iterator>::difference_type length = utf8::internal::sequence_length(it);
+ switch (length) {
+ case 1:
+ break;
+ case 2:
+ it++;
+ cp = ((cp << 6) & 0x7ff) + ((*it) & 0x3f);
+ break;
+ case 3:
+ ++it;
+ cp = ((cp << 12) & 0xffff) + ((utf8::internal::mask8(*it) << 6) & 0xfff);
+ ++it;
+ cp += (*it) & 0x3f;
+ break;
+ case 4:
+ ++it;
+ cp = ((cp << 18) & 0x1fffff) + ((utf8::internal::mask8(*it) << 12) & 0x3ffff);
+ ++it;
+ cp += (utf8::internal::mask8(*it) << 6) & 0xfff;
+ ++it;
+ cp += (*it) & 0x3f;
+ break;
+ }
+ ++it;
+ return cp;
+ }
+
+ template <typename octet_iterator>
+ uint32_t peek_next(octet_iterator it)
+ {
+ return utf8::unchecked::next(it);
+ }
+
+ template <typename octet_iterator>
+ uint32_t prior(octet_iterator& it)
+ {
+ while (utf8::internal::is_trail(*(--it))) ;
+ octet_iterator temp = it;
+ return utf8::unchecked::next(temp);
+ }
+
+ template <typename octet_iterator, typename distance_type>
+ void advance (octet_iterator& it, distance_type n)
+ {
+ const distance_type zero(0);
+ if (n < zero) {
+ // backward
+ for (distance_type i = n; i < zero; ++i)
+ utf8::unchecked::prior(it);
+ } else {
+ // forward
+ for (distance_type i = zero; i < n; ++i)
+ utf8::unchecked::next(it);
+ }
+ }
+
+ template <typename octet_iterator>
+ typename std::iterator_traits<octet_iterator>::difference_type
+ distance (octet_iterator first, octet_iterator last)
+ {
+ typename std::iterator_traits<octet_iterator>::difference_type dist;
+ for (dist = 0; first < last; ++dist)
+ utf8::unchecked::next(first);
+ return dist;
+ }
+
+ template <typename u16bit_iterator, typename octet_iterator>
+ octet_iterator utf16to8 (u16bit_iterator start, u16bit_iterator end, octet_iterator result)
+ {
+ while (start != end) {
+ uint32_t cp = utf8::internal::mask16(*start++);
+ // Take care of surrogate pairs first
+ if (utf8::internal::is_lead_surrogate(cp)) {
+ uint32_t trail_surrogate = utf8::internal::mask16(*start++);
+ cp = (cp << 10) + trail_surrogate + internal::SURROGATE_OFFSET;
+ }
+ result = utf8::unchecked::append(cp, result);
+ }
+ return result;
+ }
+
+ template <typename u16bit_iterator, typename octet_iterator>
+ u16bit_iterator utf8to16 (octet_iterator start, octet_iterator end, u16bit_iterator result)
+ {
+ while (start < end) {
+ uint32_t cp = utf8::unchecked::next(start);
+ if (cp > 0xffff) { //make a surrogate pair
+ *result++ = static_cast<uint16_t>((cp >> 10) + internal::LEAD_OFFSET);
+ *result++ = static_cast<uint16_t>((cp & 0x3ff) + internal::TRAIL_SURROGATE_MIN);
+ }
+ else
+ *result++ = static_cast<uint16_t>(cp);
+ }
+ return result;
+ }
+
+ template <typename octet_iterator, typename u32bit_iterator>
+ octet_iterator utf32to8 (u32bit_iterator start, u32bit_iterator end, octet_iterator result)
+ {
+ while (start != end)
+ result = utf8::unchecked::append(*(start++), result);
+
+ return result;
+ }
+
+ template <typename octet_iterator, typename u32bit_iterator>
+ u32bit_iterator utf8to32 (octet_iterator start, octet_iterator end, u32bit_iterator result)
+ {
+ while (start < end)
+ (*result++) = utf8::unchecked::next(start);
+
+ return result;
+ }
+
+ // The iterator class
+ template <typename octet_iterator>
+ class iterator {
+ octet_iterator it;
+ public:
+ typedef uint32_t value_type;
+ typedef uint32_t* pointer;
+ typedef uint32_t& reference;
+ typedef std::ptrdiff_t difference_type;
+ typedef std::bidirectional_iterator_tag iterator_category;
+ iterator () {}
+ explicit iterator (const octet_iterator& octet_it): it(octet_it) {}
+ // the default "big three" are OK
+ octet_iterator base () const { return it; }
+ uint32_t operator * () const
+ {
+ octet_iterator temp = it;
+ return utf8::unchecked::next(temp);
+ }
+ bool operator == (const iterator& rhs) const
+ {
+ return (it == rhs.it);
+ }
+ bool operator != (const iterator& rhs) const
+ {
+ return !(operator == (rhs));
+ }
+ iterator& operator ++ ()
+ {
+ ::std::advance(it, utf8::internal::sequence_length(it));
+ return *this;
+ }
+ iterator operator ++ (int)
+ {
+ iterator temp = *this;
+ ::std::advance(it, utf8::internal::sequence_length(it));
+ return temp;
+ }
+ iterator& operator -- ()
+ {
+ utf8::unchecked::prior(it);
+ return *this;
+ }
+ iterator operator -- (int)
+ {
+ iterator temp = *this;
+ utf8::unchecked::prior(it);
+ return temp;
+ }
+ }; // class iterator
+
+ } // namespace utf8::unchecked
+} // namespace utf8
+
+
+#endif // header guard
+
diff --git a/third-party/utf8cpp/tests/CMakeLists.txt b/third-party/utf8cpp/tests/CMakeLists.txt
new file mode 100644
index 0000000..f3ce258
--- /dev/null
+++ b/third-party/utf8cpp/tests/CMakeLists.txt
@@ -0,0 +1,43 @@
+add_executable(negative ${PROJECT_SOURCE_DIR}/tests/negative.cpp)
+add_executable(cpp11 ${PROJECT_SOURCE_DIR}/tests/test_cpp11.cpp)
+add_executable(cpp17 ${PROJECT_SOURCE_DIR}/tests/test_cpp17.cpp)
+add_executable(apitests ${PROJECT_SOURCE_DIR}/tests/apitests.cpp)
+
+add_executable(noexceptionstests ${PROJECT_SOURCE_DIR}/tests/noexceptionstests.cpp)
+
+target_link_libraries(negative PRIVATE utf8::cpp)
+target_link_libraries(cpp11 PRIVATE utf8::cpp)
+target_link_libraries(cpp17 PRIVATE utf8::cpp)
+target_link_libraries(apitests PRIVATE utf8::cpp)
+target_link_libraries(noexceptionstests PRIVATE utf8::cpp)
+
+target_compile_options(${PROJECT_NAME} INTERFACE
+ $<$<CXX_COMPILER_ID:MSVC>:/W4>
+ $<$<NOT:$<CXX_COMPILER_ID:MSVC>>:-Wall -Wextra -Wpedantic -Wconversion>)
+
+target_compile_options(noexceptionstests PUBLIC -fno-exceptions)
+
+set_target_properties(negative apitests noexceptionstests
+ PROPERTIES
+ CXX_STANDARD 98
+ CXX_STANDARD_REQUIRED YES
+ CXX_EXTENSIONS NO)
+
+set_target_properties(cpp11
+ PROPERTIES
+ CXX_STANDARD 11
+ CXX_STANDARD_REQUIRED YES
+ CXX_EXTENSIONS NO)
+
+set_target_properties(cpp17
+ PROPERTIES
+ CXX_STANDARD 17
+ CXX_STANDARD_REQUIRED YES
+ CXX_EXTENSIONS NO)
+
+add_test(negative_test negative ${PROJECT_SOURCE_DIR}/tests/test_data/utf8_invalid.txt)
+add_test(cpp11_test cpp11)
+add_test(cpp17_test cpp17)
+add_test(api_test apitests)
+add_test(noexceptions_test noexceptionstests)
+
diff --git a/third-party/utf8cpp/tests/apitests.cpp b/third-party/utf8cpp/tests/apitests.cpp
new file mode 100644
index 0000000..083266d
--- /dev/null
+++ b/third-party/utf8cpp/tests/apitests.cpp
@@ -0,0 +1,6 @@
+#include "../extern/ftest/ftest.h"
+
+#include "test_checked_api.h"
+#include "test_checked_iterator.h"
+#include "test_unchecked_api.h"
+#include "test_unchecked_iterator.h"
diff --git a/third-party/utf8cpp/tests/docker/Dockerfile b/third-party/utf8cpp/tests/docker/Dockerfile
new file mode 100644
index 0000000..9df3717
--- /dev/null
+++ b/third-party/utf8cpp/tests/docker/Dockerfile
@@ -0,0 +1,5 @@
+FROM debian:buster-slim
+
+RUN apt-get update \
+ && apt-get install -y make g++ cmake git \
+ && rm -rf /var/lib/apt/lists/*
diff --git a/third-party/utf8cpp/tests/negative.cpp b/third-party/utf8cpp/tests/negative.cpp
new file mode 100644
index 0000000..f1bcc99
--- /dev/null
+++ b/third-party/utf8cpp/tests/negative.cpp
@@ -0,0 +1,59 @@
+#include "utf8.h"
+using namespace utf8;
+
+#include <string>
+#include <iostream>
+#include <fstream>
+#include <algorithm>
+using namespace std;
+
+const unsigned INVALID_LINES[] = { 75, 76, 83, 84, 85, 93, 102, 103, 105, 106, 107, 108, 109, 110, 114, 115, 116, 117, 124, 125, 130, 135, 140, 145, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 169, 175, 176, 177, 207, 208, 209, 210, 211, 220, 221, 222, 223, 224, 232, 233, 234, 235, 236, 247, 248, 249, 250, 251, 252, 253, 257, 258, 259, 260, 261, 262, 263, 264};
+const unsigned* INVALID_LINES_END = INVALID_LINES + sizeof(INVALID_LINES)/sizeof(unsigned);
+
+int main(int argc, char** argv)
+{
+ string test_file_path;
+ if (argc == 2)
+ test_file_path = argv[1];
+ else {
+ cout << "Wrong number of arguments" << endl;
+ return 1;
+ }
+ // Open the test file
+ ifstream fs8(test_file_path.c_str());
+ if (!fs8.is_open()) {
+ cout << "Could not open " << test_file_path << endl;
+ return 1;
+ }
+
+ // Read it line by line
+ unsigned int line_count = 0;
+ char byte;
+ while (!fs8.eof()) {
+ string line;
+ while ((byte = static_cast<char>(fs8.get())) != '\n' && !fs8.eof())
+ line.push_back(byte);
+
+ line_count++;
+ bool expected_valid = (find(INVALID_LINES, INVALID_LINES_END, line_count) == INVALID_LINES_END);
+ // Print out lines that contain unexpected invalid UTF-8
+ if (!is_valid(line.begin(), line.end())) {
+ if (expected_valid) {
+ cout << "Unexpected invalid utf-8 at line " << line_count << '\n';
+ return 1;
+ }
+
+ // try fixing it:
+ string fixed_line;
+ replace_invalid(line.begin(), line.end(), back_inserter(fixed_line));
+ if (!is_valid(fixed_line.begin(), fixed_line.end())) {
+ cout << "replace_invalid() resulted in an invalid utf-8 at line " << line_count << '\n';
+ return 1;
+ }
+ }
+ else if (!expected_valid) {
+ cout << "Invalid utf-8 NOT detected at line " << line_count << '\n';
+ return 1;
+ }
+ }
+}
diff --git a/third-party/utf8cpp/tests/noexceptionstests.cpp b/third-party/utf8cpp/tests/noexceptionstests.cpp
new file mode 100644
index 0000000..108ee75
--- /dev/null
+++ b/third-party/utf8cpp/tests/noexceptionstests.cpp
@@ -0,0 +1,4 @@
+#include "../extern/ftest/ftest.h"
+
+#include "test_unchecked_api.h"
+#include "test_unchecked_iterator.h"
diff --git a/third-party/utf8cpp/tests/test_checked_api.h b/third-party/utf8cpp/tests/test_checked_api.h
new file mode 100644
index 0000000..3a7067b
--- /dev/null
+++ b/third-party/utf8cpp/tests/test_checked_api.h
@@ -0,0 +1,198 @@
+#ifndef UTF8_FOR_CPP_TEST_CHECKED_H_2675DCD0_9480_4c0c_B92A_CC14C027B731
+#define UTF8_FOR_CPP_TEST_CHECKED_H_2675DCD0_9480_4c0c_B92A_CC14C027B731
+
+#include "utf8.h"
+
+#include <string>
+#include <vector>
+using namespace utf8;
+using namespace std;
+
+
+TEST(CheckedAPITests, test_append)
+{
+ unsigned char u[5] = {0,0,0,0,0};
+ append(0x0448, u);
+ EXPECT_EQ (u[0], 0xd1);
+ EXPECT_EQ (u[1], 0x88);
+ EXPECT_EQ (u[2], 0);
+ EXPECT_EQ (u[3], 0);
+ EXPECT_EQ (u[4], 0);
+
+ append(0x65e5, u);
+ EXPECT_EQ (u[0], 0xe6);
+ EXPECT_EQ (u[1], 0x97);
+ EXPECT_EQ (u[2], 0xa5);
+ EXPECT_EQ (u[3], 0);
+ EXPECT_EQ (u[4], 0);
+
+ append(0x3044, u);
+ EXPECT_EQ (u[0], 0xe3);
+ EXPECT_EQ (u[1], 0x81);
+ EXPECT_EQ (u[2], 0x84);
+ EXPECT_EQ (u[3], 0);
+ EXPECT_EQ (u[4], 0);
+
+ append(0x10346, u);
+ EXPECT_EQ (u[0], 0xf0);
+ EXPECT_EQ (u[1], 0x90);
+ EXPECT_EQ (u[2], 0x8d);
+ EXPECT_EQ (u[3], 0x86);
+ EXPECT_EQ (u[4], 0);
+
+ // Ensure no warnings with plain char
+ char c[2] = {0,0};
+ append('a', c);
+ EXPECT_EQ (c[0], 'a');
+ EXPECT_EQ (c[1], 0);
+}
+
+TEST(CheckedAPITests, test_next)
+{
+ const char* twochars = "\xe6\x97\xa5\xd1\x88";
+ const char* w = twochars;
+ unsigned int cp = next(w, twochars + 6);
+ EXPECT_EQ (cp, 0x65e5);
+ EXPECT_EQ (w, twochars + 3);
+
+ const char* threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88";
+ w = threechars;
+
+ cp = next(w, threechars + 9);
+ EXPECT_EQ (cp, 0x10346);
+ EXPECT_EQ (w, threechars + 4);
+
+ cp = next(w, threechars + 9);
+ EXPECT_EQ (cp, 0x65e5);
+ EXPECT_EQ (w, threechars + 7);
+
+ cp = next(w, threechars + 9);
+ EXPECT_EQ (cp, 0x0448);
+ EXPECT_EQ (w, threechars + 9);
+}
+
+TEST(CheckedAPITests, test_peek_next)
+{
+ const char* const cw = "\xe6\x97\xa5\xd1\x88";
+ unsigned int cp = peek_next(cw, cw + 6);
+ EXPECT_EQ (cp, 0x65e5);
+}
+
+TEST(CheckedAPITests, test_prior)
+{
+ const char* twochars = "\xe6\x97\xa5\xd1\x88";
+ const char* w = twochars + 3;
+ unsigned int cp = prior (w, twochars);
+ EXPECT_EQ (cp, 0x65e5);
+ EXPECT_EQ (w, twochars);
+
+ const char* threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88";
+ w = threechars + 9;
+ cp = prior(w, threechars);
+ EXPECT_EQ (cp, 0x0448);
+ EXPECT_EQ (w, threechars + 7);
+ cp = prior(w, threechars);
+ EXPECT_EQ (cp, 0x65e5);
+ EXPECT_EQ (w, threechars + 4);
+ cp = prior(w, threechars);
+ EXPECT_EQ (cp, 0x10346);
+ EXPECT_EQ (w, threechars);
+}
+
+TEST(CheckedAPITests, test_advance)
+{
+ const char* threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88";
+ const char* w = threechars;
+ advance(w, 2, threechars + 9);
+ EXPECT_EQ(w, threechars + 7);
+ advance(w, -2, threechars);
+ EXPECT_EQ(w, threechars);
+ advance(w, 3, threechars + 9);
+ EXPECT_EQ(w, threechars + 9);
+ advance(w, -2, threechars);
+ EXPECT_EQ(w, threechars + 4);
+ advance(w, -1, threechars);
+ EXPECT_EQ(w, threechars);
+}
+
+TEST(CheckedAPITests, test_distance)
+{
+ const char* twochars = "\xe6\x97\xa5\xd1\x88";
+ size_t dist = static_cast<size_t>(utf8::distance(twochars, twochars + 5));
+ EXPECT_EQ (dist, 2);
+}
+
+TEST(CheckedAPITests, test_utf32to8)
+{
+ unsigned int utf32string[] = {0x448, 0x65E5, 0x10346, 0};
+ string utf8result;
+ utf32to8(utf32string, utf32string + 3, back_inserter(utf8result));
+ EXPECT_EQ (utf8result.size(), 9);
+}
+
+TEST(CheckedAPITests, test_utf8to32)
+{
+ const char* twochars = "\xe6\x97\xa5\xd1\x88";
+ vector<unsigned int> utf32result;
+ utf8to32(twochars, twochars + 5, back_inserter(utf32result));
+ EXPECT_EQ (utf32result.size(), 2);
+}
+
+TEST(CheckedAPITests, test_utf16to8)
+{
+ unsigned short utf16string[] = {0x41, 0x0448, 0x65e5, 0xd834, 0xdd1e};
+ string utf8result;
+ utf16to8(utf16string, utf16string + 5, back_inserter(utf8result));
+ EXPECT_EQ (utf8result.size(), 10);
+}
+
+TEST(CheckedAPITests, test_utf8to16)
+{
+ char utf8_with_surrogates[] = "\xe6\x97\xa5\xd1\x88\xf0\x9d\x84\x9e";
+ vector <unsigned short> utf16result;
+ utf8to16(utf8_with_surrogates, utf8_with_surrogates + 9, back_inserter(utf16result));
+ EXPECT_EQ (utf16result.size(), 4);
+ EXPECT_EQ (utf16result[2], 0xd834);
+ EXPECT_EQ (utf16result[3], 0xdd1e);
+}
+
+TEST(CheckedAPITests, test_replace_invalid)
+{
+ char invalid_sequence[] = "a\x80\xe0\xa0\xc0\xaf\xed\xa0\x80z";
+ vector<char> replace_invalid_result;
+ replace_invalid (invalid_sequence, invalid_sequence + sizeof(invalid_sequence), std::back_inserter(replace_invalid_result), '?');
+ bool bvalid = is_valid(replace_invalid_result.begin(), replace_invalid_result.end());
+ EXPECT_TRUE (bvalid);
+ const char fixed_invalid_sequence[] = "a????z";
+ EXPECT_EQ (sizeof(fixed_invalid_sequence), replace_invalid_result.size());
+ EXPECT_TRUE (std::equal(replace_invalid_result.begin(), replace_invalid_result.begin() + sizeof(fixed_invalid_sequence), fixed_invalid_sequence));
+}
+
+TEST(CheckedAPITests, test_find_invalid)
+{
+ char utf_invalid[] = "\xe6\x97\xa5\xd1\x88\xfa";
+ char* invalid = find_invalid(utf_invalid, utf_invalid + 6);
+ EXPECT_EQ (invalid, utf_invalid + 5);
+}
+
+TEST(CheckedAPITests, test_is_valid)
+{
+ char utf_invalid[] = "\xe6\x97\xa5\xd1\x88\xfa";
+ bool bvalid = is_valid(utf_invalid, utf_invalid + 6);
+ EXPECT_FALSE (bvalid);
+ char utf8_with_surrogates[] = "\xe6\x97\xa5\xd1\x88\xf0\x9d\x84\x9e";
+ bvalid = is_valid(utf8_with_surrogates, utf8_with_surrogates + 9);
+ EXPECT_TRUE (bvalid);
+}
+
+TEST(CheckedAPITests, test_starts_with_bom)
+{
+ unsigned char byte_order_mark[] = {0xef, 0xbb, 0xbf};
+ bool bbom = starts_with_bom(byte_order_mark, byte_order_mark + sizeof(byte_order_mark));
+ EXPECT_TRUE (bbom);
+ const char* threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88";
+ bool no_bbom = starts_with_bom(threechars, threechars + sizeof(threechars));
+ EXPECT_FALSE (no_bbom);
+}
+
+#endif
diff --git a/third-party/utf8cpp/tests/test_checked_iterator.h b/third-party/utf8cpp/tests/test_checked_iterator.h
new file mode 100644
index 0000000..2829a73
--- /dev/null
+++ b/third-party/utf8cpp/tests/test_checked_iterator.h
@@ -0,0 +1,35 @@
+#ifndef UTF8_FOR_CPP_TEST_CHECKED_ITERATOR_H_2675DCD0_9480_4c0c_B92A_CC14C027B731
+#define UTF8_FOR_CPP_TEST_CHECKED_ITERATOR_H_2675DCD0_9480_4c0c_B92A_CC14C027B731
+
+#include "utf8.h"
+
+using namespace utf8;
+
+
+TEST(CheckedIteratrTests, test_increment)
+{
+ const char* threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88";
+ utf8::iterator<const char*> it(threechars, threechars, threechars + 9);
+ utf8::iterator<const char*> it2 = it;
+ EXPECT_EQ (it2, it);
+ EXPECT_EQ (*it, 0x10346);
+ EXPECT_EQ (*(++it), 0x65e5);
+ EXPECT_EQ ((*it++), 0x65e5);
+ EXPECT_EQ (*it, 0x0448);
+ EXPECT_NE (it, it2);
+ utf8::iterator<const char*> endit (threechars + 9, threechars, threechars + 9);
+ EXPECT_EQ (++it, endit);
+}
+
+TEST(CheckedIteratrTests, test_decrement)
+{
+ const char* threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88";
+ utf8::iterator<const char*> it(threechars+9, threechars, threechars + 9);
+ EXPECT_EQ (*(--it), 0x0448);
+ EXPECT_EQ ((*it--), 0x0448);
+ EXPECT_EQ (*it, 0x65e5);
+ EXPECT_EQ (--it, utf8::iterator<const char*>(threechars, threechars, threechars + 9));
+ EXPECT_EQ (*it, 0x10346);
+}
+
+#endif
diff --git a/third-party/utf8cpp/tests/test_cpp11.cpp b/third-party/utf8cpp/tests/test_cpp11.cpp
new file mode 100644
index 0000000..ee4ddd8
--- /dev/null
+++ b/third-party/utf8cpp/tests/test_cpp11.cpp
@@ -0,0 +1,109 @@
+#include "../extern/ftest/ftest.h"
+#include "utf8.h"
+#include <string>
+using namespace utf8;
+using namespace std;
+
+#if __cplusplus >= 201103L // C++ 11 or later
+
+TEST(CPP11APITests, test_append)
+{
+ string u;
+ append(0x0448, u);
+ EXPECT_EQ (u[0], char(0xd1));
+ EXPECT_EQ (u[1], char(0x88));
+ EXPECT_EQ (u.length(), 2);
+
+ u.clear();
+ append(0x65e5, u);
+ EXPECT_EQ (u[0], char(0xe6));
+ EXPECT_EQ (u[1], char(0x97));
+ EXPECT_EQ (u[2], char(0xa5));
+ EXPECT_EQ (u.length(), 3);
+
+ u.clear();
+ append(0x3044, u);
+ EXPECT_EQ (u[0], char(0xe3));
+ EXPECT_EQ (u[1], char(0x81));
+ EXPECT_EQ (u[2], char(0x84));
+ EXPECT_EQ (u.length(), 3);
+
+ u.clear();
+ append(0x10346, u);
+ EXPECT_EQ (u[0], char(0xf0));
+ EXPECT_EQ (u[1], char(0x90));
+ EXPECT_EQ (u[2], char(0x8d));
+ EXPECT_EQ (u[3], char(0x86));
+ EXPECT_EQ (u.length(), 4);
+}
+
+TEST(CPP11APITests, test_utf16to8)
+{
+ u16string utf16string = {0x41, 0x0448, 0x65e5, 0xd834, 0xdd1e};
+ string u = utf16to8(utf16string);
+ EXPECT_EQ (u.size(), 10);
+}
+
+TEST(CPP11APITests, test_utf8to16)
+{
+ string utf8_with_surrogates = "\xe6\x97\xa5\xd1\x88\xf0\x9d\x84\x9e";
+ u16string utf16result = utf8to16(utf8_with_surrogates);
+ EXPECT_EQ (utf16result.size(), 4);
+ EXPECT_EQ (utf16result[2], 0xd834);
+ EXPECT_EQ (utf16result[3], 0xdd1e);
+ // Just to make sure it compiles with string literals
+ utf8to16(u8"simple");
+ utf8to16("simple");
+}
+
+TEST(CPP11APITests, test_utf32to8)
+{
+ u32string utf32string = {0x448, 0x65E5, 0x10346};
+ string utf8result = utf32to8(utf32string);
+ EXPECT_EQ (utf8result.size(), 9);
+}
+
+TEST(CPP11APITests, test_utf8to32)
+{
+ const char* twochars = "\xe6\x97\xa5\xd1\x88";
+ u32string utf32result = utf8to32(twochars);
+ EXPECT_EQ (utf32result.size(), 2);
+}
+
+TEST(CPP11APITests, test_find_invalid)
+{
+ string utf_invalid = "\xe6\x97\xa5\xd1\x88\xfa";
+ auto invalid = find_invalid(utf_invalid);
+ EXPECT_EQ (invalid, 5);
+}
+
+TEST(CPP11APITests, test_is_valid)
+{
+ string utf_invalid = "\xe6\x97\xa5\xd1\x88\xfa";
+ bool bvalid = is_valid(utf_invalid);
+ EXPECT_FALSE (bvalid);
+ string utf8_with_surrogates = "\xe6\x97\xa5\xd1\x88\xf0\x9d\x84\x9e";
+ bvalid = is_valid(utf8_with_surrogates);
+ EXPECT_TRUE (bvalid);
+}
+
+TEST(CPP11APITests, test_replace_invalid)
+{
+ string invalid_sequence = "a\x80\xe0\xa0\xc0\xaf\xed\xa0\x80z";
+ string replace_invalid_result = replace_invalid(invalid_sequence, '?');
+ bool bvalid = is_valid(replace_invalid_result);
+ EXPECT_TRUE (bvalid);
+ const string fixed_invalid_sequence = "a????z";
+ EXPECT_EQ(fixed_invalid_sequence, replace_invalid_result);
+}
+
+TEST(CPP11APITests, test_starts_with_bom)
+{
+ string byte_order_mark = {char(0xef), char(0xbb), char(0xbf)};
+ bool bbom = starts_with_bom(byte_order_mark);
+ EXPECT_TRUE (bbom);
+ string threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88";
+ bool no_bbom = starts_with_bom(threechars);
+ EXPECT_FALSE (no_bbom);
+}
+#endif // C++ 11 or later
diff --git a/third-party/utf8cpp/tests/test_cpp17.cpp b/third-party/utf8cpp/tests/test_cpp17.cpp
new file mode 100644
index 0000000..4b87816
--- /dev/null
+++ b/third-party/utf8cpp/tests/test_cpp17.cpp
@@ -0,0 +1,88 @@
+#include "../extern/ftest/ftest.h"
+#include "utf8.h"
+#include <string>
+using namespace utf8;
+using namespace std;
+
+#if __cplusplus >= 201703L // C++ 17 or later
+
+
+TEST(CPP17APITests, test_utf16to8)
+{
+ u16string utf16string = {0x41, 0x0448, 0x65e5, 0xd834, 0xdd1e};
+ u16string_view utf16stringview(utf16string);
+ string u = utf16to8(utf16string);
+ EXPECT_EQ (u.size(), 10);
+}
+
+TEST(CPP17APITests, test_utf8to16)
+{
+ string_view utf8_with_surrogates = "\xe6\x97\xa5\xd1\x88\xf0\x9d\x84\x9e";
+ u16string utf16result = utf8to16(utf8_with_surrogates);
+ EXPECT_EQ (utf16result.size(), 4);
+ EXPECT_EQ (utf16result[2], 0xd834);
+ EXPECT_EQ (utf16result[3], 0xdd1e);
+}
+
+TEST(CPP17APITests, test_utf32to8)
+{
+ u32string utf32string = {0x448, 0x65E5, 0x10346};
+ u32string_view utf32stringview(utf32string);
+ string utf8result = utf32to8(utf32stringview);
+ EXPECT_EQ (utf8result.size(), 9);
+}
+
+TEST(CPP17APITests, test_utf8to32)
+{
+ string_view twochars = "\xe6\x97\xa5\xd1\x88";
+ u32string utf32result = utf8to32(twochars);
+ EXPECT_EQ (utf32result.size(), 2);
+}
+
+TEST(CPP17APITests, test_find_invalid)
+{
+ string_view utf_invalid = "\xe6\x97\xa5\xd1\x88\xfa";
+ auto invalid = find_invalid(utf_invalid);
+ EXPECT_EQ (invalid, 5);
+}
+
+TEST(CPP17APITests, test_is_valid)
+{
+ string_view utf_invalid = "\xe6\x97\xa5\xd1\x88\xfa";
+ bool bvalid = is_valid(utf_invalid);
+ EXPECT_FALSE (bvalid);
+ string_view utf8_with_surrogates = "\xe6\x97\xa5\xd1\x88\xf0\x9d\x84\x9e";
+ bvalid = is_valid(utf8_with_surrogates);
+ EXPECT_TRUE (bvalid);
+}
+
+TEST(CPP17APITests, test_replace_invalid)
+{
+ string_view invalid_sequence = "a\x80\xe0\xa0\xc0\xaf\xed\xa0\x80z";
+ string replace_invalid_result = replace_invalid(invalid_sequence, '?');
+ bool bvalid = is_valid(replace_invalid_result);
+ EXPECT_TRUE (bvalid);
+ const string fixed_invalid_sequence = "a????z";
+ EXPECT_EQ(fixed_invalid_sequence, replace_invalid_result);
+}
+
+TEST(CPP17APITests, test_starts_with_bom)
+{
+ string byte_order_mark = {char(0xef), char(0xbb), char(0xbf)};
+ string_view byte_order_mark_view(byte_order_mark);
+ bool bbom = starts_with_bom(byte_order_mark_view);
+ EXPECT_TRUE (bbom);
+ string_view threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88";
+ bool no_bbom = starts_with_bom(threechars);
+ EXPECT_FALSE (no_bbom);
+}
+
+TEST(CPP17APITests, string_class_and_literals)
+{
+ const char* twochars = u8"ab";
+ EXPECT_TRUE (is_valid(twochars));
+ const string two_chars_string(twochars);
+ EXPECT_TRUE (is_valid(two_chars_string));
+}
+
+#endif // C++ 17 or later
diff --git a/third-party/utf8cpp/tests/test_data/utf8_invalid.txt b/third-party/utf8cpp/tests/test_data/utf8_invalid.txt
new file mode 100644
index 0000000..ae83159
--- /dev/null
+++ b/third-party/utf8cpp/tests/test_data/utf8_invalid.txt
Binary files differ
diff --git a/third-party/utf8cpp/tests/test_unchecked_api.h b/third-party/utf8cpp/tests/test_unchecked_api.h
new file mode 100644
index 0000000..10c5991
--- /dev/null
+++ b/third-party/utf8cpp/tests/test_unchecked_api.h
@@ -0,0 +1,164 @@
+#ifndef UTF8_FOR_CPP_TEST_UNCHECKED_H_2675DCD0_9480_4c0c_B92A_CC14C027B731
+#define UTF8_FOR_CPP_TEST_UNCHECKED_H_2675DCD0_9480_4c0c_B92A_CC14C027B731
+
+#include "utf8/unchecked.h"
+
+#include <string>
+#include <vector>
+
+using namespace std;
+
+TEST(UnCheckedAPITests, test_append)
+{
+ unsigned char u[5] = {0,0,0,0,0};
+ utf8::unchecked::append(0x0448, u);
+ EXPECT_EQ (u[0], 0xd1);
+ EXPECT_EQ (u[1], 0x88);
+ EXPECT_EQ (u[2], 0);
+ EXPECT_EQ (u[3], 0);
+ EXPECT_EQ (u[4], 0);
+
+ utf8::unchecked::append(0x65e5, u);
+ EXPECT_EQ (u[0], 0xe6);
+ EXPECT_EQ (u[1], 0x97);
+ EXPECT_EQ (u[2], 0xa5);
+ EXPECT_EQ (u[3], 0);
+ EXPECT_EQ (u[4], 0);
+
+ utf8::unchecked::append(0x3044, u);
+ EXPECT_EQ (u[0], 0xe3);
+ EXPECT_EQ (u[1], 0x81);
+ EXPECT_EQ (u[2], 0x84);
+ EXPECT_EQ (u[3], 0);
+ EXPECT_EQ (u[4], 0);
+
+ utf8::unchecked::append(0x10346, u);
+ EXPECT_EQ (u[0], 0xf0);
+ EXPECT_EQ (u[1], 0x90);
+ EXPECT_EQ (u[2], 0x8d);
+ EXPECT_EQ (u[3], 0x86);
+ EXPECT_EQ (u[4], 0);
+}
+
+TEST(UnCheckedAPITests, test_next)
+{
+ const char* twochars = "\xe6\x97\xa5\xd1\x88";
+ const char* w = twochars;
+ unsigned int cp = utf8::unchecked::next(w);
+ EXPECT_EQ (cp, 0x65e5);
+ EXPECT_EQ (w, twochars + 3);
+
+ const char* threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88";
+ w = threechars;
+
+ cp = utf8::unchecked::next(w);
+ EXPECT_EQ (cp, 0x10346);
+ EXPECT_EQ (w, threechars + 4);
+
+ cp = utf8::unchecked::next(w);
+ EXPECT_EQ (cp, 0x65e5);
+ EXPECT_EQ (w, threechars + 7);
+
+ cp = utf8::unchecked::next(w);
+ EXPECT_EQ (cp, 0x0448);
+ EXPECT_EQ (w, threechars + 9);
+}
+
+TEST(UnCheckedAPITests, test_peek_next)
+{
+ const char* const cw = "\xe6\x97\xa5\xd1\x88";
+ unsigned int cp = utf8::unchecked::peek_next(cw);
+ EXPECT_EQ (cp, 0x65e5);
+}
+
+TEST(UnCheckedAPITests, test_prior)
+{
+ const char* twochars = "\xe6\x97\xa5\xd1\x88";
+ const char* w = twochars + 3;
+ unsigned int cp = utf8::unchecked::prior (w);
+ EXPECT_EQ (cp, 0x65e5);
+ EXPECT_EQ (w, twochars);
+
+ const char* threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88";
+ w = threechars + 9;
+ cp = utf8::unchecked::prior(w);
+ EXPECT_EQ (cp, 0x0448);
+ EXPECT_EQ (w, threechars + 7);
+ cp = utf8::unchecked::prior(w);
+ EXPECT_EQ (cp, 0x65e5);
+ EXPECT_EQ (w, threechars + 4);
+ cp = utf8::unchecked::prior(w);
+ EXPECT_EQ (cp, 0x10346);
+ EXPECT_EQ (w, threechars);
+}
+
+TEST(UnCheckedAPITests, test_advance)
+{
+ const char* threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88";
+ const char* w = threechars;
+ utf8::unchecked::advance(w, 2);
+ EXPECT_EQ(w, threechars + 7);
+ utf8::unchecked::advance(w, -2);
+ EXPECT_EQ(w, threechars);
+ utf8::unchecked::advance(w, 3);
+ EXPECT_EQ(w, threechars + 9);
+ utf8::unchecked::advance(w, -2);
+ EXPECT_EQ(w, threechars + 4);
+ utf8::unchecked::advance(w, -1);
+ EXPECT_EQ(w, threechars);
+}
+
+TEST(UnCheckedAPITests, test_distance)
+{
+ const char* twochars = "\xe6\x97\xa5\xd1\x88";
+ size_t dist = static_cast<size_t>(utf8::unchecked::distance(twochars, twochars + 5));
+ EXPECT_EQ (dist, 2);
+}
+
+TEST(UnCheckedAPITests, test_utf32to8)
+{
+ unsigned int utf32string[] = {0x448, 0x65E5, 0x10346, 0};
+ string utf8result;
+ utf8::unchecked::utf32to8(utf32string, utf32string + 3, back_inserter(utf8result));
+ EXPECT_EQ (utf8result.size(), 9);
+}
+
+TEST(UnCheckedAPITests, test_utf8to32)
+{
+ const char* twochars = "\xe6\x97\xa5\xd1\x88";
+ vector<unsigned int> utf32result;
+ utf8::unchecked::utf8to32(twochars, twochars + 5, back_inserter(utf32result));
+ EXPECT_EQ (utf32result.size(), 2);
+}
+
+TEST(UnCheckedAPITests, test_utf16to8)
+{
+ unsigned short utf16string[] = {0x41, 0x0448, 0x65e5, 0xd834, 0xdd1e};
+ string utf8result;
+ utf8::unchecked::utf16to8(utf16string, utf16string + 5, back_inserter(utf8result));
+ EXPECT_EQ (utf8result.size(), 10);
+}
+
+TEST(UnCheckedAPITests, test_utf8to16)
+{
+ char utf8_with_surrogates[] = "\xe6\x97\xa5\xd1\x88\xf0\x9d\x84\x9e";
+ vector <unsigned short> utf16result;
+ utf8::unchecked::utf8to16(utf8_with_surrogates, utf8_with_surrogates + 9, back_inserter(utf16result));
+ EXPECT_EQ (utf16result.size(), 4);
+ EXPECT_EQ (utf16result[2], 0xd834);
+ EXPECT_EQ (utf16result[3], 0xdd1e);
+}
+
+TEST(UnCheckedAPITests, test_replace_invalid)
+{
+ char invalid_sequence[] = "a\x80\xe0\xa0\xc0\xaf\xed\xa0\x80z";
+ vector<char> replace_invalid_result;
+ utf8::unchecked::replace_invalid (invalid_sequence, invalid_sequence + sizeof(invalid_sequence), std::back_inserter(replace_invalid_result), '?');
+ bool bvalid = utf8::is_valid(replace_invalid_result.begin(), replace_invalid_result.end());
+ EXPECT_TRUE (bvalid);
+ const char fixed_invalid_sequence[] = "a????z";
+ EXPECT_EQ (sizeof(fixed_invalid_sequence), replace_invalid_result.size());
+ EXPECT_TRUE (std::equal(replace_invalid_result.begin(), replace_invalid_result.begin() + sizeof(fixed_invalid_sequence), fixed_invalid_sequence));
+}
+
+#endif
diff --git a/third-party/utf8cpp/tests/test_unchecked_iterator.h b/third-party/utf8cpp/tests/test_unchecked_iterator.h
new file mode 100644
index 0000000..4294232
--- /dev/null
+++ b/third-party/utf8cpp/tests/test_unchecked_iterator.h
@@ -0,0 +1,36 @@
+#ifndef UTF8_FOR_CPP_TEST_UNCHECKED_ITERATOR_H_2675DCD0_9480_4c0c_B92A_CC14C027B731
+#define UTF8_FOR_CPP_TEST_UNCHECKED_ITERATOR_H_2675DCD0_9480_4c0c_B92A_CC14C027B731
+
+#include "utf8/unchecked.h"
+
+using namespace utf8::unchecked;
+
+
+TEST(UnCheckedIteratrTests, test_increment)
+{
+ const char* threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88";
+ utf8::unchecked::iterator<const char*> it(threechars);
+ utf8::unchecked::iterator<const char*> it2 = it;
+ EXPECT_EQ (it2, it);
+ EXPECT_EQ (*it, 0x10346);
+ EXPECT_EQ (*(++it), 0x65e5);
+ EXPECT_EQ ((*it++), 0x65e5);
+ EXPECT_EQ (*it, 0x0448);
+ EXPECT_NE (it, it2);
+ utf8::unchecked::iterator<const char*> endit (threechars + 9);
+ EXPECT_EQ (++it, endit);
+}
+
+TEST(UnCheckedIteratrTests, test_decrement)
+{
+ const char* threechars = "\xf0\x90\x8d\x86\xe6\x97\xa5\xd1\x88";
+ utf8::unchecked::iterator<const char*> it(threechars+9);
+ EXPECT_EQ (*(--it), 0x0448);
+ EXPECT_EQ ((*it--), 0x0448);
+ EXPECT_EQ (*it, 0x65e5);
+ EXPECT_EQ (--it, utf8::unchecked::iterator<const char*>(threechars));
+ EXPECT_EQ (*it, 0x10346);
+
+}
+
+#endif
diff --git a/third-party/utf8cpp/utf8cppConfig.cmake.in b/third-party/utf8cpp/utf8cppConfig.cmake.in
new file mode 100644
index 0000000..450fe8d
--- /dev/null
+++ b/third-party/utf8cpp/utf8cppConfig.cmake.in
@@ -0,0 +1,6 @@
+@PACKAGE_INIT@
+
+include("${CMAKE_CURRENT_LIST_DIR}/utf8cppTargets.cmake")
+check_required_components( "utf8cpp" )
+
+add_library(utf8::cpp ALIAS utf8cpp)
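The package config template above pulls in the exported `utf8cppTargets.cmake` and defines an `utf8::cpp` alias target. A minimal consumer sketch, assuming the library has been installed so that CMake can locate the generated `utf8cppConfig.cmake` (the `demo` project, target and source file names are hypothetical):

    cmake_minimum_required(VERSION 3.5)
    project(demo CXX)

    # Locates the utf8cppConfig.cmake generated from the template above.
    find_package(utf8cpp CONFIG REQUIRED)

    add_executable(demo main.cpp)

    # utf8::cpp is the ALIAS target defined at the end of the config file.
    target_link_libraries(demo PRIVATE utf8::cpp)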
diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt
new file mode 100644
index 0000000..e3a822e
--- /dev/null
+++ b/tools/CMakeLists.txt
@@ -0,0 +1,6 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+add_subdirectory(mkclass)
+add_subdirectory(mkembedconfig)
+add_subdirectory(mkunity)
+
diff --git a/tools/debug/gdb/.gitignore b/tools/debug/gdb/.gitignore
new file mode 100644
index 0000000..1c9c744
--- /dev/null
+++ b/tools/debug/gdb/.gitignore
@@ -0,0 +1 @@
+icingadbg.pyc
diff --git a/tools/debug/gdb/README.md b/tools/debug/gdb/README.md
new file mode 100644
index 0000000..b00f81a
--- /dev/null
+++ b/tools/debug/gdb/README.md
@@ -0,0 +1,40 @@
+# Pretty Printer Installation
+
+Requirements:
+* icinga2 debug symbols
+* boost, gcc, etc. debug symbols
+
+Install the `boost`, `python` and `icinga2` pretty printers. Absolute paths are required,
+so please make sure to adjust the installation paths below accordingly (use `pwd` to determine them).
+
+Boost Pretty Printers:
+
+ $ mkdir ~/.gdb_printers && cd ~/.gdb_printers
+ $ git clone https://github.com/ruediger/Boost-Pretty-Printer.git && cd Boost-Pretty-Printer
+ $ pwd
+ /home/michi/.gdb_printers/Boost-Pretty-Printer
+
+Python Pretty Printers:
+
+ $ cd ~/.gdb_printers
+ $ svn co svn://gcc.gnu.org/svn/gcc/trunk/libstdc++-v3/python
+
+Icinga 2 Pretty Printers:
+
+ $ mkdir -p ~/.gdb_printers/icinga2 && cd ~/.gdb_printers/icinga2
+ $ wget https://raw.githubusercontent.com/Icinga/icinga2/master/tools/debug/gdb/icingadbg.py
+
+Now you'll need to set up your `~/.gdbinit` configuration file.
+You can download the one shipped with Icinga 2 and adjust all paths to your environment.
+
+> **Note**
+>
+> The path to the `pthread` library varies on distributions. Use
+> `find /usr/lib* -type f -name '*libpthread.so*'` to get the proper
+> path.
+
+ $ wget https://raw.githubusercontent.com/Icinga/icinga2/master/tools/debug/gdb/gdbinit -O ~/.gdbinit
+ $ vim ~/.gdbinit
+
+
+More details can be found in the [troubleshooting debug documentation](https://docs.icinga.com/icinga2/latest/doc/module/icinga2/chapter/troubleshooting#debug).
diff --git a/tools/debug/gdb/gdbinit b/tools/debug/gdb/gdbinit
new file mode 100644
index 0000000..a7e6b87
--- /dev/null
+++ b/tools/debug/gdb/gdbinit
@@ -0,0 +1,25 @@
+set print pretty on
+
+python
+import sys
+sys.path.insert(0, '/home/gbeutner/icinga2/tools/debug/gdb')
+from icingadbg import register_icinga_printers
+register_icinga_printers()
+end
+
+python
+import sys
+sys.path.insert(0, '/home/gbeutner/gdb_printers/python')
+from libstdcxx.v6.printers import register_libstdcxx_printers
+try:
+ register_libstdcxx_printers(None)
+except:
+ pass
+end
+
+python
+import sys
+sys.path.insert(0, '/home/gbeutner/Boost-Pretty-Printer')
+from boost.printers import register_printer_gen
+register_printer_gen(None)
+end
diff --git a/tools/debug/gdb/icingadbg.py b/tools/debug/gdb/icingadbg.py
new file mode 100644
index 0000000..d1e1c59
--- /dev/null
+++ b/tools/debug/gdb/icingadbg.py
@@ -0,0 +1,64 @@
+import gdb
+import re
+
+class IcingaStringPrinter:
+ def __init__(self, val):
+ self.val = val
+
+ def to_string(self):
+ return '"' + self.val['m_Data']['_M_dataplus']['_M_p'].string() + '"'
+
+class IcingaValuePrinter:
+ def __init__(self, val):
+ self.val = val
+
+ def to_string(self):
+ which = self.val['m_Value']['which_']
+
+ if which == 0:
+ return 'Empty'
+ elif which == 1:
+ return self.val['m_Value']['storage_']['data_']['buf'].cast(gdb.lookup_type('double').pointer()).dereference()
+ elif which == 2:
+ return self.val['m_Value']['storage_']['data_']['buf'].cast(gdb.lookup_type('bool').pointer()).dereference()
+ elif which == 3:
+ return self.val['m_Value']['storage_']['data_']['buf'].cast(gdb.lookup_type('icinga::String').pointer()).dereference()
+ elif which == 4:
+ return self.val['m_Value']['storage_']['data_']['buf'].cast(gdb.lookup_type('icinga::Object').pointer()).dereference()
+ else:
+ return '<INVALID>'
+
+class IcingaSignalPrinter:
+ def __init__(self, val):
+ self.val = val
+
+ def to_string(self):
+ return '<SIGNAL>'
+
+class IcingaMutexPrinter:
+ def __init__(self, val):
+ self.val = val
+
+ def to_string(self):
+ owner = self.val['__data']['__owner']
+
+ if owner == 0:
+ return '<unlocked>'
+ else:
+ return '<locked by #' + str(owner) + '>'
+
+def lookup_icinga_type(val):
+ t = val.type.unqualified()
+ if str(t) == 'icinga::String':
+ return IcingaStringPrinter(val)
+ elif str(t) == 'icinga::Value':
+ return IcingaValuePrinter(val)
+ elif re.match('^boost::signals2::signal.*<.*>$', str(t)):
+ return IcingaSignalPrinter(val)
+ elif str(t) == 'pthread_mutex_t':
+ return IcingaMutexPrinter(val)
+
+ return None
+
+def register_icinga_printers():
+ gdb.pretty_printers.append(lookup_icinga_type)
diff --git a/tools/debug/natvis/Visualizers/icinga2.natstepfilter b/tools/debug/natvis/Visualizers/icinga2.natstepfilter
new file mode 100644
index 0000000..f53f002
--- /dev/null
+++ b/tools/debug/natvis/Visualizers/icinga2.natstepfilter
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="utf-8"?>
+<StepFilter xmlns="http://schemas.microsoft.com/vstudio/debugger/natstepfilter/2010">
+ <Function><Name>icinga::String::.*</Name><Action>NoStepInto</Action></Function>
+ <Function><Name>icinga::Value::.*</Name><Action>NoStepInto</Action></Function>
+ <Function><Name>icinga::Array::.*</Name><Action>NoStepInto</Action></Function>
+ <Function><Name>icinga::Dictionary::.*</Name><Action>NoStepInto</Action></Function>
+ <Function><Name>icinga::Object::.*</Name><Action>NoStepInto</Action></Function>
+ <Function><Name>icinga::ObjectImpl&lt;.*</Name><Action>NoStepInto</Action></Function>
+</StepFilter>
diff --git a/tools/debug/natvis/Visualizers/icinga2.natvis b/tools/debug/natvis/Visualizers/icinga2.natvis
new file mode 100644
index 0000000..0ec4b78
--- /dev/null
+++ b/tools/debug/natvis/Visualizers/icinga2.natvis
@@ -0,0 +1,32 @@
+<?xml version='1.0' encoding='utf-8'?>
+<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
+ <Type Name="icinga::String">
+ <DisplayString>{m_Data}</DisplayString>
+ </Type>
+
+ <Type Name="icinga::Value">
+ <DisplayString Condition="m_Value.which_ == 0">Empty</DisplayString>
+ <DisplayString Condition="m_Value.which_ == 1">{*(double *)m_Value.storage_.data_.buf}</DisplayString>
+ <DisplayString Condition="m_Value.which_ == 2">{*(double *)m_Value.storage_.data_.buf}</DisplayString>
+ <DisplayString Condition="m_Value.which_ == 3">{*(icinga::String *)m_Value.storage_.data_.buf}</DisplayString>
+ <DisplayString Condition="m_Value.which_ == 4">{*(boost::intrusive_ptr&lt;icinga::Object&gt; *)m_Value.storage_.data_.buf}</DisplayString>
+ </Type>
+
+ <Type Name="icinga::Array">
+ <DisplayString>{m_Data}</DisplayString>
+ <Expand>
+ <ExpandedItem>m_Data</ExpandedItem>
+ </Expand>
+ </Type>
+
+ <Type Name="icinga::Dictionary">
+ <DisplayString>{m_Data}</DisplayString>
+ <Expand>
+ <ExpandedItem>m_Data</ExpandedItem>
+ </Expand>
+ </Type>
+
+ <Type Name="icinga::ObjectLock">
+ <DisplayString>{m_Lock}</DisplayString>
+ </Type>
+</AutoVisualizer>
diff --git a/tools/debug/natvis/[Content_Types].xml b/tools/debug/natvis/[Content_Types].xml
new file mode 100644
index 0000000..b1c9cf9
--- /dev/null
+++ b/tools/debug/natvis/[Content_Types].xml
@@ -0,0 +1 @@
+<?xml version="1.0" encoding="utf-8"?><Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types"><Default Extension="vsixmanifest" ContentType="text/xml" /><Default Extension="natstepfilter" ContentType="application/octet-stream" /><Default Extension="natvis" ContentType="application/octet-stream" /></Types> \ No newline at end of file
diff --git a/tools/debug/natvis/extension.vsixmanifest b/tools/debug/natvis/extension.vsixmanifest
new file mode 100644
index 0000000..d870e89
--- /dev/null
+++ b/tools/debug/natvis/extension.vsixmanifest
@@ -0,0 +1,18 @@
+<PackageManifest Version="2.0.0" xmlns="http://schemas.microsoft.com/developer/vsx-schema/2011">
+ <Metadata>
+ <Identity Id="Icinga2Visualizers.VS2013.D1DFF2F5-FB30-41FE-8EEF-0CEB97ABBC6B" Version="2.0.0" Language="en-US" Publisher="Icinga GmbH" />
+ <DisplayName>Icinga 2 Debugger Visualizers for Visual Studio</DisplayName>
+ <Description xml:space="preserve">Icinga 2 Debugger Visualizers</Description>
+ </Metadata>
+ <Installation>
+ <InstallationTarget Version="[12.0,13.0)" Id="Microsoft.VisualStudio.Premium" />
+ <InstallationTarget Version="[12.0,13.0)" Id="Microsoft.VisualStudio.Pro" />
+ <InstallationTarget Version="[12.0,13.0)" Id="Microsoft.VisualStudio.Ultimate" />
+ <InstallationTarget Version="[12.0,13.0)" Id="Microsoft.VisualStudio.VSWinDesktopExpress" />
+ </Installation>
+ <Dependencies></Dependencies>
+ <Assets>
+ <Asset Type="NativeVisualizer" Path="Visualizers\icinga2.natvis" />
+ <Asset Type="StepFilter" Path="Visualizers\icinga2.natstepfilter" />
+ </Assets>
+</PackageManifest>
diff --git a/tools/mkclass/CMakeLists.txt b/tools/mkclass/CMakeLists.txt
new file mode 100644
index 0000000..1b97bda
--- /dev/null
+++ b/tools/mkclass/CMakeLists.txt
@@ -0,0 +1,43 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+find_package(BISON 2.3.0 REQUIRED)
+find_package(FLEX 2.5.31 REQUIRED)
+
+bison_target(class_parser class_parser.yy ${CMAKE_CURRENT_BINARY_DIR}/class_parser.cc)
+flex_target(class_lexer class_lexer.ll ${CMAKE_CURRENT_BINARY_DIR}/class_lexer.cc)
+add_flex_bison_dependency(class_lexer class_parser)
+
+if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ set_property(SOURCE ${CMAKE_CURRENT_BINARY_DIR}/class_parser.cc PROPERTY COMPILE_FLAGS "-Wno-deprecated-register")
+ set_property(SOURCE ${CMAKE_CURRENT_BINARY_DIR}/class_lexer.cc PROPERTY COMPILE_FLAGS "-Wno-deprecated-register -Wno-null-conversion")
+endif()
+
+include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
+
+set(mkclass_SOURCES
+ mkclass.cpp
+ classcompiler.cpp classcompiler.hpp
+ ${FLEX_class_lexer_OUTPUTS}
+ ${BISON_class_parser_OUTPUTS}
+)
+
+add_executable(mkclass ${mkclass_SOURCES})
+
+if(WIN32)
+ target_link_libraries(mkclass shlwapi)
+endif()
+
+set_target_properties (
+ mkclass PROPERTIES
+ FOLDER Bin
+)
+
+macro(MKCLASS_TARGET ClassInput ClassImplOutput ClassHeaderOutput)
+ add_custom_command(
+ OUTPUT ${ClassImplOutput} ${ClassHeaderOutput}
+ COMMAND mkclass
+ ARGS ${ClassInput} ${CMAKE_CURRENT_BINARY_DIR}/${ClassImplOutput} ${CMAKE_CURRENT_BINARY_DIR}/${ClassHeaderOutput}
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+ DEPENDS mkclass ${ClassInput}
+ )
+endmacro()
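The `MKCLASS_TARGET` macro above wraps the `mkclass` code generator in a custom command that turns a class description file into a generated implementation/header pair. A hypothetical invocation sketch (the `example.ti` input and the `example` library are illustrative names only, not files from this repository):

    # Generate example-ti.cpp/.hpp from example.ti via the mkclass binary built above.
    mkclass_target(example.ti example-ti.cpp example-ti.hpp)

    # Compile the generated sources into a (hypothetical) library target.
    add_library(example OBJECT
      example.cpp
      ${CMAKE_CURRENT_BINARY_DIR}/example-ti.cpp
      ${CMAKE_CURRENT_BINARY_DIR}/example-ti.hpp
    )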
diff --git a/tools/mkclass/class_lexer.ll b/tools/mkclass/class_lexer.ll
new file mode 100644
index 0000000..217ca49
--- /dev/null
+++ b/tools/mkclass/class_lexer.ll
@@ -0,0 +1,167 @@
+%{
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "classcompiler.hpp"
+
+using namespace icinga;
+
+#define YYLTYPE icinga::ClassDebugInfo
+
+#include "class_parser.hh"
+
+#define YY_EXTRA_TYPE ClassCompiler *
+#define YY_USER_ACTION \
+do { \
+ yylloc->path = yyextra->GetPath(); \
+ yylloc->first_line = yylineno; \
+ yylloc->first_column = yycolumn; \
+ yylloc->last_line = yylineno; \
+ yylloc->last_column = yycolumn + yyleng - 1; \
+ yycolumn += yyleng; \
+} while (0);
+
+#define YY_INPUT(buf, result, max_size) \
+do { \
+ result = yyextra->ReadInput(buf, max_size); \
+} while (0)
+
+struct lex_buf {
+ char *buf;
+ size_t size;
+};
+
+static void lb_init(lex_buf *lb)
+{
+ lb->buf = NULL;
+ lb->size = 0;
+}
+
+/*static void lb_cleanup(lex_buf *lb)
+{
+ free(lb->buf);
+}*/
+
+static void lb_append_char(lex_buf *lb, char new_char)
+{
+ const size_t block_size = 64;
+
+ size_t old_blocks = (lb->size + (block_size - 1)) / block_size;
+ size_t new_blocks = ((lb->size + 1) + (block_size - 1)) / block_size;
+
+ if (old_blocks != new_blocks) {
+ char *new_buf = (char *)realloc(lb->buf, new_blocks * block_size);
+
+ if (new_buf == NULL && new_blocks > 0)
+ throw std::bad_alloc();
+
+ lb->buf = new_buf;
+ }
+
+ lb->size++;
+ lb->buf[lb->size - 1] = new_char;
+}
+
+static char *lb_steal(lex_buf *lb)
+{
+ lb_append_char(lb, '\0');
+
+ char *buf = lb->buf;
+ lb->buf = NULL;
+ lb->size = 0;
+ return buf;
+}
+%}
+
+%option reentrant noyywrap yylineno
+%option bison-bridge bison-locations
+%option never-interactive nounistd
+%option noinput nounput
+
+%x HEREDOC
+%x C_COMMENT
+
+%%
+ lex_buf string_buf;
+
+\{\{\{ { lb_init(&string_buf); BEGIN(HEREDOC); }
+
+<HEREDOC>\}\}\} {
+ BEGIN(INITIAL);
+
+ lb_append_char(&string_buf, '\0');
+
+ yylval->text = lb_steal(&string_buf);
+
+ return T_STRING;
+ }
+
+<HEREDOC>(.|\n) { lb_append_char(&string_buf, yytext[0]); }
+
+"/*" { BEGIN(C_COMMENT); }
+
+<C_COMMENT>{
+"*/" { BEGIN(INITIAL); }
+[^*] /* ignore comment */
+"*" /* ignore star */
+}
+
+<C_COMMENT><<EOF>> {
+ fprintf(stderr, "End-of-file while in comment.\n");
+ yyterminate();
+ }
+\/\/[^\n]* /* ignore C++-style comments */
+[ \t\r\n] /* ignore whitespace */
+
+#include { return T_INCLUDE; }
+#impl_include { return T_IMPL_INCLUDE; }
+class { return T_CLASS; }
+namespace { return T_NAMESPACE; }
+code { return T_CODE; }
+load_after { return T_LOAD_AFTER; }
+activation_priority { return T_ACTIVATION_PRIORITY; }
+library { return T_LIBRARY; }
+abstract { yylval->num = TAAbstract; return T_CLASS_ATTRIBUTE; }
+vararg_constructor { yylval->num = TAVarArgConstructor; return T_CLASS_ATTRIBUTE; }
+config { yylval->num = FAConfig; return T_FIELD_ATTRIBUTE; }
+state { yylval->num = FAState; return T_FIELD_ATTRIBUTE; }
+enum { yylval->num = FAEnum; return T_FIELD_ATTRIBUTE; }
+get_protected { yylval->num = FAGetProtected; return T_FIELD_ATTRIBUTE; }
+set_protected { yylval->num = FASetProtected; return T_FIELD_ATTRIBUTE; }
+protected { yylval->num = FAGetProtected | FASetProtected; return T_FIELD_ATTRIBUTE; }
+no_storage { yylval->num = FANoStorage; return T_FIELD_ATTRIBUTE; }
+no_user_modify { yylval->num = FANoUserModify; return T_FIELD_ATTRIBUTE; }
+no_user_view { yylval->num = FANoUserView; return T_FIELD_ATTRIBUTE; }
+deprecated { yylval->num = FADeprecated; return T_FIELD_ATTRIBUTE; }
+get_virtual { yylval->num = FAGetVirtual; return T_FIELD_ATTRIBUTE; }
+set_virtual { yylval->num = FASetVirtual; return T_FIELD_ATTRIBUTE; }
+signal_with_old_value { yylval->num = FASignalWithOldValue; return T_FIELD_ATTRIBUTE; }
+virtual { yylval->num = FAGetVirtual | FASetVirtual; return T_FIELD_ATTRIBUTE; }
+navigation { return T_NAVIGATION; }
+validator { return T_VALIDATOR; }
+required { return T_REQUIRED; }
+name { return T_NAME; }
+array { return T_ARRAY; }
+default { yylval->num = FTDefault; return T_FIELD_ACCESSOR_TYPE; }
+get { yylval->num = FTGet; return T_FIELD_ACCESSOR_TYPE; }
+set { yylval->num = FTSet; return T_FIELD_ACCESSOR_TYPE; }
+track { yylval->num = FTTrack; return T_FIELD_ACCESSOR_TYPE; }
+navigate { yylval->num = FTNavigate; return T_FIELD_ACCESSOR_TYPE; }
+\"[^\"]+\" { yylval->text = strdup(yytext + 1); yylval->text[strlen(yylval->text) - 1] = '\0'; return T_STRING; }
+\<[^ \>]*\> { yylval->text = strdup(yytext + 1); yylval->text[strlen(yylval->text) - 1] = '\0'; return T_ANGLE_STRING; }
+[a-zA-Z_][:a-zA-Z0-9\-_]* { yylval->text = strdup(yytext); return T_IDENTIFIER; }
+-?[0-9]+(\.[0-9]+)? { yylval->num = strtod(yytext, NULL); return T_NUMBER; }
+
+. return yytext[0];
+
+%%
+
+void ClassCompiler::InitializeScanner(void)
+{
+ yylex_init(&m_Scanner);
+ yyset_extra(this, m_Scanner);
+}
+
+void ClassCompiler::DestroyScanner(void)
+{
+ yylex_destroy(m_Scanner);
+}
diff --git a/tools/mkclass/class_parser.yy b/tools/mkclass/class_parser.yy
new file mode 100644
index 0000000..0524b2d
--- /dev/null
+++ b/tools/mkclass/class_parser.yy
@@ -0,0 +1,558 @@
+%{
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "classcompiler.hpp"
+#include <iostream>
+#include <vector>
+#include <cstring>
+
+using std::malloc;
+using std::free;
+using std::exit;
+
+using namespace icinga;
+
+#define YYLTYPE icinga::ClassDebugInfo
+
+%}
+
+%pure-parser
+
+%locations
+%defines
+%error-verbose
+
+%parse-param { ClassCompiler *context }
+%lex-param { void *scanner }
+
+%union {
+ char *text;
+ int num;
+ FieldType *type;
+ Field *field;
+ std::vector<Field> *fields;
+ Klass *klass;
+ FieldAccessor *fieldaccessor;
+ std::vector<FieldAccessor> *fieldaccessors;
+ Rule *rule;
+ std::vector<Rule> *rules;
+ Validator *validator;
+}
+
+%token T_INCLUDE "#include (T_INCLUDE)"
+%token T_IMPL_INCLUDE "#impl_include (T_IMPL_INCLUDE)"
+%token T_CLASS "class (T_CLASS)"
+%token T_CODE "code (T_CODE)"
+%token T_LOAD_AFTER "load_after (T_LOAD_AFTER)"
+%token T_ACTIVATION_PRIORITY "activation_priority (T_ACTIVATION_PRIORITY)"
+%token T_LIBRARY "library (T_LIBRARY)"
+%token T_NAMESPACE "namespace (T_NAMESPACE)"
+%token T_VALIDATOR "validator (T_VALIDATOR)"
+%token T_REQUIRED "required (T_REQUIRED)"
+%token T_NAVIGATION "navigation (T_NAVIGATION)"
+%token T_NAME "name (T_NAME)"
+%token T_ARRAY "array (T_ARRAY)"
+%token T_STRING "string (T_STRING)"
+%token T_ANGLE_STRING "angle_string (T_ANGLE_STRING)"
+%token T_FIELD_ATTRIBUTE "field_attribute (T_FIELD_ATTRIBUTE)"
+%token T_CLASS_ATTRIBUTE "class_attribute (T_CLASS_ATTRIBUTE)"
+%token T_IDENTIFIER "identifier (T_IDENTIFIER)"
+%token T_GET "get (T_GET)"
+%token T_SET "set (T_SET)"
+%token T_DEFAULT "default (T_DEFAULT)"
+%token T_FIELD_ACCESSOR_TYPE "field_accessor_type (T_FIELD_ACCESSOR_TYPE)"
+%token T_NUMBER "number (T_NUMBER)"
+%type <text> T_IDENTIFIER
+%type <text> T_STRING
+%type <text> T_ANGLE_STRING
+%type <text> identifier
+%type <text> alternative_name_specifier
+%type <text> inherits_specifier
+%type <text> type_base_specifier
+%type <text> include
+%type <text> angle_include
+%type <text> impl_include
+%type <text> angle_impl_include
+%type <text> code
+%type <num> T_FIELD_ATTRIBUTE
+%type <field> field_attribute
+%type <field> field_attributes
+%type <field> field_attribute_list
+%type <num> T_FIELD_ACCESSOR_TYPE
+%type <num> T_CLASS_ATTRIBUTE
+%type <num> class_attribute_list
+%type <type> field_type
+%type <field> class_field
+%type <fields> class_fields
+%type <klass> class
+%type <fieldaccessors> field_accessor_list
+%type <fieldaccessors> field_accessors
+%type <fieldaccessor> field_accessor
+%type <rule> validator_rule
+%type <rules> validator_rules
+%type <validator> validator
+%type <num> T_NUMBER
+
+%{
+
+int yylex(YYSTYPE *lvalp, YYLTYPE *llocp, void *scanner);
+
+void yyerror(YYLTYPE *locp, ClassCompiler *, const char *err)
+{
+ std::cerr << "in " << locp->path << " at " << locp->first_line << ":" << locp->first_column << "-"
+ << locp->last_line << ":" << locp->last_column << ": " << err << std::endl;
+ std::exit(1);
+}
+
+int yyparse(ClassCompiler *context);
+
+void ClassCompiler::Compile(void)
+{
+ try {
+ yyparse(this);
+ } catch (const std::exception& ex) {
+ std::cerr << "Exception: " << ex.what();
+ }
+
+ HandleMissingValidators();
+}
+
+#define scanner (context->GetScanner())
+
+%}
+
+%%
+
+statements: /* empty */
+ | statements statement
+ ;
+
+statement: include
+ {
+ context->HandleInclude($1, yylloc);
+ std::free($1);
+ }
+ | angle_include
+ {
+ context->HandleAngleInclude($1, yylloc);
+ std::free($1);
+ }
+ | impl_include
+ {
+ context->HandleImplInclude($1, yylloc);
+ std::free($1);
+ }
+ | angle_impl_include
+ {
+ context->HandleAngleImplInclude($1, yylloc);
+ std::free($1);
+ }
+ | class
+ {
+ context->HandleClass(*$1, yylloc);
+ delete $1;
+ }
+ | validator
+ {
+ context->HandleValidator(*$1, yylloc);
+ delete $1;
+ }
+ | namespace
+ | code
+ {
+ context->HandleCode($1, yylloc);
+ std::free($1);
+ }
+ | library
+ ;
+
+include: T_INCLUDE T_STRING
+ {
+ $$ = $2;
+ }
+ ;
+
+angle_include: T_INCLUDE T_ANGLE_STRING
+ {
+ $$ = $2;
+ }
+ ;
+
+impl_include: T_IMPL_INCLUDE T_STRING
+ {
+ $$ = $2;
+ }
+ ;
+
+angle_impl_include: T_IMPL_INCLUDE T_ANGLE_STRING
+ {
+ $$ = $2;
+ }
+ ;
+
+namespace: T_NAMESPACE identifier '{'
+ {
+ context->HandleNamespaceBegin($2, yylloc);
+ std::free($2);
+ }
+ statements '}'
+ {
+ context->HandleNamespaceEnd(yylloc);
+ }
+ ;
+
+code: T_CODE T_STRING
+ {
+ $$ = $2;
+ }
+ ;
+
+library: T_LIBRARY T_IDENTIFIER ';'
+ {
+ context->HandleLibrary($2, yylloc);
+ free($2);
+ }
+ ;
+
+class: class_attribute_list T_CLASS T_IDENTIFIER inherits_specifier type_base_specifier '{' class_fields '}' ';'
+ {
+ $$ = new Klass();
+
+ $$->Name = $3;
+ std::free($3);
+
+ if ($4) {
+ $$->Parent = $4;
+ std::free($4);
+ }
+
+ if ($5) {
+ $$->TypeBase = $5;
+ std::free($5);
+ }
+
+ $$->Attributes = $1;
+
+ for (const Field& field : *$7) {
+ if (field.Attributes & FALoadDependency) {
+ $$->LoadDependencies.push_back(field.Name);
+ } else if (field.Attributes & FAActivationPriority) {
+ $$->ActivationPriority = field.Priority;
+ } else
+ $$->Fields.push_back(field);
+ }
+
+ delete $7;
+
+ ClassCompiler::OptimizeStructLayout($$->Fields);
+ }
+ ;
+
+class_attribute_list: /* empty */
+ {
+ $$ = 0;
+ }
+ | T_CLASS_ATTRIBUTE
+ {
+ $$ = $1;
+ }
+ | class_attribute_list T_CLASS_ATTRIBUTE
+ {
+ $$ = $1 | $2;
+ }
+
+inherits_specifier: /* empty */
+ {
+ $$ = NULL;
+ }
+ | ':' identifier
+ {
+ $$ = $2;
+ }
+ ;
+
+type_base_specifier: /* empty */
+ {
+ $$ = NULL;
+ }
+ | '<' identifier
+ {
+ $$ = $2;
+ }
+ ;
+
+class_fields: /* empty */
+ {
+ $$ = new std::vector<Field>();
+ }
+ | class_fields class_field
+ {
+ $$->push_back(*$2);
+ delete $2;
+ }
+ ;
+
+field_type: identifier
+ {
+ $$ = new FieldType();
+ $$->IsName = false;
+ $$->TypeName = $1;
+ free($1);
+ }
+ | T_NAME '(' identifier ')'
+ {
+ $$ = new FieldType();
+ $$->IsName = true;
+ $$->TypeName = $3;
+ $$->ArrayRank = 0;
+ free($3);
+ }
+ | T_ARRAY '(' field_type ')'
+ {
+ $$ = $3;
+ $$->ArrayRank++;
+ }
+ ;
+
+class_field: field_attribute_list field_type identifier alternative_name_specifier field_accessor_list ';'
+ {
+ Field *field = $1;
+
+ if ((field->Attributes & (FAConfig | FAState)) == 0)
+ field->Attributes |= FAEphemeral;
+
+ field->Type = *$2;
+ delete $2;
+
+ field->Name = $3;
+ std::free($3);
+
+ if ($4) {
+ field->AlternativeName = $4;
+ std::free($4);
+ }
+
+ std::vector<FieldAccessor>::const_iterator it;
+ for (it = $5->begin(); it != $5->end(); it++) {
+ switch (it->Type) {
+ case FTGet:
+ field->GetAccessor = it->Accessor;
+ field->PureGetAccessor = it->Pure;
+ break;
+ case FTSet:
+ field->SetAccessor = it->Accessor;
+ field->PureSetAccessor = it->Pure;
+ break;
+ case FTDefault:
+ field->DefaultAccessor = it->Accessor;
+ break;
+ case FTTrack:
+ field->TrackAccessor = it->Accessor;
+ break;
+ case FTNavigate:
+ field->NavigateAccessor = it->Accessor;
+ field->PureNavigateAccessor = it->Pure;
+ break;
+ }
+ }
+
+ delete $5;
+
+ $$ = field;
+ }
+ | T_LOAD_AFTER identifier ';'
+ {
+ auto *field = new Field();
+ field->Attributes = FALoadDependency;
+ field->Name = $2;
+ std::free($2);
+ $$ = field;
+ }
+ | T_ACTIVATION_PRIORITY T_NUMBER ';'
+ {
+ auto *field = new Field();
+ field->Attributes = FAActivationPriority;
+ field->Priority = $2;
+ $$ = field;
+ }
+ ;
+
+alternative_name_specifier: /* empty */
+ {
+ $$ = NULL;
+ }
+ | '(' identifier ')'
+ {
+ $$ = $2;
+ }
+ ;
+
+field_attribute_list: /* empty */
+ {
+ $$ = new Field();
+ }
+ | '[' field_attributes ']'
+ {
+ $$ = $2;
+ }
+ ;
+
+field_attribute: T_FIELD_ATTRIBUTE
+ {
+ $$ = new Field();
+ $$->Attributes = $1;
+ }
+ | T_REQUIRED
+ {
+ $$ = new Field();
+ $$->Attributes = FARequired;
+ }
+ | T_NAVIGATION '(' identifier ')'
+ {
+ $$ = new Field();
+ $$->Attributes = FANavigation;
+ $$->NavigationName = $3;
+ std::free($3);
+ }
+ | T_NAVIGATION
+ {
+ $$ = new Field();
+ $$->Attributes = FANavigation;
+ }
+ ;
+
+field_attributes: /* empty */
+ {
+ $$ = new Field();
+ }
+ | field_attributes ',' field_attribute
+ {
+ $$ = $1;
+ $$->Attributes |= $3->Attributes;
+ if (!$3->NavigationName.empty())
+ $$->NavigationName = $3->NavigationName;
+ delete $3;
+ }
+ | field_attribute
+ {
+ $$ = $1;
+ }
+ ;
+
+field_accessor_list: /* empty */
+ {
+ $$ = new std::vector<FieldAccessor>();
+ }
+ | '{' field_accessors '}'
+ {
+ $$ = $2;
+ }
+ ;
+
+field_accessors: /* empty */
+ {
+ $$ = new std::vector<FieldAccessor>();
+ }
+ | field_accessors field_accessor
+ {
+ $$ = $1;
+ $$->push_back(*$2);
+ delete $2;
+ }
+ ;
+
+field_accessor: T_FIELD_ACCESSOR_TYPE T_STRING
+ {
+ $$ = new FieldAccessor(static_cast<FieldAccessorType>($1), $2, false);
+ std::free($2);
+ }
+ | T_FIELD_ACCESSOR_TYPE ';'
+ {
+ $$ = new FieldAccessor(static_cast<FieldAccessorType>($1), "", true);
+ }
+ ;
+
+validator_rules: /* empty */
+ {
+ $$ = new std::vector<Rule>();
+ }
+ | validator_rules validator_rule
+ {
+ $$->push_back(*$2);
+ delete $2;
+ }
+ ;
+
+validator_rule: T_NAME '(' T_IDENTIFIER ')' identifier ';'
+ {
+ $$ = new Rule();
+ $$->Attributes = 0;
+ $$->IsName = true;
+ $$->Type = $3;
+ std::free($3);
+ $$->Pattern = $5;
+ std::free($5);
+ }
+ | T_IDENTIFIER identifier ';'
+ {
+ $$ = new Rule();
+ $$->Attributes = 0;
+ $$->IsName = false;
+ $$->Type = $1;
+ std::free($1);
+ $$->Pattern = $2;
+ std::free($2);
+ }
+ | T_NAME '(' T_IDENTIFIER ')' identifier '{' validator_rules '}' ';'
+ {
+ $$ = new Rule();
+ $$->Attributes = 0;
+ $$->IsName = true;
+ $$->Type = $3;
+ std::free($3);
+ $$->Pattern = $5;
+ std::free($5);
+ $$->Rules = *$7;
+ delete $7;
+ }
+ | T_IDENTIFIER identifier '{' validator_rules '}' ';'
+ {
+ $$ = new Rule();
+ $$->Attributes = 0;
+ $$->IsName = false;
+ $$->Type = $1;
+ std::free($1);
+ $$->Pattern = $2;
+ std::free($2);
+ $$->Rules = *$4;
+ delete $4;
+ }
+ | T_REQUIRED identifier ';'
+ {
+ $$ = new Rule();
+ $$->Attributes = RARequired;
+ $$->IsName = false;
+ $$->Type = "";
+ $$->Pattern = $2;
+ std::free($2);
+ }
+ ;
+
+validator: T_VALIDATOR T_IDENTIFIER '{' validator_rules '}' ';'
+ {
+ $$ = new Validator();
+
+ $$->Name = $2;
+ std::free($2);
+
+ $$->Rules = *$4;
+ delete $4;
+ }
+ ;
+
+identifier: T_IDENTIFIER
+ | T_STRING
+ {
+ $$ = $1;
+ }
+ ;
diff --git a/tools/mkclass/classcompiler.cpp b/tools/mkclass/classcompiler.cpp
new file mode 100644
index 0000000..3a49576
--- /dev/null
+++ b/tools/mkclass/classcompiler.cpp
@@ -0,0 +1,1485 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "classcompiler.hpp"
+#include <iostream>
+#include <sstream>
+#include <fstream>
+#include <stdexcept>
+#include <map>
+#include <set>
+#include <utility>
+#include <vector>
+#include <cstring>
+#include <locale>
+#ifndef _WIN32
+#include <libgen.h>
+#else /* _WIN32 */
+#include <shlwapi.h>
+#endif /* _WIN32 */
+
+using namespace icinga;
+
+ClassCompiler::ClassCompiler(std::string path, std::istream& input,
+ std::ostream& oimpl, std::ostream& oheader)
+ : m_Path(std::move(path)), m_Input(input), m_Impl(oimpl), m_Header(oheader)
+{
+ InitializeScanner();
+}
+
+ClassCompiler::~ClassCompiler()
+{
+ DestroyScanner();
+}
+
+std::string ClassCompiler::GetPath() const
+{
+ return m_Path;
+}
+
+void *ClassCompiler::GetScanner()
+{
+ return m_Scanner;
+}
+
+size_t ClassCompiler::ReadInput(char *buffer, size_t max_size)
+{
+ m_Input.read(buffer, max_size);
+ return static_cast<size_t>(m_Input.gcount());
+}
+
+void ClassCompiler::HandleInclude(const std::string& path, const ClassDebugInfo&)
+{
+ m_Header << "#include \"" << path << "\"" << std::endl << std::endl;
+}
+
+void ClassCompiler::HandleAngleInclude(const std::string& path, const ClassDebugInfo&)
+{
+ m_Header << "#include <" << path << ">" << std::endl << std::endl;
+}
+
+void ClassCompiler::HandleImplInclude(const std::string& path, const ClassDebugInfo&)
+{
+ m_Impl << "#include \"" << path << "\"" << std::endl << std::endl;
+}
+
+void ClassCompiler::HandleAngleImplInclude(const std::string& path, const ClassDebugInfo&)
+{
+ m_Impl << "#include <" << path << ">" << std::endl << std::endl;
+}
+
+void ClassCompiler::HandleNamespaceBegin(const std::string& name, const ClassDebugInfo&)
+{
+ m_Header << "namespace " << name << std::endl
+ << "{" << std::endl << std::endl;
+
+ m_Impl << "namespace " << name << std::endl
+ << "{" << std::endl << std::endl;
+}
+
+void ClassCompiler::HandleNamespaceEnd(const ClassDebugInfo&)
+{
+ HandleMissingValidators();
+
+ m_Header << "}" << std::endl;
+
+ m_Impl << "}" << std::endl;
+}
+
+void ClassCompiler::HandleCode(const std::string& code, const ClassDebugInfo&)
+{
+ m_Header << code << std::endl;
+}
+
+void ClassCompiler::HandleLibrary(const std::string& library, const ClassDebugInfo&)
+{
+ m_Library = library;
+}
+
+unsigned long ClassCompiler::SDBM(const std::string& str, size_t len = std::string::npos)
+{
+ unsigned long hash = 0;
+ size_t current = 0;
+
+ for (const char& ch : str) {
+ if (current >= len)
+ break;
+
+ hash = ch + (hash << 6) + (hash << 16) - hash;
+
+ current++;
+ }
+
+ return hash;
+}
+
+static int TypePreference(const std::string& type)
+{
+ if (type == "Value")
+ return 0;
+ else if (type == "String")
+ return 1;
+ else if (type == "double")
+ return 2;
+ else if (type.find("::Ptr") != std::string::npos)
+ return 3;
+ else if (type == "int")
+ return 4;
+ else
+ return 5;
+}
+
+static bool FieldLayoutCmp(const Field& a, const Field& b)
+{
+ return TypePreference(a.Type.GetRealType()) < TypePreference(b.Type.GetRealType());
+}
+
+static bool FieldTypeCmp(const Field& a, const Field& b)
+{
+ return a.Type.GetRealType() < b.Type.GetRealType();
+}
+
+static std::string FieldTypeToIcingaName(const Field& field, bool inner)
+{
+ std::string ftype = field.Type.TypeName;
+
+ if (!inner && field.Type.ArrayRank > 0)
+ return "Array";
+
+ if (field.Type.IsName)
+ return "String";
+
+ if (field.Attributes & FAEnum)
+ return "Number";
+
+ if (ftype == "int" || ftype == "double")
+ return "Number";
+ else if (ftype == "bool")
+ return "Boolean";
+
+ if (ftype.find("::Ptr") != std::string::npos)
+ return ftype.substr(0, ftype.size() - strlen("::Ptr"));
+
+ return ftype;
+}
+
+void ClassCompiler::OptimizeStructLayout(std::vector<Field>& fields)
+{
+ std::sort(fields.begin(), fields.end(), FieldTypeCmp);
+ std::stable_sort(fields.begin(), fields.end(), FieldLayoutCmp);
+}
+
+void ClassCompiler::HandleClass(const Klass& klass, const ClassDebugInfo&)
+{
+ /* forward declaration */
+ if (klass.Name.find_first_of(':') == std::string::npos)
+ m_Header << "class " << klass.Name << ";" << std::endl << std::endl;
+
+ /* TypeHelper */
+ if (klass.Attributes & TAAbstract) {
+ m_Header << "template<>" << std::endl
+ << "struct TypeHelper<" << klass.Name << ", " << ((klass.Attributes & TAVarArgConstructor) ? "true" : "false") << ">" << std::endl
+ << "{" << std::endl
+ << "\t" << "static ObjectFactory GetFactory()" << std::endl
+ << "\t" << "{" << std::endl
+ << "\t\t" << "return nullptr;" << std::endl
+ << "\t" << "}" << std::endl
+ << "};" << std::endl << std::endl;
+ }
+
+ /* TypeImpl */
+ m_Header << "template<>" << std::endl
+ << "class TypeImpl<" << klass.Name << ">"
+ << " : public Type";
+
+ if (!klass.Parent.empty())
+ m_Header << "Impl<" << klass.Parent << ">";
+
+ if (!klass.TypeBase.empty())
+ m_Header << ", public " + klass.TypeBase;
+
+ m_Header << std::endl
+ << "{" << std::endl
+ << "public:" << std::endl
+ << "\t" << "DECLARE_PTR_TYPEDEFS(TypeImpl<" << klass.Name << ">);" << std::endl << std::endl
+ << "\t" << "TypeImpl();" << std::endl
+ << "\t" << "~TypeImpl() override;" << std::endl << std::endl;
+
+ m_Impl << "TypeImpl<" << klass.Name << ">::TypeImpl()" << std::endl
+ << "{ }" << std::endl << std::endl
+ << "TypeImpl<" << klass.Name << ">::~TypeImpl()" << std::endl
+ << "{ }" << std::endl << std::endl;
+
+ /* GetName */
+ m_Header << "\t" << "String GetName() const override;" << std::endl;
+
+ m_Impl << "String TypeImpl<" << klass.Name << ">::GetName() const" << std::endl
+ << "{" << std::endl
+ << "\t" << "return \"" << klass.Name << "\";" << std::endl
+ << "}" << std::endl << std::endl;
+
+ /* GetAttributes */
+ m_Header << "\t" << "int GetAttributes() const override;" << std::endl;
+
+ m_Impl << "int TypeImpl<" << klass.Name << ">::GetAttributes() const" << std::endl
+ << "{" << std::endl
+ << "\t" << "return " << klass.Attributes << ";" << std::endl
+ << "}" << std::endl << std::endl;
+
+ /* GetBaseType */
+ m_Header << "\t" << "Type::Ptr GetBaseType() const override;" << std::endl;
+
+ m_Impl << "Type::Ptr TypeImpl<" << klass.Name << ">::GetBaseType() const" << std::endl
+ << "{" << std::endl
+ << "\t" << "return ";
+
+ if (!klass.Parent.empty())
+ m_Impl << klass.Parent << "::TypeInstance";
+ else
+ m_Impl << "Object::TypeInstance";
+
+ m_Impl << ";" << std::endl
+ << "}" << std::endl << std::endl;
+
+ /* GetFieldId */
+ m_Header << "\t" << "int GetFieldId(const String& name) const override;" << std::endl;
+
+ m_Impl << "int TypeImpl<" << klass.Name << ">::GetFieldId(const String& name) const" << std::endl
+ << "{" << std::endl;
+
+ if (!klass.Fields.empty()) {
+ m_Impl << "\t" << "int offset = ";
+
+ if (!klass.Parent.empty())
+ m_Impl << klass.Parent << "::TypeInstance->GetFieldCount()";
+ else
+ m_Impl << "0";
+
+ m_Impl << ";" << std::endl << std::endl;
+ }
+
+ std::map<int, std::vector<std::pair<int, std::string> > > jumptable;
+
+ int hlen = 0, collisions = 0;
+
+ do {
+ int num = 0;
+
+ hlen++;
+ jumptable.clear();
+ collisions = 0;
+
+ for (const Field& field : klass.Fields) {
+ auto hash = static_cast<int>(SDBM(field.Name, hlen));
+ jumptable[hash].emplace_back(num, field.Name);
+ num++;
+
+ if (jumptable[hash].size() > 1)
+ collisions++;
+ }
+ } while (collisions >= 5 && hlen < 8);
+
+ if (!klass.Fields.empty()) {
+ m_Impl << "\tswitch (static_cast<int>(Utility::SDBM(name, " << hlen << "))) {" << std::endl;
+
+ for (const auto& itj : jumptable) {
+ m_Impl << "\t\tcase " << itj.first << ":" << std::endl;
+
+ for (const auto& itf : itj.second) {
+ m_Impl << "\t\t\t" << "if (name == \"" << itf.second << "\")" << std::endl
+ << "\t\t\t\t" << "return offset + " << itf.first << ";" << std::endl;
+ }
+
+ m_Impl << std::endl
+ << "\t\t\tbreak;" << std::endl;
+ }
+
+ m_Impl << "\t}" << std::endl;
+ }
+
+ m_Impl << std::endl
+ << "\t" << "return ";
+
+ if (!klass.Parent.empty())
+ m_Impl << klass.Parent << "::TypeInstance->GetFieldId(name)";
+ else
+ m_Impl << "-1";
+
+ m_Impl << ";" << std::endl
+ << "}" << std::endl << std::endl;
+
+ /* GetFieldInfo */
+ m_Header << "\t" << "Field GetFieldInfo(int id) const override;" << std::endl;
+
+ m_Impl << "Field TypeImpl<" << klass.Name << ">::GetFieldInfo(int id) const" << std::endl
+ << "{" << std::endl;
+
+ if (!klass.Parent.empty())
+ m_Impl << "\t" << "int real_id = id - " << klass.Parent << "::TypeInstance->GetFieldCount();" << std::endl
+ << "\t" << "if (real_id < 0) { return " << klass.Parent << "::TypeInstance->GetFieldInfo(id); }" << std::endl;
+
+ if (!klass.Fields.empty()) {
+ m_Impl << "\t" << "switch (";
+
+ if (!klass.Parent.empty())
+ m_Impl << "real_id";
+ else
+ m_Impl << "id";
+
+ m_Impl << ") {" << std::endl;
+
+ size_t num = 0;
+ for (const Field& field : klass.Fields) {
+ std::string ftype = FieldTypeToIcingaName(field, false);
+
+ std::string nameref;
+
+ if (field.Type.IsName)
+ nameref = "\"" + field.Type.TypeName + "\"";
+ else
+ nameref = "nullptr";
+
+ m_Impl << "\t\t" << "case " << num << ":" << std::endl
+ << "\t\t\t" << "return {" << num << ", \"" << ftype << "\", \"" << field.Name << "\", \"" << (field.NavigationName.empty() ? field.Name : field.NavigationName) << "\", " << nameref << ", " << field.Attributes << ", " << field.Type.ArrayRank << "};" << std::endl;
+ num++;
+ }
+
+ m_Impl << "\t\t" << "default:" << std::endl
+ << "\t\t";
+ }
+
+ m_Impl << "\t" << "throw std::runtime_error(\"Invalid field ID.\");" << std::endl;
+
+ if (!klass.Fields.empty())
+ m_Impl << "\t" << "}" << std::endl;
+
+ m_Impl << "}" << std::endl << std::endl;
+
+ /* GetFieldCount */
+ m_Header << "\t" << "int GetFieldCount() const override;" << std::endl;
+
+ m_Impl << "int TypeImpl<" << klass.Name << ">::GetFieldCount() const" << std::endl
+ << "{" << std::endl
+ << "\t" << "return " << klass.Fields.size();
+
+ if (!klass.Parent.empty())
+ m_Impl << " + " << klass.Parent << "::TypeInstance->GetFieldCount()";
+
+ m_Impl << ";" << std::endl
+ << "}" << std::endl << std::endl;
+
+ /* GetFactory */
+ m_Header << "\t" << "ObjectFactory GetFactory() const override;" << std::endl;
+
+ m_Impl << "ObjectFactory TypeImpl<" << klass.Name << ">::GetFactory() const" << std::endl
+ << "{" << std::endl
+ << "\t" << "return TypeHelper<" << klass.Name << ", " << ((klass.Attributes & TAVarArgConstructor) ? "true" : "false") << ">::GetFactory();" << std::endl
+ << "}" << std::endl << std::endl;
+
+ /* GetLoadDependencies */
+ m_Header << "\t" << "const std::unordered_set<Type*>& GetLoadDependencies() const override;" << std::endl;
+
+ m_Impl << "const std::unordered_set<Type*>& TypeImpl<" << klass.Name << ">::GetLoadDependencies() const" << std::endl
+ << "{" << std::endl
+ << "\t" << "static const std::unordered_set<Type*> deps ({" << std::endl;
+
+ for (const std::string& dep : klass.LoadDependencies)
+ m_Impl << "\t\t" << "GetByName(\"" << dep << "\").get()," << std::endl;
+
+ m_Impl << "\t" << "});" << std::endl;
+
+ m_Impl << "\t" << "return deps;" << std::endl
+ << "}" << std::endl << std::endl;
+
+ /* GetActivationPriority */
+ m_Header << "\t" << "int GetActivationPriority() const override;" << std::endl;
+
+ m_Impl << "int TypeImpl<" << klass.Name << ">::GetActivationPriority() const" << std::endl
+ << "{" << std::endl
+ << "\t" << "return " << klass.ActivationPriority << ";" << std::endl
+ << "}" << std::endl << std::endl;
+
+ /* RegisterAttributeHandler */
+ m_Header << "public:" << std::endl
+ << "\t" << "void RegisterAttributeHandler(int fieldId, const Type::AttributeHandler& callback) override;" << std::endl;
+
+ m_Impl << "void TypeImpl<" << klass.Name << ">::RegisterAttributeHandler(int fieldId, const Type::AttributeHandler& callback)" << std::endl
+ << "{" << std::endl;
+
+ if (!klass.Parent.empty())
+ m_Impl << "\t" << "int real_id = fieldId - " << klass.Parent << "::TypeInstance->GetFieldCount(); " << std::endl
+ << "\t" << "if (real_id < 0) { " << klass.Parent << "::TypeInstance->RegisterAttributeHandler(fieldId, callback); return; }" << std::endl;
+
+ if (!klass.Fields.empty()) {
+ m_Impl << "\t" << "switch (";
+
+ if (!klass.Parent.empty())
+ m_Impl << "real_id";
+ else
+ m_Impl << "fieldId";
+
+ m_Impl << ") {" << std::endl;
+
+ int num = 0;
+ for (const Field& field : klass.Fields) {
+ m_Impl << "\t\t" << "case " << num << ":" << std::endl
+ << "\t\t\t" << "ObjectImpl<" << klass.Name << ">::On" << field.GetFriendlyName() << "Changed.connect(callback);" << std::endl
+ << "\t\t\t" << "break;" << std::endl;
+ num++;
+ }
+
+ m_Impl << "\t\t" << "default:" << std::endl
+ << "\t\t";
+ }
+ m_Impl << "\t" << "throw std::runtime_error(\"Invalid field ID.\");" << std::endl;
+
+ if (!klass.Fields.empty())
+ m_Impl << "\t" << "}" << std::endl;
+
+ m_Impl << "}" << std::endl << std::endl;
+
+ m_Header << "};" << std::endl << std::endl;
+
+ m_Header << std::endl;
+
+ /* ObjectImpl */
+ m_Header << "template<>" << std::endl
+ << "class ObjectImpl<" << klass.Name << ">"
+ << " : public " << (klass.Parent.empty() ? "Object" : klass.Parent) << std::endl
+ << "{" << std::endl
+ << "public:" << std::endl
+ << "\t" << "DECLARE_PTR_TYPEDEFS(ObjectImpl<" << klass.Name << ">);" << std::endl << std::endl;
+
+ /* Validate */
+ m_Header << "\t" << "void Validate(int types, const ValidationUtils& utils) override;" << std::endl;
+
+ m_Impl << "void ObjectImpl<" << klass.Name << ">::Validate(int types, const ValidationUtils& utils)" << std::endl
+ << "{" << std::endl;
+
+ if (!klass.Parent.empty())
+ m_Impl << "\t" << klass.Parent << "::Validate(types, utils);" << std::endl << std::endl;
+
+ for (const Field& field : klass.Fields) {
+ m_Impl << "\t" << "if (" << (field.Attributes & (FAEphemeral|FAConfig|FAState)) << " & types)" << std::endl
+ << "\t\t" << "Validate" << field.GetFriendlyName() << "(Lazy<" << field.Type.GetRealType() << ">([this]() { return Get" << field.GetFriendlyName() << "(); }), utils);" << std::endl;
+ }
+
+ m_Impl << "}" << std::endl << std::endl;
+
+ for (const Field& field : klass.Fields) {
+ std::string argName, valName;
+
+ if (field.Type.ArrayRank > 0) {
+ argName = "avalue";
+ valName = "value";
+ } else {
+ argName = "value";
+ valName = "value()";
+ }
+
+ m_Header << "\t" << "void SimpleValidate" << field.GetFriendlyName() << "(const Lazy<" << field.Type.GetRealType() << ">& " << argName << ", const ValidationUtils& utils);" << std::endl;
+
+ m_Impl << "void ObjectImpl<" << klass.Name << ">::SimpleValidate" << field.GetFriendlyName() << "(const Lazy<" << field.Type.GetRealType() << ">& " << argName << ", const ValidationUtils& utils)" << std::endl
+ << "{" << std::endl;
+
+ if (field.Attributes & FARequired) {
+ if (field.Type.GetRealType().find("::Ptr") != std::string::npos)
+ m_Impl << "\t" << "if (!" << argName << "())" << std::endl;
+ else
+ m_Impl << "\t" << "if (" << argName << "().IsEmpty())" << std::endl;
+
+ m_Impl << "\t\t" << "BOOST_THROW_EXCEPTION(ValidationError(dynamic_cast<ConfigObject *>(this), { \"" << field.Name << R"(" }, "Attribute must not be empty."));)" << std::endl << std::endl;
+ }
+
+ if (field.Attributes & FADeprecated) {
+ if (field.Type.GetRealType().find("::Ptr") != std::string::npos)
+ m_Impl << "\t" << "if (" << argName << "())" << std::endl;
+ else
+ m_Impl << "\t" << "if (" << argName << "() != GetDefault" << field.GetFriendlyName() << "())" << std::endl;
+
+ m_Impl << "\t\t" << "Log(LogWarning, \"" << klass.Name << "\") << \"Attribute '" << field.Name << R"(' for object '" << dynamic_cast<ConfigObject *>(this)->GetName() << "' of type '" << dynamic_cast<ConfigObject *>(this)->GetReflectionType()->GetName() << "' is deprecated and should not be used.";)" << std::endl;
+ }
+
+ if (field.Type.ArrayRank > 0) {
+ m_Impl << "\t" << "if (avalue()) {" << std::endl
+ << "\t\t" << "ObjectLock olock(avalue());" << std::endl
+ << "\t\t" << "for (const Value& value : avalue()) {" << std::endl;
+ }
+
+ std::string ftype = FieldTypeToIcingaName(field, true);
+
+ if (ftype == "Value") {
+ m_Impl << "\t" << "if (" << valName << ".IsObjectType<Function>()) {" << std::endl
+ << "\t\t" << "Function::Ptr func = " << valName << ";" << std::endl
+ << "\t\t" << "if (func->IsDeprecated())" << std::endl
+ << "\t\t\t" << "Log(LogWarning, \"" << klass.Name << "\") << \"Attribute '" << field.Name << R"(' for object '" << dynamic_cast<ConfigObject *>(this)->GetName() << "' of type '" << dynamic_cast<ConfigObject *>(this)->GetReflectionType()->GetName() << "' is set to a deprecated function: " << func->GetName();)" << std::endl
+ << "\t" << "}" << std::endl << std::endl;
+ }
+
+ if (field.Type.IsName) {
+ if (field.Type.ArrayRank > 0) {
+ m_Impl << "\t\t\t" << "if (!" << valName << ".IsEmpty() && ";
+
+ m_Impl << "!" << valName << ".IsString())" << std::endl
+ << "\t\t\t\t" << "BOOST_THROW_EXCEPTION(ValidationError(dynamic_cast<ConfigObject *>(this), { \""
+ << field.Name << R"(" }, "It is not allowed to specify '" + )" << valName << R"( + "' of type '" + )"
+ << valName << ".GetTypeName()" << R"( + "' as ')" << field.Type.TypeName
+ << "' name. Expected type of '" << field.Type.TypeName << "' name: 'String'.\"));" << std::endl;
+ }
+
+ m_Impl << "\t\t\t" << "if (";
+
+ if (field.Type.ArrayRank > 0)
+ m_Impl << valName << ".IsEmpty() || ";
+ else
+ m_Impl << "!" << valName << ".IsEmpty() && ";
+
+ m_Impl << "!utils.ValidateName(\"" << field.Type.TypeName << "\", " << valName << "))" << std::endl
+ << "\t\t\t\t" << "BOOST_THROW_EXCEPTION(ValidationError(dynamic_cast<ConfigObject *>(this), { \"" << field.Name << R"(" }, "Object '" + )" << valName << R"( + "' of type ')" << field.Type.TypeName
+ << "' does not exist.\"));" << std::endl;
+ } else if (field.Type.ArrayRank > 0 && (ftype == "Number" || ftype == "Boolean")) {
+ m_Impl << "\t" << "try {" << std::endl
+ << "\t\t" << "Convert::ToDouble(" << valName << ");" << std::endl
+ << "\t" << "} catch (const std::invalid_argument&) {" << std::endl
+ << "\t\t" << "BOOST_THROW_EXCEPTION(ValidationError(dynamic_cast<ConfigObject *>(this), { \"" << field.Name << R"(", "Array element '" + " << valName << " + "' of type '" + " << valName << ".GetReflectionType()->GetName() + "' is not valid here; expected type ')" << ftype << "'.\"));" << std::endl
+ << "\t" << "}" << std::endl;
+ }
+
+ if (field.Type.ArrayRank > 0) {
+ m_Impl << "\t\t" << "}" << std::endl
+ << "\t" << "}" << std::endl;
+ }
+
+ m_Impl << "}" << std::endl << std::endl;
+ }
+
+ /* constructor */
+ m_Header << "public:" << std::endl
+ << "\t" << "ObjectImpl<" << klass.Name << ">();" << std::endl;
+
+ m_Impl << "ObjectImpl<" << klass.Name << ">::ObjectImpl()" << std::endl
+ << "{" << std::endl;
+
+ for (const Field& field : klass.Fields) {
+ if (!field.PureSetAccessor)
+ m_Impl << "\t" << "Set" << field.GetFriendlyName() << "(" << "GetDefault" << field.GetFriendlyName() << "(), true);" << std::endl;
+ }
+
+ m_Impl << "}" << std::endl << std::endl;
+
+ /* destructor */
+ m_Header << "public:" << std::endl
+ << "\t" << "~ObjectImpl<" << klass.Name << ">() override;" << std::endl;
+
+ m_Impl << "ObjectImpl<" << klass.Name << ">::~ObjectImpl()" << std::endl
+ << "{ }" << std::endl << std::endl;
+
+ if (!klass.Fields.empty()) {
+ /* SetField */
+ m_Header << "public:" << std::endl
+ << "\t" << "void SetField(int id, const Value& value, bool suppress_events = false, const Value& cookie = Empty) override;" << std::endl;
+
+ m_Impl << "void ObjectImpl<" << klass.Name << ">::SetField(int id, const Value& value, bool suppress_events, const Value& cookie)" << std::endl
+ << "{" << std::endl;
+
+ if (!klass.Parent.empty())
+ m_Impl << "\t" << "int real_id = id - " << klass.Parent << "::TypeInstance->GetFieldCount(); " << std::endl
+ << "\t" << "if (real_id < 0) { " << klass.Parent << "::SetField(id, value, suppress_events, cookie); return; }" << std::endl;
+
+ m_Impl << "\t" << "switch (";
+
+ if (!klass.Parent.empty())
+ m_Impl << "real_id";
+ else
+ m_Impl << "id";
+
+ m_Impl << ") {" << std::endl;
+
+ size_t num = 0;
+ for (const Field& field : klass.Fields) {
+ m_Impl << "\t\t" << "case " << num << ":" << std::endl
+ << "\t\t\t" << "Set" << field.GetFriendlyName() << "(";
+
+ if (field.Attributes & FAEnum)
+ m_Impl << "static_cast<" << field.Type.GetRealType() << ">(static_cast<int>(";
+
+ m_Impl << "value";
+
+ if (field.Attributes & FAEnum)
+ m_Impl << "))";
+
+ m_Impl << ", suppress_events, cookie);" << std::endl
+ << "\t\t\t" << "break;" << std::endl;
+ num++;
+ }
+
+ m_Impl << "\t\t" << "default:" << std::endl
+ << "\t\t\t" << "throw std::runtime_error(\"Invalid field ID.\");" << std::endl
+ << "\t" << "}" << std::endl;
+
+ m_Impl << "}" << std::endl << std::endl;
+
+ /* GetField */
+ m_Header << "public:" << std::endl
+ << "\t" << "Value GetField(int id) const override;" << std::endl;
+
+ m_Impl << "Value ObjectImpl<" << klass.Name << ">::GetField(int id) const" << std::endl
+ << "{" << std::endl;
+
+ if (!klass.Parent.empty())
+ m_Impl << "\t" << "int real_id = id - " << klass.Parent << "::TypeInstance->GetFieldCount(); " << std::endl
+ << "\t" << "if (real_id < 0) { return " << klass.Parent << "::GetField(id); }" << std::endl;
+
+ m_Impl << "\t" << "switch (";
+
+ if (!klass.Parent.empty())
+ m_Impl << "real_id";
+ else
+ m_Impl << "id";
+
+ m_Impl << ") {" << std::endl;
+
+ num = 0;
+ for (const Field& field : klass.Fields) {
+ m_Impl << "\t\t" << "case " << num << ":" << std::endl
+ << "\t\t\t" << "return Get" << field.GetFriendlyName() << "();" << std::endl;
+ num++;
+ }
+
+ m_Impl << "\t\t" << "default:" << std::endl
+ << "\t\t\t" << "throw std::runtime_error(\"Invalid field ID.\");" << std::endl
+ << "\t" << "}" << std::endl;
+
+ m_Impl << "}" << std::endl << std::endl;
+
+ /* ValidateField */
+ m_Header << "public:" << std::endl
+ << "\t" << "void ValidateField(int id, const Lazy<Value>& lvalue, const ValidationUtils& utils) override;" << std::endl;
+
+ m_Impl << "void ObjectImpl<" << klass.Name << ">::ValidateField(int id, const Lazy<Value>& lvalue, const ValidationUtils& utils)" << std::endl
+ << "{" << std::endl;
+
+ if (!klass.Parent.empty())
+ m_Impl << "\t" << "int real_id = id - " << klass.Parent << "::TypeInstance->GetFieldCount(); " << std::endl
+ << "\t" << "if (real_id < 0) { " << klass.Parent << "::ValidateField(id, lvalue, utils); return; }" << std::endl;
+
+ m_Impl << "\t" << "switch (";
+
+ if (!klass.Parent.empty())
+ m_Impl << "real_id";
+ else
+ m_Impl << "id";
+
+ m_Impl << ") {" << std::endl;
+
+ num = 0;
+ for (const Field& field : klass.Fields) {
+ m_Impl << "\t\t" << "case " << num << ":" << std::endl
+ << "\t\t\t" << "Validate" << field.GetFriendlyName() << "(";
+
+ if (field.Attributes & FAEnum)
+ m_Impl << "static_cast<Lazy<" << field.Type.GetRealType() << "> >(static_cast<Lazy<int> >(";
+
+ m_Impl << "lvalue";
+
+ if (field.Attributes & FAEnum)
+ m_Impl << "))";
+
+ m_Impl << ", utils);" << std::endl
+ << "\t\t\t" << "break;" << std::endl;
+ num++;
+ }
+
+ m_Impl << "\t\t" << "default:" << std::endl
+ << "\t\t\t" << "throw std::runtime_error(\"Invalid field ID.\");" << std::endl
+ << "\t" << "}" << std::endl;
+
+ m_Impl << "}" << std::endl << std::endl;
+
+ /* NotifyField */
+ m_Header << "public:" << std::endl
+ << "\t" << "void NotifyField(int id, const Value& cookie = Empty) override;" << std::endl;
+
+ m_Impl << "void ObjectImpl<" << klass.Name << ">::NotifyField(int id, const Value& cookie)" << std::endl
+ << "{" << std::endl;
+
+ if (!klass.Parent.empty())
+ m_Impl << "\t" << "int real_id = id - " << klass.Parent << "::TypeInstance->GetFieldCount(); " << std::endl
+ << "\t" << "if (real_id < 0) { " << klass.Parent << "::NotifyField(id, cookie); return; }" << std::endl;
+
+ m_Impl << "\t" << "switch (";
+
+ if (!klass.Parent.empty())
+ m_Impl << "real_id";
+ else
+ m_Impl << "id";
+
+ m_Impl << ") {" << std::endl;
+
+ num = 0;
+ for (const Field& field : klass.Fields) {
+ m_Impl << "\t\t" << "case " << num << ":" << std::endl
+ << "\t\t\t" << "Notify" << field.GetFriendlyName() << "(cookie);" << std::endl
+ << "\t\t\t" << "break;" << std::endl;
+ num++;
+ }
+
+ m_Impl << "\t\t" << "default:" << std::endl
+ << "\t\t\t" << "throw std::runtime_error(\"Invalid field ID.\");" << std::endl
+ << "\t" << "}" << std::endl;
+
+ m_Impl << "}" << std::endl << std::endl;
+
+ /* NavigateField */
+ m_Header << "public:" << std::endl
+ << "\t" << "Object::Ptr NavigateField(int id) const override;" << std::endl;
+
+ m_Impl << "Object::Ptr ObjectImpl<" << klass.Name << ">::NavigateField(int id) const" << std::endl
+ << "{" << std::endl;
+
+ if (!klass.Parent.empty())
+ m_Impl << "\t" << "int real_id = id - " << klass.Parent << "::TypeInstance->GetFieldCount(); " << std::endl
+ << "\t" << "if (real_id < 0) { return " << klass.Parent << "::NavigateField(id); }" << std::endl;
+
+ bool haveNavigationFields = false;
+
+ for (const Field& field : klass.Fields) {
+ if (field.Attributes & FANavigation) {
+ haveNavigationFields = true;
+ break;
+ }
+ }
+
+ if (haveNavigationFields) {
+ m_Impl << "\t" << "switch (";
+
+ if (!klass.Parent.empty())
+ m_Impl << "real_id";
+ else
+ m_Impl << "id";
+
+ m_Impl << ") {" << std::endl;
+
+ num = 0;
+ for (const Field& field : klass.Fields) {
+ if (field.Attributes & FANavigation) {
+ m_Impl << "\t\t" << "case " << num << ":" << std::endl
+ << "\t\t\t" << "return Navigate" << field.GetFriendlyName() << "();" << std::endl;
+ }
+
+ num++;
+ }
+
+ m_Impl << "\t\t" << "default:" << std::endl
+ << "\t\t";
+ }
+
+ m_Impl << "\t" << "throw std::runtime_error(\"Invalid field ID.\");" << std::endl;
+
+ if (haveNavigationFields)
+ m_Impl << "\t" << "}" << std::endl;
+
+ m_Impl << "}" << std::endl << std::endl;
+
+ /* getters */
+ for (const Field& field : klass.Fields) {
+ std::string prot;
+
+ if (field.Attributes & FAGetProtected)
+ prot = "protected";
+ else
+ prot = "public";
+
+ m_Header << prot << ":" << std::endl
+ << "\t";
+
+ if (field.Attributes & FAGetVirtual || field.PureGetAccessor)
+ m_Header << "virtual ";
+
+ m_Header << field.Type.GetRealType() << " Get" << field.GetFriendlyName() << "() const";
+
+ if (field.PureGetAccessor) {
+ m_Header << " = 0;" << std::endl;
+ } else {
+ m_Header << ";" << std::endl;
+
+ m_Impl << field.Type.GetRealType() << " ObjectImpl<" << klass.Name << ">::Get" << field.GetFriendlyName() << "() const" << std::endl
+ << "{" << std::endl;
+
+ if (field.GetAccessor.empty() && !(field.Attributes & FANoStorage))
+ m_Impl << "\t" << "return m_" << field.GetFriendlyName() << ".load();" << std::endl;
+ else
+ m_Impl << field.GetAccessor << std::endl;
+
+ m_Impl << "}" << std::endl << std::endl;
+ }
+ }
+
+ /* setters */
+ for (const Field& field : klass.Fields) {
+ std::string prot;
+
+ if (field.Attributes & FASetProtected)
+ prot = "protected";
+ else
+ prot = "public";
+
+ m_Header << prot << ":" << std::endl
+ << "\t";
+
+ if (field.Attributes & FASetVirtual || field.PureSetAccessor)
+ m_Header << "virtual ";
+
+ m_Header << "void Set" << field.GetFriendlyName() << "(" << field.Type.GetArgumentType() << " value, bool suppress_events = false, const Value& cookie = Empty)";
+
+ if (field.PureSetAccessor) {
+ m_Header << " = 0;" << std::endl;
+ } else {
+ m_Header << ";" << std::endl;
+
+ m_Impl << "void ObjectImpl<" << klass.Name << ">::Set" << field.GetFriendlyName() << "(" << field.Type.GetArgumentType() << " value, bool suppress_events, const Value& cookie)" << std::endl
+ << "{" << std::endl;
+
+ if (field.Type.IsName || !field.TrackAccessor.empty() || field.Attributes & FASignalWithOldValue)
+ m_Impl << "\t" << "Value oldValue = Get" << field.GetFriendlyName() << "();" << std::endl
+ << "\t" << "auto *dobj = dynamic_cast<ConfigObject *>(this);" << std::endl;
+
+ if (field.SetAccessor.empty() && !(field.Attributes & FANoStorage))
+ m_Impl << "\t" << "m_" << field.GetFriendlyName() << ".store(value);" << std::endl;
+ else
+ m_Impl << field.SetAccessor << std::endl << std::endl;
+
+ if (field.Type.IsName || !field.TrackAccessor.empty()) {
+ if (field.Name != "active") {
+ m_Impl << "\t" << "if (!dobj || dobj->IsActive())" << std::endl
+ << "\t";
+ }
+
+ m_Impl << "\t" << "Track" << field.GetFriendlyName() << "(oldValue, value);" << std::endl;
+ }
+
+ m_Impl << "\t" << "if (!suppress_events) {" << std::endl
+ << "\t\t" << "Notify" << field.GetFriendlyName() << "(cookie);" << std::endl;
+
+ if (field.Attributes & FASignalWithOldValue) {
+ m_Impl << "\t\t" << "if (!dobj || dobj->IsActive())" << std::endl
+ << "\t\t\t" << "On" << field.GetFriendlyName() << "ChangedWithOldValue(static_cast<" << klass.Name << " *>(this), oldValue, value);" << std::endl;
+ }
+
+ m_Impl << "\t" "}" << std::endl << std::endl
+ << "}" << std::endl << std::endl;
+ }
+ }
+
+ m_Header << "protected:" << std::endl;
+
+ bool needs_tracking = false;
+
+ /* tracking */
+ for (const Field& field : klass.Fields) {
+ if (!field.Type.IsName && field.TrackAccessor.empty())
+ continue;
+
+ needs_tracking = true;
+
+ m_Header << "\t" << "void Track" << field.GetFriendlyName() << "(" << field.Type.GetArgumentType() << " oldValue, " << field.Type.GetArgumentType() << " newValue);" << std::endl;
+
+ m_Impl << "void ObjectImpl<" << klass.Name << ">::Track" << field.GetFriendlyName() << "(" << field.Type.GetArgumentType() << " oldValue, " << field.Type.GetArgumentType() << " newValue)" << std::endl
+ << "{" << std::endl;
+
+ if (!field.TrackAccessor.empty())
+ m_Impl << "\t" << field.TrackAccessor << std::endl;
+
+ if (field.Type.TypeName != "String") {
+ if (field.Type.ArrayRank > 0) {
+ m_Impl << "\t" << "if (oldValue) {" << std::endl
+ << "\t\t" << "ObjectLock olock(oldValue);" << std::endl
+ << "\t\t" << "for (const String& ref : oldValue) {" << std::endl
+ << "\t\t\t" << "DependencyGraph::RemoveDependency(this, ConfigObject::GetObject";
+
+ /* Ew */
+ if (field.Type.TypeName == "Zone" && m_Library == "base")
+ m_Impl << "(\"Zone\", ";
+ else
+ m_Impl << "<" << field.Type.TypeName << ">(";
+
+ m_Impl << "ref).get());" << std::endl
+ << "\t\t" << "}" << std::endl
+ << "\t" << "}" << std::endl
+ << "\t" << "if (newValue) {" << std::endl
+ << "\t\t" << "ObjectLock olock(newValue);" << std::endl
+ << "\t\t" << "for (const String& ref : newValue) {" << std::endl
+ << "\t\t\t" << "DependencyGraph::AddDependency(this, ConfigObject::GetObject";
+
+ /* Ew */
+ if (field.Type.TypeName == "Zone" && m_Library == "base")
+ m_Impl << "(\"Zone\", ";
+ else
+ m_Impl << "<" << field.Type.TypeName << ">(";
+
+ m_Impl << "ref).get());" << std::endl
+ << "\t\t" << "}" << std::endl
+ << "\t" << "}" << std::endl;
+ } else {
+ m_Impl << "\t" << "if (!oldValue.IsEmpty())" << std::endl
+ << "\t\t" << "DependencyGraph::RemoveDependency(this, ConfigObject::GetObject";
+
+ /* Ew */
+ if (field.Type.TypeName == "Zone" && m_Library == "base")
+ m_Impl << "(\"Zone\", ";
+ else
+ m_Impl << "<" << field.Type.TypeName << ">(";
+
+ m_Impl << "oldValue).get());" << std::endl
+ << "\t" << "if (!newValue.IsEmpty())" << std::endl
+ << "\t\t" << "DependencyGraph::AddDependency(this, ConfigObject::GetObject";
+
+ /* Ew */
+ if (field.Type.TypeName == "Zone" && m_Library == "base")
+ m_Impl << "(\"Zone\", ";
+ else
+ m_Impl << "<" << field.Type.TypeName << ">(";
+
+ m_Impl << "newValue).get());" << std::endl;
+ }
+ }
+
+ m_Impl << "}" << std::endl << std::endl;
+ }
+
+ /* navigation */
+ for (const Field& field : klass.Fields) {
+ if ((field.Attributes & FANavigation) == 0)
+ continue;
+
+ m_Header << "public:" << std::endl
+ << "\t" << "Object::Ptr Navigate" << field.GetFriendlyName() << "() const";
+
+ if (field.PureNavigateAccessor) {
+ m_Header << " = 0;" << std::endl;
+ } else {
+ m_Header << ";" << std::endl;
+
+ m_Impl << "Object::Ptr ObjectImpl<" << klass.Name << ">::Navigate" << field.GetFriendlyName() << "() const" << std::endl
+ << "{" << std::endl;
+
+ if (field.NavigateAccessor.empty())
+ m_Impl << "\t" << "return Get" << field.GetFriendlyName() << "();" << std::endl;
+ else
+ m_Impl << "\t" << field.NavigateAccessor << std::endl;
+
+ m_Impl << "}" << std::endl << std::endl;
+ }
+ }
+
+ /* start/stop */
+ if (needs_tracking) {
+ m_Header << "protected:" << std::endl
+ << "\tvoid Start(bool runtimeCreated = false) override;" << std::endl
+ << "\tvoid Stop(bool runtimeRemoved = false) override;" << std::endl;
+
+ m_Impl << "void ObjectImpl<" << klass.Name << ">::Start(bool runtimeCreated)" << std::endl
+ << "{" << std::endl
+ << "\t" << klass.Parent << "::Start(runtimeCreated);" << std::endl << std::endl;
+
+ for (const Field& field : klass.Fields) {
+ if (!field.Type.IsName && field.TrackAccessor.empty())
+ continue;
+
+ m_Impl << "\t" << "Track" << field.GetFriendlyName() << "(Empty, Get" << field.GetFriendlyName() << "());" << std::endl;
+ }
+
+ m_Impl << "}" << std::endl << std::endl
+ << "void ObjectImpl<" << klass.Name << ">::Stop(bool runtimeRemoved)" << std::endl
+ << "{" << std::endl
+ << "\t" << klass.Parent << "::Stop(runtimeRemoved);" << std::endl << std::endl;
+
+ for (const Field& field : klass.Fields) {
+ if (!field.Type.IsName && field.TrackAccessor.empty())
+ continue;
+
+ m_Impl << "\t" << "Track" << field.GetFriendlyName() << "(Get" << field.GetFriendlyName() << "(), Empty);" << std::endl;
+ }
+
+ m_Impl << "}" << std::endl << std::endl;
+ }
+
+ /* notify */
+ for (const Field& field : klass.Fields) {
+ std::string prot;
+
+ if (field.Attributes & FASetProtected)
+ prot = "protected";
+ else
+ prot = "public";
+
+ m_Header << prot << ":" << std::endl
+ << "\t" << "virtual void Notify" << field.GetFriendlyName() << "(const Value& cookie = Empty);" << std::endl;
+
+ m_Impl << "void ObjectImpl<" << klass.Name << ">::Notify" << field.GetFriendlyName() << "(const Value& cookie)" << std::endl
+ << "{" << std::endl;
+
+ if (field.Name != "active") {
+ m_Impl << "\t" << "auto *dobj = dynamic_cast<ConfigObject *>(this);" << std::endl
+ << "\t" << "if (!dobj || dobj->IsActive())" << std::endl
+ << "\t";
+ }
+
+ m_Impl << "\t" << "On" << field.GetFriendlyName() << "Changed(static_cast<" << klass.Name << " *>(this), cookie);" << std::endl
+ << "}" << std::endl << std::endl;
+ }
+
+ /* default */
+ for (const Field& field : klass.Fields) {
+ std::string realType = field.Type.GetRealType();
+
+ m_Header << "private:" << std::endl
+ << "\t" << "inline " << realType << " GetDefault" << field.GetFriendlyName() << "() const;" << std::endl;
+
+ m_Impl << realType << " ObjectImpl<" << klass.Name << ">::GetDefault" << field.GetFriendlyName() << "() const" << std::endl
+ << "{" << std::endl;
+
+ if (field.DefaultAccessor.empty())
+ m_Impl << "\t" << "return " << realType << "();" << std::endl;
+ else
+ m_Impl << "\t" << field.DefaultAccessor << std::endl;
+
+ m_Impl << "}" << std::endl << std::endl;
+ }
+
+ /* validators */
+ for (const Field& field : klass.Fields) {
+ m_Header << "protected:" << std::endl
+ << "\t" << "virtual void Validate" << field.GetFriendlyName() << "(const Lazy<" << field.Type.GetRealType() << ">& lvalue, const ValidationUtils& utils);" << std::endl;
+ }
+
+ /* instance variables */
+ m_Header << "private:" << std::endl;
+
+ for (const Field& field : klass.Fields) {
+ if (field.Attributes & FANoStorage)
+ continue;
+
+ m_Header << "\tAtomicOrLocked<" << field.Type.GetRealType() << "> m_" << field.GetFriendlyName() << ";" << std::endl;
+ }
+
+ /* signal */
+ m_Header << "public:" << std::endl;
+
+ for (const Field& field : klass.Fields) {
+ m_Header << "\t" << "static boost::signals2::signal<void (const intrusive_ptr<" << klass.Name << ">&, const Value&)> On" << field.GetFriendlyName() << "Changed;" << std::endl;
+ m_Impl << std::endl << "boost::signals2::signal<void (const intrusive_ptr<" << klass.Name << ">&, const Value&)> ObjectImpl<" << klass.Name << ">::On" << field.GetFriendlyName() << "Changed;" << std::endl << std::endl;
+
+ if (field.Attributes & FASignalWithOldValue) {
+ m_Header << "\t" << "static boost::signals2::signal<void (const intrusive_ptr<" << klass.Name
+ << ">&, const Value&, const Value&)> On" << field.GetFriendlyName() << "ChangedWithOldValue;"
+ << std::endl;
+ m_Impl << std::endl << "boost::signals2::signal<void (const intrusive_ptr<" << klass.Name
+ << ">&, const Value&, const Value&)> ObjectImpl<" << klass.Name << ">::On"
+ << field.GetFriendlyName() << "ChangedWithOldValue;" << std::endl << std::endl;
+ }
+ }
+ }
+
+ if (klass.Name == "ConfigObject")
+ m_Header << "\t" << "friend class ConfigItem;" << std::endl;
+
+ if (!klass.TypeBase.empty())
+ m_Header << "\t" << "friend class " << klass.TypeBase << ";" << std::endl;
+
+ m_Header << "};" << std::endl << std::endl;
+
+ for (const Field& field : klass.Fields) {
+ m_MissingValidators[std::make_pair(klass.Name, field.GetFriendlyName())] = field;
+ }
+}
+
+void ClassCompiler::CodeGenValidator(const std::string& name, const std::string& klass, const std::vector<Rule>& rules, const std::string& field, const FieldType& fieldType, ValidatorType validatorType)
+{
+ m_Impl << "static void TIValidate" << name << "(const intrusive_ptr<ObjectImpl<" << klass << "> >& object, ";
+
+ if (validatorType != ValidatorField)
+ m_Impl << "const String& key, ";
+
+ m_Impl << fieldType.GetArgumentType() << " value, std::vector<String>& location, const ValidationUtils& utils)" << std::endl
+ << "{" << std::endl;
+
+ if (validatorType == ValidatorField) {
+ bool required = false;
+
+ for (const Rule& rule : rules) {
+ if ((rule.Attributes & RARequired) && rule.Pattern == field) {
+ required = true;
+ break;
+ }
+ }
+
+ if (fieldType.GetRealType() != "int" && fieldType.GetRealType() != "double") {
+ if (fieldType.GetRealType() == "Value" || fieldType.GetRealType() == "String")
+ m_Impl << "\t" << "if (value.IsEmpty())" << std::endl;
+ else
+ m_Impl << "\t" << "if (!value)" << std::endl;
+
+ if (required)
+ m_Impl << "BOOST_THROW_EXCEPTION(ValidationError(dynamic_cast<ConfigObject *>(this), location, \"This attribute must not be empty.\"));" << std::endl;
+ else
+ m_Impl << "\t\t" << "return;" << std::endl;
+
+ m_Impl << std::endl;
+ }
+ }
+
+ if (validatorType != ValidatorField)
+ m_Impl << "\t" << "bool known_attribute = false;" << std::endl;
+
+ bool type_check = false;
+ int i = 0;
+
+ for (const Rule& rule : rules) {
+ if (rule.Attributes & RARequired)
+ continue;
+
+ i++;
+
+ if (validatorType == ValidatorField && rule.Pattern != field)
+ continue;
+
+ m_Impl << "\t" << "do {" << std::endl;
+
+ if (validatorType != ValidatorField) {
+ if (rule.Pattern != "*") {
+ if (rule.Pattern.find_first_of("*?") != std::string::npos)
+ m_Impl << "\t\t" << "if (!Utility::Match(\"" << rule.Pattern << "\", key))" << std::endl;
+ else
+ m_Impl << "\t\t" << "if (key != \"" << rule.Pattern << "\")" << std::endl;
+
+ m_Impl << "\t\t\t" << "break;" << std::endl;
+ }
+
+ m_Impl << "\t\t" << "known_attribute = true;" << std::endl;
+ }
+
+ if (rule.IsName) {
+ m_Impl << "\t\t" << "if (value.IsScalar()) {" << std::endl
+ << "\t\t\t" << "if (utils.ValidateName(\"" << rule.Type << "\", value))" << std::endl
+ << "\t\t\t\t" << "return;" << std::endl
+ << "\t\t\t" << "else" << std::endl
+ << "\t\t\t\t" << R"(BOOST_THROW_EXCEPTION(ValidationError(dynamic_pointer_cast<ConfigObject>(object), location, "Object '" + ")" << "xxx" << R"( + "' of type ')" << rule.Type << "' does not exist.\"));" << std::endl
+ << "\t\t" << "}" << std::endl;
+ }
+
+ if (fieldType.GetRealType() == "Value") {
+ if (rule.Type == "String")
+ m_Impl << "\t\t" << "if (value.IsEmpty() || value.IsScalar())" << std::endl
+ << "\t\t\t" << "return;" << std::endl;
+ else if (rule.Type == "Number") {
+ m_Impl << "\t\t" << "try {" << std::endl
+ << "\t\t\t" << "Convert::ToDouble(value);" << std::endl
+ << "\t\t\t" << "return;" << std::endl
+ << "\t\t" << "} catch (...) { }" << std::endl;
+ }
+ }
+
+ if (rule.Type == "Dictionary" || rule.Type == "Array" || rule.Type == "Function") {
+ if (fieldType.GetRealType() == "Value") {
+ m_Impl << "\t\t" << "if (value.IsObjectType<" << rule.Type << ">()) {" << std::endl;
+ type_check = true;
+ } else if (fieldType.GetRealType() != rule.Type + "::Ptr") {
+ m_Impl << "\t\t" << "if (dynamic_pointer_cast<" << rule.Type << ">(value)) {" << std::endl;
+ type_check = true;
+ }
+
+ if (!rule.Rules.empty()) {
+ bool indent = false;
+
+ if (rule.Type == "Dictionary") {
+ if (type_check)
+ m_Impl << "\t\t\t" << "Dictionary::Ptr dict = value;" << std::endl;
+ else
+ m_Impl << "\t\t" << "const Dictionary::Ptr& dict = value;" << std::endl;
+
+ m_Impl << (type_check ? "\t" : "") << "\t\t" << "{" << std::endl
+ << (type_check ? "\t" : "") << "\t\t\t" << "ObjectLock olock(dict);" << std::endl
+ << (type_check ? "\t" : "") << "\t\t\t" << "for (const Dictionary::Pair& kv : dict) {" << std::endl
+ << (type_check ? "\t" : "") << "\t\t\t\t" << "const String& akey = kv.first;" << std::endl
+ << (type_check ? "\t" : "") << "\t\t\t\t" << "const Value& avalue = kv.second;" << std::endl;
+ indent = true;
+ } else if (rule.Type == "Array") {
+ if (type_check)
+ m_Impl << "\t\t\t" << "Array::Ptr arr = value;" << std::endl;
+ else
+ m_Impl << "\t\t" << "const Array::Ptr& arr = value;" << std::endl;
+
+ m_Impl << (type_check ? "\t" : "") << "\t\t" << "Array::SizeType anum = 0;" << std::endl
+ << (type_check ? "\t" : "") << "\t\t" << "{" << std::endl
+ << (type_check ? "\t" : "") << "\t\t\t" << "ObjectLock olock(arr);" << std::endl
+ << (type_check ? "\t" : "") << "\t\t\t" << "for (const Value& avalue : arr) {" << std::endl
+ << (type_check ? "\t" : "") << "\t\t\t\t" << "String akey = Convert::ToString(anum);" << std::endl;
+ indent = true;
+ } else {
+ m_Impl << (type_check ? "\t" : "") << "\t\t" << "String akey = \"\";" << std::endl
+ << (type_check ? "\t" : "") << "\t\t" << "const Value& avalue = value;" << std::endl;
+ }
+
+ std::string subvalidator_prefix;
+
+ if (validatorType == ValidatorField)
+ subvalidator_prefix = klass;
+ else
+ subvalidator_prefix = name;
+
+ m_Impl << (type_check ? "\t" : "") << (indent ? "\t\t" : "") << "\t\t" << "location.emplace_back(akey);" << std::endl
+ << (type_check ? "\t" : "") << (indent ? "\t\t" : "") << "\t\t" << "TIValidate" << subvalidator_prefix << "_" << i << "(object, akey, avalue, location, utils);" << std::endl
+ << (type_check ? "\t" : "") << (indent ? "\t\t" : "") << "\t\t" << "location.pop_back();" << std::endl;
+
+ if (rule.Type == "Array")
+ m_Impl << (type_check ? "\t" : "") << "\t\t\t\t" << "anum++;" << std::endl;
+
+ if (rule.Type == "Dictionary" || rule.Type == "Array") {
+ m_Impl << (type_check ? "\t" : "") << "\t\t\t" << "}" << std::endl
+ << (type_check ? "\t" : "") << "\t\t" << "}" << std::endl;
+ }
+
+ for (const Rule& srule : rule.Rules) {
+ if ((srule.Attributes & RARequired) == 0)
+ continue;
+
+ if (rule.Type == "Dictionary") {
+ m_Impl << (type_check ? "\t" : "") << "\t\t" << "if (dict->Get(\"" << srule.Pattern << "\").IsEmpty())" << std::endl
+ << (type_check ? "\t" : "") << "\t\t\t" << "BOOST_THROW_EXCEPTION(ValidationError(dynamic_pointer_cast<ConfigObject>(object), location, \"Required dictionary item '" << srule.Pattern << "' is not set.\"));" << std::endl;
+ } else if (rule.Type == "Array") {
+ int index = -1;
+ std::stringstream idxbuf;
+ idxbuf << srule.Pattern;
+ idxbuf >> index;
+
+ if (index == -1) {
+ std::cerr << "Invalid index for 'required' keyword: " << srule.Pattern;
+ std::exit(1);
+ }
+
+ m_Impl << (type_check ? "\t" : "") << "\t\t" << "if (arr.GetLength() < " << (index + 1) << ")" << std::endl
+ << (type_check ? "\t" : "") << "\t\t\t" << "BOOST_THROW_EXCEPTION(ValidationError(dynamic_cast<ConfigObject *>(this), location, \"Required index '" << index << "' is not set.\"));" << std::endl;
+ }
+ }
+ }
+
+ m_Impl << (type_check ? "\t" : "") << "\t\t" << "return;" << std::endl;
+
+ if (fieldType.GetRealType() == "Value" || fieldType.GetRealType() != rule.Type + "::Ptr")
+ m_Impl << "\t\t" << "}" << std::endl;
+ }
+
+ m_Impl << "\t" << "} while (0);" << std::endl << std::endl;
+ }
+
+ if (type_check || validatorType != ValidatorField) {
+ if (validatorType != ValidatorField) {
+ m_Impl << "\t" << "if (!known_attribute)" << std::endl
+ << "\t\t" << "BOOST_THROW_EXCEPTION(ValidationError(dynamic_pointer_cast<ConfigObject>(object), location, \"Invalid attribute: \" + key));" << std::endl
+ << "\t" << "else" << std::endl;
+ }
+
+ m_Impl << (validatorType != ValidatorField ? "\t" : "") << "\t" << "BOOST_THROW_EXCEPTION(ValidationError(dynamic_pointer_cast<ConfigObject>(object), location, \"Invalid type.\"));" << std::endl;
+ }
+
+ m_Impl << "}" << std::endl << std::endl;
+}
+
+void ClassCompiler::CodeGenValidatorSubrules(const std::string& name, const std::string& klass, const std::vector<Rule>& rules)
+{
+ int i = 0;
+
+ for (const Rule& rule : rules) {
+ if (rule.Attributes & RARequired)
+ continue;
+
+ i++;
+
+ if (!rule.Rules.empty()) {
+ ValidatorType subtype;
+
+ if (rule.Type == "Array")
+ subtype = ValidatorArray;
+ else if (rule.Type == "Dictionary")
+ subtype = ValidatorDictionary;
+ else {
+ std::cerr << "Invalid sub-validator type: " << rule.Type << std::endl;
+ std::exit(EXIT_FAILURE);
+ }
+
+ std::ostringstream namebuf;
+ namebuf << name << "_" << i;
+
+ CodeGenValidatorSubrules(namebuf.str(), klass, rule.Rules);
+
+ FieldType ftype;
+ ftype.IsName = false;
+ ftype.TypeName = "Value";
+ CodeGenValidator(namebuf.str(), klass, rule.Rules, rule.Pattern, ftype, subtype);
+ }
+ }
+}
+
+void ClassCompiler::HandleValidator(const Validator& validator, const ClassDebugInfo&)
+{
+ CodeGenValidatorSubrules(validator.Name, validator.Name, validator.Rules);
+
+ for (const auto& it : m_MissingValidators)
+ CodeGenValidator(it.first.first + it.first.second, it.first.first, validator.Rules, it.second.Name, it.second.Type, ValidatorField);
+
+ for (const auto& it : m_MissingValidators) {
+ m_Impl << "void ObjectImpl<" << it.first.first << ">::Validate" << it.first.second << "(const Lazy<" << it.second.Type.GetRealType() << ">& lvalue, const ValidationUtils& utils)" << std::endl
+ << "{" << std::endl
+ << "\t" << "SimpleValidate" << it.first.second << "(lvalue, utils);" << std::endl
+ << "\t" << "std::vector<String> location;" << std::endl
+ << "\t" << "location.emplace_back(\"" << it.second.Name << "\");" << std::endl
+ << "\t" << "TIValidate" << it.first.first << it.first.second << "(this, lvalue(), location, utils);" << std::endl
+ << "\t" << "location.pop_back();" << std::endl
+ << "}" << std::endl << std::endl;
+ }
+
+ m_MissingValidators.clear();
+}
+
+void ClassCompiler::HandleMissingValidators()
+{
+ for (const auto& it : m_MissingValidators) {
+ m_Impl << "void ObjectImpl<" << it.first.first << ">::Validate" << it.first.second << "(const Lazy<" << it.second.Type.GetRealType() << ">& lvalue, const ValidationUtils& utils)" << std::endl
+ << "{" << std::endl
+ << "\t" << "SimpleValidate" << it.first.second << "(lvalue, utils);" << std::endl
+ << "}" << std::endl << std::endl;
+ }
+
+ m_MissingValidators.clear();
+}
+
+void ClassCompiler::CompileFile(const std::string& inputpath,
+ const std::string& implpath, const std::string& headerpath)
+{
+ std::ifstream input;
+ input.open(inputpath.c_str(), std::ifstream::in);
+
+ if (!input) {
+ std::cerr << "Could not open input file: " << inputpath << std::endl;
+ std::exit(EXIT_FAILURE);
+ }
+
+ std::string tmpheaderpath = headerpath + ".tmp";
+ std::ofstream oheader;
+ oheader.open(tmpheaderpath.c_str(), std::ofstream::out);
+
+ if (!oheader) {
+ std::cerr << "Could not open header file: " << tmpheaderpath << std::endl;
+ std::exit(EXIT_FAILURE);
+ }
+
+ std::string tmpimplpath = implpath + ".tmp";
+ std::ofstream oimpl;
+ oimpl.open(tmpimplpath.c_str(), std::ofstream::out);
+
+ if (!oimpl) {
+ std::cerr << "Could not open implementation file: " << tmpimplpath << std::endl;
+ std::exit(EXIT_FAILURE);
+ }
+
+ CompileStream(inputpath, input, oimpl, oheader);
+
+ input.close();
+ oimpl.close();
+ oheader.close();
+
+#ifdef _WIN32
+ _unlink(headerpath.c_str());
+#endif /* _WIN32 */
+
+ if (rename(tmpheaderpath.c_str(), headerpath.c_str()) < 0) {
+ std::cerr << "Could not rename header file: " << tmpheaderpath << " -> " << headerpath << std::endl;
+ std::exit(EXIT_FAILURE);
+ }
+
+#ifdef _WIN32
+ _unlink(implpath.c_str());
+#endif /* _WIN32 */
+
+ if (rename(tmpimplpath.c_str(), implpath.c_str()) < 0) {
+ std::cerr << "Could not rename implementation file: " << tmpimplpath << " -> " << implpath << std::endl;
+ std::exit(EXIT_FAILURE);
+ }
+}
+
+std::string ClassCompiler::BaseName(const std::string& path)
+{
+ char *dir = strdup(path.c_str());
+ std::string result;
+
+ if (!dir)
+ throw std::bad_alloc();
+
+#ifndef _WIN32
+ result = basename(dir);
+#else /* _WIN32 */
+ result = PathFindFileName(dir);
+#endif /* _WIN32 */
+
+ free(dir);
+
+ return result;
+}
+
+std::string ClassCompiler::FileNameToGuardName(const std::string& fname)
+{
+ std::string result = fname;
+ std::locale locale;
+
+ for (auto& ch : result) {
+ ch = std::toupper(ch, locale);
+
+ if (ch == '.')
+ ch = '_';
+ }
+
+ return result;
+}
+
+void ClassCompiler::CompileStream(const std::string& path, std::istream& input,
+ std::ostream& oimpl, std::ostream& oheader)
+{
+ input.exceptions(std::istream::badbit);
+
+ std::string guard_name = FileNameToGuardName(BaseName(path));
+
+ oheader << "#ifndef " << guard_name << std::endl
+ << "#define " << guard_name << std::endl << std::endl;
+
+ oheader << "#include \"base/object.hpp\"" << std::endl
+ << "#include \"base/type.hpp\"" << std::endl
+ << "#include \"base/value.hpp\"" << std::endl
+ << "#include \"base/array.hpp\"" << std::endl
+ << "#include \"base/atomic.hpp\"" << std::endl
+ << "#include \"base/dictionary.hpp\"" << std::endl
+ << "#include <boost/signals2.hpp>" << std::endl << std::endl;
+
+ oimpl << "#include \"base/exception.hpp\"" << std::endl
+ << "#include \"base/objectlock.hpp\"" << std::endl
+ << "#include \"base/utility.hpp\"" << std::endl
+ << "#include \"base/convert.hpp\"" << std::endl
+ << "#include \"base/dependencygraph.hpp\"" << std::endl
+ << "#include \"base/logger.hpp\"" << std::endl
+ << "#include \"base/function.hpp\"" << std::endl
+ << "#include \"base/configtype.hpp\"" << std::endl
+ << "#ifdef _MSC_VER" << std::endl
+ << "#pragma warning( push )" << std::endl
+ << "#pragma warning( disable : 4244 )" << std::endl
+ << "#pragma warning( disable : 4800 )" << std::endl
+ << "#endif /* _MSC_VER */" << std::endl << std::endl;
+
+
+ ClassCompiler ctx(path, input, oimpl, oheader);
+ ctx.Compile();
+
+ oheader << "#endif /* " << guard_name << " */" << std::endl;
+
+ oimpl << "#ifdef _MSC_VER" << std::endl
+ << "#pragma warning ( pop )" << std::endl
+ << "#endif /* _MSC_VER */" << std::endl;
+}
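Note (editor): HandleClass above emits one ObjectImpl<T> specialization per class, with typed getters/setters, defaults, validators, and switch-based SetField/GetField/ValidateField/NotifyField dispatchers keyed on the field index. The following is a minimal, self-contained C++ sketch of that dispatch pattern only; the class and field names ("ExampleObject", "address", "port") are hypothetical and do not come from this patch.

// Sketch of the switch-based field dispatch that the generated
// GetField/SetField methods follow: field ids map to accessors in
// declaration order, unknown ids throw.
#include <stdexcept>
#include <string>

class ExampleObject
{
public:
	std::string GetField(int id) const
	{
		switch (id) {
			case 0:
				return m_Address;
			case 1:
				return m_Port;
			default:
				throw std::runtime_error("Invalid field ID.");
		}
	}

	void SetField(int id, const std::string& value)
	{
		switch (id) {
			case 0:
				m_Address = value;
				break;
			case 1:
				m_Port = value;
				break;
			default:
				throw std::runtime_error("Invalid field ID.");
		}
	}

private:
	std::string m_Address;
	std::string m_Port;
};

int main()
{
	ExampleObject obj;
	obj.SetField(1, "5665");
	return obj.GetField(1) == "5665" ? 0 : 1;
}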
diff --git a/tools/mkclass/classcompiler.hpp b/tools/mkclass/classcompiler.hpp
new file mode 100644
index 0000000..0bd789d
--- /dev/null
+++ b/tools/mkclass/classcompiler.hpp
@@ -0,0 +1,245 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#ifndef CLASSCOMPILER_H
+#define CLASSCOMPILER_H
+
+#include <string>
+#include <istream>
+#include <utility>
+#include <vector>
+#include <algorithm>
+#include <map>
+
+namespace icinga
+{
+
+struct ClassDebugInfo
+{
+ std::string path;
+ int first_line;
+ int first_column;
+ int last_line;
+ int last_column;
+};
+
+enum FieldAccessorType
+{
+ FTGet,
+ FTSet,
+ FTDefault,
+ FTTrack,
+ FTNavigate
+};
+
+struct FieldAccessor
+{
+ FieldAccessorType Type;
+ std::string Accessor;
+ bool Pure;
+
+ FieldAccessor(FieldAccessorType type, std::string accessor, bool pure)
+ : Type(type), Accessor(std::move(accessor)), Pure(pure)
+ { }
+};
+
+/* keep this in sync with lib/base/type.hpp */
+enum FieldAttribute
+{
+ FAEphemeral = 1,
+ FAConfig = 2,
+ FAState = 4,
+ FAEnum = 8,
+ FAGetProtected = 16,
+ FASetProtected = 32,
+ FANoStorage = 64,
+ FALoadDependency = 128,
+ FARequired = 256,
+ FANavigation = 512,
+ FANoUserModify = 1024,
+ FANoUserView = 2048,
+ FADeprecated = 4096,
+ FAGetVirtual = 8192,
+ FASetVirtual = 16384,
+ FAActivationPriority = 32768,
+ FASignalWithOldValue = 65536,
+};
+
+struct FieldType
+{
+ bool IsName{false};
+ std::string TypeName;
+ int ArrayRank{0};
+
+ inline std::string GetRealType() const
+ {
+ if (ArrayRank > 0)
+ return "Array::Ptr";
+
+ if (IsName)
+ return "String";
+
+ return TypeName;
+ }
+
+ inline std::string GetArgumentType() const
+ {
+ std::string realType = GetRealType();
+
+ if (realType == "bool" || realType == "double" || realType == "int")
+ return realType;
+ else
+ return "const " + realType + "&";
+ }
+};
+
+struct Field
+{
+ int Attributes{0};
+ FieldType Type;
+ std::string Name;
+ std::string AlternativeName;
+ std::string GetAccessor;
+ bool PureGetAccessor{false};
+ std::string SetAccessor;
+ bool PureSetAccessor{false};
+ std::string DefaultAccessor;
+ std::string TrackAccessor;
+ std::string NavigationName;
+ std::string NavigateAccessor;
+ bool PureNavigateAccessor{false};
+ int Priority{0};
+
+ inline std::string GetFriendlyName() const
+ {
+ if (!AlternativeName.empty())
+ return AlternativeName;
+
+ bool cap = true;
+ std::string name = Name;
+
+ for (char& ch : name) {
+ if (ch == '_') {
+ cap = true;
+ continue;
+ }
+
+ if (cap) {
+ ch = toupper(ch);
+ cap = false;
+ }
+ }
+
+ name.erase(
+ std::remove(name.begin(), name.end(), '_'),
+ name.end()
+ );
+
+ /* TODO: figure out name */
+ return name;
+ }
+};
+
+enum TypeAttribute
+{
+ TAAbstract = 1,
+ TAVarArgConstructor = 2
+};
+
+struct Klass
+{
+ std::string Name;
+ std::string Parent;
+ std::string TypeBase;
+ int Attributes;
+ std::vector<Field> Fields;
+ std::vector<std::string> LoadDependencies;
+ int ActivationPriority{0};
+};
+
+enum RuleAttribute
+{
+ RARequired = 1
+};
+
+struct Rule
+{
+ int Attributes;
+ bool IsName;
+ std::string Type;
+ std::string Pattern;
+
+ std::vector<Rule> Rules;
+};
+
+enum ValidatorType
+{
+ ValidatorField,
+ ValidatorArray,
+ ValidatorDictionary
+};
+
+struct Validator
+{
+ std::string Name;
+ std::vector<Rule> Rules;
+};
+
+class ClassCompiler
+{
+public:
+ ClassCompiler(std::string path, std::istream& input, std::ostream& oimpl, std::ostream& oheader);
+ ~ClassCompiler();
+
+ void Compile();
+
+ std::string GetPath() const;
+
+ void InitializeScanner();
+ void DestroyScanner();
+
+ void *GetScanner();
+
+ size_t ReadInput(char *buffer, size_t max_size);
+
+ void HandleInclude(const std::string& path, const ClassDebugInfo& locp);
+ void HandleAngleInclude(const std::string& path, const ClassDebugInfo& locp);
+ void HandleImplInclude(const std::string& path, const ClassDebugInfo& locp);
+ void HandleAngleImplInclude(const std::string& path, const ClassDebugInfo& locp);
+ void HandleClass(const Klass& klass, const ClassDebugInfo& locp);
+ void HandleValidator(const Validator& validator, const ClassDebugInfo& locp);
+ void HandleNamespaceBegin(const std::string& name, const ClassDebugInfo& locp);
+ void HandleNamespaceEnd(const ClassDebugInfo& locp);
+ void HandleCode(const std::string& code, const ClassDebugInfo& locp);
+ void HandleLibrary(const std::string& library, const ClassDebugInfo& locp);
+ void HandleMissingValidators();
+
+ void CodeGenValidator(const std::string& name, const std::string& klass, const std::vector<Rule>& rules, const std::string& field, const FieldType& fieldType, ValidatorType validatorType);
+ void CodeGenValidatorSubrules(const std::string& name, const std::string& klass, const std::vector<Rule>& rules);
+
+ static void CompileFile(const std::string& inputpath, const std::string& implpath,
+ const std::string& headerpath);
+ static void CompileStream(const std::string& path, std::istream& input,
+ std::ostream& oimpl, std::ostream& oheader);
+
+ static void OptimizeStructLayout(std::vector<Field>& fields);
+
+private:
+ std::string m_Path;
+ std::istream& m_Input;
+ std::ostream& m_Impl;
+ std::ostream& m_Header;
+ void *m_Scanner;
+
+ std::string m_Library;
+
+ std::map<std::pair<std::string, std::string>, Field> m_MissingValidators;
+
+ static unsigned long SDBM(const std::string& str, size_t len);
+ static std::string BaseName(const std::string& path);
+ static std::string FileNameToGuardName(const std::string& path);
+};
+
+}
+
+#endif /* CLASSCOMPILER_H */
+
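Note (editor): one detail worth calling out from this header is Field::GetFriendlyName(), which turns snake_case field names into the CamelCase names used for the generated accessors (GetX/SetX/ValidateX/NotifyX). Below is a stand-alone reimplementation of that conversion for illustration; the input string is just an example.

// Stand-alone sketch of the snake_case -> CamelCase conversion performed by
// Field::GetFriendlyName() above, e.g. "check_command" -> "CheckCommand".
#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>

static std::string FriendlyName(std::string name)
{
	bool cap = true;

	for (char& ch : name) {
		if (ch == '_') {
			cap = true;
			continue;
		}

		if (cap) {
			ch = static_cast<char>(std::toupper(static_cast<unsigned char>(ch)));
			cap = false;
		}
	}

	name.erase(std::remove(name.begin(), name.end(), '_'), name.end());

	return name;
}

int main()
{
	std::cout << FriendlyName("check_command") << std::endl; // prints: CheckCommand
}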
diff --git a/tools/mkclass/mkclass.cpp b/tools/mkclass/mkclass.cpp
new file mode 100644
index 0000000..eb0d8f8
--- /dev/null
+++ b/tools/mkclass/mkclass.cpp
@@ -0,0 +1,16 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include "classcompiler.hpp"
+#include <iostream>
+
+using namespace icinga;
+
+int main(int argc, char **argv)
+{
+ if (argc < 4) {
+ std::cerr << "Syntax: " << argv[0] << " <input> <output-impl> <output-header>" << std::endl;
+ return 1;
+ }
+
+ ClassCompiler::CompileFile(argv[1], argv[2], argv[3]);
+}
diff --git a/tools/mkembedconfig/CMakeLists.txt b/tools/mkembedconfig/CMakeLists.txt
new file mode 100644
index 0000000..90fe0ce
--- /dev/null
+++ b/tools/mkembedconfig/CMakeLists.txt
@@ -0,0 +1,24 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+set(mkembedconfig_SOURCES
+ mkembedconfig.c
+)
+
+add_executable(mkembedconfig ${mkembedconfig_SOURCES})
+
+set_target_properties (
+ mkembedconfig PROPERTIES
+ FOLDER Bin
+)
+
+macro(MKEMBEDCONFIG_TARGET EmbedInput EmbedOutput)
+ add_custom_command(
+ OUTPUT ${EmbedOutput}
+ COMMAND mkembedconfig
+ ARGS ${EmbedInput} ${CMAKE_CURRENT_BINARY_DIR}/${EmbedOutput}
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+ DEPENDS mkembedconfig ${EmbedInput}
+ )
+ set_property(SOURCE ${CMAKE_CURRENT_BINARY_DIR}/${EmbedOutput} PROPERTY EXCLUDE_UNITY_BUILD TRUE)
+endmacro()
+
diff --git a/tools/mkembedconfig/mkembedconfig.c b/tools/mkembedconfig/mkembedconfig.c
new file mode 100644
index 0000000..ae23087
--- /dev/null
+++ b/tools/mkembedconfig/mkembedconfig.c
@@ -0,0 +1,51 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+int main(int argc, char **argv)
+{
+ FILE *infp, *outfp;
+
+ if (argc < 3) {
+ fprintf(stderr, "Syntax: %s <in-file> <out-file>\n", argv[0]);
+ return EXIT_FAILURE;
+ }
+
+ infp = fopen(argv[1], "r");
+
+ if (!infp) {
+ perror("fopen");
+ return EXIT_FAILURE;
+ }
+
+ outfp = fopen(argv[2], "w");
+
+ if (!outfp) {
+ fclose(infp);
+ perror("fopen");
+ return EXIT_FAILURE;
+ }
+
+ fprintf(outfp, "/* This file has been automatically generated\n"
+ " from the input file \"%s\". */\n\n", argv[1]);
+ fputs("#include \"config/configfragment.hpp\"\n\nnamespace {\n\nconst char *fragment = R\"CONFIG_FRAGMENT(", outfp);
+
+ while (!feof(infp)) {
+ char buf[1024];
+ size_t rc = fread(buf, 1, sizeof(buf), infp);
+
+ if (rc == 0)
+ break;
+
+ fwrite(buf, rc, 1, outfp);
+ }
+
+ fprintf(outfp, ")CONFIG_FRAGMENT\";\n\nREGISTER_CONFIG_FRAGMENT(\"%s\", fragment);\n\n}", argv[1]);
+
+ fclose(outfp);
+ fclose(infp);
+
+ return EXIT_SUCCESS;
+}
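Note (editor): mkembedconfig copies its input file verbatim into a C++ raw string literal and registers it as a config fragment. Assuming a hypothetical input file example.conf whose only line is object CheckCommand "example" { }, the generated translation unit would look roughly like this (whitespace follows the fprintf/fputs calls above):

/* This file has been automatically generated
 from the input file "example.conf". */

#include "config/configfragment.hpp"

namespace {

const char *fragment = R"CONFIG_FRAGMENT(object CheckCommand "example" { }
)CONFIG_FRAGMENT";

REGISTER_CONFIG_FRAGMENT("example.conf", fragment);

}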
diff --git a/tools/mkunity/CMakeLists.txt b/tools/mkunity/CMakeLists.txt
new file mode 100644
index 0000000..8fa0f20
--- /dev/null
+++ b/tools/mkunity/CMakeLists.txt
@@ -0,0 +1,47 @@
+# Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+
+
+define_property(
+ SOURCE
+ PROPERTY EXCLUDE_UNITY_BUILD
+ BRIEF_DOCS "Whether to exclude the source file from unity builds"
+ FULL_DOCS "Specified whether a source file should be excluded from unity builds and should be built separately"
+)
+
+if(ICINGA2_UNITY_BUILD)
+ set(mkunity_SOURCES
+ mkunity.c
+ )
+
+ add_executable(mkunity ${mkunity_SOURCES})
+
+ set_target_properties (
+ mkunity PROPERTIES
+ FOLDER Bin
+ )
+
+ function(MKUNITY_TARGET Target Prefix UnityInputRef)
+ set(UnityInput ${${UnityInputRef}})
+ set(UnityOutput ${CMAKE_CURRENT_BINARY_DIR}/${Target}_unity.cpp)
+ set(RealSources "")
+ set(UnitySources "")
+ foreach(UnitySource ${UnityInput})
+ get_property(SourceExcluded SOURCE ${UnitySource} PROPERTY EXCLUDE_UNITY_BUILD)
+ if(SourceExcluded MATCHES TRUE OR NOT ${UnitySource} MATCHES "\\.(cpp|cxx|cc)\$")
+ list(APPEND RealSources ${UnitySource})
+ else()
+ list(APPEND UnitySources ${UnitySource})
+ endif()
+ endforeach()
+ add_custom_command(
+ OUTPUT ${UnityOutput}
+ COMMAND mkunity
+ ARGS ${Prefix} ${UnitySources} > ${UnityOutput}.tmp
+ COMMAND ${CMAKE_COMMAND}
+ ARGS -E copy ${UnityOutput}.tmp ${UnityOutput}
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+ DEPENDS mkunity ${UnitySources}
+ )
+ list(APPEND RealSources ${UnityOutput})
+ set(${UnityInputRef} ${RealSources} PARENT_SCOPE)
+ endfunction()
+endif()
diff --git a/tools/mkunity/mkunity.c b/tools/mkunity/mkunity.c
new file mode 100644
index 0000000..cf36f60
--- /dev/null
+++ b/tools/mkunity/mkunity.c
@@ -0,0 +1,20 @@
+/* Icinga 2 | (c) 2012 Icinga GmbH | GPLv2+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+
+int main(int argc, char **argv)
+{
+ int i;
+
+ if (argc < 3) {
+ fprintf(stderr, "Syntax: %s <prefix> [<file> ...]\n", argv[0]);
+ return EXIT_FAILURE;
+ }
+
+ for (i = 2; i < argc; i++) {
+ printf("#include \"%s/%s\"\n", argv[1], argv[i]);
+ }
+
+ return EXIT_SUCCESS;
+}
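Note (editor): mkunity only prints one #include directive per source file, prefixed with the given directory; the MKUNITY_TARGET function above redirects that output into <Target>_unity.cpp so the unity-eligible sources are compiled as a single translation unit. For a hypothetical invocation such as mkunity base file1.cpp file2.cpp, the output would be:

// stdout of: mkunity base file1.cpp file2.cpp  (hypothetical file names)
#include "base/file1.cpp"
#include "base/file2.cpp"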
diff --git a/tools/selinux/icinga2.fc b/tools/selinux/icinga2.fc
new file mode 100644
index 0000000..325728d
--- /dev/null
+++ b/tools/selinux/icinga2.fc
@@ -0,0 +1,23 @@
+/etc/rc\.d/init\.d/icinga2 -- gen_context(system_u:object_r:icinga2_initrc_exec_t,s0)
+
+/usr/lib/systemd/system/icinga2.* -- gen_context(system_u:object_r:icinga2_unit_file_t,s0)
+
+/etc/icinga2(/.*)? gen_context(system_u:object_r:icinga2_etc_t,s0)
+
+/etc/icinga2/scripts(/.*)? -- gen_context(system_u:object_r:nagios_notification_plugin_exec_t,s0)
+
+/usr/sbin/icinga2 -- gen_context(system_u:object_r:icinga2_exec_t,s0)
+/usr/lib/icinga2/sbin/icinga2 -- gen_context(system_u:object_r:icinga2_exec_t,s0)
+/usr/lib/icinga2/safe-reload -- gen_context(system_u:object_r:icinga2_exec_t,s0)
+
+/var/lib/icinga2(/.*)? gen_context(system_u:object_r:icinga2_var_lib_t,s0)
+
+/var/log/icinga2(/.*)? gen_context(system_u:object_r:icinga2_log_t,s0)
+
+/var/run/icinga2(/.*)? gen_context(system_u:object_r:icinga2_var_run_t,s0)
+
+/var/run/icinga2/cmd(/.*)? gen_context(system_u:object_r:icinga2_command_t,s0)
+
+/var/spool/icinga2(/.*)? gen_context(system_u:object_r:icinga2_spool_t,s0)
+
+/var/cache/icinga2(/.*)? gen_context(system_u:object_r:icinga2_cache_t,s0)
diff --git a/tools/selinux/icinga2.if b/tools/selinux/icinga2.if
new file mode 100644
index 0000000..15fb034
--- /dev/null
+++ b/tools/selinux/icinga2.if
@@ -0,0 +1,453 @@
+
+## <summary>policy for icinga2</summary>
+
+########################################
+## <summary>
+## Execute icinga2 in the icinga2 domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`icinga2_domtrans',`
+ gen_require(`
+ type icinga2_t, icinga2_exec_t;
+ ')
+
+ corecmd_search_bin($1)
+ domtrans_pattern($1, icinga2_exec_t, icinga2_t)
+')
+
+########################################
+## <summary>
+## Execute icinga2 server in the icinga2 domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed access.
+## </summary>
+## </param>
+#
+interface(`icinga2_initrc_domtrans',`
+ gen_require(`
+ type icinga2_initrc_exec_t;
+ ')
+
+ init_labeled_script_domtrans($1, icinga2_initrc_exec_t)
+')
+
+########################################
+## <summary>
+## Execute icinga2 daemon in the icinga2 domain.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to transition.
+## </summary>
+## </param>
+#
+interface(`icinga2_systemctl',`
+ gen_require(`
+ type icinga2_t;
+ type icinga2_unit_file_t;
+ ')
+
+ systemd_exec_systemctl($1)
+ allow $1 icinga2_unit_file_t:file read_file_perms;
+ allow $1 icinga2_unit_file_t:service manage_service_perms;
+
+ ps_process_pattern($1, icinga2_t)
+ init_dbus_chat($1)
+')
+
+########################################
+## <summary>
+## Allow the specified domain to read
+## icinga2 configuration files.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed access.
+## </summary>
+## </param>
+## <rolecap/>
+#
+interface(`icinga2_read_config',`
+ gen_require(`
+ type icinga2_etc_t;
+ ')
+
+ files_search_etc($1)
+ list_dirs_pattern($1, icinga2_etc_t, icinga2_etc_t)
+ read_files_pattern($1, icinga2_etc_t, icinga2_etc_t)
+')
+
+########################################
+## <summary>
+## Allow the specified domain to read
+## and write icinga2 configuration files.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed access.
+## </summary>
+## </param>
+## <rolecap/>
+#
+interface(`icinga2_manage_config',`
+ gen_require(`
+ type icinga2_etc_t;
+ ')
+
+ files_search_etc($1)
+ manage_dirs_pattern($1, icinga2_etc_t, icinga2_etc_t)
+ manage_files_pattern($1, icinga2_etc_t, icinga2_etc_t)
+')
+
+########################################
+## <summary>
+## Read icinga2's log files.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed access.
+## </summary>
+## </param>
+## <rolecap/>
+#
+interface(`icinga2_read_log',`
+ gen_require(`
+ type icinga2_log_t;
+ ')
+
+ logging_search_logs($1)
+ read_files_pattern($1, icinga2_log_t, icinga2_log_t)
+')
+
+########################################
+## <summary>
+## Append to icinga2 log files.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed access.
+## </summary>
+## </param>
+#
+interface(`icinga2_append_log',`
+ gen_require(`
+ type icinga2_log_t;
+ ')
+
+ logging_search_logs($1)
+ append_files_pattern($1, icinga2_log_t, icinga2_log_t)
+')
+
+########################################
+## <summary>
+## Manage icinga2 log files
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed access.
+## </summary>
+## </param>
+#
+interface(`icinga2_manage_log',`
+ gen_require(`
+ type icinga2_log_t;
+ ')
+
+ logging_search_logs($1)
+ manage_dirs_pattern($1, icinga2_log_t, icinga2_log_t)
+ manage_files_pattern($1, icinga2_log_t, icinga2_log_t)
+ manage_lnk_files_pattern($1, icinga2_log_t, icinga2_log_t)
+')
+
+########################################
+## <summary>
+## Search icinga2 lib directories.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed access.
+## </summary>
+## </param>
+#
+interface(`icinga2_search_lib',`
+ gen_require(`
+ type icinga2_var_lib_t;
+ ')
+
+ allow $1 icinga2_var_lib_t:dir search_dir_perms;
+ files_search_var_lib($1)
+')
+
+########################################
+## <summary>
+## Read icinga2 lib files.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed access.
+## </summary>
+## </param>
+#
+interface(`icinga2_read_lib_files',`
+ gen_require(`
+ type icinga2_var_lib_t;
+ ')
+
+ files_search_var_lib($1)
+ read_files_pattern($1, icinga2_var_lib_t, icinga2_var_lib_t)
+')
+
+########################################
+## <summary>
+## Manage icinga2 lib files.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed access.
+## </summary>
+## </param>
+#
+interface(`icinga2_manage_lib_files',`
+ gen_require(`
+ type icinga2_var_lib_t;
+ ')
+
+ files_search_var_lib($1)
+ manage_files_pattern($1, icinga2_var_lib_t, icinga2_var_lib_t)
+')
+
+########################################
+## <summary>
+## Manage icinga2 lib directories.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed access.
+## </summary>
+## </param>
+#
+interface(`icinga2_manage_lib_dirs',`
+ gen_require(`
+ type icinga2_var_lib_t;
+ ')
+
+ files_search_var_lib($1)
+ manage_dirs_pattern($1, icinga2_var_lib_t, icinga2_var_lib_t)
+')
+
+########################################
+## <summary>
+## Manage icinga2 spool files.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed access.
+## </summary>
+## </param>
+#
+interface(`icinga2_manage_spool_files',`
+ gen_require(`
+ type icinga2_spool_t;
+ ')
+
+ files_search_var_lib($1)
+ manage_files_pattern($1, icinga2_spool_t, icinga2_spool_t)
+')
+
+########################################
+## <summary>
+## All of the rules required to administrate
+## an icinga2 environment
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed access.
+## </summary>
+## </param>
+## <param name="role">
+## <summary>
+## Role allowed access.
+## </summary>
+## </param>
+## <rolecap/>
+#
+interface(`icinga2_admin',`
+ gen_require(`
+ type icinga2_t;
+ type icinga2_initrc_exec_t;
+ type icinga2_log_t;
+ type icinga2_var_lib_t;
+ ')
+
+ allow $1 icinga2_t:process { signal_perms };
+ ps_process_pattern($1, icinga2_t)
+
+ tunable_policy(`deny_ptrace',`',`
+ allow $1 icinga2_t:process ptrace;
+ ')
+
+ icinga2_initrc_domtrans($1)
+ domain_system_change_exemption($1)
+ role_transition $2 icinga2_initrc_exec_t system_r;
+ allow $2 system_r;
+
+ files_list_etc($1)
+ admin_pattern($1, icinga2_etc_t)
+
+ logging_search_logs($1)
+ admin_pattern($1, icinga2_log_t)
+
+ files_search_var_lib($1)
+ admin_pattern($1, icinga2_var_lib_t)
+
+ admin_pattern($1, icinga2_var_run_t)
+ admin_pattern($1, icinga2_command_t)
+ admin_pattern($1, icinga2_spool_t)
+ admin_pattern($1, icinga2_cache_t)
+
+ icinga2_systemctl($1)
+ admin_pattern($1, icinga2_unit_file_t)
+ allow $1 icinga2_unit_file_t:service all_service_perms;
+
+ optional_policy(`
+ systemd_passwd_agent_exec($1)
+ systemd_read_fifo_file_passwd_run($1)
+ ')
+')
+
+########################################
+## <summary>
+## Send icinga2 commands through the command pipe.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed to send commands.
+## </summary>
+## </param>
+#
+interface(`icinga2_send_commands',`
+ gen_require(`
+ type icinga2_var_run_t;
+ ')
+
+ files_search_pids($1)
+ read_files_pattern($1, icinga2_var_run_t, icinga2_var_run_t)
+ read_files_pattern($1, icinga2_command_t, icinga2_command_t)
+ write_fifo_files_pattern($1, icinga2_command_t, icinga2_command_t)
+')
+
+########################################
+## <summary>
+## For domains icinga2 should transition to (e.g. Plugins).
+## </summary>
+## <param name="executable">
+## <summary>
+## Context of the executable.
+## </summary>
+## </param>
+## <param name="domain">
+## <summary>
+## Domain icinga should transition to.
+## </summary>
+## </param>
+#
+interface(`icinga2_execstrans',`
+ gen_require(`
+ type icinga2_t;
+ ')
+
+ domtrans_pattern(icinga2_t, $1, $2)
+ allow icinga2_t $2:process sigkill;
+')
+
+######################################
+## <summary>
+## Do not audit attempts to read and write leaked file descriptors.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain to not audit.
+## </summary>
+## </param>
+#
+interface(`icinga2_dontaudit_leaks_fifo',`
+ gen_require(`
+ type icinga2_t;
+ ')
+
+ dontaudit $1 icinga2_t:fifo_file write;
+')
+
+## <summary>Icinga2 administrator role.</summary>
+
+########################################
+## <summary>
+## Change to the Icinga2 administrator role.
+## </summary>
+## <param name="role">
+## <summary>
+## Role allowed access.
+## </summary>
+## </param>
+## <rolecap/>
+#
+interface(`icinga2adm_role_change',`
+ gen_require(`
+ role icinga2adm_r;
+ ')
+
+ allow $1 icinga2adm_r;
+')
+
+########################################
+## <summary>
+## For domains icinga2adm should transition to (e.g. Plugins).
+## </summary>
+## <param name="executable">
+## <summary>
+## Context of the executable.
+## </summary>
+## </param>
+## <param name="domain">
+## <summary>
+## Domain icinga should transition to.
+## </summary>
+## </param>
+#
+interface(`icinga2adm_execstrans',`
+ gen_require(`
+ type icinga2adm_t;
+ ')
+
+ role icinga2adm_r types $2;
+ allow icinga2adm_r system_r;
+ type_transition icinga2adm_t $1:process $2;
+ allow icinga2adm_t $2:process transition;
+ allow $2 icinga2adm_t:process sigchld;
+ role_transition icinga2adm_r $1 system_r;
+')
+
+########################################
+## <summary>
+## Make a TCP connection to the icinga2 port.
+## </summary>
+## <param name="domain">
+## <summary>
+## Domain allowed access.
+## </summary>
+## </param>
+#
+interface(`corenet_tcp_connect_icinga2_port',`
+ gen_require(`
+ type icinga2_port_t;
+ ')
+
+ allow $1 icinga2_port_t:tcp_socket name_connect;
+')
diff --git a/tools/selinux/icinga2.sh b/tools/selinux/icinga2.sh
new file mode 100755
index 0000000..9d5dd59
--- /dev/null
+++ b/tools/selinux/icinga2.sh
@@ -0,0 +1,74 @@
+#!/bin/sh -e
+
+DIRNAME=`dirname $0`
+cd $DIRNAME
+USAGE="$0 [ --update ]"
+if [ `id -u` != 0 ]; then
+echo 'You must be root to run this script'
+exit 1
+fi
+
+if [ $# -eq 1 ]; then
+ if [ "$1" = "--update" ] ; then
+ time=`ls -l --time-style="+%x %X" icinga2.te | awk '{ printf "%s %s", $6, $7 }'`
+ rules=`ausearch --start $time -m avc --raw -se icinga2`
+ if [ x"$rules" != "x" ] ; then
+ echo "Found avc's to update policy with"
+ echo -e "$rules" | audit2allow -R
+ echo "Do you want these changes added to policy [y/n]?"
+ read ANS
+ if [ "$ANS" = "y" -o "$ANS" = "Y" ] ; then
+ echo "Updating policy"
+ echo -e "$rules" | audit2allow -R >> icinga2.te
+ # Fall through and rebuild policy
+ else
+ exit 0
+ fi
+ else
+ echo "No new avcs found"
+ exit 0
+ fi
+ else
+ echo -e $USAGE
+ exit 1
+ fi
+elif [ $# -ge 2 ] ; then
+ echo -e $USAGE
+ exit 1
+fi
+
+echo "Building and Loading Policy"
+set -x
+make -f /usr/share/selinux/devel/Makefile icinga2.pp || exit
+/usr/sbin/semodule -i icinga2.pp
+
+# Generate a man page off the installed module
+sepolicy manpage -p . -d icinga2_t
+# Fixing the file context on /usr/sbin/icinga2
+/sbin/restorecon -F -R -v /usr/sbin/icinga2
+/sbin/restorecon -F -R -v /usr/lib64/icinga2/sbin/icinga2
+/sbin/restorecon -F -R -v /usr/lib/icinga2/safe-reload
+# Fixing the file context on /etc/rc\.d/init\.d/icinga2
+#/sbin/restorecon -F -R -v /etc/rc\.d/init\.d/icinga2
+# Fixing the file context on /usr/lib/systemd/system/icinga2.*
+/sbin/restorecon -F -R -v /usr/lib/systemd/system/icinga2.*
+# Fixing the file context on /etc/icinga2
+/sbin/restorecon -F -R -v /etc/icinga2
+# Fixing the file context on /var/log/icinga2
+/sbin/restorecon -F -R -v /var/log/icinga2
+# Fixing the file context on /var/lib/icinga2
+/sbin/restorecon -F -R -v /var/lib/icinga2
+# Fixing the file context on /var/run/icinga2
+/sbin/restorecon -F -R -v /var/run/icinga2
+# Fixing the file context on /var/cache/icinga2
+/sbin/restorecon -F -R -v /var/cache/icinga2
+# Fixing the file context on /var/spool/icinga2
+/sbin/restorecon -F -R -v /var/spool/icinga2
+
+# Label the port 5665
+/sbin/semanage port -a -t icinga2_port_t -p tcp 5665
+/sbin/semanage port -a -t redis_port_t -p tcp 6380
+
+# Generate an RPM package for the newly generated policy
+pwd=$(pwd)
+#rpmbuild --define "_sourcedir ${pwd}" --define "_specdir ${pwd}" --define "_builddir ${pwd}" --define "_srcrpmdir ${pwd}" --define "_rpmdir ${pwd}" --define "_buildrootdir ${pwd}/.build" -ba icinga2_selinux.spec
diff --git a/tools/selinux/icinga2.te b/tools/selinux/icinga2.te
new file mode 100644
index 0000000..1b9f051
--- /dev/null
+++ b/tools/selinux/icinga2.te
@@ -0,0 +1,290 @@
+policy_module(icinga2, 0.2.2)
+
+########################################
+#
+# Declarations
+#
+
+## <desc>
+## <p>
+## Allow Icinga 2 to connect to all ports
+## </p>
+## </desc>
+gen_tunable(icinga2_can_connect_all, false)
+
+## <desc>
+## <p>
+## Allow Apache to connect to Icinga 2 API
+## </p>
+## </desc>
+gen_tunable(httpd_can_connect_icinga2_api, true)
+
+## <desc>
+## <p>
+## Allow Apache to write into Icinga 2 Commandpipe
+## </p>
+## </desc>
+gen_tunable(httpd_can_write_icinga2_command, true)
+
+## <desc>
+## <p>
+## Allow Icinga 2 to run plugins via sudo
+## </p>
+## </desc>
+gen_tunable(icinga2_run_sudo, false)
+
+require {
+ type nagios_admin_plugin_t; type nagios_admin_plugin_exec_t;
+ type nagios_checkdisk_plugin_t; type nagios_checkdisk_plugin_exec_t;
+ type nagios_mail_plugin_t; type nagios_mail_plugin_exec_t;
+ type nagios_services_plugin_t; type nagios_services_plugin_exec_t;
+ type nagios_system_plugin_t; type nagios_system_plugin_exec_t;
+ type nagios_unconfined_plugin_t; type nagios_unconfined_plugin_exec_t;
+ type nagios_eventhandler_plugin_t; type nagios_eventhandler_plugin_exec_t;
+ type nagios_openshift_plugin_t; type nagios_openshift_plugin_exec_t;
+ type httpd_t; type system_mail_t;
+ type redis_t; type redis_var_run_t; type redis_port_t;
+ type devlog_t;
+ role staff_r;
+ attribute unreserved_port_type;
+}
+
+type icinga2_t;
+type icinga2_exec_t;
+init_daemon_domain(icinga2_t, icinga2_exec_t)
+
+#permissive icinga2_t;
+
+type icinga2_initrc_exec_t;
+init_script_file(icinga2_initrc_exec_t)
+
+type icinga2_unit_file_t;
+systemd_unit_file(icinga2_unit_file_t)
+
+type icinga2_etc_t;
+files_config_file(icinga2_etc_t)
+
+type icinga2_log_t;
+logging_log_file(icinga2_log_t)
+
+type icinga2_var_lib_t;
+files_type(icinga2_var_lib_t)
+
+type icinga2_var_run_t;
+files_pid_file(icinga2_var_run_t)
+
+type icinga2_command_t;
+files_type(icinga2_command_t)
+
+type icinga2_spool_t;
+files_type(icinga2_spool_t)
+
+type icinga2_cache_t;
+files_type(icinga2_cache_t)
+
+type icinga2_tmp_t;
+files_tmp_file(icinga2_tmp_t)
+
+type icinga2_port_t;
+# There is no interface for unreserved_port_type
+typeattribute icinga2_port_t unreserved_port_type;
+corenet_port(icinga2_port_t)
+
+########################################
+#
+# icinga2 local policy
+#
+allow icinga2_t self:capability { setgid setuid sys_resource kill };
+allow icinga2_t self:process { setsched signal setrlimit };
+allow icinga2_t self:fifo_file rw_fifo_file_perms;
+allow icinga2_t self:unix_dgram_socket create_socket_perms;
+allow icinga2_t self:unix_stream_socket create_stream_socket_perms;
+
+allow icinga2_t icinga2_exec_t:file execute_no_trans;
+
+list_dirs_pattern(icinga2_t, icinga2_etc_t, icinga2_etc_t)
+read_files_pattern(icinga2_t, icinga2_etc_t, icinga2_etc_t)
+read_lnk_files_pattern(icinga2_t, icinga2_etc_t, icinga2_etc_t)
+
+manage_dirs_pattern(icinga2_t, icinga2_log_t, icinga2_log_t)
+manage_files_pattern(icinga2_t, icinga2_log_t, icinga2_log_t)
+manage_lnk_files_pattern(icinga2_t, icinga2_log_t, icinga2_log_t)
+logging_log_filetrans(icinga2_t, icinga2_log_t, { dir file lnk_file })
+logging_send_syslog_msg(icinga2_t)
+
+manage_dirs_pattern(icinga2_t, icinga2_var_lib_t, icinga2_var_lib_t)
+manage_files_pattern(icinga2_t, icinga2_var_lib_t, icinga2_var_lib_t)
+manage_lnk_files_pattern(icinga2_t, icinga2_var_lib_t, icinga2_var_lib_t)
+files_var_lib_filetrans(icinga2_t, icinga2_var_lib_t, { dir file lnk_file })
+
+manage_dirs_pattern(icinga2_t, icinga2_var_run_t, icinga2_var_run_t)
+manage_files_pattern(icinga2_t, icinga2_var_run_t, icinga2_var_run_t)
+files_pid_filetrans(icinga2_t, icinga2_var_run_t, { dir file })
+
+manage_dirs_pattern(icinga2_t, icinga2_command_t, icinga2_command_t)
+manage_files_pattern(icinga2_t, icinga2_command_t, icinga2_command_t)
+manage_fifo_files_pattern(icinga2_t, icinga2_command_t, icinga2_command_t)
+
+manage_dirs_pattern(icinga2_t, icinga2_spool_t, icinga2_spool_t)
+manage_files_pattern(icinga2_t, icinga2_spool_t, icinga2_spool_t)
+files_spool_filetrans(icinga2_t, icinga2_spool_t, { dir file })
+
+manage_dirs_pattern(icinga2_t, icinga2_cache_t, icinga2_cache_t)
+manage_files_pattern(icinga2_t, icinga2_cache_t, icinga2_cache_t)
+
+manage_files_pattern(icinga2_t, icinga2_tmp_t, icinga2_tmp_t)
+manage_dirs_pattern(icinga2_t, icinga2_tmp_t, icinga2_tmp_t)
+files_tmp_filetrans(icinga2_t, icinga2_tmp_t, { dir file })
+
+domain_use_interactive_fds(icinga2_t)
+
+files_read_etc_files(icinga2_t)
+
+auth_use_nsswitch(icinga2_t)
+
+miscfiles_read_localization(icinga2_t)
+
+corecmd_exec_shell(icinga2_t)
+corecmd_exec_bin(icinga2_t)
+
+kernel_read_system_state(icinga2_t)
+kernel_read_network_state(icinga2_t)
+kernel_dgram_send(icinga2_t)
+
+# should be moved to nagios_plugin_template in nagios.if
+icinga2_execstrans(nagios_admin_plugin_exec_t, nagios_admin_plugin_t)
+icinga2_execstrans(nagios_checkdisk_plugin_exec_t, nagios_checkdisk_plugin_t)
+icinga2_execstrans(nagios_mail_plugin_exec_t, nagios_mail_plugin_t)
+icinga2_execstrans(nagios_services_plugin_exec_t, nagios_services_plugin_t)
+icinga2_execstrans(nagios_system_plugin_exec_t, nagios_system_plugin_t)
+icinga2_execstrans(nagios_unconfined_plugin_exec_t, nagios_unconfined_plugin_t)
+icinga2_execstrans(nagios_eventhandler_plugin_exec_t, nagios_eventhandler_plugin_t)
+icinga2_execstrans(nagios_openshift_plugin_exec_t, nagios_openshift_plugin_t)
+
+# should be moved to nagios.te
+nagios_plugin_template(notification)
+icinga2_execstrans(nagios_notification_plugin_exec_t, nagios_notification_plugin_t)
+allow nagios_notification_plugin_t icinga2_etc_t:dir search;
+allow nagios_notification_plugin_t nagios_notification_plugin_exec_t:dir search;
+#permissive nagios_notification_plugin_t;
+corecmd_exec_bin(nagios_notification_plugin_t)
+hostname_exec(nagios_notification_plugin_t)
+type nagios_notification_plugin_tmp_t;
+files_tmp_file(nagios_notification_plugin_tmp_t)
+manage_files_pattern(nagios_notification_plugin_t, nagios_notification_plugin_tmp_t, nagios_notification_plugin_tmp_t)
+manage_dirs_pattern(nagios_notification_plugin_t, nagios_notification_plugin_tmp_t, nagios_notification_plugin_tmp_t)
+files_tmp_filetrans(nagios_notification_plugin_t, nagios_notification_plugin_tmp_t, { dir file })
+fs_dontaudit_getattr_xattr_fs(nagios_notification_plugin_t)
+optional_policy(`
+ mta_send_mail(nagios_notification_plugin_t)
+')
+icinga2_dontaudit_leaks_fifo(system_mail_t)
+# direct smtp notification
+corenet_tcp_connect_smtp_port(nagios_notification_plugin_t)
+# hipsaint notification
+auth_read_passwd(nagios_notification_plugin_t)
+sysnet_read_config(nagios_notification_plugin_t)
+allow nagios_notification_plugin_t self:udp_socket create_stream_socket_perms;
+allow nagios_notification_plugin_t self:tcp_socket create_stream_socket_perms;
+allow nagios_notification_plugin_t self:netlink_route_socket create_netlink_socket_perms;
+corenet_tcp_connect_http_port(nagios_notification_plugin_t)
+miscfiles_read_generic_certs(nagios_notification_plugin_t)
+
+allow icinga2_t icinga2_port_t:tcp_socket name_bind;
+allow icinga2_t self:tcp_socket create_stream_socket_perms;
+corenet_tcp_connect_icinga2_port(icinga2_t)
+
+mysql_stream_connect(icinga2_t)
+mysql_tcp_connect(icinga2_t)
+postgresql_stream_connect(icinga2_t)
+postgresql_tcp_connect(icinga2_t)
+
+# Graphite uses port 2003, which is labeled lmtp_port_t
+corenet_tcp_connect_lmtp_port(icinga2_t)
+
+# Allow icinga2 to connect to redis using unix domain sockets
+stream_connect_pattern(icinga2_t, redis_var_run_t, redis_var_run_t, redis_t)
+
+# Just like `redis_tcp_connect(icinga2_t)`, though this interface does not exist on centos7
+corenet_tcp_recvfrom_labeled(icinga2_t, redis_t)
+corenet_tcp_sendrecv_redis_port(icinga2_t)
+corenet_tcp_connect_redis_port(icinga2_t)
+
+# This is for other features that do not use a confined port
+# or if you run one with a non-standard port.
+tunable_policy(`icinga2_can_connect_all',`
+ corenet_tcp_connect_all_ports(icinga2_t)
+')
+
+# This is for plugins that need to be executed via sudo
+tunable_policy(`icinga2_run_sudo',`
+ allow icinga2_t self:capability { audit_write net_admin };
+ allow icinga2_t self:netlink_audit_socket { create_netlink_socket_perms nlmsg_relay };
+ allow icinga2_t devlog_t:sock_file write;
+
+ init_read_utmp(icinga2_t)
+
+ auth_domtrans_chkpwd(icinga2_t)
+ allow icinga2_t chkpwd_t:process { noatsecure rlimitinh siginh };
+
+ selinux_compute_access_vector(icinga2_t)
+
+ dbus_send_system_bus(icinga2_t)
+ dbus_stream_connect_system_dbusd(icinga2_t)
+ systemd_dbus_chat_logind(icinga2_t)
+ # Without this it works but is very slow
+ systemd_write_inherited_logind_sessions_pipes(icinga2_t)
+')
+
+optional_policy(`
+ tunable_policy(`icinga2_run_sudo',`
+ sudo_exec(icinga2_t)
+ ')
+')
+
+
+
+########################################
+#
+# Icinga Webinterfaces
+#
+
+optional_policy(`
+ # should be a boolean in apache-policy
+ tunable_policy(`httpd_can_write_icinga2_command',`
+ icinga2_send_commands(httpd_t)
+ ')
+')
+
+optional_policy(`
+ # should be a boolean in apache-policy
+ tunable_policy(`httpd_can_connect_icinga2_api',`
+ corenet_tcp_connect_icinga2_port(httpd_t)
+ ')
+')
+
+########################################
+#
+# Icinga2 Admin Role
+#
+
+role icinga2adm_r;
+userdom_unpriv_user_template(icinga2adm)
+
+icinga2_admin(icinga2adm_t, icinga2adm_r)
+
+allow icinga2adm_t self:capability { dac_read_search dac_override };
+
+# should be moved to staff.te
+icinga2adm_role_change(staff_r)
+
+# should be moved to nagios_plugin_template in nagios.if
+icinga2adm_execstrans(nagios_admin_plugin_exec_t, nagios_admin_plugin_t)
+icinga2adm_execstrans(nagios_checkdisk_plugin_exec_t, nagios_checkdisk_plugin_t)
+icinga2adm_execstrans(nagios_mail_plugin_exec_t, nagios_mail_plugin_t)
+icinga2adm_execstrans(nagios_services_plugin_exec_t, nagios_services_plugin_t)
+icinga2adm_execstrans(nagios_system_plugin_exec_t, nagios_system_plugin_t)
+icinga2adm_execstrans(nagios_unconfined_plugin_exec_t, nagios_unconfined_plugin_t)
+icinga2adm_execstrans(nagios_eventhandler_plugin_exec_t, nagios_eventhandler_plugin_t)
+icinga2adm_execstrans(nagios_openshift_plugin_exec_t, nagios_openshift_plugin_t)
+icinga2adm_execstrans(nagios_notification_plugin_exec_t, nagios_notification_plugin_t)
diff --git a/tools/syntax/nano/icinga2.nanorc b/tools/syntax/nano/icinga2.nanorc
new file mode 100644
index 0000000..cff78be
--- /dev/null
+++ b/tools/syntax/nano/icinga2.nanorc
@@ -0,0 +1,157 @@
+### Nano syntax file
+### Icinga2 object configuration file
+
+syntax "icinga2" "/etc/icinga2/.*\.conf$" "/usr/share/icinga2/include/(plugin|itl|.*\.conf$)"
+
+## objects types
+icolor brightgreen "object[ \t]+(host|hostgroup|service|servicegroup|user|usergroup)"
+icolor brightgreen "object[ \t]+(checkcommand|notificationcommand|eventcommand|notification)"
+icolor brightgreen "object[ \t]+(timeperiod|scheduleddowntime|dependency|perfdatawriter)"
+icolor brightgreen "object[ \t]+(graphitewriter|idomysqlconnection|idomysqlconnection)"
+icolor brightgreen "object[ \t]+(livestatuslistener|externalcommandlistener)"
+icolor brightgreen "object[ \t]+(compatlogger|checkcomponent|notificationcomponent)"
+icolor brightgreen "object[ \t]+(filelogger|sysloglogger|journaldlogger|apilistener|endpoint|zone)"
+
+## apply def
+icolor brightgreen "apply[ \t]+(Service|Dependency|Notification|ScheduledDowntime)"
+
+
+## objects attributes
+icolor red "(^|^\s+)(accept_commands|accept_config|action_url|address|address6|arguments|author|bind_host)"
+icolor red "(^|^\s+)(bind_port|ca_path|categories|cert_path|check_command|check_interval)"
+icolor red "(^|^\s+)(check_period|child_host_name|child_service_name|cleanup|command|command_endpoint|command_path)"
+icolor red "(^|^\s+)(comment|compat_log_path|crl_path|database|disable_checks|disable_notifications)"
+icolor red "(^|^\s+)(display_name|duration|email|enable_active_checks|enable_event_handler)"
+icolor red "(^|^\s+)(enable_flapping|enable_ha|enable_notifications|enable_passive_checks|enable_perfdata)"
+icolor red "(^|^\s+)(endpoints|env|event_command|failover_timeout|fixed|flapping_threshold|groups|host)"
+icolor red "(^|^\s+)(host_format_template|host_name|host_name_template|host_perfdata_path|host_temp_path|icon_image)"
+icolor red "(^|^\s+)(icon_image_alt|instance_description|instance_name|interval|key_path|log_dir)"
+icolor red "(^|^\s+)(log_duration|max_check_attempts|methods|name|notes|notes_url|objects_path)"
+icolor red "(^|^\s+)(pager|parent|parent_host_name|parent_service_name|password|path|period)"
+icolor red "(^|^\s+)(port|ranges|retry_interval|rotation_interval|rotation_method)"
+icolor red "(^|^\s+)(service_format_template|service_name|service_name_template|service_perfdata_path|service_temp_path)"
+icolor red "(^|^\s+)(severity|socket_path|socket_type|spool_dir|states|status_path|table_prefix)"
+icolor red "(^|^\s+)(timeout|times|types|update_interval|user|user_groups|users|volatile|zone)"
+icolor red "(^|^\s+)(vars\.\w+)"
+
+
+## keywords
+icolor red "(^|^\s+)|(icinga2Keyword|template|const|import|include|include_recursive|var|function|return|to|use|locals|globals|this)\s+"
+
+## Assign conditions
+icolor magenta "(assign|ignore)[ \t]+where"
+
+## Global functions
+icolor white "(regex|match|len|union|intersection|keys|string|number|bool|random|log|typeof|get_time|exit)"
+
+## Accessor Functions
+icolor white "(get_host|get_service|get_user|get_check_command|get_event_command|get_notification_command)"
+icolor white "(get_host_group|get_service_group|get_user_group|get_time_period)"
+
+
+## Math functions
+icolor white "(Math.E|Math.LN2|Math.LN10|Math.LOG2E|Math.PI|Math.SQRT1_2|Math.SQRT2)"
+icolor white "(Math.abs|Math.acos|Math.asin|Math.atan|Math.atan2|Math.ceil|Math.cos)"
+icolor white "(Math.exp|Math.floor|Math.isinf|Math.isnan|Math.log|Math.max|Math.min)"
+icolor white "(Math.pow|Math.random|Math.round|Math.sign|Math.sin|Math.sqrt|Math.tan)"
+
+## Json functions
+icolor white "(Json.encode|Json.decode)"
+
+## String functions
+icolor white "(\.to_string)"
+icolor white "(\.find)"
+icolor white "(\.contains)"
+icolor white "(\.len)"
+icolor white "(\.lower)"
+icolor white "(\.upper)"
+icolor white "(\.replace)"
+icolor white "(\.split)"
+icolor white "(\.substr)"
+
+## Array and Dict Functions
+icolor white "(\.add)"
+icolor white "(\.clear)"
+icolor white "(\.clone)"
+icolor white "(\.contains)"
+icolor white "(\.len)"
+icolor white "(\.remove)"
+icolor white "(\.set)"
+icolor white "(\.remove)"
+icolor white "(\.sort)"
+icolor white "(\.join)"
+icolor white "(\.clone)"
+icolor white "(\.call)"
+icolor white "(\.callv)"
+
+
+## Conditional statements
+icolor white "(if|else)"
+
+## Loops
+icolor white "(while|for|break|continue)"
+
+## Operators
+icolor green "\s(\.)\s"
+icolor green "\s(!)\s"
+icolor green "\s(\~)\s"
+icolor green "\s(\+)\s"
+icolor green "\s(-)\s"
+icolor green "\s(\*)\s"
+icolor green "\s(/)\s"
+icolor green "\s(%)\s"
+icolor green "\s(=)\s"
+icolor green "\s(<)\s"
+icolor green "\s(>)\s"
+icolor green "\s(<<)\s"
+icolor green "\s(>>)\s"
+icolor green "\s(<=)\s"
+icolor green "\s(>=)\s"
+icolor green "\s(in)\s"
+icolor green "\s(!in)\s"
+icolor green "\s(==)\s"
+icolor green "\s(!=)\s"
+icolor green "\s(&)\s"
+icolor green "\s(\^)\s"
+icolor green "\s(|)\s"
+icolor green "\s(&&)\s"
+icolor green "\s(||)\s"
+icolor green "\s(=>)\s"
+icolor green "\s(\+=)\s"
+icolor green "\s(-=)\s"
+icolor green "\s(\*=)\s"
+icolor green "\s(/=)\s"
+
+## Global constants
+icolor yellow "(PrefixDir|SysconfDir|ZonesDir|LocalStateDir|RunDir|PkgDataDir|StatePath|ObjectsPath)"
+icolor yellow "(PidPath|NodeName|ApplicationType|EnableNotifications|EnableEventHandlers|EnableFlapping)"
+icolor yellow "(EnableHostChecks|EnableServiceChecks|EnablePerfdata|UseVfork|RunAsUser|RunAsGroup|PluginDir)"
+icolor yellow "(Vars\s+)"
+
+## Boolean
+icolor blue "(true|false)"
+
+# Null
+icolor blue "(null)"
+
+
+## comments
+color brightblue "\/\/.*"
+color brightblue "^[ \t]*\*\($\|[ \t]\+\)"
+color brightblue start="/\*" end="\*/"
+
+## Braces and Parens definition
+# - Braces are used in dictionary definition
+
+color magenta "(\(|\))"
+color magenta "(\[|\])"
+color magenta "(\{|\})"
+
+## type definitions
+# - double quotes "
+# - single quotes '
+# - brackets <>
+
+color brightyellow "'"
+color brightyellow """
+color brightyellow start="<" end=">"
diff --git a/tools/syntax/vim/ftdetect/icinga2.vim b/tools/syntax/vim/ftdetect/icinga2.vim
new file mode 100644
index 0000000..5a78d0a
--- /dev/null
+++ b/tools/syntax/vim/ftdetect/icinga2.vim
@@ -0,0 +1,2 @@
+" Modify
+au BufNewFile,BufRead /*etc/icinga2/*.conf,/*usr/share/icinga2/include/{itl,plugins,*.conf} set filetype=icinga2
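+" For a per-user setup this file would typically be copied to
+" ~/.vim/ftdetect/icinga2.vim (assumption; packaged installs may place it in
+" the system-wide vim runtime path instead).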
diff --git a/tools/syntax/vim/syntax/icinga2.vim b/tools/syntax/vim/syntax/icinga2.vim
new file mode 100644
index 0000000..5201d6f
--- /dev/null
+++ b/tools/syntax/vim/syntax/icinga2.vim
@@ -0,0 +1,361 @@
+" Vim syntax file
+" Filename: icinga2.vim
+" Language: Icinga2 object configuration file
+" Author: Carlos Cesario <carloscesario@gmail.com>, Michael Friedrich <michael.friedrich@icinga.com>
+" Version: 1.0.0
+" Based: javascript.vim / nagios.vim
+
+" For version 5.x: Clear all syntax items
+" For version 6.x: Quit when a syntax file was already loaded
+if !exists("main_syntax")
+ if version < 600
+ syntax clear
+ elseif exists("b:current_syntax")
+ finish
+ endif
+ let main_syntax = 'icinga2'
+endif
+
+" case off
+syntax case ignore
+
+" ########################################
+" ### General settings
+
+" comments
+syn keyword icinga2CommentTodo TODO FIXME XXX TBD contained
+syn match icinga2LineComment "\/\/.*" contains=icinga2CommentTodo
+syn match icinga2LineComment "#.*" contains=icinga2CommentTodo
+syn match icinga2CommentSkip "^[ \t]*\*\($\|[ \t]\+\)"
+syn region icinga2Comment start="/\*" end="\*/" contains=icinga2CommentTodo
+
+" type definitions
+" - double quotes "
+" - single quotes '
+" - brackets <>
+
+syn match angleBrackets "<.*>"
+syn region macro start=+\$+ end=+\$+ oneline
+syn region StringD start=+"+ end=+"\|$+ contains=macro
+syn region StringS start=+'+ end=+'\|$+ contains=macro
+
+
+" Braces and Parens definition
+" Braces are used in dictionary definition
+
+syn match Braces "[{}\[\]]"
+syn match Parens "[()]"
+syn match Lambda "{{}}"
+
+
+" ########################################
+" ### Match objects, attributes and keywords
+
+" Object types
+syn keyword icinga2ObjType ApiListener ApiUser CheckCommand CheckerComponent
+syn keyword icinga2ObjType Comment Dependency Downtime ElasticsearchWriter
+syn keyword icinga2ObjType Endpoint EventCommand ExternalCommandListener
+syn keyword icinga2ObjType FileLogger GelfWriter GraphiteWriter Host HostGroup
+syn keyword icinga2ObjType IcingaApplication IdoMysqlConnection IdoPgsqlConnection
+syn keyword icinga2ObjType InfluxdbWriter Influxdb2Writer JournaldLogger
+syn keyword icinga2ObjType LivestatusListener Notification NotificationCommand
+syn keyword icinga2ObjType NotificationComponent OpenTsdbWriter PerfdataWriter
+syn keyword icinga2ObjType ScheduledDowntime Service ServiceGroup SyslogLogger
+syn keyword icinga2ObjType TimePeriod User UserGroup WindowsEventLogLogger Zone
+
+" Object/Template marker (simplified)
+syn match icinga2ObjDef "\(object\|template\)[ \t]\+.*"
+
+" Apply rules
+syn match icinga2ApplyDef "apply[ \t]\+\(Service\|Dependency\|Notification\|ScheduledDowntime\)"
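+" For illustration, lines like the following (Icinga 2 DSL) are matched by the
+" object/template and apply rules above:
+"   object Host "web01" { }
+"   apply Service "ping4" { assign where host.address }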
+
+
+" Objects attributes
+"
+" find . -type f -name '*.ti' -exec sh -c 'grep config {}' \;
+" Don't add 'host', etc. from apply rules here, they should match icinga2ObjType instead.
+syn keyword icinga2ObjAttr contained accept_commands accept_config access_control_allow_origin action_url address address6 arguments author bind_host
+syn keyword icinga2ObjAttr contained bind_port ca_path categories cert_path check_command check_interval
+syn keyword icinga2ObjAttr contained check_period check_timeout child_host_name child_options child_service_name cipher_list
+syn keyword icinga2ObjAttr contained cleanup client_cn command command_endpoint command_path
+syn keyword icinga2ObjAttr contained comment compat_log_path concurrent_checks crl_path database disable_checks disable_notifications
+syn keyword icinga2ObjAttr contained display_name duration email enable_active_checks enable_event_handlers enable_event_handler
+syn keyword icinga2ObjAttr contained enable_flapping enable_ha enable_host_checks enable_notifications enable_passive_checks enable_perfdata
+syn keyword icinga2ObjAttr contained enable_send_metadata enable_send_perfdata enable_send_thresholds enable_tls enable_service_checks
+syn keyword icinga2ObjAttr contained endpoints env event_command excludes failover_timeout fixed flapping_threshold_low flapping_threshold_high
+syn keyword icinga2ObjAttr contained flush_interval flush_threshold global groups
+syn keyword icinga2ObjAttr contained host_format_template host_name host_name_template host_perfdata_path host_temp_path host_template icon_image
+syn keyword icinga2ObjAttr contained icon_image_alt ignore_soft_states includes index instance_description instance_name interval key_path log_dir
+syn keyword icinga2ObjAttr contained log_duration max_anonymous_clients max_check_attempts methods name notes notes_url objects_path
+syn keyword icinga2ObjAttr contained pager parent parent_host_name parent_service_name password path period permissions
+syn keyword icinga2ObjAttr contained port prefer_includes ranges retry_interval rotation_interval rotation_method
+syn keyword icinga2ObjAttr contained service_format_template service_name service_name_template service_perfdata_path service_temp_path service_template
+syn keyword icinga2ObjAttr contained severity socket_path socket_type source spool_dir
+syn keyword icinga2ObjAttr contained ssl_ca ssl_capath ssl_ca_cert ssl_cert ssl_cipher ssl_enable ssl_mode ssl_key
+syn keyword icinga2ObjAttr contained states status_path table_prefix ticket_salt
+syn keyword icinga2ObjAttr contained timeout times tls_handshake_timeout tls_protocolmin
+syn keyword icinga2ObjAttr contained types update_interval user user_groups username users volatile zone
+syn match icinga2ObjAttr contained "\(vars.\w\+\)"
+
+" keywords: https://icinga.com/docs/icinga2/latest/doc/17-language-reference/#reserved-keywords
+syn keyword icinga2Keyword object template include include_recursive include_zones library
+syn keyword icinga2Keyword const var this globals locals use default ignore_on_error
+syn keyword icinga2Keyword current_filename current_line apply to where import assign
+syn keyword icinga2Keyword ignore function return in
+
+
+" Assign conditions
+syn match icinga2AssignCond contained "\(assign[ \t]\+where\|ignore[ \t]\+where\)"
+
+
+" Documentation reference: https://icinga.com/docs/icinga2/latest/doc/18-library-reference/
+
+" Global functions
+syn keyword icinga2GFunction contained regex match cidr_match range len union intersection keys string
+syn keyword icinga2GFunction contained number bool random log typeof get_time parse_performance_data dirname
+syn keyword icinga2GFunction contained basename path_exists glob glob_recursive
+syn keyword icinga2GFunction contained escape_shell_arg escape_shell_cmd escape_create_process_arg sleep exit
+syn keyword icinga2GFunction contained macro
+
+
+" Accessor Functions
+syn keyword icinga2AFunction contained get_check_command get_event_command get_notification_command
+syn keyword icinga2AFunction contained get_host get_service get_services get_user
+syn keyword icinga2AFunction contained get_host_group get_service_group get_user_group
+syn keyword icinga2AFunction contained get_timeperiod
+syn keyword icinga2AFunction contained get_object get_objects
+
+" Math functions
+syn match icinga2MathFunction contained "\(Math.E\|Math.LN2\|Math.LN10\|Math.LOG2E\|Math.PI\|Math.SQRT1_2\|Math.SQRT2\)"
+syn match icinga2MathFunction contained "\(Math.abs\|Math.acos\|Math.asin\|Math.atan\|Math.atan2\|Math.ceil\|Math.cos\)"
+syn match icinga2MathFunction contained "\(Math.exp\|Math.floor\|Math.isinf\|Math.isnan\|Math.log\|Math.max\|Math.min\)"
+syn match icinga2MathFunction contained "\(Math.pow\|Math.random\|Math.round\|Math.sign\|Math.sin\|Math.sqrt\|Math.tan\)"
+
+" Json functions
+syn match icinga2JsonFunction contained "\(Json.encode\|Json.decode\)"
+
+" Number functions
+syn match icinga2NumberFunction contained "\(\.to_string\)"
+
+" Boolean functions
+syn match icinga2BoolFunction contained "\(\.to_string\)"
+
+" String functions
+syn match icinga2StrFunction contained "\(\.find\)"
+syn match icinga2StrFunction contained "\(\.contains\)"
+syn match icinga2StrFunction contained "\(\.len\)"
+syn match icinga2StrFunction contained "\(\.lower\)"
+syn match icinga2StrFunction contained "\(\.upper\)"
+syn match icinga2StrFunction contained "\(\.replace\)"
+syn match icinga2StrFunction contained "\(\.split\)"
+syn match icinga2StrFunction contained "\(\.substr\)"
+syn match icinga2StrFunction contained "\(\.to_string\)"
+syn match icinga2StrFunction contained "\(\.reverse\)"
+syn match icinga2StrFunction contained "\(\.trim\)"
+
+" Object functions
+syn match icinga2ObjectFunction contained "\(\.clone\)"
+syn match icinga2ObjectFunction contained "\(\.to_string\)"
+syn match icinga2ObjectFunction contained "\(\.type\)"
+
+" Type functions
+syn match icinga2TypeFunction contained "\(\.base\)"
+" needs an exception for 'vars'
+syn match icinga2TypeFunction contained "\(^[vars]\.name\)"
+syn match icinga2TypeFunction contained "\(\.prototype\)"
+
+" Array functions
+syn match icinga2ArrFunction contained "\(\.add(\)"
+syn match icinga2ArrFunction contained "\(\.clear\)"
+syn match icinga2ArrFunction contained "\(\.shallow_clone\)"
+syn match icinga2ArrFunction contained "\(\.contains\)"
+syn match icinga2ArrFunction contained "\(\.freeze\)"
+syn match icinga2ArrFunction contained "\(\.len\)"
+syn match icinga2ArrFunction contained "\(\.remove\)"
+syn match icinga2ArrFunction contained "\(\.set\)"
+syn match icinga2ArrFunction contained "\(\.get\)"
+syn match icinga2ArrFunction contained "\(\.sort\)"
+syn match icinga2ArrFunction contained "\(\.join\)"
+syn match icinga2ArrFunction contained "\(\.reverse\)"
+syn match icinga2ArrFunction contained "\(\.map\)"
+syn match icinga2ArrFunction contained "\(\.reduce\)"
+syn match icinga2ArrFunction contained "\(\.filter\)"
+syn match icinga2ArrFunction contained "\(\.any\)"
+syn match icinga2ArrFunction contained "\(\.all\)"
+
+" Dictionary functions
+syn match icinga2DictFunction contained "\(\.shallow_clone\)"
+syn match icinga2DictFunction contained "\(\.contains\)"
+syn match icinga2DictFunction contained "\(\.freeze\)"
+syn match icinga2DictFunction contained "\(\.len\)"
+syn match icinga2DictFunction contained "\(\.remove\)"
+syn match icinga2DictFunction contained "\(\.set\)"
+syn match icinga2DictFunction contained "\(\.get\)"
+syn match icinga2DictFunction contained "\(\.keys\)"
+syn match icinga2DictFunction contained "\(\.values\)"
+
+" Function functions
+syn match icinga2FuncFunction contained "\(\.call\)"
+syn match icinga2FuncFunction contained "\(\.callv\)"
+
+" DateTime functions
+syn match icinga2DTFunction contained "\(DateTime\)"
+syn match icinga2DTFunction contained "\(\.format\)"
+syn match icinga2DTFunction contained "\(\.to_string\)"
+
+" Conditional statements
+syn keyword icinga2Cond if else
+
+" Loops
+syn keyword icinga2Loop while for break continue
+
+" Exceptions
+syn keyword icinga2Exception throw try except
+
+" Debugger
+syn keyword icinga2Debugger debugger
+
+" References
+syn keyword icinga2Reference & *
+" Namespace
+syn keyword icinga2Namespace namespace using
+
+" Operators
+syn match icinga2Operators "[ \t]\+\(\.\)\+"
+syn match icinga2Operators "[ \t]\+\(!\)\+"
+syn match icinga2Operators "[ \t]\+\(\~\)\+"
+syn match icinga2Operators "[ \t]\+\(+\)\+"
+syn match icinga2Operators "[ \t]\+\(-\)\+"
+syn match icinga2Operators "[ \t]\+\(*\)\+"
+syn match icinga2Operators "[ \t]\+\(/[^/\*]\)\+"
+syn match icinga2Operators "[ \t]\+\(%\)\+"
+syn match icinga2Operators "[ \t]\+\(+\)\+"
+syn match icinga2Operators "[ \t]\+\(-\)\+"
+syn match icinga2Operators "[ \t]\+\(=\)\+"
+syn match icinga2Operators "[ \t]\+\(<\)[ \t]\+"
+syn match icinga2Operators "[ \t]\+\(>\)[ \t]\+"
+syn match icinga2Operators "[ \t]\+\(<<\)\+"
+syn match icinga2Operators "[ \t]\+\(>>\)\+"
+syn match icinga2Operators "[ \t]\+\(<=\)\+"
+syn match icinga2Operators "[ \t]\+\(>=\)\+"
+syn match icinga2Operators "[ \t]\+\(in\)\+"
+syn match icinga2Operators "[ \t]\+\(!in\)\+"
+syn match icinga2Operators "[ \t]\+\(==\)\+"
+syn match icinga2Operators "[ \t]\+\(!=\)\+"
+syn match icinga2Operators "[ \t]\+\(&\)\+"
+syn match icinga2Operators "[ \t]\+\(\^\)\+"
+syn match icinga2Operators "[ \t]\+\(|\)\+"
+syn match icinga2Operators "[ \t]\+\(&&\)\+"
+syn match icinga2Operators "[ \t]\+\(||\)\+"
+syn match icinga2Operators "[ \t]\+\(=>\)\+"
+syn match icinga2Operators "[ \t]\+\(+=\)\+"
+syn match icinga2Operators "[ \t]\+\(-=\)\+"
+syn match icinga2Operators "[ \t]\+\(*=\)\+"
+syn match icinga2Operators "[ \t]\+\(/=\)\+"
+
+
+" ########################################
+" ### Global settings
+
+" Global constants
+" https://icinga.com/docs/icinga2/snapshot/doc/17-language-reference/#icinga-2-specific-constants
+
+" Path specific constants
+syn keyword icinga2PathConstant CacheDir ConfigDir DataDir IncludeConfDir InitRunDir LocalStateDir LogDir ModAttrPath
+syn keyword icinga2PathConstant ObjectsPath PidPath PkgDataDir PrefixDir ProgramData RunDir SpoolDir StatePath SysconfDir
+syn keyword icinga2PathConstant VarsPath ZonesDir
+
+" Global constants
+syn keyword icinga2GlobalConstant Vars NodeName Environment RunAsUser RunAsGroup MaxConcurrentChecks ApiBindHost ApiBindPort EventEngine AttachDebugger
+
+" Application runtime constants
+syn keyword icinga2GlobalConstant PlatformName PlatformVersion PlatformKernel PlatformKernelVersion BuildCompilerName BuildCompilerVersion BuildHostName
+syn keyword icinga2GlobalConstant ApplicationVersion
+
+" User proposed constants
+syn keyword icinga2UserConstant PluginDir ContribPluginContribDir ManubulonPluginDir TicketSalt NodeName ZoneName
+
+" Global types
+syn keyword icinga2GlobalType Number String Boolean Array Dictionary Value Object ConfigObject Command CheckResult
+syn keyword icinga2GlobalType Checkable CustomVarObject DbConnection Type PerfdataValue Comment Downtime Logger Application
+
+" Built-in Namespaces
+syn match icinga2Namespace contained "\(Icinga\.\|Internal\.\|System\.Configuration\.\|System\.\)"
+
+" Additional constants from Namespaces
+syn keyword icinga2GlobalConstant LogCritical LogDebug LogInformation LogNotice LogWarning MatchAll MatchAny
+" icinga2 console; keys(Icinga).join(" ")
+syn keyword icinga2GlobalConstant Acknowledgement Critical Custom DbCatAcknowledgement DbCatCheck DbCatComment DbCatConfig DbCatDowntime DbCatEventHandler DbCatEverything DbCatExternalCommand DbCatFlapping DbCatLog DbCatNotification DbCatProgramStatus DbCatRetention DbCatState DbCatStateHistory Down DowntimeEnd DowntimeNoChildren DowntimeNonTriggeredChildren DowntimeRemoved DowntimeStart DowntimeTriggeredChildren FlappingEnd FlappingStart HostDown HostUp OK Problem Recovery ServiceCritical ServiceOK ServiceUnknown ServiceWarning Unknown Up Warning
+
+" Value types
+"syn match valueNumber "[0-9]*"
+syn keyword valueBoolean contained true false
+syn keyword valueNull contained null
+
+
+" ########################################
+" ### Where to apply
+
+syn region icingaDefBody start='{' end='}'
+ \ contains=icinga2Comment, icinga2LineComment, StringS, StringD, macro, Braces, Parens, Lambda, icinga2ObjType, icinga2ObjDef,
+ \ icinga2ApplyDef, icinga2ObjAttr, icinga2Keyword, icinga2AssignCond,
+ \ icinga2Cond, icinga2Loop, icinga2Exception, icinga2Debugger, icinga2Operators, icinga2GFunction, icinga2AFunction,
+ \ icinga2MathFunction, icinga2GlobalConstant, icinga2PathConstant, icinga2UserConstant, icinga2Gconst, icinga2Namespace,
+ \ icinga2JsonFunction, icinga2NumberFunction, icinga2BoolFunction,
+ \ icinga2StrFunction, icinga2ObjectFunction, icinga2TypeFunction, icinga2ArrFunction, icinga2DictFunction,
+ \ icinga2DTFunction, valueNumber, valueBoolean, valueNull
+
+
+" ########################################
+" ### Highlighting
+hi link icinga2Comment Comment
+hi link icinga2LineComment Comment
+hi link icinga2CommentTodo Todo
+
+hi link Braces Function
+hi link Parens Function
+hi link Lambda Function
+
+hi link macro Underlined
+hi link StringS String
+hi link StringD String
+hi link angleBrackets String
+
+hi link icinga2ObjType Type
+hi link icinga2ObjDef Statement
+
+hi link icinga2ApplyDef Statement
+hi link icinga2ObjAttr Define
+hi link icinga2Keyword Keyword
+
+hi link icinga2AssignCond Conditional
+
+hi link icinga2Cond Conditional
+hi link icinga2Loop Repeat
+hi link icinga2Exception Conditional
+hi link icinga2Debugger Debug
+
+hi link icinga2Operators Operator
+
+hi link icinga2AFunction Function
+hi link icinga2MathFunction Function
+hi link icinga2GFunction Function
+hi link icinga2JsonFunction Function
+hi link icinga2NumberFunction Function
+hi link icinga2BoolFunction Function
+hi link icinga2StrFunction Function
+hi link icinga2ObjectFunction Function
+hi link icinga2TypeFunction Function
+hi link icinga2ArrFunction Function
+hi link icinga2DictFunction Function
+hi link icinga2DTFunction Function
+
+hi link icinga2GlobalConstant Statement
+hi link icinga2PathConstant Statement
+hi link icinga2UserConstant Statement
+hi link icinga2Gconst Statement
+hi link icinga2Namespace Statement
+
+hi link valueNumber Number
+hi link valueBoolean Boolean
+hi link valueNull Special
diff --git a/tools/win32/build-choco.ps1 b/tools/win32/build-choco.ps1
new file mode 100644
index 0000000..32138bd
--- /dev/null
+++ b/tools/win32/build-choco.ps1
@@ -0,0 +1,42 @@
+Set-PsDebug -Trace 1
+
+if(-not (Test-Path "$($env:ProgramData)\chocolatey\choco.exe")) {
+ throw "Could not find Choco executable. Abort."
+}
+
+if (-not (Test-Path env:ICINGA2_BUILDPATH)) {
+ $env:ICINGA2_BUILDPATH = '.\build'
+}
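+
+# Typical invocation (assumption): run from the source tree root once the MSI
+# packages have been built, e.g.:
+#   $env:ICINGA2_BUILDPATH = '.\build'; .\tools\win32\build-choco.ps1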
+
+if(-not (Test-Path "$($env:ICINGA2_BUILDPATH)\choco\chocolateyInstall.ps1.template")) {
+ throw "Could not find Chocolatey install script template. Abort."
+}
+
+$chocoInstallScriptTemplatePath = "$($env:ICINGA2_BUILDPATH)\choco\chocolateyInstall.ps1.template"
+$chocoInstallScript = Get-Content $chocoInstallScriptTemplatePath
+
+if(-not (Test-Path "$($env:ICINGA2_BUILDPATH)\*-x86.msi")) {
+ throw "Could not find Icinga 2 32 bit MSI package. Abort."
+}
+
+$hashMSIpackage32 = Get-FileHash "$($env:ICINGA2_BUILDPATH)\*-x86.msi"
+Write-Output "File Hash for 32 bit MSI package: $($hashMSIpackage32.Hash)."
+
+if(-not (Test-Path "$($env:ICINGA2_BUILDPATH)\*-x86_64.msi")) {
+ throw "Could not find Icinga 2 64 bit MSI package. Abort."
+}
+
+$hashMSIpackage64 = Get-FileHash "$($env:ICINGA2_BUILDPATH)\*-x86_64.msi"
+Write-Output "File Hash for 32 bit MSI package: $($hashMSIpackage64.Hash)"
+
+$chocoInstallScript = $chocoInstallScript.Replace("%CHOCO_32BIT_CHECKSUM%", "$($hashMSIpackage32.Hash)")
+$chocoInstallScript = $chocoInstallScript.Replace("%CHOCO_64BIT_CHECKSUM%", "$($hashMSIpackage64.Hash)")
+Write-Output $chocoInstallScript
+
+Set-Content -Path "$($env:ICINGA2_BUILDPATH)\choco\chocolateyInstall.ps1" -Value $chocoInstallScript
+
+cd "$($env:ICINGA2_BUILDPATH)\choco"
+& "$($env:ProgramData)\chocolatey\choco.exe" "pack"
+cd "..\.."
+
+Move-Item -Path "$($env:ICINGA2_BUILDPATH)\choco\*.nupkg" -Destination "$($env:ICINGA2_BUILDPATH)"
\ No newline at end of file
diff --git a/tools/win32/build.ps1 b/tools/win32/build.ps1
new file mode 100644
index 0000000..33346a9
--- /dev/null
+++ b/tools/win32/build.ps1
@@ -0,0 +1,27 @@
+Set-PsDebug -Trace 1
+
+if (-not (Test-Path env:ICINGA2_BUILDPATH)) {
+ $env:ICINGA2_BUILDPATH = '.\build'
+}
+
+if (-not (Test-Path env:CMAKE_BUILD_TYPE)) {
+ $env:CMAKE_BUILD_TYPE = 'RelWithDebInfo'
+}
+
+if (-not (Test-Path $env:ICINGA2_BUILDPATH)) {
+ Write-Host "Path '$env:ICINGA2_BUILDPATH' does not exist!"
+ exit 1
+}
+
+if (-not (Test-Path env:CMAKE_PATH)) {
+ $env:CMAKE_PATH = 'C:\Program Files\CMake\bin'
+}
+if (-not ($env:PATH -contains $env:CMAKE_PATH)) {
+ $env:PATH = $env:CMAKE_PATH + ';' + $env:PATH
+}
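+
+# A configured build tree is expected in $env:ICINGA2_BUILDPATH; configure.ps1
+# in the same directory is typically run beforehand to generate it (assumption).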
+
+cmake.exe --build "$env:ICINGA2_BUILDPATH" --target ALL_BUILD --config $env:CMAKE_BUILD_TYPE
+if ($lastexitcode -ne 0) { exit $lastexitcode }
+
+cmake.exe --build "$env:ICINGA2_BUILDPATH" --target PACKAGE --config $env:CMAKE_BUILD_TYPE
+if ($lastexitcode -ne 0) { exit $lastexitcode }
diff --git a/tools/win32/configure-dev.ps1 b/tools/win32/configure-dev.ps1
new file mode 100644
index 0000000..6fc12b3
--- /dev/null
+++ b/tools/win32/configure-dev.ps1
@@ -0,0 +1,69 @@
+Set-PsDebug -Trace 1
+
+# Specify default targets for VS 2019 for developers.
+
+if (-not (Test-Path env:ICINGA2_BUILDPATH)) {
+ $env:ICINGA2_BUILDPATH = '.\debug'
+}
+if (-not (Test-Path "$env:ICINGA2_BUILDPATH")) {
+ mkdir "$env:ICINGA2_BUILDPATH" | out-null
+}
+
+if (-not (Test-Path env:CMAKE_BUILD_TYPE)) {
+ $env:CMAKE_BUILD_TYPE = 'Debug'
+}
+if (-not (Test-Path env:ICINGA2_INSTALLPATH)) {
+ $env:ICINGA2_INSTALLPATH = 'C:\Program Files\Icinga2-debug'
+}
+if (-not (Test-Path env:CMAKE_PATH)) {
+ $env:CMAKE_PATH = 'C:\Program Files\CMake\bin'
+}
+if (-not ($env:PATH -contains $env:CMAKE_PATH)) {
+ $env:PATH = $env:CMAKE_PATH + ';' + $env:PATH
+}
+if (-not (Test-Path env:CMAKE_GENERATOR)) {
+ $env:CMAKE_GENERATOR = 'Visual Studio 16 2019'
+}
+if (-not (Test-Path env:CMAKE_GENERATOR_PLATFORM)) {
+ $env:CMAKE_GENERATOR_PLATFORM = 'x64'
+}
+if (-not (Test-Path env:OPENSSL_ROOT_DIR)) {
+ $env:OPENSSL_ROOT_DIR = 'c:\local\OpenSSL-Win64'
+}
+if (-not (Test-Path env:BOOST_ROOT)) {
+ $env:BOOST_ROOT = 'c:\local\boost_1_84_0'
+}
+if (-not (Test-Path env:BOOST_LIBRARYDIR)) {
+ $env:BOOST_LIBRARYDIR = 'c:\local\boost_1_84_0\lib64-msvc-14.2'
+}
+if (-not (Test-Path env:FLEX_BINARY)) {
+ $env:FLEX_BINARY = 'C:\ProgramData\chocolatey\bin\win_flex.exe'
+}
+if (-not (Test-Path env:BISON_BINARY)) {
+ $env:BISON_BINARY = 'C:\ProgramData\chocolatey\bin\win_bison.exe'
+}
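+
+# All defaults above can be overridden via environment variables before invoking
+# this script, e.g. (assumption, adjust to the local dependency layout):
+#   $env:BOOST_ROOT = 'C:\local\boost_1_84_0'
+#   $env:OPENSSL_ROOT_DIR = 'C:\local\OpenSSL-Win64'
+#   .\tools\win32\configure-dev.ps1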
+
+$sourcePath = Get-Location
+
+cd "$env:ICINGA2_BUILDPATH"
+
+# Invalidate cache in case something in the build environment changed
+if (Test-Path CMakeCache.txt) {
+ Remove-Item -Force CMakeCache.txt | Out-Null
+}
+
+& cmake.exe "$sourcePath" `
+ -DCMAKE_BUILD_TYPE="$env:CMAKE_BUILD_TYPE" `
+ -G "$env:CMAKE_GENERATOR" -A "$env:CMAKE_GENERATOR_PLATFORM" -DCPACK_GENERATOR=WIX `
+ -DCMAKE_INSTALL_PREFIX="$env:ICINGA2_INSTALLPATH" `
+ -DOPENSSL_ROOT_DIR="$env:OPENSSL_ROOT_DIR" `
+ -DBOOST_LIBRARYDIR="$env:BOOST_LIBRARYDIR" `
+ -DBOOST_INCLUDEDIR="$env:BOOST_ROOT" `
+ -DFLEX_EXECUTABLE="$env:FLEX_BINARY" `
+ -DBISON_EXECUTABLE="$env:BISON_BINARY"
+
+cd "$sourcePath"
+
+if ($lastexitcode -ne 0) {
+ exit $lastexitcode
+}
diff --git a/tools/win32/configure.ps1 b/tools/win32/configure.ps1
new file mode 100644
index 0000000..55f0a58
--- /dev/null
+++ b/tools/win32/configure.ps1
@@ -0,0 +1,72 @@
+Set-PsDebug -Trace 1
+
+if (-not (Test-Path env:ICINGA2_BUILDPATH)) {
+ $env:ICINGA2_BUILDPATH = '.\build'
+}
+
+if (-not (Test-Path env:CMAKE_BUILD_TYPE)) {
+ $env:CMAKE_BUILD_TYPE = 'RelWithDebInfo'
+}
+if (-not (Test-Path "$env:ICINGA2_BUILDPATH")) {
+ mkdir "$env:ICINGA2_BUILDPATH" | out-null
+}
+if (-not (Test-Path env:CMAKE_PATH)) {
+ $env:CMAKE_PATH = 'C:\Program Files\CMake\bin'
+}
+if (-not ($env:PATH -contains $env:CMAKE_PATH)) {
+ $env:PATH = $env:CMAKE_PATH + ';' + $env:PATH
+}
+if (-not (Test-Path env:CMAKE_GENERATOR)) {
+ $env:CMAKE_GENERATOR = 'Visual Studio 16 2019'
+}
+if (-not (Test-Path env:BITS)) {
+ $env:BITS = 64
+}
+if (-not (Test-Path env:CMAKE_GENERATOR_PLATFORM)) {
+ if ($env:BITS -eq 32) {
+ $env:CMAKE_GENERATOR_PLATFORM = 'Win32'
+ } else {
+ $env:CMAKE_GENERATOR_PLATFORM = 'x64'
+ }
+}
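+
+# Example (assumption): a 32-bit configuration can be requested by setting BITS
+# before invoking this script, which also switches the dependency defaults below:
+#   $env:BITS = 32; .\tools\win32\configure.ps1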
+if (-not (Test-Path env:OPENSSL_ROOT_DIR)) {
+ $env:OPENSSL_ROOT_DIR = "c:\local\OpenSSL_3_0_12-Win${env:BITS}"
+}
+if (-not (Test-Path env:BOOST_ROOT)) {
+ $env:BOOST_ROOT = "c:\local\boost_1_84_0-Win${env:BITS}"
+}
+if (-not (Test-Path env:BOOST_LIBRARYDIR)) {
+ $env:BOOST_LIBRARYDIR = "c:\local\boost_1_84_0-Win${env:BITS}\lib${env:BITS}-msvc-14.2"
+}
+if (-not (Test-Path env:FLEX_BINARY)) {
+ $env:FLEX_BINARY = 'C:\ProgramData\chocolatey\bin\win_flex.exe'
+}
+if (-not (Test-Path env:BISON_BINARY)) {
+ $env:BISON_BINARY = 'C:\ProgramData\chocolatey\bin\win_bison.exe'
+}
+
+$sourcePath = Get-Location
+
+cd "$env:ICINGA2_BUILDPATH"
+
+#-DCMAKE_INSTALL_PREFIX="C:\Program Files\Icinga2" `
+
+# Invalidate cache in case something in the build environment changed
+if (Test-Path CMakeCache.txt) {
+ Remove-Item -Force CMakeCache.txt | Out-Null
+}
+
+& cmake.exe "$sourcePath" `
+ -DCMAKE_BUILD_TYPE="$env:CMAKE_BUILD_TYPE" `
+ -G "$env:CMAKE_GENERATOR" -A "$env:CMAKE_GENERATOR_PLATFORM" -DCPACK_GENERATOR=WIX `
+ -DOPENSSL_ROOT_DIR="$env:OPENSSL_ROOT_DIR" `
+ -DBOOST_LIBRARYDIR="$env:BOOST_LIBRARYDIR" `
+ -DBOOST_INCLUDEDIR="$env:BOOST_ROOT" `
+ -DFLEX_EXECUTABLE="$env:FLEX_BINARY" `
+ -DBISON_EXECUTABLE="$env:BISON_BINARY"
+
+cd "$sourcePath"
+
+if ($lastexitcode -ne 0) {
+ exit $lastexitcode
+}
diff --git a/tools/win32/load-vsenv.ps1 b/tools/win32/load-vsenv.ps1
new file mode 100644
index 0000000..c5323dc
--- /dev/null
+++ b/tools/win32/load-vsenv.ps1
@@ -0,0 +1,59 @@
+# For the reasoning behind this env handling, see
+# https://help.appveyor.com/discussions/questions/18777-how-to-use-vcvars64bat-from-powershell#comment_44999171
+
+Set-PsDebug -Trace 1
+
+$SOURCE = Get-Location
+
+if (Test-Path env:ICINGA2_BUILDPATH) {
+ $BUILD = $env:ICINGA2_BUILDPATH
+} else {
+ $BUILD = "${SOURCE}\Build"
+}
+
+if (-not (Test-Path $BUILD)) {
+ mkdir $BUILD | Out-Null
+}
+
+if (Test-Path env:VS_INSTALL_PATH) {
+ $VSBASE = $env:VS_INSTALL_PATH
+} else {
+ $VSBASE = "C:\Program Files (x86)\Microsoft Visual Studio\2019"
+}
+
+if (Test-Path env:BITS) {
+ $bits = $env:BITS
+} else {
+ $bits = 64
+}
+
+# Execute vcvars in cmd and store env
+$vcvars_locations = @(
+ "${VSBASE}\BuildTools\VC\Auxiliary\Build\vcvars${bits}.bat"
+ "${VSBASE}\Community\VC\Auxiliary\Build\vcvars${bits}.bat"
+ "${VSBASE}\Enterprise\VC\Auxiliary\Build\vcvars${bits}.bat"
+)
+
+$vcvars = $null
+foreach ($file in $vcvars_locations) {
+ if (Test-Path $file) {
+ $vcvars = $file
+ break
+ }
+}
+
+if ($vcvars -eq $null) {
+ throw "Could not get Build environment script at locations: ${vcvars_locations}"
+}
+
+cmd.exe /c "call `"${vcvars}`" && set > `"${BUILD}\vcvars.txt`""
+if ($LastExitCode -ne 0) {
+ throw "Could not load Build environment from: ${vcvars}"
+}
+
+# Load environment for PowerShell
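+# Only variables whose names start with "VSCMD" are imported back into this
+# PowerShell session by the pattern below.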
+Get-Content "${BUILD}\vcvars.txt" | Foreach-Object {
+ if ($_ -match "^(VSCMD.*?)=(.*)$") {
+ Set-Content ("env:" + $matches[1]) $matches[2]
+ }
+}
diff --git a/tools/win32/test.ps1 b/tools/win32/test.ps1
new file mode 100644
index 0000000..d7ad90c
--- /dev/null
+++ b/tools/win32/test.ps1
@@ -0,0 +1,33 @@
+Set-PsDebug -Trace 1
+
+if (-not (Test-Path env:ICINGA2_BUILDPATH)) {
+ $env:ICINGA2_BUILDPATH = 'build'
+}
+
+if (-not (Test-Path env:CMAKE_BUILD_TYPE)) {
+ $env:CMAKE_BUILD_TYPE = 'RelWithDebInfo'
+}
+
+[string]$pwd = Get-Location
+
+if (-not (Test-Path $env:ICINGA2_BUILDPATH)) {
+ Write-Host "Path '$pwd\$env:ICINGA2_BUILDPATH' does not exist!"
+ exit 1
+}
+
+if (-not (Test-Path env:CMAKE_PATH)) {
+ $env:CMAKE_PATH = 'C:\Program Files\CMake\bin'
+}
+if (-not ($env:PATH -contains $env:CMAKE_PATH)) {
+ $env:PATH = $env:CMAKE_PATH + ';' + $env:PATH
+}
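+
+# Runs the test suite of an already configured and built tree (assumption:
+# configure.ps1 and build.ps1 were run with the same ICINGA2_BUILDPATH).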
+
+cd "$env:ICINGA2_BUILDPATH"
+
+ctest.exe -C "${env:CMAKE_BUILD_TYPE}" -T test -O $env:ICINGA2_BUILDPATH/Test.xml --output-on-failure --log_level=all
+if ($lastexitcode -ne 0) {
+ cd ..
+ exit $lastexitcode
+}
+
+cd ..